code
stringlengths 22
1.05M
| apis
listlengths 1
3.31k
| extract_api
stringlengths 75
3.25M
|
---|---|---|
from cloudbio.galaxy.tools import _install_application
def install_tool(options):
    """Install a single Galaxy tool described by *options*.

    Pulls the tool name, version and (optional) install directory out of
    the *options* mapping and delegates to ``_install_application``.
    """
    tool_name = options.get("galaxy_tool_name")
    tool_version = options.get("galaxy_tool_version")
    tool_dir = options.get("galaxy_tool_dir", None)
    _install_application(tool_name, tool_version, tool_install_dir=tool_dir)
# Dispatch table mapping configure-action names to their handler functions.
configure_actions = {
    "install_galaxy_tool": install_tool,
}
|
[
"cloudbio.galaxy.tools._install_application"
] |
[((235, 300), 'cloudbio.galaxy.tools._install_application', '_install_application', (['name', 'version'], {'tool_install_dir': 'install_dir'}), '(name, version, tool_install_dir=install_dir)\n', (255, 300), False, 'from cloudbio.galaxy.tools import _install_application\n')]
|
import os
def getRootPath():
    """Return the absolute path of the project root, two directories above this file."""
    here = os.path.dirname(__file__)
    return os.path.abspath(os.path.join(here, os.pardir, os.pardir))
def getProjectAbsPath(*path):
    """Join *path* components onto the project root and return the result."""
    root = getRootPath()
    return os.path.join(root, *path)
def getCachePath(*path):
    """Return an absolute path inside the project's ``.cache`` directory."""
    return getProjectAbsPath(".cache", *path)
def getTemplatePath(*path):
    """Return an absolute path inside the ``cli/templates`` directory."""
    return getProjectAbsPath("cli", "templates", *path)
def getNodeBinPath(name):
    """Return the absolute path of an executable installed in ``node_modules/.bin``."""
    return getProjectAbsPath("node_modules", ".bin", name)
def getPipEnvBinPath(name):
    """Return the absolute path of an executable in the ``env/bin`` virtualenv directory."""
    return getProjectAbsPath("env", "bin", name)
def getCurrentAbsPath(path="."):
    """Return *path* as an absolute, normalized path.

    Relative paths are resolved against the current working directory.
    ``os.path.abspath`` already joins a relative path with ``os.getcwd()``
    and normalizes the result, so the original's extra recursive
    join-then-recurse step was redundant and is removed.
    """
    return os.path.abspath(path)
|
[
"os.getcwd",
"os.path.isabs",
"os.path.abspath",
"os.path.dirname"
] |
[((556, 575), 'os.path.isabs', 'os.path.isabs', (['path'], {}), '(path)\n', (569, 575), False, 'import os\n'), ((592, 613), 'os.path.abspath', 'os.path.abspath', (['path'], {}), '(path)\n', (607, 613), False, 'import os\n'), ((71, 96), 'os.path.dirname', 'os.path.dirname', (['__file__'], {}), '(__file__)\n', (86, 96), False, 'import os\n'), ((670, 681), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (679, 681), False, 'import os\n')]
|
from importlib import reload, import_module
# Project imports
from patchy import patchy
# Legacy Django (pre-3.2) hook pointing at this package's AppConfig class.
default_app_config = 'django_cte.apps.DjangoCTEConfig'
def patch_cte():
    """ Apply CTE monkey patches to Django.
    At present these patches must be updated manually to conform with new CTE
    implementations, but this is only necessary to use new functionality.
    Order of patching *matters* due to namespace reload.
    """
    with patchy('django.db.models', 'django_cte') as p:
        # Patch the expression/compiler/subquery modules before the reload
        # below; per the docstring, this ordering is load-bearing.
        p.mod('expressions').auto()
        p.mod('sql.compiler').auto()
        p.mod('sql.subqueries').auto()
        # Force reload so that new query types are imported into namespace
        reload(import_module('django.db.models.sql'))
        p.mod('query').auto()
        p.cls('manager.BaseManager').auto()
        p.cls('base.Model').auto()
|
[
"patchy.patchy",
"importlib.import_module"
] |
[((448, 488), 'patchy.patchy', 'patchy', (['"""django.db.models"""', '"""django_cte"""'], {}), "('django.db.models', 'django_cte')\n", (454, 488), False, 'from patchy import patchy\n'), ((697, 734), 'importlib.import_module', 'import_module', (['"""django.db.models.sql"""'], {}), "('django.db.models.sql')\n", (710, 734), False, 'from importlib import reload, import_module\n')]
|
# Imports pickle library used to store trained model
import pickle
# Open trained model and assigned to variable
# NOTE(review): pickle.load can execute arbitrary code from the file; only
# load model files from a trusted source.
with open('property_model_Bristle.pickle', 'rb') as file:
    lr = pickle.load(file)
# Predict price based on console input, use for debugging
input_distance = float(input("Please enter the distance to the train station: "))
input_bedrooms = int(input("Please input the number of bedrooms of your property: "))
input_bathrooms = int(input("Please input the number of bathrooms of your property: "))
# lr is presumably a fitted scikit-learn-style regressor (predict takes a
# 2-D array of samples) -- confirm against the training script.
predicted_price = lr.predict([[input_distance, input_bedrooms, input_bathrooms]])
print(round(predicted_price[0],2))
|
[
"pickle.load"
] |
[((181, 198), 'pickle.load', 'pickle.load', (['file'], {}), '(file)\n', (192, 198), False, 'import pickle\n')]
|
# scrapes AWS resource type and action mapping from AWS docs
import json
import time

from selenium import webdriver
from selenium.common.exceptions import TimeoutException
from selenium.webdriver.common.by import By
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC
from selenium.webdriver.chrome.options import Options
from selenium.webdriver.opera.options import Options
from selenium.webdriver.firefox.options import Options
# NOTE(review): three Options classes are imported above (chrome, opera,
# firefox) under the same name; the firefox import is last, so this is
# Firefox's Options -- matching the webdriver.Firefox instances used below.
options = Options()
options.add_argument('--headless')
options.add_argument('--disable-gpu')
# takes in a URL to a IAM service's actions, resources, and condition keys list and scrapes the tables
def get_tables(url):
    """Scrape one IAM service's actions/resources/condition-keys page.

    Fills the module-level ``actions_json`` (resource type -> list of action
    names) and ``resources_json`` (resource type -> ARN format) dicts under
    the service's IAM namespace, then closes the browser.
    """
    # browser = webdriver.Chrome(options = options)
    # browser = webdriver.Opera(options = options, executable_path = './operadriver')
    browser = webdriver.Firefox(options = options, executable_path = './geckodriver')
    browser.get(url)
    # give the JS-rendered page a head start before the explicit wait below
    time.sleep(5)
    wait = WebDriverWait(browser, 10)
    try:
        # wait for all the JSON elements to load before scraping.
        wait.until(EC.presence_of_element_located((By.TAG_NAME, 'awsdocs-view')))
    except TimeoutException:
        # BUG FIX: WebDriverWait raises Selenium's TimeoutException, not the
        # builtin TimeoutError, so the original handler never fired and a
        # slow page crashed the whole scrape.
        pass
    else:
        # get IAM service name and tables
        # (find_elements(By...) replaces the find_elements_by_* helpers that
        # were removed in Selenium 4; same behavior, and consistent with the
        # By.TAG_NAME calls already used below.)
        namespace = browser.find_elements(By.XPATH, "//div[@id='main-col-body']/p/code")[0].text
        tables = browser.find_elements(By.TAG_NAME, 'table')
        if len(tables) > 0:
            # first table is the list of actions
            actions = tables[0].find_elements(By.TAG_NAME, 'tr')
            # second table is the list of resource types
            if len(tables) > 1:
                resources = tables[1].find_elements(By.TAG_NAME, 'tr')
        namespace_json = dict()
        if len(tables) > 0:
            previous_name = ''
            # store resource type -> actions mapping
            for action in actions:
                fields = list(action.find_elements(By.TAG_NAME, 'td'))
                if len(fields) == 3:
                    # continuation row: an extra resource type for the
                    # previously-seen action
                    resource_type = str(fields[0].text.replace('*', ''))
                    if resource_type in namespace_json:
                        namespace_json[resource_type].append(previous_name)
                    else:
                        namespace_json[resource_type] = [previous_name]
                elif len(fields) > 3:
                    # full row: action name in column 0, resource type in column 3
                    resource_type = str(fields[3].text.replace('*', ''))
                    action_name = fields[0].text.replace(' [permission only]', '')
                    action_name = action_name.lower()
                    if resource_type in namespace_json:
                        namespace_json[resource_type].append(action_name)
                    else:
                        namespace_json[resource_type] = [action_name]
                    previous_name = action_name
            # save the constraints
            actions_json[namespace] = namespace_json
            namespace_json = dict()
        #if there is a resource type, scrape it and its ARN format
        if len(tables) > 1:
            for resource in resources:
                fields = list(resource.find_elements(By.TAG_NAME, 'td'))
                if len(fields) > 1:
                    namespace_json[fields[0].text] = fields[1].text
            # save the constraints
            resources_json[namespace] = namespace_json
    finally:
        browser.close()
# browser = webdriver.Chrome(options = options)
# browser = webdriver.Opera(options = options, executable_path = './operadriver')
# Top-level driver used only to enumerate the per-service page URLs.
browser = webdriver.Firefox(options = options, executable_path = './geckodriver')
# open the general page listing the actions, resource types, and condition keys for all IAM services.
aws_reference = 'https://docs.aws.amazon.com/IAM/latest/UserGuide/reference_policies_actions-resources-contextkeys.html'
browser.get(aws_reference)
wait = WebDriverWait(browser, 10)
try:
    # wait until page has fully loaded all the JSON elements
    wait.until(EC.presence_of_element_located((By.CLASS_NAME, 'highlights')))
except:
    # NOTE(review): bare except deliberately continues with whatever has
    # rendered; consider narrowing to Selenium's TimeoutException.
    pass
# Accumulators filled in by get_tables(), keyed by IAM namespace.
actions_json = {}
resources_json = {}
# get list of all services
rows = browser.find_elements_by_xpath("//div[@id='main-col-body']/div[@class='highlights']/ul/li")
# iterate through services and scrape their tables
for row in rows:
    a_path = row.find_elements_by_tag_name('a')[0]
    url = a_path.get_attribute('href')
    get_tables(url)
    print('{}...done'.format(url))
browser.quit()
# dump constraints to files
# FIX: use context managers so the file handles are closed even if
# json.dumps or write raises (the original open()/close() pairs leaked
# the handle on error).
with open('actions.json', 'w') as file:
    file.write(json.dumps(actions_json, indent=4))
with open('resources.json', 'w') as file:
    file.write(json.dumps(resources_json, indent=4))
|
[
"selenium.webdriver.support.expected_conditions.presence_of_element_located",
"selenium.webdriver.Firefox",
"json.dumps",
"time.sleep",
"selenium.webdriver.firefox.options.Options",
"selenium.webdriver.support.ui.WebDriverWait"
] |
[((455, 464), 'selenium.webdriver.firefox.options.Options', 'Options', ([], {}), '()\n', (462, 464), False, 'from selenium.webdriver.firefox.options import Options\n'), ((3025, 3092), 'selenium.webdriver.Firefox', 'webdriver.Firefox', ([], {'options': 'options', 'executable_path': '"""./geckodriver"""'}), "(options=options, executable_path='./geckodriver')\n", (3042, 3092), False, 'from selenium import webdriver\n'), ((3355, 3381), 'selenium.webdriver.support.ui.WebDriverWait', 'WebDriverWait', (['browser', '(10)'], {}), '(browser, 10)\n', (3368, 3381), False, 'from selenium.webdriver.support.ui import WebDriverWait\n'), ((806, 873), 'selenium.webdriver.Firefox', 'webdriver.Firefox', ([], {'options': 'options', 'executable_path': '"""./geckodriver"""'}), "(options=options, executable_path='./geckodriver')\n", (823, 873), False, 'from selenium import webdriver\n'), ((899, 912), 'time.sleep', 'time.sleep', (['(5)'], {}), '(5)\n', (909, 912), False, 'import time\n'), ((921, 947), 'selenium.webdriver.support.ui.WebDriverWait', 'WebDriverWait', (['browser', '(10)'], {}), '(browser, 10)\n', (934, 947), False, 'from selenium.webdriver.support.ui import WebDriverWait\n'), ((3993, 4027), 'json.dumps', 'json.dumps', (['actions_json'], {'indent': '(4)'}), '(actions_json, indent=4)\n', (4003, 4027), False, 'import json\n'), ((4089, 4125), 'json.dumps', 'json.dumps', (['resources_json'], {'indent': '(4)'}), '(resources_json, indent=4)\n', (4099, 4125), False, 'import json\n'), ((3458, 3519), 'selenium.webdriver.support.expected_conditions.presence_of_element_located', 'EC.presence_of_element_located', (["(By.CLASS_NAME, 'highlights')"], {}), "((By.CLASS_NAME, 'highlights'))\n", (3488, 3519), True, 'from selenium.webdriver.support import expected_conditions as EC\n'), ((1028, 1089), 'selenium.webdriver.support.expected_conditions.presence_of_element_located', 'EC.presence_of_element_located', (["(By.TAG_NAME, 'awsdocs-view')"], {}), "((By.TAG_NAME, 'awsdocs-view'))\n", (1058, 
1089), True, 'from selenium.webdriver.support import expected_conditions as EC\n')]
|
# -*- coding: utf-8 -*-
# pylint: disable=missing-docstring,unused-import,reimported
import pytest # type: ignore
import mc_flow_sim.mc_flow_sim as mc
# walk() is expected to return None for every flavour of empty input.
def test_walk_ok_empty_string():
    empty = ''
    assert mc.walk(empty) is None
def test_walk_ok_empty_list():
    seq = []
    assert mc.walk(seq) is None
def test_walk_ok_empty_set():
    # NOTE(review): {} is an empty dict, not an empty set, despite the name.
    seq = {}
    assert mc.walk(seq) is None
def test_walk_ok_empty_dict():
    seq = dict()
    assert mc.walk(seq) is None
def test_walk_ok_empty_tuple():
    seq = tuple()
    assert mc.walk(seq) is None
# Non-empty inputs: walk() returns a single element drawn from the sequence.
def test_walk_ok_string():
    string = "abc"
    step = mc.walk(string)
    assert len(step) == 1
    assert step in string
def test_walk_ok_string_list():
    # with all elements identical, the returned step is deterministic
    the_same = "a"
    seq = [the_same, the_same, the_same]
    assert mc.walk(seq) == the_same
def test_walk_ok_range():
    a_range = range(42)
    step = mc.walk(a_range)
    assert step in a_range
    assert isinstance(step, int)
def test_walk_ok_function_list():
    # elements may be arbitrary objects, e.g. functions
    the_same = print
    seq = [the_same, the_same, the_same]
    assert mc.walk(seq) == the_same
def test_walk_ok_iterator_list():
    the_same = iter([1, 2, 3])
    seq = [the_same, the_same, the_same]
    assert mc.walk(seq) == the_same
def test_walk_ok_int_dict():
    seq = {0: "a", 1: "b"}
    assert mc.walk(seq) in seq.values()
# Failure modes: the expected exception type and message are pinned via
# pytest.raises(match=...) regexes.
def test_walk_nok_string_dict():
    # non-integer keys make positional lookup fail with KeyError(0)
    seq = {"a": "b"}
    message = r"0"
    with pytest.raises(KeyError, match=message):
        mc.walk(seq)
def test_walk_nok_wrong_type_none():
    bad = None
    assert mc.walk(bad) is None
def test_walk_nok_wrong_type_object():
    bad = object
    message = r"object of type 'type' has no len\(\)"
    with pytest.raises(TypeError, match=message):
        mc.walk(bad)
def test_walk_nok_wrong_type_int():
    bad = 42
    message = r"object of type 'int' has no len\(\)"
    with pytest.raises(TypeError, match=message):
        mc.walk(bad)
def test_walk_nok_wrong_type_float():
    bad = 3.1415
    message = r"object of type 'float' has no len\(\)"
    with pytest.raises(TypeError, match=message):
        mc.walk(bad)
def test_walk_nok_wrong_type_complex():
    bad = complex(1, -1)
    message = r"object of type 'complex' has no len\(\)"
    with pytest.raises(TypeError, match=message):
        mc.walk(bad)
def test_walk_nok_wrong_type_generator_expression():
    message = r"object of type 'generator' has no len\(\)"
    with pytest.raises(TypeError, match=message):
        mc.walk(n for n in range(1234))
def test_walk_nok_wrong_type_function():
    message = r"object of type 'builtin_function_or_method' has no len\(\)"
    with pytest.raises(TypeError, match=message):
        mc.walk(print)
|
[
"pytest.raises",
"mc_flow_sim.mc_flow_sim.walk"
] |
[((617, 632), 'mc_flow_sim.mc_flow_sim.walk', 'mc.walk', (['string'], {}), '(string)\n', (624, 632), True, 'import mc_flow_sim.mc_flow_sim as mc\n'), ((878, 894), 'mc_flow_sim.mc_flow_sim.walk', 'mc.walk', (['a_range'], {}), '(a_range)\n', (885, 894), True, 'import mc_flow_sim.mc_flow_sim as mc\n'), ((214, 228), 'mc_flow_sim.mc_flow_sim.walk', 'mc.walk', (['empty'], {}), '(empty)\n', (221, 228), True, 'import mc_flow_sim.mc_flow_sim as mc\n'), ((294, 306), 'mc_flow_sim.mc_flow_sim.walk', 'mc.walk', (['seq'], {}), '(seq)\n', (301, 306), True, 'import mc_flow_sim.mc_flow_sim as mc\n'), ((371, 383), 'mc_flow_sim.mc_flow_sim.walk', 'mc.walk', (['seq'], {}), '(seq)\n', (378, 383), True, 'import mc_flow_sim.mc_flow_sim as mc\n'), ((453, 465), 'mc_flow_sim.mc_flow_sim.walk', 'mc.walk', (['seq'], {}), '(seq)\n', (460, 465), True, 'import mc_flow_sim.mc_flow_sim as mc\n'), ((537, 549), 'mc_flow_sim.mc_flow_sim.walk', 'mc.walk', (['seq'], {}), '(seq)\n', (544, 549), True, 'import mc_flow_sim.mc_flow_sim as mc\n'), ((790, 802), 'mc_flow_sim.mc_flow_sim.walk', 'mc.walk', (['seq'], {}), '(seq)\n', (797, 802), True, 'import mc_flow_sim.mc_flow_sim as mc\n'), ((1064, 1076), 'mc_flow_sim.mc_flow_sim.walk', 'mc.walk', (['seq'], {}), '(seq)\n', (1071, 1076), True, 'import mc_flow_sim.mc_flow_sim as mc\n'), ((1208, 1220), 'mc_flow_sim.mc_flow_sim.walk', 'mc.walk', (['seq'], {}), '(seq)\n', (1215, 1220), True, 'import mc_flow_sim.mc_flow_sim as mc\n'), ((1302, 1314), 'mc_flow_sim.mc_flow_sim.walk', 'mc.walk', (['seq'], {}), '(seq)\n', (1309, 1314), True, 'import mc_flow_sim.mc_flow_sim as mc\n'), ((1415, 1453), 'pytest.raises', 'pytest.raises', (['KeyError'], {'match': 'message'}), '(KeyError, match=message)\n', (1428, 1453), False, 'import pytest\n'), ((1463, 1475), 'mc_flow_sim.mc_flow_sim.walk', 'mc.walk', (['seq'], {}), '(seq)\n', (1470, 1475), True, 'import mc_flow_sim.mc_flow_sim as mc\n'), ((1541, 1553), 'mc_flow_sim.mc_flow_sim.walk', 'mc.walk', (['bad'], {}), '(bad)\n', 
(1548, 1553), True, 'import mc_flow_sim.mc_flow_sim as mc\n'), ((1683, 1722), 'pytest.raises', 'pytest.raises', (['TypeError'], {'match': 'message'}), '(TypeError, match=message)\n', (1696, 1722), False, 'import pytest\n'), ((1732, 1744), 'mc_flow_sim.mc_flow_sim.walk', 'mc.walk', (['bad'], {}), '(bad)\n', (1739, 1744), True, 'import mc_flow_sim.mc_flow_sim as mc\n'), ((1858, 1897), 'pytest.raises', 'pytest.raises', (['TypeError'], {'match': 'message'}), '(TypeError, match=message)\n', (1871, 1897), False, 'import pytest\n'), ((1907, 1919), 'mc_flow_sim.mc_flow_sim.walk', 'mc.walk', (['bad'], {}), '(bad)\n', (1914, 1919), True, 'import mc_flow_sim.mc_flow_sim as mc\n'), ((2041, 2080), 'pytest.raises', 'pytest.raises', (['TypeError'], {'match': 'message'}), '(TypeError, match=message)\n', (2054, 2080), False, 'import pytest\n'), ((2090, 2102), 'mc_flow_sim.mc_flow_sim.walk', 'mc.walk', (['bad'], {}), '(bad)\n', (2097, 2102), True, 'import mc_flow_sim.mc_flow_sim as mc\n'), ((2236, 2275), 'pytest.raises', 'pytest.raises', (['TypeError'], {'match': 'message'}), '(TypeError, match=message)\n', (2249, 2275), False, 'import pytest\n'), ((2285, 2297), 'mc_flow_sim.mc_flow_sim.walk', 'mc.walk', (['bad'], {}), '(bad)\n', (2292, 2297), True, 'import mc_flow_sim.mc_flow_sim as mc\n'), ((2421, 2460), 'pytest.raises', 'pytest.raises', (['TypeError'], {'match': 'message'}), '(TypeError, match=message)\n', (2434, 2460), False, 'import pytest\n'), ((2630, 2669), 'pytest.raises', 'pytest.raises', (['TypeError'], {'match': 'message'}), '(TypeError, match=message)\n', (2643, 2669), False, 'import pytest\n'), ((2679, 2693), 'mc_flow_sim.mc_flow_sim.walk', 'mc.walk', (['print'], {}), '(print)\n', (2686, 2693), True, 'import mc_flow_sim.mc_flow_sim as mc\n')]
|
""" A test script to start Cassandra. """
import logging
import os
import sys
import cassandra_interface
sys.path.append(os.path.join(os.path.dirname(__file__), "../../lib"))
import monit_interface
def run():
    """ Starts up cassandra. """
    logging.warning("Starting Cassandra.")
    # Ask monit to start the Cassandra watch; is_group=False presumably
    # targets the single named watch rather than a watch group -- confirm
    # against monit_interface.start.
    monit_interface.start(cassandra_interface.CASSANDRA_MONIT_WATCH_NAME,
        is_group=False)
    logging.warning("Done!")
if __name__ == '__main__':
    run()
|
[
"logging.warning",
"os.path.dirname",
"monit_interface.start"
] |
[((248, 286), 'logging.warning', 'logging.warning', (['"""Starting Cassandra."""'], {}), "('Starting Cassandra.')\n", (263, 286), False, 'import logging\n'), ((289, 378), 'monit_interface.start', 'monit_interface.start', (['cassandra_interface.CASSANDRA_MONIT_WATCH_NAME'], {'is_group': '(False)'}), '(cassandra_interface.CASSANDRA_MONIT_WATCH_NAME,\n is_group=False)\n', (310, 378), False, 'import monit_interface\n'), ((381, 405), 'logging.warning', 'logging.warning', (['"""Done!"""'], {}), "('Done!')\n", (396, 405), False, 'import logging\n'), ((137, 162), 'os.path.dirname', 'os.path.dirname', (['__file__'], {}), '(__file__)\n', (152, 162), False, 'import os\n')]
|
from sqlalchemy import create_engine, Column, Integer, String, DATETIME
from sqlalchemy.ext.declarative import declarative_base
from datetime import datetime
# TODO: db_uri
# dialect+driver://username:password@host:port/database?charset=utf8
DB_URI = 'mysql+pymysql://root:[email protected]:3300/alembic_demo?charset=utf8'
engine = create_engine(DB_URI)
# NOTE(review): bind=engine on declarative_base is a legacy SQLAlchemy
# pattern (removed in 2.0) -- confirm the project's SQLAlchemy version.
Base = declarative_base(bind=engine)
# TODO: define the User model
class User(Base):
    __tablename__ = 'user'
    id = Column(Integer, primary_key=True, autoincrement=True)
    name = Column(String(50), nullable=False)
    # TODO: fields added for the Alembic migration demo
    age = Column(Integer, nullable=False)
    country = Column(String(50), nullable=False)
    create_time = Column(DATETIME, default=datetime.now)
|
[
"sqlalchemy.create_engine",
"sqlalchemy.ext.declarative.declarative_base",
"sqlalchemy.String",
"sqlalchemy.Column"
] |
[((334, 355), 'sqlalchemy.create_engine', 'create_engine', (['DB_URI'], {}), '(DB_URI)\n', (347, 355), False, 'from sqlalchemy import create_engine, Column, Integer, String, DATETIME\n'), ((364, 393), 'sqlalchemy.ext.declarative.declarative_base', 'declarative_base', ([], {'bind': 'engine'}), '(bind=engine)\n', (380, 393), False, 'from sqlalchemy.ext.declarative import declarative_base\n'), ((467, 520), 'sqlalchemy.Column', 'Column', (['Integer'], {'primary_key': '(True)', 'autoincrement': '(True)'}), '(Integer, primary_key=True, autoincrement=True)\n', (473, 520), False, 'from sqlalchemy import create_engine, Column, Integer, String, DATETIME\n'), ((594, 625), 'sqlalchemy.Column', 'Column', (['Integer'], {'nullable': '(False)'}), '(Integer, nullable=False)\n', (600, 625), False, 'from sqlalchemy import create_engine, Column, Integer, String, DATETIME\n'), ((693, 731), 'sqlalchemy.Column', 'Column', (['DATETIME'], {'default': 'datetime.now'}), '(DATETIME, default=datetime.now)\n', (699, 731), False, 'from sqlalchemy import create_engine, Column, Integer, String, DATETIME\n'), ((539, 549), 'sqlalchemy.String', 'String', (['(50)'], {}), '(50)\n', (545, 549), False, 'from sqlalchemy import create_engine, Column, Integer, String, DATETIME\n'), ((647, 657), 'sqlalchemy.String', 'String', (['(50)'], {}), '(50)\n', (653, 657), False, 'from sqlalchemy import create_engine, Column, Integer, String, DATETIME\n')]
|
import requests
import json
import ehp
# Page Scraper
# Have the programme connect to a site and pulls out all the links, or images, and save them to a list.
class PageScraper:
    """Fetch a web page and expose its anchor links and image sources."""

    def __init__(self, url):
        self.url = url
        self.parser = ehp.Html()
        self.dom = self.__dom()

    def __dom(self):
        # Download the page and parse it into a DOM tree.
        response = requests.get(self.url)
        return self.parser.feed(response.text)

    def links(self):
        """Yield the href attribute of every <a> tag."""
        for anchor in self.dom.find('a'):
            yield anchor.attr['href']

    def images(self):
        """Yield the src attribute of every <img> tag."""
        for img in self.dom.find('img'):
            yield img.attr['src']
def main():
    """Prompt for a URL, scrape its links/images, and record them in links.json.

    Skips the scrape if the site is already recorded; otherwise appends an
    entry of the form {'site': ..., 'links': [...], 'images': [...]}.
    """
    url = 'https://' + input('Enter a URL: https://')
    pageScraper = PageScraper(url)
    links = [i for i in pageScraper.links()]
    images = [i for i in pageScraper.images()]
    # BUG FIX: on the first run links.json does not exist yet, so the
    # original open('links.json', 'r') raised FileNotFoundError; treat a
    # missing file as an empty history instead.
    try:
        with open('links.json', 'r') as f:
            encoded = f.read()
    except FileNotFoundError:
        encoded = ''
    decoded = json.loads(encoded) if len(encoded) else []
    # already recorded this site -> nothing to do
    for i in decoded:
        if i['site'] == url:
            return
    decoded.append({ 'site': url, 'links': links, 'images': images })
    with open('links.json', 'w') as f:
        encoded = json.dumps(decoded, indent=2)
        f.write(encoded)
if __name__ == '__main__':
    main()
|
[
"ehp.Html",
"json.loads",
"requests.get",
"json.dumps"
] |
[((244, 254), 'ehp.Html', 'ehp.Html', ([], {}), '()\n', (252, 254), False, 'import ehp\n'), ((313, 335), 'requests.get', 'requests.get', (['self.url'], {}), '(self.url)\n', (325, 335), False, 'import requests\n'), ((849, 868), 'json.loads', 'json.loads', (['encoded'], {}), '(encoded)\n', (859, 868), False, 'import json\n'), ((1079, 1108), 'json.dumps', 'json.dumps', (['decoded'], {'indent': '(2)'}), '(decoded, indent=2)\n', (1089, 1108), False, 'import json\n')]
|
"""Illustrates the asyncio engine / connection interface.
In this example, we have an async engine created by
:func:`_engine.create_async_engine`. We then use it using await
within a coroutine.
"""
import asyncio
from sqlalchemy import Column
from sqlalchemy import Integer
from sqlalchemy import MetaData
from sqlalchemy import String
from sqlalchemy import Table
from sqlalchemy.ext.asyncio import create_async_engine
# Table metadata shared by the demo below: a two-column table "t1".
meta = MetaData()
t1 = Table(
    "t1", meta, Column("id", Integer, primary_key=True), Column("name", String)
)
async def async_main():
    # engine is an instance of AsyncEngine
    engine = create_async_engine(
        "postgresql+asyncpg://scott:tiger@localhost/test", echo=True,
    )
    # conn is an instance of AsyncConnection
    async with engine.begin() as conn:
        # to support SQLAlchemy DDL methods as well as legacy functions, the
        # AsyncConnection.run_sync() awaitable method will pass a "sync"
        # version of the AsyncConnection object to any synchronous method,
        # where synchronous IO calls will be transparently translated for
        # await.
        await conn.run_sync(meta.drop_all)
        await conn.run_sync(meta.create_all)
        # for normal statement execution, a traditional "await execute()"
        # pattern is used.
        await conn.execute(
            t1.insert(), [{"name": "some name 1"}, {"name": "some name 2"}]
        )
    async with engine.connect() as conn:
        # the default result object is the
        # sqlalchemy.engine.Result object
        result = await conn.execute(t1.select())
        # the results are buffered so no await call is necessary
        # for this case.
        print(result.fetchall())
        # for a streaming result that buffers only segments of the
        # result at time, the AsyncConnection.stream() method is used.
        # this returns a sqlalchemy.ext.asyncio.AsyncResult object.
        async_result = await conn.stream(t1.select())
        # this object supports async iteration and awaitable
        # versions of methods like .all(), fetchmany(), etc.
        async for row in async_result:
            print(row)
# Run the whole demo on a fresh event loop.
asyncio.run(async_main())
|
[
"sqlalchemy.MetaData",
"sqlalchemy.ext.asyncio.create_async_engine",
"sqlalchemy.Column"
] |
[((436, 446), 'sqlalchemy.MetaData', 'MetaData', ([], {}), '()\n', (444, 446), False, 'from sqlalchemy import MetaData\n'), ((476, 515), 'sqlalchemy.Column', 'Column', (['"""id"""', 'Integer'], {'primary_key': '(True)'}), "('id', Integer, primary_key=True)\n", (482, 515), False, 'from sqlalchemy import Column\n'), ((517, 539), 'sqlalchemy.Column', 'Column', (['"""name"""', 'String'], {}), "('name', String)\n", (523, 539), False, 'from sqlalchemy import Column\n'), ((624, 710), 'sqlalchemy.ext.asyncio.create_async_engine', 'create_async_engine', (['"""postgresql+asyncpg://scott:tiger@localhost/test"""'], {'echo': '(True)'}), "('postgresql+asyncpg://scott:tiger@localhost/test', echo\n =True)\n", (643, 710), False, 'from sqlalchemy.ext.asyncio import create_async_engine\n')]
|
# Root dir
import os
# Absolute path of the directory containing this file.
ROOT_DIR = os.path.dirname(os.path.abspath(__file__))
# Weather variable vocabulary. These should match the variable names used in the tahmoapi.
RAIN = "precipitation"
TEMP = "temperature"
REL = "humidity"
WINDR= "winddirection"
SRAD = "radiation"
|
[
"os.path.abspath"
] |
[((48, 73), 'os.path.abspath', 'os.path.abspath', (['__file__'], {}), '(__file__)\n', (63, 73), False, 'import os\n')]
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11.18 on 2019-07-23 19:11
from __future__ import unicode_literals
from django.db import migrations
import molo.core.blocks
import molo.core.models
import wagtail.core.blocks
import wagtail.core.fields
import wagtail.images.blocks
class Migration(migrations.Migration):
    # Auto-generated by Django makemigrations (1.11.18); edit with care --
    # the operation order (RemoveField before DeleteModel) is significant.
    dependencies = [
        ('wagtailforms', '0003_capitalizeverbose'),
        ('wagtailcore', '0040_page_draft_title'),
        ('wagtailredirects', '0006_redirect_increase_max_length'),
        ('core', '0017_add_google_search_console'),
    ]
    operations = [
        migrations.RemoveField(
            model_name='formfield',
            name='page',
        ),
        migrations.RemoveField(
            model_name='formpage',
            name='language',
        ),
        migrations.RemoveField(
            model_name='formpage',
            name='page_ptr',
        ),
        migrations.RemoveField(
            model_name='formpage',
            name='translated_pages',
        ),
        migrations.AlterField(
            model_name='articlepage',
            name='body',
            field=wagtail.core.fields.StreamField([('heading', wagtail.core.blocks.CharBlock(classname='full title')), ('paragraph', molo.core.blocks.MarkDownBlock()), ('image', wagtail.images.blocks.ImageChooserBlock()), ('list', wagtail.core.blocks.ListBlock(wagtail.core.blocks.CharBlock(label='Item'))), ('numbered_list', wagtail.core.blocks.ListBlock(wagtail.core.blocks.CharBlock(label='Item'))), ('page', wagtail.core.blocks.PageChooserBlock()), ('media', molo.core.models.MoloMediaBlock(icon='media')), ('richtext', wagtail.core.blocks.RichTextBlock()), ('html', wagtail.core.blocks.RawHTMLBlock())], blank=True, null=True),
        ),
        migrations.DeleteModel(
            name='FormField',
        ),
        migrations.DeleteModel(
            name='FormIndexPage',
        ),
        migrations.DeleteModel(
            name='FormPage',
        ),
    ]
|
[
"django.db.migrations.RemoveField",
"django.db.migrations.DeleteModel"
] |
[((597, 656), 'django.db.migrations.RemoveField', 'migrations.RemoveField', ([], {'model_name': '"""formfield"""', 'name': '"""page"""'}), "(model_name='formfield', name='page')\n", (619, 656), False, 'from django.db import migrations\n'), ((701, 763), 'django.db.migrations.RemoveField', 'migrations.RemoveField', ([], {'model_name': '"""formpage"""', 'name': '"""language"""'}), "(model_name='formpage', name='language')\n", (723, 763), False, 'from django.db import migrations\n'), ((808, 870), 'django.db.migrations.RemoveField', 'migrations.RemoveField', ([], {'model_name': '"""formpage"""', 'name': '"""page_ptr"""'}), "(model_name='formpage', name='page_ptr')\n", (830, 870), False, 'from django.db import migrations\n'), ((915, 985), 'django.db.migrations.RemoveField', 'migrations.RemoveField', ([], {'model_name': '"""formpage"""', 'name': '"""translated_pages"""'}), "(model_name='formpage', name='translated_pages')\n", (937, 985), False, 'from django.db import migrations\n'), ((1768, 1808), 'django.db.migrations.DeleteModel', 'migrations.DeleteModel', ([], {'name': '"""FormField"""'}), "(name='FormField')\n", (1790, 1808), False, 'from django.db import migrations\n'), ((1841, 1885), 'django.db.migrations.DeleteModel', 'migrations.DeleteModel', ([], {'name': '"""FormIndexPage"""'}), "(name='FormIndexPage')\n", (1863, 1885), False, 'from django.db import migrations\n'), ((1918, 1957), 'django.db.migrations.DeleteModel', 'migrations.DeleteModel', ([], {'name': '"""FormPage"""'}), "(name='FormPage')\n", (1940, 1957), False, 'from django.db import migrations\n')]
|
# coding: utf-8
#
# Copyright (c) 2020-2021 Hopenly srl.
#
# This file is part of Ilyde.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from __future__ import absolute_import
import unittest
from flask import json
from six import BytesIO
from apis_server.test import BaseTestCase
class TestModelsController(BaseTestCase):
    """ModelsController integration test stubs"""
    # Generated endpoint stubs: each test builds headers (and a JSON body
    # and/or query string where the endpoint takes one), issues the request
    # through the Flask test client, and asserts an HTTP 200 response.
    def test_create_model(self):
        """Test case for create_model
        Create a model
        """
        model_serializer = {}
        headers = {
            'Accept': 'application/json',
            'Content-Type': 'application/json',
            'Authorization': 'Bearer special-key',
        }
        response = self.client.open(
            '/api/v1/models',
            method='POST',
            headers=headers,
            data=json.dumps(model_serializer),
            content_type='application/json')
        self.assert200(response,
                       'Response body is : ' + response.data.decode('utf-8'))
    def test_create_model_version(self):
        """Test case for create_model_version
        Create a model version
        """
        model_version_serializer = {}
        query_string = [('name', 'name_example')]
        headers = {
            'Accept': 'application/json',
            'Content-Type': 'application/json',
            'Authorization': 'Bearer special-key',
        }
        response = self.client.open(
            '/api/v1/model-versions/create',
            method='POST',
            headers=headers,
            data=json.dumps(model_version_serializer),
            content_type='application/json',
            query_string=query_string)
        self.assert200(response,
                       'Response body is : ' + response.data.decode('utf-8'))
    def test_delete_model(self):
        """Test case for delete_model
        Delete a model
        """
        query_string = [('name', 'name_example')]
        headers = {
            'Accept': 'application/json',
            'Authorization': 'Bearer special-key',
        }
        response = self.client.open(
            '/api/v1/models/delete',
            method='DELETE',
            headers=headers,
            query_string=query_string)
        self.assert200(response,
                       'Response body is : ' + response.data.decode('utf-8'))
    def test_delete_model_version(self):
        """Test case for delete_model_version
        delete a model version
        """
        query_string = [('name', 'name_example'),
                        ('version', 'version_example')]
        headers = {
            'Accept': 'application/json',
            'Authorization': 'Bearer special-key',
        }
        response = self.client.open(
            '/api/v1/model-versions/delete',
            method='DELETE',
            headers=headers,
            query_string=query_string)
        self.assert200(response,
                       'Response body is : ' + response.data.decode('utf-8'))
    def test_get_model_version(self):
        """Test case for get_model_version
        get a model version
        """
        query_string = [('name', 'name_example'),
                        ('version', 'version_example')]
        headers = {
            'Accept': 'application/json',
            'Authorization': 'Bearer special-key',
        }
        response = self.client.open(
            '/api/v1/model-versions/get',
            method='GET',
            headers=headers,
            query_string=query_string)
        self.assert200(response,
                       'Response body is : ' + response.data.decode('utf-8'))
    def test_list_model_versions(self):
        """Test case for list_model_versions
        list versions of a model
        """
        query_string = [('name', 'name_example')]
        headers = {
            'Accept': 'application/json',
            'Authorization': 'Bearer special-key',
        }
        response = self.client.open(
            '/api/v1/model-versions/list',
            method='GET',
            headers=headers,
            query_string=query_string)
        self.assert200(response,
                       'Response body is : ' + response.data.decode('utf-8'))
    def test_retrieve_model(self):
        """Test case for retrieve_model
        Retrieve a model
        """
        query_string = [('name', 'name_example')]
        headers = {
            'Accept': 'application/json',
            'Authorization': 'Bearer special-key',
        }
        response = self.client.open(
            '/api/v1/models/get',
            method='GET',
            headers=headers,
            query_string=query_string)
        self.assert200(response,
                       'Response body is : ' + response.data.decode('utf-8'))
    def test_transition_model_version_stage(self):
        """Test case for transition_model_version_stage
        Transition model version stage
        """
        inline_object7_serializer = {}
        query_string = [('name', 'name_example'),
                        ('version', 'version_example')]
        headers = {
            'Accept': 'application/json',
            'Content-Type': 'application/json',
            'Authorization': 'Bearer special-key',
        }
        response = self.client.open(
            '/api/v1/model-versions/transition-stage',
            method='PATCH',
            headers=headers,
            data=json.dumps(inline_object7_serializer),
            content_type='application/json',
            query_string=query_string)
        self.assert200(response,
                       'Response body is : ' + response.data.decode('utf-8'))
    def test_update_model(self):
        """Test case for update_model
        Update a model
        """
        inline_object5_serializer = {}
        query_string = [('name', 'name_example')]
        headers = {
            'Accept': 'application/json',
            'Content-Type': 'application/json',
            'Authorization': 'Bearer special-key',
        }
        response = self.client.open(
            '/api/v1/models/update',
            method='PATCH',
            headers=headers,
            data=json.dumps(inline_object5_serializer),
            content_type='application/json',
            query_string=query_string)
        self.assert200(response,
                       'Response body is : ' + response.data.decode('utf-8'))
    def test_update_model_version(self):
        """Test case for update_model_version
        update a model version
        """
        inline_object6_serializer = {}
        query_string = [('name', 'name_example'),
                        ('version', 'version_example')]
        headers = {
            'Accept': 'application/json',
            'Content-Type': 'application/json',
            'Authorization': 'Bearer special-key',
        }
        response = self.client.open(
            '/api/v1/model-versions/update',
            method='PATCH',
            headers=headers,
            data=json.dumps(inline_object6_serializer),
            content_type='application/json',
            query_string=query_string)
        self.assert200(response,
                       'Response body is : ' + response.data.decode('utf-8'))
if __name__ == '__main__':
    unittest.main()
|
[
"unittest.main",
"flask.json.dumps"
] |
[((7802, 7817), 'unittest.main', 'unittest.main', ([], {}), '()\n', (7815, 7817), False, 'import unittest\n'), ((1328, 1356), 'flask.json.dumps', 'json.dumps', (['model_serializer'], {}), '(model_serializer)\n', (1338, 1356), False, 'from flask import json\n'), ((2061, 2097), 'flask.json.dumps', 'json.dumps', (['model_version_serializer'], {}), '(model_version_serializer)\n', (2071, 2097), False, 'from flask import json\n'), ((5945, 5982), 'flask.json.dumps', 'json.dumps', (['inline_object7_serializer'], {}), '(inline_object7_serializer)\n', (5955, 5982), False, 'from flask import json\n'), ((6696, 6733), 'flask.json.dumps', 'json.dumps', (['inline_object5_serializer'], {}), '(inline_object5_serializer)\n', (6706, 6733), False, 'from flask import json\n'), ((7535, 7572), 'flask.json.dumps', 'json.dumps', (['inline_object6_serializer'], {}), '(inline_object6_serializer)\n', (7545, 7572), False, 'from flask import json\n')]
|
import datetime as dt
import io
import logging
import os
import time
import PIL.Image
import requests
RADARS = {
'Adelaide': {'id': '643', 'delta': 360, 'frames': 6},
'Albany': {'id': '313', 'delta': 600, 'frames': 4},
'AliceSprings': {'id': '253', 'delta': 600, 'frames': 4},
'Bairnsdale': {'id': '683', 'delta': 600, 'frames': 4},
'Bowen': {'id': '243', 'delta': 600, 'frames': 4},
'Brisbane': {'id': '663', 'delta': 360, 'frames': 6},
'Broome': {'id': '173', 'delta': 600, 'frames': 4},
'Cairns': {'id': '193', 'delta': 360, 'frames': 6},
'Canberra': {'id': '403', 'delta': 360, 'frames': 6},
'Carnarvon': {'id': '053', 'delta': 600, 'frames': 4},
'Ceduna': {'id': '333', 'delta': 600, 'frames': 4},
'Dampier': {'id': '153', 'delta': 600, 'frames': 4},
'Darwin': {'id': '633', 'delta': 360, 'frames': 6},
'Emerald': {'id': '723', 'delta': 600, 'frames': 4},
'Esperance': {'id': '323', 'delta': 600, 'frames': 4},
'Geraldton': {'id': '063', 'delta': 600, 'frames': 4},
'Giles': {'id': '443', 'delta': 600, 'frames': 4},
'Gladstone': {'id': '233', 'delta': 600, 'frames': 4},
'Gove': {'id': '093', 'delta': 600, 'frames': 4},
'Grafton': {'id': '283', 'delta': 600, 'frames': 4},
'Gympie': {'id': '083', 'delta': 360, 'frames': 6},
'HallsCreek': {'id': '393', 'delta': 600, 'frames': 4},
'Hobart': {'id': '763', 'delta': 360, 'frames': 6},
'Kalgoorlie': {'id': '483', 'delta': 360, 'frames': 6},
'Katherine': {'id': '423', 'delta': 360, 'frames': 6},
'Learmonth': {'id': '293', 'delta': 600, 'frames': 4},
'Longreach': {'id': '563', 'delta': 600, 'frames': 4},
'Mackay': {'id': '223', 'delta': 600, 'frames': 4},
'Marburg': {'id': '503', 'delta': 600, 'frames': 4},
'Melbourne': {'id': '023', 'delta': 360, 'frames': 6},
'Mildura': {'id': '303', 'delta': 600, 'frames': 4},
'Moree': {'id': '533', 'delta': 600, 'frames': 4},
'MorningtonIs': {'id': '363', 'delta': 600, 'frames': 4},
'MountIsa': {'id': '753', 'delta': 360, 'frames': 6},
'MtGambier': {'id': '143', 'delta': 600, 'frames': 4},
'Namoi': {'id': '693', 'delta': 600, 'frames': 4},
'Newcastle': {'id': '043', 'delta': 360, 'frames': 6},
'Newdegate': {'id': '383', 'delta': 360, 'frames': 6},
'NorfolkIs': {'id': '623', 'delta': 600, 'frames': 4},
'NWTasmania': {'id': '523', 'delta': 360, 'frames': 6},
'Perth': {'id': '703', 'delta': 360, 'frames': 6},
'PortHedland': {'id': '163', 'delta': 600, 'frames': 4},
'SellicksHill': {'id': '463', 'delta': 600, 'frames': 4},
'SouthDoodlakine': {'id': '583', 'delta': 360, 'frames': 6},
'Sydney': {'id': '713', 'delta': 360, 'frames': 6},
'Townsville': {'id': '733', 'delta': 600, 'frames': 4},
'WaggaWagga': {'id': '553', 'delta': 600, 'frames': 4},
'Warrego': {'id': '673', 'delta': 600, 'frames': 4},
'Warruwi': {'id': '773', 'delta': 360, 'frames': 6},
'Watheroo': {'id': '793', 'delta': 360, 'frames': 6},
'Weipa': {'id': '783', 'delta': 360, 'frames': 6},
'WillisIs': {'id': '413', 'delta': 600, 'frames': 4},
'Wollongong': {'id': '033', 'delta': 360, 'frames': 6},
'Woomera': {'id': '273', 'delta': 600, 'frames': 4},
'Wyndham': {'id': '073', 'delta': 600, 'frames': 4},
'Yarrawonga': {'id': '493', 'delta': 360, 'frames': 6},
}
class BOMRadarLoop:
def __init__(self, location=None, radar_id=None, delta=None, frames=None, outfile=None, logger=None):
self._log = logger or logging.getLogger(__name__)
if isinstance(radar_id, int):
radar_id = '%03d' % radar_id
valids = ', '.join(sorted(RADARS.keys()))
if not radar_id and location not in RADARS:
location = 'Sydney'
self._log.error("Bad 'location' specified, using '%s' (valid locations are: %s)", location, valids)
if radar_id:
if location in RADARS:
radar_id = None
self._log.error("Valid 'location' specified, ignoring 'radar_id'")
elif location:
self._log.error("Bad 'location' specified, using ID %s (valid locations are: %s)", radar_id, valids)
if radar_id and not delta:
delta = 360
self._log.error("No 'delta' specified for radar ID %s, using %s", radar_id, delta)
if radar_id and not frames:
frames = 6
self._log.error("No 'frames' specified for radar ID %s, using %s", radar_id, frames)
self._location = location or 'ID %s' % radar_id
self._delta = delta or RADARS[location]['delta']
self._frames = frames or RADARS[location]['frames']
self._radar_id = radar_id or RADARS[location]['id']
self._outfile = outfile
self._t0 = 0
self._current = self.current
# Public methods
@property
def current(self):
'''
Return the current BOM radar-loop image.
'''
now = int(time.time())
t1 = now - (now % self._delta)
if t1 > self._t0:
self._t0 = t1
self._current = self._get_loop()
return self._current
# Private methods
def _get_background(self):
'''
Fetch the background map, then the topography, locations (e.g. city
names), and distance-from-radar range markings, and merge into a single
image.
'''
self._log.debug('Getting background for %s at %s', self._location, self._t0)
suffix0 = 'products/radar_transparencies/IDR%s.background.png'
url0 = self._get_url(suffix0 % self._radar_id)
background = self._get_image(url0)
if background is None:
return None
for layer in ('topography', 'locations', 'range'):
self._log.debug('Getting %s for %s at %s', layer, self._location, self._t0)
suffix1 = 'products/radar_transparencies/IDR%s.%s.png' % (self._radar_id, layer)
url1 = self._get_url(suffix1)
image = self._get_image(url1)
if image is not None:
background = PIL.Image.alpha_composite(background, image)
return background
def _get_frames(self):
'''
Fetch a radar image for each expected time, composite it with a common
background image, then overlay on the legend to produce a frame. Collect
and return the frames, ignoring any blanks. If no frames were produced,
return None (the caller must expect this).
'''
self._log.debug('Getting frames for %s at %s', self._location, self._t0)
bg = self._get_background()
legend = self._get_legend()
frames = []
if bg and legend:
for time_str in self._get_time_strs():
fg = self._get_wximg(time_str)
if fg is not None:
frames.append(legend.copy())
frames[-1].paste(PIL.Image.alpha_composite(bg, fg), (0, 0))
return frames or None
def _get_image(self, url): # pylint: disable=no-self-use
'''
Fetch an image from the BOM.
'''
self._log.debug('Getting image %s', url)
response = requests.get(url)
if response.status_code == 200:
image = PIL.Image.open(io.BytesIO(response.content))
rgba_img = image.convert('RGBA')
image.close()
return rgba_img
return None
def _get_legend(self):
'''
Fetch the BOM colorbar legend image.
'''
self._log.debug('Getting legend at %s', self._t0)
url = self._get_url('products/radar_transparencies/IDR.legend.0.png')
return self._get_image(url)
def _get_loop(self):
'''
Return an animated GIF comprising a set of frames, where each frame
includes a background, one or more supplemental layers, a colorbar
legend, and a radar image.
'''
self._log.info('Getting loop for %s at %s', self._location, self._t0)
loop = io.BytesIO()
frames = self._get_frames()
if frames is not None:
self._log.debug('Got %s frames for %s at %s', len(frames), self._location, self._t0)
frames[0].save(loop, append_images=frames[1:], duration=500, format='GIF', loop=0, save_all=True)
else:
self._log.warning('Got NO frames for %s at %s', self._location, self._t0)
PIL.Image.new('RGB', (512, 557)).save(loop, format='GIF')
if self._outfile:
outdir = os.path.dirname(self._outfile)
if not os.path.isdir(outdir):
try:
os.makedirs(outdir)
except OSError:
self._log.error('Could not create directory %s', outdir)
try:
with open(self._outfile, 'wb') as outfile:
outfile.write(loop.getvalue())
except IOError:
self._log.error('Could not write image to %s', self._outfile)
return loop.getvalue()
def _get_time_strs(self):
'''
Return a list of strings representing YYYYMMDDHHMM times for the most
recent set of radar images to be used to create the animated GIF.
'''
self._log.debug('Getting time strings starting at %s', self._t0)
frame_numbers = range(self._frames, 0, -1)
tz = dt.timezone.utc
f = lambda n: dt.datetime.fromtimestamp(self._t0 - (self._delta * n), tz=tz).strftime('%Y%m%d%H%M')
return [f(n) for n in frame_numbers]
def _get_url(self, path): # pylint: disable=no-self-use
self._log.debug('Getting URL for path %s', path)
return 'http://www.bom.gov.au/%s' % path
def _get_wximg(self, time_str):
'''
Return a radar weather image from the BOM website. Note that
get_image() returns None if the image could not be fetched, so the
caller must deal with that possibility.
'''
self._log.debug('Getting radar imagery for %s at %s', self._location, time_str)
suffix = 'radar/IDR%s.T.%s.png' % (self._radar_id, time_str)
url = self._get_url(suffix)
return self._get_image(url)
|
[
"io.BytesIO",
"os.makedirs",
"os.path.isdir",
"os.path.dirname",
"time.time",
"requests.get",
"datetime.datetime.fromtimestamp",
"logging.getLogger"
] |
[((7594, 7611), 'requests.get', 'requests.get', (['url'], {}), '(url)\n', (7606, 7611), False, 'import requests\n'), ((8434, 8446), 'io.BytesIO', 'io.BytesIO', ([], {}), '()\n', (8444, 8446), False, 'import io\n'), ((3917, 3944), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (3934, 3944), False, 'import logging\n'), ((5374, 5385), 'time.time', 'time.time', ([], {}), '()\n', (5383, 5385), False, 'import time\n'), ((8938, 8968), 'os.path.dirname', 'os.path.dirname', (['self._outfile'], {}), '(self._outfile)\n', (8953, 8968), False, 'import os\n'), ((7687, 7715), 'io.BytesIO', 'io.BytesIO', (['response.content'], {}), '(response.content)\n', (7697, 7715), False, 'import io\n'), ((8988, 9009), 'os.path.isdir', 'os.path.isdir', (['outdir'], {}), '(outdir)\n', (9001, 9009), False, 'import os\n'), ((9052, 9071), 'os.makedirs', 'os.makedirs', (['outdir'], {}), '(outdir)\n', (9063, 9071), False, 'import os\n'), ((9827, 9887), 'datetime.datetime.fromtimestamp', 'dt.datetime.fromtimestamp', (['(self._t0 - self._delta * n)'], {'tz': 'tz'}), '(self._t0 - self._delta * n, tz=tz)\n', (9852, 9887), True, 'import datetime as dt\n')]
|
# -*- coding: utf-8 -*-
# Copyright (c) 2012-2021 <NAME>, <NAME> <<EMAIL>>,
# <NAME> <<EMAIL>>, The University of Vermont
# <<EMAIL>>, github contributors
# Released under the MIT license, as given in the file LICENSE, which must
# accompany any distribution of this code.
import logging
from osgeo import ogr
from osgeo import osr
from .osm_geometries import OsmBoundary, OsmPoint, OsmWay, OsmRelation
class OsmData:
def __init__(self, translation, rounding_digits=7, max_points_in_way=1800, add_bounds=False):
# options
self.translation = translation
self.rounding_digits = rounding_digits
self.max_points_in_way = max_points_in_way
self.add_bounds = add_bounds
self.__bounds = OsmBoundary()
self.__nodes = []
self.__unique_node_index = {}
self.__ways = []
self.__relations = []
self.__long_ways_from_polygons = set()
def __get_layer_fields(self, layer):
layer_fields = []
layer_def = layer.GetLayerDefn()
for i in range(layer_def.GetFieldCount()):
field_def = layer_def.GetFieldDefn(i)
layer_fields.append((i, field_def.GetNameRef(), field_def.GetType()))
return layer_fields
# This function builds up a dictionary with the source data attributes
# and passes them to the filter_tags function, returning the result.
def __get_feature_tags(self, ogrfeature, layer_fields, source_encoding):
tags = {}
for (index, field_name, field_type) in layer_fields:
field_value = ''
if field_type == ogr.OFTString:
field_value = ogrfeature.GetFieldAsBinary(index).decode(source_encoding)
else:
field_value = ogrfeature.GetFieldAsString(index)
tags[field_name] = field_value.strip()
return self.translation.filter_tags(tags)
def __calc_bounds(self, ogrgeometry):
(minx, maxx, miny, maxy) = ogrgeometry.GetEnvelope()
self.__bounds.add_envelope(minx, maxx, miny, maxy)
def __round_number(self, n):
return int(round(n * 10**self.rounding_digits))
def __add_node(self, x, y, tags, is_way_member):
rx = self.__round_number(x)
ry = self.__round_number(y)
unique_node_id = None
if is_way_member:
unique_node_id = (rx, ry)
else:
unique_node_id = self.translation.get_unique_node_identifier(rx, ry, tags)
if unique_node_id in self.__unique_node_index:
return self.__nodes[self.__unique_node_index[unique_node_id]]
else:
node = OsmPoint(x, y, tags)
self.__unique_node_index[unique_node_id] = len(self.__nodes)
self.__nodes.append(node)
return node
def __add_way(self, tags):
way = OsmWay(tags)
self.__ways.append(way)
return way
def __add_relation(self, tags):
relation = OsmRelation(tags)
self.__relations.append(relation)
return relation
def __parse_point(self, ogrgeometry, tags):
return self.__add_node(ogrgeometry.GetX(), ogrgeometry.GetY(), tags, False)
def __parse_linestring(self, ogrgeometry, tags):
way = self.__add_way(tags)
# LineString.GetPoint() returns a tuple, so we can't call parsePoint on it
# and instead have to create the point ourself
previous_node_id = None
for i in range(ogrgeometry.GetPointCount()):
(x, y, z_unused) = ogrgeometry.GetPoint(i)
node = self.__add_node(x, y, {}, True)
if previous_node_id == None or previous_node_id != node.id:
way.points.append(node)
node.addparent(way)
previous_node_id = node.id
return way
def __parse_polygon(self, ogrgeometry, tags):
# Special case polygons with only one ring. This does not (or at least
# should not) change behavior when simplify relations is turned on.
if ogrgeometry.GetGeometryCount() == 0:
logging.warning("Polygon with no rings?")
elif ogrgeometry.GetGeometryCount() == 1:
result = self.__parse_linestring(ogrgeometry.GetGeometryRef(0), tags)
if len(result.points) > self.max_points_in_way:
self.__long_ways_from_polygons.add(result)
return result
else:
relation = self.__add_relation(tags)
try:
exterior = self.__parse_linestring(ogrgeometry.GetGeometryRef(0), {})
exterior.addparent(relation)
except:
logging.warning("Polygon with no exterior ring?")
return None
relation.members.append((exterior, "outer"))
for i in range(1, ogrgeometry.GetGeometryCount()):
interior = self.__parse_linestring(ogrgeometry.GetGeometryRef(i), {})
interior.addparent(relation)
relation.members.append((interior, "inner"))
return relation
def __parse_collection(self, ogrgeometry, tags):
# OGR MultiPolygon maps easily to osm multipolygon, so special case it
# TODO: Does anything else need special casing?
geometry_type = ogrgeometry.GetGeometryType()
if geometry_type in [ ogr.wkbMultiPolygon, ogr.wkbMultiPolygon25D ]:
if ogrgeometry.GetGeometryCount() > 1:
relation = self.__add_relation(tags)
for polygon in range(ogrgeometry.GetGeometryCount()):
ext_geom = ogrgeometry.GetGeometryRef(polygon).GetGeometryRef(0)
exterior = self.__parse_linestring(ext_geom, {})
exterior.addparent(relation)
relation.members.append((exterior, "outer"))
for i in range(1, ogrgeometry.GetGeometryRef(polygon).GetGeometryCount()):
int_geom = ogrgeometry.GetGeometryRef(polygon).GetGeometryRef(i)
interior = self.__parse_linestring(int_geom, {})
interior.addparent(relation)
relation.members.append((interior, "inner"))
return [ relation ]
else:
return [ self.__parse_polygon(ogrgeometry.GetGeometryRef(0), tags) ]
elif geometry_type in [ ogr.wkbMultiLineString, ogr.wkbMultiLineString25D ]:
geometries = []
for linestring in range(ogrgeometry.GetGeometryCount()):
geometries.append(self.__parse_linestring(ogrgeometry.GetGeometryRef(linestring), tags))
return geometries
else:
relation = self.__add_relation(tags)
for i in range(ogrgeometry.GetGeometryCount()):
member = self.__parse_geometry(ogrgeometry.GetGeometryRef(i), {})
member.addparent(relation)
relation.members.append((member, "member"))
return [ relation ]
def __parse_geometry(self, ogrgeometry, tags):
osmgeometries = []
geometry_type = ogrgeometry.GetGeometryType()
if geometry_type in [ ogr.wkbPoint, ogr.wkbPoint25D ]:
osmgeometries.append(self.__parse_point(ogrgeometry, tags))
elif geometry_type in [ ogr.wkbLineString, ogr.wkbLinearRing, ogr.wkbLineString25D ]:
# ogr.wkbLinearRing25D does not exist
osmgeometries.append(self.__parse_linestring(ogrgeometry, tags))
elif geometry_type in [ ogr.wkbPolygon, ogr.wkbPolygon25D ]:
osmgeometries.append(self.__parse_polygon(ogrgeometry, tags))
elif geometry_type in [ ogr.wkbMultiPoint, ogr.wkbMultiLineString, ogr.wkbMultiPolygon, \
ogr.wkbGeometryCollection, ogr.wkbMultiPoint25D, \
ogr.wkbMultiLineString25D, ogr.wkbMultiPolygon25D, \
ogr.wkbGeometryCollection25D ]:
osmgeometries.extend(self.__parse_collection(ogrgeometry, tags))
else:
logging.warning("Unhandled geometry, type %s" % str(geometry_type))
return osmgeometries
def add_feature(self, ogrfeature, layer_fields, source_encoding, reproject = lambda geometry: None):
ogrfilteredfeature = self.translation.filter_feature(ogrfeature, layer_fields, reproject)
if ogrfilteredfeature is None:
return
ogrgeometry = ogrfilteredfeature.GetGeometryRef()
if ogrgeometry is None:
return
feature_tags = self.__get_feature_tags(ogrfilteredfeature, layer_fields, source_encoding)
if feature_tags is None:
return
reproject(ogrgeometry)
if self.add_bounds:
self.__calc_bounds(ogrgeometry)
osmgeometries = self.__parse_geometry(ogrgeometry, feature_tags)
# TODO performance: run in __parse_geometry to avoid second loop
for osmgeometry in [ geom for geom in osmgeometries if geom ]:
self.translation.process_feature_post(osmgeometry, ogrfilteredfeature, ogrgeometry)
def __split_way(self, way, is_way_in_relation):
new_points = [ way.points[i:i + self.max_points_in_way] \
for i in range(0, len(way.points), self.max_points_in_way - 1) ]
new_ways = [ way ] + [ OsmWay(way.get_tags()) for i in range(len(new_points) - 1) ]
if not is_way_in_relation:
for new_way in new_ways[1:]:
self.__ways.append(new_way)
for new_way, points in zip(new_ways, new_points):
new_way.points = points
if new_way.id != way.id:
for point in points:
point.removeparent(way)
point.addparent(new_way)
return new_ways
def __merge_into_new_relation(self, way_parts):
new_relation = self.__add_relation({})
new_relation.members = [ (way, "outer") for way in way_parts ]
for way in way_parts:
way.addparent(new_relation)
def __split_way_in_relation(self, rel, way_parts):
way_roles = [ m[1] for m in rel.members if m[0] == way_parts[0] ]
way_role = "" if len(way_roles) == 0 else way_roles[0]
for way in way_parts[1:]:
way.addparent(rel)
rel.members.append((way, way_role))
def split_long_ways(self):
if self.max_points_in_way < 2:
# pointless :-)
return
logging.debug("Splitting long ways")
for way in self.__ways:
is_way_in_relation = len([ p for p in way.get_parents() if type(p) == OsmRelation ]) > 0
if len(way.points) > self.max_points_in_way:
way_parts = self.__split_way(way, is_way_in_relation)
if not is_way_in_relation:
if way in self.__long_ways_from_polygons:
self.__merge_into_new_relation(way_parts)
else:
for rel in way.get_parents():
self.__split_way_in_relation(rel, way_parts)
def process(self, datasource):
for i in range(datasource.get_layer_count()):
(layer, reproject) = datasource.get_layer(i)
if layer:
layer_fields = self.__get_layer_fields(layer)
for j in range(layer.GetFeatureCount()):
ogrfeature = layer.GetNextFeature()
self.add_feature(ogrfeature, layer_fields, datasource.source_encoding, reproject)
self.split_long_ways()
class DataWriterContextManager:
def __init__(self, datawriter):
self.datawriter = datawriter
def __enter__(self):
self.datawriter.open()
return self.datawriter
def __exit__(self, exception_type, value, traceback):
self.datawriter.close()
def output(self, datawriter):
self.translation.process_output(self.__nodes, self.__ways, self.__relations)
with self.DataWriterContextManager(datawriter) as dw:
dw.write_header(self.__bounds)
dw.write_nodes(self.__nodes)
dw.write_ways(self.__ways)
dw.write_relations(self.__relations)
dw.write_footer()
|
[
"logging.warning",
"logging.debug"
] |
[((10612, 10648), 'logging.debug', 'logging.debug', (['"""Splitting long ways"""'], {}), "('Splitting long ways')\n", (10625, 10648), False, 'import logging\n'), ((4136, 4177), 'logging.warning', 'logging.warning', (['"""Polygon with no rings?"""'], {}), "('Polygon with no rings?')\n", (4151, 4177), False, 'import logging\n'), ((4702, 4751), 'logging.warning', 'logging.warning', (['"""Polygon with no exterior ring?"""'], {}), "('Polygon with no exterior ring?')\n", (4717, 4751), False, 'import logging\n')]
|
import json
from django.db import models
from django.conf import settings
from django.utils.translation import ugettext_lazy as _
from django.utils import timezone
from ribo_api.models.usertypes import TinyIntegerField
from .usertypes import NormalTextField
class UserActivityLog(models.Model):
id = models.AutoField(primary_key=True)
user = models.ForeignKey(settings.AUTH_USER_MODEL)
ip = models.GenericIPAddressField()
action = models.CharField(_('Action'), max_length=6)
status = models.SmallIntegerField(_('Request status code'), default=200)
url = models.CharField(_('Url'), max_length=2000, default='')
meta = NormalTextField(_('Meta data'), default='{}')
created_at = models.DateTimeField(default=timezone.now)
latest_at = models.DateTimeField(default=timezone.now)
device_type = TinyIntegerField(default=0)
@property
def meta_json(self):
if self.meta:
return json.loads(self.meta)
return {}
class Meta:
verbose_name = _('activity_log')
verbose_name_plural = _('activity_logs')
db_table = 'ribo_user_activity_logs'
|
[
"json.loads",
"django.db.models.ForeignKey",
"django.db.models.AutoField",
"django.db.models.GenericIPAddressField",
"ribo_api.models.usertypes.TinyIntegerField",
"django.db.models.DateTimeField",
"django.utils.translation.ugettext_lazy"
] |
[((307, 341), 'django.db.models.AutoField', 'models.AutoField', ([], {'primary_key': '(True)'}), '(primary_key=True)\n', (323, 341), False, 'from django.db import models\n'), ((353, 396), 'django.db.models.ForeignKey', 'models.ForeignKey', (['settings.AUTH_USER_MODEL'], {}), '(settings.AUTH_USER_MODEL)\n', (370, 396), False, 'from django.db import models\n'), ((406, 436), 'django.db.models.GenericIPAddressField', 'models.GenericIPAddressField', ([], {}), '()\n', (434, 436), False, 'from django.db import models\n'), ((711, 753), 'django.db.models.DateTimeField', 'models.DateTimeField', ([], {'default': 'timezone.now'}), '(default=timezone.now)\n', (731, 753), False, 'from django.db import models\n'), ((770, 812), 'django.db.models.DateTimeField', 'models.DateTimeField', ([], {'default': 'timezone.now'}), '(default=timezone.now)\n', (790, 812), False, 'from django.db import models\n'), ((831, 858), 'ribo_api.models.usertypes.TinyIntegerField', 'TinyIntegerField', ([], {'default': '(0)'}), '(default=0)\n', (847, 858), False, 'from ribo_api.models.usertypes import TinyIntegerField\n'), ((467, 478), 'django.utils.translation.ugettext_lazy', '_', (['"""Action"""'], {}), "('Action')\n", (468, 478), True, 'from django.utils.translation import ugettext_lazy as _\n'), ((532, 556), 'django.utils.translation.ugettext_lazy', '_', (['"""Request status code"""'], {}), "('Request status code')\n", (533, 556), True, 'from django.utils.translation import ugettext_lazy as _\n'), ((598, 606), 'django.utils.translation.ugettext_lazy', '_', (['"""Url"""'], {}), "('Url')\n", (599, 606), True, 'from django.utils.translation import ugettext_lazy as _\n'), ((664, 678), 'django.utils.translation.ugettext_lazy', '_', (['"""Meta data"""'], {}), "('Meta data')\n", (665, 678), True, 'from django.utils.translation import ugettext_lazy as _\n'), ((1024, 1041), 'django.utils.translation.ugettext_lazy', '_', (['"""activity_log"""'], {}), "('activity_log')\n", (1025, 1041), True, 'from 
django.utils.translation import ugettext_lazy as _\n'), ((1072, 1090), 'django.utils.translation.ugettext_lazy', '_', (['"""activity_logs"""'], {}), "('activity_logs')\n", (1073, 1090), True, 'from django.utils.translation import ugettext_lazy as _\n'), ((944, 965), 'json.loads', 'json.loads', (['self.meta'], {}), '(self.meta)\n', (954, 965), False, 'import json\n')]
|
import os
import re
from collections import defaultdict
input_file = open(os.path.join(os.path.dirname(__file__), 'day6_input.txt'), 'r')
min_x = -1
min_y = -1
max_x = -1
max_y = -1
points = []
for line in input_file:
matcher = re.match("(\d+),\s(\d+)", line)
if matcher is not None:
y = int(matcher.group(1))
x = int(matcher.group(2))
if x > max_x:
max_x = x
if y > max_y:
max_y = y
if min_x == -1 or min_y == -1:
min_x = x
min_y = y
else:
if x < min_x:
min_x = x
if y < min_y:
min_y = y
point_string = f"{y},{x}"
points.append(point_string)
def calculate_distances(minx, maxx, miny, maxy):
distances = defaultdict(lambda:1)
for x_point in range(minx, maxx+1):
for y_point in range(miny, maxy+1):
point_string = f"{y_point},{x_point}"
if point_string not in points:
min_point_value = 100
min_point = None
for point in points:
matcher = re.match("(\d+),(\d+)", point)
if matcher is not None:
y = int(matcher.group(1))
x = int(matcher.group(2))
current_point_value = abs(x - x_point) + abs(y - y_point)
if current_point_value < min_point_value:
min_point_value = current_point_value
min_point = point
else:
raise ValueError(f"Formatting was wrong for {point}")
for point in points:
matcher = re.match("(\d+),(\d+)", point)
if matcher is not None:
y = int(matcher.group(1))
x = int(matcher.group(2))
current_point_value = abs(x_point - x) + abs(y_point -y)
if point != min_point and current_point_value == min_point_value:
min_point = None
else:
raise ValueError(f"Formatting was wrong for {point}")
if min_point is not None:
distances[min_point] += 1
return distances
print(f"Grid dimensions: {min_x}, {min_y} to {max_x}, {max_y}")
max_point_area = 0
max_point = None
orig_distances = calculate_distances(min_x, max_x, min_y, max_y)
bigger_distances = calculate_distances(min_x -1, max_x +1, min_y -1, max_y+1)
distances = dict()
for distance_key in orig_distances:
if orig_distances[distance_key] == bigger_distances[distance_key]:
distances[distance_key] = orig_distances[distance_key]
for point in distances.keys():
if distances[point] > max_point_area:
max_point = point
max_point_area = distances[point]
print(f"{point} = {distances[point]}")
print(f"Max point is {max_point} with distance {max_point_area}")
|
[
"collections.defaultdict",
"os.path.dirname",
"re.match"
] |
[((234, 268), 're.match', 're.match', (['"""(\\\\d+),\\\\s(\\\\d+)"""', 'line'], {}), "('(\\\\d+),\\\\s(\\\\d+)', line)\n", (242, 268), False, 'import re\n'), ((788, 811), 'collections.defaultdict', 'defaultdict', (['(lambda : 1)'], {}), '(lambda : 1)\n', (799, 811), False, 'from collections import defaultdict\n'), ((88, 113), 'os.path.dirname', 'os.path.dirname', (['__file__'], {}), '(__file__)\n', (103, 113), False, 'import os\n'), ((1125, 1157), 're.match', 're.match', (['"""(\\\\d+),(\\\\d+)"""', 'point'], {}), "('(\\\\d+),(\\\\d+)', point)\n", (1133, 1157), False, 'import re\n'), ((1731, 1763), 're.match', 're.match', (['"""(\\\\d+),(\\\\d+)"""', 'point'], {}), "('(\\\\d+),(\\\\d+)', point)\n", (1739, 1763), False, 'import re\n')]
|
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import shutil
import sys
import tempfile
from observations.r.sparrows import sparrows
def test_sparrows():
"""Test module sparrows.py by downloading
sparrows.csv and testing shape of
extracted data has 116 rows and 3 columns
"""
test_path = tempfile.mkdtemp()
x_train, metadata = sparrows(test_path)
try:
assert x_train.shape == (116, 3)
except:
shutil.rmtree(test_path)
raise()
|
[
"observations.r.sparrows.sparrows",
"shutil.rmtree",
"tempfile.mkdtemp"
] |
[((366, 384), 'tempfile.mkdtemp', 'tempfile.mkdtemp', ([], {}), '()\n', (382, 384), False, 'import tempfile\n'), ((407, 426), 'observations.r.sparrows.sparrows', 'sparrows', (['test_path'], {}), '(test_path)\n', (415, 426), False, 'from observations.r.sparrows import sparrows\n'), ((485, 509), 'shutil.rmtree', 'shutil.rmtree', (['test_path'], {}), '(test_path)\n', (498, 509), False, 'import shutil\n')]
|
#!/usr/bin/env python3
###################################
# Mastering ML Python Mini Course
#
# Inspired by the project here:
#
# https://s3.amazonaws.com/MLMastery/machine_learning_mastery_with_python_mini_course.pdf?__s=mxhvphowryg2sfmzus2q
#
# By <NAME>
#
# Project will soon be found at:
#
# https://www.inertia7.com/projects/
####################################
# Welcome to my repo for the Mastering Machine Learning Python Mini Course
# Here I will be going through each part of the course
# So you can get a feel of the different parts
import numpy as np
import pandas as pd
from pandas import read_csv, Series
from sklearn.linear_model import LogisticRegression
from sklearn.discriminant_analysis import LinearDiscriminantAnalysis
from sklearn.ensemble import GradientBoostingClassifier, VotingClassifier
from sklearn.model_selection import cross_val_score, KFold, train_test_split
# Define url and columns
url = 'https://goo.gl/bDdBiA'
columns = np.array(['preg', 'plas', 'pres', 'skin', 'test', 'mass', 'pedi', 'age', 'class'])
# Read in data
data = read_csv(url, names = columns)
array = data.values
# Divide data into attributes and predictor
X = array[:, 0:8]
y = array[:, 8]
####################################
# Lesson 11: Improve Accuracy with Ensemble Methods
####################################
'''
Here in the course would have been a section to do some ensemble model
training, as it represents an extra layer on top of traditional models
But since I have already done this,
I will instead invoke the one ensemble method I haven't tried:
The Voting Classifier
This method involves literally combining different models
(such as Logsitic Regression + Decision Tree) versus many of the same models
(many Decision Trees in a Random Forest or Gradient Boosted Machine)
Here I will try out a bunch of different things and see where it goes!
Will use cross validation metrics here, nothing too fancy
'''
# Make list for models
models = np.empty([3, 2], dtype = object)
# Voting ensembles
# Number 1: Hard Vote (Predicted class labels used for majority rule voting)
models[0] = ['Voting Classifier 1', VotingClassifier(estimators = [
('lr', LogisticRegression(random_state = 1)),
('gbm', GradientBoostingClassifier(random_state = 1)),],
voting = 'hard')]
# Number 2: Soft Vote (Argmax of sums of predicted probabilities used)
# Recommended for ensemble of well-calibrated classifiers
models[1] = ['Voting Classifier 2', VotingClassifier(estimators = [
('lda', LinearDiscriminantAnalysis()),
('lr', LogisticRegression(random_state = 1))],
voting = 'soft')]
# Number 3: Soft Vote with weights
# Some models will be more valuable than others
models[2] = ['Voting Classifier 3', VotingClassifier(estimators = [
('lr', LogisticRegression(random_state = 1)),
('gbm', GradientBoostingClassifier(random_state = 1)),],
voting = 'soft',
weights = (0.25, 0.75))]
# Iterate through models, then fit & evaluate
for name, model in models:
k_fold = KFold(n_splits = 10, random_state = 1)
for scoring in ('accuracy', 'roc_auc', 'neg_log_loss'):
try:
result = cross_val_score(model, X, y, cv = k_fold, scoring = scoring)
if scoring == 'accuracy':
print("\n%s of %s model:\n %.3f%% (+\-%.3f%%)" %
(scoring, name, result.mean() * 100.0, result.std() * 100.0))
else:
print("\n%s of %s model:\n %.3f (+\-%.3f)" %
(scoring, name, result.mean(), result.std()))
except AttributeError:
print("The %s model cannot perform cross validation with the %s metric" % (name, scoring))
|
[
"pandas.read_csv",
"numpy.empty",
"sklearn.model_selection.cross_val_score",
"sklearn.model_selection.KFold",
"sklearn.ensemble.GradientBoostingClassifier",
"sklearn.linear_model.LogisticRegression",
"numpy.array",
"sklearn.discriminant_analysis.LinearDiscriminantAnalysis"
] |
[((963, 1049), 'numpy.array', 'np.array', (["['preg', 'plas', 'pres', 'skin', 'test', 'mass', 'pedi', 'age', 'class']"], {}), "(['preg', 'plas', 'pres', 'skin', 'test', 'mass', 'pedi', 'age',\n 'class'])\n", (971, 1049), True, 'import numpy as np\n'), ((1069, 1097), 'pandas.read_csv', 'read_csv', (['url'], {'names': 'columns'}), '(url, names=columns)\n', (1077, 1097), False, 'from pandas import read_csv, Series\n'), ((1969, 1999), 'numpy.empty', 'np.empty', (['[3, 2]'], {'dtype': 'object'}), '([3, 2], dtype=object)\n', (1977, 1999), True, 'import numpy as np\n'), ((3016, 3050), 'sklearn.model_selection.KFold', 'KFold', ([], {'n_splits': '(10)', 'random_state': '(1)'}), '(n_splits=10, random_state=1)\n', (3021, 3050), False, 'from sklearn.model_selection import cross_val_score, KFold, train_test_split\n'), ((3150, 3206), 'sklearn.model_selection.cross_val_score', 'cross_val_score', (['model', 'X', 'y'], {'cv': 'k_fold', 'scoring': 'scoring'}), '(model, X, y, cv=k_fold, scoring=scoring)\n', (3165, 3206), False, 'from sklearn.model_selection import cross_val_score, KFold, train_test_split\n'), ((2178, 2212), 'sklearn.linear_model.LogisticRegression', 'LogisticRegression', ([], {'random_state': '(1)'}), '(random_state=1)\n', (2196, 2212), False, 'from sklearn.linear_model import LogisticRegression\n'), ((2229, 2271), 'sklearn.ensemble.GradientBoostingClassifier', 'GradientBoostingClassifier', ([], {'random_state': '(1)'}), '(random_state=1)\n', (2255, 2271), False, 'from sklearn.ensemble import GradientBoostingClassifier, VotingClassifier\n'), ((2510, 2538), 'sklearn.discriminant_analysis.LinearDiscriminantAnalysis', 'LinearDiscriminantAnalysis', ([], {}), '()\n', (2536, 2538), False, 'from sklearn.discriminant_analysis import LinearDiscriminantAnalysis\n'), ((2552, 2586), 'sklearn.linear_model.LogisticRegression', 'LogisticRegression', ([], {'random_state': '(1)'}), '(random_state=1)\n', (2570, 2586), False, 'from sklearn.linear_model import LogisticRegression\n'), 
((2777, 2811), 'sklearn.linear_model.LogisticRegression', 'LogisticRegression', ([], {'random_state': '(1)'}), '(random_state=1)\n', (2795, 2811), False, 'from sklearn.linear_model import LogisticRegression\n'), ((2828, 2870), 'sklearn.ensemble.GradientBoostingClassifier', 'GradientBoostingClassifier', ([], {'random_state': '(1)'}), '(random_state=1)\n', (2854, 2870), False, 'from sklearn.ensemble import GradientBoostingClassifier, VotingClassifier\n')]
|
# Copyright (c) 2020, Apple Inc. All rights reserved.
#
# Use of this source code is governed by a BSD-3-clause license that can be
# found in the LICENSE.txt file or at https://opensource.org/licenses/BSD-3-Clause
from coremltools.converters.mil.mil.types.symbolic import any_symbolic
from coremltools.converters.mil.mil import get_new_symbol, get_new_variadic_symbol
from ._op_reqs import *
"""
Random Op Superclass
"""
class RandomDistribution(Operation):
input_spec = InputSpec(shape=IntTensorInputType(),)
def __init__(self, **kwargs):
super(RandomDistribution, self).__init__(**kwargs)
def type_inference(self):
if any_symbolic(self.shape.shape):
# We can't infer any shape if shape has variable length.
return types.tensor(types.fp32, (get_new_variadic_symbol(),))
# shape has fixed length here.
if self.shape.sym_val is None:
shape = tuple([get_new_symbol() for _ in range(self.shape.shape[0])])
return types.tensor(types.fp32, shape)
return types.tensor(types.fp32, tuple(self.shape.sym_val.tolist()))
"""
Random Op Implementation(s)
"""
@register_op(
doc_str=r"""
Returns a tensor with specified shape with random values from a Bernoulli distribution.
.. math::
f(k) = \begin{cases}1-p &\text{if } k = 0\\
p &\text{if } k = 1\end{cases}
for :math:`k` in :math:`\{0, 1\}`.
Parameters
----------
shape: <K, i32>, required
Target output tensor shape.
K is the rank of the output tensor. shape[k] > 0 for k = 0,..., K-1.
prob: const<f32>, optional
The probability of sampling 1. Defaults to 0.5.
seed: const<i32>, optional
Seed to create a reproducible sequence of values across multiple invokes.
Returns
-------
<*, T>, a tensor of given target output shape filled with random values.
See Also
--------
random_categorical, random_normal, random_uniform
"""
)
class random_bernoulli(RandomDistribution):
input_spec = (
InputSpec(
shape=IntTensorInputType(),
prob=FloatInputType(const=True, default=0.5),
seed=IntInputType(const=True, default=-1),
)
+ RandomDistribution.input_spec
)
def __init__(self, **kwargs):
super(random_bernoulli, self).__init__(**kwargs)
@register_op(
doc_str=r"""
Returns random values from a categorical distribution.
Parameters
----------
shape: <*D_in, T>
N-dimensional tensor, one of logits (event log-probabilities) or probs
(event probabilities). The first N - 1 dimensions specifies distributions,
the last dimension represents a vector of probabilities.
mode: const<str>, optional
One of ['logits', 'probs']. Defaults to 'logits'.
size: const<i32>, optional
Number of samples to draw. Defaults to 1.
seed: const<i32>, optional
Seed to create a reproducible sequence of values across multiple invokes.
Returns
-------
<*D_in[:-1] + [size], T>, a tensor of given target output shape filled with random values.
See Also
--------
random_bernoulli, random_normal, random_uniform
"""
)
class random_categorical(Operation):
input_spec = InputSpec(
x=TensorInputType(),
mode=StringInputType(const=True, default="logits"),
size=IntInputType(const=True, default=1),
seed=IntInputType(const=True, default=-1),
)
def __init__(self, **kwargs):
super(random_categorical, self).__init__(**kwargs)
def type_inference(self):
output_shape = self.x.shape[:-1] + (self.size.val,)
return types.tensor(types.fp32, output_shape)
@register_op(
doc_str=r"""
Returns a tensor with specified shape with random values from a normal distribution.
.. math::
f(x) = \frac{\exp(-x^2/2)}{\sqrt{2\pi}}
for a real number :math:`x`.
Parameters
----------
shape: <K, i32>, required
Target output tensor shape.
K is the rank of the output tensor. shape[k] > 0 for k = 0,..., K-1.
mean: const<f32>, optional
The mean (center) of the normal distribution. Defaults to 0.0.
stddev: const<f32>, optional
The standard deviation (width) of the normal distribution. Defaults to 1.0.
seed: const<i32>, optional
Seed to create a reproducible sequence of values across multiple invokes.
Returns
-------
<*, T>, a tensor of given target output shape filled with random values.
See Also
--------
random_categorical, random_bernoulli, random_uniform
"""
)
class random_normal(RandomDistribution):
input_spec = (
InputSpec(
shape=IntTensorInputType(),
mean=FloatInputType(const=True, default=0.0),
stddev=FloatInputType(const=True, default=1.0),
seed=IntInputType(const=True, default=-1),
)
+ RandomDistribution.input_spec
)
def __init__(self, **kwargs):
super(random_normal, self).__init__(**kwargs)
@register_op(
doc_str=r"""
Returns a tensor with specified shape with random values from a normal distribution.
.. math::
p(x) = \frac{1}{high - low}
for a real number :math:`x`.
Parameters
----------
shape: <K, i32>, required
Target output tensor shape.
K is the rank of the output tensor. shape[k] > 0 for k = 0,..., K-1.
low: const<f32>, optional
Lower boundary of the output interval (inclusive). Defaults to 0.0.
high: const<f32>, optional
Upper boundary of the output interval (exclusive). Defaults to 1.0.
seed: const<i32>, optional
Seed to create a reproducible sequence of values across multiple invokes.
Returns
-------
<*, T>, a tensor of given target output shape filled with random values.
See Also
--------
random_categorical, random_bernoulli, random_normal
"""
)
class random_uniform(RandomDistribution):
input_spec = (
InputSpec(
shape=IntTensorInputType(),
low=FloatInputType(const=True, default=0.0),
high=FloatInputType(const=True, default=1.0),
seed=IntInputType(const=True, default=-1),
)
+ RandomDistribution.input_spec
)
def __init__(self, **kwargs):
super(random_uniform, self).__init__(**kwargs)
|
[
"coremltools.converters.mil.mil.get_new_variadic_symbol",
"coremltools.converters.mil.mil.types.symbolic.any_symbolic",
"coremltools.converters.mil.mil.get_new_symbol"
] |
[((658, 688), 'coremltools.converters.mil.mil.types.symbolic.any_symbolic', 'any_symbolic', (['self.shape.shape'], {}), '(self.shape.shape)\n', (670, 688), False, 'from coremltools.converters.mil.mil.types.symbolic import any_symbolic\n'), ((804, 829), 'coremltools.converters.mil.mil.get_new_variadic_symbol', 'get_new_variadic_symbol', ([], {}), '()\n', (827, 829), False, 'from coremltools.converters.mil.mil import get_new_symbol, get_new_variadic_symbol\n'), ((939, 955), 'coremltools.converters.mil.mil.get_new_symbol', 'get_new_symbol', ([], {}), '()\n', (953, 955), False, 'from coremltools.converters.mil.mil import get_new_symbol, get_new_variadic_symbol\n')]
|
from Ranking.src.Ranker import Ranker, add_player
def test_update(file, update_text, expected):
res = Ranker(file, update_text)
assert res == expected, "Update failed\ngot:\n" + str(res) + "\nexpected:\n" + expected
def test_add(file, player, expected):
res = add_player(file, player)
assert res == expected, "Update failed\ngot:\n" + str(res) + "\nexpected:\n" + expected
if __name__ == "__main__":
test_add("test_files/empty.json","youssef","{'players': [{'name': 'youssef', 'rank': 0, 'points': 0}]}")
|
[
"Ranking.src.Ranker.Ranker",
"Ranking.src.Ranker.add_player"
] |
[((108, 133), 'Ranking.src.Ranker.Ranker', 'Ranker', (['file', 'update_text'], {}), '(file, update_text)\n', (114, 133), False, 'from Ranking.src.Ranker import Ranker, add_player\n'), ((276, 300), 'Ranking.src.Ranker.add_player', 'add_player', (['file', 'player'], {}), '(file, player)\n', (286, 300), False, 'from Ranking.src.Ranker import Ranker, add_player\n')]
|
import numpy as np
from kinematics import to_robot_velocities
from viz.env import Viz
class ControlSignalsViz(Viz):
def __init__(self, marxbot, time_window=10):
super().__init__()
self.marxbot = marxbot
self.marxbot_max_vel = 30
self.time_window = time_window
def _show(self, env):
self.ax = env.get_axes()
self.ax.set_title('Control signals over time')
self.ax.set_xlabel("time [s]")
self.ax.set_xlim(-self.time_window, 0)
self.ax.grid(True)
self.n_dims = 2
self.n_samples = round(self.time_window / env.refresh_interval)
self.time = np.linspace(-self.time_window, 0, self.n_samples)
self.readings = np.full((self.n_dims, self.n_samples), np.nan)
labels = ["linear velocity [cm/s]", "angular velocity [rad/s]"]
colors = ["tab:blue", "tab:orange"]
mins = [-self.marxbot_max_vel, -10]
maxs = [+self.marxbot_max_vel, +10]
self.plots = []
for i in range(self.n_dims):
ax = self.ax
if i > 0:
ax = ax.twinx()
ax.set_ylabel(labels[i], color=colors[i])
ax.tick_params(axis='y', labelcolor=colors[i])
ax.tick_params(labelsize=8)
plot = ax.plot(self.time, self.readings[i], color=colors[i])[0]
ax.set_ylim(
mins[i] - 0.1 * abs(mins[i]),
maxs[i] + 0.1 * abs(maxs[i])
)
self.plots.append(plot)
def _update(self):
robot_velocities = to_robot_velocities(*self.marxbot.wheel_target_speeds)
self.readings = np.roll(self.readings, -1, axis=1)
self.readings[:, -1] = robot_velocities
for i in range(self.n_dims):
self.plots[i].set_ydata(self.readings[i])
|
[
"numpy.full",
"kinematics.to_robot_velocities",
"numpy.linspace",
"numpy.roll"
] |
[((648, 697), 'numpy.linspace', 'np.linspace', (['(-self.time_window)', '(0)', 'self.n_samples'], {}), '(-self.time_window, 0, self.n_samples)\n', (659, 697), True, 'import numpy as np\n'), ((722, 768), 'numpy.full', 'np.full', (['(self.n_dims, self.n_samples)', 'np.nan'], {}), '((self.n_dims, self.n_samples), np.nan)\n', (729, 768), True, 'import numpy as np\n'), ((1567, 1621), 'kinematics.to_robot_velocities', 'to_robot_velocities', (['*self.marxbot.wheel_target_speeds'], {}), '(*self.marxbot.wheel_target_speeds)\n', (1586, 1621), False, 'from kinematics import to_robot_velocities\n'), ((1647, 1681), 'numpy.roll', 'np.roll', (['self.readings', '(-1)'], {'axis': '(1)'}), '(self.readings, -1, axis=1)\n', (1654, 1681), True, 'import numpy as np\n')]
|
import torch
import torchvision
import torchvision.transforms as transforms
import matplotlib.pyplot as plt
import numpy as np
classes = ('beaver','dolphin','otter','seal','whale','aquarium fish','flatfish','ray','shark','trout','orchids','poppies','roses','sunflowers','tulips','bottles','bowls','cans','cups','plates','apples','mushrooms','oranges','pears','sweet peppers','clock','computer keyboard','lamp','telephone','television','bed','chair','couch','table','wardrobe','bee','beetle','butterfly','caterpillar','cockroach','bear','leopard','lion','tiger','wolf','bridge','castle','house','road','skyscraper','cloud','forest','mountain','plain','sea','camel','cattle','chimpanzee','elephant','kangaroo','fox','porcupine','possum','raccoon','skunk','crab','lobster','snail','spider','worm','baby','boy','girl','man','woman','crocodile','dinosaur','lizard','snake','turtle','hamster','mouse','rabbit','shrew','squirrel','maple','oak','palm','pine','willow','bicycle','bus','motorcycle','pickup truck','train','lawn-mower','rocket','streetcar','tank','tractor')
def _get_transform():
return transforms.Compose(
[transforms.ToTensor(),
transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))])
def get_train_data_loader():
transform = _get_transform()
trainset = torchvision.datasets.CIFAR100(root='./data', train=True,
download=True, transform=transform)
return torch.utils.data.DataLoader(trainset, batch_size=4,
shuffle=True, num_workers=2)
def get_test_data_loader():
transform = _get_transform()
testset = torchvision.datasets.CIFAR100(root='./data', train=False,
download=True, transform=transform)
return torch.utils.data.DataLoader(testset, batch_size=4,
shuffle=False, num_workers=2)
# function to show an image
def imshow(img):
img = img / 2 + 0.5 # unnormalize
npimg = img.numpy()
plt.imshow(np.transpose(npimg, (1, 2, 0)))
|
[
"torch.utils.data.DataLoader",
"numpy.transpose",
"torchvision.datasets.CIFAR100",
"torchvision.transforms.Normalize",
"torchvision.transforms.ToTensor"
] |
[((1293, 1389), 'torchvision.datasets.CIFAR100', 'torchvision.datasets.CIFAR100', ([], {'root': '"""./data"""', 'train': '(True)', 'download': '(True)', 'transform': 'transform'}), "(root='./data', train=True, download=True,\n transform=transform)\n", (1322, 1389), False, 'import torchvision\n'), ((1437, 1522), 'torch.utils.data.DataLoader', 'torch.utils.data.DataLoader', (['trainset'], {'batch_size': '(4)', 'shuffle': '(True)', 'num_workers': '(2)'}), '(trainset, batch_size=4, shuffle=True, num_workers=2\n )\n', (1464, 1522), False, 'import torch\n'), ((1641, 1738), 'torchvision.datasets.CIFAR100', 'torchvision.datasets.CIFAR100', ([], {'root': '"""./data"""', 'train': '(False)', 'download': '(True)', 'transform': 'transform'}), "(root='./data', train=False, download=True,\n transform=transform)\n", (1670, 1738), False, 'import torchvision\n'), ((1785, 1870), 'torch.utils.data.DataLoader', 'torch.utils.data.DataLoader', (['testset'], {'batch_size': '(4)', 'shuffle': '(False)', 'num_workers': '(2)'}), '(testset, batch_size=4, shuffle=False, num_workers=2\n )\n', (1812, 1870), False, 'import torch\n'), ((2039, 2069), 'numpy.transpose', 'np.transpose', (['npimg', '(1, 2, 0)'], {}), '(npimg, (1, 2, 0))\n', (2051, 2069), True, 'import numpy as np\n'), ((1125, 1146), 'torchvision.transforms.ToTensor', 'transforms.ToTensor', ([], {}), '()\n', (1144, 1146), True, 'import torchvision.transforms as transforms\n'), ((1153, 1207), 'torchvision.transforms.Normalize', 'transforms.Normalize', (['(0.5, 0.5, 0.5)', '(0.5, 0.5, 0.5)'], {}), '((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))\n', (1173, 1207), True, 'import torchvision.transforms as transforms\n')]
|
# -*- coding: utf-8 -*-
"""
Created on Tue Jun 19 12:14:06 2018
@author: Admin
"""
#from pandas import Series
#from statsmodels.graphics.tsaplots import plot_acf
#from statsmodels.graphics.tsaplots import plot_pacf
#from matplotlib import pyplot
#from pandas import DataFrame
#from pandas import read_csv
#from pandas import datetime
#
#def parser(x):
# return datetime.strptime(x, '%Y-%m-%d')
#
#series = read_csv('recom_train.csv', header=0, parse_dates=[0], index_col=0, squeeze=True, date_parser=parser)
##print(series.head())
#
#pyplot.figure(figsize=(30,10))
#pyplot.subplot(211)
#plot_acf(series, ax=pyplot.gca())
#pyplot.subplot(212)
#plot_pacf(series, ax=pyplot.gca())
#pyplot.show()
import warnings
#from pandas import Series
from statsmodels.tsa.arima_model import ARIMA
from sklearn.metrics import mean_squared_error
from math import sqrt
from pandas import datetime
from pandas import read_csv
# evaluate an ARIMA model for a given order (p,d,q) and return RMSE
def evaluate_arima_model(X, arima_order):
# prepare training dataset
X = X.astype('float32')
train_size = int(len(X) * 0.50)
train, test = X[0:train_size], X[train_size:]
history = [x for x in train]
# make predictions
predictions = list()
for t in range(len(test)):
model = ARIMA(history, order=arima_order)
model_fit = model.fit(disp=0)
yhat = model_fit.forecast()[0]
predictions.append(yhat)
history.append(test[t])
# calculate out of sample error
mse = mean_squared_error(test, predictions)
rmse = sqrt(mse)
return rmse
# evaluate combinations of p, d and q values for an ARIMA model
def evaluate_models(dataset, p_values, d_values, q_values):
dataset = dataset.astype('float32')
best_score, best_cfg = float("inf"), None
for p in p_values:
for d in d_values:
for q in q_values:
order = (p,d,q)
try:
mse = evaluate_arima_model(dataset, order)
if mse < best_score:
best_score, best_cfg = mse, order
print('ARIMA%s MSE=%.3f' % (order,mse))
except:
continue
print('Best ARIMA%s MSE=%.3f' % (best_cfg, best_score))
# load dataset
def parser(x):
return datetime.strptime(x, '%Y-%m-%d')
series = read_csv('data/recom_train.csv', header=0, parse_dates=[0], index_col=0, squeeze=True, date_parser=parser)
# evaluate parameters
p_values = range(0,13)
d_values = range(0, 4)
q_values = range(0, 13)
warnings.filterwarnings("ignore")
evaluate_models(series.values, p_values, d_values, q_values)
|
[
"statsmodels.tsa.arima_model.ARIMA",
"math.sqrt",
"warnings.filterwarnings",
"pandas.read_csv",
"pandas.datetime.strptime",
"sklearn.metrics.mean_squared_error"
] |
[((2148, 2258), 'pandas.read_csv', 'read_csv', (['"""data/recom_train.csv"""'], {'header': '(0)', 'parse_dates': '[0]', 'index_col': '(0)', 'squeeze': '(True)', 'date_parser': 'parser'}), "('data/recom_train.csv', header=0, parse_dates=[0], index_col=0,\n squeeze=True, date_parser=parser)\n", (2156, 2258), False, 'from pandas import read_csv\n'), ((2348, 2381), 'warnings.filterwarnings', 'warnings.filterwarnings', (['"""ignore"""'], {}), "('ignore')\n", (2371, 2381), False, 'import warnings\n'), ((1456, 1493), 'sklearn.metrics.mean_squared_error', 'mean_squared_error', (['test', 'predictions'], {}), '(test, predictions)\n', (1474, 1493), False, 'from sklearn.metrics import mean_squared_error\n'), ((1502, 1511), 'math.sqrt', 'sqrt', (['mse'], {}), '(mse)\n', (1506, 1511), False, 'from math import sqrt\n'), ((2105, 2137), 'pandas.datetime.strptime', 'datetime.strptime', (['x', '"""%Y-%m-%d"""'], {}), "(x, '%Y-%m-%d')\n", (2122, 2137), False, 'from pandas import datetime\n'), ((1264, 1297), 'statsmodels.tsa.arima_model.ARIMA', 'ARIMA', (['history'], {'order': 'arima_order'}), '(history, order=arima_order)\n', (1269, 1297), False, 'from statsmodels.tsa.arima_model import ARIMA\n')]
|
import os
import sys
ROOT_DIR = os.path.dirname(os.path.dirname(os.getcwd()))
if ROOT_DIR not in sys.path: sys.path.append(ROOT_DIR)
import numpy as np
import tensorflow as tf
from DeepSparseCoding.tf1x.analysis.base_analyzer import Analyzer
"""
Test for activity triggered analysis
NOTE: Should be executed from the repository's root directory
"""
class ActivityTriggeredAverageTest(tf.test.TestCase):
def testBasic(self):
rand_state = np.random.RandomState(1234)
rand_mean = 2.0
rand_var = 10
num_images = 50
num_pixels = 12
num_neurons = 24
base_analyzer = Analyzer()
model_weights = rand_state.normal(loc=0.0, scale=1.0, size=(num_pixels, num_neurons))
images = rand_state.normal(loc=rand_mean, scale=rand_var, size=[num_images, num_pixels])
# Batch size is greater than num images (shouldn't use batches)
batch_size = 100
atas_1 = base_analyzer.compute_atas(images, np.dot(images, model_weights), batch_size)
# Batch size is less than num images, but divides evenly
batch_size = 10
atas_2 = base_analyzer.compute_atas(images, np.dot(images, model_weights), batch_size)
# Batch size is less than num_images, but does not divide evenly
batch_size = 13
atas_3 = base_analyzer.compute_atas(images, np.dot(images, model_weights), batch_size)
self.assertAllClose(atas_1, atas_2, rtol=1e-06, atol=1e-06)
self.assertAllClose(atas_1, atas_3, rtol=1e-06, atol=1e-06)
if __name__ == "__main__":
tf.test.main()
|
[
"sys.path.append",
"tensorflow.test.main",
"os.getcwd",
"numpy.random.RandomState",
"DeepSparseCoding.tf1x.analysis.base_analyzer.Analyzer",
"numpy.dot"
] |
[((108, 133), 'sys.path.append', 'sys.path.append', (['ROOT_DIR'], {}), '(ROOT_DIR)\n', (123, 133), False, 'import sys\n'), ((1484, 1498), 'tensorflow.test.main', 'tf.test.main', ([], {}), '()\n', (1496, 1498), True, 'import tensorflow as tf\n'), ((65, 76), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (74, 76), False, 'import os\n'), ((448, 475), 'numpy.random.RandomState', 'np.random.RandomState', (['(1234)'], {}), '(1234)\n', (469, 475), True, 'import numpy as np\n'), ((596, 606), 'DeepSparseCoding.tf1x.analysis.base_analyzer.Analyzer', 'Analyzer', ([], {}), '()\n', (604, 606), False, 'from DeepSparseCoding.tf1x.analysis.base_analyzer import Analyzer\n'), ((928, 957), 'numpy.dot', 'np.dot', (['images', 'model_weights'], {}), '(images, model_weights)\n', (934, 957), True, 'import numpy as np\n'), ((1101, 1130), 'numpy.dot', 'np.dot', (['images', 'model_weights'], {}), '(images, model_weights)\n', (1107, 1130), True, 'import numpy as np\n'), ((1282, 1311), 'numpy.dot', 'np.dot', (['images', 'model_weights'], {}), '(images, model_weights)\n', (1288, 1311), True, 'import numpy as np\n')]
|
"""
Copyright 2021 Google LLC
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
https://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
"""Helper methods for Gym environment registration."""
import logging
from gym.envs import registration as gym_reg
def register(env_id: str, class_path: str, **kwargs):
"""Registers the given class path as a Gym environment.
Args:
env_id: The ID to register the environment as.
class_path: The fully-qualified class path of the environment.
**kwargs: Key-word arguments to pass to gym's register function.
"""
if env_id in gym_reg.registry.env_specs:
# This may happen during test discovery.
logging.warning('Re-registering environment %s', env_id)
del gym_reg.registry.env_specs[env_id]
gym_reg.register(env_id, entry_point=class_path, **kwargs)
|
[
"logging.warning",
"gym.envs.registration.register"
] |
[((1224, 1282), 'gym.envs.registration.register', 'gym_reg.register', (['env_id'], {'entry_point': 'class_path'}), '(env_id, entry_point=class_path, **kwargs)\n', (1240, 1282), True, 'from gym.envs import registration as gym_reg\n'), ((1115, 1171), 'logging.warning', 'logging.warning', (['"""Re-registering environment %s"""', 'env_id'], {}), "('Re-registering environment %s', env_id)\n", (1130, 1171), False, 'import logging\n')]
|
# -*- coding: utf-8 -*-
# API - cs
# FileName: download.py
# Version: 1.0.0
# Create: 2018-10-27
# Modify: 2018-11-07
import mimetypes
from .auth import OSS
from .util import Check
from act import StoreData
from .upload import FolderFile, Source
from .exception import CSCommonErr, CSDownloadErr
class Download(object):
def __init__(self, act=None, app=None):
"""
:param StoreData act:
:param StoreData app:
"""
self.act = act
self.app = app
def normal(self, content_type, expires, folder_file, intranet, source_file):
"""
:param str or None content_type: Content type in headers
:param int or None expires: Url expires
:param str folder_file: Eg: folder/${FolderId}/.../t=${CreateTime}&n=${FileName}&i=${FileId}
:param bool intranet: Return intranet url
:param str source_file: Eg: source/${FileId}.source.cs
:return:
"""
headers = None
if content_type is not None:
if mimetypes.guess_extension(content_type) is not None:
headers = {'Content-Type': content_type}
if not Check.download_expires(expires):
return CSDownloadErr.EXPIRES_LIMIT
if not folder_file.startswith('folder/'):
return CSCommonErr.INVALID_FOLDER
if not source_file.startswith('source/'):
return CSCommonErr.INVALID_SOURCE
appid = self.act.dict['PassiveParty']
if source_file is not None:
source = Source(appid, suffix=source_file)
else:
source = FolderFile(appid, suffix=folder_file).source
oss = OSS(intranet=intranet, extranet=(not intranet))
url = oss.sign_url('GET', source.key, expires, headers, intranet=intranet)
return {
'errcode': 0,
'url': url,
'headers': headers,
'source': source.suffix,
}
|
[
"mimetypes.guess_extension"
] |
[((1055, 1094), 'mimetypes.guess_extension', 'mimetypes.guess_extension', (['content_type'], {}), '(content_type)\n', (1080, 1094), False, 'import mimetypes\n')]
|
from twisted.internet.defer import Deferred, DeferredList
from twisted.web import server
from twisted.internet import reactor
from .base import BaseServer, LOGGER
from ..resources import DataResource
class DataServer(BaseServer):
def __init__(self,
aws_access_key_id,
aws_secret_access_key,
aws_s3_storage_bucket,
aws_sdb_reservation_domain,
port=5002,
log_file='dataserver.log',
log_directory=None,
log_level="debug",
name=None,
max_simultaneous_requests=50):
if name == None:
name = "AWSpider Data Server UUID: %s" % self.uuid
resource = DataResource(self)
self.site_port = reactor.listenTCP(port, server.Site(resource))
BaseServer.__init__(
self,
aws_access_key_id,
aws_secret_access_key,
aws_s3_storage_bucket=aws_s3_storage_bucket,
aws_sdb_reservation_domain=aws_sdb_reservation_domain,
log_file=log_file,
log_directory=log_directory,
log_level=log_level,
name=name,
max_simultaneous_requests=max_simultaneous_requests,
port=port)
def clearStorage(self):
return self.s3.emptyBucket(self.aws_s3_storage_bucket)
def getData(self, uuid):
LOGGER.debug("Getting %s from S3." % uuid)
d = self.s3.getObject(self.aws_s3_storage_bucket, uuid)
d.addCallback(self._getCallback, uuid)
d.addErrback(self._getErrback, uuid)
return d
def _getCallback(self, data, uuid):
LOGGER.debug("Got %s from S3." % (uuid))
return cPickle.loads(data["response"])
def _getErrback(self, error, uuid):
LOGGER.error("Could not get %s from S3.\n%s" % (uuid, error))
return error
def shutdown(self):
deferreds = []
LOGGER.debug("%s stopping on main HTTP interface." % self.name)
d = self.site_port.stopListening()
if isinstance(d, Deferred):
deferreds.append(d)
if len(deferreds) > 0:
d = DeferredList(deferreds)
d.addCallback(self._shutdownCallback)
return d
else:
return self._shutdownCallback(None)
def _shutdownCallback(self, data):
return BaseServer.shutdown(self)
|
[
"twisted.internet.defer.DeferredList",
"twisted.web.server.Site"
] |
[((818, 839), 'twisted.web.server.Site', 'server.Site', (['resource'], {}), '(resource)\n', (829, 839), False, 'from twisted.web import server\n'), ((2210, 2233), 'twisted.internet.defer.DeferredList', 'DeferredList', (['deferreds'], {}), '(deferreds)\n', (2222, 2233), False, 'from twisted.internet.defer import Deferred, DeferredList\n')]
|
"""Checking types and values."""
import os
from typing import Any, List, Type, Union
def raise_if_empty_str(*, val: str, val_name: str) -> None:
"""Raise if ``val`` is an empty :py:class:`str`.
Parameters
----------
val: str
Test target.
val_name: str
Test target name. Mainly used to create error message.
Raises
------
ValueError
When ``val`` is an empty :py:class:`str`.
"""
if not val:
raise ValueError(f'`{val_name}` must be non-empty `str`.')
def raise_if_is_directory(*, path: str) -> None:
"""Raise if ``path`` exists and is a directory.
Parameters
----------
path: str
Test path.
Raises
------
FileExistsError
When ``path`` exists and is a directory.
"""
if os.path.exists(path) and os.path.isdir(path):
raise FileExistsError(f'{path} is a directory.')
def raise_if_is_file(*, path: str) -> None:
"""Raise if ``path`` exists and is a file.
Parameters
----------
path: str
Test path.
Raises
------
FileExistsError
When ``path`` exists and is a file.
"""
if os.path.exists(path) and os.path.isfile(path):
raise FileExistsError(f'{path} is a file.')
def raise_if_not_in(*, val: Any, val_name: str, val_range: List) -> None:
"""Raise if ``val`` is not in ``val_range``.
Parameters
----------
val: Any
Test target.
val_name: str
Test target name. Mainly used to create error message.
val_range: list
Expected value range.
Raises
------
ValueError
When ``val`` is not in ``val_range``.
"""
if val not in val_range:
raise ValueError(
f'`{val_name}` must be one of the following values:' + ''.join(map(lambda v: f'\n- {v}', val_range))
)
def raise_if_not_instance(*, val: Any, val_name: str, val_type: Type) -> None:
"""Raise if ``val`` is not an instance of ``val_type``.
Parameters
----------
val: Any
Test target.
val_name: str
Test target name. Mainly used to create error message.
val_type: Type
Expected target type.
Raises
------
TypeError
When ``val`` is not an instance of ``val_type``.
"""
if not isinstance(val, val_type):
raise TypeError(f'`{val_name}` must be an instance of `{val_type.__name__}`.')
def raise_if_wrong_ordered(*, vals: List[Union[float, int]], val_names: List[str]) -> None:
"""Raise if there exist some ``i < j`` such that ``vals[i] > vals[j]``.
Parameters
----------
vals: list[Union[float, int]]
Test targets.
val_names: list[str]
Test targets' names. Mainly used to create error message.
Raises
------
ValueError
When there exist some ``i < j`` such that ``vals[i] > vals[j]``.
"""
for i in range(len(vals) - 1):
if vals[i] > vals[i + 1]:
raise ValueError(f'Must have `{" <= ".join(val_names)}`.')
|
[
"os.path.isdir",
"os.path.isfile",
"os.path.exists"
] |
[((739, 759), 'os.path.exists', 'os.path.exists', (['path'], {}), '(path)\n', (753, 759), False, 'import os\n'), ((764, 783), 'os.path.isdir', 'os.path.isdir', (['path'], {}), '(path)\n', (777, 783), False, 'import os\n'), ((1071, 1091), 'os.path.exists', 'os.path.exists', (['path'], {}), '(path)\n', (1085, 1091), False, 'import os\n'), ((1096, 1116), 'os.path.isfile', 'os.path.isfile', (['path'], {}), '(path)\n', (1110, 1116), False, 'import os\n')]
|
# encoding: utf-8
from leonardo.module.web.models import Widget
from leonardo.module.media.fields.image import ImageField
from django.db import models
from django.conf import settings
from django.utils import timezone
from django.utils.translation import ugettext_lazy as _
import datetime
from django.utils.encoding import python_2_unicode_compatible
from leonardo.module.media.fields.multistorage_file import MultiStorageFileField
class RoudnyreslOrders(models.Model):
jmeno = models.CharField(
max_length=255, verbose_name=u"Jméno", default='')
slug = models.CharField(
verbose_name=u"URL ID", max_length=150, blank=True, null=True)
prijmeni = models.CharField(
max_length=255, verbose_name=u"Příjmení", default='')
email = models.EmailField(
verbose_name=u"E-mail", default='')
telefon = models.CharField(
verbose_name=u"Telefon (ve tvaru: +420 123 456 789)", max_length=100)
dorucovaci_adresa = models.CharField(
verbose_name=u"Doručovací adresa", help_text="Př.: Pardubice, Benedettiho 709, 530 03", max_length=255)
firma = models.CharField(
max_length=255, verbose_name=u"Název firmy", default='')
ico = models.CharField(
verbose_name=u"IČO", max_length=255, default='')
dic = models.CharField(
verbose_name=u"DIČ", max_length=255, help_text="Vyplňte, jste-li plátce DPH", blank=True, null=True)
doprava = models.CharField(
verbose_name=u"Doprava", max_length=255)
platba = models.CharField(
verbose_name=u"Platba", max_length=255)
zprava = models.TextField(
verbose_name=u"Poznámka", default='', blank=True)
pub_date = models.DateTimeField(u'Datum objednávky', auto_now_add=True)
def get_absolute_url(self):
from leonardo.module.web.widget.application.reverse import app_reverse
return app_reverse(
'created_order',
'leonardo_form_roudnyresl.apps.roudnyresl',
kwargs={'slug': self.slug})
def get_full_name(self):
return str(self.jmeno.encode("utf-8") + " " + self.prijmeni.encode("utf-8"))
def __unicode__(self):
return self.jmeno
class Meta:
ordering = ['jmeno', ]
verbose_name = u'Objednávka'
verbose_name_plural = u'Objednávky'
class RoudnyreslProduct(models.Model):
objednavka = models.ForeignKey(RoudnyreslOrders,
verbose_name=u"Objednávka", related_name="orderproduct_set")
produkt = models.CharField(
verbose_name=u"Vyberte produkt", max_length=255)
tloustka = models.CharField(
verbose_name=u"Výška podstavy", max_length=255)
vyska = models.CharField(
verbose_name=u"Výška reliéfu", max_length=255)
rozmer_motivu = models.CharField(
verbose_name=u"Rozměr raženého motivu", max_length=255)
soubor = models.FileField(
u'Nahrání dat', upload_to='documents/%Y/%m/%d/')
def __unicode__(self):
return self.produkt
class Meta:
ordering = ['produkt', ]
verbose_name = u'Produkt'
verbose_name_plural = u'Produkty'
|
[
"django.db.models.FileField",
"django.db.models.TextField",
"django.db.models.CharField",
"django.db.models.ForeignKey",
"leonardo.module.web.widget.application.reverse.app_reverse",
"django.db.models.EmailField",
"django.db.models.DateTimeField"
] |
[((486, 553), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(255)', 'verbose_name': 'u"""Jméno"""', 'default': '""""""'}), "(max_length=255, verbose_name=u'Jméno', default='')\n", (502, 553), False, 'from django.db import models\n'), ((574, 653), 'django.db.models.CharField', 'models.CharField', ([], {'verbose_name': 'u"""URL ID"""', 'max_length': '(150)', 'blank': '(True)', 'null': '(True)'}), "(verbose_name=u'URL ID', max_length=150, blank=True, null=True)\n", (590, 653), False, 'from django.db import models\n'), ((678, 748), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(255)', 'verbose_name': 'u"""Příjmení"""', 'default': '""""""'}), "(max_length=255, verbose_name=u'Příjmení', default='')\n", (694, 748), False, 'from django.db import models\n'), ((770, 823), 'django.db.models.EmailField', 'models.EmailField', ([], {'verbose_name': 'u"""E-mail"""', 'default': '""""""'}), "(verbose_name=u'E-mail', default='')\n", (787, 823), False, 'from django.db import models\n'), ((847, 937), 'django.db.models.CharField', 'models.CharField', ([], {'verbose_name': 'u"""Telefon (ve tvaru: +420 123 456 789)"""', 'max_length': '(100)'}), "(verbose_name=u'Telefon (ve tvaru: +420 123 456 789)',\n max_length=100)\n", (863, 937), False, 'from django.db import models\n'), ((967, 1092), 'django.db.models.CharField', 'models.CharField', ([], {'verbose_name': 'u"""Doručovací adresa"""', 'help_text': '"""Př.: Pardubice, Benedettiho 709, 530 03"""', 'max_length': '(255)'}), "(verbose_name=u'Doručovací adresa', help_text=\n 'Př.: Pardubice, Benedettiho 709, 530 03', max_length=255)\n", (983, 1092), False, 'from django.db import models\n'), ((1109, 1182), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(255)', 'verbose_name': 'u"""Název firmy"""', 'default': '""""""'}), "(max_length=255, verbose_name=u'Název firmy', default='')\n", (1125, 1182), False, 'from django.db import models\n'), ((1202, 1267), 
'django.db.models.CharField', 'models.CharField', ([], {'verbose_name': 'u"""IČO"""', 'max_length': '(255)', 'default': '""""""'}), "(verbose_name=u'IČO', max_length=255, default='')\n", (1218, 1267), False, 'from django.db import models\n'), ((1287, 1409), 'django.db.models.CharField', 'models.CharField', ([], {'verbose_name': 'u"""DIČ"""', 'max_length': '(255)', 'help_text': '"""Vyplňte, jste-li plátce DPH"""', 'blank': '(True)', 'null': '(True)'}), "(verbose_name=u'DIČ', max_length=255, help_text=\n 'Vyplňte, jste-li plátce DPH', blank=True, null=True)\n", (1303, 1409), False, 'from django.db import models\n'), ((1428, 1485), 'django.db.models.CharField', 'models.CharField', ([], {'verbose_name': 'u"""Doprava"""', 'max_length': '(255)'}), "(verbose_name=u'Doprava', max_length=255)\n", (1444, 1485), False, 'from django.db import models\n'), ((1508, 1564), 'django.db.models.CharField', 'models.CharField', ([], {'verbose_name': 'u"""Platba"""', 'max_length': '(255)'}), "(verbose_name=u'Platba', max_length=255)\n", (1524, 1564), False, 'from django.db import models\n'), ((1587, 1653), 'django.db.models.TextField', 'models.TextField', ([], {'verbose_name': 'u"""Poznámka"""', 'default': '""""""', 'blank': '(True)'}), "(verbose_name=u'Poznámka', default='', blank=True)\n", (1603, 1653), False, 'from django.db import models\n'), ((1678, 1738), 'django.db.models.DateTimeField', 'models.DateTimeField', (['u"""Datum objednávky"""'], {'auto_now_add': '(True)'}), "(u'Datum objednávky', auto_now_add=True)\n", (1698, 1738), False, 'from django.db import models\n'), ((2361, 2461), 'django.db.models.ForeignKey', 'models.ForeignKey', (['RoudnyreslOrders'], {'verbose_name': 'u"""Objednávka"""', 'related_name': '"""orderproduct_set"""'}), "(RoudnyreslOrders, verbose_name=u'Objednávka',\n related_name='orderproduct_set')\n", (2378, 2461), False, 'from django.db import models\n'), ((2480, 2545), 'django.db.models.CharField', 'models.CharField', ([], {'verbose_name': 'u"""Vyberte 
produkt"""', 'max_length': '(255)'}), "(verbose_name=u'Vyberte produkt', max_length=255)\n", (2496, 2545), False, 'from django.db import models\n'), ((2570, 2634), 'django.db.models.CharField', 'models.CharField', ([], {'verbose_name': 'u"""Výška podstavy"""', 'max_length': '(255)'}), "(verbose_name=u'Výška podstavy', max_length=255)\n", (2586, 2634), False, 'from django.db import models\n'), ((2656, 2719), 'django.db.models.CharField', 'models.CharField', ([], {'verbose_name': 'u"""Výška reliéfu"""', 'max_length': '(255)'}), "(verbose_name=u'Výška reliéfu', max_length=255)\n", (2672, 2719), False, 'from django.db import models\n'), ((2749, 2821), 'django.db.models.CharField', 'models.CharField', ([], {'verbose_name': 'u"""Rozměr raženého motivu"""', 'max_length': '(255)'}), "(verbose_name=u'Rozměr raženého motivu', max_length=255)\n", (2765, 2821), False, 'from django.db import models\n'), ((2844, 2909), 'django.db.models.FileField', 'models.FileField', (['u"""Nahrání dat"""'], {'upload_to': '"""documents/%Y/%m/%d/"""'}), "(u'Nahrání dat', upload_to='documents/%Y/%m/%d/')\n", (2860, 2909), False, 'from django.db import models\n'), ((1866, 1970), 'leonardo.module.web.widget.application.reverse.app_reverse', 'app_reverse', (['"""created_order"""', '"""leonardo_form_roudnyresl.apps.roudnyresl"""'], {'kwargs': "{'slug': self.slug}"}), "('created_order', 'leonardo_form_roudnyresl.apps.roudnyresl',\n kwargs={'slug': self.slug})\n", (1877, 1970), False, 'from leonardo.module.web.widget.application.reverse import app_reverse\n')]
|
import sys
import binascii
import hashlib
from PyQt5.QtWidgets import QApplication,QMainWindow,QFileDialog
from xtui import Ui_Form
from xtoolsfunc import XToolsFunc
base64_method = ["encode","decode"]
hash_available = hashlib.algorithms_guaranteed
class MainUi(QMainWindow,QFileDialog,Ui_Form):
def __init__(self,parent=None):
super(MainUi,self).__init__(parent)
self.setupUi(self)
self.type_ComboBox.addItem("")
self.type_ComboBox.addItem("")
self.type_ComboBox.setItemText(0,"base64")
self.type_ComboBox.setItemText(1,"Hash")
self.type_ComboBox.activated.connect(self.enc_type)
self.confirm_Button.clicked.connect(self.confirm)
self.open_Button.clicked.connect(self.openfile)
for i in range(len(base64_method)):
self.method_ComboBox.addItem("")
self.method_ComboBox.setItemText(i,base64_method[i])
def openfile(self):
filedir = self.getOpenFileName(self,"open file","./","All Files (*)")[0]
self.input_TextEdit.setText(filedir)
def enc_type(self):
self.method_ComboBox.clear()
if self.type_ComboBox.currentText() == "Hash":
hash_available_list = list(hash_available)
for i in range(len(hash_available_list)):
self.method_ComboBox.addItem("")
self.method_ComboBox.setItemText(i,hash_available_list[i])
else:
for i in range(len(base64_method)):
self.method_ComboBox.addItem("")
self.method_ComboBox.setItemText(i,base64_method[i])
def confirm(self):
enc_type = self.type_ComboBox.currentText()
method = self.method_ComboBox.currentText()
value = self.input_TextEdit.toPlainText()
if value:
if enc_type == "base64":
result = XToolsFunc.base64_method(method,value)
self.ouput_TextBrowser.setText(result[0])
self.output_label.setText(result[1])
elif enc_type == "Hash":
result = XToolsFunc.hash_method(method,value)
self.ouput_TextBrowser.setText(result[0])
self.output_label.setText(result[1])
else:
self.output_label.setText("无输入")
self.ouput_TextBrowser.clear()
def main():
app = QApplication(sys.argv)
myUi = MainUi()
myUi.show()
sys.exit(app.exec_())
if __name__ == "__main__":
main()
|
[
"PyQt5.QtWidgets.QApplication",
"xtoolsfunc.XToolsFunc.base64_method",
"xtoolsfunc.XToolsFunc.hash_method"
] |
[((2346, 2368), 'PyQt5.QtWidgets.QApplication', 'QApplication', (['sys.argv'], {}), '(sys.argv)\n', (2358, 2368), False, 'from PyQt5.QtWidgets import QApplication, QMainWindow, QFileDialog\n'), ((1861, 1900), 'xtoolsfunc.XToolsFunc.base64_method', 'XToolsFunc.base64_method', (['method', 'value'], {}), '(method, value)\n', (1885, 1900), False, 'from xtoolsfunc import XToolsFunc\n'), ((2073, 2110), 'xtoolsfunc.XToolsFunc.hash_method', 'XToolsFunc.hash_method', (['method', 'value'], {}), '(method, value)\n', (2095, 2110), False, 'from xtoolsfunc import XToolsFunc\n')]
|
'''
Description: ip反查域名
Author: Senkita
Date: 2020-10-09 10:23:52
LastEditors: Senkita
LastEditTime: 2020-10-09 15:01:39
'''
import os
from utils.Query import batch_query
if __name__ == "__main__":
os.makedirs('./Log', exist_ok=True)
filename = 'public.txt'
save_filename = 'domain_name.txt'
batch_query(filename, save_filename)
|
[
"os.makedirs",
"utils.Query.batch_query"
] |
[((214, 249), 'os.makedirs', 'os.makedirs', (['"""./Log"""'], {'exist_ok': '(True)'}), "('./Log', exist_ok=True)\n", (225, 249), False, 'import os\n'), ((325, 361), 'utils.Query.batch_query', 'batch_query', (['filename', 'save_filename'], {}), '(filename, save_filename)\n', (336, 361), False, 'from utils.Query import batch_query\n')]
|
import numpy as np
import matplotlib.pyplot as plt
def spectrum(f, x):
# Discrete Fourier transform
A = np.fft.rfft(f(x))
A_amplitude = np.abs(A)
# Compute the corresponding frequencies
dx = x[1] - x[0]
freqs = np.linspace(0, np.pi/dx, A_amplitude.size)
plt.plot(freqs[:len(freqs)/2], A_amplitude[:len(freqs)/2])
# Mesh
L = 10; Nx = 100
x = np.linspace(0, L, Nx+1)
spectrum(lambda x: np.where(x < 5, 1, 0), x)
spectrum(lambda x: np.sin(np.pi*x/float(L)) + np.sin(np.pi*20*x/float(L)), x)
s = 0.5
spectrum(lambda x: 1./(np.sqrt(2*np.pi)*s)*np.exp(-0.5*((x-L/2.)/s)**2), x)
def f(x):
r = np.zeros_like(x)
r[len(x)/2] = 1
return r
spectrum(f, x)
figfile = 'tmp'
plt.legend(['step', '2sin', 'gauss', 'peak'])
plt.savefig(figfile + '.pdf')
plt.savefig(figfile + '.png')
plt.show()
|
[
"numpy.zeros_like",
"numpy.abs",
"matplotlib.pyplot.show",
"matplotlib.pyplot.legend",
"numpy.where",
"numpy.exp",
"numpy.linspace",
"matplotlib.pyplot.savefig",
"numpy.sqrt"
] |
[((373, 398), 'numpy.linspace', 'np.linspace', (['(0)', 'L', '(Nx + 1)'], {}), '(0, L, Nx + 1)\n', (384, 398), True, 'import numpy as np\n'), ((707, 752), 'matplotlib.pyplot.legend', 'plt.legend', (["['step', '2sin', 'gauss', 'peak']"], {}), "(['step', '2sin', 'gauss', 'peak'])\n", (717, 752), True, 'import matplotlib.pyplot as plt\n'), ((753, 782), 'matplotlib.pyplot.savefig', 'plt.savefig', (["(figfile + '.pdf')"], {}), "(figfile + '.pdf')\n", (764, 782), True, 'import matplotlib.pyplot as plt\n'), ((783, 812), 'matplotlib.pyplot.savefig', 'plt.savefig', (["(figfile + '.png')"], {}), "(figfile + '.png')\n", (794, 812), True, 'import matplotlib.pyplot as plt\n'), ((813, 823), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (821, 823), True, 'import matplotlib.pyplot as plt\n'), ((149, 158), 'numpy.abs', 'np.abs', (['A'], {}), '(A)\n', (155, 158), True, 'import numpy as np\n'), ((237, 281), 'numpy.linspace', 'np.linspace', (['(0)', '(np.pi / dx)', 'A_amplitude.size'], {}), '(0, np.pi / dx, A_amplitude.size)\n', (248, 281), True, 'import numpy as np\n'), ((624, 640), 'numpy.zeros_like', 'np.zeros_like', (['x'], {}), '(x)\n', (637, 640), True, 'import numpy as np\n'), ((417, 438), 'numpy.where', 'np.where', (['(x < 5)', '(1)', '(0)'], {}), '(x < 5, 1, 0)\n', (425, 438), True, 'import numpy as np\n'), ((572, 611), 'numpy.exp', 'np.exp', (['(-0.5 * ((x - L / 2.0) / s) ** 2)'], {}), '(-0.5 * ((x - L / 2.0) / s) ** 2)\n', (578, 611), True, 'import numpy as np\n'), ((552, 570), 'numpy.sqrt', 'np.sqrt', (['(2 * np.pi)'], {}), '(2 * np.pi)\n', (559, 570), True, 'import numpy as np\n')]
|
# Copyright 2019-2020 Spotify AB
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import datetime
import logging
import time
import emoji
from googleapiclient import discovery
JOB_STATE_MAP = {"cancel": "JOB_STATE_CANCELLED", "drain": "JOB_STATE_DRAINED"}
class StopJob(object):
def __init__(self, api_version=None):
self._set_dataflow_client(api_version)
def _set_dataflow_client(self, api_version):
if not api_version:
api_version = "v1b3"
self._client = discovery.build("dataflow", api_version)
def _check_job_running(self, job_name, project, region):
request = (
self._client.projects()
.locations()
.jobs()
.list(projectId=project, location=region, filter="ACTIVE",)
)
try:
response = request.execute()
except Exception as e:
logging.warning(
"Could not find running job '{}' in project '{}': {}".format(
job_name, project, e
)
)
logging.warning(
"Continuing to attempt deploying '{}'".format(job_name)
)
return
job_results = response.get("jobs", [])
if job_results:
for result in job_results:
if result["name"] == job_name:
return result
def _update_job_state(self, job, req_state=None, retries=None):
if retries is None:
retries = 0
_req_state = JOB_STATE_MAP.get(req_state, JOB_STATE_MAP["cancel"])
if job.get("requestedState") is not _req_state:
job["requestedState"] = _req_state
request = (
self._client.projects()
.locations()
.jobs()
.update(
jobId=job["id"],
projectId=job["projectId"],
location=job["location"],
body=job,
)
)
try:
request.execute()
except Exception as e:
# generic catch if 4xx error - probably shouldn't retry
if getattr(e, "resp", None):
if e.resp.status < 500:
msg = "Failed to {} job '{}': {}".format(
req_state, job["name"], e
)
logging.error(msg)
raise SystemExit(1)
if retries > 2:
msg = "Max retries reached: could not {} job '{}': {}".format(
req_state, job["name"], e
)
logging.error(msg)
raise SystemExit(1)
logging.info(
"Failed to {} job '{}'. Trying again after 30s...".format(
req_state, job["name"]
)
)
retries += 1
time.sleep(30)
self._update_job_state(job, req_state, retries)
def _watch_job_state(self, job, timeout=600):
timeout = datetime.datetime.now() + datetime.timedelta(seconds=timeout)
request = (
self._client.projects()
.locations()
.jobs()
.get(
jobId=job["id"],
projectId=job["projectId"],
location=job["location"],
)
)
while datetime.datetime.now() < timeout:
try:
resp = request.execute()
except Exception as e:
msg = (
"Failed to get current status for job '{}'. Error: {}.\n"
"Trying again after 5s...".format(job["name"], e)
)
logging.info(msg)
time.sleep(5)
continue
if resp["currentState"] in JOB_STATE_MAP.values():
return
else:
msg = "Waiting for job '{}' to reach terminal state...".format(
job["name"]
)
logging.info(msg)
time.sleep(5)
msg = "Job '{}' did not reach terminal state after '{}' secs.".format(
job["name"], timeout
)
logging.error(msg)
raise SystemExit(1)
def stop(self, job_name, project, region, strategy, api_version=None):
self._set_dataflow_client(api_version)
current_running_job = self._check_job_running(
job_name, project, region
)
if not current_running_job:
return
self._update_job_state(current_running_job, req_state=strategy)
self._watch_job_state(current_running_job)
verb = "cancelled" if strategy == "cancel" else "drained"
msg = "Successfully {} job '{}' :smile_cat:".format(verb, job_name)
logging.info(emoji.emojize(msg, use_aliases=True))
|
[
"logging.error",
"emoji.emojize",
"datetime.datetime.now",
"time.sleep",
"logging.info",
"datetime.timedelta",
"googleapiclient.discovery.build"
] |
[((1007, 1047), 'googleapiclient.discovery.build', 'discovery.build', (['"""dataflow"""', 'api_version'], {}), "('dataflow', api_version)\n", (1022, 1047), False, 'from googleapiclient import discovery\n'), ((4693, 4711), 'logging.error', 'logging.error', (['msg'], {}), '(msg)\n', (4706, 4711), False, 'import logging\n'), ((3516, 3539), 'datetime.datetime.now', 'datetime.datetime.now', ([], {}), '()\n', (3537, 3539), False, 'import datetime\n'), ((3542, 3577), 'datetime.timedelta', 'datetime.timedelta', ([], {'seconds': 'timeout'}), '(seconds=timeout)\n', (3560, 3577), False, 'import datetime\n'), ((3856, 3879), 'datetime.datetime.now', 'datetime.datetime.now', ([], {}), '()\n', (3877, 3879), False, 'import datetime\n'), ((5309, 5345), 'emoji.emojize', 'emoji.emojize', (['msg'], {'use_aliases': '(True)'}), '(msg, use_aliases=True)\n', (5322, 5345), False, 'import emoji\n'), ((3372, 3386), 'time.sleep', 'time.sleep', (['(30)'], {}), '(30)\n', (3382, 3386), False, 'import time\n'), ((4514, 4531), 'logging.info', 'logging.info', (['msg'], {}), '(msg)\n', (4526, 4531), False, 'import logging\n'), ((4548, 4561), 'time.sleep', 'time.sleep', (['(5)'], {}), '(5)\n', (4558, 4561), False, 'import time\n'), ((3103, 3121), 'logging.error', 'logging.error', (['msg'], {}), '(msg)\n', (3116, 3121), False, 'import logging\n'), ((4190, 4207), 'logging.info', 'logging.info', (['msg'], {}), '(msg)\n', (4202, 4207), False, 'import logging\n'), ((4224, 4237), 'time.sleep', 'time.sleep', (['(5)'], {}), '(5)\n', (4234, 4237), False, 'import time\n'), ((2856, 2874), 'logging.error', 'logging.error', (['msg'], {}), '(msg)\n', (2869, 2874), False, 'import logging\n')]
|
# -*- coding: utf-8 -*-
#VecMap0.1
#The first versio of VecMap
from PyQt5 import QtCore, QtGui, QtWidgets
from PyQt5.QtWidgets import *
from PyQt5.QtCore import *
import matplotlib
matplotlib.use('Qt5Agg')
from matplotlib.backends.backend_qt5agg import FigureCanvasQTAgg as FigureCanvas
from matplotlib.backends.backend_qt5agg import NavigationToolbar2QT as NavigationToolbar
from matplotlib.figure import Figure
class Ui_VecMap(QtWidgets.QMainWindow):
def __init__(self):
super(Ui_VecMap,self).__init__()
self.setupUi(self)
self.retranslateUi(self)
def setupUi(self, VecMap):
VecMap.setObjectName("VecMap")
VecMap.resize(402, 876)
VecMap.setMinimumSize(QtCore.QSize(402, 836))
VecMap.setMaximumSize(QtCore.QSize(1024, 1024))
self.pushButton = QtWidgets.QPushButton(VecMap)
self.pushButton.setGeometry(QtCore.QRect(20, 40, 91, 41))
self.pushButton.setObjectName("pushButton")
self.checkBox = QtWidgets.QCheckBox(VecMap)
self.checkBox.setGeometry(QtCore.QRect(150, 10, 111, 20))
self.checkBox.setObjectName("checkBox")
self.line = QtWidgets.QFrame(VecMap)
self.line.setGeometry(QtCore.QRect(20, 90, 371, 21))
self.line.setFrameShape(QtWidgets.QFrame.HLine)
self.line.setFrameShadow(QtWidgets.QFrame.Sunken)
self.line.setObjectName("line")
self.label = QtWidgets.QLabel(VecMap)
self.label.setGeometry(QtCore.QRect(20, 10, 121, 16))
self.label.setObjectName("label")
self.label_2 = QtWidgets.QLabel(VecMap)
self.label_2.setGeometry(QtCore.QRect(130, 40, 251, 51))
self.label_2.setTextFormat(QtCore.Qt.AutoText)
self.label_2.setScaledContents(False)
self.label_2.setWordWrap(True)
self.label_2.setObjectName("label_2")
self.lineEdit = QtWidgets.QLineEdit(VecMap)
self.lineEdit.setGeometry(QtCore.QRect(130, 130, 30, 20))
self.lineEdit.setObjectName("lineEdit")
self.label_3 = QtWidgets.QLabel(VecMap)
self.label_3.setGeometry(QtCore.QRect(20, 110, 191, 16))
self.label_3.setObjectName("label_3")
self.label_4 = QtWidgets.QLabel(VecMap)
self.label_4.setGeometry(QtCore.QRect(20, 130, 111, 16))
self.label_4.setObjectName("label_4")
self.pushButton_2 = QtWidgets.QPushButton(VecMap)
self.pushButton_2.setGeometry(QtCore.QRect(20, 170, 91, 41))
self.pushButton_2.setObjectName("pushButton_2")
self.pushButton_3 = QtWidgets.QPushButton(VecMap)
self.pushButton_3.setGeometry(QtCore.QRect(20, 230, 91, 41))
self.pushButton_3.setObjectName("pushButton_3")
self.label_5 = QtWidgets.QLabel(VecMap)
self.label_5.setGeometry(QtCore.QRect(130, 160, 251, 51))
self.label_5.setTextFormat(QtCore.Qt.AutoText)
self.label_5.setScaledContents(False)
self.label_5.setWordWrap(True)
self.label_5.setObjectName("label_5")
self.label_6 = QtWidgets.QLabel(VecMap)
self.label_6.setGeometry(QtCore.QRect(130, 230, 251, 51))
self.label_6.setTextFormat(QtCore.Qt.AutoText)
self.label_6.setScaledContents(False)
self.label_6.setWordWrap(True)
self.label_6.setObjectName("label_6")
self.line_2 = QtWidgets.QFrame(VecMap)
self.line_2.setGeometry(QtCore.QRect(20, 280, 371, 21))
self.line_2.setFrameShape(QtWidgets.QFrame.HLine)
self.line_2.setFrameShadow(QtWidgets.QFrame.Sunken)
self.line_2.setObjectName("line_2")
self.label_9 = QtWidgets.QLabel(VecMap)
self.label_9.setGeometry(QtCore.QRect(20, 300, 191, 16))
self.label_9.setObjectName("label_9")
self.checkBox_2 = QtWidgets.QCheckBox(VecMap)
self.checkBox_2.setGeometry(QtCore.QRect(20, 330, 111, 20))
self.checkBox_2.setObjectName("checkBox_2")
self.checkBox_3 = QtWidgets.QCheckBox(VecMap)
self.checkBox_3.setGeometry(QtCore.QRect(150, 330, 131, 20))
self.checkBox_3.setObjectName("checkBox_3")
self.pushButton_4 = QtWidgets.QPushButton(VecMap)
self.pushButton_4.setGeometry(QtCore.QRect(20, 370, 91, 41))
self.pushButton_4.setObjectName("pushButton_4")
self.label_10 = QtWidgets.QLabel(VecMap)
self.label_10.setGeometry(QtCore.QRect(130, 360, 251, 51))
self.label_10.setTextFormat(QtCore.Qt.AutoText)
self.label_10.setScaledContents(False)
self.label_10.setWordWrap(True)
self.label_10.setObjectName("label_10")
self.checkBox_4 = QtWidgets.QCheckBox(VecMap)
self.checkBox_4.setGeometry(QtCore.QRect(260, 10, 111, 20))
self.checkBox_4.setObjectName("checkBox_4")
self.line_3 = QtWidgets.QFrame(VecMap)
self.line_3.setGeometry(QtCore.QRect(20, 420, 371, 21))
self.line_3.setFrameShape(QtWidgets.QFrame.HLine)
self.line_3.setFrameShadow(QtWidgets.QFrame.Sunken)
self.line_3.setObjectName("line_3")
self.label_11 = QtWidgets.QLabel(VecMap)
self.label_11.setGeometry(QtCore.QRect(20, 440, 191, 16))
self.label_11.setObjectName("label_11")
self.label_12 = QtWidgets.QLabel(VecMap)
self.label_12.setGeometry(QtCore.QRect(170, 130, 191, 16))
self.label_12.setObjectName("label_12")
self.label_14 = QtWidgets.QLabel(VecMap)
self.label_14.setGeometry(QtCore.QRect(20, 510, 381, 16))
self.label_14.setObjectName("label_14")
self.lineEdit_4 = QtWidgets.QLineEdit(VecMap)
self.lineEdit_4.setGeometry(QtCore.QRect(20, 550, 251, 22))
self.lineEdit_4.setObjectName("lineEdit_4")
self.label_15 = QtWidgets.QLabel(VecMap)
self.label_15.setGeometry(QtCore.QRect(20, 530, 181, 16))
self.label_15.setObjectName("label_15")
self.label_16 = QtWidgets.QLabel(VecMap)
self.label_16.setGeometry(QtCore.QRect(20, 580, 381, 16))
self.label_16.setObjectName("label_16")
self.label_17 = QtWidgets.QLabel(VecMap)
self.label_17.setGeometry(QtCore.QRect(20, 600, 181, 16))
self.label_17.setObjectName("label_17")
self.lineEdit_5 = QtWidgets.QLineEdit(VecMap)
self.lineEdit_5.setGeometry(QtCore.QRect(20, 620, 251, 22))
self.lineEdit_5.setObjectName("lineEdit_5")
self.pushButton_5 = QtWidgets.QPushButton(VecMap)
self.pushButton_5.setGeometry(QtCore.QRect(280, 550, 101, 91))
self.pushButton_5.setObjectName("pushButton_5")
self.pushButton_6 = QtWidgets.QPushButton(VecMap)
self.pushButton_6.setGeometry(QtCore.QRect(20, 680, 80, 41))
self.pushButton_6.setObjectName("pushButton_6")
self.label_18 = QtWidgets.QLabel(VecMap)
self.label_18.setGeometry(QtCore.QRect(200, 680, 191, 51))
self.label_18.setTextFormat(QtCore.Qt.AutoText)
self.label_18.setScaledContents(False)
self.label_18.setWordWrap(True)
self.label_18.setObjectName("label_18")
self.pushButton_7 = QtWidgets.QPushButton(VecMap)
self.pushButton_7.setGeometry(QtCore.QRect(290, 460, 91, 51))
self.pushButton_7.setObjectName("pushButton_7")
self.line_4 = QtWidgets.QFrame(VecMap)
self.line_4.setGeometry(QtCore.QRect(20, 730, 371, 21))
self.line_4.setFrameShape(QtWidgets.QFrame.HLine)
self.line_4.setFrameShadow(QtWidgets.QFrame.Sunken)
self.line_4.setObjectName("line_4")
self.pushButton_8 = QtWidgets.QPushButton(VecMap)
self.pushButton_8.setGeometry(QtCore.QRect(20, 780, 120, 28))
self.pushButton_8.setObjectName("pushButton_8")
self.label_19 = QtWidgets.QLabel(VecMap)
self.label_19.setGeometry(QtCore.QRect(60, 850, 291, 16))
self.label_19.setObjectName("label_19")
self.label_20 = QtWidgets.QLabel(VecMap)
self.label_20.setGeometry(QtCore.QRect(20, 750, 211, 16))
self.label_20.setObjectName("label_20")
self.pushButton_9 = QtWidgets.QPushButton(VecMap)
self.pushButton_9.setGeometry(QtCore.QRect(150, 780, 120, 28))
self.pushButton_9.setObjectName("pushButton_9")
self.pushButton_10 = QtWidgets.QPushButton(VecMap)
self.pushButton_10.setGeometry(QtCore.QRect(20, 810, 120, 28))
self.pushButton_10.setObjectName("pushButton_10")
self.pushButton_11 = QtWidgets.QPushButton(VecMap)
self.pushButton_11.setGeometry(QtCore.QRect(150, 810, 120, 28))
self.pushButton_11.setObjectName("pushButton_11")
self.pushButton_12 = QtWidgets.QPushButton(VecMap)
self.pushButton_12.setGeometry(QtCore.QRect(280, 780, 101, 58))
font = QtGui.QFont()
font.setBold(True)
font.setWeight(75)
self.pushButton_12.setFont(font)
self.pushButton_12.setObjectName("pushButton_12")
self.radioButton = QtWidgets.QRadioButton(VecMap)
self.radioButton.setGeometry(QtCore.QRect(20, 480, 95, 20))
self.radioButton.setChecked(True)
self.radioButton.setObjectName("radioButton")
self.radioButton_2 = QtWidgets.QRadioButton(VecMap)
self.radioButton_2.setGeometry(QtCore.QRect(90, 480, 95, 20))
self.radioButton_2.setObjectName("radioButton_2")
self.label_21 = QtWidgets.QLabel(VecMap)
self.label_21.setGeometry(QtCore.QRect(20, 460, 171, 16))
self.label_21.setObjectName("label_21")
self.pushButton_13 = QtWidgets.QPushButton(VecMap)
self.pushButton_13.setGeometry(QtCore.QRect(200, 460, 81, 51))
self.pushButton_13.setObjectName("pushButton_13")
self.label_7 = QtWidgets.QLabel(VecMap)
self.label_7.setGeometry(QtCore.QRect(20, 650, 41, 16))
self.label_7.setObjectName("label_7")
self.lineEdit_2 = QtWidgets.QLineEdit(VecMap)
self.lineEdit_2.setGeometry(QtCore.QRect(60, 650, 30, 20))
self.lineEdit_2.setObjectName("lineEdit_2")
self.pushButton_14 = QtWidgets.QPushButton(VecMap)
self.pushButton_14.setGeometry(QtCore.QRect(110, 680, 80, 41))
self.pushButton_14.setObjectName("pushButton_14")
self.lineEdit_3 = QtWidgets.QLineEdit(VecMap)
self.lineEdit_3.setGeometry(QtCore.QRect(150, 650, 30, 20))
self.lineEdit_3.setObjectName("lineEdit_3")
self.label_13 = QtWidgets.QLabel(VecMap)
self.label_13.setGeometry(QtCore.QRect(110, 650, 41, 16))
self.label_13.setObjectName("label_13")
self.checkBox_5 = QtWidgets.QCheckBox(VecMap)
self.checkBox_5.setGeometry(QtCore.QRect(210, 650, 111, 20))
self.checkBox_5.setChecked(True)
self.checkBox_5.setObjectName("checkBox_5")
self.retranslateUi(VecMap)
QtCore.QMetaObject.connectSlotsByName(VecMap)
#=======Connect all the functions=============================================
self.pushButton.clicked.connect(self.openfile)
self.pushButton_2.clicked.connect(self.ini_atom_position)
self.pushButton_3.clicked.connect(self.find_separation)
self.pushButton_4.clicked.connect(self.refine_atom_position)
self.pushButton_13.clicked.connect(self.cal_disp)
self.pushButton_5.clicked.connect(self.vec_ang_dist)
self.pushButton_6.clicked.connect(self.show_vec_map)
self.pushButton_14.clicked.connect(self.show_O_vec_map)
self.pushButton_7.clicked.connect(self.load_from_csv)
self.pushButton_8.clicked.connect(self.disclaimer)
self.pushButton_9.clicked.connect(self.show_about)
self.pushButton_10.clicked.connect(self.acknowledgments)
self.pushButton_11.clicked.connect(self.show_contact)
self.pushButton_12.clicked.connect(self.donate)
    def retranslateUi(self, VecMap):
        """Apply all user-visible strings to the widgets (Qt Designer retranslation boilerplate).

        Called once from setupUi; every string goes through Qt's translate so
        the UI could in principle be localized.
        """
        _translate = QtCore.QCoreApplication.translate
        VecMap.setWindowTitle(_translate("VecMap", "VecMap0.1"))
        #VecMap.setWindowIcon(QtGui.QIcon('icon.png'))
        self.pushButton.setText(_translate("VecMap", "Load Image"))
        self.checkBox.setText(_translate("VecMap", "ABF/BF image"))
        self.label.setText(_translate("VecMap", "Step 1. Load image"))
        self.label_2.setText(_translate("VecMap", "<html><head/><body><p>Load a HR-STEM image with a perovskite structure. Support [001] and [011] zone axes. Filtered image is preferred.</p><p><br/></p></body></html>"))
        self.lineEdit.setText(_translate("VecMap", "8"))
        self.label_3.setText(_translate("VecMap", "Step 2. Initialize atom positions"))
        self.label_4.setText(_translate("VecMap", "Separation factor"))
        self.pushButton_2.setText(_translate("VecMap", "Initialize"))
        # NOTE: adjacent string literals concatenate into a multi-line button label.
        self.pushButton_3.setText(_translate("VecMap", "Find \n"
                                             "separation"))
        self.label_5.setText(_translate("VecMap", "<html><head/><body><p>Input an appropriate separation factor to initialize the atom positions for refining. Adding/removing atoms by left-click.</p></body></html>"))
        self.label_6.setText(_translate("VecMap", "<html><head/><body><p>Try a few separation factors around the given number to determine the best separation factor.</p></body></html>"))
        self.label_9.setText(_translate("VecMap", "Step 3. Refine atom positions"))
        self.checkBox_2.setText(_translate("VecMap", "Refine Oxygen"))
        self.checkBox_3.setText(_translate("VecMap", "Save result plots"))
        self.pushButton_4.setText(_translate("VecMap", "Refine"))
        self.label_10.setText(_translate("VecMap", "<html><head/><body><p>Refine atom positions. Check [001] or [011] zone. Only check Refine Oxygen if O columns are visible.</p></body></html>"))
        self.checkBox_4.setText(_translate("VecMap", "[011] Zone"))
        self.label_11.setText(_translate("VecMap", "Step 4. Generate a vector map"))
        self.label_12.setText(_translate("VecMap", "e.g., something around 8-12"))
        self.label_14.setText(_translate("VecMap", "List of angles (degrees) of vectors that will be colored differently:"))
        self.lineEdit_4.setText(_translate("VecMap", "45"))
        self.label_15.setText(_translate("VecMap", "e.g., 45 135 225 315"))
        self.label_16.setText(_translate("VecMap", "List of colors (should match the angles):"))
        self.label_17.setText(_translate("VecMap", "e.g., yellow blue red green"))
        self.lineEdit_5.setText(_translate("VecMap", "yellow"))
        self.pushButton_5.setText(_translate("VecMap", "Vector angle\n"
                                             "distrubution"))
        self.pushButton_6.setText(_translate("VecMap", "Show \n"
                                             "map"))
        self.label_18.setText(_translate("VecMap", "<html><head/><body><p>Generate a vector map. Set the coloring pattern by checking the vector angle distribution.</p></body></html>"))
        self.pushButton_7.setText(_translate("VecMap", "Load from csv"))
        self.pushButton_8.setText(_translate("VecMap", "Disclaimer"))
        self.label_19.setText(_translate("VecMap", "VecMap 0.1.1 Released: 06/13/2020 by Dr. <NAME>"))
        self.label_20.setText(_translate("VecMap", "Check here for more information!"))
        self.pushButton_9.setText(_translate("VecMap", "About"))
        self.pushButton_10.setText(_translate("VecMap", "Acknoledgments"))
        self.pushButton_11.setText(_translate("VecMap", "Contact"))
        self.pushButton_12.setText(_translate("VecMap", "Donate me!"))
        self.radioButton.setText(_translate("VecMap", "A-site"))
        self.radioButton_2.setText(_translate("VecMap", "B-site"))
        self.label_21.setText(_translate("VecMap", "Select which site to calculate"))
        self.pushButton_13.setText(_translate("VecMap", "Calculate"))
        self.label_7.setText(_translate("VecMap", "Scale:"))
        self.lineEdit_2.setText(_translate("VecMap", "10"))
        self.pushButton_14.setText(_translate("VecMap", "Oxygen\n"
                                              " map"))
        self.lineEdit_3.setText(_translate("VecMap", "6"))
        self.label_13.setText(_translate("VecMap", "Scale:"))
        self.checkBox_5.setText(_translate("VecMap", "Scale bar"))
#===== Open file and set up global variables such as path etc. ======================
#===== Connected to self.pushButton =================================================
    def openfile(self):
        """Step 1: let the user pick a STEM image, load it, and display it.

        Populates module-level globals (file, my_path, s, image, scale, ...)
        that every later step reads. Also creates the working directory named
        after the image and writes a .hspy backup of the raw signal there.
        """
        openfile_name = QFileDialog.getOpenFileName(self,'Select Image','','DigitalMicrograph (*.dm3 , *.dm4);;Image files (*.tif , *.tiff , *.jpg , *.jpeg , *.png ,*.bmp);;All Files (*)')
        global file, my_path, file_path, title, scale, units, s, image, ABF, img_110
        file = openfile_name[0]  # empty string if the user cancelled the dialog
        if self.checkBox.isChecked(): #Set ABF toggle from the checkbox
            ABF = 1
        else:
            ABF = 0
        if self.checkBox_4.isChecked():
            img_110 = 1  # image is a [011]/[110] zone-axis image
        else:
            img_110 = 0
        if file:
            print('{} has been loaded!'.format(file))
            my_path = getDirectory(file) #Set the working path
            file_path = getDirectory(file, '/') #Set the parent path
            if not os.path.exists(my_path):
                os.makedirs(my_path)
            s = readImage(file)
            title = s.metadata.General.title
            scale = s.axes_manager[0].scale #Read scale data from the image
            units = s.axes_manager[0].units #Read units
            s.save(my_path + 'Original image.hspy', overwrite=True) #Save a backup file in hspy format
            image = s.data  # keep the as-loaded pixel data for display
            if ABF == 1:
                s.data = np.divide(1, s.data) #Inverse the ABF contrast to make a ADF-like image
            # Draw an image
            global f_original_img
            f_original_img = PlotCanvas()
            f_original_img.setWindowTitle(file)
            f_original_img.axes.imshow(image)
            f_original_img.axes.set_axis_off()
            f_original_img.axes.set_title('{} \n has been successfully loaded!'.format(title))
            f_original_img.show()
#==== Initialize atom position module ===============================================
#==== Connected to self.pushButton_2 ================================================
    def ini_atom_position(self):
        """Step 2: detect initial A-site atom positions and let the user edit them.

        Runs atomap's peak finder with the separation factor from the line
        edit, then shows the candidates in an interactive window where a left
        click within 5 px of an existing marker removes it and a click
        elsewhere adds one. The edited list lives in the global A_positions.
        Raises a message box if no image has been loaded yet (NameError on s).
        """
        sep = int(self.lineEdit.text())
        try:
            A_positions_ini = get_atom_positions(s,separation=sep)
            global A_positions, f_ini
            A_positions = A_positions_ini.tolist()
            f_ini = PlotCanvas()
            f_ini.setWindowTitle('Initial atom positions for refining')
            f_ini.axes.imshow(s.data)
            f_ini.axes.set_axis_off()
            f_ini.axes.set_title('Left click to add or remove atoms')
            f_ini.show()
            def onclick(event):
                # Matplotlib mouse callback: toggle an atom near the click point.
                if event.inaxes != f_ini.axes:
                    return
                if event.button == 1: # Left mouse button
                    # NOTE(review): np.float is removed in NumPy >= 1.24; float() would be safer.
                    x = np.float(event.xdata)
                    y = np.float(event.ydata)
                    atom_nearby = closest_node((x,y), A_positions)[0]
                    if distance.euclidean((x,y), A_positions[atom_nearby]) > 5:
                        A_positions.append([x, y])
                    else:
                        A_positions.pop(atom_nearby)
                    replot(f_ini)
            def get_xy_pos_lists(atom_lst):
                # Split [[x, y], ...] into parallel x and y arrays for plotting.
                return np.asarray(atom_lst)[:,0], np.asarray(atom_lst)[:,1]
            def replot(f):
                # Refresh the marker overlay after A_positions changed.
                x_pos, y_pos = get_xy_pos_lists(A_positions)
                dp.set_xdata(x_pos)
                dp.set_ydata(y_pos)
                f.fig.canvas.draw()
                f.fig.canvas.flush_events()
            xy_positions = get_xy_pos_lists(A_positions)
            dp, = f_ini.axes.plot(xy_positions[0], xy_positions[1], marker='o', ms=5, color='r', ls='')
            cid = f_ini.fig.canvas.mpl_connect('button_press_event', onclick)
        except NameError:
            #Pop up an error window
            msg = QMessageBox()
            msg.setIcon(QMessageBox.Critical)
            msg.setText("Please load the image file first!")
            msg.setWindowTitle("Hey guys")
            returnValue = msg.exec()
#==== Find separation module ========================================================
#==== Connected to self.pushButton_3 ================================================
    def find_separation(self):
        """Preview atom detection for nine separation factors around the current one.

        Shows a 3x3 grid (sep-4 ... sep+4, skipping values < 1) so the user can
        pick the factor that best isolates the A-site columns. Raises a message
        box if no image has been loaded yet (NameError on s).
        """
        #sep_range = (int(self.lineEdit_2.text()), int(self.lineEdit_3.text()))
        #s_peaks=am.get_feature_separation(s, separation_range=sep_range) #Range might be changed for different images
        #s_peaks.metadata.General.title = 'Use Arrow keys to find an appropriate separation factor'
        #s_peaks.plot(colorbar=False,scalebar=False,axes_off=True)
        sep = int(self.lineEdit.text())
        sep_range = list(range(sep - 4, sep + 5))
        # Create canvas for drawing
        try:
            global f_sep
            f_sep = SeparationCanvas()
            for i in range(9):
                s_factor = sep - 4 + i
                f_sep.axes[i].set_aspect('equal')
                f_sep.axes[i].set_axis_off()
                if s_factor < 1:
                    # separation must be positive; leave this panel blank
                    continue
                ini_position = get_atom_positions(s, separation=s_factor)
                f_sep.axes[i].imshow(s.data)
                f_sep.axes[i].scatter(np.asarray(ini_position)[:,0], np.asarray(ini_position)[:,1], s=5, color='r')
                f_sep.axes[i].set_title('Separation = {}'.format(s_factor))
            f_sep.show()
        except NameError:
            #Pop up an error window
            msg = QMessageBox()
            msg.setIcon(QMessageBox.Critical)
            msg.setText("Please load the image file first!")
            msg.setWindowTitle("Hey guys")
            returnValue = msg.exec()
#==== Refine atom position module ===================================================
#==== Connected to self.pushButton_4 ================================================
    def refine_atom_position(self):
        """Step 3: refine A-site, B-site and (optionally) O column positions.

        Pipeline: refine A with atomap (CoM + 2D Gaussian), derive the B-site
        initial positions from A's zone axes, subtract A from the image and
        refine B; optionally repeat for O using the combined A+B lattice
        ([001] images only). Also estimates the lattice parameters Ua/Uc from
        A's zone-axis spacings. Results land in globals ap_A/ap_B/ap_O.
        Raises a message box if A_positions has not been initialized
        (NameError).
        """
        #Global variables:
        global ap_A, ap_B, ap_O, Ua, Uc, find_O
        #Read checkboxes
        if self.checkBox_2.isChecked():
            find_O = 1
        else:
            find_O = 0
        if self.checkBox_3.isChecked():
            plotpos = 1
        else:
            plotpos = 0
        try:
            #Refine atom positions
            print('='*50)
            print('Refining atom positions for A-site atoms...')
            print('This may take time...')
            sublattice_A = find_atom(s.data, A_positions, 'A-site atoms')
            print('Refining A-site atoms done!')
            ap_A = sublattice_A.atom_positions #Refined atoms positions for A-site. NumPy array.
            #lattice_list = []
            #lattice_list.append(sublattice_A)
            print('='*50)
            print('Finding the initial positions for B-site atoms...')
            sublattice_A.construct_zone_axes()
            #Find the zone axis for the initial position of B: typically 3 for [001] and 1 for [110]
            if img_110 == 1:
                zone_axis = sublattice_A.zones_axis_average_distances[1]
            else:
                zone_axis = sublattice_A.zones_axis_average_distances[2]
            #Calculate lattice parameter
            z0 = sublattice_A.zones_axis_average_distances[0]
            z1 = sublattice_A.zones_axis_average_distances[1]
            Ua = math.sqrt(z0[0]**2 + z0[1]**2) * scale
            Uc = math.sqrt(z1[0]**2 + z1[1]**2) * scale
            print('='*50)
            print('Estimated lattice parameters (average) from the image:')
            print('a = {:.3f} {}'.format(Ua, units))
            print('c = {:.3f} {}'.format(Uc, units))
            B_positions = sublattice_A.find_missing_atoms_from_zone_vector(zone_axis)
            #Reomve A-site atoms from the image
            print('='*50)
            print('Subtracting sublattice A from the image using 2D gaussian fit...')
            print('This may take time...')
            image_without_A = remove_atoms_from_image_using_2d_gaussian(sublattice_A.image, sublattice_A, show_progressbar=False)
            #Refine B-site atoms
            print('='*50)
            print('Refining atom positions for sublattice B...')
            print('Almost there...')
            sublattice_B = find_atom(image_without_A, B_positions, 'B-site atoms', atom_color='blue')
            ap_B = sublattice_B.atom_positions ##Refined atoms positions for B-site. NumPy array.
            print('Refining B-site atoms done!')
            #lattice_list.append(sublattice_B)
            #Find the position of O atoms
            if find_O == 1:
                #Find initial positions for O
                AB_positions = ap_A.tolist() + ap_B.tolist()
                sublattice_AB = Sublattice(AB_positions,image=s.data,color='y',name='Sublattice A + B')
                sublattice_AB.construct_zone_axes()
                zone_axis_002 = sublattice_AB.zones_axis_average_distances[2]#Only work for [001] currently
                O_positions = sublattice_AB.find_missing_atoms_from_zone_vector(zone_axis_002) #Initial positions of O
                print('='*50)
                print('Subtracting sublattice A and B from the image using 2D gaussian fit...')
                print('This may take time...')
                image_without_AB=remove_atoms_from_image_using_2d_gaussian(sublattice_B.image,sublattice_B,show_progressbar=False) #Subtract both A and B from the original image
                #Refine O positions
                print('='*50)
                print('Refining atom positions for sublattice O...')
                sublattice_O = find_atom(image_without_AB, O_positions, 'O sites', atom_color='g')
                ap_O = sublattice_O.atom_positions #Refined atoms positions for O. NumPy array.
                print('Refining O atoms done!')
                #lattice_list.append(sublattice_O)
            print('Refining atoms done!')
            #Construct atom position results with sublattice A and B.
            #atom_lattice = am.Atom_Lattice(image=image, name='Atoms positions', sublattice_list=lattice_list)
            #Save the refined positions and original image as hdf5 file. This file can be called later.
            #atom_lattice.save(my_path + 'atom_position.hdf5', overwrite=True)
            #=======================
            #Plot and save figures
            #=======================
            if plotpos == 1:
                print('='*50)
                print('Saving result plots...')
                global f_A_site, f_B_site, f_AB
                #Plot A-site atom positions with the original image overlayed.
                f_A_site = PlotCanvas()
                f_A_site.setWindowTitle('VecMap0.1: Refined positions of A-site atoms')
                f_A_site.axes.imshow(image)
                f_A_site.axes.scatter(ap_A[:,0], ap_A[:,1], s=2, color='r')
                f_A_site.axes.set_axis_off()
                f_A_site.show()
                f_A_site.fig.savefig(my_path + title + '_A-site atoms' + '.tif',dpi=600,bbox_inches='tight')
                #Plot B-site atom positions with the original image overlayed.
                f_B_site = PlotCanvas()
                f_B_site.setWindowTitle('VecMap0.1: Refined positions of B-site atoms')
                f_B_site.axes.imshow(image)
                f_B_site.axes.scatter(ap_B[:,0], ap_B[:,1], s=2, color='b')
                f_B_site.axes.set_axis_off()
                f_B_site.show()
                f_B_site.fig.savefig(my_path + title + '_B-site atoms' + '.tif',dpi=600,bbox_inches='tight')
                #Plot both A-site and B-site on the image
                f_AB = PlotCanvas()
                f_AB.setWindowTitle('VecMap0.1: A-site atoms vs. B-site atoms')
                f_AB.axes.imshow(image)
                f_AB.axes.scatter(ap_A[:,0], ap_A[:,1], s=2, color='r')
                f_AB.axes.scatter(ap_B[:,0], ap_B[:,1], s=2, color='b')
                f_AB.axes.set_axis_off()
                f_AB.show()
                f_AB.fig.savefig(my_path + title + '_A_and_B-site atoms' + '.tif',dpi=600,bbox_inches='tight')
                #Plot O atoms if available
                if find_O == 1:
                    global f_O_site, f_all
                    f_O_site = PlotCanvas()
                    f_O_site.setWindowTitle('VecMap0.1: Refined positions of O atoms')
                    f_O_site.axes.imshow(image)
                    f_O_site.axes.scatter(ap_O[:,0], ap_O[:,1], s=2, color='g')
                    f_O_site.axes.set_axis_off()
                    f_O_site.show()
                    f_O_site.fig.savefig(my_path + title + '_O atoms' + '.tif',dpi=600,bbox_inches='tight')
                    #Plot all the atoms on the image
                    f_all = PlotCanvas()
                    f_all.setWindowTitle('VecMap0.1: A-site vs. B-site vs. O atoms')
                    f_all.axes.imshow(image)
                    f_all.axes.scatter(ap_A[:,0], ap_A[:,1], s=2, color='r')
                    f_all.axes.scatter(ap_B[:,0], ap_B[:,1], s=2, color='b')
                    f_all.axes.scatter(ap_O[:,0], ap_O[:,1], s=2, color='g')
                    f_all.axes.set_axis_off()
                    f_all.show()
                    f_all.fig.savefig(my_path + title + '_A_B_O atoms' + '.tif',dpi=600,bbox_inches='tight')
            if plotpos == 1:
                print('All figures have been saved to '+ my_path)
        except NameError:
            #Pop up an error window
            msg = QMessageBox()
            msg.setIcon(QMessageBox.Critical)
            msg.setText("Please initialize the atom positions first!")
            msg.setWindowTitle("Hey guys")
            returnValue = msg.exec()
#==================== Calculate displacement module =================================
#==================== Connected to self.pushButton_13 ===============================
    def cal_disp(self):
        """Step 4a: compute atomic displacement vectors and save them as CSV.

        Calculates the displacement of the selected site (A relative to B, or
        B relative to A) from the ideal perovskite positions, plus optionally
        the O displacements. Writes '<title>-<site>-disp.csv',
        'neighboring atoms.csv' and, when O data exist,
        '<title>-disp_O_by_<site>.csv' into the working directory.
        Raises a message box if refinement has not been run (NameError).
        """
        try:
            #Global variables
            global U_avg, disp, disp_O, disp_atom
            # Read cal_site from the radio button
            # 0 to calculate A site in relative to B site; 1 to calculate B site in relative to A site
            # NOTE(review): if neither radio button is checked, cal_site stays unbound
            # and the NameError handler below fires with a misleading message.
            if self.radioButton.isChecked():
                cal_site = 0
            if self.radioButton_2.isChecked():
                cal_site = 1
            cal_110 = img_110 #If the input image is [110], turn this on. O map is not supported for [110] yet.
            O_map = find_O #If enabled, will calculate the displacement of O atoms in relation to sublattice B.
            U_avg = (Ua + Uc)/2 #Unit cell parameter estimated from the image.
            #=========================================================================
            #The main scripts start from here
            if cal_site == 0:#Calculate A site
                disp_atom = 'A-site'
                rel_atom = 'B-site'
                ap_0 = ap_A.tolist()
                ap_1 = ap_B.tolist()
            else:
                disp_atom = 'B-site'
                rel_atom = 'A-site'
                ap_0 = ap_B.tolist()
                ap_1 = ap_A.tolist()
            print('='*50)
            print('====Calculate {} in relative to {}===='.format(disp_atom, rel_atom))
            ideal_pos, neighbor_pos = find_ideal_pos(ap_0, ap_1, U_avg, scale)
            disp = find_displacement(ap_0, ideal_pos, scale)
            #Save the displacement data
            with open(my_path + title + '-{}-disp.csv'.format(disp_atom),'w') as disp_data:
                disp_data.write('x (px), y (px), x disp (px), y disp (px), disp (nm), angle (deg)\n')
                for data in disp:
                    disp_data.write('{}, {}, {}, {}, {}, {}'.format(data[0], data[1], data[2], data[3], data[4], data[5]))
                    disp_data.write('\n')
            #Save the neigboring atoms as well
            with open(my_path + 'neighboring atoms.csv','w') as neighbor_data:
                for data in neighbor_pos:
                    n = len(data)
                    for idx in range(n):
                        neighbor_data.write('{0}, {1}, '.format(*data[idx]))
                    neighbor_data.write('\n')
            #Calculate O map and save
            if O_map == 1:
                ap_2 = ap_O.tolist()
                ideal_O_pos = find_ideal_O_pos(ap_0, ap_1, U_avg, scale)
                disp_O = find_displacement(ap_2, ideal_O_pos, scale)
                with open(my_path + title + '-disp_O_by_{}.csv'.format(disp_atom),'w') as disp_data:
                    disp_data.write('x (px), y (px), x disp (px), y disp (px), disp (nm), angle (deg)\n')
                    for data in disp_O:
                        disp_data.write('{}, {}, {}, {}, {}, {}'.format(data[0], data[1], data[2], data[3], data[4], data[5]))
                        disp_data.write('\n')
            print('Atomic displacement data saved to ' + my_path + title + '-disp.csv.')
        except NameError:
            msg = QMessageBox()
            msg.setIcon(QMessageBox.Critical)
            msg.setText("Please refine the atom positions first!")
            msg.setWindowTitle("Hey guys")
            returnValue = msg.exec()
#======== Display angle distribution of the vectors module ===========================
#======== Connected to self.pushButton_5 =============================================
    def vec_ang_dist(self):
        """Plot a histogram of displacement-vector angles (column 5 of disp).

        Used to pick the angle list for the colored vector map. Raises a
        message box if the displacement has not been calculated (NameError).
        """
        try:
            disp_angles = [lst[5] for lst in disp]
            global f_vec_ang_dist
            f_vec_ang_dist = PlotCanvas()
            f_vec_ang_dist.setWindowTitle('Histogram of Displacement Directions')
            f_vec_ang_dist.axes.hist(disp_angles, bins=50)
            f_vec_ang_dist.axes.set_xlabel('Displacement angles (Degrees)')
            f_vec_ang_dist.axes.set_xticks(list(range(0,390,30)))
            f_vec_ang_dist.axes.set_ylabel('Frequency')
            f_vec_ang_dist.axes.set_title('Put your cursor on the peak(s) to see the\n displacement directions')
            f_vec_ang_dist.show()
        except NameError:
            msg = QMessageBox()
            msg.setIcon(QMessageBox.Critical)
            msg.setText("Please calculate the displacement first!")
            msg.setWindowTitle("Hey guys")
            returnValue = msg.exec()
        print('')
#========= Generate vector map module =============================================
#========= Connected to self.pushButton_6 ===========================================
    def show_vec_map(self):
        """Step 4b: draw the colored displacement vector map over the image.

        Arrow lengths are scaled by the 'Scale' line edit; colors come from
        the user-supplied angle/color lists via set_arrow_color. Saves the
        figure as '<title>_<site>_vec_map.tif'. Raises a message box on
        missing displacement data (NameError) or mismatched angle/color
        lists (IndexError).
        """
        a_len = int(self.lineEdit_2.text())
        if self.checkBox_5.isChecked():
            s_bar = 1
        else:
            s_bar = 0
        try:
            # Read from lineEdits:
            ang_lst = str(self.lineEdit_4.text()).split() #A list of displacement directions. This is used to determine the coloring pattern. For single color rendering, just leave it as [0].
            ang_lst = [int(a) for a in ang_lst]
            color_lst = str(self.lineEdit_5.text()).split()
            #====Plot====
            disp_color = set_arrow_color(disp, ang_lst, color_lst)
            global f_vec_map
            f_vec_map = PlotCanvas()
            f_vec_map.setWindowTitle('VecMap0.1: Vector Map')
            f_vec_map.axes.imshow(image)
            f_vec_map.axes.set_axis_off()
            for vec in disp_color:
                f_vec_map.axes.arrow(vec[0],vec[1],vec[2]*a_len,vec[3]*a_len,color=vec[6], linewidth=1, head_width=a_len/3, head_length=a_len/3)
            #Add a scale bar
            if s_bar == 1:
                scalebar = ScaleBar(scale,'nm',location='lower left',scale_loc='top',sep=2)
                f_vec_map.axes.add_artist(scalebar)
            f_vec_map.show()
            f_vec_map.fig.savefig(my_path + title + "_{}_vec_map.tif".format(disp_atom),dpi=1200,bbox_inches='tight',overwrite=True)
            print('The vector map has been saved to ' + my_path + title + "_{}_vec_map.tif! Enjoy!".format(disp_atom))
        except NameError:
            msg = QMessageBox()
            msg.setIcon(QMessageBox.Critical)
            msg.setText("Please calculate the displacement first!")
            msg.setWindowTitle("Hey guys")
            returnValue = msg.exec()
        except IndexError:
            msg = QMessageBox()
            msg.setIcon(QMessageBox.Critical)
            msg.setText("The list of colors should match the list of angles!")
            msg.setWindowTitle("Hey guys")
            returnValue = msg.exec()
#========= Generate O vector map module =============================================
#========= Connected to self.pushButton_14 ===========================================
    def show_O_vec_map(self):
        """Draw the oxygen displacement vector map (single color, red).

        Arrow lengths use the O 'Scale' line edit. Saves the figure as
        '<title>_O_vec_map_by_<site>.tif'. Raises a message box if no O
        displacement data exist (NameError on disp_O).
        """
        O_len = int(self.lineEdit_3.text())
        if self.checkBox_5.isChecked():
            s_bar = 1
        else:
            s_bar = 0
        try:
            global f_vec_map_O
            f_vec_map_O = PlotCanvas()
            f_vec_map_O.setWindowTitle('VecMap0.1: Vector Map of Oxygen atoms')
            f_vec_map_O.axes.imshow(image)
            f_vec_map_O.axes.set_axis_off()
            for vec in disp_O:
                f_vec_map_O.axes.arrow(vec[0],vec[1],vec[2]*O_len,vec[3]*O_len,color='red',linewidth=1,head_width=O_len/3,head_length=O_len/3)
            #Add a scale bar
            if s_bar == 1:
                scalebar = ScaleBar(scale,'nm',location='lower left',scale_loc='top',sep=2)
                f_vec_map_O.axes.add_artist(scalebar)
            f_vec_map_O.show()
            f_vec_map_O.fig.savefig(my_path + title + "_O_vec_map_by_{}.tif".format(disp_atom),dpi=1200,bbox_inches='tight',overwrite=True)
            print('The O vector map has been saved to ' + my_path + title + "_O_vec_map_by_{}.tif! Enjoy!".format(disp_atom))
        except NameError:
            msg = QMessageBox()
            msg.setIcon(QMessageBox.Critical)
            msg.setText("No O displacement data exist!")
            msg.setWindowTitle("Hey guys")
            returnValue = msg.exec()
#============ Load displacement from csv module ====================================
#============ Connected to self.pushButton_7 =======================================
    def load_from_csv(self):
        """Reload a previous session from a saved displacement CSV.

        Restores the globals (s, image, scale, disp, ...) from the CSV's
        directory, which must still contain 'Original image.hspy'. The site
        name is recovered from a fixed slice of the file name, so the CSV
        must keep the '-<site>-disp.csv' naming produced by cal_disp.
        """
        # Load displacement data from the csv file saved previously
        global s, my_path, title, scale, units, disp, disp_O, image, disp_atom
        openfile_name = QFileDialog.getOpenFileName(self,'Select the displacement data','','CSV (*.csv);;All Files (*)')
        file = openfile_name[0]
        if file:
            my_path = getDirectory(file,'/')
            s = readImage(my_path + 'Original image.hspy')
            title = s.metadata.General.title
            scale = s.axes_manager[0].scale
            units = s.axes_manager[0].units
            image = s.data
            disp = load_disp_data_from_csv(file)
            # Look for the O data
            disp_atom = file[-15:-9]  # slice out 'A-site'/'B-site' from '...-<site>-disp.csv'
            file_O_disp = my_path + title + '-disp_O_by_' + disp_atom + '.csv'
            # NOTE(review): find_O here is a local, not the module-level global the
            # other steps read — confirm whether that is intentional.
            if os.path.isfile(file_O_disp):
                disp_O = load_disp_data_from_csv(file_O_disp)
                find_O = 1
                print('Found O displacement data!')
            else:
                find_O = 0
                print('No O displacement data was found! Will do {} atom displacement only!'.format(disp_atom))
#============ Disclaimer button ====================================================
#============ Connected to self.pushButton_8 =======================================
    def disclaimer(self):
        """Show the disclaimer dialog; any button click pops a thank-you box."""
        msg = QMessageBox()
        msg.setIcon(QMessageBox.Information)
        msg.setText("<b>Disclaimer</b><br>" \
                    "This app was designed by Dr <NAME>. Redistribution and use in source, " \
                    "with or without modification, are permitted. Any redistribution must remain "\
                    "the above copyright. When a scientific publication is reached through the "\
                    "app, please add the following reference: <br>"\
                    "1. Ma, T. et al. <a href=\"https://doi.org/10.1103/PhysRevLett.123.217602\">Phys. Rev. Lett. 123, 217602 (2019).</a>"\
                    "<br>"\
                    "2. Ma, T. et al. <a href=\"https://doi.org/10.1063/1.5115039\">Appl. Phys. Lett. 115, 122902 (2019).</a>"
                    "<br>" \
                    "THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND.<br>")
        msg.setWindowTitle("VecMap0.1: Disclaimer")
        def disclaimerButtonClick():
            # Shown after the user dismisses the disclaimer.
            msg = QMessageBox()
            msg.setText('Thanks for using VecMap')
            msg.setWindowTitle('Thank you!')
            returnValue = msg.exec()
        msg.buttonClicked.connect(disclaimerButtonClick)
        returnValue = msg.exec()
#============ About button ====================================================
#============ Connected to self.pushButton_9 =======================================
    def show_about(self):
        """Show the About dialog with version and release info."""
        msg = QMessageBox()
        # msg.setIcon(QMessageBox.Information)
        msg.setText("VecMap v0.1.1"\
                    "<br>"\
                    "Designed by Dr. <NAME>"\
                    "<br>"\
                    "06/13/2020"\
                    "<br>"
                    "First version release!<br>"
                    "Get more information and<br> source code from my <a href=\"http://www-personal.umich.edu/~taoma/VectorMap.html\">website</a>.")
        msg.setWindowTitle("VecMap0.1: About")
        returnValue = msg.exec()
#============ Acknowledgments button ====================================================
#============ Connected to self.pushButton_10 =======================================
    def acknowledgments(self):
        """Show the acknowledgments dialog crediting HyperSpy and Atomap."""
        msg = QMessageBox()
        msg.setIcon(QMessageBox.Information)
        msg.setText("This program was written with Python 3. The author " \
                    "acknowledges the HyperSpy and Atomap packages which "\
                    "are partially incorporated in the program. Please "\
                    "consider citing/adding acknowledgement for Hyperspy "\
                    "and Atomap packages in your publication:"\
                    "<br>"
                    "<NAME> la et al. <a href=\"http://doi.org/10.5281/zenodo.3396791\">hyperspy/hyperspy: HyperSpy v1.5.2 (2019).</a>" \
                    "<br>"
                    "<NAME>. et al. <a href=\"https://doi.org/10.1186/s40679-017-0042-5\">Adv. Struct. Chem. Imaging 3, 9 (2017).</a>")
        msg.setWindowTitle("VecMap0.1: Acknowledgments")
        returnValue = msg.exec()
#============ Contact button ====================================================
#============ Connected to self.pushButton_11 =======================================
    def show_contact(self):
        """Show the contact dialog with the author's e-mail address."""
        msg = QMessageBox()
        msg.setText("Ask questions and report bugs to:"\
                    "<br>"
                    "<a href=\"mailto:<EMAIL>\"><EMAIL></a>")
        msg.setWindowTitle("VecMap0.1: Contact")
        returnValue = msg.exec()
#============ Donate me button ====================================================
#============ Connected to self.pushButton_12 =======================================
    def donate(self):
        """Show the donation dialog with a PayPal link."""
        msg = QMessageBox()
        msg.setText("I will make this app freely available for the society.<br>"\
                    "If you like this app, show your appreciation by <a href=\"https://www.paypal.com/cgi-bin/webscr?cmd=_donations&business=NQTP8WZX9VDRQ&currency_code=USD&source=url\">donating me!</a>"\
                    "<br>"\
                    "Your support is my motivation!<br>")
        msg.setWindowTitle("VecMap0.1: Donate me!")
        returnValue = msg.exec()
#=========== Define figure canvas ===================================================
class PlotCanvas(QMainWindow):
    """A standalone Qt window embedding a single-axes Matplotlib figure.

    Exposes ``fig``, ``axes`` and ``canvas`` so callers can draw on it like a
    normal Matplotlib figure, plus the standard navigation toolbar.
    """
    def __init__(self, parent=None):
        QMainWindow.__init__(self, parent)
        self.setWindowTitle('VecMap0.1: Plot')
        self.create_main_frame()
    def create_main_frame(self):
        """Build the figure, canvas, toolbar and layout."""
        self.main_frame = QWidget()
        # Create the mpl Figure and FigCanvas objects.
        # 5x4 inches, 100 dots-per-inch
        #
        self.dpi = 100
        self.fig = Figure((5.0, 4.0), dpi=self.dpi)
        self.canvas = FigureCanvas(self.fig)
        self.canvas.setParent(self.main_frame)
        # Since we have only one plot, we can use add_axes
        # instead of add_subplot, but then the subplot
        # configuration tool in the navigation toolbar wouldn't
        # work.
        #
        self.axes = self.fig.add_subplot(111)
        # Create the navigation toolbar, tied to the canvas
        #
        self.mpl_toolbar = NavigationToolbar(self.canvas, self.main_frame)
        vbox = QVBoxLayout()
        vbox.addWidget(self.mpl_toolbar)
        vbox.addWidget(self.canvas)
        self.main_frame.setLayout(vbox)
        self.setCentralWidget(self.main_frame)
#==================== Find separation canvas =========================================
class SeparationCanvas(QMainWindow):
    """A Qt window with a 3x3 grid of Matplotlib axes for the separation preview.

    ``axes`` is a list of nine subplots, indexed 0-8 row by row.
    """
    def __init__(self, parent=None):
        QMainWindow.__init__(self, parent)
        self.setWindowTitle('VecMap0.1: Find separation factors')
        self.create_main_frame()
    def create_main_frame(self):
        """Build the 3x3 figure, canvas, toolbar and layout."""
        self.main_frame = QWidget()
        # Create the mpl Figure and FigCanvas objects.
        # 10x10 inches, 100 dots-per-inch
        #
        self.dpi = 100
        self.fig = Figure((10.0, 10.0), dpi=self.dpi)
        self.canvas = FigureCanvas(self.fig)
        self.canvas.setParent(self.main_frame)
        # Add a 9x9 axes layout
        #
        self.axes = [self.fig.add_subplot(3,3,n) for n in range(1,10)]
        self.fig.set_tight_layout(True)
        # Create the navigation toolbar, tied to the canvas
        #
        self.mpl_toolbar = NavigationToolbar(self.canvas, self.main_frame)
        vbox = QVBoxLayout()
        vbox.addWidget(self.mpl_toolbar)
        vbox.addWidget(self.canvas)
        self.main_frame.setLayout(vbox)
        self.setCentralWidget(self.main_frame)
#==================== Modules and helper functions ===================================
from hyperspy.io import load
from atomap.atom_finding_refining import get_atom_positions
from atomap.sublattice import Sublattice
from atomap.tools import remove_atoms_from_image_using_2d_gaussian
import os
import numpy as np
import matplotlib.pyplot as plt
import math
import copy
from scipy.spatial import distance
from matplotlib_scalebar.scalebar import ScaleBar
#====Helper functions, do not change====
def readImage(file):
    """Load a raw image file with HyperSpy and return the signal object."""
    return load(file)
def getDirectory(file, s='.'):
    """Return *file* truncated at the last occurrence of *s*, with a trailing '/'.

    With the default ``s='.'`` this turns 'dir/img.dm3' into 'dir/img/' (the
    per-image working directory); with ``s='/'`` it yields the parent path.

    Parameters
    ----------
    file : str
        The file path to truncate.
    s : str
        Separator to cut at (last occurrence wins).

    Returns
    -------
    str or None
        The truncated path ending in '/', or None when *s* is absent (or only
        at index 0 — the original reverse scan never reached the first
        character, and that quirk is preserved).
    """
    # str.rfind replaces the original hand-rolled reverse index scan, which
    # fell through to an implicit None when the separator was missing.
    idx = file.rfind(s)
    if idx <= 0:
        return None
    return file[:idx] + '/'
def find_atom(img, ini_pos, atom_name, atom_color='r'):
    """Refine atom positions for one sublattice with atomap.

    Builds a Sublattice from the initial positions, then refines twice:
    first by center of mass, then by 2D Gaussian fitting.

    Parameters: img is an array of image data; ini_pos the initial positions;
    atom_name a display name; atom_color the marker color.
    Returns the refined atomap Sublattice object.
    """
    #Refine atom positions for a sublattice
    #img: an array of image data; ini_pos: initial positions; atom_name: a string for name; atom_color: a string for color
    #img_110: For [110] image
    sublattice = Sublattice(ini_pos, image=img, color=atom_color, name=atom_name)
    sublattice.find_nearest_neighbors()
    sublattice.refine_atom_positions_using_center_of_mass(show_progressbar=False)
    sublattice.refine_atom_positions_using_2d_gaussian(show_progressbar=False)
    return sublattice #Return an atomap sublattice object
def find_neighboring_atoms(P, A, Ua, tol=1.2):
    """Return the atoms of A lying within tol*Ua of point P.

    P is an (x, y) pair; A is a list of (x, y) atoms; Ua is the distance
    threshold in pixels (use 0.707*a for [001] images, 0.5*a for [110]).
    The result is sorted by distance from the image origin (not from P).
    """
    px, py = P
    cutoff_sq = (Ua * tol) ** 2
    nearby = [atom for atom in A
              if (atom[0] - px) ** 2 + (atom[1] - py) ** 2 < cutoff_sq]
    nearby.sort(key=lambda atom: (atom[0] ** 2 + atom[1] ** 2) ** 0.5)
    return nearby
def closest_node(node, nodes):
    """Find the entry of *nodes* nearest (Euclidean) to *node*.

    Returns a (index, nodes[index]) pair.
    """
    dists = distance.cdist([node], nodes)
    idx = dists.argmin()
    return idx, nodes[idx]
def line(p1, p2):
    """Return coefficients (A, B, C) of the line A*x + B*y = C through p1 and p2."""
    x1, y1 = p1
    x2, y2 = p2
    return y1 - y2, x2 - x1, -(x1 * y2 - x2 * y1)
def intersection(L1, L2):
    """Intersection point (x, y) of two lines produced by line().

    Solves by Cramer's rule; returns False when the lines are parallel
    (zero determinant).
    """
    a1, b1, c1 = L1
    a2, b2, c2 = L2
    det = a1 * b2 - b1 * a2
    if det == 0:
        return False
    x = (c1 * b2 - b1 * c2) / det
    y = (a1 * c2 - c1 * a2) / det
    return x, y
def math_center(a, b, c, d):
    """Mathematical center of the quadrilateral a, b, c, d.

    Identifies the corner diagonally opposite *a* (the farthest of b, c, d),
    then intersects the two diagonals via line()/intersection().
    """
    others = [b, c, d]
    far_idx = distance.cdist([a], others).argmax()
    diag_one = line(a, others[far_idx])
    del others[far_idx]
    diag_two = line(others[0], others[1])
    return intersection(diag_one, diag_two)
def find_ideal_pos(A, B, Ua, scale, img_110=False):
    """Calculate the ideal (undistorted) positions for the A atoms.

    For [001] images each A atom's ideal position is the center of its four
    B-site neighbors; for [110] images it is the midpoint of its two
    neighbors. Atoms without the full neighbor count (image edges) are
    skipped, so the output may be shorter than A.

    A, B: lists of [x, y] atom coordinates (pixels); Ua: estimated lattice
    parameter in nm; scale: pixel size (presumably nm/px — confirm against
    caller). Returns (ideal_positions, Neighbor_positions).
    """
    #calculate the ideal atomic positions for A in a un-distorted perovskite structure
    #A, B are lists of atom coordinates; Ua is the estimated lattice paramter in nm; scale is the image pixel size
    #return a list of tuples
    ideal_positions = []
    Neighbor_positions = []
    if not img_110: #calculate image [001]
        for atom in A:
            # 0.707 ~ sqrt(2)/2: nearest B neighbors sit half a face diagonal away
            Neighbor = find_neighboring_atoms(atom,B,Ua / scale * 0.707)
            if len(Neighbor) == 4:
                ap_center = math_center(*Neighbor)
                ideal_positions.append(ap_center)
                Neighbor_positions.append(Neighbor) #Save neighbors for plotting
        return ideal_positions, Neighbor_positions
    for atom in A:
        Neighbor = find_neighboring_atoms(atom,B,Ua / scale * 0.5)
        if len(Neighbor) == 2:
            ap_center = ((Neighbor[0][0]+Neighbor[1][0])/2,(Neighbor[0][1]+Neighbor[1][1])/2)
            ideal_positions.append(ap_center)
            Neighbor_positions.append(Neighbor)
    return ideal_positions, Neighbor_positions
def find_ideal_O_pos(A, B, Ua, scale):
    """Calculate ideal O positions as edge midpoints of each B-site square.

    For every A atom with exactly four B neighbors, pairs up adjacent corners
    of the square (via successive closest_node pops) and appends the four edge
    midpoints. Duplicates from shared edges are removed at the end with
    dict.fromkeys (order-preserving). Only supports [001] images.
    """
    #calculate the ideal atomic positions for O in a un-distorted perovskite structure
    #only support [001] images
    ideal_O_positions = []
    for atom in A:
        Neighbor = find_neighboring_atoms(atom,B,Ua / scale * 0.707)
        if len(Neighbor) == 4:
            # Pop corners so that n_1 and n_2 are the two corners adjacent to n_0,
            # and n_3 is diagonal to it.
            n_0 = Neighbor.pop(0)
            n_1 = Neighbor.pop(closest_node(n_0, Neighbor)[0])
            n_2 = Neighbor.pop(closest_node(n_0,Neighbor)[0])
            n_3 = Neighbor.pop()
            o_0 = (n_0[0] + n_1[0]) / 2, (n_0[1] + n_1[1]) / 2
            ideal_O_positions.append(o_0)
            o_1 = (n_0[0] + n_2[0]) / 2, (n_0[1] + n_2[1]) / 2
            ideal_O_positions.append(o_1)
            o_2 = (n_1[0] + n_3[0]) / 2, (n_1[1] + n_3[1]) / 2
            ideal_O_positions.append(o_2)
            o_3 = (n_2[0] + n_3[0]) / 2, (n_2[1] + n_3[1]) / 2
            ideal_O_positions.append(o_3)
    ideal_O_positions = list(dict.fromkeys(ideal_O_positions))
    return ideal_O_positions
def find_displacement(A, A_com, scale):
    """Compute the displacement vector of each refined atom from its ideal site.

    For every ideal position in A_com, finds the nearest refined atom in A,
    rejects pairings farther than 0.14/scale pixels apart (spurious matches),
    and records the vector and its direction.

    Parameters
    ----------
    A : list
        Refined atom positions ([x, y] in pixels).
    A_com : list
        Ideal (undistorted) positions ([x, y] in pixels).
    scale : float
        Pixel size (presumably nm/px — confirm against caller; the 0.14/scale
        cutoff then corresponds to 0.14 nm).

    Returns
    -------
    list of [x, y, dx, dy, scale*1000*vec_len, angle_deg] rows, with the
    angle in [0, 360) measured in image coordinates.
    """
    disp = []
    for atom in A_com:
        arrow_end = closest_node(atom, A)[1]
        vec_len = distance.euclidean(arrow_end, atom)
        if vec_len > 0.14 / scale:
            # No refined atom close enough — skip this ideal site.
            continue
        dx = arrow_end[0] - atom[0]
        dy = arrow_end[1] - atom[1]
        # atan2 covers all four quadrants in one call and, unlike the original
        # atan(dy/dx) branching, does not raise ZeroDivisionError for purely
        # vertical displacements (dx == 0). The modulo maps (-180, 180] to [0, 360).
        vec_ang = math.degrees(math.atan2(dy, dx)) % 360
        disp.append([atom[0], atom[1], dx, dy, scale*1000*vec_len, vec_ang])
    return disp
def set_arrow_color(vec_data, ang_lst, color_lst):
    """Assign a color to each displacement vector according to its angle.

    vec_data  -- list of [x, y, dx, dy, length, angle] vectors
    ang_lst   -- representative angles (degrees) defining the color sectors;
                 a single-entry list requests single-color rendering
    color_lst -- colors; color_lst[0] is the default / first-sector color

    Returns a deep copy of vec_data with the chosen color appended to each
    vector; the input list is left untouched.
    """
    # Work on a copy so the caller's data is not modified.
    # (Removed the original no-op `color_lst = color_lst` self-assignment.)
    vec_data_color = copy.deepcopy(vec_data)
    if len(ang_lst) == 1:
        # Single-color rendering: every arrow gets the first color.
        for vec in vec_data_color:
            vec.append(color_lst[0])
        return vec_data_color
    # Rotate all reference angles so the first one sits at 0 degrees.
    ang_lst_mod = [a - ang_lst[0] for a in ang_lst]
    # Sector boundaries: halfway between consecutive reference angles, plus a
    # final boundary halfway between the last angle and 360.
    ang_bond = []
    for idx in range(len(ang_lst_mod) - 1):
        ang_bond.append((ang_lst_mod[idx + 1] - ang_lst_mod[idx]) // 2 + ang_lst_mod[idx])
    ang_bond.append((360 - ang_lst_mod[-1]) // 2 + ang_lst_mod[-1])
    for vec in vec_data_color:
        ang = vec[5] - ang_lst[0]
        if ang < 0:
            ang = ang + 360
        for i in range(len(ang_bond) - 1):
            if round(ang) in range(ang_bond[i], ang_bond[i + 1]):
                vec.append(color_lst[i + 1])
    # Any vector not matched above belongs to the wrap-around sector and
    # receives the first color.
    for vec in vec_data_color:
        if len(vec) == 6:
            vec.append(color_lst[0])
    return vec_data_color
def load_disp_data_from_csv(file):
    """Load displacement vectors previously exported to a CSV file.

    The first line is a header (echoed to stdout); every following line is a
    ', '-separated row of floats.  Returns a list of float lists.
    """
    with open(file, 'r') as disp:
        lines = disp.readlines()
    print('Displacement data:\n')
    print(lines[0])
    return [
        [float(field) for field in line.strip().split(', ')]
        for line in lines[1:]
    ]
#====Application entry==================================
def main():
    """Print the welcome banner and launch the VecMap Qt application."""
    banner = '=' * 50
    print(banner)
    print('''
    Welcome to the first version of VecMap
    --- a convenient tool to calculate atomic displacements in perovskite structures
    This app was designed by Dr. <NAME>.
    Address your questions and suggestions to <EMAIL>.
    Please see the "Disclaimer" before use!
    Hope you get good results and publications from it!
    Version 0.1.1 06/13/2020
    ''')
    print(banner)
    import sys
    # Build the Qt application, wire up the generated UI, and hand control
    # to the Qt event loop until the window is closed.
    qt_app = QtWidgets.QApplication(sys.argv)
    window = QtWidgets.QWidget()
    ui = Ui_VecMap()
    ui.setupUi(window)
    window.show()
    sys.exit(qt_app.exec_())
# Script entry point: start the GUI only when this file is executed
# directly, not when it is imported as a module.
if __name__ == "__main__":
    main()
|
[
"PyQt5.QtWidgets.QPushButton",
"os.path.isfile",
"matplotlib.backends.backend_qt5agg.NavigationToolbar2QT",
"atomap.tools.remove_atoms_from_image_using_2d_gaussian",
"PyQt5.QtWidgets.QApplication",
"PyQt5.QtWidgets.QLabel",
"PyQt5.QtWidgets.QWidget",
"PyQt5.QtWidgets.QRadioButton",
"matplotlib.backends.backend_qt5agg.FigureCanvasQTAgg",
"scipy.spatial.distance.euclidean",
"PyQt5.QtWidgets.QCheckBox",
"atomap.sublattice.Sublattice",
"os.path.exists",
"matplotlib.figure.Figure",
"scipy.spatial.distance.cdist",
"numpy.divide",
"copy.deepcopy",
"PyQt5.QtWidgets.QFrame",
"PyQt5.QtCore.QRect",
"math.sqrt",
"numpy.asarray",
"hyperspy.io.load",
"numpy.float",
"atomap.atom_finding_refining.get_atom_positions",
"matplotlib.use",
"PyQt5.QtCore.QMetaObject.connectSlotsByName",
"matplotlib_scalebar.scalebar.ScaleBar",
"math.atan",
"os.makedirs",
"PyQt5.QtWidgets.QLineEdit",
"PyQt5.QtCore.QSize",
"PyQt5.QtGui.QFont"
] |
[((192, 216), 'matplotlib.use', 'matplotlib.use', (['"""Qt5Agg"""'], {}), "('Qt5Agg')\n", (206, 216), False, 'import matplotlib\n'), ((48135, 48145), 'hyperspy.io.load', 'load', (['file'], {}), '(file)\n', (48139, 48145), False, 'from hyperspy.io import load\n'), ((48725, 48789), 'atomap.sublattice.Sublattice', 'Sublattice', (['ini_pos'], {'image': 'img', 'color': 'atom_color', 'name': 'atom_name'}), '(ini_pos, image=img, color=atom_color, name=atom_name)\n', (48735, 48789), False, 'from atomap.sublattice import Sublattice\n'), ((53787, 53810), 'copy.deepcopy', 'copy.deepcopy', (['vec_data'], {}), '(vec_data)\n', (53800, 53810), False, 'import copy\n'), ((55699, 55731), 'PyQt5.QtWidgets.QApplication', 'QtWidgets.QApplication', (['sys.argv'], {}), '(sys.argv)\n', (55721, 55731), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((55746, 55765), 'PyQt5.QtWidgets.QWidget', 'QtWidgets.QWidget', ([], {}), '()\n', (55763, 55765), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((857, 886), 'PyQt5.QtWidgets.QPushButton', 'QtWidgets.QPushButton', (['VecMap'], {}), '(VecMap)\n', (878, 886), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((1032, 1059), 'PyQt5.QtWidgets.QCheckBox', 'QtWidgets.QCheckBox', (['VecMap'], {}), '(VecMap)\n', (1051, 1059), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((1197, 1221), 'PyQt5.QtWidgets.QFrame', 'QtWidgets.QFrame', (['VecMap'], {}), '(VecMap)\n', (1213, 1221), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((1463, 1487), 'PyQt5.QtWidgets.QLabel', 'QtWidgets.QLabel', (['VecMap'], {}), '(VecMap)\n', (1479, 1487), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((1618, 1642), 'PyQt5.QtWidgets.QLabel', 'QtWidgets.QLabel', (['VecMap'], {}), '(VecMap)\n', (1634, 1642), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((1924, 1951), 'PyQt5.QtWidgets.QLineEdit', 'QtWidgets.QLineEdit', (['VecMap'], {}), '(VecMap)\n', (1943, 1951), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), 
((2092, 2116), 'PyQt5.QtWidgets.QLabel', 'QtWidgets.QLabel', (['VecMap'], {}), '(VecMap)\n', (2108, 2116), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((2254, 2278), 'PyQt5.QtWidgets.QLabel', 'QtWidgets.QLabel', (['VecMap'], {}), '(VecMap)\n', (2270, 2278), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((2421, 2450), 'PyQt5.QtWidgets.QPushButton', 'QtWidgets.QPushButton', (['VecMap'], {}), '(VecMap)\n', (2442, 2450), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((2607, 2636), 'PyQt5.QtWidgets.QPushButton', 'QtWidgets.QPushButton', (['VecMap'], {}), '(VecMap)\n', (2628, 2636), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((2788, 2812), 'PyQt5.QtWidgets.QLabel', 'QtWidgets.QLabel', (['VecMap'], {}), '(VecMap)\n', (2804, 2812), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((3094, 3118), 'PyQt5.QtWidgets.QLabel', 'QtWidgets.QLabel', (['VecMap'], {}), '(VecMap)\n', (3110, 3118), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((3399, 3423), 'PyQt5.QtWidgets.QFrame', 'QtWidgets.QFrame', (['VecMap'], {}), '(VecMap)\n', (3415, 3423), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((3678, 3702), 'PyQt5.QtWidgets.QLabel', 'QtWidgets.QLabel', (['VecMap'], {}), '(VecMap)\n', (3694, 3702), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((3843, 3870), 'PyQt5.QtWidgets.QCheckBox', 'QtWidgets.QCheckBox', (['VecMap'], {}), '(VecMap)\n', (3862, 3870), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((4020, 4047), 'PyQt5.QtWidgets.QCheckBox', 'QtWidgets.QCheckBox', (['VecMap'], {}), '(VecMap)\n', (4039, 4047), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((4200, 4229), 'PyQt5.QtWidgets.QPushButton', 'QtWidgets.QPushButton', (['VecMap'], {}), '(VecMap)\n', (4221, 4229), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((4382, 4406), 'PyQt5.QtWidgets.QLabel', 'QtWidgets.QLabel', (['VecMap'], {}), '(VecMap)\n', (4398, 4406), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), 
((4697, 4724), 'PyQt5.QtWidgets.QCheckBox', 'QtWidgets.QCheckBox', (['VecMap'], {}), '(VecMap)\n', (4716, 4724), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((4870, 4894), 'PyQt5.QtWidgets.QFrame', 'QtWidgets.QFrame', (['VecMap'], {}), '(VecMap)\n', (4886, 4894), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((5150, 5174), 'PyQt5.QtWidgets.QLabel', 'QtWidgets.QLabel', (['VecMap'], {}), '(VecMap)\n', (5166, 5174), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((5316, 5340), 'PyQt5.QtWidgets.QLabel', 'QtWidgets.QLabel', (['VecMap'], {}), '(VecMap)\n', (5332, 5340), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((5483, 5507), 'PyQt5.QtWidgets.QLabel', 'QtWidgets.QLabel', (['VecMap'], {}), '(VecMap)\n', (5499, 5507), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((5651, 5678), 'PyQt5.QtWidgets.QLineEdit', 'QtWidgets.QLineEdit', (['VecMap'], {}), '(VecMap)\n', (5670, 5678), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((5826, 5850), 'PyQt5.QtWidgets.QLabel', 'QtWidgets.QLabel', (['VecMap'], {}), '(VecMap)\n', (5842, 5850), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((5992, 6016), 'PyQt5.QtWidgets.QLabel', 'QtWidgets.QLabel', (['VecMap'], {}), '(VecMap)\n', (6008, 6016), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((6158, 6182), 'PyQt5.QtWidgets.QLabel', 'QtWidgets.QLabel', (['VecMap'], {}), '(VecMap)\n', (6174, 6182), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((6326, 6353), 'PyQt5.QtWidgets.QLineEdit', 'QtWidgets.QLineEdit', (['VecMap'], {}), '(VecMap)\n', (6345, 6353), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((6505, 6534), 'PyQt5.QtWidgets.QPushButton', 'QtWidgets.QPushButton', (['VecMap'], {}), '(VecMap)\n', (6526, 6534), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((6693, 6722), 'PyQt5.QtWidgets.QPushButton', 'QtWidgets.QPushButton', (['VecMap'], {}), '(VecMap)\n', (6714, 6722), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), 
((6875, 6899), 'PyQt5.QtWidgets.QLabel', 'QtWidgets.QLabel', (['VecMap'], {}), '(VecMap)\n', (6891, 6899), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((7192, 7221), 'PyQt5.QtWidgets.QPushButton', 'QtWidgets.QPushButton', (['VecMap'], {}), '(VecMap)\n', (7213, 7221), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((7373, 7397), 'PyQt5.QtWidgets.QFrame', 'QtWidgets.QFrame', (['VecMap'], {}), '(VecMap)\n', (7389, 7397), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((7657, 7686), 'PyQt5.QtWidgets.QPushButton', 'QtWidgets.QPushButton', (['VecMap'], {}), '(VecMap)\n', (7678, 7686), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((7840, 7864), 'PyQt5.QtWidgets.QLabel', 'QtWidgets.QLabel', (['VecMap'], {}), '(VecMap)\n', (7856, 7864), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((8006, 8030), 'PyQt5.QtWidgets.QLabel', 'QtWidgets.QLabel', (['VecMap'], {}), '(VecMap)\n', (8022, 8030), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((8176, 8205), 'PyQt5.QtWidgets.QPushButton', 'QtWidgets.QPushButton', (['VecMap'], {}), '(VecMap)\n', (8197, 8205), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((8365, 8394), 'PyQt5.QtWidgets.QPushButton', 'QtWidgets.QPushButton', (['VecMap'], {}), '(VecMap)\n', (8386, 8394), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((8556, 8585), 'PyQt5.QtWidgets.QPushButton', 'QtWidgets.QPushButton', (['VecMap'], {}), '(VecMap)\n', (8577, 8585), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((8748, 8777), 'PyQt5.QtWidgets.QPushButton', 'QtWidgets.QPushButton', (['VecMap'], {}), '(VecMap)\n', (8769, 8777), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((8867, 8880), 'PyQt5.QtGui.QFont', 'QtGui.QFont', ([], {}), '()\n', (8878, 8880), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((9066, 9096), 'PyQt5.QtWidgets.QRadioButton', 'QtWidgets.QRadioButton', (['VecMap'], {}), '(VecMap)\n', (9088, 9096), False, 'from PyQt5 import QtCore, QtGui, 
QtWidgets\n'), ((9294, 9324), 'PyQt5.QtWidgets.QRadioButton', 'QtWidgets.QRadioButton', (['VecMap'], {}), '(VecMap)\n', (9316, 9324), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((9480, 9504), 'PyQt5.QtWidgets.QLabel', 'QtWidgets.QLabel', (['VecMap'], {}), '(VecMap)\n', (9496, 9504), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((9651, 9680), 'PyQt5.QtWidgets.QPushButton', 'QtWidgets.QPushButton', (['VecMap'], {}), '(VecMap)\n', (9672, 9680), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((9836, 9860), 'PyQt5.QtWidgets.QLabel', 'QtWidgets.QLabel', (['VecMap'], {}), '(VecMap)\n', (9852, 9860), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((10000, 10027), 'PyQt5.QtWidgets.QLineEdit', 'QtWidgets.QLineEdit', (['VecMap'], {}), '(VecMap)\n', (10019, 10027), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((10179, 10208), 'PyQt5.QtWidgets.QPushButton', 'QtWidgets.QPushButton', (['VecMap'], {}), '(VecMap)\n', (10200, 10208), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((10367, 10394), 'PyQt5.QtWidgets.QLineEdit', 'QtWidgets.QLineEdit', (['VecMap'], {}), '(VecMap)\n', (10386, 10394), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((10542, 10566), 'PyQt5.QtWidgets.QLabel', 'QtWidgets.QLabel', (['VecMap'], {}), '(VecMap)\n', (10558, 10566), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((10710, 10737), 'PyQt5.QtWidgets.QCheckBox', 'QtWidgets.QCheckBox', (['VecMap'], {}), '(VecMap)\n', (10729, 10737), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((10950, 10995), 'PyQt5.QtCore.QMetaObject.connectSlotsByName', 'QtCore.QMetaObject.connectSlotsByName', (['VecMap'], {}), '(VecMap)\n', (10987, 10995), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((45486, 45518), 'matplotlib.figure.Figure', 'Figure', (['(5.0, 4.0)'], {'dpi': 'self.dpi'}), '((5.0, 4.0), dpi=self.dpi)\n', (45492, 45518), False, 'from matplotlib.figure import Figure\n'), ((45542, 45564), 
'matplotlib.backends.backend_qt5agg.FigureCanvasQTAgg', 'FigureCanvas', (['self.fig'], {}), '(self.fig)\n', (45554, 45564), True, 'from matplotlib.backends.backend_qt5agg import FigureCanvasQTAgg as FigureCanvas\n'), ((45989, 46036), 'matplotlib.backends.backend_qt5agg.NavigationToolbar2QT', 'NavigationToolbar', (['self.canvas', 'self.main_frame'], {}), '(self.canvas, self.main_frame)\n', (46006, 46036), True, 'from matplotlib.backends.backend_qt5agg import NavigationToolbar2QT as NavigationToolbar\n'), ((46824, 46858), 'matplotlib.figure.Figure', 'Figure', (['(10.0, 10.0)'], {'dpi': 'self.dpi'}), '((10.0, 10.0), dpi=self.dpi)\n', (46830, 46858), False, 'from matplotlib.figure import Figure\n'), ((46882, 46904), 'matplotlib.backends.backend_qt5agg.FigureCanvasQTAgg', 'FigureCanvas', (['self.fig'], {}), '(self.fig)\n', (46894, 46904), True, 'from matplotlib.backends.backend_qt5agg import FigureCanvasQTAgg as FigureCanvas\n'), ((47230, 47277), 'matplotlib.backends.backend_qt5agg.NavigationToolbar2QT', 'NavigationToolbar', (['self.canvas', 'self.main_frame'], {}), '(self.canvas, self.main_frame)\n', (47247, 47277), True, 'from matplotlib.backends.backend_qt5agg import NavigationToolbar2QT as NavigationToolbar\n'), ((53002, 53037), 'scipy.spatial.distance.euclidean', 'distance.euclidean', (['arrow_end', 'atom'], {}), '(arrow_end, atom)\n', (53020, 53037), False, 'from scipy.spatial import distance\n'), ((749, 771), 'PyQt5.QtCore.QSize', 'QtCore.QSize', (['(402)', '(836)'], {}), '(402, 836)\n', (761, 771), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((804, 828), 'PyQt5.QtCore.QSize', 'QtCore.QSize', (['(1024)', '(1024)'], {}), '(1024, 1024)\n', (816, 828), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((924, 952), 'PyQt5.QtCore.QRect', 'QtCore.QRect', (['(20)', '(40)', '(91)', '(41)'], {}), '(20, 40, 91, 41)\n', (936, 952), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((1095, 1125), 'PyQt5.QtCore.QRect', 'QtCore.QRect', (['(150)', 
'(10)', '(111)', '(20)'], {}), '(150, 10, 111, 20)\n', (1107, 1125), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((1253, 1282), 'PyQt5.QtCore.QRect', 'QtCore.QRect', (['(20)', '(90)', '(371)', '(21)'], {}), '(20, 90, 371, 21)\n', (1265, 1282), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((1520, 1549), 'PyQt5.QtCore.QRect', 'QtCore.QRect', (['(20)', '(10)', '(121)', '(16)'], {}), '(20, 10, 121, 16)\n', (1532, 1549), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((1677, 1707), 'PyQt5.QtCore.QRect', 'QtCore.QRect', (['(130)', '(40)', '(251)', '(51)'], {}), '(130, 40, 251, 51)\n', (1689, 1707), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((1987, 2017), 'PyQt5.QtCore.QRect', 'QtCore.QRect', (['(130)', '(130)', '(30)', '(20)'], {}), '(130, 130, 30, 20)\n', (1999, 2017), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((2151, 2181), 'PyQt5.QtCore.QRect', 'QtCore.QRect', (['(20)', '(110)', '(191)', '(16)'], {}), '(20, 110, 191, 16)\n', (2163, 2181), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((2313, 2343), 'PyQt5.QtCore.QRect', 'QtCore.QRect', (['(20)', '(130)', '(111)', '(16)'], {}), '(20, 130, 111, 16)\n', (2325, 2343), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((2490, 2519), 'PyQt5.QtCore.QRect', 'QtCore.QRect', (['(20)', '(170)', '(91)', '(41)'], {}), '(20, 170, 91, 41)\n', (2502, 2519), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((2676, 2705), 'PyQt5.QtCore.QRect', 'QtCore.QRect', (['(20)', '(230)', '(91)', '(41)'], {}), '(20, 230, 91, 41)\n', (2688, 2705), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((2847, 2878), 'PyQt5.QtCore.QRect', 'QtCore.QRect', (['(130)', '(160)', '(251)', '(51)'], {}), '(130, 160, 251, 51)\n', (2859, 2878), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((3153, 3184), 'PyQt5.QtCore.QRect', 'QtCore.QRect', (['(130)', '(230)', '(251)', '(51)'], {}), '(130, 230, 251, 51)\n', (3165, 3184), False, 'from PyQt5 import QtCore, QtGui, 
QtWidgets\n'), ((3457, 3487), 'PyQt5.QtCore.QRect', 'QtCore.QRect', (['(20)', '(280)', '(371)', '(21)'], {}), '(20, 280, 371, 21)\n', (3469, 3487), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((3737, 3767), 'PyQt5.QtCore.QRect', 'QtCore.QRect', (['(20)', '(300)', '(191)', '(16)'], {}), '(20, 300, 191, 16)\n', (3749, 3767), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((3908, 3938), 'PyQt5.QtCore.QRect', 'QtCore.QRect', (['(20)', '(330)', '(111)', '(20)'], {}), '(20, 330, 111, 20)\n', (3920, 3938), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((4085, 4116), 'PyQt5.QtCore.QRect', 'QtCore.QRect', (['(150)', '(330)', '(131)', '(20)'], {}), '(150, 330, 131, 20)\n', (4097, 4116), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((4269, 4298), 'PyQt5.QtCore.QRect', 'QtCore.QRect', (['(20)', '(370)', '(91)', '(41)'], {}), '(20, 370, 91, 41)\n', (4281, 4298), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((4442, 4473), 'PyQt5.QtCore.QRect', 'QtCore.QRect', (['(130)', '(360)', '(251)', '(51)'], {}), '(130, 360, 251, 51)\n', (4454, 4473), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((4762, 4792), 'PyQt5.QtCore.QRect', 'QtCore.QRect', (['(260)', '(10)', '(111)', '(20)'], {}), '(260, 10, 111, 20)\n', (4774, 4792), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((4928, 4958), 'PyQt5.QtCore.QRect', 'QtCore.QRect', (['(20)', '(420)', '(371)', '(21)'], {}), '(20, 420, 371, 21)\n', (4940, 4958), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((5210, 5240), 'PyQt5.QtCore.QRect', 'QtCore.QRect', (['(20)', '(440)', '(191)', '(16)'], {}), '(20, 440, 191, 16)\n', (5222, 5240), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((5376, 5407), 'PyQt5.QtCore.QRect', 'QtCore.QRect', (['(170)', '(130)', '(191)', '(16)'], {}), '(170, 130, 191, 16)\n', (5388, 5407), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((5543, 5573), 'PyQt5.QtCore.QRect', 'QtCore.QRect', (['(20)', '(510)', '(381)', '(16)'], 
{}), '(20, 510, 381, 16)\n', (5555, 5573), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((5716, 5746), 'PyQt5.QtCore.QRect', 'QtCore.QRect', (['(20)', '(550)', '(251)', '(22)'], {}), '(20, 550, 251, 22)\n', (5728, 5746), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((5886, 5916), 'PyQt5.QtCore.QRect', 'QtCore.QRect', (['(20)', '(530)', '(181)', '(16)'], {}), '(20, 530, 181, 16)\n', (5898, 5916), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((6052, 6082), 'PyQt5.QtCore.QRect', 'QtCore.QRect', (['(20)', '(580)', '(381)', '(16)'], {}), '(20, 580, 381, 16)\n', (6064, 6082), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((6218, 6248), 'PyQt5.QtCore.QRect', 'QtCore.QRect', (['(20)', '(600)', '(181)', '(16)'], {}), '(20, 600, 181, 16)\n', (6230, 6248), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((6391, 6421), 'PyQt5.QtCore.QRect', 'QtCore.QRect', (['(20)', '(620)', '(251)', '(22)'], {}), '(20, 620, 251, 22)\n', (6403, 6421), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((6574, 6605), 'PyQt5.QtCore.QRect', 'QtCore.QRect', (['(280)', '(550)', '(101)', '(91)'], {}), '(280, 550, 101, 91)\n', (6586, 6605), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((6762, 6791), 'PyQt5.QtCore.QRect', 'QtCore.QRect', (['(20)', '(680)', '(80)', '(41)'], {}), '(20, 680, 80, 41)\n', (6774, 6791), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((6935, 6966), 'PyQt5.QtCore.QRect', 'QtCore.QRect', (['(200)', '(680)', '(191)', '(51)'], {}), '(200, 680, 191, 51)\n', (6947, 6966), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((7261, 7291), 'PyQt5.QtCore.QRect', 'QtCore.QRect', (['(290)', '(460)', '(91)', '(51)'], {}), '(290, 460, 91, 51)\n', (7273, 7291), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((7431, 7461), 'PyQt5.QtCore.QRect', 'QtCore.QRect', (['(20)', '(730)', '(371)', '(21)'], {}), '(20, 730, 371, 21)\n', (7443, 7461), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((7726, 
7756), 'PyQt5.QtCore.QRect', 'QtCore.QRect', (['(20)', '(780)', '(120)', '(28)'], {}), '(20, 780, 120, 28)\n', (7738, 7756), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((7900, 7930), 'PyQt5.QtCore.QRect', 'QtCore.QRect', (['(60)', '(850)', '(291)', '(16)'], {}), '(60, 850, 291, 16)\n', (7912, 7930), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((8066, 8096), 'PyQt5.QtCore.QRect', 'QtCore.QRect', (['(20)', '(750)', '(211)', '(16)'], {}), '(20, 750, 211, 16)\n', (8078, 8096), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((8245, 8276), 'PyQt5.QtCore.QRect', 'QtCore.QRect', (['(150)', '(780)', '(120)', '(28)'], {}), '(150, 780, 120, 28)\n', (8257, 8276), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((8435, 8465), 'PyQt5.QtCore.QRect', 'QtCore.QRect', (['(20)', '(810)', '(120)', '(28)'], {}), '(20, 810, 120, 28)\n', (8447, 8465), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((8626, 8657), 'PyQt5.QtCore.QRect', 'QtCore.QRect', (['(150)', '(810)', '(120)', '(28)'], {}), '(150, 810, 120, 28)\n', (8638, 8657), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((8818, 8849), 'PyQt5.QtCore.QRect', 'QtCore.QRect', (['(280)', '(780)', '(101)', '(58)'], {}), '(280, 780, 101, 58)\n', (8830, 8849), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((9135, 9164), 'PyQt5.QtCore.QRect', 'QtCore.QRect', (['(20)', '(480)', '(95)', '(20)'], {}), '(20, 480, 95, 20)\n', (9147, 9164), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((9365, 9394), 'PyQt5.QtCore.QRect', 'QtCore.QRect', (['(90)', '(480)', '(95)', '(20)'], {}), '(90, 480, 95, 20)\n', (9377, 9394), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((9540, 9570), 'PyQt5.QtCore.QRect', 'QtCore.QRect', (['(20)', '(460)', '(171)', '(16)'], {}), '(20, 460, 171, 16)\n', (9552, 9570), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((9721, 9751), 'PyQt5.QtCore.QRect', 'QtCore.QRect', (['(200)', '(460)', '(81)', '(51)'], {}), '(200, 460, 81, 
51)\n', (9733, 9751), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((9895, 9924), 'PyQt5.QtCore.QRect', 'QtCore.QRect', (['(20)', '(650)', '(41)', '(16)'], {}), '(20, 650, 41, 16)\n', (9907, 9924), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((10065, 10094), 'PyQt5.QtCore.QRect', 'QtCore.QRect', (['(60)', '(650)', '(30)', '(20)'], {}), '(60, 650, 30, 20)\n', (10077, 10094), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((10249, 10279), 'PyQt5.QtCore.QRect', 'QtCore.QRect', (['(110)', '(680)', '(80)', '(41)'], {}), '(110, 680, 80, 41)\n', (10261, 10279), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((10432, 10462), 'PyQt5.QtCore.QRect', 'QtCore.QRect', (['(150)', '(650)', '(30)', '(20)'], {}), '(150, 650, 30, 20)\n', (10444, 10462), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((10602, 10632), 'PyQt5.QtCore.QRect', 'QtCore.QRect', (['(110)', '(650)', '(41)', '(16)'], {}), '(110, 650, 41, 16)\n', (10614, 10632), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((10775, 10806), 'PyQt5.QtCore.QRect', 'QtCore.QRect', (['(210)', '(650)', '(111)', '(20)'], {}), '(210, 650, 111, 20)\n', (10787, 10806), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((18589, 18626), 'atomap.atom_finding_refining.get_atom_positions', 'get_atom_positions', (['s'], {'separation': 'sep'}), '(s, separation=sep)\n', (18607, 18626), False, 'from atomap.atom_finding_refining import get_atom_positions\n'), ((24648, 24751), 'atomap.tools.remove_atoms_from_image_using_2d_gaussian', 'remove_atoms_from_image_using_2d_gaussian', (['sublattice_A.image', 'sublattice_A'], {'show_progressbar': '(False)'}), '(sublattice_A.image, sublattice_A,\n show_progressbar=False)\n', (24689, 24751), False, 'from atomap.tools import remove_atoms_from_image_using_2d_gaussian\n'), ((40146, 40173), 'os.path.isfile', 'os.path.isfile', (['file_O_disp'], {}), '(file_O_disp)\n', (40160, 40173), False, 'import os\n'), ((49616, 49645), 
'scipy.spatial.distance.cdist', 'distance.cdist', (['[node]', 'nodes'], {}), '([node], nodes)\n', (49630, 49645), False, 'from scipy.spatial import distance\n'), ((50372, 50394), 'scipy.spatial.distance.cdist', 'distance.cdist', (['[a]', 'M'], {}), '([a], M)\n', (50386, 50394), False, 'from scipy.spatial import distance\n'), ((17353, 17376), 'os.path.exists', 'os.path.exists', (['my_path'], {}), '(my_path)\n', (17367, 17376), False, 'import os\n'), ((17395, 17415), 'os.makedirs', 'os.makedirs', (['my_path'], {}), '(my_path)\n', (17406, 17415), False, 'import os\n'), ((17828, 17848), 'numpy.divide', 'np.divide', (['(1)', 's.data'], {}), '(1, s.data)\n', (17837, 17848), True, 'import numpy as np\n'), ((21678, 21720), 'atomap.atom_finding_refining.get_atom_positions', 'get_atom_positions', (['s'], {'separation': 's_factor'}), '(s, separation=s_factor)\n', (21696, 21720), False, 'from atomap.atom_finding_refining import get_atom_positions\n'), ((24009, 24043), 'math.sqrt', 'math.sqrt', (['(z0[0] ** 2 + z0[1] ** 2)'], {}), '(z0[0] ** 2 + z0[1] ** 2)\n', (24018, 24043), False, 'import math\n'), ((24066, 24100), 'math.sqrt', 'math.sqrt', (['(z1[0] ** 2 + z1[1] ** 2)'], {}), '(z1[0] ** 2 + z1[1] ** 2)\n', (24075, 24100), False, 'import math\n'), ((25433, 25507), 'atomap.sublattice.Sublattice', 'Sublattice', (['AB_positions'], {'image': 's.data', 'color': '"""y"""', 'name': '"""Sublattice A + B"""'}), "(AB_positions, image=s.data, color='y', name='Sublattice A + B')\n", (25443, 25507), False, 'from atomap.sublattice import Sublattice\n'), ((25997, 26100), 'atomap.tools.remove_atoms_from_image_using_2d_gaussian', 'remove_atoms_from_image_using_2d_gaussian', (['sublattice_B.image', 'sublattice_B'], {'show_progressbar': '(False)'}), '(sublattice_B.image, sublattice_B,\n show_progressbar=False)\n', (26038, 26100), False, 'from atomap.tools import remove_atoms_from_image_using_2d_gaussian\n'), ((36556, 36624), 'matplotlib_scalebar.scalebar.ScaleBar', 'ScaleBar', (['scale', 
'"""nm"""'], {'location': '"""lower left"""', 'scale_loc': '"""top"""', 'sep': '(2)'}), "(scale, 'nm', location='lower left', scale_loc='top', sep=2)\n", (36564, 36624), False, 'from matplotlib_scalebar.scalebar import ScaleBar\n'), ((38420, 38488), 'matplotlib_scalebar.scalebar.ScaleBar', 'ScaleBar', (['scale', '"""nm"""'], {'location': '"""lower left"""', 'scale_loc': '"""top"""', 'sep': '(2)'}), "(scale, 'nm', location='lower left', scale_loc='top', sep=2)\n", (38428, 38488), False, 'from matplotlib_scalebar.scalebar import ScaleBar\n'), ((53302, 53320), 'math.atan', 'math.atan', (['(dy / dx)'], {}), '(dy / dx)\n', (53311, 53320), False, 'import math\n'), ((19206, 19227), 'numpy.float', 'np.float', (['event.xdata'], {}), '(event.xdata)\n', (19214, 19227), True, 'import numpy as np\n'), ((19253, 19274), 'numpy.float', 'np.float', (['event.ydata'], {}), '(event.ydata)\n', (19261, 19274), True, 'import numpy as np\n'), ((19370, 19422), 'scipy.spatial.distance.euclidean', 'distance.euclidean', (['(x, y)', 'A_positions[atom_nearby]'], {}), '((x, y), A_positions[atom_nearby])\n', (19388, 19422), False, 'from scipy.spatial import distance\n'), ((19700, 19720), 'numpy.asarray', 'np.asarray', (['atom_lst'], {}), '(atom_lst)\n', (19710, 19720), True, 'import numpy as np\n'), ((19727, 19747), 'numpy.asarray', 'np.asarray', (['atom_lst'], {}), '(atom_lst)\n', (19737, 19747), True, 'import numpy as np\n'), ((21806, 21830), 'numpy.asarray', 'np.asarray', (['ini_position'], {}), '(ini_position)\n', (21816, 21830), True, 'import numpy as np\n'), ((21837, 21861), 'numpy.asarray', 'np.asarray', (['ini_position'], {}), '(ini_position)\n', (21847, 21861), True, 'import numpy as np\n'), ((53391, 53409), 'math.atan', 'math.atan', (['(dy / dx)'], {}), '(dy / dx)\n', (53400, 53409), False, 'import math\n'), ((53484, 53502), 'math.atan', 'math.atan', (['(dy / dx)'], {}), '(dy / dx)\n', (53493, 53502), False, 'import math\n'), ((53565, 53583), 'math.atan', 'math.atan', (['(dy / dx)'], 
{}), '(dy / dx)\n', (53574, 53583), False, 'import math\n')]
|
import numpy as np
def count_subset_occurrences(array, subset_array):
    """Count contiguous windows of *array* that equal *subset_array*.

    Overlapping matches are counted individually.
    """
    window = len(subset_array)
    return sum(
        1
        for start in range(len(array) - window + 1)
        if np.array_equal(array[start:start + window], subset_array)
    )
def test_base_case():
    # Smoke test: the pattern [1, 1] occurs 3 times in the sample array --
    # twice (overlapping) in the run of three 1s, once in the later pair.
    assert count_subset_occurrences(
        np.array([0, 1, 1, 1, 2, 2, 2, 1, 1, 3, 3, 3]),
        np.array([1, 1])
    ) == 3
# Run the check at module execution time.
test_base_case()
|
[
"numpy.array"
] |
[((348, 394), 'numpy.array', 'np.array', (['[0, 1, 1, 1, 2, 2, 2, 1, 1, 3, 3, 3]'], {}), '([0, 1, 1, 1, 2, 2, 2, 1, 1, 3, 3, 3])\n', (356, 394), True, 'import numpy as np\n'), ((405, 421), 'numpy.array', 'np.array', (['[1, 1]'], {}), '([1, 1])\n', (413, 421), True, 'import numpy as np\n')]
|
from .. import db
from flask_login import UserMixin, login_manager, LoginManager
from werkzeug.security import generate_password_hash, check_password_hash
from .. import login_manager
from datetime import date, datetime
@login_manager.user_loader
def load_user(id):
    # Flask-Login callback: reload a User from the id stored in the session.
    # NOTE(review): the parameter name shadows the builtin `id`; renaming it
    # to `user_id` would be cleaner but changes the visible signature.
    return User.query.get(int(id))
class User(UserMixin, db.Model):
    """Application user account.

    Stores login credentials (the password only as a werkzeug hash),
    profile data, and relationships to the blogs and comments authored
    by this user.
    """
    __tablename__ = 'users'

    id = db.Column(db.Integer, primary_key = True)
    firstname = db.Column(db.String(255))
    secondname = db.Column(db.String(255))
    username = db.Column(db.String(255),unique = True)
    email = db.Column(db.String(255), unique = True, index = True)
    profile_picture = db.Column(db.String())
    profile_bio = db.Column(db.String(255))
    # Salted werkzeug password hash -- never the plain-text password.
    secured_password = db.Column(db.String(255))
    blog_posts_by_me = db.relationship('Blog', backref = 'myblogposts', lazy = 'dynamic')
    blog_comments_by_me = db.relationship('BlogComment', backref = 'myblogcomments', lazy = 'dynamic')

    @property
    def password(self):
        """Reading the plain-text password is forbidden."""
        raise AttributeError('You cannot view a users password')

    @password.setter
    def password(self, password):
        # Store only a salted hash of the supplied password.
        self.secured_password = generate_password_hash(password)

    def verify_password(self, password):
        """Return True if *password* matches the stored hash."""
        return check_password_hash(self.secured_password, password)

    def save_user(self):
        """Persist this user instance to the database.

        Bug fix: this was decorated with @classmethod while its body uses
        `self` as an instance -- calling it would have added the *class*
        object to the session. It is now a plain instance method, so
        `user.save_user()` keeps working and behaves correctly.
        """
        db.session.add(self)
        db.session.commit()
|
[
"werkzeug.security.check_password_hash",
"werkzeug.security.generate_password_hash"
] |
[((1149, 1181), 'werkzeug.security.generate_password_hash', 'generate_password_hash', (['password'], {}), '(password)\n', (1171, 1181), False, 'from werkzeug.security import generate_password_hash, check_password_hash\n'), ((1239, 1291), 'werkzeug.security.check_password_hash', 'check_password_hash', (['self.secured_password', 'password'], {}), '(self.secured_password, password)\n', (1258, 1291), False, 'from werkzeug.security import generate_password_hash, check_password_hash\n')]
|
# --------------------------------------------------------
# mcan-vqa (Deep Modular Co-Attention Networks)
# modify this to our VQA dataset
# --------------------------------------------------------
import os
from copy import copy
import numpy as np
import matplotlib.pyplot as plt
from matplotlib.patches import Rectangle
from matplotlib import colors
from cfgs.base_cfgs import Cfgs
from core.exec import Execution
import argparse, yaml
def parse_args():
"""
Parse input arguments
"""
parser = argparse.ArgumentParser(description='MCAN Args')
parser.add_argument('--run', dest='run_mode',
choices=['train', 'val', 'test', 'visualize'],
type=str, default='train')
parser.add_argument('--model', dest='model',
choices=['small', 'large'],
default='small', type=str)
parser.add_argument('--split', dest='train_split',
choices=['train', 'train+val', 'train+val+vg'],
help="set training split, "
"eg.'train', 'train+val+vg'"
"set 'train' can trigger the "
"eval after every epoch",
type=str)
parser.add_argument('--eval_every_epoch', default=False,
help='set True to evaluate the '
'val split when an epoch finished'
"(only work when train with "
"'train' split)",
type=bool)
parser.add_argument('--test_save_pred',
help='set True to save the '
'prediction vectors'
'(only work in testing)',
type=bool)
parser.add_argument('--batch_size', default=1, # was 256
help='batch size during training',
type=int)
parser.add_argument('--max_epoch',
help='max training epoch',
type=int)
parser.add_argument('--preload',
help='pre-load the features into memory'
'to increase the I/O speed',
type=bool)
parser.add_argument('--gpu', default='0,1',
help="gpu select, eg.'0, 1, 2'",
type=str)
parser.add_argument('--seed', default=444,
help='fix random seed',
type=int)
parser.add_argument('--version',
help='version control',
type=str)
parser.add_argument('--resume',
help='resume training',
type=bool)
parser.add_argument('--ckpt_version',
help='checkpoint version',
type=str)
parser.add_argument('--ckpt_epoch',
help='checkpoint epoch',
type=int)
parser.add_argument('--ckpt_path',
help='load checkpoint path, we '
'recommend that you use '
'ckpt_version and ckpt_epoch '
'instead',
type=str)
parser.add_argument('--grad_accu_steps',
help='reduce gpu memory usage',
type=int)
parser.add_argument('--num_workers',
help='multithreaded loading',
type=int)
parser.add_argument('--pin_mem',
help='use pin memory',
type=bool)
parser.add_argument('--verbose',
help='verbose print',
type=bool)
parser.add_argument('--dataset_path',
help='vqav2 dataset root path',
type=str)
parser.add_argument('--feature_path',
help='bottom up features root path',
type=str)
args = parser.parse_args()
return args
def main():
opt = Cfgs()
args = parse_args()
args_dict = opt.parse_to_dict(args)
cfg_file = "cfgs/{}_model.yml".format(args.model)
with open(cfg_file, 'r') as f:
yaml_dict = yaml.load(f, Loader=yaml.FullLoader)
args_dict = {**yaml_dict, **args_dict}
opt.add_args(args_dict)
opt.proc()
print('Hyper Parameters:')
print(opt)
opt.check_path()
execution = Execution(opt)
execution.run(opt.run_mode)
def text_layout():
# compute some interesting data
x0, x1 = -5, 5
y0, y1 = -3, 3
x = np.linspace(x0, x1, 500)
y = np.linspace(y0, y1, 500)
X, Y = np.meshgrid(x, y)
Z1 = np.exp(-X**2 - Y**2)
Z2 = np.exp(-(X - 1)**2 - (Y - 1)**2)
Z = (Z1 - Z2) * 2
# Set up a colormap:
# use copy so that we do not mutate the global colormap instance
palette = copy(plt.cm.gray)
palette.set_over('r', 1.0)
palette.set_under('g', 1.0)
palette.set_bad('b', 1.0)
# Alternatively, we could use
# palette.set_bad(alpha = 0.0)
# to make the bad region transparent. This is the default.
# If you comment out all the palette.set* lines, you will see
# all the defaults; under and over will be colored with the
# first and last colors in the palette, respectively.
Zm = np.ma.masked_where(Z > 1.2, Z)
# By setting vmin and vmax in the norm, we establish the
# range to which the regular palette color scale is applied.
# Anything above that range is colored based on palette.set_over, etc.
# set up the Axes objects
fig, (ax1, ax2) = plt.subplots(nrows=2, figsize=(6, 5.4))
# plot using 'continuous' color map
im = ax1.imshow(Zm, interpolation='bilinear',
cmap=palette,
norm=colors.Normalize(vmin=-1.0, vmax=1.0),
aspect='auto',
origin='lower',
extent=[x0, x1, y0, y1])
ax1.set_title('Green=low, Red=high, Blue=masked')
cbar = fig.colorbar(im, extend='both', shrink=0.9, ax=ax1)
cbar.set_label('uniform')
for ticklabel in ax1.xaxis.get_ticklabels():
ticklabel.set_visible(False)
# Plot using a small number of colors, with unevenly spaced boundaries.
im = ax2.imshow(Zm, interpolation='nearest',
cmap=palette,
norm=colors.BoundaryNorm([-1, -0.5, -0.2, 0, 0.2, 0.5, 1],
ncolors=palette.N),
aspect='auto',
origin='lower',
extent=[x0, x1, y0, y1])
ax2.set_title('With BoundaryNorm')
cbar = fig.colorbar(im, extend='both', spacing='proportional',
shrink=0.9, ax=ax2)
cbar.set_label('proportional')
fig.suptitle('imshow, with out-of-range and masked data')
f1 = os.path.join(os.getcwd(), f'results/val_imgs/dark_mask.jpg')
plt.savefig(f1)
plt.close()
if __name__ == '__main__':
main()
# text_layout()
|
[
"matplotlib.pyplot.savefig",
"numpy.meshgrid",
"yaml.load",
"argparse.ArgumentParser",
"numpy.ma.masked_where",
"matplotlib.colors.Normalize",
"os.getcwd",
"matplotlib.pyplot.close",
"matplotlib.colors.BoundaryNorm",
"copy.copy",
"numpy.exp",
"numpy.linspace",
"cfgs.base_cfgs.Cfgs",
"matplotlib.pyplot.subplots",
"core.exec.Execution"
] |
[((516, 564), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""MCAN Args"""'}), "(description='MCAN Args')\n", (539, 564), False, 'import argparse, yaml\n'), ((4103, 4109), 'cfgs.base_cfgs.Cfgs', 'Cfgs', ([], {}), '()\n', (4107, 4109), False, 'from cfgs.base_cfgs import Cfgs\n'), ((4495, 4509), 'core.exec.Execution', 'Execution', (['opt'], {}), '(opt)\n', (4504, 4509), False, 'from core.exec import Execution\n'), ((4645, 4669), 'numpy.linspace', 'np.linspace', (['x0', 'x1', '(500)'], {}), '(x0, x1, 500)\n', (4656, 4669), True, 'import numpy as np\n'), ((4678, 4702), 'numpy.linspace', 'np.linspace', (['y0', 'y1', '(500)'], {}), '(y0, y1, 500)\n', (4689, 4702), True, 'import numpy as np\n'), ((4714, 4731), 'numpy.meshgrid', 'np.meshgrid', (['x', 'y'], {}), '(x, y)\n', (4725, 4731), True, 'import numpy as np\n'), ((4741, 4765), 'numpy.exp', 'np.exp', (['(-X ** 2 - Y ** 2)'], {}), '(-X ** 2 - Y ** 2)\n', (4747, 4765), True, 'import numpy as np\n'), ((4771, 4807), 'numpy.exp', 'np.exp', (['(-(X - 1) ** 2 - (Y - 1) ** 2)'], {}), '(-(X - 1) ** 2 - (Y - 1) ** 2)\n', (4777, 4807), True, 'import numpy as np\n'), ((4935, 4952), 'copy.copy', 'copy', (['plt.cm.gray'], {}), '(plt.cm.gray)\n', (4939, 4952), False, 'from copy import copy\n'), ((5376, 5406), 'numpy.ma.masked_where', 'np.ma.masked_where', (['(Z > 1.2)', 'Z'], {}), '(Z > 1.2, Z)\n', (5394, 5406), True, 'import numpy as np\n'), ((5662, 5701), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {'nrows': '(2)', 'figsize': '(6, 5.4)'}), '(nrows=2, figsize=(6, 5.4))\n', (5674, 5701), True, 'import matplotlib.pyplot as plt\n'), ((6912, 6927), 'matplotlib.pyplot.savefig', 'plt.savefig', (['f1'], {}), '(f1)\n', (6923, 6927), True, 'import matplotlib.pyplot as plt\n'), ((6932, 6943), 'matplotlib.pyplot.close', 'plt.close', ([], {}), '()\n', (6941, 6943), True, 'import matplotlib.pyplot as plt\n'), ((4285, 4321), 'yaml.load', 'yaml.load', (['f'], {'Loader': 'yaml.FullLoader'}), '(f, 
Loader=yaml.FullLoader)\n', (4294, 4321), False, 'import argparse, yaml\n'), ((6860, 6871), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (6869, 6871), False, 'import os\n'), ((5840, 5877), 'matplotlib.colors.Normalize', 'colors.Normalize', ([], {'vmin': '(-1.0)', 'vmax': '(1.0)'}), '(vmin=-1.0, vmax=1.0)\n', (5856, 5877), False, 'from matplotlib import colors\n'), ((6383, 6455), 'matplotlib.colors.BoundaryNorm', 'colors.BoundaryNorm', (['[-1, -0.5, -0.2, 0, 0.2, 0.5, 1]'], {'ncolors': 'palette.N'}), '([-1, -0.5, -0.2, 0, 0.2, 0.5, 1], ncolors=palette.N)\n', (6402, 6455), False, 'from matplotlib import colors\n')]
|
r"""
===========
Transport laws
===========
Create a plot comparing the different transport laws.
"""
import matplotlib.pyplot as plt
import numpy as np
from PyDune.physics.sedtransport import transport_laws as TL
theta = np.linspace(0, 0.4, 1000)
theta_d = 0.035
omega = 8
plt.figure()
plt.plot(theta, TL.quadratic_transport_law(theta, theta_d, omega), label='quadratic transport law')
plt.plot(theta, TL.cubic_transport_law(theta, theta_d, omega), label='cubic transport law')
plt.plot(theta, TL.quartic_transport_law(theta, theta_d), label='cubic transport law')
plt.xlabel(r'Shield number, $\theta$')
plt.ylabel('Non dimensional saturated flux')
plt.legend()
plt.tight_layout()
plt.show()
|
[
"PyDune.physics.sedtransport.transport_laws.quartic_transport_law",
"matplotlib.pyplot.show",
"matplotlib.pyplot.legend",
"PyDune.physics.sedtransport.transport_laws.quadratic_transport_law",
"matplotlib.pyplot.figure",
"numpy.linspace",
"matplotlib.pyplot.ylabel",
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.tight_layout",
"PyDune.physics.sedtransport.transport_laws.cubic_transport_law"
] |
[((226, 251), 'numpy.linspace', 'np.linspace', (['(0)', '(0.4)', '(1000)'], {}), '(0, 0.4, 1000)\n', (237, 251), True, 'import numpy as np\n'), ((280, 292), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (290, 292), True, 'import matplotlib.pyplot as plt\n'), ((572, 610), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Shield number, $\\\\theta$"""'], {}), "('Shield number, $\\\\theta$')\n", (582, 610), True, 'import matplotlib.pyplot as plt\n'), ((611, 655), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Non dimensional saturated flux"""'], {}), "('Non dimensional saturated flux')\n", (621, 655), True, 'import matplotlib.pyplot as plt\n'), ((656, 668), 'matplotlib.pyplot.legend', 'plt.legend', ([], {}), '()\n', (666, 668), True, 'import matplotlib.pyplot as plt\n'), ((669, 687), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {}), '()\n', (685, 687), True, 'import matplotlib.pyplot as plt\n'), ((688, 698), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (696, 698), True, 'import matplotlib.pyplot as plt\n'), ((309, 358), 'PyDune.physics.sedtransport.transport_laws.quadratic_transport_law', 'TL.quadratic_transport_law', (['theta', 'theta_d', 'omega'], {}), '(theta, theta_d, omega)\n', (335, 358), True, 'from PyDune.physics.sedtransport import transport_laws as TL\n'), ((409, 454), 'PyDune.physics.sedtransport.transport_laws.cubic_transport_law', 'TL.cubic_transport_law', (['theta', 'theta_d', 'omega'], {}), '(theta, theta_d, omega)\n', (431, 454), True, 'from PyDune.physics.sedtransport import transport_laws as TL\n'), ((501, 541), 'PyDune.physics.sedtransport.transport_laws.quartic_transport_law', 'TL.quartic_transport_law', (['theta', 'theta_d'], {}), '(theta, theta_d)\n', (525, 541), True, 'from PyDune.physics.sedtransport import transport_laws as TL\n')]
|
from office365.runtime.client_object import ClientObject
from office365.runtime.serviceOperationQuery import ServiceOperationQuery
from office365.runtime.http.http_method import HttpMethod
from office365.runtime.resource_path import ResourcePath
from office365.sharepoint.portal.SPSiteCreationResponse import SPSiteCreationResponse
class SPSiteManager(ClientObject):
def __init__(self, context):
super(SPSiteManager, self).__init__(context, ResourcePath("SPSiteManager"), None)
def create(self, request):
"""Create a modern site"""
response = SPSiteCreationResponse()
qry = ServiceOperationQuery(self, "Create", None, request, "request", response)
self.context.add_query(qry)
return response
def delete(self, site_id):
"""Deletes a SharePoint site"""
payload = {
"siteId": site_id
}
qry = ServiceOperationQuery(self, "Delete", None, payload)
self.context.add_query(qry)
def get_status(self, url):
"""Get the status of a SharePoint site"""
response = SPSiteCreationResponse()
qry = ServiceOperationQuery(self, "Status", None, {'url': url}, None, response)
self.context.add_query(qry)
self.context.get_pending_request().beforeExecute += self._construct_status_request
return response
def _construct_status_request(self, request):
query = self.context.get_pending_request().current_query
request.method = HttpMethod.Get
request.url += "?url='{0}'".format(query.parameter_type['url'])
self.context.get_pending_request().beforeExecute -= self._construct_status_request
|
[
"office365.runtime.resource_path.ResourcePath",
"office365.sharepoint.portal.SPSiteCreationResponse.SPSiteCreationResponse",
"office365.runtime.serviceOperationQuery.ServiceOperationQuery"
] |
[((579, 603), 'office365.sharepoint.portal.SPSiteCreationResponse.SPSiteCreationResponse', 'SPSiteCreationResponse', ([], {}), '()\n', (601, 603), False, 'from office365.sharepoint.portal.SPSiteCreationResponse import SPSiteCreationResponse\n'), ((618, 691), 'office365.runtime.serviceOperationQuery.ServiceOperationQuery', 'ServiceOperationQuery', (['self', '"""Create"""', 'None', 'request', '"""request"""', 'response'], {}), "(self, 'Create', None, request, 'request', response)\n", (639, 691), False, 'from office365.runtime.serviceOperationQuery import ServiceOperationQuery\n'), ((898, 950), 'office365.runtime.serviceOperationQuery.ServiceOperationQuery', 'ServiceOperationQuery', (['self', '"""Delete"""', 'None', 'payload'], {}), "(self, 'Delete', None, payload)\n", (919, 950), False, 'from office365.runtime.serviceOperationQuery import ServiceOperationQuery\n'), ((1088, 1112), 'office365.sharepoint.portal.SPSiteCreationResponse.SPSiteCreationResponse', 'SPSiteCreationResponse', ([], {}), '()\n', (1110, 1112), False, 'from office365.sharepoint.portal.SPSiteCreationResponse import SPSiteCreationResponse\n'), ((1127, 1200), 'office365.runtime.serviceOperationQuery.ServiceOperationQuery', 'ServiceOperationQuery', (['self', '"""Status"""', 'None', "{'url': url}", 'None', 'response'], {}), "(self, 'Status', None, {'url': url}, None, response)\n", (1148, 1200), False, 'from office365.runtime.serviceOperationQuery import ServiceOperationQuery\n'), ((456, 485), 'office365.runtime.resource_path.ResourcePath', 'ResourcePath', (['"""SPSiteManager"""'], {}), "('SPSiteManager')\n", (468, 485), False, 'from office365.runtime.resource_path import ResourcePath\n')]
|
import random
import numpy as np
import time
class Signalgenerator():
def __init__(self):
self.Fs = 8000
self.f = 2
self.sample = 8000
self.x = np.arange(1, self.sample+1)
self.y = np.empty(self.sample)
self.level = 0
self.filename = ''
def set_filename(self, name):
self.filename = name
def configure_device(self, level):
self.level = level
def measure_signal(self):
for i in range(0, self.sample):
delta = random.randint(1, self.level * 10) / 10 - self.level
self.y[i] = self.level * 10 * np.cos(2* np.pi * self.f * i / self.Fs) + delta
def get_signal(self):
return self.y
def save_signal(self):
with open (self.filename, 'w') as f:
f.write('Time=' + str(time.asctime(time.localtime(time.time()))) + '\n')
f.write('Intensity=' + str(random.randint(1, self.level * 10)) + '\n')
f.write('Spectrum:\n')
for i in range(0, self.sample):
f.write(str(self.x[i]) + '\t' + str(self.y[i]) + '\n')
|
[
"random.randint",
"numpy.empty",
"time.time",
"numpy.arange",
"numpy.cos"
] |
[((181, 210), 'numpy.arange', 'np.arange', (['(1)', '(self.sample + 1)'], {}), '(1, self.sample + 1)\n', (190, 210), True, 'import numpy as np\n'), ((226, 247), 'numpy.empty', 'np.empty', (['self.sample'], {}), '(self.sample)\n', (234, 247), True, 'import numpy as np\n'), ((523, 557), 'random.randint', 'random.randint', (['(1)', '(self.level * 10)'], {}), '(1, self.level * 10)\n', (537, 557), False, 'import random\n'), ((618, 658), 'numpy.cos', 'np.cos', (['(2 * np.pi * self.f * i / self.Fs)'], {}), '(2 * np.pi * self.f * i / self.Fs)\n', (624, 658), True, 'import numpy as np\n'), ((922, 956), 'random.randint', 'random.randint', (['(1)', '(self.level * 10)'], {}), '(1, self.level * 10)\n', (936, 956), False, 'import random\n'), ((860, 871), 'time.time', 'time.time', ([], {}), '()\n', (869, 871), False, 'import time\n')]
|
from server.mod_auth.auth import load_user # , register, login
from server.tests.helpers import FlaskTestCase, fixtures
class TestAuth(FlaskTestCase):
@fixtures('single_user.json')
def test_load_existing_user(self):
"""Test loading a single valid user"""
with self.flaskapp.test_request_context():
user = load_user(1)
assert user is not None
assert user.username == 'ganemone'
@fixtures('base.json')
def test_load_nonexisting_user(self):
"""Test loading a user not in the database"""
with self.flaskapp.test_request_context():
user = load_user(50)
assert user is None
|
[
"server.mod_auth.auth.load_user",
"server.tests.helpers.fixtures"
] |
[((159, 187), 'server.tests.helpers.fixtures', 'fixtures', (['"""single_user.json"""'], {}), "('single_user.json')\n", (167, 187), False, 'from server.tests.helpers import FlaskTestCase, fixtures\n'), ((446, 467), 'server.tests.helpers.fixtures', 'fixtures', (['"""base.json"""'], {}), "('base.json')\n", (454, 467), False, 'from server.tests.helpers import FlaskTestCase, fixtures\n'), ((344, 356), 'server.mod_auth.auth.load_user', 'load_user', (['(1)'], {}), '(1)\n', (353, 356), False, 'from server.mod_auth.auth import load_user\n'), ((634, 647), 'server.mod_auth.auth.load_user', 'load_user', (['(50)'], {}), '(50)\n', (643, 647), False, 'from server.mod_auth.auth import load_user\n')]
|
# Core functions for Vireo model
# Author: <NAME>
# Date: 30/08/2019
# http://edwardlib.org/tutorials/probabilistic-pca
# https://github.com/allentran/pca-magic
import sys
import itertools
import numpy as np
from scipy.stats import entropy
from scipy.special import digamma
from .vireo_base import normalize, loglik_amplify, beta_entropy
def vireo_core(AD, DP, n_donor=None, GT_prior=None, learn_GT=True,
theta_prior=None, learn_theta=True, ASE_mode=False,
Psi=None, ID_prob_init=None, doublet_prior=None, check_doublet=True,
min_iter=20, max_iter=100, min_GP=0.00001, epsilon_conv=1e-2,
random_seed=None, verbose=False):
"""
Vireo core function to cluster the cells into donors.
"""
if random_seed is not None:
np.random.seed(random_seed)
if n_donor is None:
if len(GT_prior.shape) < 3 or GT_prior.shape[1] < 2:
print("Error: no n_donor and GT_prior has < 2 donors.")
sys.exit(1)
else:
n_donor = GT_prior.shape[1]
n_var = AD.shape[0] # n_variants
## initialize thete
if theta_prior is None:
#theta_prior = np.array([[0.3, 29.7], [3, 3], [29.7, 0.3]])
theta_prior = np.array([[0.1, 99.9], [50, 50], [99.9, 0.1]])
theta_shapes = theta_prior.copy()
if ASE_mode and len(theta_prior.shape) == 2:
theta_prior = np.repeat(np.expand_dims(theta_prior, 2), n_var, axis=2)
theta_shapes = np.repeat(np.expand_dims(theta_shapes, 2), n_var, axis=2)
n_gt = theta_shapes.shape[0] # number of genotype categories
## initialize Psi
if Psi is None:
Psi = np.ones(n_donor) / n_donor
else:
Psi = Psi[:n_donor] / np.sum(Psi[:n_donor])
if ID_prob_init is None:
ID_prob = normalize(np.random.rand(AD.shape[1], n_donor))
else:
ID_prob = normalize(ID_prob_init.copy())
## initialize GT
if GT_prior is None:
GT_prior = normalize(np.ones((n_var, n_donor, n_gt)))
GT_prob, logLik_GT = get_GT_prob(AD, DP, ID_prob,
theta_shapes, GT_prior)
if learn_GT is False:
print("As GT_prior is not given, we change learn_GT to True.")
learn_GT = True
else:
GT_prob = GT_prior.copy()
GT_prior[GT_prior < min_GP] = min_GP
GT_prior[GT_prior > 1 - min_GP] = 1 - min_GP
GT_prior = normalize(GT_prior)
#TODO: check if there is a better way to deal with GT imcompleteness
if GT_prior.shape[1] < n_donor:
_add_n = n_donor - GT_prior.shape[1]
GT_prior = np.append(GT_prior,
normalize(np.ones((n_var, n_gt, _add_n)), axis=1))
GT_prob = GT_prior.copy()
if learn_GT is False:
print("As GT_prior is not complete, we change learn_GT to True.")
learn_GT = True
elif GT_prior.shape[1] > n_donor:
print("Warning: n_donor is smaller than samples in GT_prior, hence we "
"ignore n_donor.")
n_donor = GT_prior.shape[1]
# check if n_gt is matched to GT_prior
if GT_prior.shape[2] != n_gt:
print("Error: number of GT categories not matched: theta and GT_prior")
sys.exit(1)
## VB interations
LB = np.zeros(max_iter)
for it in range(max_iter):
ID_prob, GT_prob, theta_shapes, LB[it] = update_VB(AD, DP, GT_prob,
theta_shapes, theta_prior, GT_prior, Psi, doublet_prior,
learn_GT=learn_GT, learn_theta=learn_theta,
check_doublet=check_doublet)
if it > min_iter:
if LB[it] < LB[it - 1]:
if verbose:
print("Warning: Lower bound decreases!\n")
elif it == max_iter - 1:
if verbose:
print("Warning: VB did not converge!\n")
elif LB[it] - LB[it - 1] < epsilon_conv:
break
## one-off check doublet
if check_doublet:
ID_prob2, GT_prob, theta_shapes, LB_doublet = update_VB(AD, DP, GT_prob,
theta_shapes, theta_prior, GT_prior, Psi, doublet_prior,
learn_GT=True, learn_theta=learn_theta, check_doublet=True)
ID_prob = ID_prob2[:, :n_donor]
doublet_prob = ID_prob2[:, n_donor:]
else:
LB_doublet = LB[it]
n_donor_doublt = int(n_donor * (n_donor - 1) / 2)
doublet_prob = np.zeros((ID_prob.shape[0], n_donor_doublt))
RV = {}
RV['ID_prob'] = ID_prob
RV['GT_prob'] = GT_prob
RV['doublet_prob'] = doublet_prob
RV['theta_shapes'] = theta_shapes
RV['LB_list'] = LB[: it+1]
RV['LB_doublet'] = LB_doublet
return RV
def update_VB(AD, DP, GT_prob, theta_shapes, theta_prior, GT_prior,
Psi, doublet_prior=None, learn_GT=True, learn_theta=True,
check_doublet=False):
"""
Update the parameters of each component of the variantional
distribution.
The doublet probability can be created by doublet genotypes
"""
if check_doublet:
GT_both = add_doublet_GT(GT_prob)
theta_both = add_doublet_theta(theta_shapes)
n_doublet_pair = GT_both.shape[1] - GT_prob.shape[1]
if doublet_prior is None:
doublet_prior = min(0.5, AD.shape[1] / 100000)
Psi_both = np.append(Psi * (1 - doublet_prior),
(np.ones(n_doublet_pair) / n_doublet_pair *
doublet_prior))
else:
Psi_both = Psi.copy()
GT_both = GT_prob.copy()
theta_both = theta_shapes.copy()
ID_prob2, logLik_ID = get_ID_prob(AD, DP, GT_both, theta_both, Psi_both)
ID_prob = ID_prob2[:, :GT_prob.shape[1]]
if learn_GT:
GT_prob, logLik_GT = get_GT_prob(AD, DP, ID_prob,
theta_shapes, GT_prior)
if learn_theta:
theta_shapes = get_theta_shapes(AD, DP, ID_prob,
GT_prob, theta_prior)
### check how to calculate lower bound for when detecting doublets
LB_val = VB_lower_bound(logLik_ID, GT_prob, ID_prob2, theta_shapes,
theta_prior, GT_prior, Psi_both)
return ID_prob2, GT_prob, theta_shapes, LB_val
def get_theta_shapes(AD, DP, ID_prob, GT_prob, theta_prior):
"""
"""
S1_gt = AD * ID_prob
SS_gt = DP * ID_prob
S2_gt = SS_gt - S1_gt
theta_shapes = theta_prior.copy()
for ig in range(theta_shapes.shape[0]):
_axis = 1 if len(theta_shapes.shape) == 3 else None
theta_shapes[ig, 0] += np.sum(S1_gt * GT_prob[:, :, ig], axis=_axis)
theta_shapes[ig, 1] += np.sum(S2_gt * GT_prob[:, :, ig], axis=_axis)
return theta_shapes
def get_ID_prob(AD, DP, GT_prob, theta_shapes, Psi=None):
"""
"""
if Psi is None:
Psi = np.ones(GT_prob.shape[1]) / GT_prob.shape[1]
BD = DP - AD
logLik_ID = np.zeros((AD.shape[1], GT_prob.shape[1]))
for ig in range(GT_prob.shape[2]):
_digmma1 = digamma(theta_shapes[ig, 0]).reshape(-1, 1)
_digmma2 = digamma(theta_shapes[ig, 1]).reshape(-1, 1)
_digmmas = digamma(theta_shapes[ig, :].sum(axis=0)).reshape(-1, 1)
S1 = AD.transpose() * (GT_prob[:, :, ig] * _digmma1)
S2 = BD.transpose() * (GT_prob[:, :, ig] * _digmma2)
SS = DP.transpose() * (GT_prob[:, :, ig] * _digmmas)
logLik_ID += (S1 + S2 - SS)
Psi_norm = np.log(Psi / np.sum(Psi))
ID_prob = np.exp(loglik_amplify(logLik_ID + Psi_norm, axis=1))
ID_prob = normalize(ID_prob, axis=1)
return ID_prob, logLik_ID
def get_GT_prob(AD, DP, ID_prob, theta_shapes, GT_prior=None):
"""
"""
if GT_prior is None:
GT_prior = np.ones((AD.shape[0], ID_prob.shape[1],
theta_shapes.shape[0]))
GT_prior = GT_prior / theta_shapes.shape[0]
S1_gt = AD * ID_prob
SS_gt = DP * ID_prob
S2_gt = SS_gt - S1_gt
logLik_GT = np.zeros(GT_prior.shape)
for ig in range(logLik_GT.shape[2]):
_digmma1 = digamma(theta_shapes[ig, 0]).reshape(-1, 1)
_digmma2 = digamma(theta_shapes[ig, 1]).reshape(-1, 1)
_digmmas = digamma(theta_shapes[ig, :].sum(axis=0)).reshape(-1, 1)
logLik_GT[:, :, ig] = (S1_gt * _digmma1 +
S2_gt * _digmma2 -
SS_gt * _digmmas)
# += np.log(GT_prior)
GT_prob = loglik_amplify(logLik_GT + np.log(GT_prior), axis=2)
GT_prob = normalize(np.exp(GT_prob), axis=2)
return GT_prob, logLik_GT
def VB_lower_bound(logLik_ID, GT_prob, ID_prob, theta_shapes,
theta_prior, GT_prior=None, Psi=None):
"""
"""
if GT_prior is None:
GT_prior = normalize(np.ones(GT_prob.shape), axis=2)
if Psi is None:
ID_prior = np.ones(ID_prob.shape) / ID_prob.shape[1]
else:
ID_prior = np.ones(ID_prob.shape) * np.log(Psi / np.sum(Psi))
LB_p = np.sum(logLik_ID * ID_prob)
KL_ID = -np.sum(entropy(ID_prob, ID_prior, axis=1))
KL_GT = -np.sum(entropy(GT_prob, GT_prior, axis=2))
KL_theta = -beta_entropy(theta_shapes, theta_prior)
# print(LB_p, KL_ID, KL_GT, KL_theta)
return LB_p - KL_ID - KL_GT - KL_theta
def add_doublet_theta(theta_shapes):
"""
calculate theta for doublet genotype: GT=0&1, GT=0&2, and GT=1&2 by
averaging thire beta paramters
Example
-------
theta_shapes = np.array([[0.3, 29.7], [3, 3], [29.7, 0.3]])
add_doublet_theta(theta_shapes)
"""
# TODO: support reduced GT for relatives
combn_iter = itertools.combinations(range(theta_shapes.shape[0]), 2)
db_idx = np.array([x for x in combn_iter])
_theta_p1 = theta_shapes[db_idx[:, 0]]
_theta_p2 = theta_shapes[db_idx[:, 1]]
_theta_mean = (normalize(_theta_p1, axis=1) +
normalize(_theta_p2, axis=1)) / 2.0
_theta_sum = np.sqrt(np.sum(_theta_p1, axis=1, keepdims=True) *
np.sum(_theta_p2, axis=1, keepdims=True))
theta_shapes_db = _theta_mean * _theta_sum
return np.append(theta_shapes, theta_shapes_db, axis=0)
def add_doublet_GT(GT_prob):
"""
Add doublet genotype by summarizing their probability:
New GT has five categories: 0, 1, 2, 1.5, 2.5
TODO: New GT has six categories: 0, 1, 2, 0_1, 0_2, 1_2
"""
combn_iter = itertools.combinations(range(GT_prob.shape[2]), 2)
gt_idx = np.array([x for x in combn_iter]) # GT combination
g_idx1 = gt_idx[:, 0]
g_idx2 = gt_idx[:, 1]
combn_iter = itertools.combinations(range(GT_prob.shape[1]), 2)
sp_idx = np.array([x for x in combn_iter]) # sample combination
s_idx1 = sp_idx[:, 0]
s_idx2 = sp_idx[:, 1]
## GT_prob has three genotypes: 0, 1, 2;
n_gt = GT_prob.shape[2]
GT_prob2 = np.zeros((GT_prob.shape[0], sp_idx.shape[0],
n_gt + gt_idx.shape[0]))
GT_prob2[:, :, :n_gt] = (GT_prob[:, s_idx1, :] *
GT_prob[:, s_idx2, :])
GT_prob2[:, :, n_gt:] = (GT_prob[:, s_idx1, :][:, :, g_idx1] *
GT_prob[:, s_idx2, :][:, :, g_idx2] +
GT_prob[:, s_idx1, :][:, :, g_idx2] *
GT_prob[:, s_idx2, :][:, :, g_idx1])
GT_prob2 = normalize(GT_prob2, axis=2)
GT_prob1 = np.append(GT_prob,
np.zeros((GT_prob.shape[0], GT_prob.shape[1], gt_idx.shape[0])), axis=2)
return np.append(GT_prob1, GT_prob2, axis=1)
|
[
"numpy.sum",
"numpy.random.seed",
"numpy.log",
"scipy.stats.entropy",
"numpy.zeros",
"numpy.ones",
"numpy.expand_dims",
"numpy.append",
"scipy.special.digamma",
"numpy.array",
"numpy.exp",
"numpy.random.rand",
"sys.exit"
] |
[((3246, 3264), 'numpy.zeros', 'np.zeros', (['max_iter'], {}), '(max_iter)\n', (3254, 3264), True, 'import numpy as np\n'), ((6889, 6930), 'numpy.zeros', 'np.zeros', (['(AD.shape[1], GT_prob.shape[1])'], {}), '((AD.shape[1], GT_prob.shape[1]))\n', (6897, 6930), True, 'import numpy as np\n'), ((7960, 7984), 'numpy.zeros', 'np.zeros', (['GT_prior.shape'], {}), '(GT_prior.shape)\n', (7968, 7984), True, 'import numpy as np\n'), ((8964, 8991), 'numpy.sum', 'np.sum', (['(logLik_ID * ID_prob)'], {}), '(logLik_ID * ID_prob)\n', (8970, 8991), True, 'import numpy as np\n'), ((9672, 9705), 'numpy.array', 'np.array', (['[x for x in combn_iter]'], {}), '([x for x in combn_iter])\n', (9680, 9705), True, 'import numpy as np\n'), ((10102, 10150), 'numpy.append', 'np.append', (['theta_shapes', 'theta_shapes_db'], {'axis': '(0)'}), '(theta_shapes, theta_shapes_db, axis=0)\n', (10111, 10150), True, 'import numpy as np\n'), ((10448, 10481), 'numpy.array', 'np.array', (['[x for x in combn_iter]'], {}), '([x for x in combn_iter])\n', (10456, 10481), True, 'import numpy as np\n'), ((10633, 10666), 'numpy.array', 'np.array', (['[x for x in combn_iter]'], {}), '([x for x in combn_iter])\n', (10641, 10666), True, 'import numpy as np\n'), ((10833, 10902), 'numpy.zeros', 'np.zeros', (['(GT_prob.shape[0], sp_idx.shape[0], n_gt + gt_idx.shape[0])'], {}), '((GT_prob.shape[0], sp_idx.shape[0], n_gt + gt_idx.shape[0]))\n', (10841, 10902), True, 'import numpy as np\n'), ((11479, 11516), 'numpy.append', 'np.append', (['GT_prob1', 'GT_prob2'], {'axis': '(1)'}), '(GT_prob1, GT_prob2, axis=1)\n', (11488, 11516), True, 'import numpy as np\n'), ((758, 785), 'numpy.random.seed', 'np.random.seed', (['random_seed'], {}), '(random_seed)\n', (772, 785), True, 'import numpy as np\n'), ((1211, 1257), 'numpy.array', 'np.array', (['[[0.1, 99.9], [50, 50], [99.9, 0.1]]'], {}), '([[0.1, 99.9], [50, 50], [99.9, 0.1]])\n', (1219, 1257), True, 'import numpy as np\n'), ((3202, 3213), 'sys.exit', 'sys.exit', (['(1)'], 
{}), '(1)\n', (3210, 3213), False, 'import sys\n'), ((4376, 4420), 'numpy.zeros', 'np.zeros', (['(ID_prob.shape[0], n_donor_doublt)'], {}), '((ID_prob.shape[0], n_donor_doublt))\n', (4384, 4420), True, 'import numpy as np\n'), ((6554, 6599), 'numpy.sum', 'np.sum', (['(S1_gt * GT_prob[:, :, ig])'], {'axis': '_axis'}), '(S1_gt * GT_prob[:, :, ig], axis=_axis)\n', (6560, 6599), True, 'import numpy as np\n'), ((6631, 6676), 'numpy.sum', 'np.sum', (['(S2_gt * GT_prob[:, :, ig])'], {'axis': '_axis'}), '(S2_gt * GT_prob[:, :, ig], axis=_axis)\n', (6637, 6676), True, 'import numpy as np\n'), ((7708, 7771), 'numpy.ones', 'np.ones', (['(AD.shape[0], ID_prob.shape[1], theta_shapes.shape[0])'], {}), '((AD.shape[0], ID_prob.shape[1], theta_shapes.shape[0]))\n', (7715, 7771), True, 'import numpy as np\n'), ((8512, 8527), 'numpy.exp', 'np.exp', (['GT_prob'], {}), '(GT_prob)\n', (8518, 8527), True, 'import numpy as np\n'), ((11395, 11458), 'numpy.zeros', 'np.zeros', (['(GT_prob.shape[0], GT_prob.shape[1], gt_idx.shape[0])'], {}), '((GT_prob.shape[0], GT_prob.shape[1], gt_idx.shape[0]))\n', (11403, 11458), True, 'import numpy as np\n'), ((952, 963), 'sys.exit', 'sys.exit', (['(1)'], {}), '(1)\n', (960, 963), False, 'import sys\n'), ((1377, 1407), 'numpy.expand_dims', 'np.expand_dims', (['theta_prior', '(2)'], {}), '(theta_prior, 2)\n', (1391, 1407), True, 'import numpy as np\n'), ((1457, 1488), 'numpy.expand_dims', 'np.expand_dims', (['theta_shapes', '(2)'], {}), '(theta_shapes, 2)\n', (1471, 1488), True, 'import numpy as np\n'), ((1627, 1643), 'numpy.ones', 'np.ones', (['n_donor'], {}), '(n_donor)\n', (1634, 1643), True, 'import numpy as np\n'), ((1694, 1715), 'numpy.sum', 'np.sum', (['Psi[:n_donor]'], {}), '(Psi[:n_donor])\n', (1700, 1715), True, 'import numpy as np\n'), ((1773, 1809), 'numpy.random.rand', 'np.random.rand', (['AD.shape[1]', 'n_donor'], {}), '(AD.shape[1], n_donor)\n', (1787, 1809), True, 'import numpy as np\n'), ((1950, 1981), 'numpy.ones', 'np.ones', (['(n_var, 
n_donor, n_gt)'], {}), '((n_var, n_donor, n_gt))\n', (1957, 1981), True, 'import numpy as np\n'), ((6810, 6835), 'numpy.ones', 'np.ones', (['GT_prob.shape[1]'], {}), '(GT_prob.shape[1])\n', (6817, 6835), True, 'import numpy as np\n'), ((7423, 7434), 'numpy.sum', 'np.sum', (['Psi'], {}), '(Psi)\n', (7429, 7434), True, 'import numpy as np\n'), ((8462, 8478), 'numpy.log', 'np.log', (['GT_prior'], {}), '(GT_prior)\n', (8468, 8478), True, 'import numpy as np\n'), ((8751, 8773), 'numpy.ones', 'np.ones', (['GT_prob.shape'], {}), '(GT_prob.shape)\n', (8758, 8773), True, 'import numpy as np\n'), ((8822, 8844), 'numpy.ones', 'np.ones', (['ID_prob.shape'], {}), '(ID_prob.shape)\n', (8829, 8844), True, 'import numpy as np\n'), ((8893, 8915), 'numpy.ones', 'np.ones', (['ID_prob.shape'], {}), '(ID_prob.shape)\n', (8900, 8915), True, 'import numpy as np\n'), ((9012, 9046), 'scipy.stats.entropy', 'entropy', (['ID_prob', 'ID_prior'], {'axis': '(1)'}), '(ID_prob, ID_prior, axis=1)\n', (9019, 9046), False, 'from scipy.stats import entropy\n'), ((9068, 9102), 'scipy.stats.entropy', 'entropy', (['GT_prob', 'GT_prior'], {'axis': '(2)'}), '(GT_prob, GT_prior, axis=2)\n', (9075, 9102), False, 'from scipy.stats import entropy\n'), ((9926, 9966), 'numpy.sum', 'np.sum', (['_theta_p1'], {'axis': '(1)', 'keepdims': '(True)'}), '(_theta_p1, axis=1, keepdims=True)\n', (9932, 9966), True, 'import numpy as np\n'), ((9996, 10036), 'numpy.sum', 'np.sum', (['_theta_p2'], {'axis': '(1)', 'keepdims': '(True)'}), '(_theta_p2, axis=1, keepdims=True)\n', (10002, 10036), True, 'import numpy as np\n'), ((2638, 2668), 'numpy.ones', 'np.ones', (['(n_var, n_gt, _add_n)'], {}), '((n_var, n_gt, _add_n))\n', (2645, 2668), True, 'import numpy as np\n'), ((6989, 7017), 'scipy.special.digamma', 'digamma', (['theta_shapes[ig, 0]'], {}), '(theta_shapes[ig, 0])\n', (6996, 7017), False, 'from scipy.special import digamma\n'), ((7052, 7080), 'scipy.special.digamma', 'digamma', (['theta_shapes[ig, 1]'], {}), 
'(theta_shapes[ig, 1])\n', (7059, 7080), False, 'from scipy.special import digamma\n'), ((8053, 8081), 'scipy.special.digamma', 'digamma', (['theta_shapes[ig, 0]'], {}), '(theta_shapes[ig, 0])\n', (8060, 8081), False, 'from scipy.special import digamma\n'), ((8116, 8144), 'scipy.special.digamma', 'digamma', (['theta_shapes[ig, 1]'], {}), '(theta_shapes[ig, 1])\n', (8123, 8144), False, 'from scipy.special import digamma\n'), ((5345, 5368), 'numpy.ones', 'np.ones', (['n_doublet_pair'], {}), '(n_doublet_pair)\n', (5352, 5368), True, 'import numpy as np\n'), ((8931, 8942), 'numpy.sum', 'np.sum', (['Psi'], {}), '(Psi)\n', (8937, 8942), True, 'import numpy as np\n')]
|
import numpy as np
import matplotlib.pyplot as plt

## Preliminary test: build the convolution matrix C of a discretized
## permittivity distribution A for RCWA-style computations.
# inputs: A, P, Q, R
# A is the discrete representation of epsilon
# number of spatial harmonics (or orders) along x, y, z
P = 6
Q = 6
R = 6

Nx = 20; Ny = 20; Nz = 1  # this is fundamentally 3D; Nz = 1 collapses to 2D
N = np.array([Nx, Ny, Nz])

## generalize two 2D geometries: a high-permittivity square in vacuum
# NOTE(review): A has shape N+1 while the FFT normalization below divides by
# np.prod(N) -- confirm whether A should be np.ones(N) instead.
A = np.ones(N + 1)
A[2:18, 2:18, 0] = 12
plt.imshow(A[:, :, 0])
plt.show()

# deal with different dimensionalities
if len(N) == 1:
    Q = 1; R = 1
elif len(N) == 2:
    R = 1

NH = P * Q * R

# symmetric harmonic index ranges, e.g. [-3, ..., 3] for P = 6
p = list(range(-int(np.floor(P / 2)), int(np.floor(P / 2)) + 1))
print(p)
q = list(range(-int(np.floor(Q / 2)), int(np.floor(Q / 2)) + 1))
r = list(range(-int(np.floor(R / 2)), int(np.floor(R / 2)) + 1))

# normalized, zero-centered Fourier transform of the permittivity
Af = (1 / np.prod(N)) * np.fft.fftshift(np.fft.fftn(A))

# central (zero-frequency) indices
p0 = int(np.floor(Nx / 2))
q0 = int(np.floor(Ny / 2))
r0 = int(np.floor(Nz / 2))

C = np.zeros((NH, NH))
C = C.astype(complex)

for rrow in range(R):
    for qrow in range(Q):
        for prow in range(P):
            # first term locates z plane, 2nd locates y column, prow locates x
            row = (rrow) * Q * P + (qrow) * P + prow
            for rcol in range(R):
                for qcol in range(Q):
                    for pcol in range(P):
                        col = (rcol) * Q * P + (qcol) * P + pcol
                        pfft = p[prow] - p[pcol]
                        qfft = q[qrow] - q[qcol]
                        # BUG FIX: was r[rrow] - r[rrow] (always zero);
                        # the z offset must difference row and column
                        # harmonics just like the p and q offsets do.
                        rfft = r[rrow] - r[rcol]
                        C[row, col] = Af[p0 + pfft, q0 + qfft, r0 + rfft]

plt.imshow(np.abs(Af[:, :, 0]))
plt.show()
plt.imshow(np.abs(C))
plt.show()
plt.plot(np.diag(abs(C)))
plt.show()
|
[
"matplotlib.pyplot.show",
"numpy.abs",
"matplotlib.pyplot.imshow",
"numpy.floor",
"numpy.fft.fftn",
"numpy.zeros",
"numpy.ones",
"numpy.array",
"numpy.prod"
] |
[((296, 318), 'numpy.array', 'np.array', (['[Nx, Ny, Nz]'], {}), '([Nx, Ny, Nz])\n', (304, 318), True, 'import numpy as np\n'), ((359, 373), 'numpy.ones', 'np.ones', (['(N + 1)'], {}), '(N + 1)\n', (366, 373), True, 'import numpy as np\n'), ((395, 417), 'matplotlib.pyplot.imshow', 'plt.imshow', (['A[:, :, 0]'], {}), '(A[:, :, 0])\n', (405, 417), True, 'import matplotlib.pyplot as plt\n'), ((417, 427), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (425, 427), True, 'import matplotlib.pyplot as plt\n'), ((890, 908), 'numpy.zeros', 'np.zeros', (['(NH, NH)'], {}), '((NH, NH))\n', (898, 908), True, 'import numpy as np\n'), ((1561, 1571), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (1569, 1571), True, 'import matplotlib.pyplot as plt\n'), ((1595, 1605), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (1603, 1605), True, 'import matplotlib.pyplot as plt\n'), ((1632, 1642), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (1640, 1642), True, 'import matplotlib.pyplot as plt\n'), ((816, 832), 'numpy.floor', 'np.floor', (['(Nx / 2)'], {}), '(Nx / 2)\n', (824, 832), True, 'import numpy as np\n'), ((842, 858), 'numpy.floor', 'np.floor', (['(Ny / 2)'], {}), '(Ny / 2)\n', (850, 858), True, 'import numpy as np\n'), ((868, 884), 'numpy.floor', 'np.floor', (['(Nz / 2)'], {}), '(Nz / 2)\n', (876, 884), True, 'import numpy as np\n'), ((1539, 1558), 'numpy.abs', 'np.abs', (['Af[:, :, 0]'], {}), '(Af[:, :, 0])\n', (1545, 1558), True, 'import numpy as np\n'), ((1583, 1592), 'numpy.abs', 'np.abs', (['C'], {}), '(C)\n', (1589, 1592), True, 'import numpy as np\n'), ((743, 753), 'numpy.prod', 'np.prod', (['N'], {}), '(N)\n', (750, 753), True, 'import numpy as np\n'), ((771, 785), 'numpy.fft.fftn', 'np.fft.fftn', (['A'], {}), '(A)\n', (782, 785), True, 'import numpy as np\n'), ((565, 580), 'numpy.floor', 'np.floor', (['(P / 2)'], {}), '(P / 2)\n', (573, 580), True, 'import numpy as np\n'), ((585, 600), 'numpy.floor', 'np.floor', (['(P / 2)'], {}), '(P / 
2)\n', (593, 600), True, 'import numpy as np\n'), ((634, 649), 'numpy.floor', 'np.floor', (['(Q / 2)'], {}), '(Q / 2)\n', (642, 649), True, 'import numpy as np\n'), ((654, 669), 'numpy.floor', 'np.floor', (['(Q / 2)'], {}), '(Q / 2)\n', (662, 669), True, 'import numpy as np\n'), ((694, 709), 'numpy.floor', 'np.floor', (['(R / 2)'], {}), '(R / 2)\n', (702, 709), True, 'import numpy as np\n'), ((714, 729), 'numpy.floor', 'np.floor', (['(R / 2)'], {}), '(R / 2)\n', (722, 729), True, 'import numpy as np\n')]
|
from fastapi import APIRouter

# Top-level router that aggregates all sub-routers of the application.
main_router = APIRouter()

from resources.db import session_dependency

# NOTE(review): session_dep is created here but never referenced in this
# snippet -- confirm it is used elsewhere (e.g. as a shared dependency).
session_dep = session_dependency()


@main_router.get("/", status_code=200)
async def root():
    # Simple welcome/health-check endpoint at the API root.
    return {"msg": "Welcome to UMass Match!"}

# Imported late, presumably to avoid circular imports -- TODO confirm.
from .user import user_router
from .match import match_router

# add individual routers to top-level router
main_router.include_router(user_router)
main_router.include_router(match_router)
|
[
"resources.db.session_dependency",
"fastapi.APIRouter"
] |
[((45, 56), 'fastapi.APIRouter', 'APIRouter', ([], {}), '()\n', (54, 56), False, 'from fastapi import APIRouter\n'), ((117, 137), 'resources.db.session_dependency', 'session_dependency', ([], {}), '()\n', (135, 137), False, 'from resources.db import session_dependency\n')]
|
import torch.nn as nn
from torch import cat, transpose
import torch
import torch.nn.functional as F
from Layers import EncoderLayer, DecoderLayer
from Sublayers import Norm, OutputFeedForward
import copy
import attention_setting
import numpy as np
import crispr_attn
import math
import OT_crispr_attn
import sys
import importlib
import pdb
# Setting the correct config file
# If a model name is given on the command line, configs are read from
# models.<name>.config / models.<name>.attention_setting; otherwise from the
# top-level config / attention_setting modules.
config_path = ".".join(["models", sys.argv[1]]) + "." if len(sys.argv) >= 2 else ""
config = importlib.import_module(config_path + "config")
# NOTE: this rebinds the name imported by `import attention_setting` above.
attention_setting = importlib.import_module(config_path+"attention_setting")
def get_clones(module, N):
    """Return an nn.ModuleList holding N independent deep copies of `module`."""
    clones = [copy.deepcopy(module) for _ in range(N)]
    return nn.ModuleList(clones)
class Encoder(nn.Module):
    """Stack of N identical EncoderLayer blocks, optionally LayerNorm'd.

    The final LayerNorm is applied only when
    attention_setting.attention_layer_norm is truthy.
    """

    def __init__(self, d_input, d_model, N, heads, dropout):
        super().__init__()
        self.N = N
        self.layers = get_clones(EncoderLayer(d_input, d_model, heads, dropout), N)
        self.norm = nn.LayerNorm(d_model)

    def forward(self, src, mask=None):
        x = src
        for layer in self.layers:
            x = layer(x, mask)
        if attention_setting.attention_layer_norm:
            return self.norm(x)
        return x
class Decoder(nn.Module):
    """Stack of N identical DecoderLayer blocks, optionally LayerNorm'd.

    Each layer attends over the encoder outputs `e_outputs`; the final
    LayerNorm is applied only when attention_setting.attention_layer_norm
    is truthy.
    """

    def __init__(self, d_input, d_model, N, heads, dropout):
        super().__init__()
        self.N = N
        self.layers = get_clones(DecoderLayer(d_input, d_model, heads, dropout), N)
        self.norm = nn.LayerNorm(d_model)

    def forward(self, trg, e_outputs, src_mask=None, trg_mask=None):
        x = trg
        for layer in self.layers:
            x = layer(x, e_outputs, src_mask, trg_mask)
        if attention_setting.attention_layer_norm:
            return self.norm(x)
        return x
class Transformer(nn.Module):
    """Encoder/decoder attention model with optional CNN post-processing.

    Flags in `attention_setting` select a sequential CNN on the decoder
    output (`add_seq_cnn`) or a parallel CNN on the raw source
    (`add_parallel_cnn`); the assert below forbids enabling both.
    The flattened features are fed to an OutputFeedForward head.
    """

    def __init__(self, d_input, d_model, n_feature_dim, N, heads, dropout, extra_length):
        super().__init__()
        self.encoder = Encoder(n_feature_dim, d_model, N, heads, dropout)
        self.decoder = Decoder(n_feature_dim, d_model, N, heads, dropout)
        #self.linear = nn.Linear()
        self.cnn = customized_CNN()
        # the two CNN modes are mutually exclusive
        assert not attention_setting.add_seq_cnn or not attention_setting.add_parallel_cnn
        if attention_setting.add_seq_cnn:
            # feature length after two conv(pad 1)/pool(stride 2, pad 1)
            # stages with 64 output channels
            d_input = 64 * (((d_input + 2) // 2 + 2) // 2)
            if attention_setting.analysis == 'deepCrispr':
                # 4 extra channels are concatenated onto the decoder output
                # in forward(); the FF head then gets no separate extras
                d_model += 4
                extra_length = 0
        if attention_setting.add_parallel_cnn:
            d_input_1 = d_input * d_model
            d_input_2 = ((64 * (((d_input + 2) // 2 + 2) // 2)) * config.embedding_vec_dim)
            d_input = d_input_1 + d_input_2
            d_model = 1
        self.out = OutputFeedForward(d_model, d_input, extra_length, d_layers=attention_setting.output_FF_layers, dropout=dropout)

    def forward(self, src, trg, extra_input_for_FF=None, src_mask=None, trg_mask=None):
        """Encode src, decode trg, optionally apply a CNN, then the FF head."""
        e_outputs = self.encoder(src, src_mask)
        # print("DECODER")
        d_output = self.decoder(trg, e_outputs, src_mask, trg_mask)
        if attention_setting.add_seq_cnn:
            if extra_input_for_FF is not None and attention_setting.analysis == 'deepCrispr':
                # reshape the extra input to (batch, -1, 4) and append it as
                # additional feature channels before the CNN
                bs = extra_input_for_FF.size(0)
                extra_input_for_FF = extra_input_for_FF.view(bs, -1, 4)
                d_output = cat((d_output, extra_input_for_FF), dim = 2)
            d_output = torch.unsqueeze(d_output, 1)
            d_output = self.cnn(d_output)
        # flatten the last two axes for the feed-forward head
        flat_d_output = d_output.view(-1, d_output.size(-2)*d_output.size(-1))
        if attention_setting.add_parallel_cnn:
            # run the CNN on the raw source in parallel and concatenate
            src = torch.unsqueeze(src, 1)
            inter_output = self.cnn(src).view(src.size(0), -1)
            flat_d_output = cat((inter_output, flat_d_output),dim=1)
        if extra_input_for_FF is not None and attention_setting.analysis != 'deepCrispr':
            # extra features not consumed by the CNN go straight to the head
            flat_d_output = cat((flat_d_output, extra_input_for_FF), dim=1)
        output = self.out(flat_d_output)
        return output
class customized_CNN(nn.Module):
    """Two conv+maxpool stages used to post-process feature maps.

    Expects input of shape (batch, 1, H, W); forward() flattens channels and
    height into one feature axis per spatial column.
    """

    def __init__(self):
        super().__init__()
        self.cnn_1 = nn.Conv2d(1, 32, kernel_size=(3, 1), padding=(1, 0))
        self.maxpool_1 = nn.MaxPool2d(kernel_size=(2, 1), padding=(1, 0))
        self.cnn_2 = nn.Conv2d(32, 64, kernel_size=(3, 1), padding=(1, 0))
        # FIX: the original branched on config.seq_len == 22, but both
        # branches constructed identical pooling layers, so the dead
        # conditional is removed.
        self.maxpool_2 = nn.MaxPool2d(kernel_size=(2, 1), padding=(1, 0))
        # NOTE(review): dropout is defined but never applied in forward() --
        # confirm whether it should be used.
        self.dropout = nn.Dropout(p=attention_setting.cnn_dropout)

    def forward(self, input):
        x = F.relu(self.maxpool_1(self.cnn_1(input)))
        x = F.relu(self.maxpool_2(self.cnn_2(x)))
        # merge (channels, height) into a single feature dimension
        x = x.contiguous().view(x.size(0), -1, x.size(-1) * x.size(-2))
        return x
class OTembeddingTransformer(nn.Module):
    """Transformer over embedded integer features, sliced by feature_len_map.

    feature_len_map holds (start, end) column ranges into the input matrix:
    [0] -> source sequence, [1] -> target sequence (or None to reuse the
    source), [2]/[-1] -> extra features passed straight to the FF head
    (or None). `classifier` is stored but currently does not change the
    output (the softmax lines below are commented out).
    """

    def __init__(self, embedding_vec_dim, d_model, N, heads, dropout, feature_len_map, classifier=False):
        super().__init__()
        self.feature_len_map = feature_len_map
        # width of the extra-feature slice, 0 when absent
        extra_length = 0 if self.feature_len_map[-1] is None else self.feature_len_map[-1][1] - self.feature_len_map[-1][0]
        # sequence length of the source slice
        d_input = self.feature_len_map[0][1] - self.feature_len_map[0][0]
        self.transformer = Transformer(d_input, d_model, embedding_vec_dim, N, heads, dropout, extra_length)
        self.embedding = nn.Embedding(config.embedding_voca_size, embedding_vec_dim)
        self.trg_embedding = nn.Embedding(config.embedding_voca_size, embedding_vec_dim)
        # learned positional embeddings, one per sequence position
        self.embedding_pos = nn.Embedding(d_input, embedding_vec_dim)
        self.trg_embedding_pos = nn.Embedding(d_input, embedding_vec_dim)
        self.dropout = nn.Dropout(p=config.dropout)
        self.classifier = classifier

    def forward(self, input, src_mask=None, trg_mask=None):
        """Slice `input` per feature_len_map, embed, and run the transformer."""
        src = input[:,self.feature_len_map[0][0]: self.feature_len_map[0][1]].long()
        embedded_src = self.embedding(src)
        bs = src.size(0)
        pos_len = src.size(1)
        # position indices 0..pos_len-1 replicated per batch row
        pos = torch.from_numpy(np.array([[i for i in range(pos_len)] for _ in range(bs)]))
        pos = pos.to(OT_crispr_attn.device2)
        embedded_pos = self.embedding_pos(pos)
        embedded_src = embedded_pos + embedded_src
        if self.feature_len_map[1] is not None:
            trg = input[:, self.feature_len_map[1][0]:self.feature_len_map[1][1]].long()
        else:
            # no separate target slice: decode against the source itself
            trg = src
        embedded_trg = self.trg_embedding(trg)
        embedded_pos_trg = self.trg_embedding_pos(pos)
        embedded_trg = embedded_pos_trg + embedded_trg
        embedded_src = self.dropout(embedded_src)
        embedded_trg = self.dropout(embedded_trg)
        extra_input_for_FF = None
        if self.feature_len_map[2] is not None:
            extra_input_for_FF = input[:, self.feature_len_map[2][0]: self.feature_len_map[2][1]]
        output = self.transformer(embedded_src, embedded_trg, extra_input_for_FF=extra_input_for_FF,
                                  src_mask=src_mask, trg_mask=trg_mask)
        if self.classifier:
            # output = F.log_softmax(output, dim = -1)
            #output = F.softmax(output, dim = -1)
            pass
        return output
class EmbeddingTransformer(Transformer):
    """Transformer that embeds integer token sequences before attending.

    When config.sep_len != 0 the source is split into two segments embedded
    with separate tables and re-concatenated. When no target is given the
    (embedded) source is used as the decoder input as well.
    """

    def __init__(self, embedding_vec_dim , d_input, d_model, N, heads, dropout, extra_length):
        super().__init__(d_input, d_model, embedding_vec_dim, N, heads, dropout, extra_length)
        self.embedding = nn.Embedding(config.embedding_voca_size, embedding_vec_dim)
        # second table for the segment after config.sep_len
        self.embedding_2 = nn.Embedding(config.embedding_voca_size, embedding_vec_dim)
        self.trg_embedding = nn.Embedding(config.embedding_voca_size, embedding_vec_dim)
        # positional tables sized to the number of word positions
        self.embedding_pos = nn.Embedding(config.seq_len - config.word_len + 1, embedding_vec_dim)
        self.trg_embedding_pos = nn.Embedding(config.seq_len - config.word_len + 1, embedding_vec_dim)
        self.dropout = nn.Dropout(p = config.dropout)

    def forward(self, src, trg = None, extra_input_for_FF=None, src_mask=None, trg_mask=None):
        """Embed src (and trg, if any), add positions, run the base model."""
        if config.sep_len != 0:
            # embed the two source segments with separate tables
            src_1 = src[:,:config.sep_len]
            src_2 = src[:, config.sep_len:]
            embedded_src = self.embedding(src_1)
            embedded_src_2 = self.embedding_2(src_2)
            embedded_src = cat(tuple([embedded_src, embedded_src_2]), dim=1)
        else:
            embedded_src = self.embedding(src)
        bs = src.size(0)
        pos_length = config.seq_len - config.seq_start - config.word_len + 1
        # position indices 0..pos_length-1 replicated per batch row
        pos = torch.from_numpy(np.array([[i for i in range(pos_length)] for _ in range(bs)]))
        pos = pos.to(crispr_attn.device2)
        embedded_src_pos = self.embedding_pos(pos)
        embedded_src_1 = embedded_src + embedded_src_pos
        embedded_src_2 = self.dropout(embedded_src_1)
        if trg is not None:
            embedded_trg = self.trg_embedding(trg)
            embedded_trg_pos = self.trg_embedding_pos(pos)
            embedded_trg_1 = embedded_trg + embedded_trg_pos
            embedded_trg_2 = self.dropout(embedded_trg_1)
        else:
            # no explicit target: feed the embedded source to the decoder
            embedded_trg_2 = embedded_src_2
        #embedded_src_2 = transpose(embedded_src_2, 1, 2)
        output = super().forward(embedded_src_2, embedded_trg_2, extra_input_for_FF)
        return output
def get_OT_model(feature_len_map, classifier = False):
    """Build an OTembeddingTransformer from the global attention settings.

    Args:
        feature_len_map: per-feature (start, end) column ranges into the
            input matrix (see OTembeddingTransformer).
        classifier: build the model in classifier mode when True.

    Returns:
        An OTembeddingTransformer with Xavier-initialized weight matrices.
    """
    assert attention_setting.d_model % attention_setting.attention_heads == 0
    assert attention_setting.attention_dropout < 1
    # The two original branches were identical except for the classifier
    # flag, so a single construction forwarding `classifier` is equivalent.
    model = OTembeddingTransformer(attention_setting.n_feature_dim, attention_setting.d_model,
                                   attention_setting.n_layers, attention_setting.attention_heads,
                                   attention_setting.attention_dropout, feature_len_map,
                                   classifier = classifier)
    # Xavier-initialize weight matrices; 1-D params (biases) keep defaults
    for p in model.parameters():
        if p.dim() > 1:
            nn.init.xavier_uniform_(p)
    return model
def get_model(inputs_lengths=None, d_input = 20):
    """Build an EmbeddingTransformer configured from the attention settings.

    Args:
        inputs_lengths: unused here; kept for interface compatibility.
        d_input: input feature length passed to the model (default 20).

    Returns:
        An EmbeddingTransformer with Xavier-initialized weight matrices.
    """
    assert attention_setting.d_model % attention_setting.attention_heads == 0
    assert attention_setting.attention_dropout < 1
    #model = Transformer(d_input, attention_setting.d_model, attention_setting.n_feature_dim, attention_setting.n_layers, attention_setting.attention_heads, attention_setting.attention_dropout)
    # number of extra categorical + numerical features fed to the FF head
    extra_feature_length = len(config.extra_categorical_features + config.extra_numerical_features)
    model = EmbeddingTransformer(attention_setting.n_feature_dim, d_input, attention_setting.d_model,
                                 attention_setting.n_layers, attention_setting.attention_heads,
                                 attention_setting.attention_dropout, extra_feature_length)
    # Xavier-initialize weight matrices; 1-D params (biases) keep defaults
    for p in model.parameters():
        if p.dim() > 1:
            nn.init.xavier_uniform_(p)
    # if attention_setting.device == 0:
    #     model = model.cuda()
    return model
|
[
"torch.nn.Dropout",
"copy.deepcopy",
"importlib.import_module",
"Layers.DecoderLayer",
"torch.nn.Embedding",
"torch.nn.Conv2d",
"Layers.EncoderLayer",
"torch.unsqueeze",
"torch.cat",
"torch.nn.init.xavier_uniform_",
"torch.nn.LayerNorm",
"Sublayers.OutputFeedForward",
"torch.nn.functional.relu",
"torch.nn.MaxPool2d"
] |
[((467, 514), 'importlib.import_module', 'importlib.import_module', (["(config_path + 'config')"], {}), "(config_path + 'config')\n", (490, 514), False, 'import importlib\n'), ((535, 593), 'importlib.import_module', 'importlib.import_module', (["(config_path + 'attention_setting')"], {}), "(config_path + 'attention_setting')\n", (558, 593), False, 'import importlib\n'), ((928, 949), 'torch.nn.LayerNorm', 'nn.LayerNorm', (['d_model'], {}), '(d_model)\n', (940, 949), True, 'import torch.nn as nn\n'), ((1393, 1414), 'torch.nn.LayerNorm', 'nn.LayerNorm', (['d_model'], {}), '(d_model)\n', (1405, 1414), True, 'import torch.nn as nn\n'), ((2624, 2740), 'Sublayers.OutputFeedForward', 'OutputFeedForward', (['d_model', 'd_input', 'extra_length'], {'d_layers': 'attention_setting.output_FF_layers', 'dropout': 'dropout'}), '(d_model, d_input, extra_length, d_layers=\n attention_setting.output_FF_layers, dropout=dropout)\n', (2641, 2740), False, 'from Sublayers import Norm, OutputFeedForward\n'), ((4028, 4080), 'torch.nn.Conv2d', 'nn.Conv2d', (['(1)', '(32)'], {'kernel_size': '(3, 1)', 'padding': '(1, 0)'}), '(1, 32, kernel_size=(3, 1), padding=(1, 0))\n', (4037, 4080), True, 'import torch.nn as nn\n'), ((4104, 4152), 'torch.nn.MaxPool2d', 'nn.MaxPool2d', ([], {'kernel_size': '(2, 1)', 'padding': '(1, 0)'}), '(kernel_size=(2, 1), padding=(1, 0))\n', (4116, 4152), True, 'import torch.nn as nn\n'), ((4172, 4225), 'torch.nn.Conv2d', 'nn.Conv2d', (['(32)', '(64)'], {'kernel_size': '(3, 1)', 'padding': '(1, 0)'}), '(32, 64, kernel_size=(3, 1), padding=(1, 0))\n', (4181, 4225), True, 'import torch.nn as nn\n'), ((4446, 4489), 'torch.nn.Dropout', 'nn.Dropout', ([], {'p': 'attention_setting.cnn_dropout'}), '(p=attention_setting.cnn_dropout)\n', (4456, 4489), True, 'import torch.nn as nn\n'), ((4582, 4591), 'torch.nn.functional.relu', 'F.relu', (['x'], {}), '(x)\n', (4588, 4591), True, 'import torch.nn.functional as F\n'), ((4646, 4655), 'torch.nn.functional.relu', 'F.relu', (['x'], {}), 
'(x)\n', (4652, 4655), True, 'import torch.nn.functional as F\n'), ((5300, 5359), 'torch.nn.Embedding', 'nn.Embedding', (['config.embedding_voca_size', 'embedding_vec_dim'], {}), '(config.embedding_voca_size, embedding_vec_dim)\n', (5312, 5359), True, 'import torch.nn as nn\n'), ((5389, 5448), 'torch.nn.Embedding', 'nn.Embedding', (['config.embedding_voca_size', 'embedding_vec_dim'], {}), '(config.embedding_voca_size, embedding_vec_dim)\n', (5401, 5448), True, 'import torch.nn as nn\n'), ((5478, 5518), 'torch.nn.Embedding', 'nn.Embedding', (['d_input', 'embedding_vec_dim'], {}), '(d_input, embedding_vec_dim)\n', (5490, 5518), True, 'import torch.nn as nn\n'), ((5552, 5592), 'torch.nn.Embedding', 'nn.Embedding', (['d_input', 'embedding_vec_dim'], {}), '(d_input, embedding_vec_dim)\n', (5564, 5592), True, 'import torch.nn as nn\n'), ((5616, 5644), 'torch.nn.Dropout', 'nn.Dropout', ([], {'p': 'config.dropout'}), '(p=config.dropout)\n', (5626, 5644), True, 'import torch.nn as nn\n'), ((7377, 7436), 'torch.nn.Embedding', 'nn.Embedding', (['config.embedding_voca_size', 'embedding_vec_dim'], {}), '(config.embedding_voca_size, embedding_vec_dim)\n', (7389, 7436), True, 'import torch.nn as nn\n'), ((7464, 7523), 'torch.nn.Embedding', 'nn.Embedding', (['config.embedding_voca_size', 'embedding_vec_dim'], {}), '(config.embedding_voca_size, embedding_vec_dim)\n', (7476, 7523), True, 'import torch.nn as nn\n'), ((7553, 7612), 'torch.nn.Embedding', 'nn.Embedding', (['config.embedding_voca_size', 'embedding_vec_dim'], {}), '(config.embedding_voca_size, embedding_vec_dim)\n', (7565, 7612), True, 'import torch.nn as nn\n'), ((7642, 7711), 'torch.nn.Embedding', 'nn.Embedding', (['(config.seq_len - config.word_len + 1)', 'embedding_vec_dim'], {}), '(config.seq_len - config.word_len + 1, embedding_vec_dim)\n', (7654, 7711), True, 'import torch.nn as nn\n'), ((7745, 7814), 'torch.nn.Embedding', 'nn.Embedding', (['(config.seq_len - config.word_len + 1)', 'embedding_vec_dim'], {}), 
'(config.seq_len - config.word_len + 1, embedding_vec_dim)\n', (7757, 7814), True, 'import torch.nn as nn\n'), ((7838, 7866), 'torch.nn.Dropout', 'nn.Dropout', ([], {'p': 'config.dropout'}), '(p=config.dropout)\n', (7848, 7866), True, 'import torch.nn as nn\n'), ((647, 668), 'copy.deepcopy', 'copy.deepcopy', (['module'], {}), '(module)\n', (660, 668), False, 'import copy\n'), ((857, 903), 'Layers.EncoderLayer', 'EncoderLayer', (['d_input', 'd_model', 'heads', 'dropout'], {}), '(d_input, d_model, heads, dropout)\n', (869, 903), False, 'from Layers import EncoderLayer, DecoderLayer\n'), ((1322, 1368), 'Layers.DecoderLayer', 'DecoderLayer', (['d_input', 'd_model', 'heads', 'dropout'], {}), '(d_input, d_model, heads, dropout)\n', (1334, 1368), False, 'from Layers import EncoderLayer, DecoderLayer\n'), ((3320, 3348), 'torch.unsqueeze', 'torch.unsqueeze', (['d_output', '(1)'], {}), '(d_output, 1)\n', (3335, 3348), False, 'import torch\n'), ((3535, 3558), 'torch.unsqueeze', 'torch.unsqueeze', (['src', '(1)'], {}), '(src, 1)\n', (3550, 3558), False, 'import torch\n'), ((3650, 3691), 'torch.cat', 'cat', (['(inter_output, flat_d_output)'], {'dim': '(1)'}), '((inter_output, flat_d_output), dim=1)\n', (3653, 3691), False, 'from torch import cat, transpose\n'), ((3809, 3856), 'torch.cat', 'cat', (['(flat_d_output, extra_input_for_FF)'], {'dim': '(1)'}), '((flat_d_output, extra_input_for_FF), dim=1)\n', (3812, 3856), False, 'from torch import cat, transpose\n'), ((4286, 4334), 'torch.nn.MaxPool2d', 'nn.MaxPool2d', ([], {'kernel_size': '(2, 1)', 'padding': '(1, 0)'}), '(kernel_size=(2, 1), padding=(1, 0))\n', (4298, 4334), True, 'import torch.nn as nn\n'), ((4376, 4424), 'torch.nn.MaxPool2d', 'nn.MaxPool2d', ([], {'kernel_size': '(2, 1)', 'padding': '(1, 0)'}), '(kernel_size=(2, 1), padding=(1, 0))\n', (4388, 4424), True, 'import torch.nn as nn\n'), ((10095, 10121), 'torch.nn.init.xavier_uniform_', 'nn.init.xavier_uniform_', (['p'], {}), '(p)\n', (10118, 10121), True, 'import 
torch.nn as nn\n'), ((10975, 11001), 'torch.nn.init.xavier_uniform_', 'nn.init.xavier_uniform_', (['p'], {}), '(p)\n', (10998, 11001), True, 'import torch.nn as nn\n'), ((3252, 3294), 'torch.cat', 'cat', (['(d_output, extra_input_for_FF)'], {'dim': '(2)'}), '((d_output, extra_input_for_FF), dim=2)\n', (3255, 3294), False, 'from torch import cat, transpose\n')]
|
import random
from testcanarybot import objects
from testcanarybot import exceptions
# Copyright 2021 kensoi
class Main(objects.libraryModule):
    """Example library module exposing framework control commands."""

    @objects.priority(commands = ['quit']) # @testcanarybot quit
    async def second(self, tools: objects.tools, package: objects.package):
        """Announce shutdown in the chat, then stop the framework."""
        await tools.api.messages.send(
            random_id = tools.gen_random(),
            peer_id = package.peer_id,
            message = 'выхожу из фреймворка...'
        )
        raise exceptions.Quit("test") # -> to finish your framework (closing all projects that was launched by tppm)

    @objects.priority(commands = ['lib_reload']) # @testcanarybot lib_reload
    async def second2(self, tools: objects.tools, package: objects.package):
        """Announce a reload in the chat, then reload the library."""
        await tools.api.messages.send(
            random_id = tools.gen_random(),
            peer_id = package.peer_id,
            message = 'перезагружаю...'
        )
        raise exceptions.LibraryReload("Reload") # -> framework will reload your library
|
[
"testcanarybot.exceptions.Quit",
"testcanarybot.exceptions.LibraryReload",
"testcanarybot.objects.priority"
] |
[((151, 186), 'testcanarybot.objects.priority', 'objects.priority', ([], {'commands': "['quit']"}), "(commands=['quit'])\n", (167, 186), False, 'from testcanarybot import objects\n'), ((592, 633), 'testcanarybot.objects.priority', 'objects.priority', ([], {'commands': "['lib_reload']"}), "(commands=['lib_reload'])\n", (608, 633), False, 'from testcanarybot import objects\n'), ((482, 505), 'testcanarybot.exceptions.Quit', 'exceptions.Quit', (['"""test"""'], {}), "('test')\n", (497, 505), False, 'from testcanarybot import exceptions\n'), ((936, 970), 'testcanarybot.exceptions.LibraryReload', 'exceptions.LibraryReload', (['"""Reload"""'], {}), "('Reload')\n", (960, 970), False, 'from testcanarybot import exceptions\n')]
|
'''
<NAME> (<EMAIL>)
Department of Physics
University of Bath, UK
May 1st, 2020
Conductance model of an RVLM neuron for use with reservoir computing
using a modified Hodgkin-Huxley framework of ion channel gating.
Model parameters are chosen so as to replicate the behaviour of
the thalamocortical relay neuron presented in Huguenard J, McCormick DA,
Shepherd GM (1997) 'Electrophysiology of the Neuron'.
The neuron model consists of three ionic currents: a passive leak current,
a transient sodium current (NaT), and a potassium current (K). The sodium
current is controlled by an activation gating variable (m) and an
inactivation gating variable (h). The potassium channel is non-inactivating
and is controlld by a single activation gating variable (n).
The full model state x comprises four state variables - the membrane voltage
and the three gating varibales m, h, and n, and is thus described as:
x = [V,m,h,n]
The only state variable that it is possible to measure experimentally is the
membrane voltage. This is the state variable output by the python script.
'''
import scipy as sp
import numpy as np
import matplotlib.pyplot as plt
from scipy.integrate import odeint
# Define constants
TEMP_C = 35  # simulation temperature (deg C), used in the Q10 factor below
FARADAY = 96480  # Faraday constant (C/mol); appears unused in this script
PI = 3.14159265359

# Model duration (ms)
T = 7400
dt = 0.025  # integration / sampling time step (ms)

# Generate array of time points, from zero to T
t = np.arange(0,T,dt)
##############################################################################
# Model Equations of Motion
##############################################################################
# Define functions for gating kinetics of ion channels
# Effect of temperature is accounted for by the Q10 coeff
# FIX: the scipy top-level NumPy aliases (sp.tanh etc.) were deprecated and
# removed from modern SciPy; numpy provides the identical function.
def mm_inf(VV):
    """Steady-state NaT activation m_inf(V); V in mV."""
    return 0.5*(1 + np.tanh((VV - amV1)/ amV2))

def mm_tau(VV):
    """NaT activation time constant (ms), Q10 temperature-corrected."""
    tv = np.tanh((VV - amV1)/ amV3)
    return (tm0 + epsm*(1 - tv*tv)) / 3.0**((TEMP_C-23.5)/10)

def hh_inf(VV):
    """Steady-state NaT inactivation h_inf(V); V in mV."""
    return 0.5*(1 + np.tanh((VV - ahV1)/ ahV2))

def hh_tau(VV):
    """NaT inactivation time constant (ms), Q10 temperature-corrected."""
    tv = np.tanh((VV - ahV1)/ ahV3)
    return (th0 + epsh*(1 - tv*tv)) / 3.0**((TEMP_C-23.5)/10)

def nn_inf(VV):
    """Steady-state K activation n_inf(V); V in mV."""
    return 0.5*(1 + np.tanh((VV - anV1)/ anV2))

def nn_tau(VV):
    """K activation time constant (ms), Q10 temperature-corrected."""
    tv = np.tanh((VV - anV1)/ anV3)
    return (tn0 + epsn*(1 - tv*tv)) / 3.0**((TEMP_C-23.5)/10)
# Define functions for ionic currents (in uA/cm^2)
# Currents correspond to passive leak, delayed-rectifier potassium,
# and transient sodium currents
def I_Leak(VV):
    """Passive leak current (uA/cm^2) at membrane voltage VV (mV)."""
    return gLeak * (VV - EL)

def I_K(VV,nn):
    """Delayed-rectifier potassium current (uA/cm^2); nn is the n gate."""
    return gK * nn**4 * (VV - EK)

def I_NaT(VV,mm,hh):
    """Transient sodium current (uA/cm^2); mm activation, hh inactivation."""
    return gNaT * mm**3 * hh * (VV - ENa)
# Define equations of motion for full neuron state x = [V,m,h,n]
# Use idx to read in correct current stimulation data point
# Function reads in system state and returns its derivative
def dXdt(X,t):
    """Return the time derivative of the state X = [V, m, h, n, idx].

    V is the membrane voltage (mV); m, h, n are the gating variables.
    idx is recomputed from t below, so the value carried in X is ignored.
    """
    VV, mm, hh, nn, idx = X
    # soma membrane surface area (cm^2), cylinder side: length * diameter * pi
    soma_area = soma_len*soma_diam*PI
    # index of the pre-sampled stimulation value for the current time
    idx = int(t/dt)
    # Membrane equation: Cm dV/dt = -(ionic currents) + injected current/area
    dVVdt = (-(I_NaT(VV,mm,hh) + I_K(VV,nn) + I_Leak(VV)) + (i_inj(t) + stim[idx])/soma_area) / Cm
    # first-order gate relaxation toward voltage-dependent steady state
    dmmdt = (mm_inf(VV) - mm)/mm_tau(VV)
    dhhdt = (hh_inf(VV) - hh)/hh_tau(VV)
    dnndt = (nn_inf(VV) - nn)/nn_tau(VV)
    # NOTE(review): returning idx as its own "derivative" makes odeint
    # integrate it; the integrated value is never used since idx is
    # recomputed from t -- confirm this is intentional.
    return dVVdt, dmmdt, dhhdt, dnndt, idx
##############################################################################
# Model Parameters
##############################################################################
# Soma dimensions (cm)
soma_len = 0.01
soma_diam = 0.029/PI

# Define model parameters
# conductances: gX; reversal potentials: EX;
# thresholds: aXV1; membrane capacitance: Cm;
# time constants: tx0, epsx
Cm = 1
# transient sodium (NaT) channel
gNaT = 69
ENa = 41
# delayed-rectifier potassium (K) channel
gK = 6.9
EK = -100
# passive leak
EL = -65
gLeak = 0.465
# NaT activation gate (m): half-activation, slopes, time-constant params
amV1 = -39.92
amV2 = 10
amV3 = 23.39
tm0 = 0.143
epsm = 1.099
# NaT inactivation gate (h)
ahV1 = -65.37
ahV2 = -17.65
ahV3 = 27.22
th0 = 0.701
epsh = 12.90
# K activation gate (n)
anV1 = -34.58
anV2 = 22.17
anV3 = 23.58
tn0 = 1.291
epsn = 4.314
##############################################################################
# Preparing current stimulation to be injected into the neuron
##############################################################################
# Function for injected a current step (uA/cm^2)
# Args: amplitude, init time, final time
def i_inj(t):
    """Step current (uA/cm^2): amplitude `amp` between t_i and t_f.

    Works elementwise when t is a numpy array.
    """
    step_on = amp*(t>t_i)
    step_off = amp*(t>t_f)
    return step_on - step_off
# Function for loading current injection protocol (uA/cm^2)
# Args: file path, amplitude scale (default = 0.02), sample every 'n'th point
def load_stim(name, scale, n):
    """Load a current-injection protocol from a text file (uA/cm^2).

    Keeps every n-th line (1-indexed) of the file, scaled by `scale`.

    Args:
        name: path to the stimulation file, one float per line.
        scale: amplitude scale factor applied to each sample.
        n: keep every n-th sample (downsampling factor).

    Returns:
        List of scaled float samples.
    """
    stim = []
    # FIX: the original called ins.close() inside the `with` block, which is
    # redundant -- the context manager already closes the file.
    with open(name, "r") as ins:
        for count, line in enumerate(ins, start=1):
            if count % n == 0:
                stim.append(scale*(float(line.rstrip('\n'))))
    return stim
# Initialise stim or load external stimulation files
# If not loading in external stim, uncomment line below
#stim = np.zeros(int(2*T/dt))
# Each call returns a python list; '+=' concatenates the files end-to-end.
# Every 20th sample of each 100 kHz file is kept, scaled by 0.02.
stim = load_stim('stim_files/Pstandard_100khz_0.dat', 0.02, 20)
stim += load_stim('stim_files/Pstandard_100khz_1.dat', 0.02, 20)
stim += load_stim('stim_files/Pstandard_100khz_2.dat', 0.02, 20)
stim += load_stim('stim_files/Pstandard_100khz_3.dat', 0.02, 20)
stim += load_stim('stim_files/Pstandard_100khz_4.dat', 0.02, 20)

# Current step (uA/cm^2)
# Define amplitude, init time and end time
amp = 0 #0.003  # amp = 0 disables the step; the commented value is a sample
t_i = 100
t_f = 300

##############################################################################
# Initializing the neuron model
##############################################################################

# Initialize state variable values for t=0: x(0) = [V(0),m(0),h(0),n(0)]
# Default vals correspond to neuron at steady-state resting potential
# Final value in the init array is idx (starts at 0)
init = [-65,0.00742,0.47258,0.06356,0]
##############################################################################
# Running model: forward-integrating the equations of motion
##############################################################################
# Integrate model equations
# Arguments: state derivative, initial neuron state x(0), time point array
# Forward-integrate the equations of motion over the time grid t
X = odeint(dXdt, init, t)

# Define variables to simplify analysis
VV = X[:,0]  # membrane voltage (mV) -- the only observable state variable
mm = X[:,1]  # NaT activation gate
hh = X[:,2]  # NaT inactivation gate
nn = X[:,3]  # K activation gate

# Adding Gaussian error to voltage trace (mV)
sigma_obs = 0.1
obs_error = np.random.normal(0, sigma_obs, len(VV))
VV_obs = VV + obs_error

##############################################################################
# Plotting and saving model output
##############################################################################

# Define total current
# stim is a python list; adding the ndarray i_inj(t) broadcasts elementwise
stimulation = stim[0:len(VV)] + i_inj(t)

# Plotting membrane voltage and stimulation time series
plt.subplot(2,1,1)
plt.plot(t,VV_obs,'k',linewidth=0.8)
plt.ylabel("Membrane Potential (mV)")
plt.subplot(2,1,2)
plt.ylabel("Current (uA)")
plt.plot(t,stimulation,'b',linewidth=0.8)
plt.show()
def _write_column(path, values, fmt='%f\n'):
    """Write one formatted value per line to `path`, closing the file safely."""
    # FIX: the original opened three files without `with`, leaking the handle
    # on any write error, and triplicated the loop; `int(len(VV))` was also
    # redundant. Output bytes are unchanged (including the trailing space in
    # the voltage format).
    with open(path, 'w') as f:
        for v in values:
            f.write(fmt % v)

# Save voltage data (without gaussian noise)
_write_column('output/voltage_clean.csv', VV, fmt='%f \n')
# Save voltage data (with gaussian noise)
_write_column('output/voltage.csv', VV_obs, fmt='%f \n')
# Save current stimulation data
_write_column('output/stimulation.csv', stimulation)
|
[
"matplotlib.pyplot.subplot",
"matplotlib.pyplot.show",
"matplotlib.pyplot.plot",
"scipy.integrate.odeint",
"scipy.tanh",
"numpy.arange",
"matplotlib.pyplot.ylabel"
] |
[((1399, 1418), 'numpy.arange', 'np.arange', (['(0)', 'T', 'dt'], {}), '(0, T, dt)\n', (1408, 1418), True, 'import numpy as np\n'), ((6104, 6125), 'scipy.integrate.odeint', 'odeint', (['dXdt', 'init', 't'], {}), '(dXdt, init, t)\n', (6110, 6125), False, 'from scipy.integrate import odeint\n'), ((6691, 6711), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(2)', '(1)', '(1)'], {}), '(2, 1, 1)\n', (6702, 6711), True, 'import matplotlib.pyplot as plt\n'), ((6711, 6750), 'matplotlib.pyplot.plot', 'plt.plot', (['t', 'VV_obs', '"""k"""'], {'linewidth': '(0.8)'}), "(t, VV_obs, 'k', linewidth=0.8)\n", (6719, 6750), True, 'import matplotlib.pyplot as plt\n'), ((6749, 6786), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Membrane Potential (mV)"""'], {}), "('Membrane Potential (mV)')\n", (6759, 6786), True, 'import matplotlib.pyplot as plt\n'), ((6788, 6808), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(2)', '(1)', '(2)'], {}), '(2, 1, 2)\n', (6799, 6808), True, 'import matplotlib.pyplot as plt\n'), ((6808, 6834), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Current (uA)"""'], {}), "('Current (uA)')\n", (6818, 6834), True, 'import matplotlib.pyplot as plt\n'), ((6836, 6880), 'matplotlib.pyplot.plot', 'plt.plot', (['t', 'stimulation', '"""b"""'], {'linewidth': '(0.8)'}), "(t, stimulation, 'b', linewidth=0.8)\n", (6844, 6880), True, 'import matplotlib.pyplot as plt\n'), ((6881, 6891), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (6889, 6891), True, 'import matplotlib.pyplot as plt\n'), ((1758, 1785), 'scipy.tanh', 'sp.tanh', (['((VV - amV1) / amV2)'], {}), '((VV - amV1) / amV2)\n', (1765, 1785), True, 'import scipy as sp\n'), ((1942, 1969), 'scipy.tanh', 'sp.tanh', (['((VV - ahV1) / ahV2)'], {}), '((VV - ahV1) / ahV2)\n', (1949, 1969), True, 'import scipy as sp\n'), ((2126, 2153), 'scipy.tanh', 'sp.tanh', (['((VV - anV1) / anV2)'], {}), '((VV - anV1) / anV2)\n', (2133, 2153), True, 'import scipy as sp\n'), ((1827, 1854), 'scipy.tanh', 'sp.tanh', (['((VV - 
amV1) / amV3)'], {}), '((VV - amV1) / amV3)\n', (1834, 1854), True, 'import scipy as sp\n'), ((1854, 1881), 'scipy.tanh', 'sp.tanh', (['((VV - amV1) / amV3)'], {}), '((VV - amV1) / amV3)\n', (1861, 1881), True, 'import scipy as sp\n'), ((2011, 2038), 'scipy.tanh', 'sp.tanh', (['((VV - ahV1) / ahV3)'], {}), '((VV - ahV1) / ahV3)\n', (2018, 2038), True, 'import scipy as sp\n'), ((2038, 2065), 'scipy.tanh', 'sp.tanh', (['((VV - ahV1) / ahV3)'], {}), '((VV - ahV1) / ahV3)\n', (2045, 2065), True, 'import scipy as sp\n'), ((2195, 2222), 'scipy.tanh', 'sp.tanh', (['((VV - anV1) / anV3)'], {}), '((VV - anV1) / anV3)\n', (2202, 2222), True, 'import scipy as sp\n'), ((2222, 2249), 'scipy.tanh', 'sp.tanh', (['((VV - anV1) / anV3)'], {}), '((VV - anV1) / anV3)\n', (2229, 2249), True, 'import scipy as sp\n')]
|
#!/usr/bin/env python
import sys
from pymccelib import *
import logging

logging.basicConfig(level=logging.DEBUG, format='%(levelname)-s: %(message)s')


def main():
    """Step 1: load the native PDB, detect chain termini, write pdb lines."""
    env.init()
    protein = Protein()
    protein.load_nativepdb(env.prm["INPDB"])
    # identify N and C terminal residues when enabled in the run parameters
    if env.prm["TERMINALS"].upper() == "T":
        protein.identify_nc()
    # remove exposed water
    # Disulfide bridge
    with open(env.fn_step1_out, "w") as out_handle:
        out_handle.writelines(protein.pdblines())


if __name__ == "__main__":
    main()
|
[
"logging.basicConfig"
] |
[((74, 152), 'logging.basicConfig', 'logging.basicConfig', ([], {'level': 'logging.DEBUG', 'format': '"""%(levelname)-s: %(message)s"""'}), "(level=logging.DEBUG, format='%(levelname)-s: %(message)s')\n", (93, 152), False, 'import logging\n')]
|
#!/usr/bin/env python
import lasagne
from lasagne.layers.conv import Conv2DLayer as Conv2DLayer
from lasagne.layers import MaxPool2DLayer, ConcatLayer, TransposedConv2DLayer
from lasagne.nonlinearities import elu, sigmoid, rectify
from lasagne.layers import batch_norm
from lasagne_wrapper.network import SegmentationNetwork
from lasagne_wrapper.training_strategy import get_binary_segmentation_TrainingStrategy,get_categorical_segmentation_TrainingStrategy
from lasagne_wrapper.batch_iterators import get_batch_iterator
from lasagne_wrapper.learn_rate_shedules import get_stepwise
from lasagne_wrapper.parameter_updates import get_update_momentum
# network wrapper class consumed by the lasagne_wrapper training loop
Network = SegmentationNetwork
# model input shape: (channels, height, width)
INPUT_SHAPE = [1, 256, 256]
# nonlinearity used in every conv block of the encoder/decoder
nonlin = elu
def conv_bn(in_layer, num_filters, filter_size, nonlinearity=rectify, pad='same', name='conv'):
    """Stack a 2D convolution followed by batch normalization onto ``in_layer``.

    Returns the batch-normalized convolution layer.
    """
    conv = Conv2DLayer(
        in_layer,
        num_filters=num_filters,
        filter_size=filter_size,
        nonlinearity=nonlinearity,
        pad=pad,
        name=name,
    )
    return batch_norm(conv)
def build_model():
    """Compile the U-Net style segmentation architecture.

    Three encoder levels with skip connections, a bottleneck, three decoder
    levels, and a 1x1 sigmoid output producing the per-pixel mask.
    """
    l_in = lasagne.layers.InputLayer(shape=(None, INPUT_SHAPE[0], INPUT_SHAPE[1], INPUT_SHAPE[2]), name='Input')
    net = batch_norm(l_in)

    # --- preprocessing ---
    net = conv_bn(net, num_filters=10, filter_size=1, nonlinearity=nonlin, pad='same')
    net = conv_bn(net, num_filters=1, filter_size=1, nonlinearity=nonlin, pad='same', name='color_deconv_preproc')

    # base number of filters; doubled at each deeper encoder level
    nf0 = 16

    # --- encoder (collect skip connections before each pooling) ---
    skips = []
    for level, n_filt in enumerate((nf0, 2 * nf0, 4 * nf0), start=1):
        net = conv_bn(net, num_filters=n_filt, filter_size=3, nonlinearity=nonlin, pad='same')
        net = conv_bn(net, num_filters=n_filt, filter_size=3, nonlinearity=nonlin, pad='same')
        skips.append(net)
        net = MaxPool2DLayer(net, pool_size=2, stride=2, name='pool%d' % level)

    # --- bottleneck ---
    net = conv_bn(net, num_filters=8 * nf0, filter_size=3, nonlinearity=nonlin, pad='same')
    net = conv_bn(net, num_filters=8 * nf0, filter_size=3, nonlinearity=nonlin, pad='same')

    # --- decoder (upsample, concat matching skip, two conv blocks) ---
    for n_filt, skip in ((4 * nf0, skips[2]), (2 * nf0, skips[1]), (nf0, skips[0])):
        net = TransposedConv2DLayer(net, num_filters=n_filt, filter_size=2, stride=2, name='upconv')
        net = ConcatLayer((skip, net), name='concat')
        net = conv_bn(net, num_filters=n_filt, filter_size=3, nonlinearity=nonlin, pad='same')
        net = conv_bn(net, num_filters=n_filt, filter_size=3, nonlinearity=nonlin, pad='same')

    # 1x1 sigmoid output: per-pixel foreground probability
    net = Conv2DLayer(net, num_filters=1, filter_size=1, nonlinearity=sigmoid, pad='same', name='segmentation')
    return net
# prepare training strategy
# binary segmentation objective; step-wise LR decay (halved every 1000 steps
# per get_stepwise) starting from 0.2, SGD with momentum 0.9, no L2 penalty
train_strategy = get_binary_segmentation_TrainingStrategy(batch_size=2, max_epochs=1000, samples_per_epoch=250, patience=300,
                                                          ini_learning_rate=0.2, L2=None, use_weights=False,
                                                          adapt_learn_rate=get_stepwise(k=1000, factor=0.5),
                                                          update_function=get_update_momentum(0.9),
                                                          valid_batch_iter=get_batch_iterator(),
                                                          train_batch_iter=get_batch_iterator())
|
[
"lasagne_wrapper.learn_rate_shedules.get_stepwise",
"lasagne.layers.ConcatLayer",
"lasagne.layers.InputLayer",
"lasagne.layers.TransposedConv2DLayer",
"lasagne.layers.MaxPool2DLayer",
"lasagne.layers.conv.Conv2DLayer",
"lasagne_wrapper.batch_iterators.get_batch_iterator",
"lasagne_wrapper.parameter_updates.get_update_momentum",
"lasagne.layers.batch_norm"
] |
[((898, 1020), 'lasagne.layers.conv.Conv2DLayer', 'Conv2DLayer', (['in_layer'], {'num_filters': 'num_filters', 'filter_size': 'filter_size', 'nonlinearity': 'nonlinearity', 'pad': 'pad', 'name': 'name'}), '(in_layer, num_filters=num_filters, filter_size=filter_size,\n nonlinearity=nonlinearity, pad=pad, name=name)\n', (909, 1020), True, 'from lasagne.layers.conv import Conv2DLayer as Conv2DLayer\n'), ((1059, 1079), 'lasagne.layers.batch_norm', 'batch_norm', (['in_layer'], {}), '(in_layer)\n', (1069, 1079), False, 'from lasagne.layers import batch_norm\n'), ((1170, 1275), 'lasagne.layers.InputLayer', 'lasagne.layers.InputLayer', ([], {'shape': '(None, INPUT_SHAPE[0], INPUT_SHAPE[1], INPUT_SHAPE[2])', 'name': '"""Input"""'}), "(shape=(None, INPUT_SHAPE[0], INPUT_SHAPE[1],\n INPUT_SHAPE[2]), name='Input')\n", (1195, 1275), False, 'import lasagne\n'), ((1283, 1299), 'lasagne.layers.batch_norm', 'batch_norm', (['l_in'], {}), '(l_in)\n', (1293, 1299), False, 'from lasagne.layers import batch_norm\n'), ((1858, 1915), 'lasagne.layers.MaxPool2DLayer', 'MaxPool2DLayer', (['net1'], {'pool_size': '(2)', 'stride': '(2)', 'name': '"""pool1"""'}), "(net1, pool_size=2, stride=2, name='pool1')\n", (1872, 1915), False, 'from lasagne.layers import MaxPool2DLayer, ConcatLayer, TransposedConv2DLayer\n'), ((2130, 2187), 'lasagne.layers.MaxPool2DLayer', 'MaxPool2DLayer', (['net1'], {'pool_size': '(2)', 'stride': '(2)', 'name': '"""pool2"""'}), "(net1, pool_size=2, stride=2, name='pool2')\n", (2144, 2187), False, 'from lasagne.layers import MaxPool2DLayer, ConcatLayer, TransposedConv2DLayer\n'), ((2402, 2459), 'lasagne.layers.MaxPool2DLayer', 'MaxPool2DLayer', (['net1'], {'pool_size': '(2)', 'stride': '(2)', 'name': '"""pool3"""'}), "(net1, pool_size=2, stride=2, name='pool3')\n", (2416, 2459), False, 'from lasagne.layers import MaxPool2DLayer, ConcatLayer, TransposedConv2DLayer\n'), ((2683, 2775), 'lasagne.layers.TransposedConv2DLayer', 'TransposedConv2DLayer', (['net1'], {'num_filters': 
'(4 * nf0)', 'filter_size': '(2)', 'stride': '(2)', 'name': '"""upconv"""'}), "(net1, num_filters=4 * nf0, filter_size=2, stride=2,\n name='upconv')\n", (2704, 2775), False, 'from lasagne.layers import MaxPool2DLayer, ConcatLayer, TransposedConv2DLayer\n'), ((2783, 2821), 'lasagne.layers.ConcatLayer', 'ConcatLayer', (['(p3, net1)'], {'name': '"""concat"""'}), "((p3, net1), name='concat')\n", (2794, 2821), False, 'from lasagne.layers import MaxPool2DLayer, ConcatLayer, TransposedConv2DLayer\n'), ((3022, 3114), 'lasagne.layers.TransposedConv2DLayer', 'TransposedConv2DLayer', (['net1'], {'num_filters': '(2 * nf0)', 'filter_size': '(2)', 'stride': '(2)', 'name': '"""upconv"""'}), "(net1, num_filters=2 * nf0, filter_size=2, stride=2,\n name='upconv')\n", (3043, 3114), False, 'from lasagne.layers import MaxPool2DLayer, ConcatLayer, TransposedConv2DLayer\n'), ((3122, 3160), 'lasagne.layers.ConcatLayer', 'ConcatLayer', (['(p2, net1)'], {'name': '"""concat"""'}), "((p2, net1), name='concat')\n", (3133, 3160), False, 'from lasagne.layers import MaxPool2DLayer, ConcatLayer, TransposedConv2DLayer\n'), ((3361, 3450), 'lasagne.layers.TransposedConv2DLayer', 'TransposedConv2DLayer', (['net1'], {'num_filters': 'nf0', 'filter_size': '(2)', 'stride': '(2)', 'name': '"""upconv"""'}), "(net1, num_filters=nf0, filter_size=2, stride=2, name=\n 'upconv')\n", (3382, 3450), False, 'from lasagne.layers import MaxPool2DLayer, ConcatLayer, TransposedConv2DLayer\n'), ((3457, 3495), 'lasagne.layers.ConcatLayer', 'ConcatLayer', (['(p1, net1)'], {'name': '"""concat"""'}), "((p1, net1), name='concat')\n", (3468, 3495), False, 'from lasagne.layers import MaxPool2DLayer, ConcatLayer, TransposedConv2DLayer\n'), ((3688, 3795), 'lasagne.layers.conv.Conv2DLayer', 'Conv2DLayer', (['net1'], {'num_filters': '(1)', 'filter_size': '(1)', 'nonlinearity': 'sigmoid', 'pad': '"""same"""', 'name': '"""segmentation"""'}), "(net1, num_filters=1, filter_size=1, nonlinearity=sigmoid, pad=\n 'same', 
name='segmentation')\n", (3699, 3795), True, 'from lasagne.layers.conv import Conv2DLayer as Conv2DLayer\n'), ((4148, 4180), 'lasagne_wrapper.learn_rate_shedules.get_stepwise', 'get_stepwise', ([], {'k': '(1000)', 'factor': '(0.5)'}), '(k=1000, factor=0.5)\n', (4160, 4180), False, 'from lasagne_wrapper.learn_rate_shedules import get_stepwise\n'), ((4256, 4280), 'lasagne_wrapper.parameter_updates.get_update_momentum', 'get_update_momentum', (['(0.9)'], {}), '(0.9)\n', (4275, 4280), False, 'from lasagne_wrapper.parameter_updates import get_update_momentum\n'), ((4357, 4377), 'lasagne_wrapper.batch_iterators.get_batch_iterator', 'get_batch_iterator', ([], {}), '()\n', (4375, 4377), False, 'from lasagne_wrapper.batch_iterators import get_batch_iterator\n'), ((4454, 4474), 'lasagne_wrapper.batch_iterators.get_batch_iterator', 'get_batch_iterator', ([], {}), '()\n', (4472, 4474), False, 'from lasagne_wrapper.batch_iterators import get_batch_iterator\n')]
|
#!/usr/bin/python
import sys
import os
import numpy as np
import pandas as pd
import argparse
import tensorflow as tf
from importlib.machinery import SourceFileLoader
import math
import psutil
import time
from scipy.sparse import csr_matrix
import gc
import matplotlib
matplotlib.use('Agg')
import scimpute
def learning_curve_mse(epoch_log, mse_batch_vec, mse_valid_vec, stage, skip=1):
    """Plot the MSE learning curve and dump its values to ``./<stage>/mse.csv``.

    Parameters
    ----------
    epoch_log : epochs at which the metrics were recorded
    mse_batch_vec : per-epoch MSE on the last training batch
    mse_valid_vec : per-epoch MSE on the validation sample
    stage : output directory name ('step1' or 'step2')
    skip : plot every ``skip``-th recorded point
    """
    print('> plotting learning curves')
    scimpute.learning_curve(
        epoch_log, mse_batch_vec, mse_valid_vec,
        title="Learning Curve MSE.{}".format(stage),
        ylabel='MSE (X vs Y, nz)',
        dir=stage,
        skip=skip
    )
    rows = np.asarray(list(zip(epoch_log, mse_batch_vec, mse_valid_vec)))
    curve_df = pd.DataFrame(
        data=rows,
        index=epoch_log,
        columns=['Epoch', 'MSE_batch', 'MSE_valid'],
    ).set_index('Epoch')
    curve_df.to_csv("./{}/mse.csv".format(stage))
#def learning_curve_mse_nz(skip=1):
def learning_curve_mse_nz(epoch_log, mse_nz_batch_vec, mse_nz_valid_vec, stage, skip=1):
    """Plot the non-zero MSE learning curve and dump values to ``./<stage>/mse_nz.csv``.

    Parameters
    ----------
    epoch_log : epochs at which the metrics were recorded
    mse_nz_batch_vec : per-epoch non-zero MSE on the last training batch
    mse_nz_valid_vec : per-epoch non-zero MSE on the validation sample
    stage : output directory name ('step1' or 'step2')
    skip : plot every ``skip``-th recorded point
    """
    print('> plotting learning curves')
    scimpute.learning_curve(
        epoch_log, mse_nz_batch_vec, mse_nz_valid_vec,
        title="Learning Curve MSE_NZ.{}".format(stage),
        ylabel='MSE_NZ (X vs Y, nz)',
        dir=stage,
        skip=skip
    )
    rows = np.asarray(list(zip(epoch_log, mse_nz_batch_vec, mse_nz_valid_vec)))
    curve_df = pd.DataFrame(
        data=rows,
        index=epoch_log,
        columns=['Epoch', 'MSE_NZ_batch', 'MSE_NZ_valid'],
    ).set_index('Epoch')
    curve_df.to_csv("./{}/mse_nz.csv".format(stage))
def fast_imputation(sess, h, X, pIn_holder, pHidden_holder, input_data, gene_ids, cell_ids):
    """Run the trained network once with dropout disabled (keep-prob 1).

    Returns the imputed matrix as a DataFrame indexed by cell and gene IDs.
    """
    imputed = sess.run(h, feed_dict={X: input_data, pIn_holder: 1, pHidden_holder: 1})
    return pd.DataFrame(data=imputed, columns=gene_ids, index=cell_ids)
#def save_whole_imputation:
def save_whole_imputation(sess, X, h, a_bottleneck, pIn_holder,pHidden_holder, input_matrix, gene_ids, cell_ids, p, m):
    ''' calculate and save imputation results for an input matrix at the 'impute' mode. If the number
    of cells is larger than a threshold (large_size: 1e5), save results of m//p.sample_size 'folds'.

    Parameters
    ----------
    sess: live TF session with trained variables
    X, h, a_bottleneck: input placeholder, output tensor, bottleneck tensor
    pIn_holder, pHidden_holder: dropout keep-prob placeholders (fed 1 here)
    input_matrix: csr matrix of cells x genes
    gene_ids, cell_ids: column/row labels for the output frames
    p: parameter object (large_size, sample_size, stage)
    m: number of cells (rows) in input_matrix

    Side effects: writes imputation and latent-code files under ./<p.stage>/
    (CSV when batched, HD5 otherwise); returns nothing.
    '''
    if m > p.large_size:
        #impute on small data blocks to avoid high memory cost
        n_out_batches = m//p.sample_size
        print('num_out_batches:', n_out_batches)
        handle2 = open('./{}/latent_code.{}.csv'.format(p.stage, p.stage), 'w')
        with open('./{}/imputation.{}.csv'.format(p.stage, p.stage), 'w') as handle:
            # n_out_batches+1 iterations: the final, possibly shorter batch
            # covers the m % sample_size remainder (end_idx clamped to m)
            for i_ in range(n_out_batches+1):
                start_idx = i_*p.sample_size
                end_idx = min((i_+1)*p.sample_size, m)
                print('saving:', start_idx, end_idx)
                x_out_batch = input_matrix[start_idx:end_idx, :].todense()
                y_out_batch = sess.run(
                    h,
                    feed_dict={
                        X: x_out_batch,
                        pIn_holder: 1, pHidden_holder: 1
                    }
                )
                df_out_batch = pd.DataFrame(
                    data=y_out_batch,
                    columns=gene_ids,
                    index=cell_ids[range(start_idx, end_idx)]
                )
                latent_code = sess.run(
                    a_bottleneck,
                    feed_dict={
                        X: x_out_batch,
                        pIn_holder: 1, pHidden_holder: 1
                    }
                )
                latent_code_df = pd.DataFrame(
                    data=latent_code,
                    index=cell_ids[range(start_idx, end_idx)]
                )
                # only the first chunk writes the CSV header row
                if i_ == 0:
                    df_out_batch.to_csv(handle, float_format='%.6f')
                    latent_code_df.to_csv(handle2, float_format='%.6f')
                    print('RAM usage during mini-batch imputation and saving output: ',
                          '{} M'.format(usage()))
                else:
                    df_out_batch.to_csv(handle, header=None)
                    latent_code_df.to_csv(handle2, header=None)
        handle2.close()
    else: # if m the # of cells is less than large_size (1e5))
        # small enough to impute in one pass and store as HD5
        Y_input_arr = sess.run(h, feed_dict={X: input_matrix.todense(),
                                             pIn_holder: 1, pHidden_holder: 1})
        # save sample imputation
        Y_input_df = pd.DataFrame(data=Y_input_arr,
                                  columns=gene_ids,
                                  index=cell_ids)
        latent_code = sess.run(a_bottleneck, feed_dict={X: input_matrix.todense(),
                                             pIn_holder: 1, pHidden_holder: 1})
        latent_code_df = pd.DataFrame(data=latent_code,
                                  index=cell_ids)
        print('RAM usage during whole data imputation and saving output: ',
              '{} M'.format(usage()))
        scimpute.save_hd5(Y_input_df, "{}/imputation.{}.hd5".format(p.stage,
                                                                   p.stage))
        scimpute.save_hd5(latent_code_df, "{}/latent_code.{}.hd5".format(p.stage,
                                                                   p.stage))
def visualize_weight(sess, stage, w_name, b_name):
    """Plot one layer's weight matrix and bias vector as heatmaps.

    ``w_name``/``b_name`` are *strings* naming module-level tensors
    (e.g. 'e_w1'); they are resolved via eval() against this module's
    globals, so build_late() must have run first to define them.
    """
    w = eval(w_name)
    b = eval(b_name)
    w_arr = sess.run(w)
    b_arr = sess.run(b)
    # bias comes back as a flat vector; reshape to a column, then transpose
    # to a single row for plotting
    b_arr = b_arr.reshape(len(b_arr), 1)
    b_arr_T = b_arr.T
    scimpute.visualize_weights_biases(w_arr, b_arr_T,
                                      '{},{}.{}'.format(w_name, b_name, stage),
                                      dir=stage)
def visualize_weights(sess, stage, en_de_layers):
    """Render weight/bias heatmaps for every encoder and decoder layer."""
    for layer_idx in range(1, en_de_layers + 1):
        suffix = str(layer_idx)
        # encoder layer first, then the matching decoder layer
        visualize_weight(sess, stage, 'e_w' + suffix, 'e_b' + suffix)
        visualize_weight(sess, stage, 'd_w' + suffix, 'd_b' + suffix)
def save_weights(sess, stage, en_de_layers):
    """Dump every encoder/decoder weight and bias tensor to ``<stage>/*.npy``.

    NOTE(review): the layer variables are looked up by *name* via eval()
    against this module's globals, so build_late() must have populated
    e_w*/e_b*/d_w*/d_b* before this is called.
    """
    print('save weights in npy')
    for l1 in range(1, en_de_layers+1):
        encoder_weight_name = 'e_w'+str(l1)
        encoder_bias_name = 'e_b'+str(l1)
        decoder_bias_name = 'd_b'+str(l1)
        decoder_weight_name = 'd_w'+str(l1)
        np.save('{}/{}.{}'.format(stage, encoder_weight_name, stage),
                sess.run(eval(encoder_weight_name)))
        np.save('{}/{}.{}'.format(stage, decoder_weight_name, stage),
                sess.run(eval(decoder_weight_name)))
        np.save('{}/{}.{}'.format(stage, encoder_bias_name, stage),
                sess.run(eval(encoder_bias_name)))
        np.save('{}/{}.{}'.format(stage, decoder_bias_name, stage),
                sess.run(eval(decoder_bias_name)))
def usage():
    """Return this process's resident memory in mebibytes, rounded to 1 dp."""
    rss_bytes = psutil.Process(os.getpid()).memory_info()[0]
    return round(rss_bytes / float(2 ** 20), 1)
# sys.path.append('./bin')
# print('sys.path', sys.path)
#print('python version:', sys.version)
#print('tf.__version__', tf.__version__)
def late_main(p, log_dir, rand_state=3):
    """Drive the full LATE workflow: load/split data, build the graph,
    train (or just impute), and periodically snapshot results.

    Parameters
    ----------
    p: parameter object (see load_params) carrying data, architecture and
       optimization settings
    log_dir: directory for TensorBoard summaries and model checkpoints
    rand_state: seed intended for the graph builder (note: build_late is
       currently called with the literal 3, not this argument)

    Side effects: writes split indexes, learning curves, imputation
    snapshots, weight dumps/plots and checkpoints under ./<p.stage>/.
    """
    ##0. read data and extract gene IDs and cell IDs
    input_matrix, gene_ids, cell_ids = read_data(p)
    ##1. split data and save indexes
    #input p, input_matrix, cell_ids
    #return cell_ids_train, cell_ids_valid, cell_ids_test
    m, n = input_matrix.shape
    input_train, input_valid, input_test, train_idx, valid_idx, test_idx = \
        scimpute.split__csr_matrix(input_matrix, a=p.a, b=p.b, c=p.c)
    cell_ids_train = cell_ids[train_idx]
    cell_ids_valid = cell_ids[valid_idx]
    cell_ids_test = cell_ids[test_idx]
    np.savetxt('{}/train.{}_index.txt'.format(p.stage, p.stage), cell_ids_train, fmt='%s')
    np.savetxt('{}/valid.{}_index.txt'.format(p.stage, p.stage), cell_ids_valid, fmt='%s')
    np.savetxt('{}/test.{}_index.txt'.format(p.stage, p.stage), cell_ids_test, fmt='%s')
    print('RAM usage after splitting input data is: {} M'.format(usage()))
    # todo: for backward support for older parameter files only
    # sample_size is 1000 in default; if sample_size is less than the number of cells (m),
    # we reconstruct the training and validation sets by randomly sampling.
    try:
        p.sample_size
        sample_size = p.sample_size
    except:
        sample_size = int(9e4)
    if sample_size < m:
        # fixed seed so the evaluation samples are reproducible across runs
        np.random.seed(1)
        rand_idx = np.random.choice(
            range(len(cell_ids_train)), min(sample_size, len(cell_ids_train)))
        sample_train = input_train[rand_idx, :].todense()
        sample_train_cell_ids = cell_ids_train[rand_idx]
        rand_idx = np.random.choice(
            range(len(cell_ids_valid)), min(sample_size, len(cell_ids_valid)))
        sample_valid = input_valid[rand_idx, :].todense()
        sample_valid_cell_ids = cell_ids_valid[rand_idx]
        #?? the following sample_input is a matrix sampled randomly, and should it be a matrix containing
        # sample_training and sample_valid
        rand_idx = np.random.choice(range(m), min(sample_size, m))
        sample_input = input_matrix[rand_idx, :].todense()
        sample_input_cell_ids = cell_ids[rand_idx]
        del rand_idx
        gc.collect()
        # re-randomize the global RNG so later shuffles differ between runs
        np.random.seed()
    else:
        sample_input = input_matrix.todense()
        sample_train = input_train.todense()
        sample_valid = input_valid.todense()
        sample_input_cell_ids = cell_ids
        sample_train_cell_ids = cell_ids_train
        sample_valid_cell_ids = cell_ids_valid
    print('len of sample_train: {}, sample_valid: {}, sample_input {}'.format(
        len(sample_train_cell_ids), len(sample_valid_cell_ids), len(sample_input_cell_ids)
    ))
    ##2. model training and validation
    #2.1 init --> keep this in the main
    tf.reset_default_graph()
    # define placeholders and variables
    X = tf.placeholder(tf.float32, [None, n], name='X_input') # input
    pIn_holder = tf.placeholder(tf.float32, name='p.pIn') #keep_prob for dropout
    pHidden_holder = tf.placeholder(tf.float32, name='p.pHidden')#keep_prob for dropout
    #2.2 define layers and variables
    # input p, X, pIn_holder, pHidden_holder, n
    # return a_bottleneck, h(d_a1)
    a_bottleneck, h = build_late(X, pHidden_holder, pIn_holder, p, n, rand_state = 3)
    #2.3 define loss
    # input X, h, p
    # return mse_nz, mse, reg_term
    mse_nz, mse, reg_term = build_metrics(X, h, p.reg_coef)
    #2.4 costruct the trainer --> keep this section in the main
    optimizer = tf.train.AdamOptimizer(p.learning_rate)
    if p.mse_mode in ('mse_omega', 'mse_nz'):
        print('training on mse_nz')
        trainer = optimizer.minimize(mse_nz + reg_term)
    elif p.mse_mode == 'mse':
        print('training on mse')
        trainer = optimizer.minimize(mse + reg_term)
    else:
        raise Exception('mse_mode spelled wrong')
    #2.5 Init a session accoding to the run_flag
    sess = tf.Session()
    # restore variables
    saver = tf.train.Saver()
    if p.run_flag == 'load_saved':
        print('*** In TL Mode')
        saver.restore(sess, "./step1/step1.ckpt")
    elif p.run_flag == 'rand_init':
        print('*** In Rand Init Mode')
        init = tf.global_variables_initializer()
        sess.run(init)
    elif p.run_flag == 'impute':
        # impute-only path: restore step2 weights, write outputs, and exit
        print('*** In impute mode loading "step2.ckpt"..')
        saver.restore(sess, './step2/step2.ckpt')
        p.max_training_epochs = 0
        p.learning_rate = 0.0
        ## save_whole_imputation
        save_whole_imputation(sess, X, h, a_bottleneck, pIn_holder,
                              pHidden_holder, input_matrix, gene_ids,
                              cell_ids, p, m)
        print('imputation finished')
        #toc_stop = time.time()
        #print("reading took {:.1f} seconds".format(toc_stop - tic_start))
        exit()
    else:
        raise Exception('run_flag err')
    # define tensor_board writer
    batch_writer = tf.summary.FileWriter(log_dir + '/batch', sess.graph)
    valid_writer = tf.summary.FileWriter(log_dir + '/valid', sess.graph)
    # prep mini-batch, and reporter vectors
    num_batch = int(math.floor(len(train_idx) // p.batch_size)) # floor
    epoch_log = []
    mse_nz_batch_vec, mse_nz_valid_vec = [], [] #, mse_nz_train_vec = [], [], []
    mse_batch_vec, mse_valid_vec = [], [] # mse = MSE(X, h)
    #msej_batch_vec, msej_valid_vec = [], [] # msej = MSE(X, h), for genej, nz_cells
    print('RAM usage after building the model is: {} M'.format(usage()))
    epoch = 0
    #2.6. pre-training epoch (0)
    #save imputation results before training steps
    print("Evaluation: epoch{}".format(epoch))
    epoch_log.append(epoch)
    mse_train, mse_nz_train = sess.run([mse, mse_nz], feed_dict={X: sample_train,pHidden_holder: 1.0, pIn_holder: 1.0})
    mse_valid, mse_nz_valid = sess.run([mse, mse_nz],feed_dict={X: sample_valid,pHidden_holder: 1.0, pIn_holder: 1.0})
    print("mse_nz_train=", round(mse_nz_train, 3), "mse_nz_valid=",round(mse_nz_valid, 3))
    print("mse_train=", round(mse_train, 3),"mse_valid=", round(mse_valid, 3))
    mse_batch_vec.append(mse_train)
    mse_valid_vec.append(mse_valid)
    mse_nz_batch_vec.append(mse_nz_train)
    mse_nz_valid_vec.append(mse_nz_valid)
    #2.7. training epochs (1-)
    for epoch in range(1, p.max_training_epochs+1):
        tic_cpu, tic_wall = time.clock(), time.time()
        # fresh random permutation of the training cells each epoch
        ridx_full = np.random.choice(len(train_idx), len(train_idx), replace=False)
        #2.7.1 training model on mini-batches
        for i in range(num_batch):
            # x_batch
            indices = np.arange(p.batch_size * i, p.batch_size*(i+1))
            ridx_batch = ridx_full[indices]
            # x_batch = df1_train.ix[ridx_batch, :]
            x_batch = input_train[ridx_batch, :].todense()
            sess.run(trainer, feed_dict={X: x_batch,
                                         pIn_holder: p.pIn,
                                         pHidden_holder: p.pHidden})
        toc_cpu, toc_wall = time.clock(), time.time()
        #2.7.2 save the results of epoch 1 and all display steps (epochs)
        if (epoch == 1) or (epoch % p.display_step == 0):
            tic_log = time.time()
            print('#Epoch {} took: {} CPU seconds; {} Wall seconds'.format(
                epoch, round(toc_cpu - tic_cpu, 2), round(toc_wall - tic_wall, 2)
            ))
            print('num-mini-batch per epoch: {}, till now: {}'.format(i+1, epoch*(i+1)))
            print('RAM usage: {:0.1f} M'.format(usage()))
            # debug
            # print('d_w1', sess.run(d_w1[1, 0:4])) # verified when GradDescent used
            # training mse and mse_nz of the last batch
            mse_batch, mse_nz_batch, h_batch = sess.run(
                [mse, mse_nz, h],
                feed_dict={X: x_batch, pHidden_holder: 1.0, pIn_holder: 1.0}
            )
            # validation mse and mse_nz of the sample validation set (1000)
            mse_valid, mse_nz_valid, Y_valid = sess.run(
                [mse, mse_nz, h],
                feed_dict={X: sample_valid, pHidden_holder: 1.0, pIn_holder: 1.0}
            )
            toc_log = time.time()
            print('mse_nz_batch:{}; mse_omage_valid: {}'.
                  format(mse_nz_batch, mse_nz_valid))
            print('mse_batch:', mse_batch, '; mse_valid:', mse_valid)
            print('log time for each epoch: {}\n'.format(round(toc_log - tic_log, 1)))
            mse_batch_vec.append(mse_batch)
            mse_valid_vec.append(mse_valid)
            mse_nz_batch_vec.append(mse_nz_batch)
            mse_nz_valid_vec.append(mse_nz_valid)
            epoch_log.append(epoch)
        #2.7.3 save snapshot step
        if (epoch % p.snapshot_step == 0) or (epoch == p.max_training_epochs):
            tic_log2 = time.time()
            #1.save imputation results
            #if the input matrix is large (m > p.large_size), only save the
            #imputation results of a small sample set (sample_input)
            print("> Impute and save.. ")
            if m > p.large_size:
                Y_input_df = fast_imputation(sess, h, X, pIn_holder, pHidden_holder, sample_input, gene_ids, sample_input_cell_ids)
                scimpute.save_hd5(Y_input_df, "{}/sample_imputation.{}.hd5".format(p.stage,
                                                                   p.stage))
            else:
                Y_input_df = fast_imputation(sess, h, X, pIn_holder, pHidden_holder, input_matrix.todense(), gene_ids, cell_ids)
                scimpute.save_hd5(Y_input_df, "{}/imputation.{}.hd5".format(p.stage,
                                                                   p.stage))
            #2.save model
            print('> Saving model..')
            save_path = saver.save(sess, log_dir + "/{}.ckpt".format(p.stage))
            print("Model saved in: %s" % save_path)
            #3.save the training and test curve
            if p.mse_mode in ('mse_nz', 'mse_omega'):
                #learning_curve_mse_nz(skip=math.floor(epoch / 5 / p.display_step))
                learning_curve_mse_nz(epoch_log, mse_nz_batch_vec, mse_nz_valid_vec,
                                      p.stage, skip=math.floor(epoch / 5 / p.display_step))
            elif p.mse_mode == 'mse':
                #learning_curve_mse(skip=math.floor(epoch / 5 / p.display_step))
                learning_curve_mse(epoch_log, mse_batch_vec, mse_valid_vec, p.stage,
                                   skip=math.floor(epoch / 5 / p.display_step))
            #4.save save_bottleneck_representation
            print("> save bottleneck_representation")
            code_bottleneck_input = sess.run(a_bottleneck,
                                             feed_dict={
                                                 X: sample_input,
                                                 pIn_holder: 1,
                                                 pHidden_holder: 1})
            np.save('{}/code_neck_valid.{}.npy'.format(p.stage, p.stage),
                    code_bottleneck_input)
            #save_weights()
            save_weights(sess, p.stage, en_de_layers=p.l)
            #visualize_weights()
            visualize_weights(sess, p.stage, en_de_layers=p.l)
            toc_log2 = time.time()
            log2_time = round(toc_log2 - tic_log2, 1)
            min_mse_valid = min(mse_nz_valid_vec)
            # os.system(
            #     '''for file in {0}/*npy
            #     do python -u weight_clustmap.py $file {0}
            #     done'''.format(p.stage)
            # )
            print('min_mse_nz_valid till now: {}'.format(min_mse_valid))
            print('snapshot_step: {}s'.format(log2_time))
    batch_writer.close()
    valid_writer.close()
    sess.close()
def build_late(X, pHidden_holder, pIn_holder, p, n, rand_state = 3):
    """Build the LATE autoencoder graph with 3, 5 or 7 layers (set by p.L).

    Parameters
    ----------
    X: input placeholder of shape [None, n]
    pHidden_holder: keep-prob placeholder for hidden-layer dropout
    pIn_holder: keep-prob placeholder for input-layer dropout
    p: parameter object providing L, n_hidden_*, sd
    n: input/output dimensionality (number of genes)
    rand_state: TensorFlow graph-level random seed

    Returns
    -------
    a_bottleneck: activation of the innermost (bottleneck) encoder layer
    h: reconstructed output, i.e. the last decoder activation (d_a1)

    Note: weight/bias variables are published as module-level globals
    (e_w1, d_b1, ...) so save_weights()/visualize_weight() can reach them
    by name via eval().
    """
    #5.2 define layers and variables
    # input p, X, pIn_holder, pHidden_holder, n
    # return a_bottleneck, h(d_a1)
    tf.set_random_seed(rand_state) # seed
    global e_w1, e_b1, e_a1, e_w2, e_b2, e_a2, e_w3, e_b3, e_a3
    global d_w1, d_b1, d_a1, d_w2, d_b2, d_a2, d_w3, d_b3, d_a3
    if p.L == 7:
        # change with layer
        with tf.name_scope('Encoder_L1'):
            e_w1, e_b1 = scimpute.weight_bias_variable('encoder1', n, p.n_hidden_1, p.sd)
            e_a1 = scimpute.dense_layer('encoder1', X, e_w1, e_b1, pIn_holder)
        with tf.name_scope('Encoder_L2'):
            e_w2, e_b2 = scimpute.weight_bias_variable('encoder2', p.n_hidden_1, p.n_hidden_2, p.sd)
            e_a2 = scimpute.dense_layer('encoder2', e_a1, e_w2, e_b2, pHidden_holder)
        with tf.name_scope('Encoder_L3'):
            e_w3, e_b3 = scimpute.weight_bias_variable('encoder3', p.n_hidden_2, p.n_hidden_3, p.sd)
            e_a3 = scimpute.dense_layer('encoder3', e_a2, e_w3, e_b3, pHidden_holder)
        # # with tf.name_scope('Encoder_L4'):
        # #     e_w4, e_b4 = scimpute.weight_bias_variable('encoder4', p.n_hidden_3, p.n_hidden_4, p.sd)
        # #     e_a4 = scimpute.dense_layer('encoder4', e_a3, e_w4, e_b4, pHidden_holder)
        # # with tf.name_scope('Decoder_L4'):
        # #     d_w4, d_b4 = scimpute.weight_bias_variable('decoder4', p.n_hidden_4, p.n_hidden_3, p.sd)
        # #     d_a4 = scimpute.dense_layer('decoder4', e_a4, d_w4, d_b4, pHidden_holder)
        with tf.name_scope('Decoder_L3'):
            d_w3, d_b3 = scimpute.weight_bias_variable('decoder3', p.n_hidden_3, p.n_hidden_2, p.sd)
            d_a3 = scimpute.dense_layer('decoder3', e_a3, d_w3, d_b3, pHidden_holder)
        with tf.name_scope('Decoder_L2'):
            d_w2, d_b2 = scimpute.weight_bias_variable('decoder2', p.n_hidden_2, p.n_hidden_1, p.sd)
            d_a2 = scimpute.dense_layer('decoder2', d_a3, d_w2, d_b2, pHidden_holder)
        with tf.name_scope('Decoder_L1'):
            d_w1, d_b1 = scimpute.weight_bias_variable('decoder1', p.n_hidden_1, n, p.sd)
            d_a1 = scimpute.dense_layer('decoder1', d_a2, d_w1, d_b1, pHidden_holder) # todo: change input activations if model changed
        # define input/output
        a_bottleneck = e_a3
    elif p.L == 5:
        # change with layer
        with tf.name_scope('Encoder_L1'):
            e_w1, e_b1 = scimpute.weight_bias_variable('encoder1', n, p.n_hidden_1, p.sd)
            e_a1 = scimpute.dense_layer('encoder1', X, e_w1, e_b1, pIn_holder)
        with tf.name_scope('Encoder_L2'):
            e_w2, e_b2 = scimpute.weight_bias_variable('encoder2', p.n_hidden_1, p.n_hidden_2, p.sd)
            e_a2 = scimpute.dense_layer('encoder2', e_a1, e_w2, e_b2, pHidden_holder)
        with tf.name_scope('Decoder_L2'):
            d_w2, d_b2 = scimpute.weight_bias_variable('decoder2', p.n_hidden_2, p.n_hidden_1, p.sd)
            d_a2 = scimpute.dense_layer('decoder2', e_a2, d_w2, d_b2, pHidden_holder)
        with tf.name_scope('Decoder_L1'):
            d_w1, d_b1 = scimpute.weight_bias_variable('decoder1', p.n_hidden_1, n, p.sd)
            d_a1 = scimpute.dense_layer('decoder1', d_a2, d_w1, d_b1, pHidden_holder) # todo: change input activations if model changed
        # define input/output
        a_bottleneck = e_a2
    elif p.L == 3:
        # change with layer
        with tf.name_scope('Encoder_L1'):
            e_w1, e_b1 = scimpute.weight_bias_variable('encoder1', n, p.n_hidden_1, p.sd)
            e_a1 = scimpute.dense_layer('encoder1', X, e_w1, e_b1, pIn_holder)
        with tf.name_scope('Decoder_L1'):
            d_w1, d_b1 = scimpute.weight_bias_variable('decoder1', p.n_hidden_1, n, p.sd)
            d_a1 = scimpute.dense_layer('decoder1', e_a1, d_w1, d_b1,
                                        pHidden_holder) # todo: change input activations if model changed
        # define input/output
        a_bottleneck = e_a1
    else:
        raise Exception("{} L not defined, only 3, 5, 7 implemented".format(p.L))
    h = d_a1
    return a_bottleneck, h
def build_metrics(X, h, coef):
    """Build loss/metric tensors comparing input X with reconstruction h.

    Parameters
    ----------
    X: placeholder holding the (zero-inflated) input expression matrix
    h: network output (imputed matrix), same shape as X
    coef: regularization coefficient applied to mean(h**2)

    Returns
    -------
    mse_nz: MSE restricted to the non-zero entries of X
    mse: plain MSE over all entries
    reg_term: coef * mean(h**2)
    """
    with tf.name_scope("Metrics"):
        # build the squared-error tensor once and reuse it; the original
        # constructed identical pow/reduce_mean ops three times
        sq_err = tf.pow(X - h, 2)
        # mask keeping only entries observed (non-zero) in X;
        # 0 if 0, 1 if > 0; not possibly < 0 in our data
        omega = tf.sign(X)
        mse_nz = tf.reduce_mean(tf.multiply(sq_err, omega))
        mse = tf.reduce_mean(sq_err)
        reg_term = tf.reduce_mean(tf.pow(h, 2)) * coef
        tf.summary.scalar('mse_nz__Y_vs_X', mse_nz)
        tf.summary.scalar('mse__Y_vs_X', mse)
    return mse_nz, mse, reg_term
def load_params(mode, infile):
    '''Load './global_params.py' and configure it for the requested run mode.

    Parameters
    ----------
    mode: one of 'pre-training' | 'translate' | 'late' | 'impute' | 'analysis';
        any other value returns p with p.mode == 'invalid'
    infile: path to the input expression matrix, stored as p.fname_input

    Returns
    -------
    p: the loaded parameter module with stage/run_flag/learning_rate set
       according to `mode` (and test-mode overrides when p.test_flag is set)
    '''
    # local import: spec-based loading replaces the deprecated (and removed
    # in newer Pythons) Loader.load_module() API
    import importlib.util

    cwd = os.getcwd()
    param_file = 'global_params.py'
    # slice the extension off explicitly: the previous rstrip('.py') idiom
    # strips a *character set* from the end, not a suffix, and only worked
    # here by accident
    param_name = param_file[:-len('.py')]
    spec = importlib.util.spec_from_file_location(param_name,
                                                  cwd + '/' + param_file)
    p = importlib.util.module_from_spec(spec)
    spec.loader.exec_module(p)
    p.fname_input = infile
    p.mode = mode
    if mode == 'pre-training':
        # step1/rand_init for pre-training on reference
        p.stage = 'step1'
        p.run_flag = 'rand_init'
        p.learning_rate = 3e-4  # step1: 3e-4 for 3-7L, 3e-5 for 9L
    elif mode == 'translate':
        # step2/load_saved from step1, for transfer learning
        p.stage = 'step2'  # step1/step2 (not others)
        p.run_flag = 'load_saved'  # rand_init/load_saved
        p.learning_rate = 3e-5  # step2: 3e-5 for 3-7L, 3e-6 for 9L
    elif mode == 'late':
        # step2/rand_init for one step training
        p.stage = 'step2'
        p.run_flag = 'rand_init'
        p.learning_rate = 3e-4  # step1: 3e-4 for 3-7L, 3e-5 for 9L
    elif mode == 'impute':
        # step2/load_saved/learning_rate=0, just impute and output
        p.stage = 'impute'
        p.run_flag = 'impute'
        p.learning_rate = 0.0
    elif mode == 'analysis':
        p.tag = 'Eval'
        p.stage = 'Eval'
    else:
        print('The mode you entered cannot be recognized.')
        print('Valid mode options: pre-training | late | translate | impute | analysis')
        p.mode = 'invalid'
        # early return: test-mode overrides don't apply to an invalid mode
        return p

    if p.test_flag:
        # shrink the problem so a full run finishes quickly
        p.max_training_epochs = 10  # 3L:100, 5L:1000, 7L:1000, 9L:3000
        p.display_step = 1  # interval on learning curve
        p.snapshot_step = 5  # interval of saving session, imputation
        p.m = 1000
        p.n = 300
        p.sample_size = int(240)
        print('in test mode\n',
              'num-genes set to {}, num-cells set to {}\n'.format(p.n, p.m),
              'sample size set to {}'.format(p.sample_size))

    return p
# to do: modify to display based on mode
#
def display_params(p):
    """Print the run configuration carried by parameter object ``p``.

    Which fields are printed depends on p.mode: the training modes
    ('pre-training'/'late'/'translate') show the data split and all
    hyper-parameters; 'analysis' shows the imputation / ground-truth
    file settings instead.
    """
    # PRINT PARAMETERS
    print('\nmode:', p.mode)
    print('\nData:')
    print('fname_input:', p.fname_input)
    print('name_input:', p.name_input)
    print('ori_input:', p.ori_input)
    print('transformation_input:', p.transformation_input)
    if (p.mode == 'pre-training') or (p.mode == 'late') or (p.mode == 'translate'):
        print('data split: [{}/{}/{}]'.format(p.a, p.b, p.c))
        print('\nParameters:')
        print('mse_mode:', p.mse_mode)
        print('stage:', p.stage)
        print('init:', p.run_flag)
        print('test_mode:', p.test_flag)
        print('total number of layers: {}'.format(p.L))
        # layer widths are stored as p.n_hidden_1, p.n_hidden_2, ... ; the
        # names are resolved dynamically via eval()
        for l_tmp in range(1, p.l+1):
            print("n_hidden{}: {}".format(l_tmp, eval('p.n_hidden_'+str(l_tmp))))
        print('learning_rate:', p.learning_rate)
        print('reg_coef:', p.reg_coef)
        print('batch_size:', p.batch_size)
        print('sample_size: ', p.sample_size)  # fixed label typo ('sample_zie')
        print('pIn:', p.pIn)
        print('pHidden:', p.pHidden)
        print('max_training_epochs:', p.max_training_epochs)
        print('display_step', p.display_step)
        print('snapshot_step', p.snapshot_step)
    elif p.mode == 'analysis':
        print('fname_imputation:', p.fname_imputation)
        print('transformation_imputation', p.transformation_imputation)
        print('fname_ground_truth: ', p.fname_ground_truth)
        print('transformation_ground_truth', p.transformation_ground_truth)
        print('gene_pair_list: ', p.gene_pair_list)
    print('\n')
def read_data(p):
    '''READ DATA
    Load the expression matrix named by ``p.fname_input``, apply the
    configured transformation, and return it sparse (cells in rows).

    Two input paths are supported:
    * files ending in 'h5': large 10x-genomics sparse HDF5, kept sparse
      throughout to bound memory
    * anything else (hd5/csv/csv.gz): read into a DataFrame, transformed,
      then converted to CSR

    Parameters
    ------------
    p: parameter namespace (fname_input, ori_input, transformation_input,
       genome_input, test_flag, m, n, ...)

    Return
    -----------
    (input_matrix, gene_ids, cell_ids): CSR matrix [cells, genes] plus ids
    '''
    print('>READING DATA..')
    print('RAM usage before reading data: {} M'.format(usage()))
    if p.fname_input.endswith('h5'):
        # for 10x genomics large h5 files
        input_obj = scimpute.read_sparse_matrix_from_h5(p.fname_input, p.genome_input,
                p.ori_input)
        input_matrix = input_obj.matrix
        gene_ids = input_obj.gene_ids
        cell_ids = input_obj.barcodes
        print('RAM usage after reading sparse matrix: {} M'.format(usage()))
        gc.collect()
        # Data Transformation
        print('> DATA TRANSFORMATION..')
        input_matrix = scimpute.sparse_matrix_transformation(input_matrix,
                p.transformation_input)
        del input_obj  # free the wrapper object before the next allocation
        gc.collect()
        print('RAM usage after {} transformation: {} M'.format(p.transformation_input,
                usage()))
        # Test or not: m*n subset (1000 * 300). Delete later
        if p.test_flag:
            print('in test mode')
            input_matrix = input_matrix[:p.m, :p.n]
            gene_ids = gene_ids[:p.n]
            cell_ids = cell_ids[:p.m]
            gc.collect()
    else:
        # For smaller files (hd5, csv, csv.gz)
        input_df = scimpute.read_data_into_cell_row(p.fname_input, p.ori_input)
        print('RAM usage after reading input_df: {} M'.format(usage()))
        # Data Transformation
        print('> DATA TRANSFORMATION..')
        input_df = scimpute.df_transformation(
            input_df.transpose(),
            transformation=p.transformation_input
        ).transpose()  # [genes, cells] in df_trans()
        print('pandas input_df mem usage: ')
        input_df.info(memory_usage='deep')
        # Test or not
        if p.test_flag:
            print('in test mode')
            # .iloc (positional): the .ix indexer was removed in pandas 1.0
            input_df = input_df.iloc[:p.m, :p.n]
            gc.collect()
        # To sparse
        input_matrix = csr_matrix(input_df)  # todo: directly read into csr, get rid of input_df
        gene_ids = input_df.columns
        cell_ids = input_df.index
        print('RAM usage before deleting input_df: {} M'.format(usage()))
        del input_df
        gc.collect()  # working on mac
        print('RAM usage after deleting input_df: {} M'.format(usage()))
    # Summary of data
    print("name_input:", p.name_input)
    _ = pd.DataFrame(data=input_matrix[:20, :4].todense(), index=cell_ids[:20],
                     columns=gene_ids[:4])
    print("input_df:\n", _, "\n")
    m, n = input_matrix.shape  # m: n_cells; n: n_genes
    print('input_matrix: {} cells, {} genes\n'.format(m, n))
    return input_matrix, gene_ids, cell_ids
def load_results(p):
    '''READ DATA
    Load the input, the imputation result and the ground truth for analysis,
    apply the configured transformations, and align G and X to Y.

    Parameters
    ------------
    p: parameters from global_params.py and example.py
    Return
    -----------
    X: input data matrix; genes in columns (same below)
    Y: imputed data matrix
    G: ground truth
    '''
    # X comes back already transformed by read_data()
    X, gene_ids, cell_ids = read_data(p)
    X = pd.DataFrame(data=X.todense(), index=cell_ids,
                     columns=gene_ids)
    Y = scimpute.read_data_into_cell_row(p.fname_imputation, p.ori_imputation)
    Y = scimpute.df_transformation(Y.transpose(), transformation=p.transformation_imputation).transpose()
    if p.fname_input == p.fname_ground_truth:
        # input doubles as ground truth; reuse the already-transformed X
        G = X
    else:
        G = scimpute.read_data_into_cell_row(p.fname_ground_truth, p.ori_ground_truth)
        G = scimpute.df_transformation(G.transpose(), transformation=p.transformation_ground_truth).transpose()
    # subset/sort X, G to match Y
    # todo: support sparse matrix
    X = X.loc[Y.index, Y.columns]
    G = G.loc[Y.index, Y.columns]
    # TEST MODE OR NOT -- positional subset (.iloc; .ix was removed in pandas 1.0)
    if p.test_flag:
        print('in test mode')
        Y = Y.iloc[0:p.m, 0:p.n]
        G = G.iloc[0:p.m, 0:p.n]
        X = X.iloc[0:p.m, 0:p.n]
    # INPUT SUMMARY
    print('\nIn this code, matrices should have already been transformed into cell_row')
    print('Y (imputation):', p.fname_imputation, p.ori_imputation, p.transformation_imputation, '\n', Y.iloc[0:20, 0:3])
    print('X (input):', p.fname_input, p.ori_input, p.transformation_input, '\n', X.iloc[0:20, 0:3])
    print('G (ground truth):', p.fname_ground_truth, p.ori_ground_truth, p.transformation_ground_truth, '\n', G.iloc[0:20, 0:3])
    print('Y.shape', Y.shape)
    print('X.shape', X.shape)
    print('G.shape', G.shape)
    return X, Y, G
def calculate_MSEs(X, Y, G):
    '''calculate MSEs
    MSE between imputation and input
    MSE between imputation and ground truth

    Both the standard MSE and the non-zero-masked MSE (computed only over
    entries that are non-zero in the reference) are reported and printed.

    Parameters
    ------------
    X: input data matrix; genes in columns (same below)
    Y: imputed data matrix
    G: ground truth
    Return
    -----------
    (mse1_nz, mse1, mse2_nz, mse2): each rounded to 7 decimal places
    '''
    print('\n> MSE Calculation')
    max_y, min_y = scimpute.max_min_element_in_arrs([Y.values])
    print('Max in Y is {}, Min in Y is {}'.format(max_y, min_y))  # fixed missing space
    max_g, min_g = scimpute.max_min_element_in_arrs([G.values])
    print('Max in G is {}, Min in G is {}'.format(max_g, min_g))  # fixed missing space
    mse1_nz = round(scimpute.mse_omega(Y, X), 7)
    print('MSE1_NZ between Imputation and Input: ', mse1_nz)
    mse1 = round(scimpute.mse(Y, X), 7)
    print('MSE1 between Imputation and Input: ', mse1)
    mse2_nz = round(scimpute.mse_omega(Y, G), 7)
    print('MSE2_NZ between Imputation and Ground_truth: ', mse2_nz)
    mse2 = round(scimpute.mse(Y, G), 7)
    print('MSE2 between Imputation and Ground_truth: ', mse2)
    return mse1_nz, mse1, mse2_nz, mse2
def analyze_variation_in_genes(X, Y, G, p):
    '''Per-gene standard-deviation analysis.

    Computes the non-zero standard deviation of every gene in the input,
    imputation and ground-truth matrices, plots histograms of the SDs and
    of the imputation/input and imputation/ground-truth SD ratios, and
    writes both ratio tables to CSV in the working directory.

    Parameters
    ------------
    X: input data matrix; genes in columns (same below)
    Y: imputed data matrix
    G: ground truth
    p: parameters
    Return
    -----------
    None
    '''
    print('\n calculating standard deviation in each gene for input and imputed matrix')
    sd_x, sd_y = scimpute.nz_std(X, Y)
    sd_x, sd_g = scimpute.nz_std(X, G)  # purpose: compare G with Y

    def _sd_ratio(numer, denom):
        # element-wise ratio; None where the denominator SD is zero
        return [(a / b if b != 0 else None) for a, b in zip(numer, denom)]

    ratio_yx = pd.DataFrame(data=_sd_ratio(sd_y.values, sd_x.values),
                              index=X.columns, columns=['sd_ratio'])
    ratio_yg = pd.DataFrame(data=_sd_ratio(sd_y.values, sd_g.values),
                              index=X.columns, columns=['sd_ratio'])
    sd_lo = min(sd_y.min(), sd_x.min(), sd_g.min())
    sd_hi = max(sd_y.max(), sd_x.max(), sd_g.max())
    print('generating histograms of standard deviations')
    scimpute.hist_df(sd_y, xlab='Standard Deviation',
                     title='Imputation({})'.format(p.name_imputation),
                     range=(sd_lo, sd_hi), dir=p.tag)
    scimpute.hist_df(sd_x, xlab='Standard Deviation',
                     title='Input({})'.format(p.name_input),
                     range=(sd_lo, sd_hi), dir=p.tag)
    scimpute.hist_df(sd_g, xlab='Standard Deviation',
                     title='Ground Truth({})'.format(p.name_input),
                     range=(sd_lo, sd_hi), dir=p.tag)
    scimpute.hist_df(ratio_yx, xlab='Ratio of Imputation SD vs Input SD',
                     title='', range=(sd_lo, sd_hi), dir=p.tag)
    scimpute.hist_df(ratio_yg, xlab='Ratio of Imputation SD vs Ground Truth SD',
                     title='', range=(sd_lo, sd_hi), dir=p.tag)
    ratio_yx.to_csv('sd_ratio_imputed_vs_input.csv')
    ratio_yg.to_csv('sd_ratio_imputed_vs_groundtruth.csv')
def visualize_all_genes(X, Y, G, p):
    ''' generate plots using all genes

    Produces, for input, imputation and ground truth alike:
    expression histograms on a shared value range, gene-wise and cell-wise
    correlation histograms (imputation vs ground truth), heatmaps on a
    shared colour range, and PCA / t-SNE embeddings.

    Parameters
    ------------
    X: input data matrix; genes in columns (same below)
    Y: imputed data matrix
    G: ground truth
    p: parameters
    Return
    -----------
    None
    '''
    # histograms of gene expression
    # shared min/max across all three matrices so the panels are comparable
    max_expression = max(G.values.max(), X.values.max(), Y.values.max())
    min_expression = min(G.values.min(), X.values.min(), Y.values.min())
    print('\n max expression:', max_expression)
    print('\n min expression:', min_expression)
    scimpute.hist_df(
        Y, xlab='Expression', title='Imputation({})'.format(p.name_imputation),
        dir=p.tag, range=[min_expression, max_expression])
    scimpute.hist_df(
        X, xlab='Expression', title='Input({})'.format(p.name_input),
        dir=p.tag, range=[min_expression, max_expression])
    scimpute.hist_df(
        G, xlab='Expression', title='Ground Truth({})'.format(p.name_ground_truth),
        dir=p.tag, range=[min_expression, max_expression])
    # histograms of correlations between genes in imputation and ground truth
    # and of correlations between cells in imputation and ground truth
    # when ground truth is not provide,
    # input is used as ground truth
    print('\n> Correlations between ground truth and imputation')
    print('ground truth dimension: ', G.shape, 'imputation dimension: ', Y.shape)
    print('generating histogram for correlations of genes between ground truth and imputation')
    scimpute.hist_2matrix_corr(
        G.values, Y.values,
        title="Correlation for each gene\n(Ground_truth vs Imputation)\n{}\n{}".
        format(p.name_ground_truth, p.name_imputation),
        dir=p.tag, mode='column-wise', nz_mode='first'  # or ignore
    )
    print('generating histogram for correlations of cells between ground truth and imputation')
    scimpute.hist_2matrix_corr(
        G.values, Y.values,
        title="Correlation for each cell\n(Ground_truth vs Imputation)\n{}\n{}".
        format(p.name_ground_truth, p.name_imputation),
        dir=p.tag, mode='row-wise', nz_mode='first'
    )
    # heatmaps of data matrices
    # shared vmax/vmin across the three heatmaps so colours are comparable
    print('\n> Generating heatmaps of data matrices')
    range_max, range_min = scimpute.max_min_element_in_arrs([Y.values, G.values, X.values])
    print('\nrange:', range_max, ' ', range_min)
    scimpute.heatmap_vis(Y.values,
                         title='Imputation ({})'.format(p.name_imputation),
                         xlab='Genes', ylab='Cells', vmax=range_max, vmin=range_min, dir=p.tag)
    scimpute.heatmap_vis(X.values,
                         title='Input ({})'.format(p.name_input),
                         xlab='Genes', ylab='Cells', vmax=range_max, vmin=range_min, dir=p.tag)
    scimpute.heatmap_vis(G.values,
                         title='Ground_truth ({})'.format(p.name_ground_truth),
                         xlab='Genes', ylab='Cells', vmax=range_max, vmin=range_min, dir=p.tag)
    # PCA and tSNE plots
    # optional cluster labels (cell-row file) colour the embeddings
    print('\n> Generating PCA and tSNE plots')
    if p.cluster_file is not None:
        cluster_info = scimpute.read_data_into_cell_row(p.cluster_file)
        # cluster_info = cluster_info.astype('str')
    else:
        cluster_info = None
    scimpute.pca_tsne(df_cell_row=Y, cluster_info=cluster_info,
                      title=p.name_imputation, dir=p.tag)
    scimpute.pca_tsne(df_cell_row=X, cluster_info=cluster_info,
                      title=p.name_input, dir=p.tag)
    scimpute.pca_tsne(df_cell_row=G, cluster_info=cluster_info,
                      title=p.name_ground_truth, dir=p.tag)
def _scatter_selected_genes(G, Y, X, gene_list, gene_dir, truth_label):
    """For each gene: scatter ground truth vs imputation and vs input.

    truth_label is interpolated into the plot titles ('Ground Truth' for the
    continuous pass, 'Ground_truth' for the discretized pass, matching the
    historical output filenames). Genes missing from any matrix are skipped
    with a message.
    """
    for j in gene_list:
        try:
            print('for ', j)
            # label-based column selection (.loc; .ix was removed in pandas 1.0)
            Y_j = Y.loc[:, j]
            G_j = G.loc[:, j]
            X_j = X.loc[:, j]
        except KeyError:
            print('KeyError: gene ID does not exist')
            continue
        scimpute.scatterplot2(G_j, Y_j, range='same',
                              title='{}\n({} vs Imputation) '.format(j, truth_label),
                              xlabel='Ground Truth',
                              ylabel='Imputation',
                              dir=gene_dir
                              )
        scimpute.scatterplot2(G_j, X_j, range='same',
                              title='{}\n({} vs Input) '.format(j, truth_label),
                              xlabel='Ground Truth',
                              ylabel='Input',
                              dir=gene_dir
                              )


def visualize_selected_genes(X, Y, G, p):
    ''' generate plots for genes specified by the user

    Pairwise gene-vs-gene scatter plots for every pair in p.gene_pair_list,
    then per-gene scatter plots of ground truth against imputation/input;
    both are repeated after log10-discretizing the imputation.

    Parameters
    ------------
    X: input data matrix; genes in columns (same below)
    Y: imputed data matrix
    G: ground truth
    p: parameters
    Return
    -----------
    None
    '''
    gene_pair_dir = p.tag + '/pairs'
    List = p.gene_pair_list
    print("\n> Scatterplots of selected gene pairs")  # fixed ">n>" typo
    scimpute.gene_pair_plot(Y, list=List, tag='(Imputation)', dir=gene_pair_dir)
    scimpute.gene_pair_plot(X, list=List, tag='(Input)', dir=gene_pair_dir)
    scimpute.gene_pair_plot(G, list=List, tag='(Ground_truth)', dir=gene_pair_dir)
    print("\n> Scatterplots for selected genes")
    print("ground truth vs imputation, ground truth vs input")
    # flatten the pair list into a list of genes
    gene_list = [gene for pair in List for gene in pair]
    _scatter_selected_genes(G, Y, X, gene_list, p.tag + '/genes', 'Ground Truth')
    # Discretize gene expression values
    # and re-generate pairwise plots
    Y = scimpute.df_exp_discretize_log10(Y)
    print('\n> Discrete gene pair relationship in imputation')
    scimpute.gene_pair_plot(Y, list=List, tag='(Imputation Discrete) ',
                            dir=p.tag + '/pairs_discrete')
    print("\n> Discrete imputation vs ground truth")
    _scatter_selected_genes(G, Y, X, gene_list, p.tag + '/genes_discrete', 'Ground_truth')
def result_analysis_main(p):
    '''Run the full post-imputation analysis pipeline.

    Parameters
    ------------
    p: parameters from global_params.py and example.py
    Return
    -----------
    None
    '''
    # load the imputation result alongside the input and ground-truth data
    X, Y, G = load_results(p)
    # report imputation error against both input and ground truth
    calculate_MSEs(X, Y, G)
    # per-gene variation statistics and histograms
    analyze_variation_in_genes(X, Y, G, p)
    # global visualisations: histograms, heatmaps, PCA / t-SNE
    visualize_all_genes(X, Y, G, p)
    # scatter plots for the user-selected gene pairs
    visualize_selected_genes(X, Y, G, p)
def parse_args(argv):
    """Parse the command-line options for this tool.

    Parameters
    ----------
    argv: list of argument strings, typically sys.argv[1:]

    Returns
    -------
    argparse.Namespace with attributes ``mode`` and ``infile``
    (each None when the flag is absent)
    """
    cli = argparse.ArgumentParser(description='Help information')
    cli.add_argument('-mode', help='mode options: pre-training | late | translate | impute | analysis')
    cli.add_argument('-infile', help='file path of input data')
    return cli.parse_args(argv)
if __name__ == '__main__':
    # Entry point: parse CLI arguments, load the mode-specific parameter
    # set, prepare a fresh log folder, read the data, then run the LATE
    # pipeline and report wall time.
    ##1. load parameter module and use name 'p'
    #print("Usage: python late.py -mode <late> -infile <xx.hd5>")
    argms = parse_args(sys.argv[1:])
    p = load_params(argms.mode, argms.infile)
    if p.mode =='invalid':
        # load_params marks unrecognized modes as 'invalid'; abort early
        exit(0)
    ##2. refresh folder
    log_dir = './{}'.format(p.stage)
    scimpute.refresh_logfolder(log_dir)
    tic_start = time.time()
    #3. load data
    input_matrix, gene_ids, cell_ids = read_data(p)
    #4. call late
    late_main(input_matrix, gene_ids, cell_ids, p, log_dir, rand_state = 3)
    toc_stop = time.time()
    time_finish = round((toc_stop - tic_start), 2)
    print("Imputation Finished!")
    print("Wall Time Used: {} seconds".format(time_finish))
|
[
"numpy.random.seed",
"argparse.ArgumentParser",
"tensorflow.reset_default_graph",
"scimpute.max_min_element_in_arrs",
"gc.collect",
"scimpute.read_data_into_cell_row",
"numpy.arange",
"scimpute.read_sparse_matrix_from_h5",
"pandas.DataFrame",
"scimpute.weight_bias_variable",
"scimpute.split__csr_matrix",
"tensorflow.sign",
"tensorflow.set_random_seed",
"time.clock",
"tensorflow.placeholder",
"importlib.machinery.SourceFileLoader",
"tensorflow.summary.FileWriter",
"scimpute.gene_pair_plot",
"tensorflow.name_scope",
"scimpute.sparse_matrix_transformation",
"scimpute.pca_tsne",
"scimpute.mse",
"scimpute.refresh_logfolder",
"tensorflow.train.Saver",
"tensorflow.summary.scalar",
"tensorflow.global_variables_initializer",
"scimpute.mse_omega",
"scimpute.nz_std",
"scimpute.df_exp_discretize_log10",
"tensorflow.Session",
"matplotlib.use",
"scipy.sparse.csr_matrix",
"scimpute.hist_df",
"scimpute.dense_layer",
"os.getpid",
"os.getcwd",
"math.floor",
"tensorflow.pow",
"time.time",
"tensorflow.train.AdamOptimizer"
] |
[((269, 290), 'matplotlib.use', 'matplotlib.use', (['"""Agg"""'], {}), "('Agg')\n", (283, 290), False, 'import matplotlib\n'), ((2503, 2567), 'pandas.DataFrame', 'pd.DataFrame', ([], {'data': 'Y_input_arr', 'columns': 'gene_ids', 'index': 'cell_ids'}), '(data=Y_input_arr, columns=gene_ids, index=cell_ids)\n', (2515, 2567), True, 'import pandas as pd\n'), ((8291, 8352), 'scimpute.split__csr_matrix', 'scimpute.split__csr_matrix', (['input_matrix'], {'a': 'p.a', 'b': 'p.b', 'c': 'p.c'}), '(input_matrix, a=p.a, b=p.b, c=p.c)\n', (8317, 8352), False, 'import scimpute\n'), ((10418, 10442), 'tensorflow.reset_default_graph', 'tf.reset_default_graph', ([], {}), '()\n', (10440, 10442), True, 'import tensorflow as tf\n'), ((10485, 10538), 'tensorflow.placeholder', 'tf.placeholder', (['tf.float32', '[None, n]'], {'name': '"""X_input"""'}), "(tf.float32, [None, n], name='X_input')\n", (10499, 10538), True, 'import tensorflow as tf\n'), ((10562, 10602), 'tensorflow.placeholder', 'tf.placeholder', (['tf.float32'], {'name': '"""p.pIn"""'}), "(tf.float32, name='p.pIn')\n", (10576, 10602), True, 'import tensorflow as tf\n'), ((10645, 10689), 'tensorflow.placeholder', 'tf.placeholder', (['tf.float32'], {'name': '"""p.pHidden"""'}), "(tf.float32, name='p.pHidden')\n", (10659, 10689), True, 'import tensorflow as tf\n'), ((11109, 11148), 'tensorflow.train.AdamOptimizer', 'tf.train.AdamOptimizer', (['p.learning_rate'], {}), '(p.learning_rate)\n', (11131, 11148), True, 'import tensorflow as tf\n'), ((11479, 11491), 'tensorflow.Session', 'tf.Session', ([], {}), '()\n', (11489, 11491), True, 'import tensorflow as tf\n'), ((11522, 11538), 'tensorflow.train.Saver', 'tf.train.Saver', ([], {}), '()\n', (11536, 11538), True, 'import tensorflow as tf\n'), ((12337, 12390), 'tensorflow.summary.FileWriter', 'tf.summary.FileWriter', (["(log_dir + '/batch')", 'sess.graph'], {}), "(log_dir + '/batch', sess.graph)\n", (12358, 12390), True, 'import tensorflow as tf\n'), ((12407, 12460), 
'tensorflow.summary.FileWriter', 'tf.summary.FileWriter', (["(log_dir + '/valid')", 'sess.graph'], {}), "(log_dir + '/valid', sess.graph)\n", (12428, 12460), True, 'import tensorflow as tf\n'), ((18266, 18296), 'tensorflow.set_random_seed', 'tf.set_random_seed', (['rand_state'], {}), '(rand_state)\n', (18284, 18296), True, 'import tensorflow as tf\n'), ((22359, 22370), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (22368, 22370), False, 'import os\n'), ((28388, 28458), 'scimpute.read_data_into_cell_row', 'scimpute.read_data_into_cell_row', (['p.fname_imputation', 'p.ori_imputation'], {}), '(p.fname_imputation, p.ori_imputation)\n', (28420, 28458), False, 'import scimpute\n'), ((30132, 30176), 'scimpute.max_min_element_in_arrs', 'scimpute.max_min_element_in_arrs', (['[Y.values]'], {}), '([Y.values])\n', (30164, 30176), False, 'import scimpute\n'), ((30254, 30298), 'scimpute.max_min_element_in_arrs', 'scimpute.max_min_element_in_arrs', (['[G.values]'], {}), '([G.values])\n', (30286, 30298), False, 'import scimpute\n'), ((30372, 30396), 'scimpute.mse_omega', 'scimpute.mse_omega', (['Y', 'X'], {}), '(Y, X)\n', (30390, 30396), False, 'import scimpute\n'), ((30493, 30511), 'scimpute.mse', 'scimpute.mse', (['Y', 'X'], {}), '(Y, X)\n', (30505, 30511), False, 'import scimpute\n'), ((30599, 30623), 'scimpute.mse_omega', 'scimpute.mse_omega', (['Y', 'G'], {}), '(Y, G)\n', (30617, 30623), False, 'import scimpute\n'), ((30727, 30745), 'scimpute.mse', 'scimpute.mse', (['Y', 'G'], {}), '(Y, G)\n', (30739, 30745), False, 'import scimpute\n'), ((31315, 31336), 'scimpute.nz_std', 'scimpute.nz_std', (['X', 'Y'], {}), '(X, Y)\n', (31330, 31336), False, 'import scimpute\n'), ((31359, 31380), 'scimpute.nz_std', 'scimpute.nz_std', (['X', 'G'], {}), '(X, G)\n', (31374, 31380), False, 'import scimpute\n'), ((31753, 31828), 'pandas.DataFrame', 'pd.DataFrame', ([], {'data': 'std_ratio_yx_data', 'index': 'X.columns', 'columns': "['sd_ratio']"}), "(data=std_ratio_yx_data, index=X.columns, 
columns=['sd_ratio'])\n", (31765, 31828), True, 'import pandas as pd\n'), ((31948, 32023), 'pandas.DataFrame', 'pd.DataFrame', ([], {'data': 'std_ratio_yg_data', 'index': 'X.columns', 'columns': "['sd_ratio']"}), "(data=std_ratio_yg_data, index=X.columns, columns=['sd_ratio'])\n", (31960, 32023), True, 'import pandas as pd\n'), ((32650, 32777), 'scimpute.hist_df', 'scimpute.hist_df', (['std_ratio_yx_df'], {'xlab': '"""Ratio of Imputation SD vs Input SD"""', 'title': '""""""', 'range': '(std_min, std_max)', 'dir': 'p.tag'}), "(std_ratio_yx_df, xlab='Ratio of Imputation SD vs Input SD',\n title='', range=(std_min, std_max), dir=p.tag)\n", (32666, 32777), False, 'import scimpute\n'), ((32788, 32927), 'scimpute.hist_df', 'scimpute.hist_df', (['std_ratio_yg_df'], {'xlab': '"""Ratio of Imputation SD vs Ground Truth SD"""', 'title': '""""""', 'range': '(std_min, std_max)', 'dir': 'p.tag'}), "(std_ratio_yg_df, xlab=\n 'Ratio of Imputation SD vs Ground Truth SD', title='', range=(std_min,\n std_max), dir=p.tag)\n", (32804, 32927), False, 'import scimpute\n'), ((35239, 35303), 'scimpute.max_min_element_in_arrs', 'scimpute.max_min_element_in_arrs', (['[Y.values, G.values, X.values]'], {}), '([Y.values, G.values, X.values])\n', (35271, 35303), False, 'import scimpute\n'), ((36063, 36163), 'scimpute.pca_tsne', 'scimpute.pca_tsne', ([], {'df_cell_row': 'Y', 'cluster_info': 'cluster_info', 'title': 'p.name_imputation', 'dir': 'p.tag'}), '(df_cell_row=Y, cluster_info=cluster_info, title=p.\n name_imputation, dir=p.tag)\n', (36080, 36163), False, 'import scimpute\n'), ((36188, 36283), 'scimpute.pca_tsne', 'scimpute.pca_tsne', ([], {'df_cell_row': 'X', 'cluster_info': 'cluster_info', 'title': 'p.name_input', 'dir': 'p.tag'}), '(df_cell_row=X, cluster_info=cluster_info, title=p.\n name_input, dir=p.tag)\n', (36205, 36283), False, 'import scimpute\n'), ((36308, 36410), 'scimpute.pca_tsne', 'scimpute.pca_tsne', ([], {'df_cell_row': 'G', 'cluster_info': 'cluster_info', 'title': 
'p.name_ground_truth', 'dir': 'p.tag'}), '(df_cell_row=G, cluster_info=cluster_info, title=p.\n name_ground_truth, dir=p.tag)\n', (36325, 36410), False, 'import scimpute\n'), ((36838, 36914), 'scimpute.gene_pair_plot', 'scimpute.gene_pair_plot', (['Y'], {'list': 'List', 'tag': '"""(Imputation)"""', 'dir': 'gene_pair_dir'}), "(Y, list=List, tag='(Imputation)', dir=gene_pair_dir)\n", (36861, 36914), False, 'import scimpute\n'), ((36916, 36987), 'scimpute.gene_pair_plot', 'scimpute.gene_pair_plot', (['X'], {'list': 'List', 'tag': '"""(Input)"""', 'dir': 'gene_pair_dir'}), "(X, list=List, tag='(Input)', dir=gene_pair_dir)\n", (36939, 36987), False, 'import scimpute\n'), ((36989, 37067), 'scimpute.gene_pair_plot', 'scimpute.gene_pair_plot', (['G'], {'list': 'List', 'tag': '"""(Ground_truth)"""', 'dir': 'gene_pair_dir'}), "(G, list=List, tag='(Ground_truth)', dir=gene_pair_dir)\n", (37012, 37067), False, 'import scimpute\n'), ((38145, 38180), 'scimpute.df_exp_discretize_log10', 'scimpute.df_exp_discretize_log10', (['Y'], {}), '(Y)\n', (38177, 38180), False, 'import scimpute\n'), ((38310, 38401), 'scimpute.gene_pair_plot', 'scimpute.gene_pair_plot', (['Y'], {'list': 'List', 'tag': '"""(Imputation Discrete) """', 'dir': 'gene_pair_dir'}), "(Y, list=List, tag='(Imputation Discrete) ', dir=\n gene_pair_dir)\n", (38333, 38401), False, 'import scimpute\n'), ((39897, 39952), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""Help information"""'}), "(description='Help information')\n", (39920, 39952), False, 'import argparse\n'), ((40464, 40499), 'scimpute.refresh_logfolder', 'scimpute.refresh_logfolder', (['log_dir'], {}), '(log_dir)\n', (40490, 40499), False, 'import scimpute\n'), ((40514, 40525), 'time.time', 'time.time', ([], {}), '()\n', (40523, 40525), False, 'import time\n'), ((40692, 40703), 'time.time', 'time.time', ([], {}), '()\n', (40701, 40703), False, 'import time\n'), ((5223, 5287), 'pandas.DataFrame', 'pd.DataFrame', ([], {'data': 
'Y_input_arr', 'columns': 'gene_ids', 'index': 'cell_ids'}), '(data=Y_input_arr, columns=gene_ids, index=cell_ids)\n', (5235, 5287), True, 'import pandas as pd\n'), ((5540, 5586), 'pandas.DataFrame', 'pd.DataFrame', ([], {'data': 'latent_code', 'index': 'cell_ids'}), '(data=latent_code, index=cell_ids)\n', (5552, 5586), True, 'import pandas as pd\n'), ((7678, 7689), 'os.getpid', 'os.getpid', ([], {}), '()\n', (7687, 7689), False, 'import os\n'), ((9147, 9164), 'numpy.random.seed', 'np.random.seed', (['(1)'], {}), '(1)\n', (9161, 9164), True, 'import numpy as np\n'), ((9902, 9914), 'gc.collect', 'gc.collect', ([], {}), '()\n', (9912, 9914), False, 'import gc\n'), ((9917, 9933), 'numpy.random.seed', 'np.random.seed', ([], {}), '()\n', (9931, 9933), True, 'import numpy as np\n'), ((21820, 21844), 'tensorflow.name_scope', 'tf.name_scope', (['"""Metrics"""'], {}), "('Metrics')\n", (21833, 21844), True, 'import tensorflow as tf\n'), ((21856, 21866), 'tensorflow.sign', 'tf.sign', (['X'], {}), '(X)\n', (21863, 21866), True, 'import tensorflow as tf\n'), ((22105, 22148), 'tensorflow.summary.scalar', 'tf.summary.scalar', (['"""mse_nz__Y_vs_X"""', 'mse_nz'], {}), "('mse_nz__Y_vs_X', mse_nz)\n", (22122, 22148), True, 'import tensorflow as tf\n'), ((22207, 22244), 'tensorflow.summary.scalar', 'tf.summary.scalar', (['"""mse__Y_vs_X"""', 'mse'], {}), "('mse__Y_vs_X', mse)\n", (22224, 22244), True, 'import tensorflow as tf\n'), ((25745, 25824), 'scimpute.read_sparse_matrix_from_h5', 'scimpute.read_sparse_matrix_from_h5', (['p.fname_input', 'p.genome_input', 'p.ori_input'], {}), '(p.fname_input, p.genome_input, p.ori_input)\n', (25780, 25824), False, 'import scimpute\n'), ((26063, 26075), 'gc.collect', 'gc.collect', ([], {}), '()\n', (26073, 26075), False, 'import gc\n'), ((26153, 26228), 'scimpute.sparse_matrix_transformation', 'scimpute.sparse_matrix_transformation', (['input_matrix', 'p.transformation_input'], {}), '(input_matrix, p.transformation_input)\n', (26190, 26228), 
False, 'import scimpute\n'), ((26264, 26276), 'gc.collect', 'gc.collect', ([], {}), '()\n', (26274, 26276), False, 'import gc\n'), ((26664, 26724), 'scimpute.read_data_into_cell_row', 'scimpute.read_data_into_cell_row', (['p.fname_input', 'p.ori_input'], {}), '(p.fname_input, p.ori_input)\n', (26696, 26724), False, 'import scimpute\n'), ((27227, 27247), 'scipy.sparse.csr_matrix', 'csr_matrix', (['input_df'], {}), '(input_df)\n', (27237, 27247), False, 'from scipy.sparse import csr_matrix\n'), ((27445, 27457), 'gc.collect', 'gc.collect', ([], {}), '()\n', (27455, 27457), False, 'import gc\n'), ((28523, 28597), 'scimpute.read_data_into_cell_row', 'scimpute.read_data_into_cell_row', (['p.fname_ground_truth', 'p.ori_ground_truth'], {}), '(p.fname_ground_truth, p.ori_ground_truth)\n', (28555, 28597), False, 'import scimpute\n'), ((35937, 35985), 'scimpute.read_data_into_cell_row', 'scimpute.read_data_into_cell_row', (['p.cluster_file'], {}), '(p.cluster_file)\n', (35969, 35985), False, 'import scimpute\n'), ((983, 1069), 'pandas.DataFrame', 'pd.DataFrame', ([], {'data': '_', 'index': 'epoch_log', 'columns': "['Epoch', 'MSE_batch', 'MSE_valid']"}), "(data=_, index=epoch_log, columns=['Epoch', 'MSE_batch',\n 'MSE_valid'])\n", (995, 1069), True, 'import pandas as pd\n'), ((1940, 2032), 'pandas.DataFrame', 'pd.DataFrame', ([], {'data': '_', 'index': 'epoch_log', 'columns': "['Epoch', 'MSE_NZ_batch', 'MSE_NZ_valid']"}), "(data=_, index=epoch_log, columns=['Epoch', 'MSE_NZ_batch',\n 'MSE_NZ_valid'])\n", (1952, 2032), True, 'import pandas as pd\n'), ((11716, 11749), 'tensorflow.global_variables_initializer', 'tf.global_variables_initializer', ([], {}), '()\n', (11747, 11749), True, 'import tensorflow as tf\n'), ((13688, 13700), 'time.clock', 'time.clock', ([], {}), '()\n', (13698, 13700), False, 'import time\n'), ((13702, 13713), 'time.time', 'time.time', ([], {}), '()\n', (13711, 13713), False, 'import time\n'), ((13893, 13944), 'numpy.arange', 'np.arange', (['(p.batch_size * 
i)', '(p.batch_size * (i + 1))'], {}), '(p.batch_size * i, p.batch_size * (i + 1))\n', (13902, 13944), True, 'import numpy as np\n'), ((14212, 14224), 'time.clock', 'time.clock', ([], {}), '()\n', (14222, 14224), False, 'import time\n'), ((14226, 14237), 'time.time', 'time.time', ([], {}), '()\n', (14235, 14237), False, 'import time\n'), ((14372, 14383), 'time.time', 'time.time', ([], {}), '()\n', (14381, 14383), False, 'import time\n'), ((15174, 15185), 'time.time', 'time.time', ([], {}), '()\n', (15183, 15185), False, 'import time\n'), ((15720, 15731), 'time.time', 'time.time', ([], {}), '()\n', (15729, 15731), False, 'import time\n'), ((17666, 17677), 'time.time', 'time.time', ([], {}), '()\n', (17675, 17677), False, 'import time\n'), ((18472, 18499), 'tensorflow.name_scope', 'tf.name_scope', (['"""Encoder_L1"""'], {}), "('Encoder_L1')\n", (18485, 18499), True, 'import tensorflow as tf\n'), ((18517, 18581), 'scimpute.weight_bias_variable', 'scimpute.weight_bias_variable', (['"""encoder1"""', 'n', 'p.n_hidden_1', 'p.sd'], {}), "('encoder1', n, p.n_hidden_1, p.sd)\n", (18546, 18581), False, 'import scimpute\n'), ((18592, 18651), 'scimpute.dense_layer', 'scimpute.dense_layer', (['"""encoder1"""', 'X', 'e_w1', 'e_b1', 'pIn_holder'], {}), "('encoder1', X, e_w1, e_b1, pIn_holder)\n", (18612, 18651), False, 'import scimpute\n'), ((18659, 18686), 'tensorflow.name_scope', 'tf.name_scope', (['"""Encoder_L2"""'], {}), "('Encoder_L2')\n", (18672, 18686), True, 'import tensorflow as tf\n'), ((18704, 18779), 'scimpute.weight_bias_variable', 'scimpute.weight_bias_variable', (['"""encoder2"""', 'p.n_hidden_1', 'p.n_hidden_2', 'p.sd'], {}), "('encoder2', p.n_hidden_1, p.n_hidden_2, p.sd)\n", (18733, 18779), False, 'import scimpute\n'), ((18790, 18856), 'scimpute.dense_layer', 'scimpute.dense_layer', (['"""encoder2"""', 'e_a1', 'e_w2', 'e_b2', 'pHidden_holder'], {}), "('encoder2', e_a1, e_w2, e_b2, pHidden_holder)\n", (18810, 18856), False, 'import scimpute\n'), ((18864, 18891), 
'tensorflow.name_scope', 'tf.name_scope', (['"""Encoder_L3"""'], {}), "('Encoder_L3')\n", (18877, 18891), True, 'import tensorflow as tf\n'), ((18909, 18984), 'scimpute.weight_bias_variable', 'scimpute.weight_bias_variable', (['"""encoder3"""', 'p.n_hidden_2', 'p.n_hidden_3', 'p.sd'], {}), "('encoder3', p.n_hidden_2, p.n_hidden_3, p.sd)\n", (18938, 18984), False, 'import scimpute\n'), ((18995, 19061), 'scimpute.dense_layer', 'scimpute.dense_layer', (['"""encoder3"""', 'e_a2', 'e_w3', 'e_b3', 'pHidden_holder'], {}), "('encoder3', e_a2, e_w3, e_b3, pHidden_holder)\n", (19015, 19061), False, 'import scimpute\n'), ((19515, 19542), 'tensorflow.name_scope', 'tf.name_scope', (['"""Decoder_L3"""'], {}), "('Decoder_L3')\n", (19528, 19542), True, 'import tensorflow as tf\n'), ((19560, 19635), 'scimpute.weight_bias_variable', 'scimpute.weight_bias_variable', (['"""decoder3"""', 'p.n_hidden_3', 'p.n_hidden_2', 'p.sd'], {}), "('decoder3', p.n_hidden_3, p.n_hidden_2, p.sd)\n", (19589, 19635), False, 'import scimpute\n'), ((19646, 19712), 'scimpute.dense_layer', 'scimpute.dense_layer', (['"""decoder3"""', 'e_a3', 'd_w3', 'd_b3', 'pHidden_holder'], {}), "('decoder3', e_a3, d_w3, d_b3, pHidden_holder)\n", (19666, 19712), False, 'import scimpute\n'), ((19720, 19747), 'tensorflow.name_scope', 'tf.name_scope', (['"""Decoder_L2"""'], {}), "('Decoder_L2')\n", (19733, 19747), True, 'import tensorflow as tf\n'), ((19765, 19840), 'scimpute.weight_bias_variable', 'scimpute.weight_bias_variable', (['"""decoder2"""', 'p.n_hidden_2', 'p.n_hidden_1', 'p.sd'], {}), "('decoder2', p.n_hidden_2, p.n_hidden_1, p.sd)\n", (19794, 19840), False, 'import scimpute\n'), ((19851, 19917), 'scimpute.dense_layer', 'scimpute.dense_layer', (['"""decoder2"""', 'd_a3', 'd_w2', 'd_b2', 'pHidden_holder'], {}), "('decoder2', d_a3, d_w2, d_b2, pHidden_holder)\n", (19871, 19917), False, 'import scimpute\n'), ((19925, 19952), 'tensorflow.name_scope', 'tf.name_scope', (['"""Decoder_L1"""'], {}), "('Decoder_L1')\n", 
(19938, 19952), True, 'import tensorflow as tf\n'), ((19970, 20034), 'scimpute.weight_bias_variable', 'scimpute.weight_bias_variable', (['"""decoder1"""', 'p.n_hidden_1', 'n', 'p.sd'], {}), "('decoder1', p.n_hidden_1, n, p.sd)\n", (19999, 20034), False, 'import scimpute\n'), ((20045, 20111), 'scimpute.dense_layer', 'scimpute.dense_layer', (['"""decoder1"""', 'd_a2', 'd_w1', 'd_b1', 'pHidden_holder'], {}), "('decoder1', d_a2, d_w1, d_b1, pHidden_holder)\n", (20065, 20111), False, 'import scimpute\n'), ((22038, 22054), 'tensorflow.pow', 'tf.pow', (['(X - h)', '(2)'], {}), '(X - h, 2)\n', (22044, 22054), True, 'import tensorflow as tf\n'), ((22173, 22189), 'tensorflow.pow', 'tf.pow', (['(X - h)', '(2)'], {}), '(X - h, 2)\n', (22179, 22189), True, 'import tensorflow as tf\n'), ((22448, 22500), 'importlib.machinery.SourceFileLoader', 'SourceFileLoader', (['param_name', "(cwd + '/' + param_file)"], {}), "(param_name, cwd + '/' + param_file)\n", (22464, 22500), False, 'from importlib.machinery import SourceFileLoader\n'), ((26589, 26601), 'gc.collect', 'gc.collect', ([], {}), '()\n', (26599, 26601), False, 'import gc\n'), ((27182, 27194), 'gc.collect', 'gc.collect', ([], {}), '()\n', (27192, 27194), False, 'import gc\n'), ((20254, 20281), 'tensorflow.name_scope', 'tf.name_scope', (['"""Encoder_L1"""'], {}), "('Encoder_L1')\n", (20267, 20281), True, 'import tensorflow as tf\n'), ((20299, 20363), 'scimpute.weight_bias_variable', 'scimpute.weight_bias_variable', (['"""encoder1"""', 'n', 'p.n_hidden_1', 'p.sd'], {}), "('encoder1', n, p.n_hidden_1, p.sd)\n", (20328, 20363), False, 'import scimpute\n'), ((20374, 20433), 'scimpute.dense_layer', 'scimpute.dense_layer', (['"""encoder1"""', 'X', 'e_w1', 'e_b1', 'pIn_holder'], {}), "('encoder1', X, e_w1, e_b1, pIn_holder)\n", (20394, 20433), False, 'import scimpute\n'), ((20441, 20468), 'tensorflow.name_scope', 'tf.name_scope', (['"""Encoder_L2"""'], {}), "('Encoder_L2')\n", (20454, 20468), True, 'import tensorflow as tf\n'), 
((20486, 20561), 'scimpute.weight_bias_variable', 'scimpute.weight_bias_variable', (['"""encoder2"""', 'p.n_hidden_1', 'p.n_hidden_2', 'p.sd'], {}), "('encoder2', p.n_hidden_1, p.n_hidden_2, p.sd)\n", (20515, 20561), False, 'import scimpute\n'), ((20572, 20638), 'scimpute.dense_layer', 'scimpute.dense_layer', (['"""encoder2"""', 'e_a1', 'e_w2', 'e_b2', 'pHidden_holder'], {}), "('encoder2', e_a1, e_w2, e_b2, pHidden_holder)\n", (20592, 20638), False, 'import scimpute\n'), ((20646, 20673), 'tensorflow.name_scope', 'tf.name_scope', (['"""Decoder_L2"""'], {}), "('Decoder_L2')\n", (20659, 20673), True, 'import tensorflow as tf\n'), ((20691, 20766), 'scimpute.weight_bias_variable', 'scimpute.weight_bias_variable', (['"""decoder2"""', 'p.n_hidden_2', 'p.n_hidden_1', 'p.sd'], {}), "('decoder2', p.n_hidden_2, p.n_hidden_1, p.sd)\n", (20720, 20766), False, 'import scimpute\n'), ((20777, 20843), 'scimpute.dense_layer', 'scimpute.dense_layer', (['"""decoder2"""', 'e_a2', 'd_w2', 'd_b2', 'pHidden_holder'], {}), "('decoder2', e_a2, d_w2, d_b2, pHidden_holder)\n", (20797, 20843), False, 'import scimpute\n'), ((20851, 20878), 'tensorflow.name_scope', 'tf.name_scope', (['"""Decoder_L1"""'], {}), "('Decoder_L1')\n", (20864, 20878), True, 'import tensorflow as tf\n'), ((20896, 20960), 'scimpute.weight_bias_variable', 'scimpute.weight_bias_variable', (['"""decoder1"""', 'p.n_hidden_1', 'n', 'p.sd'], {}), "('decoder1', p.n_hidden_1, n, p.sd)\n", (20925, 20960), False, 'import scimpute\n'), ((20971, 21037), 'scimpute.dense_layer', 'scimpute.dense_layer', (['"""decoder1"""', 'd_a2', 'd_w1', 'd_b1', 'pHidden_holder'], {}), "('decoder1', d_a2, d_w1, d_b1, pHidden_holder)\n", (20991, 21037), False, 'import scimpute\n'), ((21970, 21986), 'tensorflow.pow', 'tf.pow', (['(X - h)', '(2)'], {}), '(X - h, 2)\n', (21976, 21986), True, 'import tensorflow as tf\n'), ((22082, 22094), 'tensorflow.pow', 'tf.pow', (['h', '(2)'], {}), '(h, 2)\n', (22088, 22094), True, 'import tensorflow as tf\n'), 
((21180, 21207), 'tensorflow.name_scope', 'tf.name_scope', (['"""Encoder_L1"""'], {}), "('Encoder_L1')\n", (21193, 21207), True, 'import tensorflow as tf\n'), ((21225, 21289), 'scimpute.weight_bias_variable', 'scimpute.weight_bias_variable', (['"""encoder1"""', 'n', 'p.n_hidden_1', 'p.sd'], {}), "('encoder1', n, p.n_hidden_1, p.sd)\n", (21254, 21289), False, 'import scimpute\n'), ((21300, 21359), 'scimpute.dense_layer', 'scimpute.dense_layer', (['"""encoder1"""', 'X', 'e_w1', 'e_b1', 'pIn_holder'], {}), "('encoder1', X, e_w1, e_b1, pIn_holder)\n", (21320, 21359), False, 'import scimpute\n'), ((21367, 21394), 'tensorflow.name_scope', 'tf.name_scope', (['"""Decoder_L1"""'], {}), "('Decoder_L1')\n", (21380, 21394), True, 'import tensorflow as tf\n'), ((21412, 21476), 'scimpute.weight_bias_variable', 'scimpute.weight_bias_variable', (['"""decoder1"""', 'p.n_hidden_1', 'n', 'p.sd'], {}), "('decoder1', p.n_hidden_1, n, p.sd)\n", (21441, 21476), False, 'import scimpute\n'), ((21487, 21553), 'scimpute.dense_layer', 'scimpute.dense_layer', (['"""decoder1"""', 'e_a1', 'd_w1', 'd_b1', 'pHidden_holder'], {}), "('decoder1', e_a1, d_w1, d_b1, pHidden_holder)\n", (21507, 21553), False, 'import scimpute\n'), ((16856, 16894), 'math.floor', 'math.floor', (['(epoch / 5 / p.display_step)'], {}), '(epoch / 5 / p.display_step)\n', (16866, 16894), False, 'import math\n'), ((17096, 17134), 'math.floor', 'math.floor', (['(epoch / 5 / p.display_step)'], {}), '(epoch / 5 / p.display_step)\n', (17106, 17134), False, 'import math\n')]
|
"""IRC message."""
import re
from typing import Optional
from irc.messages.base import IRCBaseMessage
# Regex for matching the individual parts of an IRC message
# Regex for matching the individual parts of an IRC message:
# ":<author>!<hostname> <PRIVMSG|NOTICE> <target> :<message>"
private_message_regex = re.compile("^:([^!]+)!(.*?) (PRIVMSG|NOTICE) ([^ ]+) :(.*)")


class IRCMessage(IRCBaseMessage):
    """An IRC private message (PRIVMSG) or notice (NOTICE)."""

    def __init__(  # pylint: disable=too-many-arguments
            self,
            raw_message: str,
            author: str,
            hostname: str,
            is_notice: bool,
            target: str,
            message: str
    ) -> None:
        super().__init__(raw_message)

        self.__author = author
        self.__hostname = hostname
        self.__is_notice = is_notice
        self.__target = target
        self.__message = message

    @property
    def author(self) -> str:
        """The author of the message."""
        return self.__author

    @property
    def hostname(self) -> str:
        """The hostname of the message's author."""
        return self.__hostname

    @property
    def is_notice(self) -> bool:
        """Whether or not the message is a NOTICE."""
        return self.__is_notice

    @property
    def target(self) -> str:
        """The target of the message."""
        return self.__target

    @property
    def message(self) -> str:
        """The message itself."""
        return self.__message

    def __str__(self) -> str:
        """String representation of the message."""
        # NOTE(review): formats with the author, not the target — confirm
        # this is intended (an outgoing NOTICE/PRIVMSG line addresses the
        # target, not the author).
        if self.__is_notice:
            return "NOTICE {} : {}".format(self.__author, self.__message)

        return "PRIVMSG {} : {}".format(self.__author, self.__message)

    @staticmethod
    def parse(line: str) -> Optional["IRCMessage"]:
        """Parse a raw IRC line into an IRCMessage, or None if it does not match."""
        match = private_message_regex.match(line)
        if not match:
            return None

        # Fix: the third group was bound to `type`, shadowing the builtin.
        author, hostname, message_type, target, message = match.groups()
        is_notice = message_type == "NOTICE"

        return IRCMessage(line, author, hostname, is_notice, target, message)
|
[
"re.compile"
] |
[((189, 249), 're.compile', 're.compile', (['"""^:([^!]+)!(.*?) (PRIVMSG|NOTICE) ([^ ]+) :(.*)"""'], {}), "('^:([^!]+)!(.*?) (PRIVMSG|NOTICE) ([^ ]+) :(.*)')\n", (199, 249), False, 'import re\n')]
|
#!/usr/bin/env python3
from calendar import day_name, weekday
# Read "month day year" from stdin and print the weekday name in capitals.
month, day, year = [int(token) for token in input().split()]
print(day_name[weekday(year, month, day)].upper())
|
[
"calendar.weekday"
] |
[((124, 149), 'calendar.weekday', 'weekday', (['year', 'month', 'day'], {}), '(year, month, day)\n', (131, 149), False, 'from calendar import day_name, weekday\n')]
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# @Time : 2019/1/21 10:05 PM
# @Author : w8ay
# @File : nmap.py
import nmap
from lib.data import logger
def nmapscan(host, ports):
    """Service/version-scan ``host`` on the given ``ports`` with nmap.

    Accepts results scanned upstream by masscan; each call builds its own
    PortScanner instance, so the function is safe to call from multiple
    threads concurrently.

    Args:
        host: target host/IP string.
        ports: iterable of port-number strings to probe.

    Returns:
        The per-port TCP result dict from nmap for ``host``, or None when
        the scan fails or yields no data for the host.
    """
    nm = nmap.PortScanner()
    # -sV service/version detection, -sS SYN scan, -Pn skip host discovery,
    # hard 1-minute cap so a stuck target cannot stall the worker.
    argument = "-sV -sS -Pn --host-timeout 1m -p{}".format(','.join(ports))
    try:
        ret = nm.scan(host, arguments=argument)
    except nmap.PortScannerError:
        logger.debug("Nmap PortScannerError host:{}".format(host))
        return None
    except Exception:
        # Fix: was a bare `except:`, which also swallowed SystemExit and
        # KeyboardInterrupt. Narrow to Exception and record the failure.
        logger.debug("Nmap scan failed host:{}".format(host))
        return None

    # debug
    elapsed = ret["nmap"]["scanstats"]["elapsed"]
    command_line = ret["nmap"]["command_line"]
    logger.debug("[nmap] successed,elapsed:%s command_line:%s" % (elapsed, command_line))
    if host in ret["scan"]:
        try:
            result = ret["scan"][host]["tcp"]
        except KeyError:
            # Host answered but exposed no TCP section.
            return None
        return result
    return None
|
[
"nmap.PortScanner",
"lib.data.logger.debug"
] |
[((248, 266), 'nmap.PortScanner', 'nmap.PortScanner', ([], {}), '()\n', (264, 266), False, 'import nmap\n'), ((667, 756), 'lib.data.logger.debug', 'logger.debug', (["('[nmap] successed,elapsed:%s command_line:%s' % (elapsed, command_line))"], {}), "('[nmap] successed,elapsed:%s command_line:%s' % (elapsed,\n command_line))\n", (679, 756), False, 'from lib.data import logger\n')]
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11.20 on 2019-08-11 12:46
from __future__ import unicode_literals
import ckeditor_uploader.fields
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Auto-generated migration: adds the QuestionsCollection bookmark model
    and switches Questions.answer/content to CKEditor rich-text fields."""

    dependencies = [
        # Needs the (swappable) user model for the collector FK, plus the
        # initial `repo` schema that defines Questions.
        migrations.swappable_dependency(settings.AUTH_USER_MODEL),
        ('repo', '0001_initial'),
    ]
    operations = [
        migrations.CreateModel(
            name='QuestionsCollection',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                # auto_now: timestamp refreshed every time the bookmark is toggled.
                ('create_time', models.DateTimeField(auto_now=True, verbose_name='收藏/取消时间')),
                # True = currently bookmarked, False = un-bookmarked.
                ('status', models.BooleanField(default=True, verbose_name='收藏状态')),
            ],
            options={
                'verbose_name': '收藏记录',
                'verbose_name_plural': '收藏记录',
            },
        ),
        migrations.AlterField(
            model_name='questions',
            name='answer',
            field=ckeditor_uploader.fields.RichTextUploadingField(blank=True, null=True, verbose_name='题目答案'),
        ),
        migrations.AlterField(
            model_name='questions',
            name='content',
            field=ckeditor_uploader.fields.RichTextUploadingField(null=True, verbose_name='题目详情'),
        ),
        # FK fields are added after CreateModel, as Django's autodetector emits them.
        migrations.AddField(
            model_name='questionscollection',
            name='question',
            field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='questions_collection_set', to='repo.Questions', verbose_name='问题'),
        ),
        migrations.AddField(
            model_name='questionscollection',
            name='user',
            field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='questions_collection_set', to=settings.AUTH_USER_MODEL, verbose_name='收藏者'),
        ),
    ]
|
[
"django.db.migrations.swappable_dependency",
"django.db.models.ForeignKey",
"django.db.models.BooleanField",
"django.db.models.AutoField",
"django.db.models.DateTimeField"
] |
[((325, 382), 'django.db.migrations.swappable_dependency', 'migrations.swappable_dependency', (['settings.AUTH_USER_MODEL'], {}), '(settings.AUTH_USER_MODEL)\n', (356, 382), False, 'from django.db import migrations, models\n'), ((1523, 1671), 'django.db.models.ForeignKey', 'models.ForeignKey', ([], {'on_delete': 'django.db.models.deletion.CASCADE', 'related_name': '"""questions_collection_set"""', 'to': '"""repo.Questions"""', 'verbose_name': '"""问题"""'}), "(on_delete=django.db.models.deletion.CASCADE, related_name\n ='questions_collection_set', to='repo.Questions', verbose_name='问题')\n", (1540, 1671), False, 'from django.db import migrations, models\n'), ((1797, 1959), 'django.db.models.ForeignKey', 'models.ForeignKey', ([], {'on_delete': 'django.db.models.deletion.CASCADE', 'related_name': '"""questions_collection_set"""', 'to': 'settings.AUTH_USER_MODEL', 'verbose_name': '"""收藏者"""'}), "(on_delete=django.db.models.deletion.CASCADE, related_name\n ='questions_collection_set', to=settings.AUTH_USER_MODEL, verbose_name=\n '收藏者')\n", (1814, 1959), False, 'from django.db import migrations, models\n'), ((560, 653), 'django.db.models.AutoField', 'models.AutoField', ([], {'auto_created': '(True)', 'primary_key': '(True)', 'serialize': '(False)', 'verbose_name': '"""ID"""'}), "(auto_created=True, primary_key=True, serialize=False,\n verbose_name='ID')\n", (576, 653), False, 'from django.db import migrations, models\n'), ((684, 743), 'django.db.models.DateTimeField', 'models.DateTimeField', ([], {'auto_now': '(True)', 'verbose_name': '"""收藏/取消时间"""'}), "(auto_now=True, verbose_name='收藏/取消时间')\n", (704, 743), False, 'from django.db import migrations, models\n'), ((773, 827), 'django.db.models.BooleanField', 'models.BooleanField', ([], {'default': '(True)', 'verbose_name': '"""收藏状态"""'}), "(default=True, verbose_name='收藏状态')\n", (792, 827), False, 'from django.db import migrations, models\n')]
|
import tqdm
import networkx as nx
import argparse
import numpy as np
import multiprocessing
import graph_tool as gt
from graph_tool.centrality import betweenness
# ---- CLI arguments ----
parser = argparse.ArgumentParser()
parser.add_argument("-g", "--graph", help='bundled graph')
parser.add_argument("-l","--length",help="contig length")
parser.add_argument("-o","--output",help="output file")
args = parser.parse_args()

G = nx.Graph()
cpus = multiprocessing.cpu_count()
print('Using {} cpus'.format(cpus))
print('Loading bundled graph...')
# Each bundled-graph line: node1 ori1 node2 ori2 mean stdev bundle_size
with open(args.graph,'r') as f:
    for line in tqdm.tqdm(f, desc='Reading bundled'):
        attrs = line.split()
        G.add_edge(attrs[0],attrs[2],mean=float(attrs[4]),stdev=float(attrs[5]),bsize=int(attrs[6]),ori=attrs[1]+attrs[3])
node_set = set(G.nodes())
print('Loading contig lengths...')
# Each length line: contig_id length; keep only contigs present in the graph.
contig_length = {}
with open(args.length,'r') as f:
    for line in tqdm.tqdm(f, desc='Reading lengths'):
        attrs = line.split()
        if attrs[0] in node_set:
            contig_length[attrs[0]] = int(attrs[1])
del node_set
# NOTE(review): (G, name, values) is the networkx 1.x argument order;
# networkx >= 2.0 expects set_node_attributes(G, values, name) — confirm
# the pinned networkx version (the script also uses xrange, so Python 2 era).
nx.set_node_attributes(G,'length',contig_length)
# Global accumulator: node id -> betweenness centrality, filled by get_centrality.
repeat_nodes = {}
def get_prop_type(value, key=None):
    """
    Performs typing and value conversion for the graph_tool PropertyMap class.
    If a key is provided, it also ensures the key is in a format that can be
    used with the PropertyMap. Returns a tuple, (type name, value, key).
    """
    # Normalise the key to ASCII bytes (Python 2 `unicode` input).
    if isinstance(key, unicode):
        key = key.encode('ascii', errors='replace')

    # Determine the graph-tool type name for the value. The bool check must
    # come before the int check, since bool is a subclass of int.
    if isinstance(value, bool):
        return 'bool', value, key
    if isinstance(value, int):
        return 'float', float(value), key
    if isinstance(value, float):
        return 'float', value, key
    if isinstance(value, unicode):
        return 'string', value.encode('ascii', errors='replace'), key
    if isinstance(value, dict):
        return 'object', value, key
    # Fall back to a string representation for anything else.
    return 'string', str(value), key
def nx2gt(nxG):
    """
    Converts a networkx graph to a graph-tool graph.

    Copies graph-, node-, and edge-level attributes into graph-tool
    "internal" PropertyMaps, typed via get_prop_type(). Node identities
    (arbitrary hashables in networkx) are preserved in a string vertex
    property called 'id'.

    NOTE(review): nodes_iter()/edges_iter() are the networkx 1.x API
    (removed in 2.x) — confirm the pinned networkx version.
    """
    # Phase 0: Create a directed or undirected graph-tool Graph
    gtG = gt.Graph(directed=nxG.is_directed())
    # Add the Graph properties as "internal properties"
    for key, value in nxG.graph.items():
        # Convert the value and key into a type for graph-tool
        tname, value, key = get_prop_type(value, key)
        prop = gtG.new_graph_property(tname) # Create the PropertyMap
        gtG.graph_properties[key] = prop # Set the PropertyMap
        gtG.graph_properties[key] = value # Set the actual value
    # Phase 1: Add the vertex and edge property maps
    # Go through all nodes and edges and add seen properties
    # Add the node properties first
    nprops = set() # cache keys to only add properties once
    for node, data in nxG.nodes_iter(data=True):
        # Go through all the properties if not seen and add them.
        for key, val in data.items():
            if key in nprops: continue # Skip properties already added
            # Convert the value and key into a type for graph-tool
            tname, _, key = get_prop_type(val, key)
            prop = gtG.new_vertex_property(tname) # Create the PropertyMap
            gtG.vertex_properties[key] = prop # Set the PropertyMap
            # Add the key to the already seen properties
            nprops.add(key)
    # Also add the node id: in NetworkX a node can be any hashable type, but
    # in graph-tool node are defined as indices. So we capture any strings
    # in a special PropertyMap called 'id' -- modify as needed!
    gtG.vertex_properties['id'] = gtG.new_vertex_property('string')
    # Add the edge properties second
    eprops = set() # cache keys to only add properties once
    for src, dst, data in nxG.edges_iter(data=True):
        # Go through all the edge properties if not seen and add them.
        for key, val in data.items():
            if key in eprops: continue # Skip properties already added
            # Convert the value and key into a type for graph-tool
            tname, _, key = get_prop_type(val, key)
            prop = gtG.new_edge_property(tname) # Create the PropertyMap
            gtG.edge_properties[key] = prop # Set the PropertyMap
            # Add the key to the already seen properties
            eprops.add(key)
    # Phase 2: Actually add all the nodes and vertices with their properties
    # Add the nodes
    vertices = {} # vertex mapping for tracking edges later
    for node, data in nxG.nodes_iter(data=True):
        # Create the vertex and annotate for our edges later
        v = gtG.add_vertex()
        vertices[node] = v
        # Set the vertex properties, not forgetting the id property
        data['id'] = str(node)
        for key, value in data.items():
            gtG.vp[key][v] = value # vp is short for vertex_properties
    # Add the edges
    for src, dst, data in nxG.edges_iter(data=True):
        # Look up the vertex structs from our vertices mapping and add edge.
        e = gtG.add_edge(vertices[src], vertices[dst])
        # Add the edge properties
        for key, value in data.items():
            gtG.ep[key][e] = value # ep is short for edge_properties
    # Done, finally!
    return gtG
def get_centrality(subg):
    """Compute betweenness centrality for a component and record outliers.

    Nodes whose centrality is at least mean + 3*stdev are treated as
    repeat candidates and stored in the global ``repeat_nodes`` dict,
    keyed by their original networkx node id.
    """
    gt_graph = nx2gt(subg)
    vertex_centrality, _ = betweenness(gt_graph)
    values = vertex_centrality.get_array()
    threshold = float(np.mean(values)) + 3 * float(np.std(values))
    for vertex in gt_graph.vertices():
        if vertex_centrality[vertex] >= threshold:
            node_id = gt_graph.vertex_properties['id'][vertex]
            repeat_nodes[node_id] = vertex_centrality[vertex]
def centrality_wrapper(graph):
    """Run repeat detection on every sufficiently large connected component.

    Components with fewer than 50 nodes are skipped.
    """
    component_count = nx.number_connected_components(graph)
    print('The graph has {} components'.format(component_count))
    components = nx.connected_component_subgraphs(graph)
    for component in tqdm.tqdm(components, total=component_count, desc='Component'):
        if len(component.nodes()) >= 50:
            get_centrality(component)
# Work on a copy so the original bundled graph G stays intact.
G_copy = G.copy()
print('Writing output...')
ofile = open(args.output,'w')
# Three rounds: detect high-centrality (repeat) nodes, remove them from the
# graph, then re-run detection so newly exposed repeats can be found.
# NOTE: xrange is Python 2 only — this script predates Python 3.
for i in xrange(3):
    centrality_wrapper(G_copy)
    for node in tqdm.tqdm(repeat_nodes, desc='Checking repeats'):
        if G_copy.has_node(node):
            G_copy.remove_node(node)
            # Record each removed repeat node with its centrality score.
            ofile.write(str(node)+'\t'+str(repeat_nodes[node])+'\n')
# NOTE(review): ofile is never closed explicitly — output relies on the
# interpreter flushing at exit; consider a `with` block.
#for u,v,data in G_copy.edges(data=True):
#    print u +"\t"+data[u][v]['ori'][0]+v+"\t"+data[u][v]['ori'][1]+"\t"+str(data[u][v]["mean"])+"\t"+str(data[u][v]["stdev"])+"\t"+str(data[u][v]["bsize"])
#nx.write_gml(G_copy,args.output)
|
[
"tqdm.tqdm",
"argparse.ArgumentParser",
"networkx.set_node_attributes",
"numpy.std",
"numpy.mean",
"networkx.Graph",
"graph_tool.centrality.betweenness",
"networkx.connected_component_subgraphs",
"networkx.number_connected_components",
"multiprocessing.cpu_count"
] |
[((172, 197), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (195, 197), False, 'import argparse\n'), ((402, 412), 'networkx.Graph', 'nx.Graph', ([], {}), '()\n', (410, 412), True, 'import networkx as nx\n'), ((420, 447), 'multiprocessing.cpu_count', 'multiprocessing.cpu_count', ([], {}), '()\n', (445, 447), False, 'import multiprocessing\n'), ((1055, 1105), 'networkx.set_node_attributes', 'nx.set_node_attributes', (['G', '"""length"""', 'contig_length'], {}), "(G, 'length', contig_length)\n", (1077, 1105), True, 'import networkx as nx\n'), ((567, 603), 'tqdm.tqdm', 'tqdm.tqdm', (['f'], {'desc': '"""Reading bundled"""'}), "(f, desc='Reading bundled')\n", (576, 603), False, 'import tqdm\n'), ((888, 924), 'tqdm.tqdm', 'tqdm.tqdm', (['f'], {'desc': '"""Reading lengths"""'}), "(f, desc='Reading lengths')\n", (897, 924), False, 'import tqdm\n'), ((5481, 5496), 'graph_tool.centrality.betweenness', 'betweenness', (['_g'], {}), '(_g)\n', (5492, 5496), False, 'from graph_tool.centrality import betweenness\n'), ((5794, 5831), 'networkx.number_connected_components', 'nx.number_connected_components', (['graph'], {}), '(graph)\n', (5824, 5831), True, 'import networkx as nx\n'), ((6202, 6250), 'tqdm.tqdm', 'tqdm.tqdm', (['repeat_nodes'], {'desc': '"""Checking repeats"""'}), "(repeat_nodes, desc='Checking repeats')\n", (6211, 6250), False, 'import tqdm\n'), ((5547, 5557), 'numpy.mean', 'np.mean', (['v'], {}), '(v)\n', (5554, 5557), True, 'import numpy as np\n'), ((5577, 5586), 'numpy.std', 'np.std', (['v'], {}), '(v)\n', (5583, 5586), True, 'import numpy as np\n'), ((5914, 5953), 'networkx.connected_component_subgraphs', 'nx.connected_component_subgraphs', (['graph'], {}), '(graph)\n', (5946, 5953), True, 'import networkx as nx\n')]
|
# Copyright (c) 2020 <NAME> <www.sean-graham.com>
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to
# deal in the Software without restriction, including without limitation the
# rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
# sell copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
from Target import Target
import os
import sys
import configparser
import logging
import time
class WikiFileTarget(Target):
    """Target that logs played tracks into a MediaWiki-formatted text file."""

    pluginName = "Wiki File Writer"
    enableArchive = True

    episodeNumber = "XX"
    showArtist = ""
    filePath = ""
    archiveURL = ""
    wikiFile = None

    def __init__(self, config, episode, episodeDate):
        """Open the per-episode wiki file and write the table header."""
        logger = logging.getLogger("wiki updater")

        self.episodeNumber = episode
        if episodeDate:
            # NOTE(review): self.episodeDate only exists when a date is
            # passed in — presumably the base class sets a default; confirm.
            self.episodeDate = episodeDate
            logger.debug(f"overriding date with {self.episodeDate}")

        # Pull the output location and show metadata from the config file.
        try:
            self.filePath = config.get('ListCommon', 'filePath')
            self.archiveURL = config.get('ListCommon', 'archiveURL')
            self.showArtist = config.get('ListCommon', 'showArtist')
        except configparser.NoSectionError:
            logger.error("ListCommon: No [ListCommon] section in config")
            return
        except configparser.NoOptionError:
            logger.error("ListCommon: Missing values in config")
            return

        # Unix path separator assumed, as in the original implementation.
        if not self.filePath.endswith("/"):
            self.filePath += "/"
        self.filePath = os.path.expanduser(self.filePath)

        fileDate = '{dt:%Y}{dt:%m}{dt:%d}'.format(dt=self.episodeDate)
        self.archiveURL = f"{self.archiveURL}{fileDate}.mp3"

        # Assemble the wiki section heading and table header.
        parts = ["\n\n=== "]
        if self.archiveURL != "":
            parts.append("[" + self.archiveURL + " ")
        parts.append("Show #" + self.episodeNumber + " - ")
        parts.append(self.getLongDate())
        if self.archiveURL != "":
            parts.append("]")
        parts.append(" ===\n")
        parts.append("{| border=1 cellspacing=0 cellpadding=5\n")
        parts.append("|'''Song'''\n")
        parts.append("|'''Artist'''\n")
        parts.append("|'''Album'''\n")

        self.wikiFile = open(self.filePath + fileDate + "-wiki.txt", 'w+')
        self.logToFile(self.wikiFile, "".join(parts))

    def logTrack(self, track, startTime):
        """Append a wiki table row for `track`, unless it is marked ignore."""
        if track.ignore is not True:
            row = f"|-\n|{track.title}\n|{track.artist}\n|{track.album}\n"
            self.logToFile(self.wikiFile, row)

    def close(self):
        """Terminate the wiki table and close the output file."""
        print("Closing Wiki File...")
        self.logToFile(self.wikiFile, "|}")
        self.wikiFile.close()
|
[
"os.path.expanduser",
"logging.getLogger"
] |
[((1476, 1509), 'logging.getLogger', 'logging.getLogger', (['"""wiki updater"""'], {}), "('wiki updater')\n", (1493, 1509), False, 'import logging\n'), ((2461, 2494), 'os.path.expanduser', 'os.path.expanduser', (['self.filePath'], {}), '(self.filePath)\n', (2479, 2494), False, 'import os\n')]
|
import json
import os
import threading
import grpc
from tiktorch.proto.inference_pb2 import Empty
from tiktorch.proto.inference_pb2_grpc import FlightControlStub
from tiktorch.server.grpc import serve
from tiktorch.utils import wait
def test_serving_on_random_port(tmpdir):
    """Start the gRPC server on an ephemeral port and verify its connection file.

    serve() binds port 0 (OS-assigned), writes the chosen address/port to a
    JSON connection file, and must answer a Ping; the test then asks it to
    shut down.
    """
    conn_file_path = str(tmpdir / "conn.json")

    def _server():
        serve("127.0.0.1", 0, connection_file_path=conn_file_path)

    srv_thread = threading.Thread(target=_server)
    srv_thread.start()

    try:
        # serve() writes the connection file once it has bound a port.
        wait(lambda: os.path.exists(conn_file_path))

        with open(conn_file_path, "r") as conn_file:
            conn_data = json.load(conn_file)

        assert conn_data["addr"] == "127.0.0.1"
        assert conn_data["port"] > 0

        addr, port = conn_data["addr"], conn_data["port"]
        chan = grpc.insecure_channel(f"{addr}:{port}")
        client = FlightControlStub(chan)
        result = client.Ping(Empty())
        assert isinstance(result, Empty)

        client.Shutdown(Empty())
    finally:
        # Fix: the server thread was started but never joined, so a failing
        # assertion leaked a live server thread beyond the test's lifetime.
        # The timeout keeps a wedged server from hanging the test run.
        srv_thread.join(timeout=10)
|
[
"threading.Thread",
"json.load",
"tiktorch.proto.inference_pb2.Empty",
"tiktorch.proto.inference_pb2_grpc.FlightControlStub",
"grpc.insecure_channel",
"os.path.exists",
"tiktorch.server.grpc.serve"
] |
[((430, 462), 'threading.Thread', 'threading.Thread', ([], {'target': '_server'}), '(target=_server)\n', (446, 462), False, 'import threading\n'), ((772, 811), 'grpc.insecure_channel', 'grpc.insecure_channel', (['f"""{addr}:{port}"""'], {}), "(f'{addr}:{port}')\n", (793, 811), False, 'import grpc\n'), ((825, 848), 'tiktorch.proto.inference_pb2_grpc.FlightControlStub', 'FlightControlStub', (['chan'], {}), '(chan)\n', (842, 848), False, 'from tiktorch.proto.inference_pb2_grpc import FlightControlStub\n'), ((353, 411), 'tiktorch.server.grpc.serve', 'serve', (['"""127.0.0.1"""', '(0)'], {'connection_file_path': 'conn_file_path'}), "('127.0.0.1', 0, connection_file_path=conn_file_path)\n", (358, 411), False, 'from tiktorch.server.grpc import serve\n'), ((606, 626), 'json.load', 'json.load', (['conn_file'], {}), '(conn_file)\n', (615, 626), False, 'import json\n'), ((875, 882), 'tiktorch.proto.inference_pb2.Empty', 'Empty', ([], {}), '()\n', (880, 882), False, 'from tiktorch.proto.inference_pb2 import Empty\n'), ((941, 948), 'tiktorch.proto.inference_pb2.Empty', 'Empty', ([], {}), '()\n', (946, 948), False, 'from tiktorch.proto.inference_pb2 import Empty\n'), ((504, 534), 'os.path.exists', 'os.path.exists', (['conn_file_path'], {}), '(conn_file_path)\n', (518, 534), False, 'import os\n')]
|
import os
import pymongo
# Database name taken from the environment; None if the variable is unset.
DB_NAME = os.getenv("DB_NAME")

# Connect to the `db` host (e.g. a docker-compose service) on the default port.
client = pymongo.MongoClient("mongodb://db:27017")
# NOTE(review): if DB_NAME is unset, client[None] raises TypeError at import
# time — confirm the variable is always provided.
db = client[DB_NAME]
|
[
"pymongo.MongoClient",
"os.getenv"
] |
[((37, 57), 'os.getenv', 'os.getenv', (['"""DB_NAME"""'], {}), "('DB_NAME')\n", (46, 57), False, 'import os\n'), ((69, 110), 'pymongo.MongoClient', 'pymongo.MongoClient', (['"""mongodb://db:27017"""'], {}), "('mongodb://db:27017')\n", (88, 110), False, 'import pymongo\n')]
|
import logging
import math
"""
1. Note
- Loop from 2 till Square Root of N and keep dividing N at every step.
2. Optimisation(s)
- Apart from 2, only ODD numbers are tested for divisiblity.
- Only numbers upto SquareRoot(n) are tested for divisibility.
3. Limitation(s)
- Do not try with numbers which has more than 15-digit prime factors.
"""
def prime_factors_using_trial_division(n):
    """Return a list of all prime factors of n, in non-decreasing order.

    Trial division: strip factors of 2 first, then test only odd candidates
    while candidate * candidate <= n.

    Fixes over the original:
      - `xrange` is Python 2 only (NameError on Python 3); replaced with a
        while loop.
      - The original computed sqrt(n) once before the loop, so the bound went
        stale as n shrank; `candidate * candidate <= n` re-evaluates it.
    """
    prime_factors = []

    # Strip all factors of 2 so the main loop can step over even candidates.
    while n % 2 == 0:
        prime_factors.append(2)
        n = n // 2

    # Test only ODD candidates starting with 3.
    candidate = 3
    while candidate * candidate <= n:
        while n % candidate == 0:
            prime_factors.append(candidate)
            n = n // candidate
            logging.debug("Factor = {0}, N = {1}".format(candidate, n))
        candidate += 2

    # Whatever remains (> 1) has no divisor <= its square root, so it is prime.
    if n > 1:
        prime_factors.append(n)

    return prime_factors
|
[
"math.sqrt"
] |
[((739, 751), 'math.sqrt', 'math.sqrt', (['n'], {}), '(n)\n', (748, 751), False, 'import math\n')]
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Thu Mar 14 14:11:07 2019
@author: mimbres
"""
import pandas as pd
import numpy as np
from tqdm import trange
# Input/output locations.
LASTFM_FILEPATH = './data/final_mapping.json'
OUTPUT_FILEPATH1 = './data/lastfm_top50_tagmtx.npy'
OUTPUT_FILEPATH2 = './data/lastfm_top50_featmtx.npy'
OUTPUT_FILEPATH3 = './data/lastfm_top50_track_ids.npy'
OUTPUT_FILEPATH4 = './data/lastfm_top50_tag_avail_cnt.npy'
SAVED_SCALER_FILEPATH = './data/std_scaler.sav'

# The 50 most frequent Last.fm tags; the column order of tag_mtx follows this list.
TOP50A = ['rock', 'pop', 'alternative', 'indie', 'favorites', 'female vocalists',
       'Love', 'alternative rock', 'electronic', 'beautiful', 'jazz', '00s',
       'singer-songwriter', 'metal', 'male vocalists', 'Awesome', 'american',
       'Mellow', 'classic rock', '90s', 'soul', 'chillout', 'punk', '80s', 'chill',
       'indie rock', 'folk', 'dance', 'instrumental', 'hard rock', 'oldies',
       'seen live', 'Favorite', 'country', 'blues', 'guitar', 'cool', 'british',
       'acoustic', 'electronica', '70s', 'Favourites', 'Hip-Hop', 'experimental',
       'easy listening', 'female vocalist', 'ambient', 'punk rock', 'funk', 'hardcore']

# Map musical-key mode strings to a binary feature.
_dict = {'major': 1, 'minor': 0}

# Load .json file...
df = pd.read_json(LASTFM_FILEPATH)
num_items = len(df)

# Shuffle (we can split train/test later)
df = df.sample(frac=1).reset_index(drop=True)

# Create empty result matrices.
tag_mtx = np.zeros((num_items, 50))       # per-track scores for the top-50 tags
feat_mtx = np.zeros((num_items, 29))      # per-track feature vector
track_ids = np.ndarray((num_items,), dtype=object)
tag_avail_cnt = np.zeros((num_items,)) # how many top-50 tags each track carries

for i in trange(num_items):
    item = np.asarray(df[0][i])  # Get one item
    tag_cnt = 0
    for tag in TOP50A:
        # Check availability of each tag in this item.
        _idx = np.where(tag == item)[0]
        # Fix: was `len(_idx) is not 0` — an identity comparison that only
        # worked because CPython caches small ints (SyntaxWarning on 3.8+).
        if len(_idx) != 0:  # If top50-tag available...
            tag_cnt += 1
            column_idx = _idx[0]
            # Fix: `np.float` alias was removed in NumPy 1.24; use builtin float.
            tag_mtx[i, TOP50A.index(tag)] = item[column_idx, 1].astype(float)
    tag_avail_cnt[i] = tag_cnt
    track_ids[i] = df[1][i][0]
    if tag_cnt != 0:  # Fix: was `tag_cnt is not 0` (identity comparison)
        _feat = np.asarray(df[1][i])
        _feat[20] = _dict.get(_feat[20])  # {'major', 'minor'} --> {0,1}
        _feat[5] = _feat[5][:4]  # '2005-01-01' --> '2005'
        feat_mtx[i,:] = _feat[[4,5,6,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25,26,27,28,29,30,31,32,33]]

print('max available tags =', np.max(tag_avail_cnt), '\n',
      'avg available tags =', np.mean(tag_avail_cnt[np.where(tag_avail_cnt!=0)]), '\n',
      'items with top50 unavailable =', len(np.where(tag_avail_cnt==0)[0]), '\n',
      'items with top50 available =', len(np.where(tag_avail_cnt!=0)[0]) )
'''
max available tags = 31.0
avg available tags = 4.705301775916366
items with top50 unavailable = 38595
items with top50 available = 123204
'''
# Drop items with no top-50 tags.
tag_mtx = tag_mtx[tag_avail_cnt!=0,:]
feat_mtx = feat_mtx[tag_avail_cnt!=0,:]
track_ids = track_ids[tag_avail_cnt!=0]

# Feature normalization with a previously fitted scaler.
import pickle
#from sklearn.preprocessing import StandardScaler
# Fix: the file handle passed to pickle.load was never closed.
# NOTE(review): pickle.load on an untrusted file executes arbitrary code —
# fine for a locally produced scaler, but do not point this at external data.
with open(SAVED_SCALER_FILEPATH, 'rb') as scaler_file:
    scaler = pickle.load(scaler_file)
feat_mtx_new = scaler.fit_transform(feat_mtx)
feat_mtx_new[:,15] = feat_mtx[:,15]  # column 15 (key mode) stays unscaled

# Save results as .npy
np.save(OUTPUT_FILEPATH1, tag_mtx.astype(np.int8))
#np.save(OUTPUT_FILEPATH2, feat_mtx.astype(np.int8))
np.save(OUTPUT_FILEPATH2, feat_mtx_new.astype(np.float32))
np.save(OUTPUT_FILEPATH3, track_ids)
np.save(OUTPUT_FILEPATH4, tag_avail_cnt.astype(np.int8))
|
[
"numpy.save",
"tqdm.trange",
"numpy.asarray",
"numpy.zeros",
"pandas.read_json",
"numpy.max",
"numpy.where",
"numpy.ndarray"
] |
[((1219, 1248), 'pandas.read_json', 'pd.read_json', (['LASTFM_FILEPATH'], {}), '(LASTFM_FILEPATH)\n', (1231, 1248), True, 'import pandas as pd\n'), ((1402, 1427), 'numpy.zeros', 'np.zeros', (['(num_items, 50)'], {}), '((num_items, 50))\n', (1410, 1427), True, 'import numpy as np\n'), ((1438, 1463), 'numpy.zeros', 'np.zeros', (['(num_items, 29)'], {}), '((num_items, 29))\n', (1446, 1463), True, 'import numpy as np\n'), ((1475, 1513), 'numpy.ndarray', 'np.ndarray', (['(num_items,)'], {'dtype': 'object'}), '((num_items,), dtype=object)\n', (1485, 1513), True, 'import numpy as np\n'), ((1530, 1552), 'numpy.zeros', 'np.zeros', (['(num_items,)'], {}), '((num_items,))\n', (1538, 1552), True, 'import numpy as np\n'), ((1564, 1581), 'tqdm.trange', 'trange', (['num_items'], {}), '(num_items)\n', (1570, 1581), False, 'from tqdm import trange\n'), ((3414, 3450), 'numpy.save', 'np.save', (['OUTPUT_FILEPATH3', 'track_ids'], {}), '(OUTPUT_FILEPATH3, track_ids)\n', (3421, 3450), True, 'import numpy as np\n'), ((1594, 1614), 'numpy.asarray', 'np.asarray', (['df[0][i]'], {}), '(df[0][i])\n', (1604, 1614), True, 'import numpy as np\n'), ((2422, 2443), 'numpy.max', 'np.max', (['tag_avail_cnt'], {}), '(tag_avail_cnt)\n', (2428, 2443), True, 'import numpy as np\n'), ((2117, 2137), 'numpy.asarray', 'np.asarray', (['df[1][i]'], {}), '(df[1][i])\n', (2127, 2137), True, 'import numpy as np\n'), ((1744, 1765), 'numpy.where', 'np.where', (['(tag == item)'], {}), '(tag == item)\n', (1752, 1765), True, 'import numpy as np\n'), ((2503, 2531), 'numpy.where', 'np.where', (['(tag_avail_cnt != 0)'], {}), '(tag_avail_cnt != 0)\n', (2511, 2531), True, 'import numpy as np\n'), ((2583, 2611), 'numpy.where', 'np.where', (['(tag_avail_cnt == 0)'], {}), '(tag_avail_cnt == 0)\n', (2591, 2611), True, 'import numpy as np\n'), ((2663, 2691), 'numpy.where', 'np.where', (['(tag_avail_cnt != 0)'], {}), '(tag_avail_cnt != 0)\n', (2671, 2691), True, 'import numpy as np\n')]
|
import pygame
class StartMenu:
    """Start-menu screen with three buttons: start game, iterative mode, quit."""

    def __init__(self, screen, font, text_colour, button_colour):
        self.__screen = screen
        self.__font = font
        self.__text_colour = text_colour
        self.__button_colour = button_colour
        self.__click = False
        self.__button_width = 150
        self.__button_height = 75
        self.__option = None        # command chosen by the user, if any
        self.__buttons_xy = None    # top-left corner of each button
        self.__button_objects = None  # "button N" -> pygame.Rect
        self.__button_command = ["start game", "iterative mode", "quit game"]
        self.__title = "Conway's Game of Life - by <NAME>"

    @property
    def Option(self):
        """The command string of the last clicked button (None until clicked)."""
        return self.__option

    def setup(self):
        """Set the window caption and clear the screen to black."""
        pygame.display.set_caption(f"{self.__title}")
        self.__screen.fill((0,0,0))

    def draw_text(self, text, x, y):
        """Render `text` centred at (x, y)."""
        textobj = self.__font.render(text, 1, self.__text_colour)
        textrect = textobj.get_rect()
        textrect.center = (x, y)
        self.__screen.blit(textobj, textrect)

    def get_button_objects(self):
        """Build the three button rectangles, vertically spaced 100px apart."""
        # NOTE(review): the y coordinate is derived from get_width(); this
        # only looks right for square windows — confirm get_height() was not
        # intended (behavior kept as-is).
        self.__buttons_xy = [
            ((self.__screen.get_width() // 2) - (self.__button_width // 2), (self.__screen.get_width() // 2) - i)
            for i in reversed(range(-100, 200, 100))
        ]
        self.__button_objects = {
            f"button {i}": pygame.Rect(self.__buttons_xy[i][0], self.__buttons_xy[i][1], self.__button_width, self.__button_height)
            for i, button in enumerate(self.__buttons_xy)
        }

    def check_collisions(self):
        """On click, record the command of the button under the mouse.

        Replaces the original copy-pasted if/elif chain (one branch per
        button) with a loop over the commands; the first hit wins, exactly
        as before.
        """
        mouse_pos = pygame.mouse.get_pos()
        for i, command in enumerate(self.__button_command):
            if self.__button_objects[f"button {i}"].collidepoint(mouse_pos):
                if self.__click:
                    self.__option = command
                break

    def display_buttons(self):
        """Draw the button rectangles, the title, and one label per button."""
        for button_rect in self.__button_objects.values():
            pygame.draw.rect(self.__screen, self.__button_colour, button_rect)
        self.draw_text(f"{self.__title}", self.__screen.get_width() // 2, self.__screen.get_height() // 4)
        # Labels are offset into the middle of each 150x75 button.
        for i, command in enumerate(self.__button_command):
            self.draw_text(command, self.__buttons_xy[i][0] + 75, self.__buttons_xy[i][1] + 35)
        pygame.display.update()

    def is_clicked(self):
        """Poll pygame events; flag a click when the left mouse button goes down."""
        self.__click = False
        for event in pygame.event.get():
            if event.type == pygame.QUIT:
                pygame.quit()
            elif event.type == pygame.MOUSEBUTTONDOWN:
                # Fix: `event.button == True` only matched because True == 1;
                # pygame's button attribute is an int and 1 means left click.
                if event.button == 1:
                    self.__click = True
|
[
"pygame.quit",
"pygame.event.get",
"pygame.draw.rect",
"pygame.Rect",
"pygame.display.update",
"pygame.mouse.get_pos",
"pygame.display.set_caption"
] |
[((671, 716), 'pygame.display.set_caption', 'pygame.display.set_caption', (['f"""{self.__title}"""'], {}), "(f'{self.__title}')\n", (697, 716), False, 'import pygame\n'), ((1517, 1539), 'pygame.mouse.get_pos', 'pygame.mouse.get_pos', ([], {}), '()\n', (1537, 1539), False, 'import pygame\n'), ((2765, 2788), 'pygame.display.update', 'pygame.display.update', ([], {}), '()\n', (2786, 2788), False, 'import pygame\n'), ((2875, 2893), 'pygame.event.get', 'pygame.event.get', ([], {}), '()\n', (2891, 2893), False, 'import pygame\n'), ((1286, 1395), 'pygame.Rect', 'pygame.Rect', (['self.__buttons_xy[i][0]', 'self.__buttons_xy[i][1]', 'self.__button_width', 'self.__button_height'], {}), '(self.__buttons_xy[i][0], self.__buttons_xy[i][1], self.\n __button_width, self.__button_height)\n', (1297, 1395), False, 'import pygame\n'), ((2206, 2302), 'pygame.draw.rect', 'pygame.draw.rect', (['self.__screen', 'self.__button_colour', 'self.__button_objects[button_object]'], {}), '(self.__screen, self.__button_colour, self.__button_objects\n [button_object])\n', (2222, 2302), False, 'import pygame\n'), ((2953, 2966), 'pygame.quit', 'pygame.quit', ([], {}), '()\n', (2964, 2966), False, 'import pygame\n')]
|
#Programmer: <NAME>
#This file contains a test step function for debugging the swept rule
import numpy, h5py, mpi4py.MPI as MPI
try:
import pycuda.driver as cuda
from pycuda.compiler import SourceModule
except Exception as e:
pass
def step(state, iidx, arrayTimeIndex, globalTimeStep):
    """Entry point called by the swept solver.

    state - 4D numpy array (t, v, x, y) where v is the number of variables
    iidx - iterable of (x, y) index pairs to update
    arrayTimeIndex - current time index into `state`
    globalTimeStep - step counter allowing scheme-dependent behaviour
    """
    # `scheme` is a module-level flag set by set_globals: True -> one-step pattern
    update = checkerOneStep if scheme else checkerTwoStep
    update(state, iidx, arrayTimeIndex, globalTimeStep)
def checkerOneStep(state, iidx, arrayTimeIndex, globalTimeStep):
    """One-step checker pattern: each listed (x, y) point at the next time
    level becomes the mean of its four orthogonal neighbours at the
    current time level, for every variable at once."""
    every_var = slice(0, state.shape[1], 1)
    neighbour_offsets = ((1, 0), (-1, 0), (0, 1), (0, -1))
    for idx, idy in iidx:
        target = (arrayTimeIndex + 1, every_var, idx, idy)
        # accumulate the four neighbours in place, then divide by 4
        first_dx, first_dy = neighbour_offsets[0]
        state[target] = state[arrayTimeIndex, every_var, idx + first_dx, idy + first_dy]
        for dx, dy in neighbour_offsets[1:]:
            state[target] += state[arrayTimeIndex, every_var, idx + dx, idy + dy]
        state[target] /= 4
def checkerTwoStep(state, iidx, arrayTimeIndex, globalTimeStep):
    """Two-step checker pattern.

    NOTE: currently identical to checkerOneStep — the next time level of each
    listed point is the average of its four orthogonal neighbours."""
    var_slice = slice(0, state.shape[1], 1)
    nxt = arrayTimeIndex + 1
    for idx, idy in iidx:
        state[nxt, var_slice, idx, idy] = state[arrayTimeIndex, var_slice, idx + 1, idy]
        state[nxt, var_slice, idx, idy] += state[arrayTimeIndex, var_slice, idx - 1, idy]
        state[nxt, var_slice, idx, idy] += state[arrayTimeIndex, var_slice, idx, idy + 1]
        state[nxt, var_slice, idx, idy] += state[arrayTimeIndex, var_slice, idx, idy - 1]
        state[nxt, var_slice, idx, idy] /= 4
def createInitialConditions(nv, nx, ny, filename="checkerConditions.hdf5"):
    """Write a checkerboard initial-condition array of shape (nv, nx, ny)
    to an HDF5 file (parallel 'mpio' driver) and return the filename."""
    comm = MPI.COMM_WORLD
    data = numpy.zeros((nv, nx, ny))
    # ones on both checkerboard colour classes: (even, even) and (odd, odd)
    data[:, 0::2, 0::2] = 1
    data[:, 1::2, 1::2] = 1
    with h5py.File(filename, "w", driver="mpio", comm=comm) as hf:
        hf.create_dataset("data", data.shape, data=data)
    return filename
def set_globals(*args, source_mod=None):
    """Use this function to set cpu global variables"""
    # args = (t0, tf, dt, dx, dy, scheme); only dt/dx/dy/scheme persist as globals.
    global dt, dx, dy, scheme  # scheme: true for one step
    t0, tf, dt, dx, dy, scheme = args
    if source_mod is not None:
        # NOTE(review): "<KEY>" looks like a redacted placeholder for the GPU
        # constant names (expected to pair up with args[2:]) — confirm the
        # original value before relying on this branch.
        keys = "<KEY>"
        nargs = args[2:]
        fc = lambda x: numpy.float64(x)
        for i, key in enumerate(keys):
            # copy each float constant into the CUDA module's global of the same name
            ckey, _ = source_mod.get_global(key)
            cuda.memcpy_htod(ckey, fc(nargs[i]))
        # the scheme flag is stored as raw bytes on the device
        ckey, _ = source_mod.get_global("SCHEME")
        cuda.memcpy_htod(ckey, bytes(scheme))
|
[
"numpy.float64",
"h5py.File",
"numpy.zeros"
] |
[((1951, 1976), 'numpy.zeros', 'numpy.zeros', (['(nv, nx, ny)'], {}), '((nv, nx, ny))\n', (1962, 1976), False, 'import numpy, h5py, mpi4py.MPI as MPI\n'), ((2156, 2206), 'h5py.File', 'h5py.File', (['filename', '"""w"""'], {'driver': '"""mpio"""', 'comm': 'comm'}), "(filename, 'w', driver='mpio', comm=comm)\n", (2165, 2206), False, 'import numpy, h5py, mpi4py.MPI as MPI\n'), ((2563, 2579), 'numpy.float64', 'numpy.float64', (['x'], {}), '(x)\n', (2576, 2579), False, 'import numpy, h5py, mpi4py.MPI as MPI\n')]
|
import nose.tools as nt
import numpy as np
import theano
import theano.tensor as T
import treeano
import treeano.nodes as tn
fX = theano.config.floatX
def test_aggregator_node_serialization():
    """AggregatorNode survives a serialization round-trip."""
    node = tn.AggregatorNode("a")
    tn.check_serialization(node)
def test_elementwise_cost_node_serialization():
    """ElementwiseCostNode survives a serialization round-trip."""
    children = {"pred": tn.IdentityNode("foo"),
                "target": tn.IdentityNode("bar")}
    tn.check_serialization(tn.ElementwiseCostNode("foo", children))
def test_total_cost_node_serialization():
    """TotalCostNode survives a serialization round-trip."""
    children = {"pred": tn.IdentityNode("foo"),
                "target": tn.IdentityNode("bar")}
    tn.check_serialization(tn.TotalCostNode("foo", children))
def test_auxilliary_cost_node_serialization():
    """AuxiliaryCostNode survives a serialization round-trip."""
    node = tn.AuxiliaryCostNode("foo", {"target": tn.IdentityNode("bar")})
    tn.check_serialization(node)
def test_total_cost_node():
    """TotalCostNode with squared error: cost equals mean((pred - target)**2),
    and is exactly zero when pred == target."""
    network = tn.TotalCostNode(
        "cost",
        {"pred": tn.InputNode("x", shape=(3, 4, 5)),
         "target": tn.InputNode("y", shape=(3, 4, 5))},
        cost_function=treeano.utils.squared_error).network()
    cost_fn = network.function(["x", "y"], ["cost"])
    pred = np.random.rand(3, 4, 5).astype(fX)
    target = np.random.rand(3, 4, 5).astype(fX)
    np.testing.assert_allclose(cost_fn(pred, target)[0],
                               ((pred - target) ** 2).mean(),
                               rtol=1e-5)
    # identical inputs must give zero cost
    for same in (pred, target):
        np.testing.assert_allclose(cost_fn(same, same)[0], 0)
def test_total_cost_node_with_weight():
    """Weighted squared error: cost equals mean(weight * (pred - target)**2)."""
    network = tn.TotalCostNode(
        "cost",
        {"pred": tn.InputNode("x", shape=(3, 4, 5)),
         "weight": tn.InputNode("w", shape=(3, 4, 5)),
         "target": tn.InputNode("y", shape=(3, 4, 5))},
        cost_function=treeano.utils.squared_error).network()
    cost_fn = network.function(["x", "y", "w"], ["cost"])
    pred = np.random.rand(3, 4, 5).astype(fX)
    target = np.random.rand(3, 4, 5).astype(fX)
    weight = np.random.rand(3, 4, 5).astype(fX)
    np.testing.assert_allclose(cost_fn(pred, target, weight)[0],
                               (((pred - target) ** 2) * weight).mean(),
                               rtol=1e-5)
    # identical pred/target must give zero cost regardless of the weights
    for same in (pred, target):
        np.testing.assert_allclose(cost_fn(same, same, weight)[0], 0)
def test_auxiliary_cost_node():
    """Three auxiliary costs accumulated through a sequential pipeline:
    total cost = mse(x, y1) + mse(x + 2, y2) + mse(2 * (x + 2), y3)."""
    pipeline = tn.SequentialNode(
        "seq",
        [tn.InputNode("x", shape=(3, 4, 5)),
         tn.AuxiliaryCostNode(
             "cost1",
             {"target": tn.InputNode("y1", shape=(3, 4, 5))}),
         tn.AddConstantNode("a1", value=2),
         tn.AuxiliaryCostNode(
             "cost2",
             {"target": tn.InputNode("y2", shape=(3, 4, 5))}),
         tn.MultiplyConstantNode("m1", value=2),
         tn.AuxiliaryCostNode(
             "cost3",
             {"target": tn.InputNode("y3", shape=(3, 4, 5))}),
         tn.ConstantNode("const", value=0),
         tn.InputElementwiseSumNode("cost")])
    network = tn.HyperparameterNode(
        "hp",
        pipeline,
        cost_reference="cost",
        cost_function=treeano.utils.squared_error,
    ).network()
    cost_fn = network.function(["x", "y1", "y2", "y3"], ["cost"])
    x = np.random.rand(3, 4, 5).astype(fX)
    targets = [np.random.rand(3, 4, 5).astype(fX) for _ in range(3)]

    def mse(a, b):
        return ((a - b) ** 2).mean()

    expected = (mse(x, targets[0])
                + mse(x + 2, targets[1])
                + mse(2 * (x + 2), targets[2]))
    np.testing.assert_allclose(cost_fn(x, *targets)[0], expected, rtol=1e-5)
|
[
"treeano.nodes.ConstantNode",
"treeano.nodes.MultiplyConstantNode",
"treeano.nodes.InputElementwiseSumNode",
"treeano.nodes.AddConstantNode",
"treeano.nodes.AggregatorNode",
"treeano.nodes.InputNode",
"numpy.random.rand",
"treeano.nodes.IdentityNode"
] |
[((224, 246), 'treeano.nodes.AggregatorNode', 'tn.AggregatorNode', (['"""a"""'], {}), "('a')\n", (241, 246), True, 'import treeano.nodes as tn\n'), ((1102, 1125), 'numpy.random.rand', 'np.random.rand', (['(3)', '(4)', '(5)'], {}), '(3, 4, 5)\n', (1116, 1125), True, 'import numpy as np\n'), ((1145, 1168), 'numpy.random.rand', 'np.random.rand', (['(3)', '(4)', '(5)'], {}), '(3, 4, 5)\n', (1159, 1168), True, 'import numpy as np\n'), ((1852, 1875), 'numpy.random.rand', 'np.random.rand', (['(3)', '(4)', '(5)'], {}), '(3, 4, 5)\n', (1866, 1875), True, 'import numpy as np\n'), ((1895, 1918), 'numpy.random.rand', 'np.random.rand', (['(3)', '(4)', '(5)'], {}), '(3, 4, 5)\n', (1909, 1918), True, 'import numpy as np\n'), ((1938, 1961), 'numpy.random.rand', 'np.random.rand', (['(3)', '(4)', '(5)'], {}), '(3, 4, 5)\n', (1952, 1961), True, 'import numpy as np\n'), ((3224, 3247), 'numpy.random.rand', 'np.random.rand', (['(3)', '(4)', '(5)'], {}), '(3, 4, 5)\n', (3238, 3247), True, 'import numpy as np\n'), ((381, 403), 'treeano.nodes.IdentityNode', 'tn.IdentityNode', (['"""foo"""'], {}), "('foo')\n", (396, 403), True, 'import treeano.nodes as tn\n'), ((424, 446), 'treeano.nodes.IdentityNode', 'tn.IdentityNode', (['"""bar"""'], {}), "('bar')\n", (439, 446), True, 'import treeano.nodes as tn\n'), ((571, 593), 'treeano.nodes.IdentityNode', 'tn.IdentityNode', (['"""foo"""'], {}), "('foo')\n", (586, 593), True, 'import treeano.nodes as tn\n'), ((614, 636), 'treeano.nodes.IdentityNode', 'tn.IdentityNode', (['"""bar"""'], {}), "('bar')\n", (629, 636), True, 'import treeano.nodes as tn\n'), ((772, 794), 'treeano.nodes.IdentityNode', 'tn.IdentityNode', (['"""bar"""'], {}), "('bar')\n", (787, 794), True, 'import treeano.nodes as tn\n'), ((3269, 3292), 'numpy.random.rand', 'np.random.rand', (['(3)', '(4)', '(5)'], {}), '(3, 4, 5)\n', (3283, 3292), True, 'import numpy as np\n'), ((893, 927), 'treeano.nodes.InputNode', 'tn.InputNode', (['"""x"""'], {'shape': '(3, 4, 5)'}), "('x', shape=(3, 4, 
5))\n", (905, 927), True, 'import treeano.nodes as tn\n'), ((948, 982), 'treeano.nodes.InputNode', 'tn.InputNode', (['"""y"""'], {'shape': '(3, 4, 5)'}), "('y', shape=(3, 4, 5))\n", (960, 982), True, 'import treeano.nodes as tn\n'), ((1583, 1617), 'treeano.nodes.InputNode', 'tn.InputNode', (['"""x"""'], {'shape': '(3, 4, 5)'}), "('x', shape=(3, 4, 5))\n", (1595, 1617), True, 'import treeano.nodes as tn\n'), ((1638, 1672), 'treeano.nodes.InputNode', 'tn.InputNode', (['"""w"""'], {'shape': '(3, 4, 5)'}), "('w', shape=(3, 4, 5))\n", (1650, 1672), True, 'import treeano.nodes as tn\n'), ((1693, 1727), 'treeano.nodes.InputNode', 'tn.InputNode', (['"""y"""'], {'shape': '(3, 4, 5)'}), "('y', shape=(3, 4, 5))\n", (1705, 1727), True, 'import treeano.nodes as tn\n'), ((2428, 2462), 'treeano.nodes.InputNode', 'tn.InputNode', (['"""x"""'], {'shape': '(3, 4, 5)'}), "('x', shape=(3, 4, 5))\n", (2440, 2462), True, 'import treeano.nodes as tn\n'), ((2605, 2638), 'treeano.nodes.AddConstantNode', 'tn.AddConstantNode', (['"""a1"""'], {'value': '(2)'}), "('a1', value=2)\n", (2623, 2638), True, 'import treeano.nodes as tn\n'), ((2781, 2819), 'treeano.nodes.MultiplyConstantNode', 'tn.MultiplyConstantNode', (['"""m1"""'], {'value': '(2)'}), "('m1', value=2)\n", (2804, 2819), True, 'import treeano.nodes as tn\n'), ((2962, 2995), 'treeano.nodes.ConstantNode', 'tn.ConstantNode', (['"""const"""'], {'value': '(0)'}), "('const', value=0)\n", (2977, 2995), True, 'import treeano.nodes as tn\n'), ((3010, 3044), 'treeano.nodes.InputElementwiseSumNode', 'tn.InputElementwiseSumNode', (['"""cost"""'], {}), "('cost')\n", (3036, 3044), True, 'import treeano.nodes as tn\n'), ((2553, 2588), 'treeano.nodes.InputNode', 'tn.InputNode', (['"""y1"""'], {'shape': '(3, 4, 5)'}), "('y1', shape=(3, 4, 5))\n", (2565, 2588), True, 'import treeano.nodes as tn\n'), ((2729, 2764), 'treeano.nodes.InputNode', 'tn.InputNode', (['"""y2"""'], {'shape': '(3, 4, 5)'}), "('y2', shape=(3, 4, 5))\n", (2741, 2764), True, 'import 
treeano.nodes as tn\n'), ((2910, 2945), 'treeano.nodes.InputNode', 'tn.InputNode', (['"""y3"""'], {'shape': '(3, 4, 5)'}), "('y3', shape=(3, 4, 5))\n", (2922, 2945), True, 'import treeano.nodes as tn\n')]
|
# NOTE WARNING NEVER CHANGE THIS FIRST LINE!!!! NEVER EVER
import cudf
from collections import OrderedDict
from enum import Enum
from urllib.parse import urlparse
from threading import Lock
from weakref import ref
from pyblazing.apiv2.filesystem import FileSystem
from pyblazing.apiv2 import DataType
from .hive import *
import time
import datetime
import socket
import errno
import subprocess
import os
import re
import pandas
import numpy as np
import pyarrow
from urllib.parse import urlparse
from urllib.parse import ParseResult
from pathlib import PurePath
import cio
import pyblazing
import cudf
import dask_cudf
import dask
import jpype
import dask.distributed
import netifaces as ni
import random
# Put the BlazingSQL Calcite jars (shipped inside the conda environment) on the
# JVM classpath *before* starting the JVM, then bind the Java classes used to
# build the SQL catalog and generate relational algebra.
jpype.addClassPath(
    os.path.join(
        os.getenv("CONDA_PREFIX"),
        'lib/blazingsql-algebra.jar'))
jpype.addClassPath(
    os.path.join(
        os.getenv("CONDA_PREFIX"),
        'lib/blazingsql-algebra-core.jar'))

jpype.startJVM(jpype.getDefaultJVMPath(), '-ea', convertStrings=False)

ArrayClass = jpype.JClass('java.util.ArrayList')
ColumnTypeClass = jpype.JClass(
    'com.blazingdb.calcite.catalog.domain.CatalogColumnDataType')
# warm-up / sanity call on the Java enum factory
dataType = ColumnTypeClass.fromString("GDF_INT8")
ColumnClass = jpype.JClass(
    'com.blazingdb.calcite.catalog.domain.CatalogColumnImpl')
TableClass = jpype.JClass(
    'com.blazingdb.calcite.catalog.domain.CatalogTableImpl')
DatabaseClass = jpype.JClass(
    'com.blazingdb.calcite.catalog.domain.CatalogDatabaseImpl')
BlazingSchemaClass = jpype.JClass('com.blazingdb.calcite.schema.BlazingSchema')
RelationalAlgebraGeneratorClass = jpype.JClass(
    'com.blazingdb.calcite.application.RelationalAlgebraGenerator')
def get_np_dtype_to_gdf_dtype_str(dtype):
    """Map a numpy dtype (or anything ``np.dtype`` accepts) to the GDF type
    name used by the BlazingSQL/Calcite catalog.

    Raises KeyError for dtypes with no GDF equivalent."""
    name_to_gdf = {
        'float64': 'GDF_FLOAT64',
        'float32': 'GDF_FLOAT32',
        'int64': 'GDF_INT64',
        'int32': 'GDF_INT32',
        'int16': 'GDF_INT16',
        'int8': 'GDF_INT8',
        'bool_': 'GDF_BOOL8',
        'datetime64[s]': 'GDF_DATE64',
        'datetime64[ms]': 'GDF_DATE64',
        'datetime64[ns]': 'GDF_TIMESTAMP',
        'datetime64[us]': 'GDF_TIMESTAMP',
        'datetime64': 'GDF_DATE64',
        'object_': 'GDF_STRING',
        'str_': 'GDF_STRING',
        # '<M8[...]' spellings are the same dtypes as the datetime64 entries
        '<M8[s]': 'GDF_DATE64',
        '<M8[ms]': 'GDF_DATE64',
        '<M8[ns]': 'GDF_TIMESTAMP',
        '<M8[us]': 'GDF_TIMESTAMP',
    }
    lookup = {np.dtype(name): gdf for name, gdf in name_to_gdf.items()}
    return lookup[np.dtype(dtype)]
def checkSocket(socketNum):
    """Return True if TCP port *socketNum* on 127.0.0.1 can be bound, else False.

    Uses SO_REUSEADDR, so ports lingering in TIME_WAIT count as free; ports
    with an active listener do not."""
    probe = socket.socket(socket.AF_INET, socket.SOCK_STREAM, 0)
    probe.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
    is_free = False
    try:
        probe.bind(("127.0.0.1", socketNum))
        is_free = True
    except socket.error as err:
        if err.errno != errno.EADDRINUSE:
            # something other than "address already in use" went wrong
            print("ERROR: Something happened when checking socket " + str(socketNum))
    finally:
        probe.close()
    return is_free
def initializeBlazing(ralId=0, networkInterface='lo', singleNode=False,
                      allocator="managed", pool=True, initial_pool_size=None, enable_logging=False):
    """Initialise one BlazingSQL RAL engine instance on this process.

    Resolves the worker IP from *networkInterface*, picks a free communication
    port (random in [10000, 32000] offset by *ralId*), configures the cudf GPU
    allocator, and starts the C++ engine via cio.

    Returns (communication_port, worker_ip, current_working_directory)."""
    workerIp = ni.ifaddresses(networkInterface)[ni.AF_INET][0]['addr']
    # probe random candidate ports until one is actually bindable
    while True:
        ralCommunicationPort = random.randint(10000, 32000) + ralId
        if checkSocket(ralCommunicationPort):
            break
    cudf.set_allocator(allocator=allocator,
                       pool=pool,
                       initial_pool_size=initial_pool_size,  # default: half of total GPU memory
                       enable_logging=enable_logging)
    cio.initializeCaller(
        ralId,
        0,
        networkInterface.encode(),
        workerIp.encode(),
        ralCommunicationPort,
        singleNode)
    return ralCommunicationPort, workerIp, os.getcwd()
def getNodePartitions(df, client):
    """Map each dask worker *name* to the list of partition indices of *df*
    that it holds after persisting the dataframe."""
    df = df.persist()
    workers = client.scheduler_info()['workers']
    # translate worker address -> worker name
    address_to_name = {address: info['name'] for address, info in workers.items()}
    dask.distributed.wait(df)
    worker_partitions = {}
    for key, owners in client.who_has(df).items():
        # dask keys look like "('<name>', <partition>)" — extract the partition index
        partition = int(key[key.find(",") + 2:len(key) - 1])
        name = address_to_name[owners[0]]
        worker_partitions.setdefault(name, []).append(partition)
    return worker_partitions
def collectPartitionsRunQuery(
        masterIndex,
        nodes,
        tables,
        fileTypes,
        ctxToken,
        algebra,
        accessToken):
    """Materialise this worker's dask_cudf partitions into local cudf frames,
    then execute the query on this node via cio.runQueryCaller."""
    import dask.distributed
    worker_id = dask.distributed.get_worker().name
    for table_name in tables:
        table = tables[table_name]
        if not isinstance(table.input, dask_cudf.core.DataFrame):
            continue
        partitions = table.get_partitions(worker_id)
        if len(partitions) == 0:
            # no data on this worker: keep an empty frame with the right schema
            table.input = table.input.get_partition(0).head(0)
        elif len(partitions) == 1:
            table.input = table.input.get_partition(
                partitions[0]).compute(scheduler='threads')
        else:
            # several partitions: compute each and concatenate locally
            local_parts = [table.input.get_partition(p).compute()
                           for p in partitions]
            table.input = cudf.concat(local_parts)
    return cio.runQueryCaller(
        masterIndex,
        nodes,
        tables,
        fileTypes,
        ctxToken,
        algebra,
        accessToken)
# returns a map of table names to the indices of the columns needed. If there are more than one table scan for one table, it merged the needed columns
# if the column list is empty, it means we want all columns
def mergeTableScans(tableScanInfo):
    """Merge the column requirements of every scan of each table.

    Returns {table_name: sorted list of needed column indices}. An empty list
    means "all columns": any single scan requesting all columns (an empty
    column list) forces the merged result to all columns."""
    merged = {}
    for table_name, info in tableScanInfo.items():
        needed = []
        for scan_columns in info['table_columns']:
            if not scan_columns:
                # this scan wants every column -> so does the merged result
                needed = []
                break
            needed = sorted(set(needed) | set(scan_columns))
        merged[table_name] = needed
    return merged
def modifyAlegebraAndTablesForArrowBasedOnColumnUsage(algebra, tableScanInfo, originalTables, table_columns_in_use):
    """For arrow-backed tables, trim each table to only the columns actually
    used and rewrite the `projects=[...]` lists inside *algebra* so the scan
    indices point into the trimmed (remapped) column set.

    Non-arrow tables pass through unchanged. Returns (new_tables, new_algebra).
    """
    newTables={}
    for table_name in tableScanInfo:
        if originalTables[table_name].fileType == DataType.ARROW:
            # keep only the merged in-use columns (empty list = all columns)
            newTables[table_name] = originalTables[table_name].filterAndRemapColumns(table_columns_in_use[table_name])
            for index in range(0,len(tableScanInfo[table_name]['table_scans'])):
                orig_scan = tableScanInfo[table_name]['table_scans'][index]
                orig_col_indexes = tableScanInfo[table_name]['table_columns'][index]
                table_columns_we_want = table_columns_in_use[table_name]
                new_col_indexes = []
                if len(table_columns_we_want) > 0:
                    if orig_col_indexes == table_columns_we_want:
                        # this scan already uses exactly the merged set: identity remap
                        new_col_indexes = list(range(0, len(orig_col_indexes)))
                    else:
                        # remap each wanted column to its position in the merged set
                        for new_index, merged_col_index in enumerate(table_columns_we_want):
                            if merged_col_index in orig_col_indexes:
                                new_col_indexes.append(new_index)

                # textual rewrite of the projection list inside this scan,
                # then of the scan inside the overall algebra string
                orig_project = 'projects=[' + str(orig_col_indexes) + ']'
                new_project = 'projects=[' + str(new_col_indexes) + ']'
                new_scan = orig_scan.replace(orig_project, new_project)
                algebra = algebra.replace(orig_scan, new_scan)
        else:
            newTables[table_name] = originalTables[table_name]

    return newTables, algebra
class BlazingTable(object):
    """In-memory description of a table registered with a BlazingContext.

    Wraps either a cudf/dask_cudf DataFrame, a pyarrow Table, or a list of
    files plus the schema information needed to scan them.
    """

    def __init__(
            self,
            input,
            fileType,
            files=None,
            datasource=[],
            calcite_to_file_indices=None,
            num_row_groups=None,
            args={},
            convert_gdf_to_dask=False,
            convert_gdf_to_dask_partitions=1,
            client=None,
            uri_values=[],
            in_file=[],
            force_conversion=False,
            metadata=None):
        # NOTE: the mutable default arguments above are only stored, never
        # mutated here; kept for interface compatibility.
        self.fileType = fileType
        if fileType == DataType.ARROW:
            if force_conversion:
                # converts to cudf for querying
                self.input = cudf.DataFrame.from_arrow(input)
                self.fileType = DataType.CUDF
            else:
                # keep an empty cudf frame for the schema; keep the arrow data
                self.input = cudf.DataFrame.from_arrow(input.schema.empty_table())
                self.arrow_table = input
        else:
            self.input = input

        self.calcite_to_file_indices = calcite_to_file_indices
        self.files = files
        self.datasource = datasource
        # TODO, cc @percy, @cristian!
        # num_row_groups: computed in create_table.parse_schema, but not used in run_query.
        self.num_row_groups = num_row_groups
        self.args = args
        # BUGFIX: was `fileType == DataType.CUDF or DataType.DASK_CUDF`, which
        # is always truthy. Testing self.fileType also covers arrow tables that
        # were force-converted to cudf above.
        if self.fileType in (DataType.CUDF, DataType.DASK_CUDF):
            if convert_gdf_to_dask and isinstance(self.input, cudf.DataFrame):
                self.input = dask_cudf.from_cudf(
                    self.input, npartitions=convert_gdf_to_dask_partitions)
            if isinstance(self.input, dask_cudf.core.DataFrame):
                self.dask_mapping = getNodePartitions(self.input, client)
        self.uri_values = uri_values
        self.in_file = in_file

        # slices: computed in create_table, then reused in the sql method
        self.slices = None
        # metadata: computed in create_table, after get_metadata
        self.metadata = metadata
        # row_groups_ids: vector<vector<int>>, one vector of row-group ids per
        # file. (BUGFIX: was misspelled `row_groups_id`; consumers such as
        # _optimize_with_skip_data write `row_groups_ids`.)
        self.row_groups_ids = []
        # (startIndex, batchSize) of the file slice this node owns
        self.offset = (0, 0)

    def has_metadata(self):
        """True if parquet metadata was collected and is non-empty."""
        if isinstance(self.metadata, dask_cudf.core.DataFrame):
            return not self.metadata.compute().empty
        if self.metadata is not None:
            return not self.metadata.empty
        return False

    def filterAndRemapColumns(self, tableColumns):
        """Arrow-only: return a new queryable BlazingTable holding just the
        columns whose indices are in *tableColumns* (empty list = all)."""
        if len(tableColumns) == 0:  # len = 0 means all columns
            return BlazingTable(self.arrow_table, DataType.ARROW, force_conversion=True)
        columns = []
        names = []
        for i, column in enumerate(self.arrow_table.itercolumns()):
            if i in tableColumns:
                names.append(self.arrow_table.field(i).name)
                columns.append(column)
        new_table = pyarrow.Table.from_arrays(columns, names=names)
        return BlazingTable(new_table, DataType.ARROW, force_conversion=True)

    def convertForQuery(self):
        """Return a queryable (cudf-backed) copy of this arrow table."""
        return BlazingTable(self.arrow_table, DataType.ARROW, force_conversion=True)

# until this is implemented we cant do self join with arrow tables
#    def unionColumns(self,otherTable):

    def getSlices(self, numSlices):
        """Split this table's files into *numSlices* contiguous batches.

        Returns one BlazingTable per slice, each with its share of files,
        uri_values, num_row_groups and metadata partition, plus an `offset`
        of (startIndex, batchSize) into the original file list. In-memory
        tables (files is None) are replicated to every slice."""
        nodeFilesList = []
        if self.files is None:
            for i in range(0, numSlices):
                nodeFilesList.append(BlazingTable(self.input, self.fileType))
            return nodeFilesList

        remaining = len(self.files)
        startIndex = 0
        for i in range(0, numSlices):
            # spread files as evenly as possible across the remaining slices
            batchSize = int(remaining / (numSlices - i))
            tempFiles = self.files[startIndex: startIndex + batchSize]
            uri_values = self.uri_values[startIndex: startIndex + batchSize]

            if isinstance(self.metadata, cudf.DataFrame) or self.metadata is None:
                slice_metadata = self.metadata
            else:
                # dask-backed metadata: take this slice's partition
                slice_metadata = self.metadata.get_partition(i).compute()

            slice_row_groups = (None if self.num_row_groups is None
                                else self.num_row_groups[startIndex: startIndex + batchSize])
            bt = BlazingTable(self.input,
                              self.fileType,
                              files=tempFiles,
                              calcite_to_file_indices=self.calcite_to_file_indices,
                              num_row_groups=slice_row_groups,
                              uri_values=uri_values,
                              args=self.args,
                              metadata=slice_metadata)
            bt.offset = (startIndex, batchSize)
            nodeFilesList.append(bt)
            startIndex = startIndex + batchSize
            remaining = remaining - batchSize
        return nodeFilesList

    def get_partitions(self, worker):
        """Partition indices of the dask input owned by *worker* (by name)."""
        return self.dask_mapping[worker]
class BlazingContext(object):
    def __init__(self,
                 dask_client=None, # if None, it will run in single node
                 network_interface=None,
                 allocator="managed", # options are "default" or "managed". Where "managed" uses Unified Virtual Memory (UVM) and may use system memory if GPU memory runs out
                 pool=True, # if True, it will allocate a memory pool in the beginning. This can greatly improve performance
                 initial_pool_size=None, # Initial size of memory pool in bytes (if pool=True). If None, it will default to using half of the GPU memory
                 enable_logging=False): # If set to True the memory allocator logging will be enabled, but can negatively impact perforamance
        """
        Create a BlazingContext.

        :param dask_client: dask.distributed Client for distributed execution,
            or None to run single-node in this process.
        :param network_interface: interface workers use to reach each other
            (defaults to 'eth0' when distributed, 'lo' single-node).
        :param allocator: "default" or "managed" (UVM) GPU memory allocator.
        :param pool: pre-allocate a GPU memory pool for performance.
        :param initial_pool_size: pool size in bytes; None = half of GPU memory.
        :param enable_logging: enable allocator logging (may cost performance).
        """
        self.lock = Lock()
        self.finalizeCaller = ref(cio.finalizeCaller)
        self.dask_client = dask_client
        self.nodes = []
        self.node_cwds = []
        # NOTE(review): this immediately overwrites the weakref assigned two
        # lines above with a no-op lambda — confirm which one is intended.
        self.finalizeCaller = lambda: NotImplemented

        if(dask_client is not None):
            if network_interface is None:
                network_interface = 'eth0'

            worker_list = []
            dask_futures = []
            masterIndex = 0
            i = 0
            # launch initializeBlazing on every dask worker, one ralId each
            for worker in list(self.dask_client.scheduler_info()["workers"]):
                dask_futures.append(
                    self.dask_client.submit(
                        initializeBlazing,
                        ralId=i,
                        networkInterface=network_interface,
                        singleNode=False,
                        allocator=allocator,
                        pool=pool,
                        initial_pool_size=initial_pool_size,
                        enable_logging=enable_logging,
                        workers=[worker]))
                worker_list.append(worker)
                i = i + 1
            i = 0
            # collect (port, ip, cwd) from every worker and record node info
            for connection in dask_futures:
                ralPort, ralIp, cwd = connection.result()
                node = {}
                node['worker'] = worker_list[i]
                node['ip'] = ralIp
                node['communication_port'] = ralPort
                self.nodes.append(node)
                self.node_cwds.append(cwd)
                i = i + 1
        else:
            # single-node: initialise the engine in this process on loopback
            ralPort, ralIp, cwd = initializeBlazing(
                ralId=0, networkInterface='lo', singleNode=True,
                allocator=allocator, pool=pool, initial_pool_size=initial_pool_size, enable_logging=enable_logging)
            node = {}
            node['ip'] = ralIp
            node['communication_port'] = ralPort
            self.nodes.append(node)
            self.node_cwds.append(cwd)

        # Calcite catalog (via jpype) used to generate relational algebra
        self.fs = FileSystem()

        self.db = DatabaseClass("main")
        self.schema = BlazingSchemaClass(self.db)
        self.generator = RelationalAlgebraGeneratorClass(self.schema)
        self.tables = {}
        self.logs_initialized = False

        print("BlazingContext ready")
    def ready(self, wait=False):
        """Ping the engine; if *wait* is True, block until it responds.

        NOTE(review): references ``self.client`` and ``waitForPingSuccess``,
        neither of which is defined in this module — likely legacy/dead code.
        """
        if wait:
            waitForPingSuccess(self.client)
            return True
        else:
            return self.client.ping()
    def __del__(self):
        # Best-effort engine shutdown on garbage collection (finalizeCaller is
        # replaced by a no-op lambda in __init__).
        self.finalizeCaller()
    def __repr__(self):
        # NOTE(review): self.connection is never assigned in this module, so
        # this would raise AttributeError — confirm the intended attribute.
        return "BlazingContext('%s')" % (self.connection)
    def __str__(self):
        # NOTE(review): self.connection is never assigned in this module — see __repr__.
        return self.connection
# BEGIN FileSystem interface
    def localfs(self, prefix, **kwargs):
        """Register a local filesystem under *prefix* (delegates to FileSystem)."""
        return self.fs.localfs(self.dask_client, prefix, **kwargs)
# Use result, error_msg = hdfs(args) where result can be True|False
    def hdfs(self, prefix, **kwargs):
        """Register an HDFS filesystem under *prefix*.

        Use as ``result, error_msg = bc.hdfs(...)`` where result is True|False."""
        return self.fs.hdfs(self.dask_client, prefix, **kwargs)
    def s3(self, prefix, **kwargs):
        """Register an S3 filesystem under *prefix* (delegates to FileSystem)."""
        return self.fs.s3(self.dask_client, prefix, **kwargs)
    def gs(self, prefix, **kwargs):
        """Register a Google Cloud Storage filesystem under *prefix*."""
        return self.fs.gs(self.dask_client, prefix, **kwargs)
    def show_filesystems(self):
        """Print the filesystems currently registered with this context."""
        print(self.fs)
# END FileSystem interface
def _to_url(self, str_input):
url = urlparse(str_input)
return url
def _to_path(self, url):
path = PurePath(url.path)
return path
# BEGIN SQL interface
    def explain(self, sql):
        """Return the relational-algebra plan for *sql* as a string (no execution)."""
        return str(self.generator.getRelationalAlgebraString(sql))
    def add_remove_table(self, tableName, addTable, table=None):
        """Register (*addTable*=True) or unregister a table in the Calcite
        catalog, rebuilding the schema and algebra generator afterwards.

        Guarded by ``self.lock`` because it mutates shared JVM catalog state.
        """
        self.lock.acquire()
        try:
            if(addTable):
                # drop any stale definition first, then record the python-side table
                self.db.removeTable(tableName)
                self.tables[tableName] = table

                # build the Java column list from the (possibly dask) frame's schema
                arr = ArrayClass()
                order = 0
                for column in table.input.columns:
                    if(isinstance(table.input, dask_cudf.core.DataFrame)):
                        # head(0) gives a local cudf frame with the same schema
                        dataframe_column = table.input.head(0)._data[column]
                    else:
                        dataframe_column = table.input._data[column]
                    data_sz = len(dataframe_column)
                    dtype = get_np_dtype_to_gdf_dtype_str(
                        dataframe_column.dtype)
                    dataType = ColumnTypeClass.fromString(dtype)
                    column = ColumnClass(column, dataType, order)
                    arr.add(column)
                    order = order + 1
                tableJava = TableClass(tableName, self.db, arr)
                self.db.addTable(tableJava)
                # the schema/generator must be rebuilt after any catalog change
                self.schema = BlazingSchemaClass(self.db)
                self.generator = RelationalAlgebraGeneratorClass(self.schema)
            else:
                self.db.removeTable(tableName)
                self.schema = BlazingSchemaClass(self.db)
                self.generator = RelationalAlgebraGeneratorClass(self.schema)
                del self.tables[tableName]
        finally:
            self.lock.release()
    def create_table(self, table_name, input, **kwargs):
        """Create and register a table named *table_name*.

        *input* may be a hive cursor, a path or list of paths, a pandas or
        cudf DataFrame, a pyarrow Table, or a dask_cudf DataFrame; it is
        normalised into a BlazingTable, registered in the catalog, and
        returned (None if no supported type matched).
        """
        table = None
        extra_columns = []
        uri_values = []
        file_format_hint = kwargs.get(
            'file_format', 'undefined')  # See datasource.file_format
        extra_kwargs = {}
        in_file = []
        # hive cursor: resolve partition folders and schema hints from hive
        if(isinstance(input, hive.Cursor)):
            hive_table_name = kwargs.get('hive_table_name', table_name)
            folder_list, uri_values, file_format_hint, extra_kwargs, extra_columns, in_file = get_hive_table(
                input, hive_table_name)
            kwargs.update(extra_kwargs)
            input = folder_list
        if isinstance(input, str):
            input = [input, ]

        if isinstance(input, pandas.DataFrame):
            input = cudf.DataFrame.from_pandas(input)
        if isinstance(input, pyarrow.Table):
            if (self.dask_client is not None):
                # distributed mode converts arrow to cudf (falls through below)
                input = cudf.DataFrame.from_arrow(input)
            else:
                table = BlazingTable(
                    input,
                    DataType.ARROW)
        if isinstance(input, cudf.DataFrame):
            if (self.dask_client is not None):
                # distribute the frame across the workers, one partition per node
                table = BlazingTable(
                    input,
                    DataType.DASK_CUDF,
                    convert_gdf_to_dask=True,
                    convert_gdf_to_dask_partitions=len(
                        self.nodes),
                    client=self.dask_client)
            else:
                table = BlazingTable(input, DataType.CUDF)
        elif isinstance(input, list):
            # list of files/paths: parse the schema, then slice files per node
            parsedSchema = self._parseSchema(
                input, file_format_hint, kwargs, extra_columns)
            file_type = parsedSchema['file_type']
            table = BlazingTable(
                parsedSchema['columns'],
                file_type,
                files=parsedSchema['files'],
                datasource=parsedSchema['datasource'],
                calcite_to_file_indices=parsedSchema['calcite_to_file_indices'],
                num_row_groups=parsedSchema['num_row_groups'],
                args=parsedSchema['args'],
                uri_values=uri_values,
                in_file=in_file)

            table.slices = table.getSlices(len(self.nodes))
            if parsedSchema['file_type'] == DataType.PARQUET :
                # parquet row-group metadata enables skip-data optimisation
                parsedMetadata = self._parseMetadata(input, file_format_hint, table.slices, parsedSchema, kwargs, extra_columns)
                # NOTE(review): both branches below do the same assignment
                if isinstance(parsedMetadata, cudf.DataFrame):
                    table.metadata = parsedMetadata
                else:
                    table.metadata = parsedMetadata

        elif isinstance(input, dask_cudf.core.DataFrame):
            table = BlazingTable(
                input,
                DataType.DASK_CUDF,
                client=self.dask_client)
        if table is not None:
            self.add_remove_table(table_name, True, table)
        return table
    def drop_table(self, table_name):
        """Unregister *table_name* from this context and the Calcite catalog."""
        self.add_remove_table(table_name, False)
def _parseSchema(self, input, file_format_hint, kwargs, extra_columns):
if self.dask_client:
worker = tuple(self.dask_client.scheduler_info()['workers'])[0]
connection = self.dask_client.submit(
cio.parseSchemaCaller,
input,
file_format_hint,
kwargs,
extra_columns,
workers=[worker])
return connection.result()
else:
return cio.parseSchemaCaller(
input, file_format_hint, kwargs, extra_columns)
    def _parseMetadata(self, input, file_format_hint, currentTableNodes, schema, kwargs, extra_columns):
        """Collect parquet metadata for each node's file slice.

        Distributed: submits cio.parseMetadataCaller on every worker for that
        worker's file subset and returns a dask dataframe over the futures.
        Single node: calls cio.parseMetadataCaller directly and returns its
        (cudf) result.
        """
        if self.dask_client:
            dask_futures = []
            workers = tuple(self.dask_client.scheduler_info()['workers'])
            worker_id = 0
            for worker in workers:
                # each worker parses only the files assigned to its slice
                file_subset = [ file.decode() for file in currentTableNodes[worker_id].files]
                connection = self.dask_client.submit(
                    cio.parseMetadataCaller,
                    file_subset,
                    currentTableNodes[worker_id].offset,
                    schema,
                    file_format_hint,
                    kwargs,
                    extra_columns,
                    workers=[worker])
                dask_futures.append(connection)
                worker_id += 1
            return dask.dataframe.from_delayed(dask_futures)
        else:
            return cio.parseMetadataCaller(
                input, currentTableNodes[0].offset, schema, file_format_hint, kwargs, extra_columns)
def _optimize_with_skip_data(self, masterIndex, table_name, table_files, nodeTableList, scan_table_query, fileTypes):
if self.dask_client is None:
current_table = nodeTableList[0][table_name]
table_tuple = (table_name, current_table)
file_indices_and_rowgroup_indices = cio.runSkipDataCaller(masterIndex, self.nodes, table_tuple, fileTypes, 0, scan_table_query, 0)
if not file_indices_and_rowgroup_indices.empty:
file_and_rowgroup_indices = file_indices_and_rowgroup_indices.to_pandas()
files = file_and_rowgroup_indices['file_handle_index'].values.tolist()
grouped = file_and_rowgroup_indices.groupby('file_handle_index')
actual_files = []
current_table.row_groups_ids = []
for group_id in grouped.groups:
row_indices = grouped.groups[group_id].values.tolist()
actual_files.append(table_files[group_id])
row_groups_col = file_and_rowgroup_indices['row_group_index'].values.tolist()
row_group_ids = [row_groups_col[i] for i in row_indices]
current_table.row_groups_ids.append(row_group_ids)
current_table.files = actual_files
else:
dask_futures = []
i = 0
for node in self.nodes:
worker = node['worker']
current_table = nodeTableList[i][table_name]
table_tuple = (table_name, current_table)
dask_futures.append(
self.dask_client.submit(
cio.runSkipDataCaller,
masterIndex, self.nodes, table_tuple, fileTypes, 0, scan_table_query, 0,
workers=[worker]))
i = i + 1
result = dask.dataframe.from_delayed(dask_futures)
for index in range(len(self.nodes)):
file_indices_and_rowgroup_indices = result.get_partition(index).compute()
if file_indices_and_rowgroup_indices.empty :
continue
file_and_rowgroup_indices = file_indices_and_rowgroup_indices.to_pandas()
files = file_and_rowgroup_indices['file_handle_index'].values.tolist()
grouped = file_and_rowgroup_indices.groupby('file_handle_index')
actual_files = []
current_table.row_groups_ids = []
for group_id in grouped.groups:
row_indices = grouped.groups[group_id].values.tolist()
actual_files.append(table_files[group_id])
row_groups_col = file_and_rowgroup_indices['row_group_index'].values.tolist()
row_group_ids = [row_groups_col[i] for i in row_indices]
current_table.row_groups_ids.append(row_group_ids)
current_table.files = actual_files
def sql(self, sql, table_list=[], algebra=None):
# TODO: remove hardcoding
masterIndex = 0
nodeTableList = [{} for _ in range(len(self.nodes))]
fileTypes = []
if (algebra is None):
algebra = self.explain(sql)
if self.dask_client is None:
relational_algebra_steps = cio.getTableScanInfoCaller(algebra)
else:
worker = tuple(self.dask_client.scheduler_info()['workers'])[0]
connection = self.dask_client.submit(
cio.getTableScanInfoCaller,
algebra,
workers=[worker])
relational_algebra_steps = connection.result()
table_columns = mergeTableScans(relational_algebra_steps)
new_tables, algebra = modifyAlegebraAndTablesForArrowBasedOnColumnUsage(algebra, relational_algebra_steps,self.tables, table_columns)
for table in new_tables:
fileTypes.append(new_tables[table].fileType)
ftype = new_tables[table].fileType
if(ftype == DataType.PARQUET or ftype == DataType.ORC or ftype == DataType.JSON or ftype == DataType.CSV):
currentTableNodes = new_tables[table].getSlices(len(self.nodes))
elif(new_tables[table].fileType == DataType.DASK_CUDF):
currentTableNodes = []
for node in self.nodes:
currentTableNodes.append(new_tables[table])
elif(new_tables[table].fileType == DataType.CUDF or new_tables[table].fileType == DataType.ARROW):
currentTableNodes = []
for node in self.nodes:
currentTableNodes.append(new_tables[table])
j = 0
for nodeList in nodeTableList:
nodeList[table] = currentTableNodes[j]
j = j + 1
if new_tables[table].has_metadata():
scan_table_query = relational_algebra_steps[table]['table_scans'][0]
self._optimize_with_skip_data(masterIndex, table, new_tables[table].files, nodeTableList, scan_table_query, fileTypes)
ctxToken = random.randint(0, 64000)
accessToken = 0
if (len(table_list) > 0):
print("NOTE: You no longer need to send a table list to the .sql() funtion")
if self.dask_client is None:
result = cio.runQueryCaller(
masterIndex,
self.nodes,
nodeTableList[0],
fileTypes,
ctxToken,
algebra,
accessToken)
else:
dask_futures = []
i = 0
for node in self.nodes:
worker = node['worker']
dask_futures.append(
self.dask_client.submit(
collectPartitionsRunQuery,
masterIndex,
self.nodes,
nodeTableList[i],
fileTypes,
ctxToken,
algebra,
accessToken,
workers=[worker]))
i = i + 1
result = dask.dataframe.from_delayed(dask_futures)
return result
# END SQL interface
# BEGIN LOG interface
def log(self, query, logs_table_name='bsql_logs'):
if not self.logs_initialized:
self.logs_table_name = logs_table_name
log_files = [self.node_cwds[i] + '/RAL.' + \
str(i) + '.log' for i in range(0, len(self.node_cwds))]
#print(log_files)
dtypes = [
'date64',
'int32',
'str',
'int32',
'int16',
'int16',
'str',
'float32',
'str',
'int32',
'str',
'int32']
names = [
'log_time',
'node_id',
'type',
'query_id',
'step',
'substep',
'info',
'duration',
'extra1',
'data1',
'extra2',
'data2']
t = self.create_table(
self.logs_table_name,
log_files,
delimiter='|',
dtype=dtypes,
names=names,
file_format='csv')
#print("table created")
#print(t)
self.logs_initialized = True
return self.sql(query)
|
[
"socket.socket",
"cudf.DataFrame.from_arrow",
"cudf.set_allocator",
"weakref.ref",
"urllib.parse.urlparse",
"random.randint",
"dask_cudf.from_cudf",
"dask.distributed.wait",
"dask.dataframe.from_delayed",
"pyarrow.Table.from_arrays",
"pyblazing.apiv2.filesystem.FileSystem",
"threading.Lock",
"cio.runSkipDataCaller",
"cio.parseMetadataCaller",
"cudf.concat",
"cudf.DataFrame.from_pandas",
"dask.distributed.get_worker",
"cio.getTableScanInfoCaller",
"jpype.getDefaultJVMPath",
"os.getenv",
"cio.parseSchemaCaller",
"jpype.JClass",
"os.getcwd",
"cio.runQueryCaller",
"numpy.dtype",
"netifaces.ifaddresses",
"pathlib.PurePath"
] |
[((1027, 1062), 'jpype.JClass', 'jpype.JClass', (['"""java.util.ArrayList"""'], {}), "('java.util.ArrayList')\n", (1039, 1062), False, 'import jpype\n'), ((1081, 1155), 'jpype.JClass', 'jpype.JClass', (['"""com.blazingdb.calcite.catalog.domain.CatalogColumnDataType"""'], {}), "('com.blazingdb.calcite.catalog.domain.CatalogColumnDataType')\n", (1093, 1155), False, 'import jpype\n'), ((1225, 1295), 'jpype.JClass', 'jpype.JClass', (['"""com.blazingdb.calcite.catalog.domain.CatalogColumnImpl"""'], {}), "('com.blazingdb.calcite.catalog.domain.CatalogColumnImpl')\n", (1237, 1295), False, 'import jpype\n'), ((1314, 1383), 'jpype.JClass', 'jpype.JClass', (['"""com.blazingdb.calcite.catalog.domain.CatalogTableImpl"""'], {}), "('com.blazingdb.calcite.catalog.domain.CatalogTableImpl')\n", (1326, 1383), False, 'import jpype\n'), ((1405, 1477), 'jpype.JClass', 'jpype.JClass', (['"""com.blazingdb.calcite.catalog.domain.CatalogDatabaseImpl"""'], {}), "('com.blazingdb.calcite.catalog.domain.CatalogDatabaseImpl')\n", (1417, 1477), False, 'import jpype\n'), ((1504, 1562), 'jpype.JClass', 'jpype.JClass', (['"""com.blazingdb.calcite.schema.BlazingSchema"""'], {}), "('com.blazingdb.calcite.schema.BlazingSchema')\n", (1516, 1562), False, 'import jpype\n'), ((1597, 1673), 'jpype.JClass', 'jpype.JClass', (['"""com.blazingdb.calcite.application.RelationalAlgebraGenerator"""'], {}), "('com.blazingdb.calcite.application.RelationalAlgebraGenerator')\n", (1609, 1673), False, 'import jpype\n'), ((957, 982), 'jpype.getDefaultJVMPath', 'jpype.getDefaultJVMPath', ([], {}), '()\n', (980, 982), False, 'import jpype\n'), ((2627, 2679), 'socket.socket', 'socket.socket', (['socket.AF_INET', 'socket.SOCK_STREAM', '(0)'], {}), '(socket.AF_INET, socket.SOCK_STREAM, 0)\n', (2640, 2679), False, 'import socket\n'), ((3617, 3740), 'cudf.set_allocator', 'cudf.set_allocator', ([], {'allocator': 'allocator', 'pool': 'pool', 'initial_pool_size': 'initial_pool_size', 'enable_logging': 'enable_logging'}), 
'(allocator=allocator, pool=pool, initial_pool_size=\n initial_pool_size, enable_logging=enable_logging)\n', (3635, 3740), False, 'import cudf\n'), ((4016, 4027), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (4025, 4027), False, 'import os\n'), ((4295, 4320), 'dask.distributed.wait', 'dask.distributed.wait', (['df'], {}), '(df)\n', (4316, 4320), False, 'import dask\n'), ((5894, 5987), 'cio.runQueryCaller', 'cio.runQueryCaller', (['masterIndex', 'nodes', 'tables', 'fileTypes', 'ctxToken', 'algebra', 'accessToken'], {}), '(masterIndex, nodes, tables, fileTypes, ctxToken, algebra,\n accessToken)\n', (5912, 5987), False, 'import cio\n'), ((758, 783), 'os.getenv', 'os.getenv', (['"""CONDA_PREFIX"""'], {}), "('CONDA_PREFIX')\n", (767, 783), False, 'import os\n'), ((870, 895), 'os.getenv', 'os.getenv', (['"""CONDA_PREFIX"""'], {}), "('CONDA_PREFIX')\n", (879, 895), False, 'import os\n'), ((1746, 1765), 'numpy.dtype', 'np.dtype', (['"""float64"""'], {}), "('float64')\n", (1754, 1765), True, 'import numpy as np\n'), ((1790, 1809), 'numpy.dtype', 'np.dtype', (['"""float32"""'], {}), "('float32')\n", (1798, 1809), True, 'import numpy as np\n'), ((1834, 1851), 'numpy.dtype', 'np.dtype', (['"""int64"""'], {}), "('int64')\n", (1842, 1851), True, 'import numpy as np\n'), ((1874, 1891), 'numpy.dtype', 'np.dtype', (['"""int32"""'], {}), "('int32')\n", (1882, 1891), True, 'import numpy as np\n'), ((1914, 1931), 'numpy.dtype', 'np.dtype', (['"""int16"""'], {}), "('int16')\n", (1922, 1931), True, 'import numpy as np\n'), ((1954, 1970), 'numpy.dtype', 'np.dtype', (['"""int8"""'], {}), "('int8')\n", (1962, 1970), True, 'import numpy as np\n'), ((1992, 2009), 'numpy.dtype', 'np.dtype', (['"""bool_"""'], {}), "('bool_')\n", (2000, 2009), True, 'import numpy as np\n'), ((2032, 2057), 'numpy.dtype', 'np.dtype', (['"""datetime64[s]"""'], {}), "('datetime64[s]')\n", (2040, 2057), True, 'import numpy as np\n'), ((2081, 2107), 'numpy.dtype', 'np.dtype', (['"""datetime64[ms]"""'], {}), 
"('datetime64[ms]')\n", (2089, 2107), True, 'import numpy as np\n'), ((2131, 2157), 'numpy.dtype', 'np.dtype', (['"""datetime64[ns]"""'], {}), "('datetime64[ns]')\n", (2139, 2157), True, 'import numpy as np\n'), ((2184, 2210), 'numpy.dtype', 'np.dtype', (['"""datetime64[us]"""'], {}), "('datetime64[us]')\n", (2192, 2210), True, 'import numpy as np\n'), ((2237, 2259), 'numpy.dtype', 'np.dtype', (['"""datetime64"""'], {}), "('datetime64')\n", (2245, 2259), True, 'import numpy as np\n'), ((2283, 2302), 'numpy.dtype', 'np.dtype', (['"""object_"""'], {}), "('object_')\n", (2291, 2302), True, 'import numpy as np\n'), ((2326, 2342), 'numpy.dtype', 'np.dtype', (['"""str_"""'], {}), "('str_')\n", (2334, 2342), True, 'import numpy as np\n'), ((2366, 2384), 'numpy.dtype', 'np.dtype', (['"""<M8[s]"""'], {}), "('<M8[s]')\n", (2374, 2384), True, 'import numpy as np\n'), ((2408, 2427), 'numpy.dtype', 'np.dtype', (['"""<M8[ms]"""'], {}), "('<M8[ms]')\n", (2416, 2427), True, 'import numpy as np\n'), ((2451, 2470), 'numpy.dtype', 'np.dtype', (['"""<M8[ns]"""'], {}), "('<M8[ns]')\n", (2459, 2470), True, 'import numpy as np\n'), ((2497, 2516), 'numpy.dtype', 'np.dtype', (['"""<M8[us]"""'], {}), "('<M8[us]')\n", (2505, 2516), True, 'import numpy as np\n'), ((2557, 2572), 'numpy.dtype', 'np.dtype', (['dtype'], {}), '(dtype)\n', (2565, 2572), True, 'import numpy as np\n'), ((3453, 3481), 'random.randint', 'random.randint', (['(10000)', '(32000)'], {}), '(10000, 32000)\n', (3467, 3481), False, 'import random\n'), ((5027, 5056), 'dask.distributed.get_worker', 'dask.distributed.get_worker', ([], {}), '()\n', (5054, 5056), False, 'import dask\n'), ((11570, 11617), 'pyarrow.Table.from_arrays', 'pyarrow.Table.from_arrays', (['columns'], {'names': 'names'}), '(columns, names=names)\n', (11595, 11617), False, 'import pyarrow\n'), ((15156, 15162), 'threading.Lock', 'Lock', ([], {}), '()\n', (15160, 15162), False, 'from threading import Lock\n'), ((15193, 15216), 'weakref.ref', 'ref', 
(['cio.finalizeCaller'], {}), '(cio.finalizeCaller)\n', (15196, 15216), False, 'from weakref import ref\n'), ((17335, 17347), 'pyblazing.apiv2.filesystem.FileSystem', 'FileSystem', ([], {}), '()\n', (17345, 17347), False, 'from pyblazing.apiv2.filesystem import FileSystem\n'), ((18669, 18688), 'urllib.parse.urlparse', 'urlparse', (['str_input'], {}), '(str_input)\n', (18677, 18688), False, 'from urllib.parse import urlparse\n'), ((18753, 18771), 'pathlib.PurePath', 'PurePath', (['url.path'], {}), '(url.path)\n', (18761, 18771), False, 'from pathlib import PurePath\n'), ((30345, 30369), 'random.randint', 'random.randint', (['(0)', '(64000)'], {}), '(0, 64000)\n', (30359, 30369), False, 'import random\n'), ((3575, 3603), 'random.randint', 'random.randint', (['(10000)', '(32000)'], {}), '(10000, 32000)\n', (3589, 3603), False, 'import random\n'), ((21160, 21193), 'cudf.DataFrame.from_pandas', 'cudf.DataFrame.from_pandas', (['input'], {}), '(input)\n', (21186, 21193), False, 'import cudf\n'), ((23913, 23982), 'cio.parseSchemaCaller', 'cio.parseSchemaCaller', (['input', 'file_format_hint', 'kwargs', 'extra_columns'], {}), '(input, file_format_hint, kwargs, extra_columns)\n', (23934, 23982), False, 'import cio\n'), ((24851, 24892), 'dask.dataframe.from_delayed', 'dask.dataframe.from_delayed', (['dask_futures'], {}), '(dask_futures)\n', (24878, 24892), False, 'import dask\n'), ((24927, 25039), 'cio.parseMetadataCaller', 'cio.parseMetadataCaller', (['input', 'currentTableNodes[0].offset', 'schema', 'file_format_hint', 'kwargs', 'extra_columns'], {}), '(input, currentTableNodes[0].offset, schema,\n file_format_hint, kwargs, extra_columns)\n', (24950, 25039), False, 'import cio\n'), ((25389, 25487), 'cio.runSkipDataCaller', 'cio.runSkipDataCaller', (['masterIndex', 'self.nodes', 'table_tuple', 'fileTypes', '(0)', 'scan_table_query', '(0)'], {}), '(masterIndex, self.nodes, table_tuple, fileTypes, 0,\n scan_table_query, 0)\n', (25410, 25487), False, 'import cio\n'), ((27050, 
27091), 'dask.dataframe.from_delayed', 'dask.dataframe.from_delayed', (['dask_futures'], {}), '(dask_futures)\n', (27077, 27091), False, 'import dask\n'), ((28555, 28590), 'cio.getTableScanInfoCaller', 'cio.getTableScanInfoCaller', (['algebra'], {}), '(algebra)\n', (28581, 28590), False, 'import cio\n'), ((30576, 30684), 'cio.runQueryCaller', 'cio.runQueryCaller', (['masterIndex', 'self.nodes', 'nodeTableList[0]', 'fileTypes', 'ctxToken', 'algebra', 'accessToken'], {}), '(masterIndex, self.nodes, nodeTableList[0], fileTypes,\n ctxToken, algebra, accessToken)\n', (30594, 30684), False, 'import cio\n'), ((31465, 31506), 'dask.dataframe.from_delayed', 'dask.dataframe.from_delayed', (['dask_futures'], {}), '(dask_futures)\n', (31492, 31506), False, 'import dask\n'), ((3370, 3402), 'netifaces.ifaddresses', 'ni.ifaddresses', (['networkInterface'], {}), '(networkInterface)\n', (3384, 3402), True, 'import netifaces as ni\n'), ((9182, 9214), 'cudf.DataFrame.from_arrow', 'cudf.DataFrame.from_arrow', (['input'], {}), '(input)\n', (9207, 9214), False, 'import cudf\n'), ((9964, 10039), 'dask_cudf.from_cudf', 'dask_cudf.from_cudf', (['self.input'], {'npartitions': 'convert_gdf_to_dask_partitions'}), '(self.input, npartitions=convert_gdf_to_dask_partitions)\n', (9983, 10039), False, 'import dask_cudf\n'), ((21311, 21343), 'cudf.DataFrame.from_arrow', 'cudf.DataFrame.from_arrow', (['input'], {}), '(input)\n', (21336, 21343), False, 'import cudf\n'), ((5853, 5882), 'cudf.concat', 'cudf.concat', (['table_partitions'], {}), '(table_partitions)\n', (5864, 5882), False, 'import cudf\n')]
|
from __future__ import annotations
__copyright__ = """
Copyright (C) 2020 <NAME>
Copyright (C) 2020 <NAME>
Copyright (C) 2020 <NAME>
Copyright (C) 2021 <NAME>
"""
__license__ = """
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
"""
# {{{ docs
__doc__ = """
.. currentmodule:: pytato
.. autofunction:: abs
.. autofunction:: sqrt
.. autofunction:: sin
.. autofunction:: cos
.. autofunction:: tan
.. autofunction:: arcsin
.. autofunction:: arccos
.. autofunction:: arctan
.. autofunction:: conj
.. autofunction:: arctan2
.. autofunction:: sinh
.. autofunction:: cosh
.. autofunction:: tanh
.. autofunction:: exp
.. autofunction:: log
.. autofunction:: log10
.. autofunction:: isnan
.. autofunction:: real
.. autofunction:: imag
"""
# }}}
import numpy as np
import pymbolic.primitives as prim
from typing import Tuple, Optional
from pytato.array import Array, ArrayOrScalar, IndexLambda, _dtype_any
from pytato.scalar_expr import SCALAR_CLASSES
from pymbolic import var
def _apply_elem_wise_func(inputs: Tuple[ArrayOrScalar],
func_name: str,
ret_dtype: Optional[_dtype_any] = None
) -> ArrayOrScalar:
if all(isinstance(x, SCALAR_CLASSES) for x in inputs):
np_func = getattr(np, func_name)
return np_func(*inputs) # type: ignore
if not inputs:
raise ValueError("at least one argument must be present")
shape = None
sym_args = []
bindings = {}
for index, inp in enumerate(inputs):
if isinstance(inp, Array):
if inp.dtype.kind not in ["f", "c"]:
raise ValueError("only floating-point or complex "
"arguments supported")
if shape is None:
shape = inp.shape
elif inp.shape != shape:
# FIXME: merge this logic with arithmetic, so that broadcasting
# is implemented properly
raise NotImplementedError("broadcasting in function application")
if ret_dtype is None:
ret_dtype = inp.dtype
bindings[f"in_{index}"] = inp
sym_args.append(
prim.Subscript(var(f"in_{index}"),
tuple(var(f"_{i}") for i in range(len(shape)))))
else:
sym_args.append(inp)
assert shape is not None
assert ret_dtype is not None
return IndexLambda(
prim.Call(var(f"pytato.c99.{func_name}"), tuple(sym_args)),
shape, ret_dtype, bindings)
def abs(x: Array) -> ArrayOrScalar:
    """Elementwise absolute value of *x*.

    For complex input the result dtype is the matching real dtype
    (e.g. complex128 -> float64); otherwise the input dtype is kept.
    """
    if x.dtype.kind == "c":
        # Probe numpy for the real dtype paired with this complex dtype.
        result_dtype = np.empty(0, dtype=x.dtype).real.dtype
        return _apply_elem_wise_func((x,), "abs", ret_dtype=result_dtype)
    return _apply_elem_wise_func((x,), "abs", ret_dtype=x.dtype)
def sqrt(x: Array) -> ArrayOrScalar:
    """Elementwise square root."""
    return _apply_elem_wise_func((x,), "sqrt")
def sin(x: Array) -> ArrayOrScalar:
    """Elementwise sine."""
    return _apply_elem_wise_func((x,), "sin")
def cos(x: Array) -> ArrayOrScalar:
    """Elementwise cosine."""
    return _apply_elem_wise_func((x,), "cos")
def tan(x: Array) -> ArrayOrScalar:
    """Elementwise tangent."""
    return _apply_elem_wise_func((x,), "tan")
def arcsin(x: Array) -> ArrayOrScalar:
    """Elementwise inverse sine (C99 name: ``asin``)."""
    return _apply_elem_wise_func((x,), "asin")
def arccos(x: Array) -> ArrayOrScalar:
    """Elementwise inverse cosine (C99 name: ``acos``)."""
    return _apply_elem_wise_func((x,), "acos")
def arctan(x: Array) -> ArrayOrScalar:
    """Elementwise inverse tangent (C99 name: ``atan``)."""
    return _apply_elem_wise_func((x,), "atan")
def conj(x: Array) -> ArrayOrScalar:
    """Elementwise complex conjugate.

    Real-valued input is returned unchanged, since conjugation is the
    identity on real dtypes.
    """
    if x.dtype.kind == "c":
        return _apply_elem_wise_func((x,), "conj")
    return x
def arctan2(y: Array, x: Array) -> ArrayOrScalar:
    """Elementwise two-argument arctangent of ``y/x`` (C99 ``atan2``)."""
    return _apply_elem_wise_func((y, x), "atan2")  # type:ignore
def sinh(x: Array) -> ArrayOrScalar:
    """Elementwise hyperbolic sine."""
    return _apply_elem_wise_func((x,), "sinh")
def cosh(x: Array) -> ArrayOrScalar:
    """Elementwise hyperbolic cosine."""
    return _apply_elem_wise_func((x,), "cosh")
def tanh(x: Array) -> ArrayOrScalar:
    """Elementwise hyperbolic tangent."""
    return _apply_elem_wise_func((x,), "tanh")
def exp(x: Array) -> ArrayOrScalar:
    """Elementwise natural exponential."""
    return _apply_elem_wise_func((x,), "exp")
def log(x: Array) -> ArrayOrScalar:
    """Elementwise natural logarithm."""
    return _apply_elem_wise_func((x,), "log")
def log10(x: Array) -> ArrayOrScalar:
    """Elementwise base-10 logarithm."""
    return _apply_elem_wise_func((x,), "log10")
def isnan(x: Array) -> ArrayOrScalar:
    """Elementwise NaN test; the result dtype is int32 (0 or 1)."""
    return _apply_elem_wise_func((x,), "isnan", np.dtype(np.int32))
def real(x: Array) -> ArrayOrScalar:
    """Elementwise real part of *x*.

    Real-valued input is returned unchanged; complex input yields an
    array of the matching real dtype.
    """
    if x.dtype.kind != "c":
        return x
    # Probe numpy for the real dtype paired with this complex dtype.
    result_dtype = np.empty(0, dtype=x.dtype).real.dtype
    return _apply_elem_wise_func((x,), "real", ret_dtype=result_dtype)
def imag(x: Array) -> ArrayOrScalar:
    """Elementwise imaginary part of *x*.

    For real-valued input the imaginary part is identically zero, so a
    zero array of the same shape and dtype is returned.
    """
    if x.dtype.kind != "c":
        import pytato as pt
        return pt.zeros(x.shape, dtype=x.dtype)
    # Probe numpy for the real dtype paired with this complex dtype.
    result_dtype = np.empty(0, dtype=x.dtype).real.dtype
    return _apply_elem_wise_func((x,), "imag", ret_dtype=result_dtype)
# vim: fdm=marker
|
[
"pytato.zeros",
"numpy.dtype",
"pymbolic.var",
"numpy.empty"
] |
[((5205, 5223), 'numpy.dtype', 'np.dtype', (['np.int32'], {}), '(np.int32)\n', (5213, 5223), True, 'import numpy as np\n'), ((5632, 5664), 'pytato.zeros', 'pt.zeros', (['x.shape'], {'dtype': 'x.dtype'}), '(x.shape, dtype=x.dtype)\n', (5640, 5664), True, 'import pytato as pt\n'), ((3424, 3454), 'pymbolic.var', 'var', (['f"""pytato.c99.{func_name}"""'], {}), "(f'pytato.c99.{func_name}')\n", (3427, 3454), False, 'from pymbolic import var\n'), ((3603, 3629), 'numpy.empty', 'np.empty', (['(0)'], {'dtype': 'x.dtype'}), '(0, dtype=x.dtype)\n', (3611, 3629), True, 'import numpy as np\n'), ((5315, 5341), 'numpy.empty', 'np.empty', (['(0)'], {'dtype': 'x.dtype'}), '(0, dtype=x.dtype)\n', (5323, 5341), True, 'import numpy as np\n'), ((5541, 5567), 'numpy.empty', 'np.empty', (['(0)'], {'dtype': 'x.dtype'}), '(0, dtype=x.dtype)\n', (5549, 5567), True, 'import numpy as np\n'), ((3174, 3192), 'pymbolic.var', 'var', (['f"""in_{index}"""'], {}), "(f'in_{index}')\n", (3177, 3192), False, 'from pymbolic import var\n'), ((3224, 3236), 'pymbolic.var', 'var', (['f"""_{i}"""'], {}), "(f'_{i}')\n", (3227, 3236), False, 'from pymbolic import var\n')]
|
import numpy as np
from io import TextIOWrapper
from typing import Iterable, Any, Union, TextIO, List, Optional
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.metrics.pairwise import cosine_similarity
from plagiarism.sources import Source
class Output(object):
    """
    Class that formats ndarray similarity data to a plain format.

    :parameter
        data: ndarray (or nested sequence) of per-document scores in [0, 1]
        mapping: mapping of a target array with data (array)
        sorted: whether getlist() orders entries by score
        nim_percentage: percentage of minimum matching similarity

    >>> out = Output(data=...)
    >>> out.getlist()
    >>> out.get()
    """

    def __init__(
            self,
            data: np.ndarray,
            *,
            mapping: Optional[list],
            sorted: Optional[bool] = True,
            nim_percentage: Optional[float] = 1.0
    ) -> None:
        self.data = data
        self.map = mapping
        self.sorted = sorted
        self.nim_percentage = nim_percentage

    @staticmethod
    def _sorting(d: Iterable[dict], *, reverse=False) -> Iterable:
        """
        Sort an array of score dictionaries by score.

        The ``score`` values are pre-formatted strings, so they are
        converted back to float for comparison; comparing the raw strings
        would sort lexicographically (e.g. "9.50" after "100.00").

        :parameter:
            d: array of dictionary
            reverse: is reverse ordering
        :return:
            a sorted array
        """
        return sorted(d, key=lambda x: float(x['score']), reverse=reverse)

    def getlist(self) -> List:
        """
        Get list of dictionaries in an array, optionally sorted by score.

        :return:
            An array
        """
        results = self._generate_result()
        return list(self._sorting(results)) if self.sorted else list(results)

    def get(self) -> float:
        """
        Get the highest similarity score as a percentage.

        :return:
            The maximum score (float)
        """
        result = [item[0] * 100 for item in self.data]
        result.sort(reverse=True)
        return result[0]

    def _generate_result(self) -> Iterable:
        """Generator that converts the ndarray to dicts with percent scores."""
        if self.map:
            for index, score in enumerate(self.data):
                _score: float = score[0] * 100
                # Drop matches below the similarity floor.
                if _score >= self.nim_percentage:
                    yield dict(doc=self.map[index], score="{:.2f}".format(_score))
        else:
            # NOTE(review): without a mapping, nim_percentage is not
            # applied — confirm whether that asymmetry is intentional.
            for item in self.data:
                yield dict(score="{:.2f}".format(item[0] * 100))

    def __call__(self, *args, **kwargs):
        return self.getlist()

    def __iter__(self):
        # Must return an iterator, not a list: returning the list itself
        # made ``iter(Output(...))`` raise TypeError.
        return iter(self.getlist())
class Plagiarism(object):
    """
    Detect plagiarism between a reference dataset and a query document.

    Documents are embedded with scikit-learn's tf-idf vectorizer and
    compared with cosine similarity.

    :parameter
        source: `Source` instance having file or file content

    >>> plg = Plagiarism(source=...)
    >>> plg.compare(...).get()      # percentage as a float
    >>> plg.compare(...).getlist()
    """

    def __init__(
            self,
            source: Source,
            *,
            nim_percentage: Optional[float] = 1.0
    ) -> None:
        self._tfidf_vectorizer = TfidfVectorizer()
        self.source = source
        self.nim_percentage = nim_percentage

    def _cosine_similarity(self, x, y) -> Any:
        """Cosine similarity between samples: K(x, y) = <x, y> / (||x||*||y||)."""
        return cosine_similarity(x, y)

    def _get_source(self) -> Union[Iterable, list]:
        """Texts of the reference corpus."""
        return self.source.get_content()

    def _compare_transform(self, raw_document) -> Any:
        """Fit on corpus plus query together; return their pairwise score."""
        corpus = list(self._get_source())
        tfidf = self._tfidf_vectorizer.fit_transform(corpus + [raw_document])
        return (tfidf * tfidf.T).A[0, 1]

    @staticmethod
    def _get_input_content(f: Union[bytes, TextIO]) -> str:
        """Decode bytes, or read a file-like object, into text."""
        if type(f) is bytes:
            return f.decode()
        return f.read()

    def compare(
            self,
            raw_document: Union[TextIOWrapper, TextIO, bytes, str]
    ) -> Output:
        """
        Compare cosine similarity between documents.

        :param raw_document: Text file or text contents
        :return:
            Instance of Output
        """
        if type(raw_document) != str:
            raw_document = self._get_input_content(raw_document)
        # Fit on the reference corpus, then project the query into the
        # same vector space before scoring.
        corpus_matrix = self._tfidf_vectorizer.fit_transform(self.source.get_content())
        query_matrix = self._tfidf_vectorizer.transform([raw_document])
        scores = self._cosine_similarity(corpus_matrix, query_matrix)
        return Output(data=scores, mapping=self.source.get_mapping(),
                      nim_percentage=self.nim_percentage)
|
[
"sklearn.feature_extraction.text.TfidfVectorizer",
"sklearn.metrics.pairwise.cosine_similarity"
] |
[((2982, 2999), 'sklearn.feature_extraction.text.TfidfVectorizer', 'TfidfVectorizer', ([], {}), '()\n', (2997, 2999), False, 'from sklearn.feature_extraction.text import TfidfVectorizer\n'), ((3241, 3264), 'sklearn.metrics.pairwise.cosine_similarity', 'cosine_similarity', (['x', 'y'], {}), '(x, y)\n', (3258, 3264), False, 'from sklearn.metrics.pairwise import cosine_similarity\n')]
|
import cv2
import numpy as np
import random
import os
def imread(path):
    """Load the image at *path* and return it with channels ordered RGB."""
    raw = cv2.imread(path, cv2.IMREAD_UNCHANGED)
    # OpenCV decodes images as BGR; reorder the last axis to RGB.
    return raw[:, :, [2, 1, 0]]
def imsave(path, img):
    """Write an RGB image *img* to *path*."""
    # cv2.imwrite expects BGR ordering, so swap channels before saving.
    cv2.imwrite(path, img[:, :, [2, 1, 0]])
# dataset_path = '/DATA/wangshen_data/ShortLongDataset/Sony240/full_sharp'
dataset_path = '/DATA/wangshen_data/ShortLongDataset/Sony240/test'

# Walk every sequence directory and try to decode each frame, printing the
# path of any file that fails to load (corrupt or truncated image).
all_dirs = sorted(os.listdir(dataset_path))

for seq_dir in all_dirs:
    list_path = os.path.join(dataset_path, seq_dir)
    items = sorted(os.listdir(list_path))  # imgs
    num = len(items)
    print(list_path, num)
    for it in items:
        img_path = os.path.join(list_path, it)
        try:
            imread(img_path)
        except Exception:
            # A bare ``except:`` would also swallow KeyboardInterrupt and
            # SystemExit; only trap real decode/read failures.
            print(img_path)
[
"cv2.imread",
"os.path.join",
"os.listdir",
"cv2.imwrite"
] |
[((101, 139), 'cv2.imread', 'cv2.imread', (['path', 'cv2.IMREAD_UNCHANGED'], {}), '(path, cv2.IMREAD_UNCHANGED)\n', (111, 139), False, 'import cv2\n'), ((375, 397), 'cv2.imwrite', 'cv2.imwrite', (['path', 'img'], {}), '(path, img)\n', (386, 397), False, 'import cv2\n'), ((560, 584), 'os.listdir', 'os.listdir', (['dataset_path'], {}), '(dataset_path)\n', (570, 584), False, 'import os\n'), ((638, 669), 'os.path.join', 'os.path.join', (['dataset_path', 'dir'], {}), '(dataset_path, dir)\n', (650, 669), False, 'import os\n'), ((689, 710), 'os.listdir', 'os.listdir', (['list_path'], {}), '(list_path)\n', (699, 710), False, 'import os\n'), ((807, 834), 'os.path.join', 'os.path.join', (['list_path', 'it'], {}), '(list_path, it)\n', (819, 834), False, 'import os\n')]
|
import sentry_top
from collections import defaultdict
from nydus.db import create_cluster
from time import time
from django.conf import settings
from django.core.exceptions import ImproperlyConfigured
from sentry.models import Project
from sentry.plugins.base import Plugin
if not getattr(settings, 'SENTRY_TOP', None):
raise ImproperlyConfigured('You need to configure SENTRY_TOP')
def get_cluster(hosts=None, router='nydus.db.routers.keyvalue.PartitionRouter'):
    """Build a Nydus Redis cluster for the given host map and router.

    When *hosts* is None a single localhost/default host is used. Note
    that only ``None`` triggers the default — an explicit empty dict is
    passed through unchanged.
    """
    if hosts is None:
        hosts = {0: {}}  # localhost / default
    config = {
        'engine': 'nydus.db.backends.redis.Redis',
        'router': router,
        'hosts': hosts,
    }
    return create_cluster(config)
redis = get_cluster(**settings.SENTRY_TOP['redis'])
MINUTES = settings.SENTRY_TOP.get('total_minutes', 15)
class TopPlugin(Plugin):
    """Sentry plugin that tracks the most active projects, ala ``top``.

    Event counts are recorded in Redis under one sorted set per minute
    (``stop:p:<minute>``), each expiring after MINUTES + 1 minutes.
    """
    # Plugin metadata shown in the Sentry UI.
    author = 'Sentry Team'
    author_url = 'https://github.com/getsentry/sentry-top'
    version = sentry_top.VERSION
    description = 'Tracks active projects ala `top`'
    resource_links = [
        ('Bug Tracker', 'https://github.com/getsentry/sentry-top/issues'),
        ('Source', 'https://github.com/getsentry/sentry-top'),
    ]

    slug = 'top'
    title = 'Top'
    conf_title = title
    conf_key = 'top'

    def can_enable_for_projects(self):
        # Global plugin: cannot be toggled per-project.
        return False

    def add_event(self, project, client=redis):
        """Record one event for *project* in the current minute's bucket."""
        # NOTE(review): ``client=redis`` binds the module-level cluster at
        # import time; pass an explicit client to override (e.g. in tests).
        minute = int(time() / 60)
        keys = [
            # 'stop:e:{0}:{1}'.format(event.group_id),
            'stop:p:{0}'.format(minute),
        ]
        with client.map() as conn:
            for key in keys:
                conn.zincrby(key, project.id)
                # Keep one extra minute so in-flight reads don't lose data.
                conn.expire(key, (MINUTES + 1) * 60)

    def top_projects(self, minutes=15, num=100, client=redis):
        """Return up to *num* (Project, count) pairs over the last *minutes*.

        Aggregates the per-minute sorted sets, resolves the project rows
        in one query, and drops ids whose Project no longer exists.
        """
        now = int(time() / 60)

        # NOTE(review): ``xrange`` implies this module targets Python 2.
        keys = []
        for minute in xrange(minutes):
            keys.append('stop:p:{0}'.format(now - minute))

        counts = []
        with client.map() as conn:
            for key in keys:
                counts.append(conn.zrevrange(key, 0, num, withscores=True))

        # Sum counts per project id across all minute buckets.
        results = defaultdict(int)
        for countset in counts:
            for project_id, count in countset:
                results[int(project_id)] += int(count)

        sorted_results = sorted(
            results.items(), key=lambda x: x[1], reverse=True)[:num]

        project_map = dict(
            (p.id, p) for p in Project.objects.filter(id__in=[
                p_id for p_id, _ in sorted_results
            ]).select_related('team')
        )

        return [
            (project_map[p_id], c)
            for (p_id, c) in sorted_results
            if p_id in project_map
        ]

    def is_rate_limited(self, project):
        # TODO(dcramer): we need a way to hook into Sentry at event input
        # that guarantees this stat
        self.add_event(project)
[
"django.core.exceptions.ImproperlyConfigured",
"time.time",
"collections.defaultdict",
"sentry.models.Project.objects.filter",
"nydus.db.create_cluster",
"django.conf.settings.SENTRY_TOP.get"
] |
[((767, 811), 'django.conf.settings.SENTRY_TOP.get', 'settings.SENTRY_TOP.get', (['"""total_minutes"""', '(15)'], {}), "('total_minutes', 15)\n", (790, 811), False, 'from django.conf import settings\n'), ((335, 391), 'django.core.exceptions.ImproperlyConfigured', 'ImproperlyConfigured', (['"""You need to configure SENTRY_TOP"""'], {}), "('You need to configure SENTRY_TOP')\n", (355, 391), False, 'from django.core.exceptions import ImproperlyConfigured\n'), ((578, 675), 'nydus.db.create_cluster', 'create_cluster', (["{'engine': 'nydus.db.backends.redis.Redis', 'router': router, 'hosts': hosts}"], {}), "({'engine': 'nydus.db.backends.redis.Redis', 'router': router,\n 'hosts': hosts})\n", (592, 675), False, 'from nydus.db import create_cluster\n'), ((2079, 2095), 'collections.defaultdict', 'defaultdict', (['int'], {}), '(int)\n', (2090, 2095), False, 'from collections import defaultdict\n'), ((1389, 1395), 'time.time', 'time', ([], {}), '()\n', (1393, 1395), False, 'from time import time\n'), ((1770, 1776), 'time.time', 'time', ([], {}), '()\n', (1774, 1776), False, 'from time import time\n'), ((2393, 2460), 'sentry.models.Project.objects.filter', 'Project.objects.filter', ([], {'id__in': '[p_id for p_id, _ in sorted_results]'}), '(id__in=[p_id for p_id, _ in sorted_results])\n', (2415, 2460), False, 'from sentry.models import Project\n')]
|
import os
from aoc.utils.file_reader import read_file_line
from aoc.utils.file_reader import path_join
directory_path = os.path.dirname(os.path.realpath(__file__))
input_filename = "input.txt"
target_number = 2020
"""
"""
def problem_part1(lines, target=2020):
    """Find two entries of *lines* summing to *target*; return their product.

    Single pass with a complement set: for each number, check whether it
    completes a previously seen entry; otherwise remember its complement.
    The default *target* matches the module-level ``target_number`` (2020)
    so existing callers are unaffected.

    :param lines: list of ints
    :param target: required sum of the pair (default 2020)
    :return: product of the two entries, or None if no pair exists
    """
    seen = set()
    for number in lines:
        if number in seen:
            # ``number`` is the complement of an earlier entry.
            return number * (target - number)
        seen.add(target - number)
    return None
"""
"""
def problem_part2(lines, target=2020):
    """Find three distinct entries summing to *target*; return their product.

    Precomputes the sum of every index pair i < j, then searches for a
    third index k distinct from both. The previous version paired an
    element with itself (its inner loop included i == j) and could reuse
    the third element inside its own pair; tracking indices fixes both.
    The default *target* matches the module-level ``target_number`` (2020).

    :param lines: list of ints
    :param target: required sum of the triple (default 2020)
    :return: product of the three entries, or None if no triple exists
    """
    # sum -> list of index pairs (i, j) with i < j producing that sum
    pair_indices = {}
    for i in range(len(lines)):
        for j in range(i + 1, len(lines)):
            pair_indices.setdefault(lines[i] + lines[j], []).append((i, j))

    for k, number in enumerate(lines):
        for i, j in pair_indices.get(target - number, ()):
            if k != i and k != j:
                return number * lines[i] * lines[j]
    return None
def day1_main():
    """Entry point for the 2020 Advent of Code Day 1 puzzle (Report Repair)."""
    print("2020 AOC Challenge Day 1: Report Repair")
    numbers = [
        int(line)
        for line in read_file_line(path_join(directory_path, input_filename))
    ]
    print("Part 1, Answer: {}".format(problem_part1(numbers)))
    print("Part 2, Answer: {}".format(problem_part2(numbers)))
if __name__ == "__main__":
day1_main()
|
[
"aoc.utils.file_reader.read_file_line",
"os.path.realpath",
"aoc.utils.file_reader.path_join"
] |
[((138, 164), 'os.path.realpath', 'os.path.realpath', (['__file__'], {}), '(__file__)\n', (154, 164), False, 'import os\n'), ((1163, 1204), 'aoc.utils.file_reader.path_join', 'path_join', (['directory_path', 'input_filename'], {}), '(directory_path, input_filename)\n', (1172, 1204), False, 'from aoc.utils.file_reader import path_join\n'), ((1221, 1247), 'aoc.utils.file_reader.read_file_line', 'read_file_line', (['input_path'], {}), '(input_path)\n', (1235, 1247), False, 'from aoc.utils.file_reader import read_file_line\n')]
|
#!/usr/bin/env python3
import numpy as np
import math
import random
def compute_z(theta, x):
    """Affine pre-activation: dot(theta[:len(x)], x) plus the bias theta[len(x)]."""
    weighted = sum(weight * feature for weight, feature in zip(theta, x))
    return weighted + theta[len(x)]
def compute_g(z):
    """Logistic sigmoid: 1 / (1 + e^-z)."""
    return 1.0 / (1.0 + math.exp(-z))
def compute_h(z):
    """Hypothesis function: alias for the sigmoid activation ``compute_g``."""
    return compute_g(z)
def binary_cross_entropy_loss(Y_train, Y_predict):
    """Mean binary cross-entropy between labels and predicted probabilities.

    Predictions must lie strictly in (0, 1); math.log raises otherwise.
    """
    log_likelihoods = (
        label * math.log(prob) + (1 - label) * math.log(1 - prob)
        for label, prob in zip(Y_train, Y_predict)
    )
    return -sum(log_likelihoods) / len(Y_train)
def compute_loss_gradients(theta, X_train, Y_train, Y_predict):
    """Average gradient of the cross-entropy loss w.r.t. each feature weight.

    Note: ``theta`` is accepted for interface compatibility but is not used
    in the computation.
    """
    n_samples = len(Y_train)
    n_features = len(X_train[0])
    gradients = []
    for feature_idx in range(n_features):
        gradients.append(
            sum(
                (Y_predict[sample_idx] - Y_train[sample_idx])
                * X_train[sample_idx][feature_idx] / n_samples
                for sample_idx in range(n_samples)
            )
        )
    return gradients
def main():
    """Train a logistic-regression model by batch gradient descent on rows
    read from stdin (each row: feature values followed by the 0/1 label).

    Bug fixes relative to the original:
    * ``theta`` is sized to the number of features plus one bias slot —
      it was sized to the number of *rows*, which makes ``compute_z``
      raise IndexError (or silently misbehave) whenever
      rows != features + 1.
    * the bias term ``theta[-1]`` is now updated each epoch; previously
      no gradient was ever applied to it.
    """
    n = int(input("no of rows: "))
    X_train = []
    Y_train = []
    for i in range(n):
        row = [int(r) for r in input().split()]
        X_train.append(row[0:-1])
        Y_train.append(row[-1])
    # One weight per feature plus a trailing bias term (see compute_z).
    n_features = len(X_train[0]) if X_train else 0
    theta = [np.random.randn() for _ in range(n_features + 1)]
    print("theta", theta)
    for i in range(n):
        print(X_train[i], Y_train[i])
    epochs = 5
    epsilon = 0.00000000000000001  # early-stop threshold on the loss
    alpha = 0.001  # learning rate
    for e in range(epochs):
        Y_predict = []
        for i in range(n):
            print(X_train[i])
            Y_predict.append(compute_h(compute_z(theta, X_train[i])))
        current_loss = binary_cross_entropy_loss(Y_train, Y_predict)
        print("=========> Epoch number:", e, "Current Loss: ", current_loss)
        print("Y_predict", Y_predict)
        if current_loss <= epsilon:
            break
        delta_theta = compute_loss_gradients(
            theta, X_train, Y_train, Y_predict)
        print("delta_theta", delta_theta)
        for j in range(len(theta) - 1):
            theta[j] = theta[j] - alpha * delta_theta[j]
        # Bias gradient of the cross-entropy loss is the mean residual.
        bias_gradient = sum(Y_predict[i] - Y_train[i] for i in range(n)) / n
        theta[-1] = theta[-1] - alpha * bias_gradient
main()
|
[
"math.log",
"math.exp",
"numpy.random.randn"
] |
[((1164, 1181), 'numpy.random.randn', 'np.random.randn', ([], {}), '()\n', (1179, 1181), True, 'import numpy as np\n'), ((238, 250), 'math.exp', 'math.exp', (['(-z)'], {}), '(-z)\n', (246, 250), False, 'import math\n'), ((429, 451), 'math.log', 'math.log', (['Y_predict[i]'], {}), '(Y_predict[i])\n', (437, 451), False, 'import math\n'), ((489, 515), 'math.log', 'math.log', (['(1 - Y_predict[i])'], {}), '(1 - Y_predict[i])\n', (497, 515), False, 'import math\n')]
|
###
# Copyright 2008-2011 Diamond Light Source Ltd.
# This file is part of Diffcalc.
#
# Diffcalc is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Diffcalc is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the*
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Diffcalc. If not, see <http://www.gnu.org/licenses/>.
###
from math import asin, atan2, cos, degrees, radians, sin
# from diffcalc.hkl.vlieg.geometry import VliegPosition
from diffcalc.hkl.calc import sign
from diffcalc.hkl.geometry import Position
from diffcalc.ub.reference import Reflection
def PosFromI16sEuler(phi, chi, eta, mu, delta, gamma):
    """Map I16 Euler-style angles onto a diffcalc ``Position`` (gamma -> nu)."""
    return Position(mu=mu, delta=delta, nu=gamma, eta=eta, chi=chi, phi=phi)
def VliegPos(alpha=None, delta=None, gamma=None, omega=None, chi=None, phi=None):
    """Convert six-circle Vlieg diffractometer angles into 4S+2D You geometry.

    All angles are in degrees.  ``alpha``/``omega``/``chi``/``phi`` map
    directly onto ``mu``/``eta``/``chi``/``phi``; the Vlieg (delta, gamma)
    pair is converted into the You (delta, nu) pair via equations (83) and
    (84) — equation numbers refer to the reference the original author used.
    """
    sin_alpha = sin(radians(alpha))
    cos_alpha = cos(radians(alpha))
    sin_delta = sin(radians(delta))
    cos_delta = cos(radians(delta))
    sin_gamma = sin(radians(gamma))
    cos_gamma = cos(radians(gamma))
    asin_delta = degrees(asin(sin_delta * cos_gamma))  # Eq.(83)
    # asin is ambiguous: pick the branch (theta or 180-theta) closest to the
    # requested input delta.
    vals_delta = [asin_delta, 180.0 - asin_delta]
    idx, _ = min(
        [(i, abs(delta - d)) for i, d in enumerate(vals_delta)], key=lambda x: x[1]
    )
    pos_delta = vals_delta[idx]
    # The sign of cos(pos_delta) fixes the quadrant handed to atan2 below.
    sgn = sign(cos(radians(pos_delta)))
    pos_nu = degrees(
        atan2(
            sgn * (cos_delta * cos_gamma * sin_alpha + cos_alpha * sin_gamma),
            sgn * (cos_delta * cos_gamma * cos_alpha - sin_alpha * sin_gamma),
        )
    )  # Eq.(84)
    return Position(mu=alpha, delta=pos_delta, nu=pos_nu, eta=omega, chi=chi, phi=phi)
class SessionScenario:
    """Container describing one UB-matrix test scenario.

    A scenario must have ``name``, ``lattice`` and ``bmatrix`` set; when
    ``umatrix`` is set then ``ref1`` and ``ref2`` must be set as well.
    Matrices are 3x3 nested sequences and references follow the format
    ``(h, k, l, position, energy, tag)``.
    """

    def __init__(self):
        self.name = None
        self.lattice = None
        self.bmatrix = None
        self.ref1 = None
        self.ref2 = None
        self.umatrix = None
        self.calculations = []  # list of CalculationScenario objects

    def __str__(self):
        parts = [
            "\nTestScenario:",
            "\n name: " + self.name,
            "\n lattice:" + str(self.lattice),
            "\n bmatrix:" + str(self.bmatrix),
            "\n ref1:" + str(self.ref1),
            "\n ref2:" + str(self.ref2),
            "\n umatrix:" + str(self.umatrix),
        ]
        return "".join(parts)
class CalculationScenario:
    """One calculation to run within a test scenario.

    A UB matrix appropriate for this calculation is assumed to have been
    calculated or loaded beforehand.
    """

    def __init__(self, tag, package, mode, energy, modeToTest, modeNumber):
        self.tag = tag
        self.package = package
        self.mode = mode
        self.energy = energy
        # Photon energy (keV) to wavelength (Angstrom): lambda = 12.39842 / E.
        hc_over_e = 12.39842
        self.wavelength = hc_over_e / energy
        self.modeToTest = modeToTest
        self.modeNumber = modeNumber
        self.hklList = None  # hkl triples, filled in by the caller
        self.posList = []
        self.paramList = []
def sessions(P=VliegPos):
    """Build the six hard-coded UB-calculation test sessions.

    P:
        Factory turning six diffractometer angles into a position object;
        defaults to ``VliegPos`` (Vlieg angles -> You-geometry ``Position``).

    Returns the tuple ``(session1, ..., session6)`` of ``SessionScenario``
    objects with lattices, B/U matrices and reference reflections set
    (sessions 2 and 3 also carry ``CalculationScenario`` entries).
    """
    ############################ SESSION0 ############################
    # From the dif_init.mat next to dif_dos.exe on Vlieg'session2 cd
    # session2 = SessionScenario()
    # session2.name = 'latt1'
    # session2.lattice = ([4.0004, 4.0004, 2.270000, 90, 90, 90])
    # session2.bmatrix = (((1.570639, 0, 0) ,(0.0, 1.570639, 0) ,
    #                     (0.0, 0.0, 2.767923)))
    # self.scenarios.append(session2)
    ############################ SESSION1 ############################
    # From b16 on 27June2008 (From <NAME>)
    session1 = SessionScenario()
    session1.name = "b16_270608"
    session1.lattice = (3.8401, 3.8401, 5.43072, 90, 90, 90)
    session1.bmatrix = ((1.636204, 0, 0), (0, 1.636204, 0), (0, 0, 1.156971))
    session1.ref1 = Reflection(
        1,
        0,
        1.0628,
        P(5.000, 22.790, 0.000, 1.552, 22.400, 14.255),
        10,
        "ref1",
    )
    session1.ref2 = Reflection(
        0,
        1,
        1.0628,
        P(5.000, 22.790, 0.000, 4.575, 24.275, 101.320),
        10,
        "ref2",
    )
    session1.umatrix = (
        (0.997161, -0.062217, 0.042420),
        (0.062542, 0.998022, -0.006371),
        (-0.041940, 0.009006, 0.999080),
    )
    session1.ref1calchkl = (1, 0, 1.0628)  # Must match the guessed value!
    session1.ref2calchkl = (-0.0329, 1.0114, 1.04)
    ############################ SESSION2 ############################
    # cubic crystal from bliss tutorial
    session2 = SessionScenario()
    session2.name = "cubic_from_bliss_tutorial"
    session2.lattice = (1.54, 1.54, 1.54, 90, 90, 90)
    session2.ref1 = Reflection(1, 0, 0, P(0, 60, 0, 30, 0, 0), 12.39842 / 1.54, "ref1")
    session2.ref2 = Reflection(
        0, 1, 0, P(0, 60, 0, 30, 0, -90), 12.39842 / 1.54, "ref2"
    )
    session2.bmatrix = ((4.07999, 0, 0), (0, 4.07999, 0), (0, 0, 4.07999))
    session2.umatrix = ((1, 0, 0), (0, -1, 0), (0, 0, -1))
    session2.ref1calchkl = (1, 0, 0)  # Must match the guessed value!
    session2.ref2calchkl = (0, 1, 0)
    # sixc-0a : fixed omega = 0
    c = CalculationScenario("sixc-0a", "sixc", "0", 12.39842 / 1.54, "4cBeq", 1)
    c.alpha = 0
    c.gamma = 0
    c.w = 0
    # c.hklList=((0.7, 0.9, 1.3), (1,0,0), (0,1,0), (1, 1, 0))
    c.hklList = ((0.7, 0.9, 1.3),)
    c.posList.append(
        P(0.000000, 119.669750, 0.000000, 59.834875, -48.747500, 307.874983651098)
    )
    # c.posList.append(P(0.000000, 60.000000, 0.000000, 30.000, 0.000000, 0.000000))
    # c.posList.append(P(0.000000, 60.000000, 0.000000, 30.000, 0.000000, -90.0000))
    # c.posList.append(P(0.000000, 90.000000, 0.000000, 45.000, 0.000000, -45.0000))
    session2.calculations.append(c)
    ############################ SESSION3 ############################
    # AngleCalc scenarios from SPEC sixc. using crystal and alignment
    session3 = SessionScenario()
    session3.name = "spec_sixc_b16_270608"
    session3.lattice = (3.8401, 3.8401, 5.43072, 90, 90, 90)
    session3.bmatrix = ((1.636204, 0, 0), (0, 1.636204, 0), (0, 0, 1.156971))
    session3.umatrix = (
        (0.997161, -0.062217, 0.042420),
        (0.062542, 0.998022, -0.006371),
        (-0.041940, 0.009006, 0.999080),
    )
    session3.ref1 = Reflection(
        1,
        0,
        1.0628,
        P(5.000, 22.790, 0.000, 1.552, 22.400, 14.255),
        12.39842 / 1.24,
        "ref1",
    )
    session3.ref2 = Reflection(
        0,
        1,
        1.0628,
        P(5.000, 22.790, 0.000, 4.575, 24.275, 101.320),
        12.39842 / 1.24,
        "ref2",
    )
    session3.ref1calchkl = (1, 0, 1.0628)
    session3.ref2calchkl = (-0.0329, 1.0114, 1.04)
    # sixc-0a : fixed omega = 0
    ac = CalculationScenario("sixc-0a", "sixc", "0", 12.39842 / 1.24, "4cBeq", 1)
    ac.alpha = 0
    ac.gamma = 0
    ac.w = 0
    ### with 'omega_low':-90, 'omega_high':270, 'phi_low':-180, 'phi_high':180
    ac.hklList = []
    ac.hklList.append((0.7, 0.9, 1.3))
    ac.posList.append(P(0.0, 27.352179, 0.000000, 13.676090, 37.774500, 53.965500))
    ac.paramList.append(
        {
            "Bin": 8.3284,
            "Bout": 8.3284,
            "rho": 36.5258,
            "eta": 0.1117,
            "twotheta": 27.3557,
        }
    )
    ac.hklList.append((1, 0, 0))
    ac.posList.append(P(0.0, 18.580230, 0.000000, 9.290115, -2.403500, 3.589000))
    ac.paramList.append(
        {
            "Bin": -0.3880,
            "Bout": -0.3880,
            "rho": -2.3721,
            "eta": -0.0089,
            "twotheta": 18.5826,
        }
    )
    ac.hklList.append((0, 1, 0))
    ac.posList.append(P(0.0, 18.580230, 0.000000, 9.290115, 0.516000, 93.567000))
    ac.paramList.append(
        {
            "Bin": 0.0833,
            "Bout": 0.0833,
            "rho": 0.5092,
            "eta": -0.0414,
            "twotheta": 18.5826,
        }
    )
    ac.hklList.append((1, 1, 0))
    ac.posList.append(P(0.0, 26.394192, 0.000000, 13.197096, -1.334500, 48.602000))
    ac.paramList.append(
        {
            "Bin": -0.3047,
            "Bout": -0.3047,
            "rho": -1.2992,
            "eta": -0.0351,
            "twotheta": 26.3976,
        }
    )
    session3.calculations.append(ac)
    ############################ SESSION4 ############################
    # test crystal
    session4 = SessionScenario()
    session4.name = "test_orth"
    session4.lattice = (1.41421, 1.41421, 1.00000, 90, 90, 90)
    session4.system = "Orthorhombic"
    session4.bmatrix = ((4.44288, 0, 0), (0, 4.44288, 0), (0, 0, 6.28319))
    session4.ref1 = Reflection(
        0,
        1,
        2,
        P(0.0000, 122.4938, 0.0000, 80.7181, 90.0000, -45.0000),
        15.0,
        "ref1",
    )
    session4.ref2 = Reflection(
        1,
        0,
        2,
        P(0.0000, 122.4938, 0.000, 61.2469, 70.5288, -45.0000),
        15,
        "ref2",
    )
    session4.ref3 = Reflection(
        1,
        0,
        1,
        P(0.0000, 60.8172, 0.000, 30.4086, 54.7356, -45.0000),
        15,
        "ref3",
    )
    session4.ref4 = Reflection(
        1,
        1,
        2,
        P(0.0000, 135.0736, 0.000, 67.5368, 63.4349, 0.0000),
        15,
        "ref4",
    )
    session4.reflist = (session4.ref1, session4.ref2, session4.ref3, session4.ref4)
    session4.umatrix = (
        (0.70711, 0.70711, 0.00),
        (-0.70711, 0.70711, 0.00),
        (0.00, 0.00, 1.00),
    )
    session4.ref1calchkl = (0, 1, 2)  # Must match the guessed value!
    session4.ref2calchkl = (1, 0, 2)
    ############################ SESSION5 ############################
    # test crystal
    session5 = SessionScenario()
    session5.name = "Dalyite"
    session5.lattice = (7.51, 7.73, 7.00, 106.0, 113.5, 99.5)
    session5.system = "Triclinic"
    session5.bmatrix = (
        (0.96021, 0.27759, 0.49527),
        (0, 0.84559, 0.25738),
        (0, 0, 0.89760),
    )
    session5.ref1 = Reflection(
        0,
        1,
        2,
        P(0.0000, 23.7405, 0.0000, 11.8703, 46.3100, 43.1304),
        12.3984,
        "ref1",
    )
    session5.ref2 = Reflection(
        1,
        0,
        3,
        P(0.0000, 34.4282, 0.000, 17.2141, 46.4799, 12.7852),
        12.3984,
        "ref2",
    )
    session5.ref3 = Reflection(
        2,
        2,
        6,
        P(0.0000, 82.8618, 0.000, 41.4309, 41.5154, 26.9317),
        12.3984,
        "ref3",
    )
    session5.ref4 = Reflection(
        4,
        1,
        4,
        P(0.0000, 71.2763, 0.000, 35.6382, 29.5042, 14.5490),
        12.3984,
        "ref4",
    )
    session5.ref5 = Reflection(
        8,
        3,
        1,
        P(0.0000, 97.8850, 0.000, 48.9425, 5.6693, 16.7929),
        12.3984,
        "ref5",
    )
    session5.ref6 = Reflection(
        6,
        4,
        5,
        P(0.0000, 129.6412, 0.000, 64.8206, 24.1442, 24.6058),
        12.3984,
        "ref6",
    )
    session5.ref7 = Reflection(
        3,
        5,
        7,
        P(0.0000, 135.9159, 0.000, 67.9579, 34.3696, 35.1816),
        12.3984,
        "ref7",
    )
    session5.reflist = (
        session5.ref1,
        session5.ref2,
        session5.ref3,
        session5.ref4,
        session5.ref5,
        session5.ref6,
        session5.ref7,
    )
    session5.umatrix = (
        (0.99982, 0.00073, 0.01903),
        (0.00073, 0.99710, -0.07612),
        (-0.01903, 0.07612, 0.99692),
    )
    session5.ref1calchkl = (0, 1, 2)  # Must match the guessed value!
    session5.ref2calchkl = (1, 0, 3)
    ############################ SESSION6 ############################
    # test crystal
    session6 = SessionScenario()
    session6.name = "Acanthite"
    session6.lattice = (4.229, 6.931, 7.862, 90, 99.61, 90)
    session6.system = "Monoclinic"
    session6.bmatrix = (
        (1.50688, 0.00000, 0.13532),
        (0.00000, 0.90653, 0.00000),
        (0.00000, 0.00000, 0.79918),
    )
    session6.ref1 = Reflection(
        0,
        1,
        2,
        P(0.0000, 21.1188, 0.0000, 10.5594, 59.6447, 61.8432),
        10.0,
        "ref1",
    )
    session6.ref2 = Reflection(
        1,
        0,
        3,
        P(0.0000, 35.2291, 0.000, 62.4207, 87.1516, -90.0452),
        10.0,
        "ref2",
    )
    session6.ref3 = Reflection(
        1,
        1,
        6,
        P(0.0000, 64.4264, 0.000, 63.9009, 97.7940, -88.8808),
        10.0,
        "ref3",
    )
    session6.ref4 = Reflection(
        1,
        2,
        2,
        P(0.0000, 34.4369, 0.000, 72.4159, 60.1129, -29.0329),
        10.0,
        "ref4",
    )
    session6.ref5 = Reflection(
        2,
        2,
        1,
        P(0.0000, 43.0718, 0.000, 21.5359, 8.3873, 29.0230),
        10.0,
        "ref5",
    )
    session6.reflist = (
        session6.ref1,
        session6.ref2,
        session6.ref3,
        session6.ref4,
        session6.ref5,
    )
    session6.umatrix = (
        (0.99411, 0.00079, 0.10835),
        (0.00460, 0.99876, -0.04949),
        (-0.10825, 0.04969, 0.99288),
    )
    session6.ref1calchkl = (0, 1, 2)  # Must match the guessed value!
    session6.ref2calchkl = (1, 0, 3)
    ########################################################################
    return (session1, session2, session3, session4, session5, session6)
|
[
"math.radians",
"diffcalc.hkl.geometry.Position",
"math.asin",
"math.atan2"
] |
[((1020, 1085), 'diffcalc.hkl.geometry.Position', 'Position', ([], {'mu': 'mu', 'delta': 'delta', 'nu': 'gamma', 'eta': 'eta', 'chi': 'chi', 'phi': 'phi'}), '(mu=mu, delta=delta, nu=gamma, eta=eta, chi=chi, phi=phi)\n', (1028, 1085), False, 'from diffcalc.hkl.geometry import Position\n'), ((2050, 2125), 'diffcalc.hkl.geometry.Position', 'Position', ([], {'mu': 'alpha', 'delta': 'pos_delta', 'nu': 'pos_nu', 'eta': 'omega', 'chi': 'chi', 'phi': 'phi'}), '(mu=alpha, delta=pos_delta, nu=pos_nu, eta=omega, chi=chi, phi=phi)\n', (2058, 2125), False, 'from diffcalc.hkl.geometry import Position\n'), ((1326, 1340), 'math.radians', 'radians', (['alpha'], {}), '(alpha)\n', (1333, 1340), False, 'from math import asin, atan2, cos, degrees, radians, sin\n'), ((1362, 1376), 'math.radians', 'radians', (['alpha'], {}), '(alpha)\n', (1369, 1376), False, 'from math import asin, atan2, cos, degrees, radians, sin\n'), ((1398, 1412), 'math.radians', 'radians', (['delta'], {}), '(delta)\n', (1405, 1412), False, 'from math import asin, atan2, cos, degrees, radians, sin\n'), ((1434, 1448), 'math.radians', 'radians', (['delta'], {}), '(delta)\n', (1441, 1448), False, 'from math import asin, atan2, cos, degrees, radians, sin\n'), ((1470, 1484), 'math.radians', 'radians', (['gamma'], {}), '(gamma)\n', (1477, 1484), False, 'from math import asin, atan2, cos, degrees, radians, sin\n'), ((1506, 1520), 'math.radians', 'radians', (['gamma'], {}), '(gamma)\n', (1513, 1520), False, 'from math import asin, atan2, cos, degrees, radians, sin\n'), ((1547, 1574), 'math.asin', 'asin', (['(sin_delta * cos_gamma)'], {}), '(sin_delta * cos_gamma)\n', (1551, 1574), False, 'from math import asin, atan2, cos, degrees, radians, sin\n'), ((1847, 1991), 'math.atan2', 'atan2', (['(sgn * (cos_delta * cos_gamma * sin_alpha + cos_alpha * sin_gamma))', '(sgn * (cos_delta * cos_gamma * cos_alpha - sin_alpha * sin_gamma))'], {}), '(sgn * (cos_delta * cos_gamma * sin_alpha + cos_alpha * sin_gamma), \n sgn * (cos_delta * 
cos_gamma * cos_alpha - sin_alpha * sin_gamma))\n', (1852, 1991), False, 'from math import asin, atan2, cos, degrees, radians, sin\n'), ((1796, 1814), 'math.radians', 'radians', (['pos_delta'], {}), '(pos_delta)\n', (1803, 1814), False, 'from math import asin, atan2, cos, degrees, radians, sin\n')]
|
'''
Functions to compute fast distance covariance using mergesort.
'''
import warnings
from numba import float64, int64, boolean
import numba
import numpy as np
from ._utils import CompileMode, _transform_to_2d
def _compute_weight_sums(y, weights):
    """O(n log n) merge-sort based dyadic update for fast distance covariance.

    For each sample ``i`` (and each weight column) this accumulates the sum
    of ``weights[j]`` over the samples ``j`` that appear *before* ``i`` in
    the input and whose ``y[j]`` is strictly smaller than ``y[i]`` — this
    follows from the stable ``>=`` merge below, which sorts by descending y.

    y : (n, 1) array used as the sort key.
    weights : (n, k) array; one dyadic sum is produced per column.
    Returns an (n, k) array of accumulated weight sums.
    """
    n_samples = len(y)
    weight_sums = np.zeros((n_samples,) + weights.shape[1:], dtype=y.dtype)
    # Double buffer holding the index permutation of the previous and the
    # current merge pass.
    indexes = np.arange(2 * n_samples).reshape((2, n_samples))
    indexes[1] = 0  # scratch row; every slot is overwritten while merging
    previous_indexes = indexes[0]
    current_indexes = indexes[1]
    # Leading zero row so that slice differences give subarray weight sums.
    weights_cumsum = np.zeros(
        (n_samples + 1,) + weights.shape[1:], dtype=weights.dtype)
    merged_subarray_len = 1
    # Bottom-up merge sort: the sorted-subarray length doubles every pass.
    while merged_subarray_len < n_samples:
        gap = 2 * merged_subarray_len
        indexes_idx = 0
        # Prefix sums of the weights in current merge order.
        # (Numba does not support the axis nor the out parameter of cumsum.)
        for var in range(weights.shape[1]):
            weights_cumsum[1:, var] = np.cumsum(
                weights[previous_indexes, var])
        # Select the subarrays in pairs
        for subarray_pair_idx in range(0, n_samples, gap):
            subarray_1_idx = subarray_pair_idx
            subarray_2_idx = subarray_pair_idx + merged_subarray_len
            subarray_1_idx_last = min(
                subarray_1_idx + merged_subarray_len - 1, n_samples - 1)
            subarray_2_idx_last = min(
                subarray_2_idx + merged_subarray_len - 1, n_samples - 1)
            # Merge the subarrays (descending in y; ">=" keeps the merge stable)
            while (subarray_1_idx <= subarray_1_idx_last and
                    subarray_2_idx <= subarray_2_idx_last):
                previous_index_1 = previous_indexes[subarray_1_idx]
                previous_index_2 = previous_indexes[subarray_2_idx]
                if y[previous_index_1].item() >= y[previous_index_2].item():
                    current_indexes[indexes_idx] = previous_index_1
                    subarray_1_idx += 1
                else:
                    current_indexes[indexes_idx] = previous_index_2
                    subarray_2_idx += 1
                    # Every element still left in subarray 1 precedes this one
                    # in the input and has a strictly smaller y: credit its
                    # weights to this sample.
                    weight_sums[previous_index_2] += (
                        weights_cumsum[subarray_1_idx_last + 1] -
                        weights_cumsum[subarray_1_idx])
                indexes_idx += 1
            # Join the remaining elements of one of the arrays (already sorted)
            if subarray_1_idx <= subarray_1_idx_last:
                n_remaining = subarray_1_idx_last - subarray_1_idx + 1
                indexes_idx_next = indexes_idx + n_remaining
                current_indexes[indexes_idx:indexes_idx_next] = (
                    previous_indexes[subarray_1_idx:subarray_1_idx_last + 1])
                indexes_idx = indexes_idx_next
            elif subarray_2_idx <= subarray_2_idx_last:
                n_remaining = subarray_2_idx_last - subarray_2_idx + 1
                indexes_idx_next = indexes_idx + n_remaining
                current_indexes[indexes_idx:indexes_idx_next] = (
                    previous_indexes[subarray_2_idx:subarray_2_idx_last + 1])
                indexes_idx = indexes_idx_next
        merged_subarray_len = gap
        # Swap buffer
        previous_indexes, current_indexes = (current_indexes, previous_indexes)
    return weight_sums
# Eager (ahead-of-time) numba compilation of _compute_weight_sums with an
# explicit float64 signature; cache=True persists the compiled code to disk.
_compute_weight_sums_compiled = numba.njit(
    float64[:, :](float64[:, :], float64[:, :]),
    cache=True)(_compute_weight_sums)
def _generate_compute_aijbij_term(compiled):
    """Return the (un)compiled version of the sum_{ij} a_ij * b_ij kernel.

    ``compiled`` selects between the numba-compiled and the pure-Python
    ``_compute_weight_sums`` helper so that the returned closure can itself
    be numba-compiled.
    """
    def _compute_aijbij_term(x, y):
        # Computes sum_ij |x_i - x_j| * |y_i - y_j| (a/b are the distance
        # matrices of x and y) in O(n log n).  Requires x sorted ascending.
        compute_weight_sums = (_compute_weight_sums_compiled
                               if compiled else _compute_weight_sums)
        # x must be sorted
        n = len(x)
        # Weight columns 1, y, x and x*y: their dyadic sums give the four
        # pieces of the expanded |x_i - x_j| * |y_i - y_j| product.
        weights = np.hstack((np.ones_like(y), y, x, x * y))
        weight_sums = compute_weight_sums(y, weights)
        x = x.ravel()
        y = y.ravel()
        term_1 = (x * y).T @ weight_sums[:, 0].ravel()
        term_2 = x.T @ weight_sums[:, 1].ravel()
        term_3 = y.T @ weight_sums[:, 2].ravel()
        term_4 = np.sum(weight_sums[:, 3])
        # First term in the equation
        sums_term = term_1 - term_2 - term_3 + term_4
        # Second term in the equation
        sum_x = np.sum(x)
        sum_y = np.sum(y)
        cov_term = n * x.T @ y - np.sum(sum_x * y + sum_y * x) + sum_x * sum_y
        d = 4 * sums_term - 2 * cov_term
        return d.item()
    return _compute_aijbij_term
# Pure-Python variant (used as a fallback, see impls_dict / the AUTO mode).
_compute_aijbij_term = _generate_compute_aijbij_term(compiled=False)
# Numba-compiled variant with an explicit float64 signature, cached to disk.
_compute_aijbij_term_compiled = numba.njit(
    float64(float64[:, :], float64[:, :]),
    cache=True)(
        _generate_compute_aijbij_term(compiled=True))
def _compute_row_sums(x):
# x must be sorted
x = x.ravel()
n_samples = len(x)
term_1 = (2 * np.arange(1, n_samples + 1) - n_samples) * x
sums = np.cumsum(x)
term_2 = sums[-1] - 2 * sums
return term_1 + term_2
# Numba-compiled variant of _compute_row_sums (1-D float64 -> 1-D float64),
# cached to disk.
_compute_row_sums_compiled = numba.njit(
    float64[:](float64[:]),
    cache=True)(_compute_row_sums)
def _generate_distance_covariance_sqr_mergesort_generic_impl(
        compiled):
    """Return the (un)compiled squared-distance-covariance implementation.

    ``compiled`` selects between the numba-compiled helper kernels and the
    pure-Python ones so that the returned closure can itself be compiled.
    """
    def _distance_covariance_sqr_mergesort_generic_impl(x, y, unbiased):
        # Squared distance covariance of the (n, 1) samples x and y via the
        # O(n log n) mergesort algorithm.  `unbiased` switches to the
        # U-statistic normalisation (divisors n-1, n-2, n-3 instead of n).
        compute_aijbij_term = (_compute_aijbij_term_compiled
                               if compiled else _compute_aijbij_term)
        compute_row_sums = (_compute_row_sums_compiled if compiled
                            else _compute_row_sums)
        n = len(x)
        # Sort x in ascending order
        ordered_indexes = np.argsort(x.ravel())
        x = x[ordered_indexes]
        y = y[ordered_indexes]
        aijbij = compute_aijbij_term(x, y)
        a_i = compute_row_sums(x.ravel())
        # b_i is computed in y-sorted order, then scattered back into the
        # (x-sorted) sample order so the element-wise product lines up.
        ordered_indexes_y = np.argsort(y.ravel())
        b_i_perm = compute_row_sums(y.ravel()[ordered_indexes_y])
        b_i = np.empty_like(b_i_perm)
        b_i[ordered_indexes_y] = b_i_perm
        a_dot_dot = np.sum(a_i)
        b_dot_dot = np.sum(b_i)
        sum_ab = a_i.ravel().T @ b_i.ravel()
        if unbiased:
            d3 = (n - 3)
            d2 = (n - 2)
            d1 = (n - 1)
        else:
            d3 = d2 = d1 = n
        # Combine the three terms of the (un)biased estimator.
        d_cov = (aijbij / n / d3 - 2 * sum_ab / n / d2 / d3 +
                 a_dot_dot / n * b_dot_dot / d1 / d2 / d3)
        return d_cov
    return _distance_covariance_sqr_mergesort_generic_impl
# Pure-Python and numba-compiled implementations of the estimator.
_distance_covariance_sqr_mergesort_generic_impl = (
    _generate_distance_covariance_sqr_mergesort_generic_impl(
        compiled=False))
_distance_covariance_sqr_mergesort_generic_impl_compiled = numba.njit(
    float64(float64[:, :], float64[:, :], boolean),
    cache=True)(
        _generate_distance_covariance_sqr_mergesort_generic_impl(
            compiled=True))
# Implementations tried for each compile mode, in order of preference:
# AUTO tries the compiled version first and falls back to the pure-Python one.
impls_dict = {
    CompileMode.AUTO: (
        _distance_covariance_sqr_mergesort_generic_impl_compiled,
        _distance_covariance_sqr_mergesort_generic_impl),
    CompileMode.NO_COMPILE: (_distance_covariance_sqr_mergesort_generic_impl,),
    CompileMode.COMPILE_CPU: (
        _distance_covariance_sqr_mergesort_generic_impl_compiled,)
}
def _distance_covariance_sqr_mergesort_generic(x, y,
                                               *, exponent=1, unbiased=False,
                                               compile_mode=CompileMode.AUTO):
    """Squared distance covariance of samples ``x`` and ``y`` (mergesort algorithm).

    Parameters: ``exponent`` must be 1 (the fast algorithm only supports the
    standard distance); ``unbiased`` selects the U-statistic estimator;
    ``compile_mode`` picks the numba-compiled and/or pure-Python backend.

    Raises ValueError for ``exponent != 1`` and NotImplementedError for an
    unsupported compile mode.

    Bug fix: the NotImplementedError used to be *returned* instead of
    raised, so callers silently received an exception instance as a result.
    """
    if exponent != 1:
        raise ValueError(f"Exponent should be 1 but is {exponent} instead.")
    x = _transform_to_2d(x)
    y = _transform_to_2d(y)
    if compile_mode not in (CompileMode.AUTO, CompileMode.COMPILE_CPU,
                            CompileMode.NO_COMPILE):
        raise NotImplementedError(
            f"Compile mode {compile_mode} not implemented.")
    # Try each backend for this mode; in AUTO mode a TypeError from the
    # compiled backend (e.g. non-float input) falls through to pure Python.
    for impl in impls_dict[compile_mode]:
        try:
            return impl(x, y,
                        unbiased)
        except TypeError as e:
            if compile_mode is not CompileMode.AUTO:
                raise e
            warnings.warn(f"Falling back to uncompiled MERGESORT fast "
                          f"distance covariance because of TypeError "
                          f"exception raised: {e}. Remember: only floating "
                          f"point values can be used in the compiled "
                          f"implementations.")
|
[
"numpy.sum",
"numpy.ones_like",
"numpy.empty_like",
"numpy.zeros",
"numpy.cumsum",
"numpy.arange",
"numba.float64",
"warnings.warn"
] |
[((298, 355), 'numpy.zeros', 'np.zeros', (['((n_samples,) + weights.shape[1:])'], {'dtype': 'y.dtype'}), '((n_samples,) + weights.shape[1:], dtype=y.dtype)\n', (306, 355), True, 'import numpy as np\n'), ((624, 691), 'numpy.zeros', 'np.zeros', (['((n_samples + 1,) + weights.shape[1:])'], {'dtype': 'weights.dtype'}), '((n_samples + 1,) + weights.shape[1:], dtype=weights.dtype)\n', (632, 691), True, 'import numpy as np\n'), ((4847, 4859), 'numpy.cumsum', 'np.cumsum', (['x'], {}), '(x)\n', (4856, 4859), True, 'import numpy as np\n'), ((4065, 4090), 'numpy.sum', 'np.sum', (['weight_sums[:, 3]'], {}), '(weight_sums[:, 3])\n', (4071, 4090), True, 'import numpy as np\n'), ((4238, 4247), 'numpy.sum', 'np.sum', (['x'], {}), '(x)\n', (4244, 4247), True, 'import numpy as np\n'), ((4264, 4273), 'numpy.sum', 'np.sum', (['y'], {}), '(y)\n', (4270, 4273), True, 'import numpy as np\n'), ((4572, 4609), 'numba.float64', 'float64', (['float64[:, :]', 'float64[:, :]'], {}), '(float64[:, :], float64[:, :])\n', (4579, 4609), False, 'from numba import float64, int64, boolean\n'), ((5820, 5843), 'numpy.empty_like', 'np.empty_like', (['b_i_perm'], {}), '(b_i_perm)\n', (5833, 5843), True, 'import numpy as np\n'), ((5907, 5918), 'numpy.sum', 'np.sum', (['a_i'], {}), '(a_i)\n', (5913, 5918), True, 'import numpy as np\n'), ((5939, 5950), 'numpy.sum', 'np.sum', (['b_i'], {}), '(b_i)\n', (5945, 5950), True, 'import numpy as np\n'), ((6557, 6603), 'numba.float64', 'float64', (['float64[:, :]', 'float64[:, :]', 'boolean'], {}), '(float64[:, :], float64[:, :], boolean)\n', (6564, 6603), False, 'from numba import float64, int64, boolean\n'), ((451, 475), 'numpy.arange', 'np.arange', (['(2 * n_samples)'], {}), '(2 * n_samples)\n', (460, 475), True, 'import numpy as np\n'), ((1023, 1064), 'numpy.cumsum', 'np.cumsum', (['weights[previous_indexes, var]'], {}), '(weights[previous_indexes, var])\n', (1032, 1064), True, 'import numpy as np\n'), ((3764, 3779), 'numpy.ones_like', 'np.ones_like', (['y'], {}), 
'(y)\n', (3776, 3779), True, 'import numpy as np\n'), ((4307, 4336), 'numpy.sum', 'np.sum', (['(sum_x * y + sum_y * x)'], {}), '(sum_x * y + sum_y * x)\n', (4313, 4336), True, 'import numpy as np\n'), ((4790, 4817), 'numpy.arange', 'np.arange', (['(1)', '(n_samples + 1)'], {}), '(1, n_samples + 1)\n', (4799, 4817), True, 'import numpy as np\n'), ((7888, 8104), 'warnings.warn', 'warnings.warn', (['f"""Falling back to uncompiled MERGESORT fast distance covariance because of TypeError exception raised: {e}. Rembember: only floating point values can be used in the compiled implementations."""'], {}), "(\n f'Falling back to uncompiled MERGESORT fast distance covariance because of TypeError exception raised: {e}. Rembember: only floating point values can be used in the compiled implementations.'\n )\n", (7901, 8104), False, 'import warnings\n')]
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Author: <NAME>
# @Date: 2014-10-28 04:41:23
# @Last Modified by: marinheiro
# @Last Modified time: 2014-12-08 23:30:01
"""
Auxiliary functions to convert between different rotation representations.
"""
import numpy
import numpy.linalg
import scipy
import math
# Axis-Angle <-> Log Conversion
def axis_angle_to_log(n, theta):
    """Scale the unit rotation axis ``n`` by the angle ``theta`` (rotation-log vector)."""
    return theta * n
def log_to_axis_angle(w):
    """Split a rotation-log vector into ``(unit_axis, angle)``.

    A zero vector yields a zero axis of shape (3,) and angle 0.
    """
    angle = numpy.linalg.norm(w)
    if angle == 0.0:
        return (numpy.zeros((3,)), angle)
    return (w / angle, angle)
# Quaternion <-> Axis-Angle conversion
def quaternion_to_axis_angle(quat):
    """Convert a unit quaternion (scalar-first) into ``(axis, angle)``.

    A zero angle yields a zero axis of shape (3, 1).
    """
    vector_part = quat[1:]
    theta = 2.0 * math.atan2(numpy.linalg.norm(vector_part), quat[0])
    if theta == 0.0:
        return (numpy.zeros((3, 1)), theta)
    return (vector_part / math.sin(theta / 2), theta)
def axis_angle_to_quaternion(n, theta):
    """Build a unit quaternion (4x1, scalar-first) from axis ``n`` (3x1) and angle ``theta``."""
    half_angle = theta / 2
    quat = numpy.zeros((4, 1))
    quat[0] = math.cos(half_angle)
    quat[1:] = n * math.sin(half_angle)
    return quat
# Matrix <-> Quaternion conversion
def matrix_to_quaternion(rot):
    """Convert a 3x3 rotation matrix into a 4x1 quaternion (scalar-first).

    Uses the trace formula; requires trace(rot) > -1 (rotation angle < pi).
    """
    qw = math.sqrt(numpy.trace(rot) + 1.0) / 2
    scale = 4 * qw
    return numpy.array([
        [qw],
        [(rot[2, 1] - rot[1, 2]) / scale],
        [(rot[0, 2] - rot[2, 0]) / scale],
        [(rot[1, 0] - rot[0, 1]) / scale],
    ])
def quaternion_to_matrix(quat):
    """Convert a unit quaternion (4x1, scalar-first) into a 3x3 rotation matrix."""
    qw, qx, qy, qz = (quat[i][0] for i in range(4))
    xx, yy, zz = qx * qx, qy * qy, qz * qz
    xy, xz, yz = qx * qy, qx * qz, qy * qz
    wx, wy, wz = qw * qx, qw * qy, qw * qz
    return numpy.array([
        [1 - 2 * (yy + zz), 2 * (xy - wz), 2 * (xz + wy)],
        [2 * (xy + wz), 1 - 2 * (xx + zz), 2 * (yz - wx)],
        [2 * (xz - wy), 2 * (yz + wx), 1 - 2 * (xx + yy)],
    ])
# Matrix <-> Axis-Angle conversion
def matrix_to_axis_angle(rot):
    """Convert a 3x3 rotation matrix into ``(axis, angle)`` via the quaternion form."""
    return quaternion_to_axis_angle(matrix_to_quaternion(rot))
def axis_angle_to_matrix(n, theta):
    """Convert axis ``n`` (3x1) and angle ``theta`` into a 3x3 rotation matrix
    via the quaternion form."""
    return quaternion_to_matrix(axis_angle_to_quaternion(n, theta))
|
[
"numpy.trace",
"numpy.zeros",
"math.sin",
"numpy.linalg.norm",
"math.cos",
"numpy.array"
] |
[((525, 545), 'numpy.linalg.norm', 'numpy.linalg.norm', (['w'], {}), '(w)\n', (542, 545), False, 'import numpy\n'), ((551, 568), 'numpy.zeros', 'numpy.zeros', (['(3,)'], {}), '((3,))\n', (562, 568), False, 'import numpy\n'), ((776, 795), 'numpy.zeros', 'numpy.zeros', (['(3, 1)'], {}), '((3, 1))\n', (787, 795), False, 'import numpy\n'), ((924, 943), 'math.cos', 'math.cos', (['(theta / 2)'], {}), '(theta / 2)\n', (932, 943), False, 'import math\n'), ((947, 966), 'math.sin', 'math.sin', (['(theta / 2)'], {}), '(theta / 2)\n', (955, 966), False, 'import math\n'), ((973, 992), 'numpy.zeros', 'numpy.zeros', (['(4, 1)'], {}), '((4, 1))\n', (984, 992), False, 'import numpy\n'), ((1165, 1298), 'numpy.array', 'numpy.array', (['[[s], [(rot[2, 1] - rot[1, 2]) / (4 * s)], [(rot[0, 2] - rot[2, 0]) / (4 *\n s)], [(rot[1, 0] - rot[0, 1]) / (4 * s)]]'], {}), '([[s], [(rot[2, 1] - rot[1, 2]) / (4 * s)], [(rot[0, 2] - rot[2,\n 0]) / (4 * s)], [(rot[1, 0] - rot[0, 1]) / (4 * s)]])\n', (1176, 1298), False, 'import numpy\n'), ((1437, 1723), 'numpy.array', 'numpy.array', (['[[1 - 2 * qy * qy - 2 * qz * qz, 2 * qx * qy - 2 * qz * qw, 2 * qx * qz + 2 *\n qy * qw], [2 * qx * qy + 2 * qz * qw, 1 - 2 * qx * qx - 2 * qz * qz, 2 *\n qy * qz - 2 * qx * qw], [2 * qx * qz - 2 * qy * qw, 2 * qy * qz + 2 *\n qx * qw, 1 - 2 * qx * qx - 2 * qy * qy]]'], {}), '([[1 - 2 * qy * qy - 2 * qz * qz, 2 * qx * qy - 2 * qz * qw, 2 *\n qx * qz + 2 * qy * qw], [2 * qx * qy + 2 * qz * qw, 1 - 2 * qx * qx - 2 *\n qz * qz, 2 * qy * qz - 2 * qx * qw], [2 * qx * qz - 2 * qy * qw, 2 * qy *\n qz + 2 * qx * qw, 1 - 2 * qx * qx - 2 * qy * qy]])\n', (1448, 1723), False, 'import numpy\n'), ((733, 760), 'numpy.linalg.norm', 'numpy.linalg.norm', (['quat[1:]'], {}), '(quat[1:])\n', (750, 760), False, 'import numpy\n'), ((828, 847), 'math.sin', 'math.sin', (['(theta / 2)'], {}), '(theta / 2)\n', (836, 847), False, 'import math\n'), ((1131, 1147), 'numpy.trace', 'numpy.trace', (['rot'], {}), '(rot)\n', (1142, 1147), False, 
'import numpy\n')]
|
from abc import ABCMeta, abstractmethod
from typing import Optional, Dict, List, Tuple
import re
from .base import (
HTTPClient,
IParser,
APIResponseType,
ILoginFetcher,
ISemesterFetcher,
ResourceData,
ErrorData,
ParserPrecondition,
SemesterData,
)
from ..reqeust import Response
from ..exceptions import ParsingError
from .common import (
httpdate_to_unixtime,
extract_alerts,
extract_hidden_tags,
urlencode,
parse_table,
)
__all__ = (
    "IParserPrecondition",
    "Login",
    "StudentPhoto",
    "Chapel",
    "Timetable",
    "Course",
)
# Intranet host used to build every request URL.
DOMAIN_NAME: str = "https://kbuis.bible.ac.kr"  # with protocol
# Form-field name of the semester <select> element on the intranet pages.
_SEMESTER_KEY: str = "ctl00$ContentPlaceHolder1$cbo_YearHg"
class IParserPrecondition(metaclass=ABCMeta):
    """Interface for checks that may block parsing of an intranet response."""

    @staticmethod
    @abstractmethod
    def is_blocking(response: Response) -> Optional[ErrorData]:
        """Return ErrorData when a precondition makes further processing
        impossible; return None otherwise."""
        pass
# Wrapper built from the precondition interface — exact semantics live in
# .base.ParserPrecondition; presumably it registers/collects the
# IParserPrecondition subclasses defined below (verify against .base).
_ParserPrecondition = ParserPrecondition(IParserPrecondition)
class _SessionExpiredChecker(IParserPrecondition):
    """Precondition: blocks parsing when the page's alert messages indicate
    an expired session (or the course-evaluation redirect)."""

    @staticmethod
    def is_blocking(response: Response) -> Optional[ErrorData]:
        alerts = extract_alerts(response.soup)
        # First alert mentioning a session problem or the course evaluation.
        blocking_alert = next(
            (message for message in alerts
             if "세션" in message or "수업평가" in message),
            None,
        )
        if blocking_alert is None:
            return None
        return ErrorData(
            error={"title": blocking_alert, "alert_messages": alerts},
            link=response.url,
        )
def _extract_semester(response: Response) -> SemesterData:
    """Pull the currently selected semester and all selectable semesters from
    the page's semester <select> element.

    Raises ParsingError when the select tag, its selected option, or the
    option values cannot be found.
    """
    select_tag = response.soup.find("select", attrs={"name": _SEMESTER_KEY})
    if select_tag is None:
        raise ParsingError("학기 셀렉트 태그를 찾을 수 없습니다.", response)
    if not select_tag.find_all("option", selected=True):
        raise ParsingError("학기 옵션 태그를 찾을 수 없습니다.", response)
    try:
        all_options = select_tag.find_all("option")
        selectable_values: List[str] = [
            option.attrs["value"] for option in all_options
        ]
        selected_value: str = (
            select_tag.find("option", selected=True).attrs["value"]
        )
    except (KeyError, AttributeError):
        raise ParsingError("학기 옵션 태그를 정상적으로 선택할 수 없습니다.", response)
    return SemesterData(selected=selected_value, selectable=selectable_values)
async def _post_with_semester(
    url,
    cookies: Dict[str, str],
    semester: Optional[str] = None,
    *,
    headers: Optional[Dict[str, str]] = None,
    timeout: Optional[float] = None,
    **kwargs,
) -> Response:
    """Fetch an intranet page, optionally switching it to a given semester.

    Selecting a semester requires a POST that echoes the page's hidden form
    tags, so the flow is:
    1. GET the page to obtain the hidden (name, value) form pairs -- the
       server pre-selects a semester here, usually the latest one.
    2. If a different, selectable semester was requested, POST the hidden
       tags plus the semester key to switch the page.
    The resulting SemesterData is stored in ``response.etc["semester"]``.
    """
    response = await HTTPClient.connector.get(
        url, cookies=cookies, headers=headers, timeout=timeout, **kwargs
    )
    if _SessionExpiredChecker.is_blocking(response):
        return response
    semester_info: SemesterData = _extract_semester(response)
    needs_switch = bool(
        semester
        and semester != semester_info.selected
        and semester in semester_info.selectable
    )
    if needs_switch:
        body = extract_hidden_tags(response.soup)
        body[_SEMESTER_KEY] = semester
        body["ctl00$ContentPlaceHolder1$hidActionMode"] = "S"
        response = await HTTPClient.connector.post(
            url, body=body, cookies=cookies, headers=headers, timeout=timeout, **kwargs
        )
        semester_info = _extract_semester(response)
    response.etc["semester"] = semester_info
    return response
class Login(ILoginFetcher, IParser):
    """Intranet login endpoint."""

    # TODO: watch out for URL changes
    URL: str = DOMAIN_NAME + "/ble_login2.aspx"

    @classmethod
    async def fetch(
        cls,
        user_id: str,
        user_pw: str,
        *,
        headers: Optional[Dict[str, str]] = None,
        timeout: Optional[float] = None,
        **kwargs,
    ) -> Response:
        """POST the credential form to the login endpoint."""
        payload = {"Txt_1": user_id, "Txt_2": user_pw, "use_type": "2"}
        return await HTTPClient.connector.post(
            cls.URL, headers=headers, body=payload, timeout=timeout, **kwargs
        )

    @classmethod
    def parse(cls, response: Response) -> APIResponseType:
        """
        Login success: status 302 with a location header; redirect text body.
        Login failure: status 200 without a location header; alert in body.
        """
        if response.status == 302:
            # Success: keep the session cookies plus the issue time.
            issued_at = httpdate_to_unixtime(response.headers["date"])
            return ResourceData(
                data={"cookies": response.cookies, "iat": issued_at},
                link=response.url,
            )
        if response.status == 503:
            # Intranet overloaded; surface the server-rendered error page.
            # TODO: refine once the chronic server-overload situation ends.
            return ErrorData(
                error={
                    "title": response.soup.find("h2").get_text(),
                    "error_message": response.soup.find("p").get_text(),
                },
                link=response.url,
            )
        # Common failure path: first alert message, if any, becomes the title.
        alerts: List[str] = extract_alerts(response.soup)
        return ErrorData(
            error={"title": alerts[0] if alerts else "", "alert_messages": alerts},
            link=response.url,
        )
class StudentPhoto(IParser):
    """Student ID-photo endpoint."""

    URL: str = DOMAIN_NAME + "/SchoolRegMng/SR015.aspx"

    @classmethod
    async def fetch(
        cls,
        cookies: Dict[str, str],
        sid: str,
        *,
        headers: Optional[Dict[str, str]] = None,
        timeout: Optional[float] = None,
        **kwargs,
    ) -> Response:
        """GET the photo resource for the given student id."""
        target = f"{cls.URL}?{urlencode({'schNo': sid})}"
        return await HTTPClient.connector.get(
            target, cookies=cookies, headers=headers, timeout=timeout, **kwargs
        )

    @classmethod
    @_ParserPrecondition
    def parse(cls, response: Response) -> APIResponseType:
        """
        Photo present: content-type is 'image/jpeg' (chunked transfer,
        attachment content-disposition).
        Photo absent:  content-type is an HTML text type instead.
        """
        if response.headers["content-type"].startswith("image"):
            return ResourceData(data={"raw_image": response.raw}, link=response.url)
        return ErrorData(error={"title": "이미지를 불러올 수 없습니다."}, link=response.url)
class Chapel(ISemesterFetcher, IParser):
    """Chapel attendance page for one semester."""

    URL: str = DOMAIN_NAME + "/StudentMng/SM050.aspx"

    @classmethod
    async def fetch(
        cls,
        cookies: Dict[str, str],
        semester: Optional[str] = None,
        *,
        headers: Optional[Dict[str, str]] = None,
        timeout: Optional[float] = None,
        **kwargs,
    ) -> Response:
        """Fetch the chapel page, optionally switching to a given semester."""
        return await _post_with_semester(
            cls.URL, cookies, semester, headers=headers, timeout=timeout, **kwargs
        )

    @classmethod
    def _parse_summary(cls, response: Response) -> Dict[str, str]:
        """Extract the summary table as {label: day count as string}."""
        tbody = response.soup.find("tbody", attrs={"class": "viewbody"})
        if not tbody:
            raise ParsingError("채플 요약 테이블을 찾을 수 없습니다.", response)
        summary: Dict[str, str] = {}
        for header_cell, data_cell in zip(tbody.find_all("th"), tbody.find_all("td")):
            label = header_cell.get_text(strip=True)
            # Keep only the numeric day count out of the cell text.
            match = re.search(r"\d+", data_cell.get_text(strip=True))
            summary[label] = str(match.group()) if match else ""
        return summary

    @classmethod
    def _parse_main_table(cls, response: Response) -> Tuple[List, List]:
        """Split the main attendance table into (head, body) rows."""
        soup = response.soup
        return parse_table(
            response,
            soup.find("thead", attrs={"class": "mhead"}),
            soup.find("tbody", attrs={"class": "mbody"}),
        )

    @classmethod
    @_ParserPrecondition
    def parse(cls, response: Response) -> APIResponseType:
        summary = cls._parse_summary(response)
        head, body = cls._parse_main_table(response)
        return ResourceData(
            data={"summary": summary, "head": head, "body": body},
            link=response.url,
            meta={
                "selected": response.etc["semester"].selected,
                "selectable": response.etc["semester"].selectable,
            },
        )
class Timetable(ISemesterFetcher, IParser):
    """Weekly timetable page: five weekday columns of course entries."""

    URL: str = DOMAIN_NAME + "/GradeMng/GD160.aspx"

    @classmethod
    async def fetch(
        cls,
        cookies: Dict[str, str],
        semester: Optional[str] = None,
        *,
        headers: Optional[Dict[str, str]] = None,
        timeout: Optional[float] = None,
        **kwargs,
    ) -> Response:
        """Fetch the timetable page, optionally switching to a given semester."""
        return await _post_with_semester(
            cls.URL, cookies, semester, headers=headers, timeout=timeout, **kwargs
        )

    @staticmethod
    def _parse_contents(td: str, response: Response) -> Tuple:
        """Split one cell such as 'subject(place)HH:MM ~ HH:MM' into the
        tuple (subject, place, start, end); place may be empty.

        Raises:
            ParsingError: when the cell text matches neither pattern.
        """
        matching = re.match(
            r"(.+)?\(([^(]*)?\)(\d{2}:\d{2})\s*~\s*([0-9:]{,5})", td
        ) or re.match(r"(.+)?()(\d{2}:\d{2})\s*~\s*([0-9:]{,5})", td)
        if not matching:
            # BUG FIX: the exception was instantiated but never raised, so
            # the next line crashed with AttributeError on `None.groups()`.
            raise ParsingError("시간표 상세정보를 해석할 수 없습니다.", response)
        return matching.groups()

    @classmethod
    def _parse_main_table(cls, response: Response) -> Tuple[List, List]:
        """Return (head, per-weekday lists of parsed cell tuples)."""
        soup = response.soup
        thead = soup.find("thead", attrs={"class": "mhead"})
        tbody = soup.find("tbody", attrs={"class": "mbody"})
        result = [[], [], [], [], []]  # one bucket per weekday column
        head, body = parse_table(response, thead, tbody)
        for row in body:
            for i, each in enumerate(row):
                if each:
                    result[i].append(cls._parse_contents(each, response))
        return head, result

    @classmethod
    @_ParserPrecondition
    def parse(cls, response: Response) -> APIResponseType:
        head, body = cls._parse_main_table(response)
        return ResourceData(
            data={"head": head, "body": body},
            link=response.url,
            meta={
                "selected": response.etc["semester"].selected,
                "selectable": response.etc["semester"].selectable,
            },
        )
class Course(ISemesterFetcher, IParser):
    """Course list page (grade-management endpoint) for one semester."""

    URL: str = DOMAIN_NAME + "/GradeMng/GD095.aspx"

    @classmethod
    async def fetch(
        cls,
        cookies: Dict[str, str],
        semester: Optional[str] = None,
        *,
        headers: Optional[Dict[str, str]] = None,
        timeout: Optional[float] = None,
        **kwargs,
    ) -> Response:
        """Fetch the course page, optionally switching to a given semester."""
        return await _post_with_semester(
            cls.URL, cookies, semester, headers=headers, timeout=timeout, **kwargs
        )

    @classmethod
    def _parse_main_table(cls, response: Response) -> Tuple[List, List]:
        """Split the main course table into (head, body) rows."""
        soup = response.soup
        return parse_table(
            response,
            soup.find("thead", attrs={"class": "mhead"}),
            soup.find("tbody", attrs={"class": "mbody"}),
        )

    @classmethod
    @_ParserPrecondition
    def parse(cls, response: Response) -> APIResponseType:
        head, body = cls._parse_main_table(response)
        semester_info = response.etc["semester"]
        return ResourceData(
            data={"head": head, "body": body},
            link=response.url,
            meta={
                "selected": semester_info.selected,
                "selectable": semester_info.selectable,
            },
        )
|
[
"re.search",
"re.match"
] |
[((7462, 7486), 're.search', 're.search', (['"""\\\\d+"""', 'value'], {}), "('\\\\d+', value)\n", (7471, 7486), False, 'import re\n'), ((8979, 9050), 're.match', 're.match', (['"""(.+)?\\\\(([^(]*)?\\\\)(\\\\d{2}:\\\\d{2})\\\\s*~\\\\s*([0-9:]{,5})"""', 'td'], {}), "('(.+)?\\\\(([^(]*)?\\\\)(\\\\d{2}:\\\\d{2})\\\\s*~\\\\s*([0-9:]{,5})', td)\n", (8987, 9050), False, 'import re\n'), ((9071, 9130), 're.match', 're.match', (['"""(.+)?()(\\\\d{2}:\\\\d{2})\\\\s*~\\\\s*([0-9:]{,5})"""', 'td'], {}), "('(.+)?()(\\\\d{2}:\\\\d{2})\\\\s*~\\\\s*([0-9:]{,5})', td)\n", (9079, 9130), False, 'import re\n')]
|
from django.db.models.signals import post_save
from django.contrib.auth.models import User
from .models import Profile
from django.dispatch import receiver
# Create your models here.
@receiver(post_save, sender=User)
def create_profile(sender, instance, created, **kwargs):
    """Create the user's Profile right after the User row is first saved.

    ``get_or_create`` keeps this idempotent even if another receiver has
    already created the profile for this user.
    """
    if created:
        Profile.objects.get_or_create(person_of=instance)
        print("profile created")


# NOTE: the original module connected ``create_profile`` both via the
# decorator and via an explicit ``post_save.connect`` call, and registered a
# second duplicate receiver (``create_user_profile``) that also created a
# Profile.  The redundant connect is dropped and ``create_user_profile`` is
# kept for backward compatibility but is now idempotent.
@receiver(post_save, sender=User)
def create_user_profile(sender, instance, created, **kwargs):
    if created:
        Profile.objects.get_or_create(person_of=instance)


@receiver(post_save, sender=User)
def save_user_profile(sender, instance, **kwargs):
    """Persist Profile edits whenever the related User is saved."""
    instance.profile.save()
|
[
"django.db.models.signals.post_save.connect",
"django.dispatch.receiver"
] |
[((185, 217), 'django.dispatch.receiver', 'receiver', (['post_save'], {'sender': 'User'}), '(post_save, sender=User)\n', (193, 217), False, 'from django.dispatch import receiver\n'), ((372, 418), 'django.db.models.signals.post_save.connect', 'post_save.connect', (['create_profile'], {'sender': 'User'}), '(create_profile, sender=User)\n', (389, 418), False, 'from django.db.models.signals import post_save\n'), ((422, 454), 'django.dispatch.receiver', 'receiver', (['post_save'], {'sender': 'User'}), '(post_save, sender=User)\n', (430, 454), False, 'from django.dispatch import receiver\n'), ((588, 620), 'django.dispatch.receiver', 'receiver', (['post_save'], {'sender': 'User'}), '(post_save, sender=User)\n', (596, 620), False, 'from django.dispatch import receiver\n')]
|
from scipy.misc import imread,imresize,imsave
import os
# NOTE(review): scipy.misc.imread/imresize/imsave were removed in SciPy >= 1.2;
# running this script requires an old SciPy (or a port to imageio/Pillow).
path = '/home/zhang/tm/insightface_for_face_recognition-master/dataset/8631_align_train/'
out_path = '/home/zhang/tm/insightface_for_face_recognition-master/dataset/8631_112_align_train/'

# Mirror the per-identity sub-folder layout and store a 112x96 resized copy
# of every image.
for identity in os.listdir(path):
    src_dir = os.path.join(path, identity)
    dst_dir = os.path.join(out_path, identity)
    if not os.path.exists(dst_dir):
        os.mkdir(dst_dir)
    for filename in os.listdir(src_dir):
        image = imread(os.path.join(src_dir, filename))
        image = imresize(image, (112, 96))
        imsave(os.path.join(dst_dir, filename), image)
|
[
"scipy.misc.imsave",
"os.mkdir",
"os.path.exists",
"scipy.misc.imresize",
"os.path.join",
"os.listdir",
"scipy.misc.imread"
] |
[((257, 273), 'os.listdir', 'os.listdir', (['path'], {}), '(path)\n', (267, 273), False, 'import os\n'), ((318, 346), 'os.path.join', 'os.path.join', (['path', 'img_list'], {}), '(path, img_list)\n', (330, 346), False, 'import os\n'), ((365, 397), 'os.path.join', 'os.path.join', (['out_path', 'img_list'], {}), '(out_path, img_list)\n', (377, 397), False, 'import os\n'), ((485, 505), 'os.listdir', 'os.listdir', (['imgpaths'], {}), '(imgpaths)\n', (495, 505), False, 'import os\n'), ((408, 436), 'os.path.exists', 'os.path.exists', (['out_imgpaths'], {}), '(out_imgpaths)\n', (422, 436), False, 'import os\n'), ((446, 468), 'os.mkdir', 'os.mkdir', (['out_imgpaths'], {}), '(out_imgpaths)\n', (454, 468), False, 'import os\n'), ((549, 574), 'os.path.join', 'os.path.join', (['imgpaths', 'i'], {}), '(imgpaths, i)\n', (561, 574), False, 'import os\n'), ((597, 626), 'os.path.join', 'os.path.join', (['out_imgpaths', 'i'], {}), '(out_imgpaths, i)\n', (609, 626), False, 'import os\n'), ((640, 656), 'scipy.misc.imread', 'imread', (['img_name'], {}), '(img_name)\n', (646, 656), False, 'from scipy.misc import imread, imresize, imsave\n'), ((671, 695), 'scipy.misc.imresize', 'imresize', (['img', '(112, 96)'], {}), '(img, (112, 96))\n', (679, 695), False, 'from scipy.misc import imread, imresize, imsave\n'), ((702, 727), 'scipy.misc.imsave', 'imsave', (['out_img_name', 'img'], {}), '(out_img_name, img)\n', (708, 727), False, 'from scipy.misc import imread, imresize, imsave\n')]
|
import flappybird as fb
import random
import time
from keras.models import Sequential
from keras.layers import Dense
from keras.optimizers import SGD
import numpy as np
import copy
SCALE_FACTOR = 200
class GeneticBrain(fb.Brain):
    """Bird brain whose flap decision comes from a small neural network.

    The network's two inputs are the horizontal distance to the pipe and
    the vertical offset between the bird and the pipe pair's mid-point,
    both scaled by SCALE_FACTOR.
    """

    def __init__(self, n_input, n_hidden):
        # The dead, commented-out Keras prototype was removed; the pure
        # Python NeuralNetwork is what runs.
        self.model = NeuralNetwork([n_input, n_hidden], 'logistic')

    def decideFlap(self, params):
        """Return True when the network output exceeds 0.5."""
        distance = params['distance'] + params['pipeWidth']
        delta_height = (
            params['bottomPipeHeight'] + params['topPipeHeight']
        ) / 2 - params['height']
        data = [distance * SCALE_FACTOR, delta_height * SCALE_FACTOR]
        pred = self.model.predict(data)
        return pred[0] > 0.5

    def getModel(self):
        """Expose the underlying network weights."""
        return self.model.getWeights()

    def setModel(self, weights):
        """Install weights produced by the genetic operators."""
        self.model.setWeights(weights)
        return True
class GeneticAlgorithm():
    """Evolves a population of GeneticBrain units via selection, crossover
    and mutation."""

    def __init__(self, max_units, top_units):
        # max_units: population size; top_units: number of winners kept.
        self.max_units = max_units
        self.top_units = top_units
        if max_units < top_units:
            self.top_units = max_units
        self.population = []
        self.best_brain = None

    def reset(self):
        """Reset the evolution statistics for a fresh run."""
        self.iteration = 1
        self.mutateRate = 1  # start aggressive; lowered after first success
        self.best_population = 0
        self.best_fitness = 0
        self.best_score = 0

    def createPopulation(self):
        """Build a brand-new random population."""
        self.population = []
        for i in range(self.max_units):
            newUnit = GeneticBrain(2, 6)
            newUnit.index = i
            newUnit.fitness = 0
            newUnit.score = 0
            newUnit.isWinner = False
            self.population.append(newUnit)
        return self.population

    def evolvePopulation(self, results):
        """Breed the next generation from this generation's ranked results."""
        winners = self.selection(results)
        for w in winners:
            print("%d: fitness = %f score = %d" % (w.index, w.fitness, w.score))
        if self.mutateRate == 1 and winners[0].fitness < 0:
            # Everyone failed outright -> restart with random brains.
            print("recreate popultation")
            return self.createPopulation()
        else:
            self.mutateRate = 0.2
        if winners[0].fitness > self.best_fitness:
            self.best_fitness = winners[0].fitness
            self.best_score = winners[0].score
            winners[0].model.save('best.h5')
        for i in range(self.top_units, self.max_units):
            if i == self.top_units:
                # Best two parents breed the first offspring.
                parantA = winners[0].getModel()
                parantB = winners[1].getModel()
                offspring = self.crossOver(parantA, parantB)
            elif i < self.max_units - 2:
                parantA = self.getRandomUnit(winners).getModel()
                parantB = self.getRandomUnit(winners).getModel()
                offspring = self.crossOver(parantA, parantB)
            else:
                # Last slots clone the champion (mutation still applies).
                offspring = winners[0].getModel()
            offspring = self.mutation(offspring)
            newUnit = self.population[i]
            newUnit.setModel(offspring)
            newUnit.score = 0
            newUnit.isWinner = False
        return self.population

    def selection(self, results):
        """Mark and return the top_units best units."""
        for i in range(self.top_units):
            self.population[results[i].index].isWinner = True
        return results[:self.top_units]

    def crossOver(self, parantA, parantB):
        """Single-point crossover applied to the bias genes of two parents.

        NOTE(review): ``length`` counts rows of ``parantA[1]`` but the loop
        indexes columns of row 0 -- this only works if the biases are stored
        as a single row; confirm against NeuralNetwork.getWeights().
        """
        length = np.size(parantA[1], 0)
        cutPoint = random.randint(0, length - 1)
        for i in range(cutPoint, length):
            tmp = parantA[1][0][i]
            parantA[1][0][i] = parantB[1][0][i]
            parantB[1][0][i] = tmp
        if random.randint(0, 1):
            return parantA
        else:
            return parantB

    def mutation(self, offspring):
        """Randomly perturb every gene (weights and biases) in place.

        BUG FIX: the original rebound the loop variable
        (``bias = self.mutate(bias)``), which never wrote the mutated value
        back -- mutation was a no-op and the population could not evolve.
        """
        for row in offspring[1]:
            for j in range(len(row)):
                row[j] = self.mutate(row[j])
        for row in offspring[0]:
            for j in range(len(row)):
                row[j] = self.mutate(row[j])
        return offspring

    def mutate(self, gene):
        """Scale one gene by a random factor with probability mutateRate."""
        if random.random() < self.mutateRate:
            mutateFactor = 1 + (random.random() - 0.5) * 3 + (random.random() - 0.5)
            gene *= mutateFactor
        return gene

    def getRandomUnit(self, array):
        """Pick a uniformly random unit from `array`."""
        return array[random.randint(0, len(array) - 1)]

    def normalize(self, value, maxValue):
        """Clamp value to [-maxValue, maxValue] and scale it into [-1, 1]."""
        if value < -maxValue:
            value = -maxValue
        elif value > maxValue:
            value = maxValue
        return value / maxValue

    def saveBestBird(self):
        # Placeholder: persistence is not implemented.
        pass
import pygame
class PlayerBrain(fb.Brain):
    """Human-controlled brain: flap exactly when the player clicks."""

    def decideFlap(self, params):
        return params['playerClick']
class HappyBrain(fb.Brain):
    """Baseline brain that flaps at random (seeded for reproducibility)."""

    def __init__(self):
        random.seed(2000)

    def decideFlap(self, params):
        # Drain the event queue so the pygame window stays responsive.
        pygame.event.get()
        if params['height'] < 40:
            return False
        return random.randint(0, 1000) > 940
def train():
    """Run the genetic-algorithm training loop for 200 generations."""
    bird_num = 10
    ga = GeneticAlgorithm(bird_num, 4)
    ga.reset()
    brains = ga.createPopulation()
    game = fb.FlappyBirdGame(30, bird_num, brains)
    for generation in range(200):
        game.run()
        results = game.result()
        print("Generation %d:" % (generation))
        ranked = []
        for outcome in results[::-1]:
            brain = outcome[0].brain
            # Fitness rewards survival time per pipe and penalises the
            # distance left to the next pipe.
            brain.fitness = (outcome[1]['score']) * outcome[1]['interval'] - outcome[1]['distance']
            brain.score = outcome[1]['score']
            ranked.append(brain)
        brains = ga.evolvePopulation(ranked)
        print("best score = %d best fitness = %d" % (ga.best_score, ga.best_fitness))
        game.reset(bird_num, brains)
    ga.saveBestBird()
    print("GA end!")
from simpleNeuralNetwork import NeuralNetwork
class simpleNNBrain(fb.Brain):
    """Untrained random neural-network brain used to smoke-test the game."""

    def __init__(self):
        self.model = NeuralNetwork([2, 6, 1], 'logistic')
        print(self.model.getWeights())

    def decideFlap(self, params):
        """Feed (pipe distance, gap offset) to the network; flap above 0.5."""
        distance = params['distance'] + params['pipeWidth']
        delta_height = (params['bottomPipeHeight'] + params['topPipeHeight']) / 2 - params['height']
        data = [distance * SCALE_FACTOR, delta_height * SCALE_FACTOR]
        pred = self.model.predict(data)
        # Removed the leftover per-frame debug print of the prediction.
        return pred[0] > 0.5
def train_test():
    """Smoke test: run the game with fresh random brains, then reset once."""
    bird_num = 10
    brains = [simpleNNBrain() for _ in range(bird_num)]
    g = fb.FlappyBirdGame(30, bird_num, brains)
    for _ in range(10):
        g.run()
    result = g.result()
    brains = [simpleNNBrain() for _ in range(bird_num)]
    g.reset(10, brains)
# Entry point: run the full GA training session.
if __name__ == '__main__':
    train()
|
[
"numpy.size",
"random.randint",
"flappybird.FlappyBirdGame",
"pygame.event.get",
"random.random",
"random.seed",
"simpleNeuralNetwork.NeuralNetwork"
] |
[((5525, 5564), 'flappybird.FlappyBirdGame', 'fb.FlappyBirdGame', (['(30)', 'bird_num', 'brains'], {}), '(30, bird_num, brains)\n', (5542, 5564), True, 'import flappybird as fb\n'), ((6932, 6971), 'flappybird.FlappyBirdGame', 'fb.FlappyBirdGame', (['(30)', 'bird_num', 'brains'], {}), '(30, bird_num, brains)\n', (6949, 6971), True, 'import flappybird as fb\n'), ((529, 575), 'simpleNeuralNetwork.NeuralNetwork', 'NeuralNetwork', (['[n_input, n_hidden]', '"""logistic"""'], {}), "([n_input, n_hidden], 'logistic')\n", (542, 575), False, 'from simpleNeuralNetwork import NeuralNetwork\n'), ((3740, 3762), 'numpy.size', 'np.size', (['parantA[1]', '(0)'], {}), '(parantA[1], 0)\n', (3747, 3762), True, 'import numpy as np\n'), ((3781, 3810), 'random.randint', 'random.randint', (['(0)', '(length - 1)'], {}), '(0, length - 1)\n', (3795, 3810), False, 'import random\n'), ((4005, 4025), 'random.randint', 'random.randint', (['(0)', '(1)'], {}), '(0, 1)\n', (4019, 4025), False, 'import random\n'), ((5096, 5113), 'random.seed', 'random.seed', (['(2000)'], {}), '(2000)\n', (5107, 5113), False, 'import random\n'), ((5178, 5196), 'pygame.event.get', 'pygame.event.get', ([], {}), '()\n', (5194, 5196), False, 'import pygame\n'), ((5268, 5291), 'random.randint', 'random.randint', (['(0)', '(1000)'], {}), '(0, 1000)\n', (5282, 5291), False, 'import random\n'), ((6311, 6347), 'simpleNeuralNetwork.NeuralNetwork', 'NeuralNetwork', (['[2, 6, 1]', '"""logistic"""'], {}), "([2, 6, 1], 'logistic')\n", (6324, 6347), False, 'from simpleNeuralNetwork import NeuralNetwork\n'), ((4396, 4411), 'random.random', 'random.random', ([], {}), '()\n', (4409, 4411), False, 'import random\n'), ((4493, 4508), 'random.random', 'random.random', ([], {}), '()\n', (4506, 4508), False, 'import random\n'), ((4463, 4478), 'random.random', 'random.random', ([], {}), '()\n', (4476, 4478), False, 'import random\n')]
|
import matplotlib.pyplot as plt
import pandas as pd
#Data from source
# Source CSV (exported stock data); the ticker in the name becomes the title.
stockData = './stock_market_data-AAPL'
df = pd.read_csv(stockData + ".csv")

# Plot mid prices ((Low + High) / 2) in chronological order.
df = df.sort_values('Date')
df.head()

plt.figure(figsize=(18, 9))
plt.plot(range(df.shape[0]), (df['Low'] + df['High']) / 2.0)
# Label every 500th trading day on the x axis.
plt.xticks(range(0, df.shape[0], 500), df['Date'].loc[::500], rotation=45)
plt.title(stockData.replace("./stock_market_data-", ""), fontsize=18)
plt.xlabel('Date', fontsize=18)
plt.ylabel('Mid Price', fontsize=18)
plt.show()
|
[
"matplotlib.pyplot.show",
"pandas.read_csv",
"matplotlib.pyplot.figure",
"matplotlib.pyplot.ylabel",
"matplotlib.pyplot.xlabel"
] |
[((116, 147), 'pandas.read_csv', 'pd.read_csv', (["(stockData + '.csv')"], {}), "(stockData + '.csv')\n", (127, 147), True, 'import pandas as pd\n'), ((250, 277), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(18, 9)'}), '(figsize=(18, 9))\n', (260, 277), True, 'import matplotlib.pyplot as plt\n'), ((475, 506), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Date"""'], {'fontsize': '(18)'}), "('Date', fontsize=18)\n", (485, 506), True, 'import matplotlib.pyplot as plt\n'), ((506, 542), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Mid Price"""'], {'fontsize': '(18)'}), "('Mid Price', fontsize=18)\n", (516, 542), True, 'import matplotlib.pyplot as plt\n'), ((542, 552), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (550, 552), True, 'import matplotlib.pyplot as plt\n')]
|
from ftplib import FTP
import time
import tarfile
import shutil
import os
def ftpconnect(host, username, password):
    """Open an FTP session in active mode with a verbose protocol trace."""
    session = FTP()
    session.set_pasv(0)        # active mode
    session.set_debuglevel(2)  # verbose trace during the transfer
    session.connect(host, 21)
    session.login(username, password)
    session.encoding = "utf-8"
    return session
def downloadfile(ftp, remotepath, localpath):
    """Download `remotepath` from the server into `localpath` (binary)."""
    bufsize = 1024
    with open(localpath, 'wb') as fp:
        ftp.retrbinary('RETR ' + remotepath, fp.write, bufsize)
        ftp.set_debuglevel(0)  # silence the trace once the transfer is done
def uploadfile(ftp, remotepath, localpath):
    """Upload `localpath` to `remotepath` on the server (binary)."""
    bufsize = 1024
    with open(localpath, 'rb') as fp:
        ftp.storbinary('STOR ' + remotepath, fp, bufsize)
        ftp.set_debuglevel(0)  # silence the trace once the transfer is done
if __name__ == "__main__":
path = './rate/'
f0, f1, f2, f3, f4, f5 = 0, 0, 0, 0, 0, 0
print(f1)
try:
ftp0 = ftpconnect("[240b:250:280:cb00:8171:63df:dae6:187b]", "rate", "")
uploadfile(ftp0, "./下赢用上模型局前预估/" + "下赢用上模型局前预估" + str(time.time()) + ".csv", path + "下赢用上模型局前预估.csv")
ftp0.quit()
except:
f0 = 1
try:
ftp2 = ftpconnect("[240b:250:280:cb00:8171:63df:dae6:187b]", "rate", "")
uploadfile(ftp2, "./地上赢时局前预估/" + "地上赢时局前预估" + str(time.time()) + ".csv", path + "地上赢时局前预估.csv")
ftp2.quit()
except:
f2 = 1
try:
ftp1 = ftpconnect("[240b:250:280:cb00:8171:63df:dae6:187b]", "rate", "")
uploadfile(ftp1, "./地主赢时叫牌胜率/" + "地主赢时叫牌胜率" + str(time.time()) + ".csv", path + "地主赢时叫牌胜率.csv")
ftp1.quit()
except:
f1 = 1
try:
ftp3 = ftpconnect("[240b:250:280:cb00:8171:63df:dae6:187b]", "rate", "")
uploadfile(ftp3, "./地主赢时局前预估/" + "地主赢时局前预估" + str(time.time()) + ".csv", path + "地主赢时局前预估.csv")
ftp3.quit()
except:
f3 = 1
try:
ftp4 = ftpconnect("[240b:250:280:cb00:8171:63df:dae6:187b]", "rate", "")
uploadfile(ftp4, "./地主输时叫牌胜率/" + "地主输时叫牌胜率" + str(time.time()) + ".csv", path + "地主输时叫牌胜率.csv")
ftp4.quit()
except:
f4 = 1
try:
ftp5 = ftpconnect("[240b:250:280:cb00:8171:63df:dae6:187b]", "rate", "")
uploadfile(ftp5, "./地主输时局前预估/" + "地主输时局前预估" + str(time.time()) + ".csv", path + "地主输时局前预估.csv")
ftp5.quit()
except:
f5 = 1
if f0 != 1 and f1 != 1 and f2 != 1 and f3 != 1 and f4 != 1 and f5 != 1:
shutil.rmtree("./rate/")
print(f0,f1,f2,f3,f4,f5)
#os.system("pause")
shutil.copytree("./sample", "./rate/")
|
[
"shutil.rmtree",
"time.time",
"shutil.copytree",
"ftplib.FTP"
] |
[((128, 133), 'ftplib.FTP', 'FTP', ([], {}), '()\n', (131, 133), False, 'from ftplib import FTP\n'), ((2380, 2404), 'shutil.rmtree', 'shutil.rmtree', (['"""./rate/"""'], {}), "('./rate/')\n", (2393, 2404), False, 'import shutil\n'), ((2474, 2512), 'shutil.copytree', 'shutil.copytree', (['"""./sample"""', '"""./rate/"""'], {}), "('./sample', './rate/')\n", (2489, 2512), False, 'import shutil\n'), ((1036, 1047), 'time.time', 'time.time', ([], {}), '()\n', (1045, 1047), False, 'import time\n'), ((1271, 1282), 'time.time', 'time.time', ([], {}), '()\n', (1280, 1282), False, 'import time\n'), ((1512, 1523), 'time.time', 'time.time', ([], {}), '()\n', (1521, 1523), False, 'import time\n'), ((1753, 1764), 'time.time', 'time.time', ([], {}), '()\n', (1762, 1764), False, 'import time\n'), ((1994, 2005), 'time.time', 'time.time', ([], {}), '()\n', (2003, 2005), False, 'import time\n'), ((2235, 2246), 'time.time', 'time.time', ([], {}), '()\n', (2244, 2246), False, 'import time\n')]
|
from os.path import join
import pandas as pd
import matplotlib.pyplot as plt
from util.plot import Plot, plotDataFrame, formatXAxisDate
class SubjectAlternateNamesPlot(Plot):
    """Plots daily statistics of certificate SAN-list lengths."""

    def __init__(self):
        super(SubjectAlternateNamesPlot, self).__init__(
            'Subject Alternate Names', 'SubjectAlternateNames.csv', 'subjectAltNames'
        )
        self.__output_file_name = "SubjectAlternateNames.png"

    def add_args(self, parser):
        parser.add_argument(
            '-san', '--subjectAltNames', action='store_true',
            help='Plot subject alternate names from certificates'
        )

    def parse_args(self, args):
        # No plot-specific arguments to consume.
        pass

    def plot(self, input_file, output_folder):
        frame = pd.read_csv(
            input_file,
            sep='\x09',
            usecols=[0, 1, 2],
            parse_dates=[0],
            converters={"SubjectAltNames": lambda x: x.strip("[]").split(", ")},
        )
        frame.dropna(inplace=True)
        # Per-row SAN list length, then daily aggregate statistics.
        frame['SANLength'] = frame['SubjectAltNames'].apply(
            lambda v: len(v) if isinstance(v, list) else None
        )
        frame = frame.groupby('Day')['SANLength'].agg(['mean', 'median', 'max', 'min'])
        frame.columns.name = None
        frame.index.name = None
        fig = plotDataFrame(frame, "Length of Subject Alternate Name List")
        fig.legend(loc='center left', bbox_to_anchor=(1, 0.5))
        formatXAxisDate(fig)
        plt.tight_layout()
        plt.savefig(join(output_folder, self.__output_file_name), bbox_inches='tight')
|
[
"util.plot.formatXAxisDate",
"util.plot.plotDataFrame",
"matplotlib.pyplot.tight_layout",
"os.path.join"
] |
[((1157, 1215), 'util.plot.plotDataFrame', 'plotDataFrame', (['df', '"""Length of Subject Alternate Name List"""'], {}), "(df, 'Length of Subject Alternate Name List')\n", (1170, 1215), False, 'from util.plot import Plot, plotDataFrame, formatXAxisDate\n'), ((1287, 1307), 'util.plot.formatXAxisDate', 'formatXAxisDate', (['fig'], {}), '(fig)\n', (1302, 1307), False, 'from util.plot import Plot, plotDataFrame, formatXAxisDate\n'), ((1317, 1335), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {}), '()\n', (1333, 1335), True, 'import matplotlib.pyplot as plt\n'), ((1357, 1401), 'os.path.join', 'join', (['output_folder', 'self.__output_file_name'], {}), '(output_folder, self.__output_file_name)\n', (1361, 1401), False, 'from os.path import join\n')]
|
import os
import os.path as osp
from PIL import Image
PATH = '../Fewshot/Fewshot/'
classes = os.listdir(PATH)
trainp = '../Fewshot/train/'
valp = '../Fewshot/val/'
testp = '../Fewshot/test/'


def _split_class(split_root, class_dir, class_name, folders, start, stop):
    """Copy files folders[start:stop] of one class into `split_root`:
    even indices go to images/<class>, odd indices to labels/<class>."""
    images_dir = osp.join(osp.join(split_root, 'images/'), class_name)
    os.mkdir(images_dir)
    labels_dir = osp.join(osp.join(split_root, 'labels/'), class_name)
    os.mkdir(labels_dir)
    for i in range(start, stop):
        im = Image.open(osp.join(class_dir, folders[i]))
        dest_dir = images_dir if i % 2 == 0 else labels_dir
        im.save(osp.join(dest_dir, folders[i]))


# The original repeated the same copy logic three times (train/val/test);
# it is now driven by a single (split, index-range) table.
# NOTE(review): assumes every class folder holds at least 20 files, and
# os.listdir order is arbitrary, so the split is not deterministic.
for classv in classes:
    if classv[0] == '.':
        continue  # skip hidden entries such as .DS_Store
    pathn = osp.join(PATH, classv) + '/'
    folders = os.listdir(pathn)
    for split_root, start, stop in ((trainp, 0, 8), (valp, 8, 16), (testp, 16, 20)):
        _split_class(split_root, pathn, classv, folders, start, stop)
|
[
"os.mkdir",
"os.path.join",
"os.listdir",
"PIL.Image.open"
] |
[((96, 112), 'os.listdir', 'os.listdir', (['PATH'], {}), '(PATH)\n', (106, 112), False, 'import os\n'), ((262, 284), 'os.path.join', 'osp.join', (['PATH', 'classv'], {}), '(PATH, classv)\n', (270, 284), True, 'import os.path as osp\n'), ((316, 333), 'os.listdir', 'os.listdir', (['pathn'], {}), '(pathn)\n', (326, 333), False, 'import os\n'), ((349, 376), 'os.path.join', 'osp.join', (['trainp', '"""images/"""'], {}), "(trainp, 'images/')\n", (357, 376), True, 'import os.path as osp\n'), ((386, 409), 'os.path.join', 'osp.join', (['path1', 'classv'], {}), '(path1, classv)\n', (394, 409), True, 'import os.path as osp\n'), ((413, 428), 'os.mkdir', 'os.mkdir', (['path1'], {}), '(path1)\n', (421, 428), False, 'import os\n'), ((466, 493), 'os.path.join', 'osp.join', (['trainp', '"""labels/"""'], {}), "(trainp, 'labels/')\n", (474, 493), True, 'import os.path as osp\n'), ((503, 526), 'os.path.join', 'osp.join', (['path2', 'classv'], {}), '(path2, classv)\n', (511, 526), True, 'import os.path as osp\n'), ((530, 545), 'os.mkdir', 'os.mkdir', (['path2'], {}), '(path2)\n', (538, 545), False, 'import os\n'), ((857, 882), 'os.path.join', 'osp.join', (['valp', '"""images/"""'], {}), "(valp, 'images/')\n", (865, 882), True, 'import os.path as osp\n'), ((892, 915), 'os.path.join', 'osp.join', (['path1', 'classv'], {}), '(path1, classv)\n', (900, 915), True, 'import os.path as osp\n'), ((919, 934), 'os.mkdir', 'os.mkdir', (['path1'], {}), '(path1)\n', (927, 934), False, 'import os\n'), ((972, 997), 'os.path.join', 'osp.join', (['valp', '"""labels/"""'], {}), "(valp, 'labels/')\n", (980, 997), True, 'import os.path as osp\n'), ((1007, 1030), 'os.path.join', 'osp.join', (['path2', 'classv'], {}), '(path2, classv)\n', (1015, 1030), True, 'import os.path as osp\n'), ((1034, 1049), 'os.mkdir', 'os.mkdir', (['path2'], {}), '(path2)\n', (1042, 1049), False, 'import os\n'), ((1387, 1413), 'os.path.join', 'osp.join', (['testp', '"""images/"""'], {}), "(testp, 'images/')\n", (1395, 1413), True, 
'import os.path as osp\n'), ((1423, 1446), 'os.path.join', 'osp.join', (['path1', 'classv'], {}), '(path1, classv)\n', (1431, 1446), True, 'import os.path as osp\n'), ((1450, 1465), 'os.mkdir', 'os.mkdir', (['path1'], {}), '(path1)\n', (1458, 1465), False, 'import os\n'), ((1501, 1527), 'os.path.join', 'osp.join', (['testp', '"""labels/"""'], {}), "(testp, 'labels/')\n", (1509, 1527), True, 'import os.path as osp\n'), ((1537, 1560), 'os.path.join', 'osp.join', (['path2', 'classv'], {}), '(path2, classv)\n', (1545, 1560), True, 'import os.path as osp\n'), ((1564, 1579), 'os.mkdir', 'os.mkdir', (['path2'], {}), '(path2)\n', (1572, 1579), False, 'import os\n'), ((616, 643), 'os.path.join', 'osp.join', (['pathn', 'folders[i]'], {}), '(pathn, folders[i])\n', (624, 643), True, 'import os.path as osp\n'), ((654, 667), 'PIL.Image.open', 'Image.open', (['p'], {}), '(p)\n', (664, 667), False, 'from PIL import Image\n'), ((1121, 1148), 'os.path.join', 'osp.join', (['pathn', 'folders[i]'], {}), '(pathn, folders[i])\n', (1129, 1148), True, 'import os.path as osp\n'), ((1159, 1172), 'PIL.Image.open', 'Image.open', (['p'], {}), '(p)\n', (1169, 1172), False, 'from PIL import Image\n'), ((1676, 1703), 'os.path.join', 'osp.join', (['pathn', 'folders[i]'], {}), '(pathn, folders[i])\n', (1684, 1703), True, 'import os.path as osp\n'), ((1714, 1727), 'PIL.Image.open', 'Image.open', (['p'], {}), '(p)\n', (1724, 1727), False, 'from PIL import Image\n'), ((703, 730), 'os.path.join', 'osp.join', (['path1', 'folders[i]'], {}), '(path1, folders[i])\n', (711, 730), True, 'import os.path as osp\n'), ((783, 810), 'os.path.join', 'osp.join', (['path2', 'folders[i]'], {}), '(path2, folders[i])\n', (791, 810), True, 'import os.path as osp\n'), ((1208, 1235), 'os.path.join', 'osp.join', (['path1', 'folders[i]'], {}), '(path1, folders[i])\n', (1216, 1235), True, 'import os.path as osp\n'), ((1288, 1315), 'os.path.join', 'osp.join', (['path2', 'folders[i]'], {}), '(path2, folders[i])\n', (1296, 1315), 
True, 'import os.path as osp\n'), ((1763, 1790), 'os.path.join', 'osp.join', (['path1', 'folders[i]'], {}), '(path1, folders[i])\n', (1771, 1790), True, 'import os.path as osp\n'), ((1843, 1870), 'os.path.join', 'osp.join', (['path2', 'folders[i]'], {}), '(path2, folders[i])\n', (1851, 1870), True, 'import os.path as osp\n')]
|
import unittest
import code_helper
class Test0012(unittest.TestCase):
    """Project Euler 12: first triangle number with over 500 divisors."""

    def test_problem(self):
        primes = list(code_helper.range_prime(10000))
        triangle_number = -1
        for n in range(7000, 20000):
            triangle_number = n * (n + 1) / 2
            # Count divisors via trial division: a prime power p**k in the
            # factorization contributes a factor of (k + 1).
            divisors = 1
            remainder = triangle_number
            for prime in primes:
                if remainder < prime:
                    break
                if remainder % prime == 0:
                    exponent = 1
                    while remainder % prime == 0:
                        remainder /= prime
                        exponent += 1
                    divisors *= exponent
            if divisors > 500:
                break
        self.assertEqual(triangle_number, 76576500)
|
[
"code_helper.range_prime"
] |
[((122, 152), 'code_helper.range_prime', 'code_helper.range_prime', (['(10000)'], {}), '(10000)\n', (145, 152), False, 'import code_helper\n')]
|
from twisted.internet import reactor, threads
import threading
import functools
import aux.protocol as protocol_module
class Backend(object):
def __init__(self):
self.thread = None
self.reactor = reactor
self.event = threading.Event()
self.protocols = protocols_module
def start(self):
self.thread = threading.Thread(name='BackendThread',
target=self.start_reactor)
self.thread.start()
#The event.set is called when the reactor
#is completely initialized.
self.event.wait()
def stop(self):
self.reactor.callFromThread(self.reactor.stop)
while self.thread.is_alive():
# Do not just do .join() as this will block the mainthread
# in such a way that C-c will not work.
self.thread.join(timeout=0.01)
def start_reactor(self):
self.reactor.callWhenRunning(lambda: self.event.set())
self.reactor.run(installSignalHandlers=0)
def make_proxy(self, obj):
if isinstance(obj, Proxy):
raise AssertionError('Wrapping a Proxy in a Proxy will deadlock')
return Proxy(obj)
class Proxy(object):
def __init__(self, wrapped_obj):
self.__dict__['wrapped_obj'] = wrapped_obj
def __getattr__(self, attr):
if attr in ['wrapped_obj']:
return self.__dict__['wrapped_obj']
if hasattr(self.wrapped_obj, attr):
attr = getattr(self.wrapped_obj, attr)
if callable(attr):
return self.create_blocking_wrapper(attr)
return attr
raise KeyError('%s does not exist in %s' % (attr, self))
def __setattr__(self, attr, value):
setattr(self.wrapped_obj, attr, value)
def create_blocking_wrapper(self, callable_):
@functools.wraps(callable_)
def _blocked(*args, **kwargs):
return threads.blockingCallFromThread(reactor,
callable_,
*args,
**kwargs)
return _blocked
|
[
"threading.Thread",
"twisted.internet.threads.blockingCallFromThread",
"threading.Event",
"functools.wraps"
] |
[((247, 264), 'threading.Event', 'threading.Event', ([], {}), '()\n', (262, 264), False, 'import threading\n'), ((351, 416), 'threading.Thread', 'threading.Thread', ([], {'name': '"""BackendThread"""', 'target': 'self.start_reactor'}), "(name='BackendThread', target=self.start_reactor)\n", (367, 416), False, 'import threading\n'), ((1839, 1865), 'functools.wraps', 'functools.wraps', (['callable_'], {}), '(callable_)\n', (1854, 1865), False, 'import functools\n'), ((1924, 1991), 'twisted.internet.threads.blockingCallFromThread', 'threads.blockingCallFromThread', (['reactor', 'callable_', '*args'], {}), '(reactor, callable_, *args, **kwargs)\n', (1954, 1991), False, 'from twisted.internet import reactor, threads\n')]
|
# Author: <NAME>
# Python 3.9
import argparse
import nltk
import re
import os
import pathlib
def extract(filePath):
"""Extracts the textual information from a file.
Args:
filePath (str): The path to the file to extract text from.
Raises:
ValueError: If the information could not be extracted due to unsupported file type.
Returns:
str: The text in the provided file.
"""
# get the file extension
ext = pathlib.Path(filePath).suffix
# extract all data from pure text files
if ext == ".txt" or ext == ".md":
text = None
with open(filePath) as file:
text = file.read()
return text
# get the text from PDFs
if ext == ".pdf":
from pdfminer.high_level import extract_text
return extract_text(filePath)
# get the text minus tags from HTML files
if ext == ".html" or ext == ".htm":
from bs4 import BeautifulSoup
with open(filePath) as file:
soup = BeautifulSoup(file, "html.parser")
return soup.get_text()
raise ValueError(f"Text from file {filePath} could not be extracted. Supported types are TXT, PDF, HTML.")
def getNE(text, piiNE):
"""Gets the named entities classified as PII in the text.
Args:
text (str): The text to analyze.
piiNE (list): The types of named entities classified as PII that should be removed. Options: PERSON, ORGANIZATION, GPE, LOCATIOn.
Returns:
set: The set of strings holding named entity PII.
"""
# search for NLTK required data in this directory so the user doesn't need to download it separately
nltk.data.path.append(os.getcwd())
# gets all of the named entities in the text
ne = nltk.ne_chunk(nltk.pos_tag(nltk.word_tokenize(text)))
pii = []
# checks if a subtree contains PII (i.e. it should be removed)
def filterPII(x):
return x.label() in piiNE
# loops through all subtrees with a PII label
for sub in ne.subtrees(filter = filterPII):
# gets the PII's full text string from the subtree's leaves
# ex: [('Google', 'NNP'), ('Science', 'NNP'), ('Fair', 'NNP')] -> Google Science Fair
piiStr = " ".join(pair[0] for pair in sub.leaves())
# adds the PII string to the list
if piiStr not in pii:
pii.append(piiStr)
# converts to a set before returning to remove duplicates
return set(pii)
def getIDInfo(text, types):
"""Gets the ID info classified as PII in the text.
Args:
text (str): The text to analyze.
types (list): The types of ID info classified as PII that should be removed. Options: EMAIL, PHONE, SSN
Returns:
set: The set of strings holding ID info PII.
"""
# gets whether each ID info type should be removed.
phone = "PHONE" in types
email = "EMAIL" in types
ssn = "SSN" in types
# return an empty set if we're not looking for any ID info PII
if not(phone or email or ssn):
return set([])
# initialize the phone number regex
if phone: phoneRegex = re.compile(r'''(
(\d{3}|\(\d{3}\))? # area code
(\s|-|\.)? # separator
(\d{3}) # first 3 digits
(\s|-|\.) # separator
(\d{4}) # last 4 digits
(\s*(ext|x|ext.)\s*(\d{2,5}))? # optional extension
)''', re.VERBOSE)
# initialize the email address regex
if email: emailRegex = re.compile(r'''(
[a-zA-Z0-9._%+-] + # username
@ # @symbol
[a-zA-Z0-9.-] + # domain
(\.[a-zA-Z]{2,4}) # .something
)''', re.VERBOSE)
# initialize the social security number regex
if ssn: ssnRegex = re.compile(r'''(
(?!666|000|9\d{2})\d{3} # SSN can't begin with 666, 000 or anything between 900-999
- # explicit dash (separating Area and Group numbers)
(?!00)\d{2} # don't allow the Group Number to be "00"
- # another dash (separating Group and Serial numbers)
(?!0{4})\d{4} # don't allow last four digits to be "0000"
)''', re.VERBOSE)
pii = []
# utility method for getting PII matches
def getMatches(pattern, t):
# for each match, return the match itself if it is a string or the first member of a tuple match
# this is because matches are sometimes tuples of multiple groups, like a phone number match being:
# ("800-999-2222", "800", "-", "999", "-", "2222")
# However, sometimes the matches are just strings (no groups), so accessing the element at [0] would get the first char, which is not desirable.
return [(match if type(match) is str else match[0]) for match in pattern.findall(t)]
# adds the found phone #s, emails, and SSNs to the PII list
if phone: pii += getMatches(phoneRegex, text)
if email: pii += getMatches(emailRegex, text)
if ssn: pii += getMatches(ssnRegex, text)
# converts to a set before returning to remove duplicates
return set(pii)
def writeFile(text, path):
"""Writes text to the file path.
Args:
text (str): The text to write.
path (str): The path to write the file to.
"""
with open(path, "w") as file:
file.write(text)
def cleanString(text,
verbose = False,
piiNE = ["PERSON", "ORGANIZATION", "GPE", "LOCATION"],
piiNums = ["PHONE", "EMAIL", "SSN"]):
"""Cleans a string of PII.
Args:
text (str): The text to clean.
verbose (bool, optional): Whether status updates should be printed to the console. Defaults to False.
piiNE (list, optional): The types of named entity PII to remove. Defaults to all types: ["PERSON", "ORGANIZATION", "GPE", "LOCATION"].
piiNums (list, optional): The types of ID info PII to remove. Defaults to all types: ["PHONE", "EMAIL", "SSN"].
Returns:
str: The cleaned text string with PII replaced with XXXXX values.
"""
if verbose: print("Cleaning text: getting named entities and identifiable information...")
# combines the NE and ID info PII string sets
piiSet = set.union(getNE(text, piiNE), getIDInfo(text, piiNums))
if verbose: print(str(len(piiSet)) + " PII strings found.")
if verbose: print("Removing PII.")
# replaces PII with XXXXX values
cleaned = text
for pii in piiSet:
cleaned = cleaned.replace(pii, "XXXXX")
# return the cleaned text string
return cleaned
def cleanFile(filePath, outputPath,
verbose = False,
piiNE = ["PERSON", "ORGANIZATION", "GPE", "LOCATION"],
piiNums = ["PHONE", "EMAIL", "SSN"]):
"""Reads a file with PII and saves a copy of that file with PII removed.
Args:
filePath (str): The path to the file with PII.
outputPath (str): The path to the cleaned file to be saved.
verbose (bool, optional): Whether status updates should be printed to the console. Defaults to False.
piiNE (list, optional): The types of named entity PII to remove. Defaults to all types: ["PERSON", "ORGANIZATION", "GPE", "LOCATION"].
piiNums (list, optional): The types of ID info PII to remove. Defaults to all types: ["PHONE", "EMAIL", "SSN"].
"""
if verbose: print("Extracting text from " + filePath + "...")
# gets the file's text
text = extract(filePath)
if verbose: print("Text extracted.")
# gets the text without PII
cleaned = cleanString(text, verbose, piiNE, piiNums)
if verbose: print("Writing clean text to " + outputPath + ".")
# write the cleaned text to the output file
writeFile(cleaned, outputPath)
# if this file is being executed on the command line, parse arguments and process the user's file or text
if __name__ == "__main__":
parser = argparse.ArgumentParser("Removes personally identifiable information (PII) like names and phone numbers from text strings and files.")
parser.add_argument("-f", nargs=2, dest="path", default=[], metavar=("inputPath","outputPath"), help="the file to remove PII from and the clean output file path")
parser.add_argument("-s", dest="text", default=None, help="input a text string to clean")
args = parser.parse_args()
# cleans the user's provided file
if len(args.path) == 2:
cleanFile(args.path[0], args.path[1], verbose=True)
# cleans the user's provided text
elif args.text is not None:
s = cleanString(args.text, verbose=True)
print("Text with PII removed:\n" + s)
else:
print("No action specified.")
|
[
"argparse.ArgumentParser",
"os.getcwd",
"pdfminer.high_level.extract_text",
"pathlib.Path",
"bs4.BeautifulSoup",
"nltk.word_tokenize",
"re.compile"
] |
[((7744, 7888), 'argparse.ArgumentParser', 'argparse.ArgumentParser', (['"""Removes personally identifiable information (PII) like names and phone numbers from text strings and files."""'], {}), "(\n 'Removes personally identifiable information (PII) like names and phone numbers from text strings and files.'\n )\n", (7767, 7888), False, 'import argparse\n'), ((459, 481), 'pathlib.Path', 'pathlib.Path', (['filePath'], {}), '(filePath)\n', (471, 481), False, 'import pathlib\n'), ((800, 822), 'pdfminer.high_level.extract_text', 'extract_text', (['filePath'], {}), '(filePath)\n', (812, 822), False, 'from pdfminer.high_level import extract_text\n'), ((1670, 1681), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (1679, 1681), False, 'import os\n'), ((3099, 3388), 're.compile', 're.compile', (['"""(\n (\\\\d{3}|\\\\(\\\\d{3}\\\\))? # area code\n (\\\\s|-|\\\\.)? # separator\n (\\\\d{3}) # first 3 digits\n (\\\\s|-|\\\\.) # separator\n (\\\\d{4}) # last 4 digits\n (\\\\s*(ext|x|ext.)\\\\s*(\\\\d{2,5}))? # optional extension\n )"""', 're.VERBOSE'], {}), '(\n """(\n (\\\\d{3}|\\\\(\\\\d{3}\\\\))? # area code\n (\\\\s|-|\\\\.)? # separator\n (\\\\d{3}) # first 3 digits\n (\\\\s|-|\\\\.) # separator\n (\\\\d{4}) # last 4 digits\n (\\\\s*(ext|x|ext.)\\\\s*(\\\\d{2,5}))? 
# optional extension\n )"""\n , re.VERBOSE)\n', (3109, 3388), False, 'import re\n'), ((3436, 3618), 're.compile', 're.compile', (['"""(\n [a-zA-Z0-9._%+-] + # username\n @ # @symbol\n [a-zA-Z0-9.-] + # domain\n (\\\\.[a-zA-Z]{2,4}) # .something\n )"""', 're.VERBOSE'], {}), '(\n """(\n [a-zA-Z0-9._%+-] + # username\n @ # @symbol\n [a-zA-Z0-9.-] + # domain\n (\\\\.[a-zA-Z]{2,4}) # .something\n )"""\n , re.VERBOSE)\n', (3446, 3618), False, 'import re\n'), ((3683, 4083), 're.compile', 're.compile', (['"""(\n (?!666|000|9\\\\d{2})\\\\d{3} # SSN can\'t begin with 666, 000 or anything between 900-999\n - # explicit dash (separating Area and Group numbers)\n (?!00)\\\\d{2} # don\'t allow the Group Number to be "00"\n - # another dash (separating Group and Serial numbers)\n (?!0{4})\\\\d{4} # don\'t allow last four digits to be "0000"\n )"""', 're.VERBOSE'], {}), '(\n """(\n (?!666|000|9\\\\d{2})\\\\d{3} # SSN can\'t begin with 666, 000 or anything between 900-999\n - # explicit dash (separating Area and Group numbers)\n (?!00)\\\\d{2} # don\'t allow the Group Number to be "00"\n - # another dash (separating Group and Serial numbers)\n (?!0{4})\\\\d{4} # don\'t allow last four digits to be "0000"\n )"""\n , re.VERBOSE)\n', (3693, 4083), False, 'import re\n'), ((1004, 1038), 'bs4.BeautifulSoup', 'BeautifulSoup', (['file', '"""html.parser"""'], {}), "(file, 'html.parser')\n", (1017, 1038), False, 'from bs4 import BeautifulSoup\n'), ((1768, 1792), 'nltk.word_tokenize', 'nltk.word_tokenize', (['text'], {}), '(text)\n', (1786, 1792), False, 'import nltk\n')]
|
import pyrebase
import time
from FaceRecognitionManager import *
firebaseConfig = {
"apiKey": "<KEY>",
"authDomain": "iaproject-29018.firebaseapp.com",
"projectId": "iaproject-29018",
"storageBucket": "iaproject-29018.appspot.com",
"messagingSenderId": "817053540910",
"appId": "1:817053540910:web:423251c3f6691e27fd75bf",
"databaseURL" : ""
}
email = '<EMAIL>'
password = '<PASSWORD>'
firebase = pyrebase.initialize_app(firebaseConfig)
auth = firebase.auth()
storage = firebase.storage()
user = auth.sign_in_with_email_and_password(email, password)
def uploadImage(imageName):
globalPath = "detected/{0}.jpg".format(imageName)
storage.child(globalPath).put(globalPath)
url = storage.child(globalPath).get_url(user['idToken'])
return url
def downloadImage(imageName):
globalPath = "uploaded/{0}.jpg".format(imageName)
downloadPath = 'downloaded/{0}.jpg'.format(imageName)
storage.child(globalPath).download(downloadPath)
return detectImage(downloadPath, imageName)
|
[
"pyrebase.initialize_app"
] |
[((428, 467), 'pyrebase.initialize_app', 'pyrebase.initialize_app', (['firebaseConfig'], {}), '(firebaseConfig)\n', (451, 467), False, 'import pyrebase\n')]
|
"""Parallel backends"""
# Authors: <NAME> <<EMAIL>>
# <NAME> <<EMAIL>>
import os
import sys
import re
import multiprocessing
import shlex
import pickle
import base64
from warnings import warn
from subprocess import Popen, PIPE, TimeoutExpired
import binascii
from queue import Queue, Empty
from threading import Thread, Event
from .cell_response import CellResponse
from .dipole import Dipole
from .network_builder import _simulate_single_trial
_BACKEND = None
def _thread_handler(event, out, queue):
while not event.isSet():
line = out.readline()
if line == '':
break
queue.put(line)
def _gather_trial_data(sim_data, net, n_trials, postproc):
"""Arrange data by trial
To be called after simulate(). Returns list of Dipoles, one for each trial,
and saves spiking info in net (instance of Network).
"""
dpls = []
# Create array of equally sampled time points for simulating currents
cell_type_names = list(net.cell_types.keys())
cell_response = CellResponse(times=sim_data[0]['times'],
cell_type_names=cell_type_names)
net.cell_response = cell_response
for idx in range(n_trials):
# cell response
net.cell_response._spike_times.append(sim_data[idx]['spike_times'])
net.cell_response._spike_gids.append(sim_data[idx]['spike_gids'])
net.cell_response.update_types(net.gid_ranges)
net.cell_response._vsoma.append(sim_data[idx]['vsoma'])
net.cell_response._isoma.append(sim_data[idx]['isoma'])
# extracellular array
for arr_name, arr in net.rec_arrays.items():
# voltages is a n_trials x n_contacts x n_samples array
arr._data.append(sim_data[idx]['rec_data'][arr_name])
arr._times = sim_data[idx]['rec_times'][arr_name]
# dipole
dpl = Dipole(times=sim_data[idx]['times'],
data=sim_data[idx]['dpl_data'])
N_pyr_x = net._params['N_pyr_x']
N_pyr_y = net._params['N_pyr_y']
dpl._baseline_renormalize(N_pyr_x, N_pyr_y) # XXX cf. #270
dpl._convert_fAm_to_nAm() # always applied, cf. #264
if postproc:
window_len = net._params['dipole_smooth_win'] # specified in ms
fctr = net._params['dipole_scalefctr']
if window_len > 0: # param files set this to zero for no smoothing
dpl.smooth(window_len=window_len)
if fctr > 0:
dpl.scale(fctr)
dpls.append(dpl)
return dpls
def _get_mpi_env():
"""Set some MPI environment variables."""
my_env = os.environ.copy()
if 'win' not in sys.platform:
my_env["OMPI_MCA_btl_base_warn_component_unused"] = '0'
if 'darwin' in sys.platform:
my_env["PMIX_MCA_gds"] = "^ds12" # open-mpi/ompi/issues/7516
my_env["TMPDIR"] = "/tmp" # open-mpi/ompi/issues/2956
return my_env
def run_subprocess(command, obj, timeout, proc_queue=None, *args, **kwargs):
"""Run process and communicate with it.
Parameters
----------
command : list of str | str
Command to run as subprocess (see subprocess.Popen documentation).
obj : object
The object to write to stdin after starting child process
with MPI command.
timeout : float
The number of seconds to wait for a process without output.
*args, **kwargs : arguments
Additional arguments to pass to subprocess.Popen.
Returns
-------
child_data : object
The data returned by the child process.
"""
proc_data_bytes = b''
# each loop while waiting will involve two Queue.get() timeouts, each
# 0.01s. This caclulation will error on the side of a longer timeout
# than is specified because more is done each loop that just Queue.get()
timeout_cycles = timeout / 0.02
pickled_obj = base64.b64encode(pickle.dumps(obj))
# non-blocking adapted from https://stackoverflow.com/questions/375427/non-blocking-read-on-a-subprocess-pipe-in-python#4896288 # noqa: E501
out_q = Queue()
err_q = Queue()
threads_started = False
try:
proc = Popen(command, stdin=PIPE, stdout=PIPE, stderr=PIPE, *args,
**kwargs)
# now that the process has started, add it to the queue
# used by MPIBackend.terminate()
if proc_queue is not None:
proc_queue.put(proc)
# set up polling first so all of child's stdout/stderr
# gets captured
event = Event()
out_t = Thread(target=_thread_handler,
args=(event, proc.stdout, out_q))
err_t = Thread(target=_thread_handler,
args=(event, proc.stderr, err_q))
out_t.start()
err_t.start()
threads_started = True
data_received = False
sent_network = False
count_since_last_output = 0
# loop while the process is running the simulation
while True:
child_terminated = proc.poll() is not None
if not data_received:
if _echo_child_output(out_q):
count_since_last_output = 0
else:
count_since_last_output += 1
# look for data in stderr and print child stdout
data_len, proc_data_bytes = _get_data_from_child_err(err_q)
if data_len > 0:
data_received = True
_write_child_exit_signal(proc.stdin)
elif child_terminated:
# child terminated early, and we already
# captured output left in queues
warn("Child process failed unexpectedly")
kill_proc_name('nrniv')
break
if not sent_network:
# Send network object to child so it can start
try:
_write_net(proc.stdin, pickled_obj)
except BrokenPipeError:
# child failed during _write_net(). get the
# output and break out of loop on the next
# iteration
warn("Received BrokenPipeError exception. "
"Child process failed unexpectedly")
continue
else:
sent_network = True
# This is not the same as "network received", but we
# assume it was successful and move on to waiting for
# data in the next loop iteration.
if child_terminated and data_received:
# both exit conditions have been met (also we know that
# the network has been sent)
break
if not child_terminated and \
count_since_last_output > timeout_cycles:
warn("Timeout exceeded while waiting for child process output"
". Terminating...")
kill_proc_name('nrniv')
break
except KeyboardInterrupt:
warn("Received KeyboardInterrupt. Stopping simulation process...")
if threads_started:
# stop the threads
event.set() # close signal
out_t.join()
err_t.join()
# wait for the process to terminate. we need use proc.communicate to
# read any output at its end of life.
try:
outs, errs = proc.communicate(timeout=1)
except TimeoutExpired:
proc.kill()
# wait for output again after kill signal
outs, errs = proc.communicate(timeout=1)
sys.stdout.write(outs)
sys.stdout.write(errs)
if proc.returncode is None:
# It's theoretically possible that we have received data
# and exited the loop above, but the child process has not
# yet terminated. This is unexpected unless KeyboarInterrupt
# is caught
proc.terminate()
try:
proc.wait(1) # wait maximum of 1s
except TimeoutExpired:
warn("Could not kill python subprocess: PID %d" % proc.pid)
if not proc.returncode == 0:
# simulation failed with a numeric return code
raise RuntimeError("MPI simulation failed. Return code: %d" %
proc.returncode)
child_data = _process_child_data(proc_data_bytes, data_len)
# clean up the queue
try:
proc_queue.get_nowait()
except Empty:
pass
return proc, child_data
def _process_child_data(data_bytes, data_len):
"""Process the data returned by child process.
Parameters
----------
data_bytes : str
The data bytes
Returns
-------
data_unpickled : object
The unpickled data.
"""
if not data_len == len(data_bytes):
# This is indicative of a failure. For debugging purposes.
warn("Length of received data unexpected. Expecting %d bytes, "
"got %d" % (data_len, len(data_bytes)))
if len(data_bytes) == 0:
raise RuntimeError("MPI simulation didn't return any data")
# decode base64 byte string
try:
data_pickled = base64.b64decode(data_bytes, validate=True)
except binascii.Error:
# This is here for future debugging purposes. Unit tests can't
# reproduce an incorrectly padded string, but this has been an
# issue before
raise ValueError("Incorrect padding for data length %d bytes" %
len(data_len) + " (mod 4 = %d)" %
(len(data_len) % 4))
# unpickle the data
return pickle.loads(data_pickled)
def _echo_child_output(out_q):
out = ''
while True:
try:
out += out_q.get(timeout=0.01)
except Empty:
break
if len(out) > 0:
sys.stdout.write(out)
return True
return False
def _get_data_from_child_err(err_q):
err = ''
data_length = 0
data_bytes = b''
while True:
try:
err += err_q.get(timeout=0.01)
except Empty:
break
# check for data signal
extracted_data = _extract_data(err, 'data')
if len(extracted_data) > 0:
# _extract_data only returns data when signals on
# both sides were seen
err = err.replace('@start_of_data@', '')
err = err.replace(extracted_data, '')
data_length = _extract_data_length(err, 'data')
err = err.replace('@end_of_data:%d@\n' % data_length, '')
data_bytes = extracted_data.encode()
# print the rest of the child's stderr to our stdout
sys.stdout.write(err)
return data_length, data_bytes
def _has_mpi4py():
"""Determine if mpi4py is present."""
try:
import mpi4py # noqa
except ImportError:
return False
else:
return True
def _has_psutil():
"""Determine if psutil is present."""
try:
import psutil # noqa
except ImportError:
return False
else:
return True
def requires_mpi4py(function):
"""Decorator for testing functions that require MPI."""
import pytest
try:
import mpi4py
assert hasattr(mpi4py, '__version__')
skip = False
except (ImportError, ModuleNotFoundError) as err:
if "TRAVIS_OS_NAME" not in os.environ:
skip = True
else:
raise ImportError(err)
reason = 'mpi4py not available'
return pytest.mark.skipif(skip, reason=reason)(function)
def requires_psutil(function):
"""Decorator for testing functions that require psutil."""
import pytest
try:
import psutil
assert hasattr(psutil, '__version__')
skip = False
except (ImportError, ModuleNotFoundError) as err:
if "TRAVIS_OS_NAME" not in os.environ:
skip = True
else:
raise ImportError(err)
reason = 'psutil not available'
return pytest.mark.skipif(skip, reason=reason)(function)
def _extract_data_length(data_str, object_name):
data_len_match = re.search('@end_of_%s:' % object_name + r'(\d+)@',
data_str)
if data_len_match is not None:
return int(data_len_match.group(1))
else:
raise ValueError("Couldn't find data length in string")
def _extract_data(data_str, object_name):
start_idx = 0
end_idx = 0
start_match = re.search('@start_of_%s@' % object_name, data_str)
if start_match is not None:
start_idx = start_match.end()
else:
# need start signal
return ''
end_match = re.search('@end_of_%s:' % object_name + r'\d+@', data_str)
if end_match is not None:
end_idx = end_match.start()
return data_str[start_idx:end_idx]
# Next 3 functions are from HNN. Will move here. They require psutil
def _kill_procs(procs):
"""Tries to terminate processes in a list before sending kill signal"""
from psutil import wait_procs, NoSuchProcess
# try terminate first
for p in procs:
try:
p.terminate()
except NoSuchProcess:
pass
_, alive = wait_procs(procs, timeout=3)
# now try kill
for p in alive:
p.kill()
_, alive = wait_procs(procs, timeout=3)
return alive
def _get_procs_running(proc_name):
"""Return a list of processes currently running"""
from psutil import process_iter
process_list = []
for p in process_iter(attrs=["name", "exe", "cmdline"]):
if proc_name == p.info['name'] or \
(p.info['exe'] is not None and
os.path.basename(p.info['exe']) == proc_name) or \
(p.info['cmdline'] and
p.info['cmdline'][0] == proc_name):
process_list.append(p)
return process_list
def kill_proc_name(proc_name):
"""Make best effort to kill processes
Parameters
----------
proc_name : str
A string to match process names against and kill all matches
Returns
-------
killed_procs : bool
True if any processes were killed
"""
killed_procs = False
procs = _get_procs_running(proc_name)
if len(procs) > 0:
running = _kill_procs(procs)
if len(running) > 0:
if len(running) < len(procs):
killed_procs = True
pids = [str(proc.pid) for proc in running]
warn("Failed to kill nrniv process(es) %s" %
','.join(pids))
else:
killed_procs = True
return killed_procs
def _write_net(stream, pickled_net):
stream.flush()
stream.write('@start_of_net@')
stream.write(pickled_net.decode())
stream.write('@end_of_net:%d@\n' % len(pickled_net))
stream.flush()
def _write_child_exit_signal(stream):
stream.flush()
stream.write('@data_received@\n')
stream.flush()
class JoblibBackend(object):
"""The JoblibBackend class.
Parameters
----------
n_jobs : int | None
The number of jobs to start in parallel. If None, then 1 trial will be
started without parallelism
Attributes
----------
n_jobs : int
The number of jobs to start in parallel
"""
def __init__(self, n_jobs=1):
self.n_jobs = n_jobs
print("joblib will run over %d jobs" % (self.n_jobs))
def _parallel_func(self, func):
if self.n_jobs != 1:
try:
from joblib import Parallel, delayed
except ImportError:
warn('joblib not installed. Cannot run in parallel.')
self.n_jobs = 1
if self.n_jobs == 1:
my_func = func
parallel = list
else:
parallel = Parallel(self.n_jobs)
my_func = delayed(func)
return parallel, my_func
def __enter__(self):
global _BACKEND
self._old_backend = _BACKEND
_BACKEND = self
return self
def __exit__(self, type, value, traceback):
global _BACKEND
_BACKEND = self._old_backend
def simulate(self, net, tstop, dt, n_trials, postproc=False):
"""Simulate the HNN model
Parameters
----------
net : Network object
The Network object specifying how cells are
connected.
n_trials : int
Number of trials to simulate.
tstop : float
The simulation stop time (ms).
dt : float
The integration time step of h.CVode (ms)
postproc : bool
If False, no postprocessing applied to the dipole
Returns
-------
dpl: list of Dipole
The Dipole results from each simulation trial
"""
parallel, myfunc = self._parallel_func(_simulate_single_trial)
sim_data = parallel(myfunc(net, tstop, dt, trial_idx) for
trial_idx in range(n_trials))
dpls = _gather_trial_data(sim_data, net=net, n_trials=n_trials,
postproc=postproc)
return dpls
class MPIBackend(object):
"""The MPIBackend class.
Parameters
----------
n_procs : int | None
The number of MPI processes requested by the user. If None, then will
attempt to detect number of cores (including hyperthreads) and start
parallel simulation over all of them.
mpi_cmd : str
The name of the mpi launcher executable. Will use 'mpiexec'
(openmpi) by default.
Attributes
----------
n_procs : int
The number of processes MPI will actually use (spread over cores). If 1
is specified or mpi4py could not be loaded, the simulation will be run
with the JoblibBackend
mpi_cmd : list of str
The mpi command with number of procs and options to be passed to Popen
expected_data_length : int
Used to check consistency between data that was sent and what
MPIBackend received.
proc_queue : threading.Queue
A Queue object to hold process handles from Popen in a thread-safe way.
There will be a valid process handle present the queue when a MPI
åsimulation is running.
"""
def __init__(self, n_procs=None, mpi_cmd='mpiexec'):
self.expected_data_length = 0
self.proc = None
self.proc_queue = Queue()
n_logical_cores = multiprocessing.cpu_count()
if n_procs is None:
self.n_procs = n_logical_cores
else:
self.n_procs = n_procs
# did user try to force running on more cores than available?
oversubscribe = False
if self.n_procs > n_logical_cores:
oversubscribe = True
hyperthreading = False
if _has_mpi4py() and _has_psutil():
import psutil
n_physical_cores = psutil.cpu_count(logical=False)
# detect if we need to use hwthread-cpus with mpiexec
if self.n_procs > n_physical_cores:
hyperthreading = True
else:
packages = list()
if not _has_mpi4py():
packages += ['mpi4py']
if not _has_psutil():
packages += ['psutil']
packages = ' and '.join(packages)
warn(f'{packages} not installed. Will run on single processor')
self.n_procs = 1
self.mpi_cmd = mpi_cmd
if self.n_procs == 1:
print("Backend will use 1 core. Running simulation without MPI")
return
else:
print("MPI will run over %d processes" % (self.n_procs))
if hyperthreading:
self.mpi_cmd += ' --use-hwthread-cpus'
if oversubscribe:
self.mpi_cmd += ' --oversubscribe'
self.mpi_cmd += ' -np ' + str(self.n_procs)
self.mpi_cmd += ' nrniv -python -mpi -nobanner ' + \
sys.executable + ' ' + \
os.path.join(os.path.dirname(sys.modules[__name__].__file__),
'mpi_child.py')
# Split the command into shell arguments for passing to Popen
if 'win' in sys.platform:
use_posix = True
else:
use_posix = False
self.mpi_cmd = shlex.split(self.mpi_cmd, posix=use_posix)
def __enter__(self):
global _BACKEND
self._old_backend = _BACKEND
_BACKEND = self
return self
def __exit__(self, type, value, traceback):
global _BACKEND
_BACKEND = self._old_backend
# always kill nrniv processes for good measure
if self.n_procs > 1:
kill_proc_name('nrniv')
def simulate(self, net, tstop, dt, n_trials, postproc=False):
"""Simulate the HNN model in parallel on all cores
Parameters
----------
net : Network object
The Network object specifying how cells are
connected.
tstop : float
The simulation stop time (ms).
dt : float
The integration time step of h.CVode (ms)
n_trials : int
Number of trials to simulate.
postproc: bool
If False, no postprocessing applied to the dipole
Returns
-------
dpl: list of Dipole
The Dipole results from each simulation trial
"""
# just use the joblib backend for a single core
if self.n_procs == 1:
return JoblibBackend(n_jobs=1).simulate(net, tstop=tstop,
dt=dt,
n_trials=n_trials,
postproc=postproc)
print("Running %d trials..." % (n_trials))
dpls = []
env = _get_mpi_env()
self.proc, sim_data = run_subprocess(
command=self.mpi_cmd, obj=[net, tstop, dt, n_trials], timeout=30,
proc_queue=self.proc_queue, env=env, cwd=os.getcwd(),
universal_newlines=True)
dpls = _gather_trial_data(sim_data, net, n_trials, postproc)
return dpls
def terminate(self):
"""Terminate running simulation on this MPIBackend
Safe to call from another thread from the one `simulate_dipole`
was called from.
"""
proc = None
try:
proc = self.proc_queue.get(timeout=1)
except Empty:
warn("No currently running process to terminate")
if proc is not None:
proc.terminate()
try:
proc.wait(5) # wait maximum of 5s
except TimeoutExpired:
warn("Could not kill python subprocess: PID %d" %
proc.pid)
|
[
"sys.stdout.write",
"os.environ.copy",
"base64.b64decode",
"pytest.mark.skipif",
"psutil.cpu_count",
"multiprocessing.cpu_count",
"psutil.process_iter",
"os.path.dirname",
"shlex.split",
"threading.Event",
"re.search",
"pickle.dumps",
"pickle.loads",
"threading.Thread",
"subprocess.Popen",
"psutil.wait_procs",
"os.path.basename",
"queue.Queue",
"os.getcwd",
"joblib.Parallel",
"warnings.warn",
"joblib.delayed"
] |
[((2645, 2662), 'os.environ.copy', 'os.environ.copy', ([], {}), '()\n', (2660, 2662), False, 'import os\n'), ((4097, 4104), 'queue.Queue', 'Queue', ([], {}), '()\n', (4102, 4104), False, 'from queue import Queue, Empty\n'), ((4117, 4124), 'queue.Queue', 'Queue', ([], {}), '()\n', (4122, 4124), False, 'from queue import Queue, Empty\n'), ((7656, 7678), 'sys.stdout.write', 'sys.stdout.write', (['outs'], {}), '(outs)\n', (7672, 7678), False, 'import sys\n'), ((7683, 7705), 'sys.stdout.write', 'sys.stdout.write', (['errs'], {}), '(errs)\n', (7699, 7705), False, 'import sys\n'), ((9651, 9677), 'pickle.loads', 'pickle.loads', (['data_pickled'], {}), '(data_pickled)\n', (9663, 9677), False, 'import pickle\n'), ((10654, 10675), 'sys.stdout.write', 'sys.stdout.write', (['err'], {}), '(err)\n', (10670, 10675), False, 'import sys\n'), ((12103, 12163), 're.search', 're.search', (["('@end_of_%s:' % object_name + '(\\\\d+)@')", 'data_str'], {}), "('@end_of_%s:' % object_name + '(\\\\d+)@', data_str)\n", (12112, 12163), False, 'import re\n'), ((12444, 12494), 're.search', 're.search', (["('@start_of_%s@' % object_name)", 'data_str'], {}), "('@start_of_%s@' % object_name, data_str)\n", (12453, 12494), False, 'import re\n'), ((12638, 12696), 're.search', 're.search', (["('@end_of_%s:' % object_name + '\\\\d+@')", 'data_str'], {}), "('@end_of_%s:' % object_name + '\\\\d+@', data_str)\n", (12647, 12696), False, 'import re\n'), ((13171, 13199), 'psutil.wait_procs', 'wait_procs', (['procs'], {'timeout': '(3)'}), '(procs, timeout=3)\n', (13181, 13199), False, 'from psutil import wait_procs, NoSuchProcess\n'), ((13272, 13300), 'psutil.wait_procs', 'wait_procs', (['procs'], {'timeout': '(3)'}), '(procs, timeout=3)\n', (13282, 13300), False, 'from psutil import wait_procs, NoSuchProcess\n'), ((13483, 13529), 'psutil.process_iter', 'process_iter', ([], {'attrs': "['name', 'exe', 'cmdline']"}), "(attrs=['name', 'exe', 'cmdline'])\n", (13495, 13529), False, 'from psutil import 
process_iter\n'), ((3919, 3936), 'pickle.dumps', 'pickle.dumps', (['obj'], {}), '(obj)\n', (3931, 3936), False, 'import pickle\n'), ((4179, 4248), 'subprocess.Popen', 'Popen', (['command', '*args'], {'stdin': 'PIPE', 'stdout': 'PIPE', 'stderr': 'PIPE'}), '(command, *args, stdin=PIPE, stdout=PIPE, stderr=PIPE, **kwargs)\n', (4184, 4248), False, 'from subprocess import Popen, PIPE, TimeoutExpired\n'), ((4548, 4555), 'threading.Event', 'Event', ([], {}), '()\n', (4553, 4555), False, 'from threading import Thread, Event\n'), ((4572, 4636), 'threading.Thread', 'Thread', ([], {'target': '_thread_handler', 'args': '(event, proc.stdout, out_q)'}), '(target=_thread_handler, args=(event, proc.stdout, out_q))\n', (4578, 4636), False, 'from threading import Thread, Event\n'), ((4676, 4740), 'threading.Thread', 'Thread', ([], {'target': '_thread_handler', 'args': '(event, proc.stderr, err_q)'}), '(target=_thread_handler, args=(event, proc.stderr, err_q))\n', (4682, 4740), False, 'from threading import Thread, Event\n'), ((9202, 9245), 'base64.b64decode', 'base64.b64decode', (['data_bytes'], {'validate': '(True)'}), '(data_bytes, validate=True)\n', (9218, 9245), False, 'import base64\n'), ((9866, 9887), 'sys.stdout.write', 'sys.stdout.write', (['out'], {}), '(out)\n', (9882, 9887), False, 'import sys\n'), ((11497, 11536), 'pytest.mark.skipif', 'pytest.mark.skipif', (['skip'], {'reason': 'reason'}), '(skip, reason=reason)\n', (11515, 11536), False, 'import pytest\n'), ((11981, 12020), 'pytest.mark.skipif', 'pytest.mark.skipif', (['skip'], {'reason': 'reason'}), '(skip, reason=reason)\n', (11999, 12020), False, 'import pytest\n'), ((18387, 18394), 'queue.Queue', 'Queue', ([], {}), '()\n', (18392, 18394), False, 'from queue import Queue, Empty\n'), ((18422, 18449), 'multiprocessing.cpu_count', 'multiprocessing.cpu_count', ([], {}), '()\n', (18447, 18449), False, 'import multiprocessing\n'), ((20272, 20314), 'shlex.split', 'shlex.split', (['self.mpi_cmd'], {'posix': 'use_posix'}), 
'(self.mpi_cmd, posix=use_posix)\n', (20283, 20314), False, 'import shlex\n'), ((7134, 7200), 'warnings.warn', 'warn', (['"""Received KeyboardInterrupt. Stopping simulation process..."""'], {}), "('Received KeyboardInterrupt. Stopping simulation process...')\n", (7138, 7200), False, 'from warnings import warn\n'), ((15763, 15784), 'joblib.Parallel', 'Parallel', (['self.n_jobs'], {}), '(self.n_jobs)\n', (15771, 15784), False, 'from joblib import Parallel, delayed\n'), ((15807, 15820), 'joblib.delayed', 'delayed', (['func'], {}), '(func)\n', (15814, 15820), False, 'from joblib import Parallel, delayed\n'), ((18882, 18913), 'psutil.cpu_count', 'psutil.cpu_count', ([], {'logical': '(False)'}), '(logical=False)\n', (18898, 18913), False, 'import psutil\n'), ((19316, 19379), 'warnings.warn', 'warn', (['f"""{packages} not installed. Will run on single processor"""'], {}), "(f'{packages} not installed. Will run on single processor')\n", (19320, 19379), False, 'from warnings import warn\n'), ((6930, 7009), 'warnings.warn', 'warn', (['"""Timeout exceeded while waiting for child process output. Terminating..."""'], {}), "('Timeout exceeded while waiting for child process output. 
Terminating...')\n", (6934, 7009), False, 'from warnings import warn\n'), ((8088, 8147), 'warnings.warn', 'warn', (["('Could not kill python subprocess: PID %d' % proc.pid)"], {}), "('Could not kill python subprocess: PID %d' % proc.pid)\n", (8092, 8147), False, 'from warnings import warn\n'), ((19981, 20028), 'os.path.dirname', 'os.path.dirname', (['sys.modules[__name__].__file__'], {}), '(sys.modules[__name__].__file__)\n', (19996, 20028), False, 'import os\n'), ((22008, 22019), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (22017, 22019), False, 'import os\n'), ((22460, 22509), 'warnings.warn', 'warn', (['"""No currently running process to terminate"""'], {}), "('No currently running process to terminate')\n", (22464, 22509), False, 'from warnings import warn\n'), ((13639, 13670), 'os.path.basename', 'os.path.basename', (["p.info['exe']"], {}), "(p.info['exe'])\n", (13655, 13670), False, 'import os\n'), ((15556, 15609), 'warnings.warn', 'warn', (['"""joblib not installed. Cannot run in parallel."""'], {}), "('joblib not installed. Cannot run in parallel.')\n", (15560, 15609), False, 'from warnings import warn\n'), ((22688, 22747), 'warnings.warn', 'warn', (["('Could not kill python subprocess: PID %d' % proc.pid)"], {}), "('Could not kill python subprocess: PID %d' % proc.pid)\n", (22692, 22747), False, 'from warnings import warn\n'), ((5714, 5755), 'warnings.warn', 'warn', (['"""Child process failed unexpectedly"""'], {}), "('Child process failed unexpectedly')\n", (5718, 5755), False, 'from warnings import warn\n'), ((6219, 6296), 'warnings.warn', 'warn', (['"""Received BrokenPipeError exception. Child process failed unexpectedly"""'], {}), "('Received BrokenPipeError exception. Child process failed unexpectedly')\n", (6223, 6296), False, 'from warnings import warn\n')]
|
from typing import TYPE_CHECKING
if TYPE_CHECKING:
from Platforms.Discord.main_discord import PhaazebotDiscord
from Platforms.Web.index import WebIndex
from aiohttp.web import Response, Request
from .get import apiDiscordCommandsGet
from .create import apiDiscordCommandsCreate
from .list import apiDiscordCommandsList
from .delete import apiDiscordCommandsDelete
from .edit import apiDiscordCommandsEdit
from Platforms.Web.Processing.Api.errors import apiMissingValidMethod, apiNotAllowed
async def apiDiscordCommands(cls:"WebIndex", WebRequest:Request) -> Response:
"""
Default url: /api/discord/commands
"""
PhaazeDiscord:"PhaazebotDiscord" = cls.Web.BASE.Discord
if not PhaazeDiscord: return await apiNotAllowed(cls, WebRequest, msg="Discord module is not active")
method:str = WebRequest.match_info.get("method", "")
if not method: return await apiMissingValidMethod(cls, WebRequest)
elif method == "get":
return await apiDiscordCommandsGet(cls, WebRequest)
elif method == "delete":
return await apiDiscordCommandsDelete(cls, WebRequest)
elif method == "create":
return await apiDiscordCommandsCreate(cls, WebRequest)
elif method == "edit":
return await apiDiscordCommandsEdit(cls, WebRequest)
elif method == "list":
return await apiDiscordCommandsList(cls, WebRequest)
else: return await apiMissingValidMethod(cls, WebRequest, msg=f"'{method}' is not a known method")
|
[
"Platforms.Web.Processing.Api.errors.apiNotAllowed",
"Platforms.Web.Processing.Api.errors.apiMissingValidMethod"
] |
[((713, 779), 'Platforms.Web.Processing.Api.errors.apiNotAllowed', 'apiNotAllowed', (['cls', 'WebRequest'], {'msg': '"""Discord module is not active"""'}), "(cls, WebRequest, msg='Discord module is not active')\n", (726, 779), False, 'from Platforms.Web.Processing.Api.errors import apiMissingValidMethod, apiNotAllowed\n'), ((864, 902), 'Platforms.Web.Processing.Api.errors.apiMissingValidMethod', 'apiMissingValidMethod', (['cls', 'WebRequest'], {}), '(cls, WebRequest)\n', (885, 902), False, 'from Platforms.Web.Processing.Api.errors import apiMissingValidMethod, apiNotAllowed\n'), ((1330, 1409), 'Platforms.Web.Processing.Api.errors.apiMissingValidMethod', 'apiMissingValidMethod', (['cls', 'WebRequest'], {'msg': 'f"""\'{method}\' is not a known method"""'}), '(cls, WebRequest, msg=f"\'{method}\' is not a known method")\n', (1351, 1409), False, 'from Platforms.Web.Processing.Api.errors import apiMissingValidMethod, apiNotAllowed\n')]
|
from __future__ import annotations
import os
import math
import itertools
from io import BytesIO
from typing import Any, Dict, List, Tuple, Union, Iterator, Optional, Sequence
import PIL
from PIL import ImageDraw, ImageFont, ImageFilter
from pink_accents import Accent
from pink.context import Context
from pink.cogs.utils.errorhandler import PINKError
from .types import StaticImage
_VertexType = Dict[str, int]
_VerticesType = Tuple[_VertexType, _VertexType, _VertexType, _VertexType]
OCR_API_URL = "https://content-vision.googleapis.com/v1/images:annotate"
# avoid making this a hard dependency by not reading it in constants.py
# since it is not used anywhere else now
PINK_PROXY = os.environ["PINK_PROXY"]
PINK_PROXY_TOKEN = f"Bearer {os.environ['PINK_PROXY_TOKEN']}"
FONT = ImageFont.truetype("DejaVuSans.ttf")
class GoogleOCRError(PINKError):
KNOWN_HINTS = {
None: "The world is on fire, something really bad happened. I have no idea.",
14: "This means Google cannot access image URL. Try using a different one.",
}
def __init__(self, code: Optional[int], message: str):
self.code = code
self.message = message
super().__init__(str(self))
@classmethod
def from_response(cls, response: Dict[str, Any]) -> GoogleOCRError:
error = response.get("error", {})
code = error.get("code")
message = error.get("message", "unknown")
return cls(code, message)
def __str__(self) -> str:
base = f"**{type(self).__name__}**[{self.code}]: {self.message}"
if (hint := self.KNOWN_HINTS.get(self.code)) is not None:
base += f"\n\nHint: {hint}"
return base
class TROCRException(Exception):
pass
class AngleUndetectable(TROCRException):
pass
class TextField:
def __init__(self, full_text: str, src: PIL.Image, padding: int = 3):
self.text = full_text
self.left: Optional[int] = None
self.upper: Optional[int] = None
self.right: Optional[int] = None
self.lower: Optional[int] = None
self.angle = 0
self._src_width, self._src_height = src.size
self._padding = padding
def add_word(self, vertices: _VerticesType, src_size: Tuple[int, int]) -> None:
if not self.initialized:
# Get angle from first word
self.angle = self._get_angle(vertices)
left, upper, right, lower = self._vertices_to_coords(
vertices, src_size, self.angle
)
self.left = left if self.left is None else min((self.left, left))
self.upper = upper if self.upper is None else min((self.upper, upper))
self.right = right if self.right is None else max((self.right, right))
self.lower = lower if self.lower is None else max((self.lower, lower))
@staticmethod
def _vertices_to_coords(
vertices: _VerticesType, src_size: Tuple[int, int], angle: int
) -> Tuple[int, int, int, int]:
"""Returns Pillow style coordinates (left, upper, right, lower)."""
# A - 0
# B - 1
# C - 2
# D - 3
#
# A----B
# | | angle = 360/0
# D----C
#
# A
# / \
# D B angle = 315
# \ /
# C
#
# D----A
# | | angle = 270
# C----B
#
# D
# / \
# C A angle = 225
# \ /
# B
#
# C---D
# | | angle = 180
# B---A
#
# C
# / \
# B D angle = 135
# \ /
# A
#
# B---C
# | | angle = 90
# A---D
#
# B
# / \
# A C angle = 45
# \ /
# D
if 0 <= angle <= 90:
left = vertices[0].get("x")
upper = vertices[1].get("y")
right = vertices[2].get("x")
lower = vertices[3].get("y")
elif 90 < angle <= 180:
left = vertices[1].get("x")
upper = vertices[2].get("y")
right = vertices[3].get("x")
lower = vertices[0].get("y")
elif 180 < angle <= 270:
left = vertices[2].get("x")
upper = vertices[3].get("y")
right = vertices[0].get("x")
lower = vertices[1].get("y")
elif 270 < angle <= 360:
left = vertices[3].get("x")
upper = vertices[0].get("y")
right = vertices[1].get("x")
lower = vertices[2].get("y")
if left is None:
left = 0
if upper is None:
upper = 0
if right is None:
right = src_size[0]
if lower is None:
lower = src_size[1]
return (left, upper, right, lower)
@staticmethod
def _get_angle(vertices: _VerticesType) -> int:
def get_coords(vertex: _VertexType) -> Tuple[Optional[int], Optional[int]]:
return vertex.get("x"), vertex.get("y")
cycle = itertools.cycle(vertices)
x, y = get_coords(next(cycle))
for i in range(4):
next_x, next_y = get_coords(next(cycle))
# Any vertex coordinate can be missing
if None in (x, y, next_x, next_y):
x, y = next_x, next_y
continue
# algo: https://stackoverflow.com/a/27481611
# mypy literally does not see previous statement
delta_y = y - next_y # type: ignore
delta_x = next_x - x # type: ignore
degrees = math.degrees(math.atan2(delta_y, delta_x))
if degrees < 0:
degrees += 360
# compensate missing vertices
degrees += 90 * i
break
else:
raise AngleUndetectable
# # truncate last digit, OCR often returns 1-2 degree tilted text, ignore this
# TEMPORARY: truncate angle to 90 degrees
return 90 * round(degrees / 90)
@property
def coords(self) -> Tuple[int, int, int, int]:
return (self.left, self.upper, self.right, self.lower) # type: ignore
@property
def coords_padded(self) -> Tuple[int, int, int, int]:
return (
max((0, self.left - self._padding)), # type: ignore
max((0, self.upper - self._padding)), # type: ignore
min((self._src_width, self.right + self._padding)), # type: ignore
min((self._src_height, self.lower + self._padding)), # type: ignore
)
# TODO: implement w/h detection ASAP, this is temporary
# solutions:
# 1) https://stackoverflow.com/a/9972699
# text surrounding box dimensions are known, but i had no success implementing this
# 2) try to keep track of full coords and just calculate distance
# a lot of coordinates might be missing, 1st solution is more reliable if it worked
@property
def width(self) -> int:
if self.angle in (0, 180, 360):
return self.right - self.left # type: ignore
if self.angle in (90, 270):
return self.lower - self.upper # type: ignore
assert False # noqa
@property
def height(self) -> int:
if self.angle in (0, 180, 360):
return self.lower - self.upper # type: ignore
if self.angle in (90, 270):
return self.right - self.left # type: ignore
assert False # noqa
@property
def font_size(self) -> int:
return max((1, int(1.3333333 * self.height) - 2))
@property
def stroke_width(self) -> int:
return max((1, round(self.font_size / 12)))
@property
def initialized(self) -> bool:
return None not in self.coords
def __repr__(self) -> str:
return f"<TextField text='{self.text}' coords={self.coords} angle={self.angle}>"
def _language_iterator(blocks: Sequence[Any]) -> Iterator[Optional[str]]:
"""Extracts language for each paragraph in Google OCR output"""
def extract_language(data: Any) -> Optional[str]:
if (properties := data.get("property")) is None:
return None
if (languages := properties.get("detectedLanguages")) is None:
return None
return sorted(languages, key=lambda l: l.get("confidence", 1))[-1][
"languageCode"
]
for block in blocks:
block_language = extract_language(block)
for paragraph in block["paragraphs"]:
paragraph_language = extract_language(paragraph)
yield paragraph_language or block_language
# line grouping differs between simple annotations and paragraph grouping in
# full annotations. "EOL_SURE_SPACE" indicates line break matching simple
# annotations
for word in paragraph["words"]:
last_symbol = word["symbols"][-1]
if (symbol_properties := last_symbol.get("property")) is None:
continue
if (detected_break := symbol_properties.get("detectedBreak")) is None:
continue
if detected_break["type"] != "EOL_SURE_SPACE":
continue
yield paragraph_language or block_language
async def ocr(ctx: Context, image_url: str) -> Dict[str, Any]:
async with ctx.session.post(
f"{PINK_PROXY}",
headers=dict(authorization=PINK_PROXY_TOKEN),
json=dict(url=image_url, ttl=3600),
) as r:
if r.status != 200:
await ctx.reply(
f"Unable to reach proxy: {r.status}\n"
f"Will try raw URL but it will most likely fail"
)
else:
json = await r.json()
image_url = f"{PINK_PROXY}/{json['id']}"
async with ctx.session.post(
OCR_API_URL,
params={
"key": os.environ["OCR_API_TOKEN"],
},
json={
"requests": [
{
"features": [{"type": "TEXT_DETECTION"}],
"image": {
"source": {
"imageUri": image_url,
}
},
}
]
},
headers={
"x-origin": "https://explorer.apis.google.com",
"x-referer": "https://explorer.apis.google.com",
},
) as r:
if r.status != 200:
if r.content_type.lower() != "application/json":
reason = await r.text()
if reason.count("\n") > 1:
# we got some garbage HTML response
reason = "unknown error"
raise PINKError(
f"Something really bad happened with underlying API[{r.status}]: {reason}"
)
json = await r.json()
raise PINKError(
f"Error in underlying API[{r.status}]: "
f'{json.get("message", "unknown error")}'
)
json = await r.json()
if len((responses := json["responses"])) == 0:
return {}
maybe_annotations = responses[0]
if "textAnnotations" not in maybe_annotations:
if "error" in maybe_annotations:
raise GoogleOCRError.from_response(maybe_annotations)
else:
raise PINKError("no text detected", formatted=False)
return maybe_annotations
def _draw_trocr(src: PIL.Image, fields: Sequence[TextField]) -> BytesIO:
FIELD_CAP = 150
fields = fields[:FIELD_CAP]
src = src.convert("RGBA")
for field in fields:
cropped = src.crop(field.coords_padded)
# NOTE: next line causes segfaults if coords are wrong, debug from here
blurred = cropped.filter(ImageFilter.GaussianBlur(10))
# Does not work anymore for some reason, black stroke is good anyway
# field.inverted_avg_color = ImageOps.invert(
# blurred.resize((1, 1)).convert("L")
# ).getpixel((0, 0)) # ugly!!!
src.paste(blurred, field.coords_padded)
for field in fields:
# TODO: figure out how to fit text into boxes with Pillow without creating
# extra images
font = FONT.font_variant(size=field.font_size)
text_im = PIL.Image.new(
"RGBA",
size=font.getsize(field.text, stroke_width=field.stroke_width),
)
ImageDraw.Draw(text_im).text(
(0, 0),
text=field.text,
font=font,
spacing=0,
stroke_width=field.stroke_width,
stroke_fill=(0, 0, 0),
)
src.alpha_composite(
text_im.resize(
(
min((text_im.width, field.width)),
min((text_im.height, field.height)),
),
).rotate(field.angle, expand=True, resample=PIL.Image.BICUBIC),
field.coords_padded[:2],
)
result = BytesIO()
src.save(result, format="PNG")
return BytesIO(result.getvalue())
def _apply_accents(ctx: Context, lines: List[str], accent: Accent) -> List[str]:
if (accent_cog := ctx.bot.get_cog("Accents")) is None:
raise RuntimeError("No accents cog loaded")
return [
# trocr fully depends on newlines, apply accents to each line separately and
# replace any newlines with spaces to make sure text order is preserved
accent_cog.apply_accents_to_text(line, [accent]).replace("\n", " ")
for line in lines
]
async def _apply_translation(
ctx: Context,
lines: List[str],
language: str,
block_annotations: Any,
) -> List[str]:
if (translator_cog := ctx.bot.get_cog("Translator")) is None:
raise RuntimeError("No translator cog loaded")
# TODO: group by input languages to improve translation?
need_trasnslation = {}
paragraph_languages = _language_iterator(block_annotations)
for i, line in enumerate(lines):
if next(paragraph_languages) is not None:
need_trasnslation[i] = line
if not need_trasnslation:
raise PINKError(
"nothing to translate on image "
"(either entire text is in target language or language is undetected)",
formatted=False,
)
translated = await translator_cog.translate(
"\n".join(need_trasnslation.values()), language
)
translated_lines = translated.split("\n")
if len(translated_lines) != len(need_trasnslation):
raise RuntimeError(
f"expected {len(need_trasnslation)} translated lines, got {len(translated_lines)}"
)
new_lines = lines.copy()
for idx, translated_line in zip(need_trasnslation.keys(), translated_lines):
new_lines[idx] = translated_line
return new_lines
async def ocr_translate(
ctx: Context, image: StaticImage, language: Union[str, Accent]
) -> Tuple[BytesIO, str]:
src = await image.to_pil_image(ctx)
annotations = await ocr(ctx, image.url)
word_annotations = annotations["textAnnotations"][1:]
block_annotations = annotations["fullTextAnnotation"]["pages"][0]["blocks"]
# Google OCR API returns entry for each word separately, but they can be joined
# by checking full image description. In description words are combined into
# lines, lines are separated by newlines, there is a trailing newline.
# Coordinates from words in the same line can be merged
lines = annotations["fullTextAnnotation"]["text"][:-1].split("\n")
if isinstance(language, Accent):
new_lines = _apply_accents(ctx, lines, language)
else:
new_lines = await _apply_translation(ctx, lines, language, block_annotations)
# error reporting
notes = ""
current_word = 0
fields = []
for original_line, line in zip(lines, new_lines):
field = TextField(line, src)
remaining_line = original_line
# TODO: sane iterator instead of this
for word in word_annotations[current_word:]:
text = word["description"]
if remaining_line.startswith(text):
current_word += 1
remaining_line = remaining_line[len(text) :].lstrip()
# TODO: merge multiple lines into box
try:
field.add_word(word["boundingPoly"]["vertices"], src.size)
except AngleUndetectable:
notes += f"angle for `{word}` is undetectable\n"
else:
break
if field.initialized:
if line.casefold() != original_line.casefold():
fields.append(field)
if not fields:
raise PINKError("could not translate anything on image", formatted=False)
result = await ctx.bot.loop.run_in_executor(None, _draw_trocr, src, fields)
stats = f"Words: {current_word}\nLines: {len(fields)}"
if notes:
stats += f"\nNotes: {notes}"
return result, stats
|
[
"PIL.ImageFilter.GaussianBlur",
"io.BytesIO",
"math.atan2",
"pink.cogs.utils.errorhandler.PINKError",
"PIL.ImageFont.truetype",
"PIL.ImageDraw.Draw",
"itertools.cycle"
] |
[((790, 826), 'PIL.ImageFont.truetype', 'ImageFont.truetype', (['"""DejaVuSans.ttf"""'], {}), "('DejaVuSans.ttf')\n", (808, 826), False, 'from PIL import ImageDraw, ImageFont, ImageFilter\n'), ((13030, 13039), 'io.BytesIO', 'BytesIO', ([], {}), '()\n', (13037, 13039), False, 'from io import BytesIO\n'), ((5087, 5112), 'itertools.cycle', 'itertools.cycle', (['vertices'], {}), '(vertices)\n', (5102, 5112), False, 'import itertools\n'), ((14177, 14315), 'pink.cogs.utils.errorhandler.PINKError', 'PINKError', (['"""nothing to translate on image (either entire text is in target language or language is undetected)"""'], {'formatted': '(False)'}), "(\n 'nothing to translate on image (either entire text is in target language or language is undetected)'\n , formatted=False)\n", (14186, 14315), False, 'from pink.cogs.utils.errorhandler import PINKError\n'), ((16751, 16818), 'pink.cogs.utils.errorhandler.PINKError', 'PINKError', (['"""could not translate anything on image"""'], {'formatted': '(False)'}), "('could not translate anything on image', formatted=False)\n", (16760, 16818), False, 'from pink.cogs.utils.errorhandler import PINKError\n'), ((11410, 11456), 'pink.cogs.utils.errorhandler.PINKError', 'PINKError', (['"""no text detected"""'], {'formatted': '(False)'}), "('no text detected', formatted=False)\n", (11419, 11456), False, 'from pink.cogs.utils.errorhandler import PINKError\n'), ((11834, 11862), 'PIL.ImageFilter.GaussianBlur', 'ImageFilter.GaussianBlur', (['(10)'], {}), '(10)\n', (11858, 11862), False, 'from PIL import ImageDraw, ImageFont, ImageFilter\n'), ((5648, 5676), 'math.atan2', 'math.atan2', (['delta_y', 'delta_x'], {}), '(delta_y, delta_x)\n', (5658, 5676), False, 'import math\n'), ((10763, 10853), 'pink.cogs.utils.errorhandler.PINKError', 'PINKError', (['f"""Something really bad happened with underlying API[{r.status}]: {reason}"""'], {}), "(\n f'Something really bad happened with underlying API[{r.status}]: {reason}')\n", (10772, 10853), False, 'from 
pink.cogs.utils.errorhandler import PINKError\n'), ((12471, 12494), 'PIL.ImageDraw.Draw', 'ImageDraw.Draw', (['text_im'], {}), '(text_im)\n', (12485, 12494), False, 'from PIL import ImageDraw, ImageFont, ImageFilter\n')]
|
#!/usr/bin/python
import roslib
import rospy
import cv2
import numpy as np
import cv_bridge
import time
from sensor_msgs.msg import Image
from std_msgs.msg import String
from common import *
from jupiter.msg import BallPosition
class Detector:
current_camera = None
camera_subscription = None
bridge = None
processed_image_publisher = None
processed_image_bw_publisher = None
offset = 100
wheel_publisher = None
state = ""
ball_at_middle_X_of_Asus_Camera = False
ball_positioned = False
front_camera_x_reference = 0
front_camera_y_reference = 0
move_robot_or_arm = ""
ball_position = None
def __init__(self):
init_arguments(self)
self.state = "NO_SEARCH"
rospy.Subscriber("/jupiter/detector/current_camera", String, self.camera_change)
rospy.Subscriber("/jupiter/detector/state_change", String, self.state_change)
self.robot_movement_publisher = rospy.Publisher("/jupiter/robot_movement/command", String, queue_size = 10)
self.state_machine_publisher = rospy.Publisher("/jupiter/robot_movement/result", String, queue_size = 10)
self.bridge = cv_bridge.CvBridge()
self.processed_image_publisher = rospy.Publisher("/jupiter/processed_image", Image, queue_size = 10)
self.processed_image_bw_publisher = rospy.Publisher("/jupiter/processed_image_bw", Image, queue_size = 10)
self.ball_position_publisher = rospy.Publisher("/jupiter/ball_position", BallPosition, queue_size = 10)
self.ball_position = BallPosition()
self.ball_position.detected = False
def camera_change(self, command):
self.current_camera = command.data
rospy.loginfo("Detector: current camera changed to %s", self.current_camera)
if self.camera_subscription:
self.camera_subscription.unregister()
if self.current_camera == "ASUS_CAMERA":
self.ball_at_middle_X_of_Asus_Camera = False
self.ball_at_bottom_message_sent = False
self.ball_positioned = False
self.offset = 100
self.camera_subscription = rospy.Subscriber(adjust_namespace(self.is_simulation, "/Asus_Camera/rgb/image_raw"), Image, self.process_image)
elif self.current_camera == "ARM_CAMERA":
self.camera_subscription = rospy.Subscriber("/Creative_Camera/rgb/image_raw" if self.is_simulation else "/komodo_1/arm_cam_node/image_raw", Image, self.process_image)
self.move_robot_or_arm = "MOVE_ROBOT"
def state_change(self, command):
if command.data == "SEARCH":
self.state = "SEARCH"
rospy.loginfo("Detector: starting to search for ball")
elif command.data == "NO_SEARCH":
self.state = "NO_SEARCH"
rospy.loginfo("Detector: stopped searching for ball")
def process_image(self, image):
if self.state == "NO_SEARCH":
return
image_cv = self.bridge.imgmsg_to_cv2(image, "bgr8")
blurred_image = cv2.GaussianBlur(image_cv, (9, 9), 0)
# The two cameras have different sensors, so their color rendition varies. Adjust for this issue when trying to filter the red colors in the image.
if self.current_camera == "ASUS_CAMERA":
(lower, upper) = ([0, 0, 100], [55, 55, 255]) # dark red
lower = np.array(lower, dtype = "uint8")
upper = np.array(upper, dtype = "uint8")
mask = cv2.inRange(blurred_image, lower, upper)
output = cv2.bitwise_and(blurred_image, blurred_image, mask = mask)
else: # ARM_CAMERA
blurred_image2 = cv2.GaussianBlur(image_cv, (9, 9), 0)
(lower, upper) = ([0, 0, 100], [70, 100, 255])
lower = np.array(lower, dtype = "uint8")
upper = np.array(upper, dtype = "uint8")
mask = cv2.inRange(blurred_image, lower, upper)
output_dark_orange = cv2.bitwise_and(blurred_image, blurred_image, mask = mask)
(lower2, upper2) = ([65, 50, 170], [100, 70, 255])
lower2 = np.array(lower2, dtype = "uint8")
upper2 = np.array(upper2, dtype = "uint8")
mask2 = cv2.inRange(blurred_image2, lower2, upper2)
output_light_orange = cv2.bitwise_and(blurred_image2, blurred_image2, mask = mask2)
output = output_light_orange
cv2.bitwise_or(output_dark_orange, output_light_orange, output)
image_grayscale = cv2.cvtColor(output, cv2.COLOR_BGR2GRAY)
(thresh, image_binary) = cv2.threshold(image_grayscale, 128, 255, cv2.THRESH_BINARY | cv2.THRESH_OTSU)
params = cv2.SimpleBlobDetector_Params()
params.filterByInertia = False
params.filterByConvexity = True
params.filterByColor = False
params.filterByCircularity = True
params.filterByArea = True
params.minArea = 30 if self.current_camera == "ASUS_CAMERA" else 15
params.maxArea = 2500 if self.current_camera == "ASUS_CAMERA" else 38400
params.minConvexity = 0.2
params.maxConvexity = 1.0
params.minCircularity = 0.25
params.maxCircularity = 1.0
if self.current_camera == "FRONT_CAMERA":
params.minDistBetweenBlobs = 20.0
# Create a detector with the parameters, according to your OpenCV version (2 or 3)
ver = (cv2.__version__).split('.')
if int(ver[0]) < 3:
detector = cv2.SimpleBlobDetector(params)
else:
detector = cv2.SimpleBlobDetector_create(params)
# Detect blobs
keypoints = detector.detect(image_binary)
circles = []
for keypoint in keypoints:
x = keypoint.pt[0]
y = keypoint.pt[1]
r = keypoint.size / 2.0
circles.append([x, y, r])
target = None
if circles:
circles = np.uint16(np.around(circles))
max_r = 0.0
target = circles[0]
for circle in circles:
if circle[2] > max_r and (circle[1] >= (image.height * 0.5) if self.current_camera == "ASUS_CAMERA" else True):
max_r = circle[2]
target = circle
if target != None:
processed_image_bw = cv2.drawKeypoints(image_binary, keypoints, np.array([]), (0, 255, 0), cv2.DRAW_MATCHES_FLAGS_DRAW_RICH_KEYPOINTS)
center = (target[0], target[1])
cv2.circle(processed_image_bw, center, target[2], (255, 0, 0), 1, 8, 0)
processed_image = cv2.drawKeypoints(image_cv, keypoints, np.array([]), (0, 255, 0), cv2.DRAW_MATCHES_FLAGS_DRAW_RICH_KEYPOINTS)
cv2.circle(processed_image, center, target[2], (255, 0, 0), 1, 8, 0)
# publish the keypoints and target circle superimposed on the source image from the camera and on the b&w image
self.processed_image_publisher.publish(self.bridge.cv2_to_imgmsg(processed_image, "bgr8"))
self.processed_image_bw_publisher.publish(self.bridge.cv2_to_imgmsg(processed_image_bw, "bgr8"))
if target[2]:
rospy.loginfo("x: %d, y: %d, radius: %d", target[0], target[1], target[2])
if self.current_camera == "ASUS_CAMERA" and self.asus_ballpark(target[0], image) and not self.ball_at_middle_X_of_Asus_Camera:
self.ball_at_middle_X_of_Asus_Camera = True
self.robot_movement_publisher.publish("STOP-BALL_FOUND")
rospy.loginfo("Detector: ball found")
elif target != None and self.current_camera == "ASUS_CAMERA" and abs(target[1] - (image.height)) < (image.height / 10.0) and self.ball_at_middle_X_of_Asus_Camera and not self.ball_at_bottom_message_sent:
self.ball_at_bottom_message_sent = True
self.robot_movement_publisher.publish("STOP-BALL_AT_BOTTOM_OF_FRAME")
rospy.loginfo("Detector: ball is at bottom of Asus Camera frame")
elif target != None and self.current_camera == "ARM_CAMERA" and self.move_robot_or_arm == "MOVE_ROBOT":
if self.is_simulation: # the real arm cam emits an upside-down image, so adjust for orientation
if target[1] < 10:
if target[0] < image.width * 0.45:
self.robot_movement_publisher.publish("FORWARD-LEFT")
elif target[0] > image.width * 0.55:
self.robot_movement_publisher.publish("FORWARD-RIGHT")
else:
self.robot_movement_publisher.publish("FORWARD_ARM")
else:
self.move_robot_or_arm = "MOVE_ARM"
self.robot_movement_publisher.publish("STOP-READY_TO_GRAB")
else:
if target[1] > 10:
if target[0] < image.width * 0.45:
self.robot_movement_publisher.publish("FORWARD-RIGHT")
elif target[0] > image.width * 0.55:
self.robot_movement_publisher.publish("FORWARD-LEFT")
else:
self.robot_movement_publisher.publish("FORWARD_ARM")
else:
self.move_robot_or_arm = "MOVE_ARM"
self.robot_movement_publisher.publish("STOP-READY_TO_GRAB")
elif target != None and self.current_camera == "ARM_CAMERA" and self.move_robot_or_arm == "MOVE_ARM":
rospy.loginfo("Detector: publishing ball position")
self.ball_position.detected = True
self.ball_position.x = target[0]
self.ball_position.y = target[1]
self.ball_position.radius = target[2]
self.ball_position.img_width = image.width
self.ball_position.img_height = image.height
self.ball_position_publisher.publish(self.ball_position)
self.state = "NO_SEARCH"
def asus_ballpark(self, x, image):
return (image.width * 0.65) <= x and x <= (image.width * 0.85)
if __name__ == "__main__":
    # Register this process as the "detector" ROS node, build the detector
    # (its constructor wires up subscribers/publishers), and hand control to
    # the rospy event loop until shutdown.
    rospy.init_node("detector")
    detector = Detector()
    rospy.spin()
|
[
"cv2.GaussianBlur",
"rospy.Subscriber",
"cv2.bitwise_and",
"numpy.around",
"cv2.__version__.split",
"cv2.inRange",
"cv2.cvtColor",
"rospy.init_node",
"jupiter.msg.BallPosition",
"cv2.circle",
"rospy.loginfo",
"cv2.SimpleBlobDetector_create",
"cv2.SimpleBlobDetector",
"cv2.bitwise_or",
"cv_bridge.CvBridge",
"cv2.SimpleBlobDetector_Params",
"cv2.threshold",
"rospy.Publisher",
"numpy.array",
"rospy.spin"
] |
[((10179, 10206), 'rospy.init_node', 'rospy.init_node', (['"""detector"""'], {}), "('detector')\n", (10194, 10206), False, 'import rospy\n'), ((10237, 10249), 'rospy.spin', 'rospy.spin', ([], {}), '()\n', (10247, 10249), False, 'import rospy\n'), ((742, 827), 'rospy.Subscriber', 'rospy.Subscriber', (['"""/jupiter/detector/current_camera"""', 'String', 'self.camera_change'], {}), "('/jupiter/detector/current_camera', String, self.camera_change\n )\n", (758, 827), False, 'import rospy\n'), ((831, 908), 'rospy.Subscriber', 'rospy.Subscriber', (['"""/jupiter/detector/state_change"""', 'String', 'self.state_change'], {}), "('/jupiter/detector/state_change', String, self.state_change)\n", (847, 908), False, 'import rospy\n'), ((949, 1022), 'rospy.Publisher', 'rospy.Publisher', (['"""/jupiter/robot_movement/command"""', 'String'], {'queue_size': '(10)'}), "('/jupiter/robot_movement/command', String, queue_size=10)\n", (964, 1022), False, 'import rospy\n'), ((1064, 1136), 'rospy.Publisher', 'rospy.Publisher', (['"""/jupiter/robot_movement/result"""', 'String'], {'queue_size': '(10)'}), "('/jupiter/robot_movement/result', String, queue_size=10)\n", (1079, 1136), False, 'import rospy\n'), ((1161, 1181), 'cv_bridge.CvBridge', 'cv_bridge.CvBridge', ([], {}), '()\n', (1179, 1181), False, 'import cv_bridge\n'), ((1223, 1288), 'rospy.Publisher', 'rospy.Publisher', (['"""/jupiter/processed_image"""', 'Image'], {'queue_size': '(10)'}), "('/jupiter/processed_image', Image, queue_size=10)\n", (1238, 1288), False, 'import rospy\n'), ((1335, 1403), 'rospy.Publisher', 'rospy.Publisher', (['"""/jupiter/processed_image_bw"""', 'Image'], {'queue_size': '(10)'}), "('/jupiter/processed_image_bw', Image, queue_size=10)\n", (1350, 1403), False, 'import rospy\n'), ((1445, 1515), 'rospy.Publisher', 'rospy.Publisher', (['"""/jupiter/ball_position"""', 'BallPosition'], {'queue_size': '(10)'}), "('/jupiter/ball_position', BallPosition, queue_size=10)\n", (1460, 1515), False, 'import rospy\n'), 
((1547, 1561), 'jupiter.msg.BallPosition', 'BallPosition', ([], {}), '()\n', (1559, 1561), False, 'from jupiter.msg import BallPosition\n'), ((1696, 1772), 'rospy.loginfo', 'rospy.loginfo', (['"""Detector: current camera changed to %s"""', 'self.current_camera'], {}), "('Detector: current camera changed to %s', self.current_camera)\n", (1709, 1772), False, 'import rospy\n'), ((3019, 3056), 'cv2.GaussianBlur', 'cv2.GaussianBlur', (['image_cv', '(9, 9)', '(0)'], {}), '(image_cv, (9, 9), 0)\n', (3035, 3056), False, 'import cv2\n'), ((4464, 4504), 'cv2.cvtColor', 'cv2.cvtColor', (['output', 'cv2.COLOR_BGR2GRAY'], {}), '(output, cv2.COLOR_BGR2GRAY)\n', (4476, 4504), False, 'import cv2\n'), ((4538, 4615), 'cv2.threshold', 'cv2.threshold', (['image_grayscale', '(128)', '(255)', '(cv2.THRESH_BINARY | cv2.THRESH_OTSU)'], {}), '(image_grayscale, 128, 255, cv2.THRESH_BINARY | cv2.THRESH_OTSU)\n', (4551, 4615), False, 'import cv2\n'), ((4633, 4664), 'cv2.SimpleBlobDetector_Params', 'cv2.SimpleBlobDetector_Params', ([], {}), '()\n', (4662, 4664), False, 'import cv2\n'), ((5357, 5383), 'cv2.__version__.split', 'cv2.__version__.split', (['"""."""'], {}), "('.')\n", (5378, 5383), False, 'import cv2\n'), ((2641, 2695), 'rospy.loginfo', 'rospy.loginfo', (['"""Detector: starting to search for ball"""'], {}), "('Detector: starting to search for ball')\n", (2654, 2695), False, 'import rospy\n'), ((3351, 3381), 'numpy.array', 'np.array', (['lower'], {'dtype': '"""uint8"""'}), "(lower, dtype='uint8')\n", (3359, 3381), True, 'import numpy as np\n'), ((3404, 3434), 'numpy.array', 'np.array', (['upper'], {'dtype': '"""uint8"""'}), "(upper, dtype='uint8')\n", (3412, 3434), True, 'import numpy as np\n'), ((3456, 3496), 'cv2.inRange', 'cv2.inRange', (['blurred_image', 'lower', 'upper'], {}), '(blurred_image, lower, upper)\n', (3467, 3496), False, 'import cv2\n'), ((3518, 3574), 'cv2.bitwise_and', 'cv2.bitwise_and', (['blurred_image', 'blurred_image'], {'mask': 'mask'}), '(blurred_image, 
blurred_image, mask=mask)\n', (3533, 3574), False, 'import cv2\n'), ((3633, 3670), 'cv2.GaussianBlur', 'cv2.GaussianBlur', (['image_cv', '(9, 9)', '(0)'], {}), '(image_cv, (9, 9), 0)\n', (3649, 3670), False, 'import cv2\n'), ((3750, 3780), 'numpy.array', 'np.array', (['lower'], {'dtype': '"""uint8"""'}), "(lower, dtype='uint8')\n", (3758, 3780), True, 'import numpy as np\n'), ((3803, 3833), 'numpy.array', 'np.array', (['upper'], {'dtype': '"""uint8"""'}), "(upper, dtype='uint8')\n", (3811, 3833), True, 'import numpy as np\n'), ((3855, 3895), 'cv2.inRange', 'cv2.inRange', (['blurred_image', 'lower', 'upper'], {}), '(blurred_image, lower, upper)\n', (3866, 3895), False, 'import cv2\n'), ((3929, 3985), 'cv2.bitwise_and', 'cv2.bitwise_and', (['blurred_image', 'blurred_image'], {'mask': 'mask'}), '(blurred_image, blurred_image, mask=mask)\n', (3944, 3985), False, 'import cv2\n'), ((4072, 4103), 'numpy.array', 'np.array', (['lower2'], {'dtype': '"""uint8"""'}), "(lower2, dtype='uint8')\n", (4080, 4103), True, 'import numpy as np\n'), ((4127, 4158), 'numpy.array', 'np.array', (['upper2'], {'dtype': '"""uint8"""'}), "(upper2, dtype='uint8')\n", (4135, 4158), True, 'import numpy as np\n'), ((4181, 4224), 'cv2.inRange', 'cv2.inRange', (['blurred_image2', 'lower2', 'upper2'], {}), '(blurred_image2, lower2, upper2)\n', (4192, 4224), False, 'import cv2\n'), ((4259, 4318), 'cv2.bitwise_and', 'cv2.bitwise_and', (['blurred_image2', 'blurred_image2'], {'mask': 'mask2'}), '(blurred_image2, blurred_image2, mask=mask2)\n', (4274, 4318), False, 'import cv2\n'), ((4374, 4437), 'cv2.bitwise_or', 'cv2.bitwise_or', (['output_dark_orange', 'output_light_orange', 'output'], {}), '(output_dark_orange, output_light_orange, output)\n', (4388, 4437), False, 'import cv2\n'), ((5437, 5467), 'cv2.SimpleBlobDetector', 'cv2.SimpleBlobDetector', (['params'], {}), '(params)\n', (5459, 5467), False, 'import cv2\n'), ((5506, 5543), 'cv2.SimpleBlobDetector_create', 'cv2.SimpleBlobDetector_create', 
(['params'], {}), '(params)\n', (5535, 5543), False, 'import cv2\n'), ((6445, 6516), 'cv2.circle', 'cv2.circle', (['processed_image_bw', 'center', 'target[2]', '(255, 0, 0)', '(1)', '(8)', '(0)'], {}), '(processed_image_bw, center, target[2], (255, 0, 0), 1, 8, 0)\n', (6455, 6516), False, 'import cv2\n'), ((6669, 6737), 'cv2.circle', 'cv2.circle', (['processed_image', 'center', 'target[2]', '(255, 0, 0)', '(1)', '(8)', '(0)'], {}), '(processed_image, center, target[2], (255, 0, 0), 1, 8, 0)\n', (6679, 6737), False, 'import cv2\n'), ((2330, 2473), 'rospy.Subscriber', 'rospy.Subscriber', (["('/Creative_Camera/rgb/image_raw' if self.is_simulation else\n '/komodo_1/arm_cam_node/image_raw')", 'Image', 'self.process_image'], {}), "('/Creative_Camera/rgb/image_raw' if self.is_simulation else\n '/komodo_1/arm_cam_node/image_raw', Image, self.process_image)\n", (2346, 2473), False, 'import rospy\n'), ((2787, 2840), 'rospy.loginfo', 'rospy.loginfo', (['"""Detector: stopped searching for ball"""'], {}), "('Detector: stopped searching for ball')\n", (2800, 2840), False, 'import rospy\n'), ((5887, 5905), 'numpy.around', 'np.around', (['circles'], {}), '(circles)\n', (5896, 5905), True, 'import numpy as np\n'), ((6318, 6330), 'numpy.array', 'np.array', (['[]'], {}), '([])\n', (6326, 6330), True, 'import numpy as np\n'), ((6586, 6598), 'numpy.array', 'np.array', (['[]'], {}), '([])\n', (6594, 6598), True, 'import numpy as np\n'), ((7116, 7190), 'rospy.loginfo', 'rospy.loginfo', (['"""x: %d, y: %d, radius: %d"""', 'target[0]', 'target[1]', 'target[2]'], {}), "('x: %d, y: %d, radius: %d', target[0], target[1], target[2])\n", (7129, 7190), False, 'import rospy\n'), ((7479, 7516), 'rospy.loginfo', 'rospy.loginfo', (['"""Detector: ball found"""'], {}), "('Detector: ball found')\n", (7492, 7516), False, 'import rospy\n'), ((7891, 7956), 'rospy.loginfo', 'rospy.loginfo', (['"""Detector: ball is at bottom of Asus Camera frame"""'], {}), "('Detector: ball is at bottom of Asus Camera 
frame')\n", (7904, 7956), False, 'import rospy\n'), ((9547, 9598), 'rospy.loginfo', 'rospy.loginfo', (['"""Detector: publishing ball position"""'], {}), "('Detector: publishing ball position')\n", (9560, 9598), False, 'import rospy\n')]
|
# Copyright 2020 <NAME>
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation the
# rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
# sell copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
import unittest
from AppImageBuilder.app_dir.runtimes.classic import DynamicLoader
class DynamicLoaderTestCase(unittest.TestCase):
    """Tests for DynamicLoader's discovery of the loader binary and shared libs."""

    def setUp(self) -> None:
        # File listing of a typical aarch64 AppDir, including the dynamic
        # loader binary (ld-2.27.so) the tests expect to be found.
        self.app_dir_files = [
            'AppDir/lib/',
            'AppDir/lib/ld-linux-aarch64.so.1',
            'AppDir/lib/aarch64-linux-gnu',
            'AppDir/lib/aarch64-linux-gnu/libpthread-2.27.so',
            'AppDir/lib/aarch64-linux-gnu/libnss_hesiod-2.27.so',
            'AppDir/lib/aarch64-linux-gnu/libnss_nis.so.2',
            'AppDir/lib/aarch64-linux-gnu/libmemusage.so',
            'AppDir/lib/aarch64-linux-gnu/ld-2.27.so',
            'AppDir/lib/aarch64-linux-gnu/libpthread.so.0',
            'AppDir/lib/aarch64-linux-gnu/libacl.so.1.1.0',
            'AppDir/lib/aarch64-linux-gnu/libcrypt.so.1',
            'AppDir/lib/aarch64-linux-gnu/ld-linux-aarch64.so.1',
            'AppDir/lib/aarch64-linux-gnu/libutil.so.1',
            'AppDir/lib/aarch64-linux-gnu/libnsl.so.1',
        ]

    def test_get_binary_path(self):
        loader = DynamicLoader('AppDir', self.app_dir_files)
        self.assertEqual(loader.get_binary_path(), 'lib/aarch64-linux-gnu/ld-2.27.so')

    def test_list_libs(self):
        candidates = ['/path/to/file', 'path/to/shared_lib.so', 'path/to/shared_lib.so.1']
        loader = DynamicLoader('AppDir', candidates)
        self.assertEqual(loader._list_libs(), ['path/to/shared_lib.so', 'path/to/shared_lib.so.1'])
|
[
"AppImageBuilder.app_dir.runtimes.classic.DynamicLoader"
] |
[((1647, 1690), 'AppImageBuilder.app_dir.runtimes.classic.DynamicLoader', 'DynamicLoader', (['"""AppDir"""', 'self.app_dir_files'], {}), "('AppDir', self.app_dir_files)\n", (1660, 1690), False, 'from AppImageBuilder.app_dir.runtimes.classic import DynamicLoader\n'), ((1818, 1916), 'AppImageBuilder.app_dir.runtimes.classic.DynamicLoader', 'DynamicLoader', (['"""AppDir"""', "['/path/to/file', 'path/to/shared_lib.so', 'path/to/shared_lib.so.1']"], {}), "('AppDir', ['/path/to/file', 'path/to/shared_lib.so',\n 'path/to/shared_lib.so.1'])\n", (1831, 1916), False, 'from AppImageBuilder.app_dir.runtimes.classic import DynamicLoader\n')]
|
import json
import matplotlib.pyplot as plt
import numpy as np
import pickle
import tensorflow as tf
import traceback
from support.data_model import TAG_CLASS_MAP, CLASSES
def load_raw_tracks(path):
    """Load every pickled track object from *path* and return them as a list.

    The file is expected to hold back-to-back pickle records; reading stops
    cleanly at end of file.
    """
    tracks = []
    with open(path, 'rb') as f:
        try:
            while True:
                tracks.append(pickle.load(f))
        except EOFError:
            # pickle.load raises EOFError at the end of the stream; this is
            # the normal termination condition, not an error.
            pass
        except Exception:
            # Any other failure (e.g. a truncated or corrupt record) is
            # logged, and whatever loaded so far is returned — preserving
            # the original best-effort behaviour without spamming a
            # traceback on every successful load.
            traceback.print_exc()
    return tracks
def tracks_by_tag(tracks):
    """Group *tracks* into a dict keyed by class tag, normalising raw tags first."""
    grouped = {cls: [] for cls in CLASSES}
    for track in tracks:
        # Translate a raw tag to its canonical class name when a mapping
        # exists; note this mutates the track object in place.
        if track.tag in TAG_CLASS_MAP:
            track.tag = TAG_CLASS_MAP[track.tag]
        grouped[track.tag].append(track)
    return grouped
def flatten_tag_tracks(tag_tracks):
    """Concatenate every per-tag track list into one flat list."""
    return [track for tracks in tag_tracks.values() for track in tracks]
def print_tag_track_info(infos):
    """Print, for each tag, its track count and the total number of frames."""
    for tag, tracks in infos.items():
        frames = np.sum([t.frame_count for t in tracks])
        print(f'{tag}: {len(tracks)} tracks with {frames} frames')
def split_training_validation(tag_tracks, validate_frame_counts):
    """Randomly split each tag's tracks into training and validation sets.

    Tracks are shuffled in place, then assigned to validation until the
    requested validation frame count for that tag is reached; the remainder
    go to training.
    """
    train_tracks = {}
    validate_tracks = {}
    for tag, tracks in tag_tracks.items():
        if tag not in CLASSES:
            continue
        np.random.shuffle(tracks)
        validate_use = []
        train_use = []
        vcount = 0
        for track_info in tracks:
            if vcount < validate_frame_counts[tag]:
                validate_use.append(track_info)
                vcount += track_info.frame_count
            else:
                train_use.append(track_info)
        train_tracks[tag] = train_use
        validate_tracks[tag] = validate_use
    return train_tracks, validate_tracks
def first_time_model(model, training_config_text, model_config_text, save_directory):
    """Record a new model's architecture and configuration to *save_directory*.

    Prints the summary to stdout, writes the training/model configuration and
    summary to model.txt, and renders an architecture diagram to model.png.
    """
    # model.summary() prints to stdout itself and returns None, so wrapping
    # it in print() (as before) emitted a spurious "None" line each time.
    model.summary()
    with open(f'{save_directory}/model.txt', 'w') as f:
        def summary_print(s):
            # Redirect each summary line into the open file.
            print(s, file=f)
        f.write('\nTraining configuration:\n' + training_config_text + '\n')
        f.write('\nModel configuration:\n' + model_config_text + '\n')
        model.summary(print_fn=summary_print)
    tf.keras.utils.plot_model(model, to_file=f'{save_directory}/model.png', show_shapes=True)
def frame_count(tracks):
    """Total number of frames across *tracks*, as a plain int."""
    return int(sum(t.frame_count for t in tracks))
def all_frame_counts(tag_tracks):
    """Total number of frames across every class tag in *tag_tracks*."""
    return int(sum(frame_count(tag_tracks[tag]) for tag in CLASSES))
def print_track_information(training_tracks, validation_tracks):
    """Print a per-class table of training vs validation frame counts."""
    total_train = all_frame_counts(training_tracks)
    total_validate = all_frame_counts(validation_tracks)
    print(f'\nTraining with {total_train} frames, validating with {total_validate} frames:\n')
    print('              Train  Validate')
    for key in CLASSES:
        print(f'{key:12} {frame_count(training_tracks[key]):>7}  {frame_count(validation_tracks[key]):>7}')
def dense_norm_relu(n, x):
    """Apply a Dense(n) -> BatchNormalization -> ReLU stack to tensor `x`."""
    # He-normal initialisation pairs well with the ReLU activation below.
    x = tf.keras.layers.Dense(n, kernel_initializer='he_normal')(x)
    x = tf.keras.layers.BatchNormalization()(x)
    return tf.keras.layers.Activation("relu")(x)
def compute_scores(tp, fp, fn):
    """Return (precision, recall, F1) from true/false positive and false negative counts."""
    if tp == 0:
        # No true positives: precision and recall are both zero, and the
        # F-score formula would divide by zero.
        return 0.0, 0.0, 0.0
    precision = tp / (tp + fp)
    recall = tp / (tp + fn)
    fscore = 2. * precision * recall / (precision + recall)
    return precision, recall, fscore
def build_callback(config, save_directory):
    """Instantiate a Keras training callback from a config dict.

    The dict's 'name' entry selects the callback type; every other entry is
    forwarded as a keyword argument to the callback constructor.
    """
    kwargs = dict(config)
    callback_name = kwargs.pop('name')
    if callback_name == 'checkpoint_callback':
        # Checkpoint paths in the config are relative to the run directory.
        kwargs['filepath'] = save_directory + '/' + kwargs['filepath']
        print(f'saving checkpoints to {kwargs["filepath"]}')
        return tf.keras.callbacks.ModelCheckpoint(**kwargs)
    if callback_name == 'lr_callback':
        return tf.keras.callbacks.ReduceLROnPlateau(**kwargs)
    if callback_name == 'stopping_callback':
        return tf.keras.callbacks.EarlyStopping(**kwargs)
    raise Exception(f'Unknown callback type {callback_name}')
def draw_figures(history, plots, save_directory):
    """Render stacked training-history subplots and save them to history.png.

    Each entry in *plots* describes one subplot: its 'title', 'y-label',
    'caption-loc' and the list of history 'values' to chart (each value is
    drawn twice: the training series and its 'val_'-prefixed counterpart).
    """
    plt.figure(figsize=(8, 6 * len(plots)))
    # Subplot codes of the form <rows><cols><index>, e.g. 211, 212 for two rows.
    plt_position = len(plots) * 100 + 11
    for i, plot in enumerate(plots):
        plt.subplot(plt_position + i)
        plt.title(plot['title'])
        legends = []
        for value in plot['values']:
            plt.plot(history.history[value])
            legend = value.replace('_', ' ').title()
            legends.append('Training ' + legend)
            # The matching validation series uses Keras's 'val_' prefix.
            value = 'val_' + value
            plt.plot(history.history[value])
            legends.append('Validation ' + legend)
        plt.xlim(left=1)
        plt.ylim(0.0,1.0)
        plt.ylabel(plot['y-label'])
        plt.xlabel('Epoch')
        plt.legend(legends, loc=plot['caption-loc'], framealpha=.5)
    plt.savefig(f'{save_directory}/history.png')
    plt.close()
|
[
"matplotlib.pyplot.title",
"numpy.sum",
"tensorflow.keras.layers.Dense",
"tensorflow.keras.callbacks.ModelCheckpoint",
"pickle.load",
"tensorflow.keras.callbacks.EarlyStopping",
"traceback.print_exc",
"tensorflow.keras.layers.BatchNormalization",
"tensorflow.keras.callbacks.ReduceLROnPlateau",
"matplotlib.pyplot.close",
"tensorflow.keras.utils.plot_model",
"tensorflow.keras.layers.Activation",
"numpy.random.shuffle",
"matplotlib.pyplot.ylim",
"matplotlib.pyplot.legend",
"matplotlib.pyplot.ylabel",
"matplotlib.pyplot.subplot",
"matplotlib.pyplot.xlim",
"matplotlib.pyplot.plot",
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.savefig"
] |
[((4864, 4908), 'matplotlib.pyplot.savefig', 'plt.savefig', (['f"""{save_directory}/history.png"""'], {}), "(f'{save_directory}/history.png')\n", (4875, 4908), True, 'import matplotlib.pyplot as plt\n'), ((4913, 4924), 'matplotlib.pyplot.close', 'plt.close', ([], {}), '()\n', (4922, 4924), True, 'import matplotlib.pyplot as plt\n'), ((937, 976), 'numpy.sum', 'np.sum', (['[t.frame_count for t in tracks]'], {}), '([t.frame_count for t in tracks])\n', (943, 976), True, 'import numpy as np\n'), ((2215, 2308), 'tensorflow.keras.utils.plot_model', 'tf.keras.utils.plot_model', (['model'], {'to_file': 'f"""{save_directory}/model.png"""', 'show_shapes': '(True)'}), "(model, to_file=f'{save_directory}/model.png',\n show_shapes=True)\n", (2240, 2308), True, 'import tensorflow as tf\n'), ((2347, 2386), 'numpy.sum', 'np.sum', (['[t.frame_count for t in tracks]'], {}), '([t.frame_count for t in tracks])\n', (2353, 2386), True, 'import numpy as np\n'), ((2934, 2990), 'tensorflow.keras.layers.Dense', 'tf.keras.layers.Dense', (['n'], {'kernel_initializer': '"""he_normal"""'}), "(n, kernel_initializer='he_normal')\n", (2955, 2990), True, 'import tensorflow as tf\n'), ((3002, 3038), 'tensorflow.keras.layers.BatchNormalization', 'tf.keras.layers.BatchNormalization', ([], {}), '()\n', (3036, 3038), True, 'import tensorflow as tf\n'), ((3053, 3087), 'tensorflow.keras.layers.Activation', 'tf.keras.layers.Activation', (['"""relu"""'], {}), "('relu')\n", (3079, 3087), True, 'import tensorflow as tf\n'), ((3752, 3801), 'tensorflow.keras.callbacks.ModelCheckpoint', 'tf.keras.callbacks.ModelCheckpoint', ([], {}), '(**config_copy)\n', (3786, 3801), True, 'import tensorflow as tf\n'), ((4278, 4307), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(plt_position + i)'], {}), '(plt_position + i)\n', (4289, 4307), True, 'import matplotlib.pyplot as plt\n'), ((4316, 4340), 'matplotlib.pyplot.title', 'plt.title', (["plot['title']"], {}), "(plot['title'])\n", (4325, 4340), True, 'import 
matplotlib.pyplot as plt\n'), ((4685, 4701), 'matplotlib.pyplot.xlim', 'plt.xlim', ([], {'left': '(1)'}), '(left=1)\n', (4693, 4701), True, 'import matplotlib.pyplot as plt\n'), ((4710, 4728), 'matplotlib.pyplot.ylim', 'plt.ylim', (['(0.0)', '(1.0)'], {}), '(0.0, 1.0)\n', (4718, 4728), True, 'import matplotlib.pyplot as plt\n'), ((4736, 4763), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (["plot['y-label']"], {}), "(plot['y-label'])\n", (4746, 4763), True, 'import matplotlib.pyplot as plt\n'), ((4772, 4791), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Epoch"""'], {}), "('Epoch')\n", (4782, 4791), True, 'import matplotlib.pyplot as plt\n'), ((4800, 4860), 'matplotlib.pyplot.legend', 'plt.legend', (['legends'], {'loc': "plot['caption-loc']", 'framealpha': '(0.5)'}), "(legends, loc=plot['caption-loc'], framealpha=0.5)\n", (4810, 4860), True, 'import matplotlib.pyplot as plt\n'), ((1267, 1292), 'numpy.random.shuffle', 'np.random.shuffle', (['tracks'], {}), '(tracks)\n', (1284, 1292), True, 'import numpy as np\n'), ((3858, 3909), 'tensorflow.keras.callbacks.ReduceLROnPlateau', 'tf.keras.callbacks.ReduceLROnPlateau', ([], {}), '(**config_copy)\n', (3894, 3909), True, 'import tensorflow as tf\n'), ((4411, 4443), 'matplotlib.pyplot.plot', 'plt.plot', (['history.history[value]'], {}), '(history.history[value])\n', (4419, 4443), True, 'import matplotlib.pyplot as plt\n'), ((4593, 4625), 'matplotlib.pyplot.plot', 'plt.plot', (['history.history[value]'], {}), '(history.history[value])\n', (4601, 4625), True, 'import matplotlib.pyplot as plt\n'), ((377, 398), 'traceback.print_exc', 'traceback.print_exc', ([], {}), '()\n', (396, 398), False, 'import traceback\n'), ((3972, 4019), 'tensorflow.keras.callbacks.EarlyStopping', 'tf.keras.callbacks.EarlyStopping', ([], {}), '(**config_copy)\n', (4004, 4019), True, 'import tensorflow as tf\n'), ((318, 332), 'pickle.load', 'pickle.load', (['f'], {}), '(f)\n', (329, 332), False, 'import pickle\n')]
|
from PIL import Image
import os, pprint
# Source and destination directories for the crop pass.
old_directory = 'old'
new_directory = 'new'

# Upper-left corner of the crop box inside each source image.
new_origin = (36, 32)

# The 675x976 crop box is identical for every image, so build it once
# instead of rebuilding the tuple on every loop iteration. (The original
# also computed an unused `width` per image; dropped.)
crop_box = (
    new_origin[0],
    new_origin[1],
    675 + new_origin[0],
    976 + new_origin[1],
)

for file in os.listdir(old_directory):
    filename = "{}/{}".format(old_directory, file)
    img = Image.open(filename)
    height = img.size[1]
    if height != 1040:
        # Report and skip images that do not have the expected height.
        print(file)
        continue
    cropped_img = img.crop(crop_box)
    save_location = "{}/{}".format(new_directory, file)
    cropped_img.save(save_location)
|
[
"os.listdir",
"PIL.Image.open"
] |
[((121, 146), 'os.listdir', 'os.listdir', (['old_directory'], {}), '(old_directory)\n', (131, 146), False, 'import os, pprint\n'), ((209, 229), 'PIL.Image.open', 'Image.open', (['filename'], {}), '(filename)\n', (219, 229), False, 'from PIL import Image\n')]
|
import unittest
from ..utils.inject import assign_injectables
from ..utils.immutabledict import ImmutableDict
from ..generator.exporter import Exporter
directory_values = ['title', 'images']
picture_values = ['alt_text', 'src', 'caption_data']
class MockJinja2Template(object):
    """Fake jinja2 template that verifies every render argument is declared."""

    def __init__(self, required_values):
        assign_injectables(self, locals())

    def render(self, template_arguments):
        # Every argument handed to the template must be one it declares.
        for name in template_arguments:
            assert name in self.required_values
class StubJpegPicture(object):
    """Stand-in for a JPEG picture entity, exercised by the exporter tests."""

    def __init__(self, alt_text, src, caption_data):
        assign_injectables(self, locals())

    def get_contents(self):
        # Pictures are leaf nodes: they contain nothing.
        return []

    def as_view(self):
        view = ImmutableDict.of(
            alt_text=self.alt_text,
            src=self.src,
            caption_data=self.caption_data)
        return view

    def get_exporter(self):
        template = MockJinja2Template(picture_values)
        return Exporter(template)

    def get_name(self):
        return self.src

    def get_output_file_name(self):
        return self.src
class StubJpegDirectory(object):
    """Stand-in for a directory of pictures, exercised by the exporter tests."""

    def __init__(self, title, images):
        assign_injectables(self, locals())

    def get_contents(self):
        # A directory's contents are the images it holds.
        return self.images

    def as_view(self):
        view = ImmutableDict.of(title=self.title, images=self.images)
        return view

    def get_exporter(self):
        template = MockJinja2Template(directory_values)
        return Exporter(template)

    def get_name(self):
        return self.title

    def get_output_file_name(self):
        return self.title
class SimpleExporterTest(unittest.TestCase):
    """Exporting a single picture should populate the template without error."""

    def setUp(self):
        template = MockJinja2Template(picture_values)
        self.mock_template = template
        self.picture = StubJpegPicture('a picture', 'picture1.jpg', 'Caption')
        self.exporter = Exporter(template)

    def test_it_should_populate_the_jinja2_template(self):
        self.exporter.export(self.picture)
class DirectoryExporterTest(unittest.TestCase):
    """Exporting a directory of pictures should populate the template without error."""

    def setUp(self):
        pictures = [
            StubJpegPicture('first picture', 'picture1.jpg', 'Caption1'),
            StubJpegPicture('second picture', 'picture2.jpg', 'Caption2'),
        ]
        self.pictures_in_dir = pictures
        self.stub_directory = StubJpegDirectory('My Pictures', pictures)
        self.mock_template = MockJinja2Template(directory_values)
        self.exporter = Exporter(self.mock_template)

    def test_it_should_populate_the_jinja2_template(self):
        self.exporter.export(self.stub_directory)
if __name__ == '__main__':
    # Allow running this test module directly from the command line.
    unittest.main()
|
[
"unittest.main"
] |
[((2304, 2319), 'unittest.main', 'unittest.main', ([], {}), '()\n', (2317, 2319), False, 'import unittest\n')]
|
# Licensed under MIT License.
# See LICENSE in the project root for license information.
"""Longest common subsequence. The subsequence does not need to be continuous in the original sequence."""
from typing import Sequence, Tuple
from tests import jovian
import functools
##########################################
### Test cases
# Each case pairs 'input' keyword arguments with the expected
# (subsequence, length) output.
tests = [
    # List inputs
    {
        'input': {
            'seq1': [1, 3, 5, 6, 7, 2, 5, 2, 3],
            'seq2': [6, 2, 4, 7, 1, 5, 6, 2, 3]
        },
        'output': ([1, 5, 6, 2, 3], 5)
    },
    # Tuple inputs
    {
        'input': {
            'seq1': (1, 3, 5, 6, 7, 2, 5, 2, 3),
            'seq2': (6, 2, 4, 7, 1, 5, 6, 2, 3)
        },
        'output': ((1, 5, 6, 2, 3), 5)
    },
    # String inputs
    {
        'input': {
            'seq1': 'serendipitous',
            'seq2': 'precipitation'
        },
        'output': ('reipito', 7)
    },
    # One sequence is a subsequence of the other
    {
        'input': {
            'seq1': 'dense',
            'seq2': 'condensed'
        },
        'output': ('dense', 5)
    },
    # Multiple subsequences share the maximum length; expect the first
    # common subsequence from the left of seq1.
    {
        'input': {
            'seq1': 'abcdef',
            'seq2': 'badcfe'
        },
        'output': ('ace', 3)
    },
    # No common subsequence at all
    {
        'input': {
            'seq1': 'a',
            'seq2': 'bb'
        },
        'output': ('', 0)
    },
    # One sequence is empty
    {
        'input': {
            'seq1': '',
            'seq2': 'stone'
        },
        'output': ('', 0)
    },
]
##########################################
### Methods
def memoize(obj):
    """Decorator: cache obj's return value for each argument combination.

    A later call with the same arguments returns the cached result instead
    of re-evaluating the wrapped function. The cache is exposed on the
    wrapped function as `.cache`.
    """
    cache = obj.cache = {}

    # functools.wraps copies obj's metadata (name, docstring, __dict__)
    # onto the wrapper so the decorated function looks like the original.
    @functools.wraps(obj)
    def wrapper(*args, **kwargs):
        # Stringifying the arguments gives a hashable (if crude) cache key.
        key = str(args) + str(kwargs)
        try:
            return cache[key]
        except KeyError:
            result = cache[key] = obj(*args, **kwargs)
            return result
    return wrapper
# The decorator 'memoize' will go and execute function `memoize(lcs)`, return memoizer.
# Without memoization, the orig func runs too slow (impossible when len(seq) > 7)
@memoize
def lcs_recursive(seq1: Sequence, seq2: Sequence) -> Tuple[Sequence, int]:
    """Find the longest common subsequence (both itself and its length) of two sequences recursively.
    Note
    ----
    If there are multiple subseqs with same length, return the first common subseq from the left of `seq1`.
    """
    # Time complexity: O(2 ^ (len(seq1) + len(seq2))) without memoization;
    # the @memoize decorator makes the repeated recursive calls below cheap.
    if type(seq1) != type(seq2):
        raise TypeError("Both input sequences should be of the same type.")
    # Consider all supported subclasses of generic type `Sequence`
    if isinstance(seq1, list):
        empty = []
    elif isinstance(seq1, str):
        empty = ''
    elif isinstance(seq1, tuple):
        empty = ()
    else:
        raise TypeError("This type of sequence is not supported; try list, str, tuple.")
    if not seq1 or not seq2:
        # If either seq is empty, the LCS is the empty sequence of that type
        return empty, 0
    if seq1[0] == seq2[0]:
        # First elements match: they belong to the LCS; recurse on the rest
        if isinstance(seq1, list):
            add_elem = [seq1[0]]
        elif isinstance(seq1, str):
            add_elem = seq1[0]
        elif isinstance(seq1, tuple):
            # A one-elem tuple can only be written as (3,) but not (3)
            add_elem = (seq1[0],)
        return (
            add_elem + lcs_recursive(seq1[1:], seq2[1:])[0],
            1 + lcs_recursive(seq1[1:], seq2[1:])[1]
        )
    else:
        # max(s1, s2, key=len) picks whichever of s1, s2 has the larger len();
        # on ties max() returns its first argument, which is what yields the
        # first common subsequence from the left of seq1.
        return (
            max(lcs_recursive(seq1, seq2[1:])[0], lcs_recursive(seq1[1:], seq2)[0], key=len),
            max(lcs_recursive(seq1, seq2[1:])[1], lcs_recursive(seq1[1:], seq2)[1])
        )
def _empty_sequence(seq):
    """Return an empty sequence of the same concrete type as *seq*.

    Raises TypeError for unsupported sequence types.
    """
    if isinstance(seq, list):
        return []
    if isinstance(seq, str):
        return ''
    if isinstance(seq, tuple):
        return ()
    raise TypeError("This type of sequence is not supported; try list, str, tuple.")


def _single_element(seq, element):
    """Wrap *element* as a one-item sequence of the same type as *seq*."""
    if isinstance(seq, list):
        return [element]
    if isinstance(seq, str):
        return element
    # A one-element tuple needs the trailing comma: (x,)
    return (element,)


def lcs_dynamic(seq1: Sequence, seq2: Sequence) -> Tuple[Sequence, int]:
    """Find the longest common subsequence (both itself and its length) of two sequences by dynamic programming.
    Note
    ----
    If there are multiple subseqs with same length, return the first common subseq from the left of `seq1`.
    """
    # Time complexity: O(len1 * len2). Space complexity: O(len1 * len2).
    # (Fixed: the return annotation previously said `int` although the
    # function has always returned a (sequence, length) tuple.)
    if type(seq1) != type(seq2):
        raise TypeError("Both input sequences should be of the same type.")
    empty = _empty_sequence(seq1)  # also validates that the type is supported
    if not seq1 or not seq2:
        # If either seq is empty, the LCS is the empty sequence of that type
        return empty, 0

    # Step 1: fill the DP table. table[i][j] is the LCS length of
    # seq1[0:i] and seq2[0:j]; row/column 0 stay 0 (empty prefix).
    len1, len2 = len(seq1), len(seq2)
    table = [[0] * (len2 + 1) for _ in range(len1 + 1)]
    for i in range(1, len1 + 1):
        for j in range(1, len2 + 1):
            if seq1[i - 1] == seq2[j - 1]:
                table[i][j] = table[i - 1][j - 1] + 1
            else:
                table[i][j] = max(table[i - 1][j], table[i][j - 1])
    lcs_length = table[-1][-1]

    # Step 2: reconstruct the LCS itself by walking the table backwards
    # from the lower-right corner.
    lcs = empty
    i, j = len1, len2
    while i > 0 and j > 0:
        if seq1[i - 1] == seq2[j - 1]:
            lcs = _single_element(seq1, seq1[i - 1]) + lcs
            i -= 1
            j -= 1
        elif table[i - 1][j] < table[i][j - 1]:
            # On ties prefer moving up (the else branch); this keeps the
            # result equal to the first common subsequence from the left
            # of seq1.
            j -= 1
        else:
            i -= 1
    return lcs, lcs_length
##########################################
### Test client
jovian.evaluate_test_cases(func=lcs_recursive, test_cases=tests)
# The next two timed runs show that the memoized recursion is faster than
# the plain-vanilla dynamic-programming implementation.
jovian.evaluate_test_cases_justyre(func=lcs_recursive, tests=tests)
jovian.evaluate_test_cases_justyre(func=lcs_dynamic, tests=tests)
|
[
"tests.jovian.evaluate_test_cases",
"functools.wraps",
"tests.jovian.evaluate_test_cases_justyre"
] |
[((7314, 7378), 'tests.jovian.evaluate_test_cases', 'jovian.evaluate_test_cases', ([], {'func': 'lcs_recursive', 'test_cases': 'tests'}), '(func=lcs_recursive, test_cases=tests)\n', (7340, 7378), False, 'from tests import jovian\n'), ((7493, 7560), 'tests.jovian.evaluate_test_cases_justyre', 'jovian.evaluate_test_cases_justyre', ([], {'func': 'lcs_recursive', 'tests': 'tests'}), '(func=lcs_recursive, tests=tests)\n', (7527, 7560), False, 'from tests import jovian\n'), ((7561, 7626), 'tests.jovian.evaluate_test_cases_justyre', 'jovian.evaluate_test_cases_justyre', ([], {'func': 'lcs_dynamic', 'tests': 'tests'}), '(func=lcs_dynamic, tests=tests)\n', (7595, 7626), False, 'from tests import jovian\n'), ((2143, 2163), 'functools.wraps', 'functools.wraps', (['obj'], {}), '(obj)\n', (2158, 2163), False, 'import functools\n')]
|
# This class manages the game's state
import pyglet
from pyglet import clock
from Entity import Asteroid, AsteroidDebris, Player
from Entity import ParticleSpawner, ParticleFactory, Bullet
from HUD import HUD
from pyglet.window import key
from Vect2 import Vect2
import math
# Target window size constant
WIDTH = 800
HEIGHT = 400
targetNo = 5 # number of asteroids to spawn
DEBOUNCE = 1
class StateManager(object):
    def __init__(self):
        # Quit flag; game_loop refreshes it from the ESC key each frame.
        self.quit = False
        self._init_window()
        self._init_game()
        # High-level mode; starts on the splash screen (game_loop switches
        # it to "PAUSE" when P is pressed).
        self.mode = "SPLASH"
        # Prevent bouncing on switching game modes
        self.debounce_timer = DEBOUNCE
# Create a window for the game
    def _init_window(self):
        """Create the game window and hook up keyboard state tracking."""
        # Window object represents the game's window
        self.window = pyglet.window.Window(WIDTH, HEIGHT)
        # Keys holds a handler that keeps track of keyboard state, part of pyglet
        self.keys = pyglet.window.key.KeyStateHandler()
        # Push the handler onto the window so it receives key press/release events.
        self.window.push_handlers(self.keys)
# Stage the game or return it to its initial state
    def _init_game(self):
        """Stage the game or return it to its initial state."""
        self.hud = HUD()
        self.entities = []
        self.spawn_player()
        # Red exhaust particle cone behind the ship; game_loop toggles it
        # on while the ship is accelerating.
        self.exhaust = ParticleSpawner(
            self.player.pos.getCopy(),
            self.player.angle + math.pi,
            math.pi / 4, .01,
            ParticleFactory(speed=20, color=(255, 0, 0)),
            True)
        self.entities.append(self.exhaust)
#Create a new instance of the Player class at the center of the screen
def spawn_player(self):
self.player = Player(Vect2(x=self.window.width/2, y=self.window.height/2))
self.entities.append(self.player)
# This function runs when the look is in game mode, and has all the updating/drawing logic
    def game_loop(self, dt):
        """Run one frame of active gameplay: read input, update entities, draw.

        `dt` is the frame time delta, passed through to each entity's
        update().
        """
        #Clear frame before looping
        self.window.clear()
        #print(pyglet.gl.get_current_context())
        # On a proper engine the controller would probably be its own class.
        # That level of abstraction makes it easier to use keyboards, mice, and
        # other controllers the user may have
        controller = {
            'acc': self.keys[key.W],
            'left': self.keys[key.A],
            'right': self.keys[key.D],
            'fire': self.keys[key.SPACE],
            'quit': self.keys[key.ESCAPE],
            'pause': self.keys[key.P]
        }
        self.quit = controller['quit']
        # NOTE(review): debounce_timer is never decremented in this view;
        # confirm something ticks it down, otherwise this pause condition
        # can never fire after the timer is (re)set to DEBOUNCE.
        if controller['pause'] and self.debounce_timer <= 0:
            self.mode = "PAUSE"
            self.debounce_timer = DEBOUNCE
        self.player.input(controller)
        #turn on thrust effect if ship is accelerating
        self.exhaust.active = controller['acc']
        self.exhaust.angle = (self.player.angle + math.pi)
        self.exhaust.pos = self.player.pos.getCopy()
        self.spawn_bullets()
        self.spawn_asteroids()
        self.detect_collisions()
        for e in self.entities:
            e.update(dt)
        #for e in self.entities:
        #    print(e)
        batch = pyglet.graphics.Batch()
        for e in self.entities:
            # batch.add expects a series of arguments
            # most easily delivered as a tuple.
            # * is the untuple argument.
            batch.add(*e.draw())
        # Filter out any dead objects
        self.entities[:] = [e for e in self.entities if e.isAlive()]
        # Draw objects to the frame
        batch.draw()
        self.hud.drawHUD()
# Determine if a bullet should be spawned, and then spawns a bullet
def spawn_bullets(self):
if self.player.isFiring():
self.entities.append(
Bullet(
self.player.pos.getCopy(),
self.player.angle
)
)
# Maintain a minimum asteroid population
def spawn_asteroids(self):
# Asteroid Spawning
asteroids = [e for e in self.entities if isinstance(e, Asteroid)]
if len(asteroids) < targetNo:
newAsteroid = Asteroid(3, Vect2(0, 0))
self.entities.append(newAsteroid)
# This function determines if any objects are colliding in a meaningful way for the game
def detect_collisions(self):
asteroids = [e for e in self.entities if isinstance(e, Asteroid)]
for asteroid in asteroids:
if self.player.overlaps(asteroid.hit_radius, asteroid.pos.getCopy()):
self.player.kill()
# Check if player is actually dead, it may be in invuln
# period
if (self.player.isAlive() != True):
if (self.hud.has_lives()):
self.spawn_player()
self.hud.kill()
else: self.mode = "GAMEOVER"
# Process asteroid/bullet collisions
for bullet in [e for e in self.entities if isinstance(e, Bullet)]:
for asteroid in asteroids:
if bullet.overlaps(
asteroid.hit_radius,
asteroid.pos.getCopy()):
asteroid.kill()
self.entities.append(
AsteroidDebris(
asteroid.pos.getCopy()))
if asteroid.size > 1:
# add two baby asteroids!
self.entities.append(
Asteroid(
asteroid.size - 1,
asteroid.pos.getCopy()))
self.entities.append(
Asteroid(
asteroid.size - 1,
asteroid.pos.getCopy()))
# Remove bullet
bullet.kill()
# Log the points
self.hud.hit()
# Inform the main function if the player requested to quit
def is_quit(self):
return self.quit
# Dispatch loop to the right function
def loop(self, dt):
if self.debounce_timer > 0:
self.debounce_timer -= dt
if self.mode == "GAME":
self.game_loop(dt)
elif self.mode == "PAUSE":
self.pause_loop(dt)
elif self.mode == "SPLASH":
self.splash_loop(dt)
elif self.mode == "GAMEOVER":
self.game_over_loop(dt)
else:
self.quit == True
print("Error: Debug: state.mode == Invalid state!")
# Pause screen
def pause_loop(self, dt):
self.window.clear()
label = pyglet.text.Label("Game Paused: Press p to unpause, or ESC to quit", font_size=24,
x=WIDTH//2, y=HEIGHT//2, anchor_x = 'center', anchor_y = 'center')
label.draw()
if self.keys[key.P] and self.debounce_timer <= 0:
self.mode = "GAME"
self.debounce_timer = DEBOUNCE
elif self.keys[key.ESCAPE]: self.quit = True
# Splash screen
def splash_loop(self, dt):
label = pyglet.text.Label("Rocks in Space: Press s to start", font_size=38,
x=WIDTH//2, y=HEIGHT//2, anchor_x = 'center', anchor_y = 'center')
label.draw()
if self.keys[key.S]: self.mode = "GAME"
elif self.keys[key.ESCAPE]: self.quit = True
# Game over screen
def game_over_loop(self, dt):
self.window.clear()
label = pyglet.text.Label("Game over! Press S to restart, or ESC to quit", font_size=24,
x=WIDTH//2, y=HEIGHT//2, anchor_x = 'center', anchor_y = 'center')
label.draw()
if self.keys[key.S]:
self.mode = "GAME"
self._init_game()
elif self.keys[key.ESCAPE]: self.quit = True
|
[
"Vect2.Vect2",
"pyglet.text.Label",
"pyglet.window.key.KeyStateHandler",
"HUD.HUD",
"pyglet.graphics.Batch",
"Entity.ParticleFactory",
"pyglet.window.Window"
] |
[((780, 815), 'pyglet.window.Window', 'pyglet.window.Window', (['WIDTH', 'HEIGHT'], {}), '(WIDTH, HEIGHT)\n', (800, 815), False, 'import pyglet\n'), ((910, 945), 'pyglet.window.key.KeyStateHandler', 'pyglet.window.key.KeyStateHandler', ([], {}), '()\n', (943, 945), False, 'import pyglet\n'), ((1093, 1098), 'HUD.HUD', 'HUD', ([], {}), '()\n', (1096, 1098), False, 'from HUD import HUD\n'), ((3015, 3038), 'pyglet.graphics.Batch', 'pyglet.graphics.Batch', ([], {}), '()\n', (3036, 3038), False, 'import pyglet\n'), ((6986, 7144), 'pyglet.text.Label', 'pyglet.text.Label', (['"""Game Paused: Press p to unpause, or ESC to quit"""'], {'font_size': '(24)', 'x': '(WIDTH // 2)', 'y': '(HEIGHT // 2)', 'anchor_x': '"""center"""', 'anchor_y': '"""center"""'}), "('Game Paused: Press p to unpause, or ESC to quit',\n font_size=24, x=WIDTH // 2, y=HEIGHT // 2, anchor_x='center', anchor_y=\n 'center')\n", (7003, 7144), False, 'import pyglet\n'), ((7419, 7557), 'pyglet.text.Label', 'pyglet.text.Label', (['"""Rocks in Space: Press s to start"""'], {'font_size': '(38)', 'x': '(WIDTH // 2)', 'y': '(HEIGHT // 2)', 'anchor_x': '"""center"""', 'anchor_y': '"""center"""'}), "('Rocks in Space: Press s to start', font_size=38, x=WIDTH //\n 2, y=HEIGHT // 2, anchor_x='center', anchor_y='center')\n", (7436, 7557), False, 'import pyglet\n'), ((7787, 7943), 'pyglet.text.Label', 'pyglet.text.Label', (['"""Game over! Press S to restart, or ESC to quit"""'], {'font_size': '(24)', 'x': '(WIDTH // 2)', 'y': '(HEIGHT // 2)', 'anchor_x': '"""center"""', 'anchor_y': '"""center"""'}), "('Game over! 
Press S to restart, or ESC to quit',\n font_size=24, x=WIDTH // 2, y=HEIGHT // 2, anchor_x='center', anchor_y=\n 'center')\n", (7804, 7943), False, 'import pyglet\n'), ((1316, 1360), 'Entity.ParticleFactory', 'ParticleFactory', ([], {'speed': '(20)', 'color': '(255, 0, 0)'}), '(speed=20, color=(255, 0, 0))\n', (1331, 1360), False, 'from Entity import ParticleSpawner, ParticleFactory, Bullet\n'), ((1556, 1612), 'Vect2.Vect2', 'Vect2', ([], {'x': '(self.window.width / 2)', 'y': '(self.window.height / 2)'}), '(x=self.window.width / 2, y=self.window.height / 2)\n', (1561, 1612), False, 'from Vect2 import Vect2\n'), ((4035, 4046), 'Vect2.Vect2', 'Vect2', (['(0)', '(0)'], {}), '(0, 0)\n', (4040, 4046), False, 'from Vect2 import Vect2\n')]
|
from pyleecan.Classes.Segment import Segment
from pyleecan.Classes.SurfLine import SurfLine
import pytest
# Input surface: a rectangle with corners -1j, 1j, 1 + 1j and 1 - 1j,
# described by four segments and centered on point_ref = 0.5.
line_list = list()
line_list.append(Segment(begin=-1j, end=1j))
line_list.append(Segment(begin=1j, end=1j + 1))
line_list.append(Segment(begin=1j + 1, end=-1j + 1))
line_list.append(Segment(begin=-1j + 1, end=-1j))
surf = SurfLine(line_list=line_list, label="test", point_ref=0.5)
# Each entry of split_test describes one cut: the input surface, the two
# expected surfaces after splitting along the line (Z1, Z2), and the
# is_join flag forwarded to split_line.
split_test = list()
# Cut Square top
line_list = list()
line_list.append(Segment(begin=0, end=1j))
line_list.append(Segment(begin=1j, end=1j + 1))
line_list.append(Segment(begin=1j + 1, end=1))
line_list.append(Segment(begin=1, end=0))
exp_top_surf = SurfLine(line_list=line_list, label="test", point_ref=0.5 + 0.5j)
# Cut Square bottom
line_list = list()
line_list.append(Segment(begin=-1j, end=0))
line_list.append(Segment(begin=0, end=1))
line_list.append(Segment(begin=1, end=-1j + 1))
line_list.append(Segment(begin=-1j + 1, end=-1j))
exp_bot_surf = SurfLine(line_list=line_list, label="test", point_ref=0.5 - 0.5j)
# Cut along the real axis (Z1=0 to Z2=2), keeping both halves joined
split_test.append(
    {
        "surf": surf,
        "exp_top_surf": exp_top_surf,
        "exp_bot_surf": exp_bot_surf,
        "Z1": 0,
        "Z2": 2,
        "is_join": True,
    }
)
@pytest.mark.parametrize("test_dict", split_test)
def test_split_line(test_dict):
    """Check SurfLine.split_line against the expected top/bottom surfaces.

    Parameters
    ----------
    test_dict : dict
        One entry of split_test with keys "surf", "Z1", "Z2", "is_join",
        "exp_top_surf" and "exp_bot_surf".
    """
    res_top_surf, res_bot_surf = test_dict["surf"].split_line(
        Z1=test_dict["Z1"],
        Z2=test_dict["Z2"],
        is_join=test_dict["is_join"],
    )
    # Fix: assertion messages previously misspelled "Different" as "Differente"
    assert res_top_surf == test_dict["exp_top_surf"], (
        "Different Top surface:\nResult:\n"
        + str(res_top_surf)
        + "\nExpected:\n"
        + str(test_dict["exp_top_surf"])
    )
    assert res_bot_surf == test_dict["exp_bot_surf"], (
        "Different Bot surface:\nResult:\n"
        + str(res_bot_surf)
        + "\nExpected:\n"
        + str(test_dict["exp_bot_surf"])
    )
if __name__ == "__main__":
    # Allow running every registered case directly, outside of pytest.
    for case in split_test:
        test_split_line(case)
    print("Done")
|
[
"pytest.mark.parametrize",
"pyleecan.Classes.SurfLine.SurfLine",
"pyleecan.Classes.Segment.Segment"
] |
[((330, 388), 'pyleecan.Classes.SurfLine.SurfLine', 'SurfLine', ([], {'line_list': 'line_list', 'label': '"""test"""', 'point_ref': '(0.5)'}), "(line_list=line_list, label='test', point_ref=0.5)\n", (338, 388), False, 'from pyleecan.Classes.SurfLine import SurfLine\n'), ((641, 706), 'pyleecan.Classes.SurfLine.SurfLine', 'SurfLine', ([], {'line_list': 'line_list', 'label': '"""test"""', 'point_ref': '(0.5 + 0.5j)'}), "(line_list=line_list, label='test', point_ref=0.5 + 0.5j)\n", (649, 706), False, 'from pyleecan.Classes.SurfLine import SurfLine\n'), ((945, 1010), 'pyleecan.Classes.SurfLine.SurfLine', 'SurfLine', ([], {'line_list': 'line_list', 'label': '"""test"""', 'point_ref': '(0.5 - 0.5j)'}), "(line_list=line_list, label='test', point_ref=0.5 - 0.5j)\n", (953, 1010), False, 'from pyleecan.Classes.SurfLine import SurfLine\n'), ((1205, 1253), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""test_dict"""', 'split_test'], {}), "('test_dict', split_test)\n", (1228, 1253), False, 'import pytest\n'), ((143, 173), 'pyleecan.Classes.Segment.Segment', 'Segment', ([], {'begin': '(-1.0j)', 'end': '(1.0j)'}), '(begin=-1.0j, end=1.0j)\n', (150, 173), False, 'from pyleecan.Classes.Segment import Segment\n'), ((188, 221), 'pyleecan.Classes.Segment.Segment', 'Segment', ([], {'begin': '(1.0j)', 'end': '(1.0j + 1)'}), '(begin=1.0j, end=1.0j + 1)\n', (195, 221), False, 'from pyleecan.Classes.Segment import Segment\n'), ((236, 274), 'pyleecan.Classes.Segment.Segment', 'Segment', ([], {'begin': '(1.0j + 1)', 'end': '(-1.0j + 1)'}), '(begin=1.0j + 1, end=-1.0j + 1)\n', (243, 274), False, 'from pyleecan.Classes.Segment import Segment\n'), ((289, 324), 'pyleecan.Classes.Segment.Segment', 'Segment', ([], {'begin': '(-1.0j + 1)', 'end': '(-1.0j)'}), '(begin=-1.0j + 1, end=-1.0j)\n', (296, 324), False, 'from pyleecan.Classes.Segment import Segment\n'), ((463, 489), 'pyleecan.Classes.Segment.Segment', 'Segment', ([], {'begin': '(0)', 'end': '(1.0j)'}), '(begin=0, end=1.0j)\n', 
(470, 489), False, 'from pyleecan.Classes.Segment import Segment\n'), ((506, 539), 'pyleecan.Classes.Segment.Segment', 'Segment', ([], {'begin': '(1.0j)', 'end': '(1.0j + 1)'}), '(begin=1.0j, end=1.0j + 1)\n', (513, 539), False, 'from pyleecan.Classes.Segment import Segment\n'), ((554, 584), 'pyleecan.Classes.Segment.Segment', 'Segment', ([], {'begin': '(1.0j + 1)', 'end': '(1)'}), '(begin=1.0j + 1, end=1)\n', (561, 584), False, 'from pyleecan.Classes.Segment import Segment\n'), ((601, 624), 'pyleecan.Classes.Segment.Segment', 'Segment', ([], {'begin': '(1)', 'end': '(0)'}), '(begin=1, end=0)\n', (608, 624), False, 'from pyleecan.Classes.Segment import Segment\n'), ((763, 790), 'pyleecan.Classes.Segment.Segment', 'Segment', ([], {'begin': '(-1.0j)', 'end': '(0)'}), '(begin=-1.0j, end=0)\n', (770, 790), False, 'from pyleecan.Classes.Segment import Segment\n'), ((807, 830), 'pyleecan.Classes.Segment.Segment', 'Segment', ([], {'begin': '(0)', 'end': '(1)'}), '(begin=0, end=1)\n', (814, 830), False, 'from pyleecan.Classes.Segment import Segment\n'), ((849, 880), 'pyleecan.Classes.Segment.Segment', 'Segment', ([], {'begin': '(1)', 'end': '(-1.0j + 1)'}), '(begin=1, end=-1.0j + 1)\n', (856, 880), False, 'from pyleecan.Classes.Segment import Segment\n'), ((897, 932), 'pyleecan.Classes.Segment.Segment', 'Segment', ([], {'begin': '(-1.0j + 1)', 'end': '(-1.0j)'}), '(begin=-1.0j + 1, end=-1.0j)\n', (904, 932), False, 'from pyleecan.Classes.Segment import Segment\n')]
|
from suitcase.nxsas.utils import _parse_bluesky_document_path
def test__build_bluesky_document_path():
    """Exercise _parse_bluesky_document_path on representative paths."""
    # (path, expected subset of the parsed mapping)
    cases = [
        ("#bluesky/start@abc", {"doc": "start", "attribute": "abc"}),
        ("#bluesky/start/abc", {"doc": "start", "keys": ("abc",)}),
        ("#bluesky/start/abc/def", {"doc": "start", "keys": ("abc", "def")}),
        (
            "#bluesky/start/abc/def@ghi",
            {"doc": "start", "keys": ("abc", "def"), "attribute": "ghi"},
        ),
        (
            "#bluesky/desc/primary/abc/def@ghi",
            {
                "doc": "desc",
                "stream": "primary",
                "keys": ("abc", "def"),
                "attribute": "ghi",
            },
        ),
        (
            "#bluesky/stop/abc/def@ghi",
            {"doc": "stop", "keys": ("abc", "def"), "attribute": "ghi"},
        ),
    ]
    for path, expected in cases:
        parsed_path = _parse_bluesky_document_path(path)
        for field, value in expected.items():
            assert parsed_path[field] == value
|
[
"suitcase.nxsas.utils._parse_bluesky_document_path"
] |
[((123, 173), 'suitcase.nxsas.utils._parse_bluesky_document_path', '_parse_bluesky_document_path', (['"""#bluesky/start@abc"""'], {}), "('#bluesky/start@abc')\n", (151, 173), False, 'from suitcase.nxsas.utils import _parse_bluesky_document_path\n'), ((279, 329), 'suitcase.nxsas.utils._parse_bluesky_document_path', '_parse_bluesky_document_path', (['"""#bluesky/start/abc"""'], {}), "('#bluesky/start/abc')\n", (307, 329), False, 'from suitcase.nxsas.utils import _parse_bluesky_document_path\n'), ((433, 487), 'suitcase.nxsas.utils._parse_bluesky_document_path', '_parse_bluesky_document_path', (['"""#bluesky/start/abc/def"""'], {}), "('#bluesky/start/abc/def')\n", (461, 487), False, 'from suitcase.nxsas.utils import _parse_bluesky_document_path\n'), ((597, 655), 'suitcase.nxsas.utils._parse_bluesky_document_path', '_parse_bluesky_document_path', (['"""#bluesky/start/abc/def@ghi"""'], {}), "('#bluesky/start/abc/def@ghi')\n", (625, 655), False, 'from suitcase.nxsas.utils import _parse_bluesky_document_path\n'), ((810, 875), 'suitcase.nxsas.utils._parse_bluesky_document_path', '_parse_bluesky_document_path', (['"""#bluesky/desc/primary/abc/def@ghi"""'], {}), "('#bluesky/desc/primary/abc/def@ghi')\n", (838, 875), False, 'from suitcase.nxsas.utils import _parse_bluesky_document_path\n'), ((1075, 1132), 'suitcase.nxsas.utils._parse_bluesky_document_path', '_parse_bluesky_document_path', (['"""#bluesky/stop/abc/def@ghi"""'], {}), "('#bluesky/stop/abc/def@ghi')\n", (1103, 1132), False, 'from suitcase.nxsas.utils import _parse_bluesky_document_path\n')]
|
# coding=utf-8
# Copyright 2022 Google LLC., LongT5 Authors and HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" PyTorch LongT5 model."""
import copy
import math
import warnings
from typing import Any, List, Optional, Tuple, Union
import torch
from torch import nn
from torch.nn import CrossEntropyLoss
from torch.utils.checkpoint import checkpoint
from ...activations import ACT2FN
from ...modeling_outputs import (
BaseModelOutput,
BaseModelOutputWithPastAndCrossAttentions,
Seq2SeqLMOutput,
Seq2SeqModelOutput,
)
from ...modeling_utils import PreTrainedModel, find_pruneable_heads_and_indices, prune_linear_layer
from ...utils import (
DUMMY_INPUTS,
DUMMY_MASK,
add_start_docstrings,
add_start_docstrings_to_model_forward,
is_torch_fx_proxy,
logging,
replace_return_docstrings,
)
from .configuration_longt5 import LongT5Config
# Module-level logger shared by this file
logger = logging.get_logger(__name__)
# Names referenced when building this model's documentation
_CONFIG_FOR_DOC = "LongT5Config"
_TOKENIZER_FOR_DOC = "T5Tokenizer"
_CHECKPOINT_FOR_DOC = "google/long-t5-local-base"
# TODO: Update before the merge
# Reference checkpoints for this architecture
LONGT5_PRETRAINED_MODEL_ARCHIVE_LIST = [
    "google/long-t5-local-base",
    "google/long-t5-local-large",
    "google/long-t5-tglobal-base",
    "google/long-t5-tglobal-large",
]
def _pad_to_multiple(x: torch.Tensor, block_len: int, dim: int, pad_value: int = 0) -> torch.Tensor:
"""Pad a tensor so that a sequence length will be a multiple of `block_len`"""
pad_len = -x.shape[dim] % block_len
# Handle cases when an empty input sequence is given
if not all(x.shape):
new_shape = list(x.shape)
new_shape[dim] += pad_len
return torch.zeros(new_shape, dtype=x.dtype)
pad = [(0, 0)] * x.ndim
pad[dim] = (0, pad_len)
pad = sum(pad[::-1], ())
x = nn.functional.pad(x, pad=pad, mode="constant", value=pad_value)
return x
def _split_into_blocks(x: torch.Tensor, block_len: int, dim: int) -> torch.Tensor:
"""Split an input tensor into blocks of a given `block_len` along the given `dim`. If the dimension length
is not a multiple of `block_len`, it will be padded first with selected `pad_value`.
"""
# pad tensor to multiple of block_len
if x.shape[dim] % block_len != 0:
x = _pad_to_multiple(x, block_len, dim, pad_value=0)
num_blocks = x.shape[dim] // block_len
output_shape = x.shape[:dim] + (num_blocks, block_len) + x.shape[(dim + 1) :]
# If 0 is in output_shape, we cannot apply reshape because of incompatibility with ONNX conversion
if 0 in output_shape:
return torch.empty(output_shape, dtype=x.dtype, device=x.device)
return x.reshape(output_shape)
def _concatenate_3_blocks(x: torch.Tensor, block_dim: int, sequence_dim: int, pad_value: int = 0) -> torch.Tensor:
"""Concatenate three consecutive blocks for each input block for local attentiont.
For more information, see: https://arxiv.org/pdf/2112.07916.pdf.
"""
num_blocks = x.shape[block_dim]
pad = [(0, 0)] * x.ndim
pad[block_dim] = (1, 1)
pad = sum(pad[::-1], ())
# [batch_size, num_blocks, block_len] -> [batch_size, num_blocks + 2, block_len]
x = nn.functional.pad(x, pad=pad, mode="constant", value=pad_value)
blocks_list: List[torch.Tensor] = []
for i in range(3):
# We use indexing approach here:
# https://numpy.org/doc/stable/user/basics.indexing.html#dealing-with-variable-numbers-of-indices-within-programs
indices = [slice(0, None)] * x.ndim
indices[block_dim] = slice(i, i + num_blocks)
indices = tuple(indices)
blocks_list.append(x[indices])
# [batch_size, num_blocks, 3 * block_len, ...]
return torch.cat(blocks_list, dim=sequence_dim)
def _make_3block_relative_position_ids(block_len: int) -> torch.Tensor:
"""Makes 3-blocked relative position ids for local attention."""
position_ids = torch.arange(3 * block_len, dtype=torch.int32)
center_position_ids = position_ids[block_len:-block_len]
# [block_len, 3 * block_len]
relative_position_ids = position_ids.unsqueeze(0) - center_position_ids.unsqueeze(1)
return relative_position_ids
def _mask_local_attention_mask(local_attention_mask: torch.Tensor, block_len: int) -> torch.Tensor:
    """Zero out attention between tokens farther apart than the local radius."""
    relative_position_ids = _make_3block_relative_position_ids(block_len)
    # Keep only pairs whose absolute offset is strictly within one block
    within_radius = (torch.abs(relative_position_ids) < block_len)[None, None, :, :]
    within_radius = within_radius.to(local_attention_mask.device)
    return torch.logical_and(local_attention_mask, within_radius)
def _get_local_attention_mask(attention_mask: torch.Tensor, block_len: int, device: torch.device) -> torch.Tensor:
    """Prepare the attention mask to be applied for local attention."""
    # [batch_size, num_blocks, block_len]
    blocked_mask = _split_into_blocks(attention_mask, block_len, dim=1)
    # [batch_size, num_blocks, 3 * block_len]
    three_blocked_mask = _concatenate_3_blocks(blocked_mask, block_dim=1, sequence_dim=2)
    # Combine the query-side and key-side masks into
    # [batch_size, num_blocks, block_len, 3 * block_len]
    local_attention_mask = torch.logical_and(
        blocked_mask.unsqueeze(-1), three_blocked_mask.unsqueeze(-2)
    )
    local_attention_mask = _mask_local_attention_mask(local_attention_mask, block_len)
    # Add a broadcastable heads axis:
    # [batch_size, 1, num_blocks, block_len, 3 * block_len]
    return local_attention_mask.unsqueeze(1).to(device)
def _make_global_fixed_block_ids(
    attention_mask: torch.Tensor, global_block_size: int
) -> Tuple[torch.Tensor, torch.Tensor]:
    """Obtain the "fixed block" global id corresponding to each input token.

    This implementation is a simplified version of the original Flaxformer implementation adopted from:
    https://github.com/google/flaxformer/blob/main/flaxformer/architectures/longt5/long_attention.py.
    In our scenario, as we use this strategy only for a decoder, orphan tokens, i.e. those tokens which do not make for
    the whole fixed block, are assigned to the preceding block.
    Padding tokens from the original sequence are represented by -1.
    """
    batch_size, seq_len = attention_mask.shape[:2]

    def handle_orphan_tokens(block_ids: torch.Tensor) -> torch.Tensor:
        # A position is a "block end" if it occupies the last slot of a fixed block
        block_ends = (torch.arange(seq_len) % global_block_size) == global_block_size - 1
        block_ends = block_ends.to(block_ids.device)
        # Block ends that fall on real (non-padding) tokens
        true_block_ends = torch.logical_and(block_ends, block_ids >= 0)
        # Index of the last complete block in each sequence
        full_blocks = true_block_ends.sum(-1).unsqueeze(-1).type(block_ids.dtype) - 1
        # Clamp orphan tokens (past the last complete block) into that last block
        block_ids = torch.where(block_ids < full_blocks, block_ids, full_blocks)
        return block_ids

    # Fractional running block index: position i contributes i / global_block_size
    # (assumes attention_mask is a float tensor — cumsum/floor below rely on it)
    fixed_block_mask = torch.ones_like(attention_mask, device=attention_mask.device) / global_block_size
    fixed_block_mask = torch.cumsum(fixed_block_mask, axis=1) - fixed_block_mask
    # Real tokens get +1, padding gets a large negative so floor() pushes it below -1
    mask = torch.where(attention_mask != 0.0, 1.0, -1000.0).type(attention_mask.dtype)
    global_block_ids = torch.floor(mask + fixed_block_mask - 1.0).type(attention_mask.dtype)
    _global_block_ids_lower_bound = torch.tensor(-1, dtype=global_block_ids.dtype, device=global_block_ids.device)
    global_block_ids = torch.where(
        global_block_ids > _global_block_ids_lower_bound, global_block_ids, _global_block_ids_lower_bound
    )
    # set padding tokens to -1
    global_block_ids = (global_block_ids * attention_mask) + (attention_mask - 1)
    # [batch_size, seq_len]
    global_block_ids = handle_orphan_tokens(global_block_ids)
    num_globals = seq_len // global_block_size
    # [batch_size, seq_len // global_block_size]
    if num_globals > 0:
        # Largest block id reached per sequence, broadcast over the global axis
        _sequence_block_ids_max = torch.max(global_block_ids, dim=-1).values.repeat(num_globals, 1).transpose(0, 1)
    else:
        _sequence_block_ids_max = torch.zeros(
            batch_size, 0, dtype=global_block_ids.dtype, device=global_block_ids.device
        )
    # Segment ids 0..num_globals-1; a segment is active (1) only if some token maps to it
    global_segment_ids = torch.cumsum(torch.ones(batch_size, num_globals), dim=-1) - 1
    global_segment_ids = global_segment_ids.to(attention_mask.device)
    global_segment_ids = torch.where(global_segment_ids <= _sequence_block_ids_max, 1, 0)
    return global_block_ids.type(torch.int), global_segment_ids.type(torch.int)
def _make_side_relative_position_ids(attention_mask: torch.Tensor, global_block_size: int) -> torch.Tensor:
    """Create the relative position tensor for local -> global attention."""
    block_ids, global_segment_ids = _make_global_fixed_block_ids(attention_mask, global_block_size)
    num_global_tokens = global_segment_ids.shape[-1]
    global_positions = torch.arange(num_global_tokens, device=block_ids.device)
    # Signed distance from each token's block id to every global position
    return (global_positions - block_ids[..., None]).type(torch.int64)
def _create_global_aggregates(
hidden_states: torch.Tensor, block_ids: torch.Tensor, global_seq_len: int
) -> torch.Tensor:
"""Compute individual block aggregates by summing over individual blocks."""
# (batch..., seq_len, global_seq_len))
block_ids = block_ids.where(
block_ids >= 0, torch.tensor(global_seq_len, dtype=block_ids.dtype, device=block_ids.device)
)
one_hot_block_ids = nn.functional.one_hot(block_ids.type(torch.int64), global_seq_len + 1)[:, :, :-1]
return torch.einsum("...nd,...ng->...gd", hidden_states, one_hot_block_ids.type(hidden_states.dtype))
# Copied from transformers.models.t5.modeling_t5.T5LayerNorm with T5->LongT5
class LongT5LayerNorm(nn.Module):
    """Scale-only layer norm in the LongT5 style: no bias, no mean subtraction."""

    def __init__(self, hidden_size, eps=1e-6):
        """
        Construct a layernorm module in the LongT5 style. No bias and no subtraction of mean.
        """
        super().__init__()
        self.weight = nn.Parameter(torch.ones(hidden_size))
        self.variance_epsilon = eps

    def forward(self, hidden_states):
        # Root Mean Square Layer Normalization (https://arxiv.org/abs/1910.07467):
        # only the second moment is used — no mean subtraction and no bias.
        # The accumulation is always performed in fp32 so half-precision
        # inputs stay numerically stable.
        mean_square = hidden_states.to(torch.float32).pow(2).mean(-1, keepdim=True)
        hidden_states = hidden_states * torch.rsqrt(mean_square + self.variance_epsilon)
        # Cast back to the parameter dtype when running in half precision
        if self.weight.dtype in [torch.float16, torch.bfloat16]:
            hidden_states = hidden_states.to(self.weight.dtype)
        return self.weight * hidden_states
# Prefer apex's fused CUDA RMSNorm kernel when apex is installed; it is a
# drop-in replacement for the pure-PyTorch LongT5LayerNorm defined above.
try:
    from apex.normalization import FusedRMSNorm

    LongT5LayerNorm = FusedRMSNorm  # noqa
    logger.info("Discovered apex.normalization.FusedRMSNorm - will use it instead of LongT5LayerNorm")
except ImportError:
    # using the normal LongT5LayerNorm
    pass
except Exception:
    # apex is present but failed to import (e.g. a CUDA mismatch); keep the
    # pure-PyTorch implementation and just warn
    logger.warning("discovered apex but it failed to load, falling back to LongT5LayerNorm")
    pass
# Copied from transformers.models.t5.modeling_t5.T5DenseActDense with T5->LongT5
class LongT5DenseActDense(nn.Module):
    """Feed-forward projection: d_model -> d_ff -> activation -> dropout -> d_model."""

    def __init__(self, config: LongT5Config):
        super().__init__()
        self.wi = nn.Linear(config.d_model, config.d_ff, bias=False)
        self.wo = nn.Linear(config.d_ff, config.d_model, bias=False)
        self.dropout = nn.Dropout(config.dropout_rate)
        self.act = ACT2FN[config.dense_act_fn]

    def forward(self, hidden_states):
        # wi -> activation -> dropout -> wo, expressed as one pipeline
        return self.wo(self.dropout(self.act(self.wi(hidden_states))))
# Copied from transformers.models.t5.modeling_t5.T5DenseGatedActDense with T5->LongT5
class LongT5DenseGatedActDense(nn.Module):
    """Gated feed-forward: act(wi_0(x)) * wi_1(x) -> dropout -> wo."""

    def __init__(self, config: LongT5Config):
        super().__init__()
        self.wi_0 = nn.Linear(config.d_model, config.d_ff, bias=False)
        self.wi_1 = nn.Linear(config.d_model, config.d_ff, bias=False)
        self.wo = nn.Linear(config.d_ff, config.d_model, bias=False)
        self.dropout = nn.Dropout(config.dropout_rate)
        self.act = ACT2FN[config.dense_act_fn]

    def forward(self, hidden_states):
        # Gate the linear branch with the activated branch, then project back
        gated = self.act(self.wi_0(hidden_states)) * self.wi_1(hidden_states)
        return self.wo(self.dropout(gated))
# Copied from transformers.models.t5.modeling_t5.T5LayerFF with T5->LongT5
class LongT5LayerFF(nn.Module):
    """Pre-norm feed-forward sub-layer with a residual connection."""

    def __init__(self, config: LongT5Config):
        super().__init__()
        # The config selects between the gated and plain feed-forward variant
        if config.is_gated_act:
            self.DenseReluDense = LongT5DenseGatedActDense(config)
        else:
            self.DenseReluDense = LongT5DenseActDense(config)
        self.layer_norm = LongT5LayerNorm(config.d_model, eps=config.layer_norm_epsilon)
        self.dropout = nn.Dropout(config.dropout_rate)

    def forward(self, hidden_states):
        # x + dropout(FF(norm(x)))
        normed_states = self.layer_norm(hidden_states)
        return hidden_states + self.dropout(self.DenseReluDense(normed_states))
# Copied from transformers.models.t5.modeling_t5.T5Attention with T5->LongT5
class LongT5Attention(nn.Module):
    def __init__(self, config: LongT5Config, has_relative_attention_bias=False):
        """Multi-head attention block.

        When `has_relative_attention_bias` is True, this layer owns the
        learned relative-position bias embedding consumed by `compute_bias`.
        """
        super().__init__()
        self.is_decoder = config.is_decoder
        self.has_relative_attention_bias = has_relative_attention_bias
        self.relative_attention_num_buckets = config.relative_attention_num_buckets
        self.relative_attention_max_distance = config.relative_attention_max_distance
        self.d_model = config.d_model
        self.key_value_proj_dim = config.d_kv
        self.n_heads = config.num_heads
        self.dropout = config.dropout_rate
        # Total projection width across all heads
        self.inner_dim = self.n_heads * self.key_value_proj_dim
        # Mesh TensorFlow initialization to avoid scaling before softmax
        self.q = nn.Linear(self.d_model, self.inner_dim, bias=False)
        self.k = nn.Linear(self.d_model, self.inner_dim, bias=False)
        self.v = nn.Linear(self.d_model, self.inner_dim, bias=False)
        self.o = nn.Linear(self.inner_dim, self.d_model, bias=False)
        if self.has_relative_attention_bias:
            # One bias value per (bucket, head) pair
            self.relative_attention_bias = nn.Embedding(self.relative_attention_num_buckets, self.n_heads)
        self.pruned_heads = set()
        self.gradient_checkpointing = False
    def prune_heads(self, heads):
        """Remove the given attention heads from the q/k/v/o projections.

        The set of already-pruned heads is passed along so index bookkeeping
        stays consistent across repeated calls.
        """
        if len(heads) == 0:
            return
        heads, index = find_pruneable_heads_and_indices(
            heads, self.n_heads, self.key_value_proj_dim, self.pruned_heads
        )
        # Prune linear layers
        self.q = prune_linear_layer(self.q, index)
        self.k = prune_linear_layer(self.k, index)
        self.v = prune_linear_layer(self.v, index)
        self.o = prune_linear_layer(self.o, index, dim=1)
        # Update hyper params
        self.n_heads = self.n_heads - len(heads)
        self.inner_dim = self.key_value_proj_dim * self.n_heads
        self.pruned_heads = self.pruned_heads.union(heads)
    @staticmethod
    def _relative_position_bucket(relative_position, bidirectional=True, num_buckets=32, max_distance=128):
        """
        Adapted from Mesh Tensorflow:
        https://github.com/tensorflow/mesh/blob/0cb87fe07da627bf0b7e60475d59f95ed6b5be3d/mesh_tensorflow/transformer/transformer_layers.py#L593
        Translate relative position to a bucket number for relative attention. The relative position is defined as
        memory_position - query_position, i.e. the distance in tokens from the attending position to the attended-to
        position. If bidirectional=False, then positive relative positions are invalid. We use smaller buckets for
        small absolute relative_position and larger buckets for larger absolute relative_positions. All relative
        positions >=max_distance map to the same bucket. All relative positions <=-max_distance map to the same bucket.
        This should allow for more graceful generalization to longer sequences than the model has been trained on
        Args:
            relative_position: an int32 Tensor
            bidirectional: a boolean - whether the attention is bidirectional
            num_buckets: an integer
            max_distance: an integer
        Returns:
            a Tensor with the same shape as relative_position, containing int32 values in the range [0, num_buckets)
        """
        relative_buckets = 0
        if bidirectional:
            # Split the buckets: half for positive offsets, half for negative ones
            num_buckets //= 2
            relative_buckets += (relative_position > 0).to(torch.long) * num_buckets
            relative_position = torch.abs(relative_position)
        else:
            # Causal case: only non-positive offsets are valid; flip the sign
            relative_position = -torch.min(relative_position, torch.zeros_like(relative_position))
        # now relative_position is in the range [0, inf)
        # half of the buckets are for exact increments in positions
        max_exact = num_buckets // 2
        is_small = relative_position < max_exact
        # The other half of the buckets are for logarithmically bigger bins in positions up to max_distance
        relative_position_if_large = max_exact + (
            torch.log(relative_position.float() / max_exact)
            / math.log(max_distance / max_exact)
            * (num_buckets - max_exact)
        ).to(torch.long)
        # Clamp everything at or beyond max_distance into the last bucket
        relative_position_if_large = torch.min(
            relative_position_if_large, torch.full_like(relative_position_if_large, num_buckets - 1)
        )
        relative_buckets += torch.where(is_small, relative_position, relative_position_if_large)
        return relative_buckets
    def compute_bias(self, query_length, key_length, device=None):
        """Compute binned relative position bias.

        Builds a (query_length, key_length) grid of relative distances, buckets them with
        `_relative_position_bucket`, and looks up a learned bias per bucket and head.

        Args:
            query_length: number of query positions.
            key_length: number of key positions.
            device: device to build the position indices on; defaults to the device of the
                `relative_attention_bias` embedding weight.

        Returns:
            Bias tensor of shape (1, num_heads, query_length, key_length), ready to be added
            to the raw attention scores.
        """
        if device is None:
            device = self.relative_attention_bias.weight.device
        context_position = torch.arange(query_length, dtype=torch.long, device=device)[:, None]
        memory_position = torch.arange(key_length, dtype=torch.long, device=device)[None, :]
        relative_position = memory_position - context_position  # shape (query_length, key_length)
        relative_position_bucket = self._relative_position_bucket(
            relative_position,  # shape (query_length, key_length)
            bidirectional=(not self.is_decoder),  # decoder attends causally -> unidirectional buckets
            num_buckets=self.relative_attention_num_buckets,
            max_distance=self.relative_attention_max_distance,
        )
        values = self.relative_attention_bias(relative_position_bucket)  # shape (query_length, key_length, num_heads)
        values = values.permute([2, 0, 1]).unsqueeze(0)  # shape (1, num_heads, query_length, key_length)
        return values
def forward(
self,
hidden_states,
mask=None,
key_value_states=None,
position_bias=None,
past_key_value=None,
layer_head_mask=None,
query_length=None,
use_cache=False,
output_attentions=False,
):
"""
Self-attention (if key_value_states is None) or attention over source sentence (provided by key_value_states).
"""
# Input is (batch_size, seq_length, dim)
# Mask is (batch_size, key_length) (non-causal) or (batch_size, key_length, key_length)
# past_key_value[0] is (batch_size, n_heads, q_len - 1, dim_per_head)
batch_size, seq_length = hidden_states.shape[:2]
real_seq_length = seq_length
if past_key_value is not None:
assert (
len(past_key_value) == 2
), f"past_key_value should have 2 past states: keys and values. Got { len(past_key_value)} past states"
real_seq_length += past_key_value[0].shape[2] if query_length is None else query_length
key_length = real_seq_length if key_value_states is None else key_value_states.shape[1]
def shape(states):
"""projection"""
return states.view(batch_size, -1, self.n_heads, self.key_value_proj_dim).transpose(1, 2)
def unshape(states):
"""reshape"""
return states.transpose(1, 2).contiguous().view(batch_size, -1, self.inner_dim)
def project(hidden_states, proj_layer, key_value_states, past_key_value):
"""projects hidden states correctly to key/query states"""
if key_value_states is None:
# self-attn
# (batch_size, n_heads, seq_length, dim_per_head)
hidden_states = shape(proj_layer(hidden_states))
elif past_key_value is None:
# cross-attn
# (batch_size, n_heads, seq_length, dim_per_head)
hidden_states = shape(proj_layer(key_value_states))
if past_key_value is not None:
if key_value_states is None:
# self-attn
# (batch_size, n_heads, key_length, dim_per_head)
hidden_states = torch.cat([past_key_value, hidden_states], dim=2)
else:
# cross-attn
hidden_states = past_key_value
return hidden_states
# get query states
query_states = shape(self.q(hidden_states)) # (batch_size, n_heads, seq_length, dim_per_head)
# get key/value states
key_states = project(
hidden_states, self.k, key_value_states, past_key_value[0] if past_key_value is not None else None
)
value_states = project(
hidden_states, self.v, key_value_states, past_key_value[1] if past_key_value is not None else None
)
# compute scores
scores = torch.matmul(
query_states, key_states.transpose(3, 2)
) # equivalent of torch.einsum("bnqd,bnkd->bnqk", query_states, key_states), compatible with onnx op>9
if position_bias is None:
if not self.has_relative_attention_bias:
position_bias = torch.zeros(
(1, self.n_heads, real_seq_length, key_length), device=scores.device, dtype=scores.dtype
)
if self.gradient_checkpointing and self.training:
position_bias.requires_grad = True
else:
position_bias = self.compute_bias(real_seq_length, key_length, device=scores.device)
# if key and values are already calculated
# we want only the last query position bias
if past_key_value is not None:
position_bias = position_bias[:, :, -hidden_states.size(1) :, :]
if mask is not None:
position_bias = position_bias + mask # (batch_size, n_heads, seq_length, key_length)
scores += position_bias
attn_weights = nn.functional.softmax(scores.float(), dim=-1).type_as(
scores
) # (batch_size, n_heads, seq_length, key_length)
attn_weights = nn.functional.dropout(
attn_weights, p=self.dropout, training=self.training
) # (batch_size, n_heads, seq_length, key_length)
# Mask heads if we want to
if layer_head_mask is not None:
attn_weights = attn_weights * layer_head_mask
attn_output = unshape(torch.matmul(attn_weights, value_states)) # (batch_size, seq_length, dim)
attn_output = self.o(attn_output)
present_key_value_state = (key_states, value_states) if (self.is_decoder and use_cache) else None
outputs = (attn_output,) + (present_key_value_state,) + (position_bias,)
if output_attentions:
outputs = outputs + (attn_weights,)
return outputs
class LongT5LocalAttention(nn.Module):
    """Sliding-window (local) attention used by the LongT5 encoder.

    Each query attends to a window of `3 * block_len` keys formed by its own block and
    the two neighboring blocks (`block_len = local_radius + 1`). No key/value cache is
    kept, so this module is encoder-only.
    """
    def __init__(self, config: LongT5Config, has_relative_attention_bias: bool = False) -> None:
        super().__init__()
        self.is_decoder = config.is_decoder
        self.has_relative_attention_bias = has_relative_attention_bias
        self.relative_attention_num_buckets = config.relative_attention_num_buckets
        self.relative_attention_max_distance = config.relative_attention_max_distance
        self.d_model = config.d_model
        self.key_value_proj_dim = config.d_kv
        self.n_heads = config.num_heads
        self.local_radius = config.local_radius
        self.block_len = self.local_radius + 1
        self.dropout = config.dropout_rate
        self.inner_dim = self.n_heads * self.key_value_proj_dim
        # Mesh TensorFlow initialization to avoid scaling before softmax
        self.q = nn.Linear(self.d_model, self.inner_dim, bias=False)
        self.k = nn.Linear(self.d_model, self.inner_dim, bias=False)
        self.v = nn.Linear(self.d_model, self.inner_dim, bias=False)
        self.o = nn.Linear(self.inner_dim, self.d_model, bias=False)
        if self.has_relative_attention_bias:
            self.relative_attention_bias = nn.Embedding(self.relative_attention_num_buckets, self.n_heads)
        self.pruned_heads = set()
        self.gradient_checkpointing = False
    # Copied from transformers.models.t5.modeling_t5.T5Attention.prune_heads
    def prune_heads(self, heads):
        """Prune the given attention heads in-place and shrink the q/k/v/o projections."""
        if len(heads) == 0:
            return
        heads, index = find_pruneable_heads_and_indices(
            heads, self.n_heads, self.key_value_proj_dim, self.pruned_heads
        )
        # Prune linear layers
        self.q = prune_linear_layer(self.q, index)
        self.k = prune_linear_layer(self.k, index)
        self.v = prune_linear_layer(self.v, index)
        self.o = prune_linear_layer(self.o, index, dim=1)
        # Update hyper params
        self.n_heads = self.n_heads - len(heads)
        self.inner_dim = self.key_value_proj_dim * self.n_heads
        self.pruned_heads = self.pruned_heads.union(heads)
    @staticmethod
    # Copied from transformers.models.t5.modeling_t5.T5Attention._relative_position_bucket
    def _relative_position_bucket(relative_position, bidirectional=True, num_buckets=32, max_distance=128):
        """
        Adapted from Mesh Tensorflow:
        https://github.com/tensorflow/mesh/blob/0cb87fe07da627bf0b7e60475d59f95ed6b5be3d/mesh_tensorflow/transformer/transformer_layers.py#L593
        Translate relative position to a bucket number for relative attention. The relative position is defined as
        memory_position - query_position, i.e. the distance in tokens from the attending position to the attended-to
        position. If bidirectional=False, then positive relative positions are invalid. We use smaller buckets for
        small absolute relative_position and larger buckets for larger absolute relative_positions. All relative
        positions >=max_distance map to the same bucket. All relative positions <=-max_distance map to the same bucket.
        This should allow for more graceful generalization to longer sequences than the model has been trained on
        Args:
            relative_position: an int32 Tensor
            bidirectional: a boolean - whether the attention is bidirectional
            num_buckets: an integer
            max_distance: an integer
        Returns:
            a Tensor with the same shape as relative_position, containing int32 values in the range [0, num_buckets)
        """
        relative_buckets = 0
        if bidirectional:
            num_buckets //= 2
            relative_buckets += (relative_position > 0).to(torch.long) * num_buckets
            relative_position = torch.abs(relative_position)
        else:
            relative_position = -torch.min(relative_position, torch.zeros_like(relative_position))
        # now relative_position is in the range [0, inf)
        # half of the buckets are for exact increments in positions
        max_exact = num_buckets // 2
        is_small = relative_position < max_exact
        # The other half of the buckets are for logarithmically bigger bins in positions up to max_distance
        relative_position_if_large = max_exact + (
            torch.log(relative_position.float() / max_exact)
            / math.log(max_distance / max_exact)
            * (num_buckets - max_exact)
        ).to(torch.long)
        relative_position_if_large = torch.min(
            relative_position_if_large, torch.full_like(relative_position_if_large, num_buckets - 1)
        )
        relative_buckets += torch.where(is_small, relative_position, relative_position_if_large)
        return relative_buckets
    def compute_bias(self, block_length: int):
        """Compute binned relative position bias for one local block against its 3-block window.

        Returns a tensor of shape (1, 1, num_heads, block_length, 3 * block_length).
        """
        memory_position = torch.arange(
            3 * block_length, dtype=torch.long, device=self.relative_attention_bias.weight.device
        )
        # queries correspond to the middle block of the 3-block window
        context_position = memory_position[block_length:-block_length]
        # (block_length, 3 * block_length)
        relative_position = memory_position[None, :] - context_position[:, None]
        relative_position_bucket = self._relative_position_bucket(
            relative_position,  # (block_length, 3 * block_length)
            bidirectional=(not self.is_decoder),
            num_buckets=self.relative_attention_num_buckets,
            max_distance=self.relative_attention_max_distance,
        )
        # (block_length, 3 * block_length, num_heads)
        values = self.relative_attention_bias(relative_position_bucket)
        # (1, 1, num_heads, block_length, 3 * block_length)
        values = values.permute([2, 0, 1]).unsqueeze(0).unsqueeze(0)
        return values
    def forward(
        self,
        hidden_states,
        mask=None,
        position_bias=None,
        layer_head_mask=None,
        output_attentions=False,
    ):
        """Run local blocked attention over `hidden_states`.

        Returns (attn_output, present_key_value_state=None, position_bias[, attn_weights]).
        """
        batch_size, seq_length = hidden_states.shape[:2]
        def shape(states):
            """projection"""
            return states.view(batch_size, -1, self.n_heads, self.key_value_proj_dim)
        def unshape(states):
            """reshape"""
            return states.contiguous().view(batch_size, -1, self.inner_dim)
        # get query/key/value states -> (batch_size, seq_length, n_heads, dim_per_head)
        query_states = shape(self.q(hidden_states))
        key_states = shape(self.k(hidden_states))
        value_states = shape(self.v(hidden_states))
        # Split into blocks -> (batch_size, num_blocks, block_len, n_heads, dim_per_head)
        query_states = _split_into_blocks(query_states, self.block_len, dim=1)
        key_states = _split_into_blocks(key_states, self.block_len, dim=1)
        value_states = _split_into_blocks(value_states, self.block_len, dim=1)
        # Concatenate 3 blocks for keys and values -> (batch_size, num_blocks, 3 * block_len, n_heads, dim_per_head)
        key_states = _concatenate_3_blocks(key_states, block_dim=1, sequence_dim=2)
        value_states = _concatenate_3_blocks(value_states, block_dim=1, sequence_dim=2)
        # Compute scores
        scores = torch.einsum(
            "...qhd,...khd->...hqk", query_states, key_states
        )  # (batch_size, num_block, n_heads, block_len, 3 * block_len)
        if position_bias is None:
            # position_bias shape: # (1, 1, n_heads, block_len, 3 * block_len)
            if not self.has_relative_attention_bias:
                position_bias = torch.zeros(
                    (1, 1, self.n_heads, self.block_len, 3 * self.block_len), device=scores.device, dtype=scores.dtype
                )
                if self.gradient_checkpointing and self.training:
                    position_bias.requires_grad = True
            else:
                position_bias = self.compute_bias(self.block_len)
            if mask is not None:
                # Replace masked positions with -1e10 (according to the original implementation)
                mask = torch.where(mask > 0, 0.0, -1e10)
                # We need to adjust position bias shape to be sum with mask
                position_bias = position_bias + mask.transpose(1, 2)
        scores += position_bias
        # (batch_size, num_blocks, n_heads, block_len, 3 * block_len)
        attn_weights = nn.functional.softmax(scores.float(), dim=-1).type_as(scores)
        # (batch_size, num_blocks, n_heads, block_len, 3 * block_len)
        attn_weights = nn.functional.dropout(attn_weights, p=self.dropout, training=self.training)
        # Mask heads if we want to
        if layer_head_mask is not None:
            attn_weights = attn_weights * layer_head_mask
        attn_weights = attn_weights.type(value_states.dtype)
        attn_output = unshape(torch.einsum("...hqk,...khd->...qhd", attn_weights, value_states))
        # drop padding introduced by block splitting so output length matches the input
        attn_output = attn_output[:, :seq_length, :]
        attn_output = self.o(attn_output)
        # no key/value cache for local (encoder-side) attention
        present_key_value_state = None
        outputs = (attn_output,) + (present_key_value_state,) + (position_bias,)
        if output_attentions:
            outputs = outputs + (attn_weights,)
        return outputs
class LongT5TransientGlobalAttention(nn.Module):
    """Transient-global attention used by the LongT5 encoder.

    Extends local (blocked) attention with "transient" global tokens: the input is
    aggregated into one summary token per `global_block_size` positions, and every
    local window additionally attends to all of those global tokens.
    """
    def __init__(self, config: LongT5Config, has_relative_attention_bias: bool = False) -> None:
        super().__init__()
        self.is_decoder = config.is_decoder
        self.has_relative_attention_bias = has_relative_attention_bias
        self.relative_attention_num_buckets = config.relative_attention_num_buckets
        self.relative_attention_max_distance = config.relative_attention_max_distance
        self.d_model = config.d_model
        self.key_value_proj_dim = config.d_kv
        self.n_heads = config.num_heads
        self.local_radius = config.local_radius
        self.block_len = self.local_radius + 1
        self.global_block_size = config.global_block_size
        self.dropout = config.dropout_rate
        self.inner_dim = self.n_heads * self.key_value_proj_dim
        # Mesh TensorFlow initialization to avoid scaling before softmax
        self.q = nn.Linear(self.d_model, self.inner_dim, bias=False)
        self.k = nn.Linear(self.d_model, self.inner_dim, bias=False)
        self.v = nn.Linear(self.d_model, self.inner_dim, bias=False)
        self.o = nn.Linear(self.inner_dim, self.d_model, bias=False)
        if self.has_relative_attention_bias:
            self.relative_attention_bias = nn.Embedding(self.relative_attention_num_buckets, self.n_heads)
        self.pruned_heads = set()
        self.gradient_checkpointing = False
        # Relative attention bias & Layer norm for global attention
        if self.has_relative_attention_bias:
            self.global_relative_attention_bias = nn.Embedding(self.relative_attention_num_buckets, self.n_heads)
        self.global_input_layer_norm = LongT5LayerNorm(config.d_model, eps=config.layer_norm_epsilon)
    # Copied from transformers.models.t5.modeling_t5.T5Attention.prune_heads
    def prune_heads(self, heads):
        """Prune the given attention heads in-place and shrink the q/k/v/o projections."""
        if len(heads) == 0:
            return
        heads, index = find_pruneable_heads_and_indices(
            heads, self.n_heads, self.key_value_proj_dim, self.pruned_heads
        )
        # Prune linear layers
        self.q = prune_linear_layer(self.q, index)
        self.k = prune_linear_layer(self.k, index)
        self.v = prune_linear_layer(self.v, index)
        self.o = prune_linear_layer(self.o, index, dim=1)
        # Update hyper params
        self.n_heads = self.n_heads - len(heads)
        self.inner_dim = self.key_value_proj_dim * self.n_heads
        self.pruned_heads = self.pruned_heads.union(heads)
    @staticmethod
    # Copied from transformers.models.t5.modeling_t5.T5Attention._relative_position_bucket
    def _relative_position_bucket(relative_position, bidirectional=True, num_buckets=32, max_distance=128):
        """
        Adapted from Mesh Tensorflow:
        https://github.com/tensorflow/mesh/blob/0cb87fe07da627bf0b7e60475d59f95ed6b5be3d/mesh_tensorflow/transformer/transformer_layers.py#L593
        Translate relative position to a bucket number for relative attention. The relative position is defined as
        memory_position - query_position, i.e. the distance in tokens from the attending position to the attended-to
        position. If bidirectional=False, then positive relative positions are invalid. We use smaller buckets for
        small absolute relative_position and larger buckets for larger absolute relative_positions. All relative
        positions >=max_distance map to the same bucket. All relative positions <=-max_distance map to the same bucket.
        This should allow for more graceful generalization to longer sequences than the model has been trained on
        Args:
            relative_position: an int32 Tensor
            bidirectional: a boolean - whether the attention is bidirectional
            num_buckets: an integer
            max_distance: an integer
        Returns:
            a Tensor with the same shape as relative_position, containing int32 values in the range [0, num_buckets)
        """
        relative_buckets = 0
        if bidirectional:
            num_buckets //= 2
            relative_buckets += (relative_position > 0).to(torch.long) * num_buckets
            relative_position = torch.abs(relative_position)
        else:
            relative_position = -torch.min(relative_position, torch.zeros_like(relative_position))
        # now relative_position is in the range [0, inf)
        # half of the buckets are for exact increments in positions
        max_exact = num_buckets // 2
        is_small = relative_position < max_exact
        # The other half of the buckets are for logarithmically bigger bins in positions up to max_distance
        relative_position_if_large = max_exact + (
            torch.log(relative_position.float() / max_exact)
            / math.log(max_distance / max_exact)
            * (num_buckets - max_exact)
        ).to(torch.long)
        relative_position_if_large = torch.min(
            relative_position_if_large, torch.full_like(relative_position_if_large, num_buckets - 1)
        )
        relative_buckets += torch.where(is_small, relative_position, relative_position_if_large)
        return relative_buckets
    def compute_bias(self, block_length: int):
        """Compute binned relative position bias for one local block against its 3-block window.

        Returns a tensor of shape (1, 1, num_heads, block_length, 3 * block_length).
        """
        memory_position = torch.arange(
            3 * block_length, dtype=torch.long, device=self.relative_attention_bias.weight.device
        )
        # queries correspond to the middle block of the 3-block window
        context_position = memory_position[block_length:-block_length]
        # (block_length, 3 * block_length)
        relative_position = memory_position[None, :] - context_position[:, None]
        relative_position_bucket = self._relative_position_bucket(
            relative_position,  # (block_length, 3 * block_length)
            bidirectional=(not self.is_decoder),
            num_buckets=self.relative_attention_num_buckets,
            max_distance=self.relative_attention_max_distance,
        )
        # (block_length, 3 * block_length, num_heads)
        values = self.relative_attention_bias(relative_position_bucket)
        # (1, 1, num_heads, block_length, 3 * block_length)
        values = values.permute([2, 0, 1]).unsqueeze(0).unsqueeze(0)
        return values
    def compute_side_bias(self, mask: torch.Tensor, global_segment_ids: torch.Tensor) -> torch.Tensor:
        """Compute the attention bias for token -> global ("side") attention.

        Combines a large negative additive mask for invalid token/global pairs with a
        learned relative-position bias from `global_relative_attention_bias`.

        Returns a tensor of shape (batch_size, num_heads, seq_len, global_seq_len).
        """
        # (batch_size, 1, seq_len, global_seq_len)
        side_attention_mask = torch.eq(mask[..., None], global_segment_ids[:, None, :])[:, None, ...]
        attention_side_bias = torch.where(side_attention_mask > 0, 0.0, -1e10)
        # (batch_size, seq_len, global_seq_len)
        side_relative_position = _make_side_relative_position_ids(mask, self.global_block_size)
        side_relative_position_bucket = self._relative_position_bucket(
            side_relative_position,
            bidirectional=(not self.is_decoder),
            num_buckets=self.relative_attention_num_buckets,
            max_distance=self.relative_attention_max_distance,
        )
        # (batch_size, seq_len, global_seq_len, num_heads)
        side_bias = self.global_relative_attention_bias(side_relative_position_bucket)
        # (batch_size, num_heads, seq_len, global_seq_len)
        side_bias = side_bias.permute([0, 3, 1, 2])
        # (batch_size, num_heads, seq_len, global_seq_len)
        attention_side_bias = attention_side_bias + side_bias
        return attention_side_bias
    def forward(
        self,
        hidden_states,
        mask=None,
        position_bias=None,
        layer_head_mask=None,
        output_attentions=False,
    ):
        """Run transient-global blocked attention over `hidden_states`.

        Returns (attn_output, present_key_value_state=None, position_bias[, attn_weights]).
        """
        batch_size, seq_length = hidden_states.shape[:2]
        def shape(states):
            """projection"""
            return states.view(batch_size, -1, self.n_heads, self.key_value_proj_dim)
        def unshape(states):
            """reshape"""
            return states.contiguous().view(batch_size, -1, self.inner_dim)
        # Prepare components for transient-global attention
        # Obtain block_ids and global_segment_ids
        # global_seq_len := seq_len // self.global_block_size
        # shapes: (batch_size, seq_len) & (batch_size, global_seq_len)
        block_ids, global_segment_ids = _make_global_fixed_block_ids(
            mask if mask is not None else torch.ones(hidden_states.shape[:-1]),
            self.global_block_size,
        )
        # Create global inputs
        _global_seq_len = global_segment_ids.shape[-1]
        global_inputs = _create_global_aggregates(hidden_states, block_ids, _global_seq_len)
        global_inputs = self.global_input_layer_norm(global_inputs)
        # get query states -> (batch_size, seq_length, n_heads, dim_per_head)
        query_states = shape(self.q(hidden_states))
        key_states = shape(self.k(hidden_states))
        value_states = shape(self.v(hidden_states))
        # Get global/side key/value states shape: (batch_size, global_seq_len, n_heads, dim_per_head)
        side_key_states = shape(self.k(global_inputs))
        side_value_states = shape(self.v(global_inputs))
        # Split into blocks -> (batch_size, num_blocks, block_len, n_heads, dim_per_head)
        query_states = _split_into_blocks(query_states, self.block_len, dim=1)
        key_states = _split_into_blocks(key_states, self.block_len, dim=1)
        value_states = _split_into_blocks(value_states, self.block_len, dim=1)
        # Concatenate 3 blocks for keys and values -> (batch_size, num_blocks, 3 * block_len, n_heads, dim_per_head)
        key_states = _concatenate_3_blocks(key_states, block_dim=1, sequence_dim=2)
        value_states = _concatenate_3_blocks(value_states, block_dim=1, sequence_dim=2)
        # Tile side inputs across local key/value blocks
        # New shape: (batch_size, num_blocks, global_seq_len, n_heads, dim_per_head)
        reps = [1] * (side_key_states.ndim + 1)
        reps[1] = key_states.shape[1]
        side_key_states = side_key_states.unsqueeze(1).repeat(reps)
        side_value_states = side_value_states.unsqueeze(1).repeat(reps)
        # Concatenate "local" and "side"/"global" key/value states to allow each token to attend global aggregated ones
        # New shape: (batch_size, num_blocks, 3 * block_len + global_seq_len, n_heads, dim_per_head)
        key_states = torch.cat([key_states, side_key_states], dim=2)
        value_states = torch.cat([value_states, side_value_states], dim=2)
        # Compute scores -> (batch_size, num_block, n_heads, block_len, 3 * block_len + global_seq_len)
        scores = torch.einsum("...qhd,...khd->...hqk", query_states, key_states)
        if mask is not None:
            # We need to adjust position bias shape to be sum with mask
            local_attention_mask = _get_local_attention_mask(mask, self.block_len, hidden_states.device)
            # Replace masked positions with -10_000 (according to the original implementation)
            local_attention_mask = torch.where(local_attention_mask > 0, 0.0, -1e10)
        else:
            local_attention_mask = None
        if position_bias is None:
            # position_bias shape: # (1, 1, n_heads, block_len, 3 * block_len)
            if not self.has_relative_attention_bias:
                position_bias = torch.zeros(
                    (1, 1, self.n_heads, self.block_len, 3 * self.block_len),
                    device=scores.device,
                    dtype=scores.dtype,
                )
                if self.gradient_checkpointing and self.training:
                    position_bias.requires_grad = True
            else:
                position_bias = self.compute_bias(self.block_len)
            if local_attention_mask is not None:
                # (batch_size, 1, n_heads, block_len, 3 * block_len)
                position_bias = position_bias + local_attention_mask.transpose(1, 2)
            position_bias = position_bias.type(scores.dtype)
            # Calculate global/side bias - shape: # (batch_size, num_heads, seq_len, global_seq_len)
            if mask is None:
                # NOTE(review): this tensor is created on the default device, not
                # `hidden_states.device` — presumably fine on CPU; verify on GPU runs.
                mask = torch.ones(batch_size, seq_length)
            # (batch_size, num_heads, seq_len, global_seq_len)
            side_position_bias = self.compute_side_bias(mask, global_segment_ids)
            # (batch_size, num_blocks, num_heads, block_len, global_seq_len)
            side_position_bias = _split_into_blocks(side_position_bias, self.block_len, dim=-2).transpose(1, 2)
            side_position_bias = side_position_bias.type(scores.dtype).to(scores.device)
            # (batch_size, num_blocks, num_heads, block_len, 3 * block_len + global_seq_len)
            position_bias = torch.cat([position_bias, side_position_bias], dim=-1)
        scores += position_bias
        # (batch_size, num_blocks, n_heads, block_len, 3 * block_len + global_seq_len)
        attn_weights = nn.functional.softmax(scores.float(), dim=-1).type_as(scores)
        attn_weights = nn.functional.dropout(attn_weights, p=self.dropout, training=self.training)
        # Mask heads if we want to
        if layer_head_mask is not None:
            attn_weights = attn_weights * layer_head_mask
        attn_weights = attn_weights.type(value_states.dtype)
        attn_output = unshape(torch.einsum("...hqk,...khd->...qhd", attn_weights, value_states))
        # drop padding introduced by block splitting so output length matches the input
        attn_output = attn_output[:, :seq_length, :]
        attn_output = self.o(attn_output)
        # no key/value cache for transient-global (encoder-side) attention
        present_key_value_state = None
        outputs = (attn_output,) + (present_key_value_state,) + (position_bias,)
        if output_attentions:
            outputs = outputs + (attn_weights,)
        return outputs
# Copied from transformers.models.t5.modeling_t5.T5LayerSelfAttention with T5->LongT5
class LongT5LayerSelfAttention(nn.Module):
    """Residual self-attention sub-layer: pre-LayerNorm -> LongT5Attention -> dropout -> residual add."""
    def __init__(self, config, has_relative_attention_bias=False):
        super().__init__()
        self.SelfAttention = LongT5Attention(config, has_relative_attention_bias=has_relative_attention_bias)
        self.layer_norm = LongT5LayerNorm(config.d_model, eps=config.layer_norm_epsilon)
        self.dropout = nn.Dropout(config.dropout_rate)
    def forward(
        self,
        hidden_states,
        attention_mask=None,
        position_bias=None,
        layer_head_mask=None,
        past_key_value=None,
        use_cache=False,
        output_attentions=False,
    ):
        """Apply self-attention with a residual connection; passes cache/attention kwargs through.

        Returns (hidden_states, present_key_value_state, position_bias[, attn_weights]).
        """
        normed_hidden_states = self.layer_norm(hidden_states)
        attention_output = self.SelfAttention(
            normed_hidden_states,
            mask=attention_mask,
            position_bias=position_bias,
            layer_head_mask=layer_head_mask,
            past_key_value=past_key_value,
            use_cache=use_cache,
            output_attentions=output_attentions,
        )
        hidden_states = hidden_states + self.dropout(attention_output[0])
        outputs = (hidden_states,) + attention_output[1:]  # add attentions if we output them
        return outputs
class LongT5LayerLocalSelfAttention(nn.Module):
    """Encoder sub-layer wrapping `LongT5LocalAttention` with pre-LayerNorm and a residual connection."""
    def __init__(self, config, has_relative_attention_bias=False):
        super().__init__()
        self.LocalSelfAttention = LongT5LocalAttention(
            config, has_relative_attention_bias=has_relative_attention_bias
        )
        self.layer_norm = LongT5LayerNorm(config.d_model, eps=config.layer_norm_epsilon)
        self.dropout = nn.Dropout(config.dropout_rate)
    def forward(
        self,
        hidden_states,
        attention_mask=None,
        position_bias=None,
        layer_head_mask=None,
        output_attentions=False,
        **kwargs: Any,  # to accept past_key_value and use_cache kwargs
    ):
        """Normalize, attend locally, then add the residual; extra attention outputs pass through unchanged."""
        residual = hidden_states
        attn_outputs = self.LocalSelfAttention(
            self.layer_norm(hidden_states),
            mask=attention_mask,
            position_bias=position_bias,
            layer_head_mask=layer_head_mask,
            output_attentions=output_attentions,
        )
        attn_out, *extras = attn_outputs
        # keep (hidden_states, present_key_value, position_bias[, attn_weights]) tuple shape
        return (residual + self.dropout(attn_out), *extras)
class LongT5LayerTransientGlobalSelfAttention(nn.Module):
    """Encoder sub-layer wrapping `LongT5TransientGlobalAttention` with pre-LayerNorm and a residual connection."""
    def __init__(self, config, has_relative_attention_bias=False):
        super().__init__()
        self.TransientGlobalSelfAttention = LongT5TransientGlobalAttention(
            config, has_relative_attention_bias=has_relative_attention_bias
        )
        self.layer_norm = LongT5LayerNorm(config.d_model, eps=config.layer_norm_epsilon)
        self.dropout = nn.Dropout(config.dropout_rate)
    def forward(
        self,
        hidden_states,
        attention_mask=None,
        position_bias=None,
        layer_head_mask=None,
        output_attentions=False,
        **kwargs: Any,  # to accept past_key_value and use_cache kwargs
    ):
        """Normalize, run transient-global attention, then add the residual; extra outputs pass through unchanged."""
        residual = hidden_states
        attn_outputs = self.TransientGlobalSelfAttention(
            self.layer_norm(hidden_states),
            mask=attention_mask,
            position_bias=position_bias,
            layer_head_mask=layer_head_mask,
            output_attentions=output_attentions,
        )
        attn_out, *extras = attn_outputs
        # keep (hidden_states, present_key_value, position_bias[, attn_weights]) tuple shape
        return (residual + self.dropout(attn_out), *extras)
# Copied from transformers.models.t5.modeling_t5.T5LayerCrossAttention with T5->LongT5
class LongT5LayerCrossAttention(nn.Module):
    """Decoder cross-attention sub-layer: pre-LayerNorm -> LongT5Attention over encoder states -> dropout -> residual add."""
    def __init__(self, config):
        super().__init__()
        self.EncDecAttention = LongT5Attention(config, has_relative_attention_bias=False)
        self.layer_norm = LongT5LayerNorm(config.d_model, eps=config.layer_norm_epsilon)
        self.dropout = nn.Dropout(config.dropout_rate)
    def forward(
        self,
        hidden_states,
        key_value_states,
        attention_mask=None,
        position_bias=None,
        layer_head_mask=None,
        past_key_value=None,
        use_cache=False,
        query_length=None,
        output_attentions=False,
    ):
        """Attend from `hidden_states` to `key_value_states` (encoder output) with a residual connection.

        Returns (layer_output, present_key_value_state, position_bias[, attn_weights]).
        """
        normed_hidden_states = self.layer_norm(hidden_states)
        attention_output = self.EncDecAttention(
            normed_hidden_states,
            mask=attention_mask,
            key_value_states=key_value_states,
            position_bias=position_bias,
            layer_head_mask=layer_head_mask,
            past_key_value=past_key_value,
            use_cache=use_cache,
            query_length=query_length,
            output_attentions=output_attentions,
        )
        layer_output = hidden_states + self.dropout(attention_output[0])
        outputs = (layer_output,) + attention_output[1:]  # add attentions if we output them
        return outputs
class LongT5Block(nn.Module):
    """One LongT5 transformer layer.

    Encoder blocks use local or transient-global self-attention (per
    `config.encoder_attention_type`) followed by a feed-forward sub-layer; decoder
    blocks use standard self-attention, then cross-attention, then feed-forward.
    """
    def __init__(self, config, has_relative_attention_bias=False):
        super().__init__()
        self.is_decoder = config.is_decoder
        # pick the self-attention flavor: decoder always uses full attention,
        # encoder uses the configured sparse variant
        if config.is_decoder:
            attention_layer = LongT5LayerSelfAttention
        elif config.encoder_attention_type == "local":
            attention_layer = LongT5LayerLocalSelfAttention
        elif config.encoder_attention_type == "transient-global":
            attention_layer = LongT5LayerTransientGlobalSelfAttention
        else:
            raise ValueError(
                "For encoder attention mechanism, either `local` or `transient-global` attention type is expected, "
                f"but got {config.encoder_attention_type}."
            )
        self.layer = nn.ModuleList()
        self.layer.append(attention_layer(config, has_relative_attention_bias=has_relative_attention_bias))
        if self.is_decoder:
            self.layer.append(LongT5LayerCrossAttention(config))
        self.layer.append(LongT5LayerFF(config))
    def forward(
        self,
        hidden_states,
        attention_mask=None,
        position_bias=None,
        encoder_hidden_states=None,
        encoder_attention_mask=None,
        encoder_decoder_position_bias=None,
        layer_head_mask=None,
        cross_attn_layer_head_mask=None,
        past_key_value=None,
        use_cache=False,
        output_attentions=False,
        return_dict=True,
    ):
        """Run self-attention (+ cross-attention in the decoder) and the feed-forward sub-layer.

        `past_key_value`, when present, holds 2 cached states (self-attn) or 4
        (self-attn + cross-attn). Returns a tuple of hidden states, optional present
        key/value states, and attention/bias tensors (see the trailing comment).
        """
        if past_key_value is not None:
            if not self.is_decoder:
                logger.warning("`past_key_values` is passed to the encoder. Please make sure this is intended.")
            expected_num_past_key_values = 2 if encoder_hidden_states is None else 4
            if len(past_key_value) != expected_num_past_key_values:
                raise ValueError(
                    f"There should be {expected_num_past_key_values} past states. "
                    f"{'2 (past / key) for cross attention. ' if expected_num_past_key_values == 4 else ''}"
                    f"Got {len(past_key_value)} past key / value states"
                )
            # split the cache into self-attention and cross-attention halves
            self_attn_past_key_value = past_key_value[:2]
            cross_attn_past_key_value = past_key_value[2:]
        else:
            self_attn_past_key_value, cross_attn_past_key_value = None, None
        self_attention_outputs = self.layer[0](
            hidden_states,
            attention_mask=attention_mask,
            position_bias=position_bias,
            layer_head_mask=layer_head_mask,
            past_key_value=self_attn_past_key_value,
            use_cache=use_cache,
            output_attentions=output_attentions,
        )
        hidden_states, present_key_value_state = self_attention_outputs[:2]
        attention_outputs = self_attention_outputs[2:]  # Keep self-attention outputs and relative position weights
        do_cross_attention = self.is_decoder and encoder_hidden_states is not None
        if do_cross_attention:
            # the actual query length is unknown for cross attention
            # if using past key value states. Need to inject it here
            if present_key_value_state is not None:
                query_length = present_key_value_state[0].shape[2]
            else:
                query_length = None
            cross_attention_outputs = self.layer[1](
                hidden_states,
                key_value_states=encoder_hidden_states,
                attention_mask=encoder_attention_mask,
                position_bias=encoder_decoder_position_bias,
                layer_head_mask=cross_attn_layer_head_mask,
                past_key_value=cross_attn_past_key_value,
                query_length=query_length,
                use_cache=use_cache,
                output_attentions=output_attentions,
            )
            hidden_states = cross_attention_outputs[0]
            # Combine self attn and cross attn key value states
            if present_key_value_state is not None:
                present_key_value_state = present_key_value_state + cross_attention_outputs[1]
            # Keep cross-attention outputs and relative position weights
            attention_outputs = attention_outputs + cross_attention_outputs[2:]
        # Apply Feed Forward layer
        hidden_states = self.layer[-1](hidden_states)
        outputs = (hidden_states,)
        if use_cache:
            outputs = outputs + (present_key_value_state,) + attention_outputs
        else:
            outputs = outputs + attention_outputs
        return outputs  # hidden-states, present_key_value_states, (self-attention position bias), (self-attention weights), (cross-attention position bias), (cross-attention weights)
class LongT5PreTrainedModel(PreTrainedModel):
    """
    An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained
    models.
    """
    config_class = LongT5Config
    base_model_prefix = "transformer"
    supports_gradient_checkpointing = True
    @property
    # Copied from transformers.models.t5.modeling_t5.T5PreTrainedModel.dummy_inputs
    def dummy_inputs(self):
        """Return a small set of dummy encoder/decoder inputs (e.g. for tracing/export sanity checks)."""
        input_ids = torch.tensor(DUMMY_INPUTS)
        input_mask = torch.tensor(DUMMY_MASK)
        dummy_inputs = {
            "decoder_input_ids": input_ids,
            "input_ids": input_ids,
            "decoder_attention_mask": input_mask,
        }
        return dummy_inputs
    def _init_weights(self, module):
        """Initialize the weights following the Mesh TensorFlow T5 initialization scheme."""
        factor = self.config.initializer_factor  # Used for testing weights initialization
        if isinstance(module, LongT5LayerNorm):
            module.weight.data.fill_(factor * 1.0)
        elif isinstance(module, (LongT5Model, LongT5ForConditionalGeneration, LongT5EncoderModel)):
            # Mesh TensorFlow embeddings initialization
            # See https://github.com/tensorflow/mesh/blob/fa19d69eafc9a482aff0b59ddd96b025c0cb207d/mesh_tensorflow/layers.py#L1624
            module.shared.weight.data.normal_(mean=0.0, std=factor * 1.0)
        elif isinstance(module, LongT5DenseActDense):
            # Mesh TensorFlow FF initialization
            # See https://github.com/tensorflow/mesh/blob/master/mesh_tensorflow/transformer/transformer_layers.py#L56
            # and https://github.com/tensorflow/mesh/blob/fa19d69eafc9a482aff0b59ddd96b025c0cb207d/mesh_tensorflow/layers.py#L89
            module.wi.weight.data.normal_(mean=0.0, std=factor * ((self.config.d_model) ** -0.5))
            if hasattr(module.wi, "bias") and module.wi.bias is not None:
                module.wi.bias.data.zero_()
            module.wo.weight.data.normal_(mean=0.0, std=factor * ((self.config.d_ff) ** -0.5))
            if hasattr(module.wo, "bias") and module.wo.bias is not None:
                module.wo.bias.data.zero_()
        elif isinstance(module, LongT5DenseGatedActDense):
            module.wi_0.weight.data.normal_(mean=0.0, std=factor * ((self.config.d_model) ** -0.5))
            if hasattr(module.wi_0, "bias") and module.wi_0.bias is not None:
                module.wi_0.bias.data.zero_()
            module.wi_1.weight.data.normal_(mean=0.0, std=factor * ((self.config.d_model) ** -0.5))
            if hasattr(module.wi_1, "bias") and module.wi_1.bias is not None:
                module.wi_1.bias.data.zero_()
            module.wo.weight.data.normal_(mean=0.0, std=factor * ((self.config.d_ff) ** -0.5))
            if hasattr(module.wo, "bias") and module.wo.bias is not None:
                module.wo.bias.data.zero_()
        elif isinstance(module, (LongT5Attention, LongT5LocalAttention, LongT5TransientGlobalAttention)):
            # Mesh TensorFlow attention initialization to avoid scaling before softmax
            # See https://github.com/tensorflow/mesh/blob/fa19d69eafc9a482aff0b59ddd96b025c0cb207d/mesh_tensorflow/transformer/attention.py#L136
            d_model = self.config.d_model
            key_value_proj_dim = self.config.d_kv
            n_heads = self.config.num_heads
            module.q.weight.data.normal_(mean=0.0, std=factor * ((d_model * key_value_proj_dim) ** -0.5))
            module.k.weight.data.normal_(mean=0.0, std=factor * (d_model**-0.5))
            module.v.weight.data.normal_(mean=0.0, std=factor * (d_model**-0.5))
            module.o.weight.data.normal_(mean=0.0, std=factor * ((n_heads * key_value_proj_dim) ** -0.5))
            if module.has_relative_attention_bias:
                module.relative_attention_bias.weight.data.normal_(mean=0.0, std=factor * ((d_model) ** -0.5))
            if isinstance(module, LongT5TransientGlobalAttention):
                module.global_relative_attention_bias.weight.data.normal_(
                    mean=0.0, std=factor * ((d_model) ** -0.5)
                )
    # Copied from transformers.models.t5.modeling_t5.T5PreTrainedModel._set_gradient_checkpointing with T5->LongT5
    def _set_gradient_checkpointing(self, module, value=False):
        """Enable/disable gradient checkpointing on the attention/stack sub-modules."""
        if isinstance(module, (LongT5Attention, LongT5Stack)):
            module.gradient_checkpointing = value
    # Copied from transformers.models.t5.modeling_t5.T5PreTrainedModel._shift_right with T5->LongT5
    def _shift_right(self, input_ids):
        """Shift `input_ids` one position to the right and prepend `decoder_start_token_id`.

        Used to build `decoder_input_ids` from `labels`. `-100` label placeholders are replaced by
        `pad_token_id`.

        Raises:
            ValueError: if `decoder_start_token_id` or `pad_token_id` is not configured, or if the
                shifted ids contain negative values.
        """
        decoder_start_token_id = self.config.decoder_start_token_id
        pad_token_id = self.config.pad_token_id
        # Validate with explicit raises (not `assert`) so the checks survive `python -O`.
        if decoder_start_token_id is None:
            raise ValueError(
                "self.model.config.decoder_start_token_id has to be defined. In LongT5 it is usually set to the"
                " pad_token_id. See LongT5 docs for more information"
            )
        # shift inputs to the right
        if is_torch_fx_proxy(input_ids):
            # Item assignment is not supported natively for proxies.
            shifted_input_ids = torch.full(input_ids.shape[:-1] + (1,), decoder_start_token_id)
            shifted_input_ids = torch.cat([shifted_input_ids, input_ids[..., :-1]], dim=-1)
        else:
            shifted_input_ids = input_ids.new_zeros(input_ids.shape)
            shifted_input_ids[..., 1:] = input_ids[..., :-1].clone()
            shifted_input_ids[..., 0] = decoder_start_token_id
        if pad_token_id is None:
            raise ValueError("self.model.config.pad_token_id has to be defined.")
        # replace possible -100 values in labels by `pad_token_id`
        shifted_input_ids.masked_fill_(shifted_input_ids == -100, pad_token_id)
        if not torch.all(shifted_input_ids >= 0).item():
            raise ValueError("Verify that `shifted_input_ids` has only positive values")
        return shifted_input_ids
class LongT5Stack(LongT5PreTrainedModel):
    """Stack of `LongT5Block`s acting as either the encoder or the decoder, depending on `config.is_decoder`."""
    def __init__(self, config, embed_tokens=None):
        super().__init__(config)
        # Token embedding table; typically shared between encoder and decoder by the parent model.
        self.embed_tokens = embed_tokens
        self.is_decoder = config.is_decoder
        # Block length used by the encoder-side local / transient-global attention masks.
        self.local_radius = config.local_radius
        self.block_len = self.local_radius + 1
        # Only the first block holds the relative attention bias; later blocks reuse its position bias (see forward).
        self.block = nn.ModuleList(
            [LongT5Block(config, has_relative_attention_bias=bool(i == 0)) for i in range(config.num_layers)]
        )
        self.final_layer_norm = LongT5LayerNorm(config.d_model, eps=config.layer_norm_epsilon)
        self.dropout = nn.Dropout(config.dropout_rate)
        # Initialize weights and apply final processing
        self.post_init()
        self.gradient_checkpointing = False
    # Copied from transformers.models.t5.modeling_t5.T5Stack.get_input_embeddings
    def get_input_embeddings(self):
        """Return the token embedding module."""
        return self.embed_tokens
    # Copied from transformers.models.t5.modeling_t5.T5Stack.set_input_embeddings
    def set_input_embeddings(self, new_embeddings):
        """Replace the token embedding module."""
        self.embed_tokens = new_embeddings
    def forward(
        self,
        input_ids=None,
        attention_mask=None,
        encoder_hidden_states=None,
        encoder_attention_mask=None,
        inputs_embeds=None,
        head_mask=None,
        cross_attn_head_mask=None,
        past_key_values=None,
        use_cache=None,
        output_attentions=None,
        output_hidden_states=None,
        return_dict=None,
    ):
        # Resolve output/caching flags, falling back to the config defaults when not given.
        use_cache = use_cache if use_cache is not None else self.config.use_cache
        output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict
        # Exactly one of `input_ids` / `inputs_embeds` must be provided.
        if input_ids is not None and inputs_embeds is not None:
            err_msg_prefix = "decoder_" if self.is_decoder else ""
            raise ValueError(
                f"You cannot specify both {err_msg_prefix}input_ids and {err_msg_prefix}inputs_embeds at the same time"
            )
        elif input_ids is not None:
            input_shape = input_ids.size()
            input_ids = input_ids.view(-1, input_shape[-1])
        elif inputs_embeds is not None:
            input_shape = inputs_embeds.size()[:-1]
        else:
            err_msg_prefix = "decoder_" if self.is_decoder else ""
            raise ValueError(f"You have to specify either {err_msg_prefix}input_ids or {err_msg_prefix}inputs_embeds")
        if inputs_embeds is None:
            assert self.embed_tokens is not None, "You have to initialize the model with valid token embeddings"
            inputs_embeds = self.embed_tokens(input_ids)
        batch_size, seq_length = input_shape
        # required mask seq length can be calculated via length of past
        mask_seq_length = past_key_values[0][0].shape[2] + seq_length if past_key_values is not None else seq_length
        if use_cache is True:
            assert self.is_decoder, f"`use_cache` can only be set to `True` if {self} is used as a decoder"
        if attention_mask is None:
            # Default: attend to everything (including past cached positions).
            attention_mask = torch.ones(batch_size, mask_seq_length).to(inputs_embeds.device)
        if self.is_decoder and encoder_attention_mask is None and encoder_hidden_states is not None:
            encoder_seq_length = encoder_hidden_states.shape[1]
            encoder_attention_mask = torch.ones(
                batch_size, encoder_seq_length, device=inputs_embeds.device, dtype=torch.long
            )
        # initialize past_key_values with `None` if past does not exist
        if past_key_values is None:
            past_key_values = [None] * len(self.block)
        # We can provide a self-attention mask of dimensions [batch_size, from_seq_length, to_seq_length]
        # ourselves in which case we just need to make it broadcastable to all heads.
        # We use local attention in encoder self-attention, otherwise standard self & cross attentions are used
        if self.is_decoder:
            extended_attention_mask = self.get_extended_attention_mask(
                attention_mask, input_shape, inputs_embeds.device
            )
        elif self.config.encoder_attention_type == "local":
            extended_attention_mask = _get_local_attention_mask(attention_mask, self.block_len, inputs_embeds.device)
        else:  # we need to use both local attention mask and standard extended mask for transient-global attention
            extended_attention_mask = attention_mask
        # If a 2D or 3D attention mask is provided for the cross-attention
        # we need to make broadcastable to [batch_size, num_heads, seq_length, seq_length]
        if self.is_decoder and encoder_hidden_states is not None:
            encoder_batch_size, encoder_sequence_length, _ = encoder_hidden_states.size()
            encoder_hidden_shape = (encoder_batch_size, encoder_sequence_length)
            if encoder_attention_mask is None:
                encoder_attention_mask = torch.ones(encoder_hidden_shape, device=inputs_embeds.device)
            encoder_extended_attention_mask = self.invert_attention_mask(encoder_attention_mask)
        else:
            encoder_extended_attention_mask = None
        # Prepare head mask if needed
        head_mask = self.get_head_mask(head_mask, self.config.num_layers)
        cross_attn_head_mask = self.get_head_mask(cross_attn_head_mask, self.config.num_layers)
        # Accumulators for the optional outputs (kept as `None` when disabled).
        present_key_value_states = () if use_cache else None
        all_hidden_states = () if output_hidden_states else None
        all_attentions = () if output_attentions else None
        all_cross_attentions = () if (output_attentions and self.is_decoder) else None
        position_bias = None
        encoder_decoder_position_bias = None
        hidden_states = self.dropout(inputs_embeds)
        for i, (layer_module, past_key_value) in enumerate(zip(self.block, past_key_values)):
            layer_head_mask = head_mask[i]
            cross_attn_layer_head_mask = cross_attn_head_mask[i]
            if output_hidden_states:
                all_hidden_states = all_hidden_states + (hidden_states,)
            if self.gradient_checkpointing and self.training:
                if use_cache:
                    # Caching is incompatible with gradient checkpointing; force it off.
                    use_cache = False
                def create_custom_forward(module):
                    def custom_forward(*inputs):
                        return tuple(module(*inputs, use_cache, output_attentions))
                    return custom_forward
                layer_outputs = checkpoint(
                    create_custom_forward(layer_module),
                    hidden_states,
                    extended_attention_mask,
                    position_bias,
                    encoder_hidden_states,
                    encoder_extended_attention_mask,
                    encoder_decoder_position_bias,
                    layer_head_mask,
                    cross_attn_layer_head_mask,
                    None,  # past_key_value is always None with gradient checkpointing
                )
            else:
                layer_outputs = layer_module(
                    hidden_states,
                    attention_mask=extended_attention_mask,
                    position_bias=position_bias,
                    encoder_hidden_states=encoder_hidden_states,
                    encoder_attention_mask=encoder_extended_attention_mask,
                    encoder_decoder_position_bias=encoder_decoder_position_bias,
                    layer_head_mask=layer_head_mask,
                    cross_attn_layer_head_mask=cross_attn_layer_head_mask,
                    past_key_value=past_key_value,
                    use_cache=use_cache,
                    output_attentions=output_attentions,
                )
            # layer_outputs is a tuple with:
            # hidden-states, key-value-states, (self-attention position bias), (self-attention weights), (cross-attention position bias), (cross-attention weights)
            if use_cache is False:
                # Insert a `None` placeholder so the tuple positions below are uniform with the cached case.
                layer_outputs = layer_outputs[:1] + (None,) + layer_outputs[1:]
            hidden_states, present_key_value_state = layer_outputs[:2]
            # We share the position biases between the layers - the first layer store them
            # layer_outputs = hidden-states, key-value-states (self-attention position bias), (self-attention weights),
            # (cross-attention position bias), (cross-attention weights)
            position_bias = layer_outputs[2]
            if self.is_decoder and encoder_hidden_states is not None:
                encoder_decoder_position_bias = layer_outputs[4 if output_attentions else 3]
            # append next layer key value states
            if use_cache:
                present_key_value_states = present_key_value_states + (present_key_value_state,)
            if output_attentions:
                all_attentions = all_attentions + (layer_outputs[3],)
                if self.is_decoder:
                    all_cross_attentions = all_cross_attentions + (layer_outputs[5],)
        hidden_states = self.final_layer_norm(hidden_states)
        hidden_states = self.dropout(hidden_states)
        # Add last layer
        if output_hidden_states:
            all_hidden_states = all_hidden_states + (hidden_states,)
        if not return_dict:
            # Tuple return: drop any accumulator that was disabled (`None`).
            return tuple(
                v
                for v in [
                    hidden_states,
                    present_key_value_states,
                    all_hidden_states,
                    all_attentions,
                    all_cross_attentions,
                ]
                if v is not None
            )
        return BaseModelOutputWithPastAndCrossAttentions(
            last_hidden_state=hidden_states,
            past_key_values=present_key_value_states,
            hidden_states=all_hidden_states,
            attentions=all_attentions,
            cross_attentions=all_cross_attentions,
        )
LONGT5_START_DOCSTRING = r"""
The LongT5 model was proposed in [LongT5: Efficient Text-To-Text Transformer for Long
Sequences](https://arxiv.org/abs/2112.07916) by <NAME>, <NAME>, <NAME>, <NAME>, <NAME>, <NAME> and <NAME>. It's an encoder-decoder transformer pre-trained in a text-to-text denoising
generative setting. LongT5 model is an extension of T5 model, and it enables using one of the two different
efficient attention mechanisms - (1) Local attention, or (2) Transient-Global attention.
This model inherits from [`PreTrainedModel`]. Check the superclass documentation for the generic methods the
    library implements for all its models (such as downloading or saving, resizing the input embeddings, pruning heads
etc.)
This model is also a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass.
Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage
and behavior.
Parameters:
config ([`LongT5Config`]): Model configuration class with all the parameters of the model.
Initializing with a config file does not load the weights associated with the model, only the
configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.
"""
LONGT5_INPUTS_DOCSTRING = r"""
Args:
input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`):
Indices of input sequence tokens in the vocabulary. LongT5 is a model with relative position embeddings so
you should be able to pad the inputs on both the right and the left.
Indices can be obtained using [`T5Tokenizer`]. See [`PreTrainedTokenizer.encode`] and
            [`PreTrainedTokenizer.__call__`] for details.
[What are input IDs?](../glossary#input-ids)
            To know more on how to prepare `input_ids` for pretraining take a look at [LONGT5
Training](./longt5#training).
attention_mask (`torch.FloatTensor` of shape `(batch_size, sequence_length)`, *optional*):
Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:
- 1 for tokens that are **not masked**,
- 0 for tokens that are **masked**.
[What are attention masks?](../glossary#attention-mask)
decoder_input_ids (`torch.LongTensor` of shape `(batch_size, target_sequence_length)`, *optional*):
Indices of decoder input sequence tokens in the vocabulary.
Indices can be obtained using [`T5Tokenizer`]. See [`PreTrainedTokenizer.encode`] and
[`PreTrainedTokenizer.__call__`] for details.
[What are decoder input IDs?](../glossary#decoder-input-ids)
LONGT5 uses the `pad_token_id` as the starting token for `decoder_input_ids` generation. If
`past_key_values` is used, optionally only the last `decoder_input_ids` have to be input (see
`past_key_values`).
To know more on how to prepare `decoder_input_ids` for pretraining take a look at [LONGT5
Training](./longt5#training).
decoder_attention_mask (`torch.BoolTensor` of shape `(batch_size, target_sequence_length)`, *optional*):
Default behavior: generate a tensor that ignores pad tokens in `decoder_input_ids`. Causal mask will also
be used by default.
head_mask (`torch.FloatTensor` of shape `(num_heads,)` or `(num_layers, num_heads)`, *optional*):
Mask to nullify selected heads of the self-attention modules in the encoder. Mask values selected in `[0,
1]`:
- 1 indicates the head is **not masked**,
- 0 indicates the head is **masked**.
decoder_head_mask (`torch.FloatTensor` of shape `(num_heads,)` or `(num_layers, num_heads)`, *optional*):
Mask to nullify selected heads of the self-attention modules in the decoder. Mask values selected in `[0,
1]`:
- 1 indicates the head is **not masked**,
- 0 indicates the head is **masked**.
cross_attn_head_mask (`torch.Tensor` of shape `(num_heads,)` or `(num_layers, num_heads)`, *optional*):
Mask to nullify selected heads of the cross-attention modules in the decoder. Mask values selected in
`[0, 1]`:
- 1 indicates the head is **not masked**,
- 0 indicates the head is **masked**.
encoder_outputs (`tuple(tuple(torch.FloatTensor)`, *optional*):
Tuple consists of (`last_hidden_state`, `optional`: *hidden_states*, `optional`: *attentions*)
`last_hidden_state` of shape `(batch_size, sequence_length, hidden_size)` is a sequence of hidden states at
the output of the last layer of the encoder. Used in the cross-attention of the decoder.
past_key_values (`tuple(tuple(torch.FloatTensor))` of length `config.n_layers` with each tuple having 4 tensors of shape `(batch_size, num_heads, sequence_length - 1, embed_size_per_head)`):
Contains precomputed key and value hidden states of the attention blocks. Can be used to speed up decoding.
If `past_key_values` are used, the user can optionally input only the last `decoder_input_ids` (those that
don't have their past key value states given to this model) of shape `(batch_size, 1)` instead of all
`decoder_input_ids` of shape `(batch_size, sequence_length)`.
inputs_embeds (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*):
Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation. This
is useful if you want more control over how to convert `input_ids` indices into associated vectors than the
model's internal embedding lookup matrix.
decoder_inputs_embeds (`torch.FloatTensor` of shape `(batch_size, target_sequence_length, hidden_size)`, *optional*):
Optionally, instead of passing `decoder_input_ids` you can choose to directly pass an embedded
representation. If `past_key_values` is used, optionally only the last `decoder_inputs_embeds` have to be
input (see `past_key_values`). This is useful if you want more control over how to convert
`decoder_input_ids` indices into associated vectors than the model's internal embedding lookup matrix.
If `decoder_input_ids` and `decoder_inputs_embeds` are both unset, `decoder_inputs_embeds` takes the value
of `inputs_embeds`.
use_cache (`bool`, *optional*):
If set to `True`, `past_key_values` key value states are returned and can be used to speed up decoding (see
`past_key_values`).
output_attentions (`bool`, *optional*):
Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned
tensors for more detail.
output_hidden_states (`bool`, *optional*):
Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
more detail.
return_dict (`bool`, *optional*):
Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
"""
LONGT5_ENCODER_INPUTS_DOCSTRING = r"""
Args:
input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`):
Indices of input sequence tokens in the vocabulary. LongT5 is a model with relative position embeddings so
you should be able to pad the inputs on both the right and the left.
Indices can be obtained using [`T5Tokenizer`]. See [`PreTrainedTokenizer.encode`] and
[`PreTrainedTokenizer.__call__`] for detail.
To know more on how to prepare `input_ids` for pretraining take a look a [LONGT5
Training](./longt5#training).
attention_mask (`torch.FloatTensor` of shape `(batch_size, sequence_length)`, *optional*):
Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:
- 1 for tokens that are **not masked**,
- 0 for tokens that are **masked**.
[What are attention masks?](../glossary#attention-mask)
head_mask (`torch.FloatTensor` of shape `(num_heads,)` or `(num_layers, num_heads)`, *optional*):
Mask to nullify selected heads of the self-attention modules. Mask values selected in `[0, 1]`:
- 1 indicates the head is **not masked**,
- 0 indicates the head is **masked**.
inputs_embeds (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*):
Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation. This
is useful if you want more control over how to convert `input_ids` indices into associated vectors than the
model's internal embedding lookup matrix.
output_attentions (`bool`, *optional*):
Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned
tensors for more detail.
output_hidden_states (`bool`, *optional*):
Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
more detail.
return_dict (`bool`, *optional*):
Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
"""
# Warning message for FutureWarning: head_mask was separated into two input args - head_mask, decoder_head_mask
__HEAD_MASK_WARNING_MSG = """
The input argument `head_mask` was split into two arguments `head_mask` and `decoder_head_mask`. Currently,
`decoder_head_mask` is set to copy `head_mask`, but this feature is deprecated and will be removed in future versions.
If you do not want to use any `decoder_head_mask` now, please set `decoder_head_mask = torch.ones(num_layers,
num_heads)`.
"""
@add_start_docstrings(
    "The bare LONGT5 Model transformer outputting raw hidden-states without any specific head on top.",
    LONGT5_START_DOCSTRING,
)
class LongT5Model(LongT5PreTrainedModel):
    # Embedding weights are tied to `self.shared`, so their absence in a checkpoint is expected.
    _keys_to_ignore_on_load_missing = [
        r"encoder.embed_tokens.weight",
        r"decoder.embed_tokens.weight",
    ]
    _keys_to_ignore_on_load_unexpected = [
        r"decoder.block.0.layer.1.EncDecAttention.relative_attention_bias.weight",
    ]
    def __init__(self, config: LongT5Config):
        super().__init__(config)
        # Token embedding shared between encoder and decoder.
        self.shared = nn.Embedding(config.vocab_size, config.d_model)
        # Encoder and decoder get independent (deep-copied) configs with the appropriate flags.
        encoder_config = copy.deepcopy(config)
        encoder_config.is_decoder = False
        encoder_config.use_cache = False
        encoder_config.is_encoder_decoder = False
        self.encoder = LongT5Stack(encoder_config, self.shared)
        decoder_config = copy.deepcopy(config)
        decoder_config.is_decoder = True
        decoder_config.is_encoder_decoder = False
        decoder_config.num_layers = config.num_decoder_layers
        self.decoder = LongT5Stack(decoder_config, self.shared)
        # Initialize weights and apply final processing
        self.post_init()
    def get_input_embeddings(self):
        """Return the shared token embedding module."""
        return self.shared
    def set_input_embeddings(self, new_embeddings):
        """Replace the shared token embedding and propagate it to both stacks."""
        self.shared = new_embeddings
        self.encoder.set_input_embeddings(new_embeddings)
        self.decoder.set_input_embeddings(new_embeddings)
    def get_encoder(self):
        """Return the encoder stack."""
        return self.encoder
    def get_decoder(self):
        """Return the decoder stack."""
        return self.decoder
    def _prune_heads(self, heads_to_prune):
        """
        Prunes heads of the model. heads_to_prune: dict of {layer_num: list of heads to prune in this layer} See base
        class PreTrainedModel
        """
        for layer, heads in heads_to_prune.items():
            self.encoder.layer[layer].attention.prune_heads(heads)
    @add_start_docstrings_to_model_forward(LONGT5_INPUTS_DOCSTRING)
    @replace_return_docstrings(output_type=Seq2SeqModelOutput, config_class=_CONFIG_FOR_DOC)
    def forward(
        self,
        input_ids: Optional[torch.LongTensor] = None,
        attention_mask: Optional[torch.FloatTensor] = None,
        decoder_input_ids: Optional[torch.LongTensor] = None,
        decoder_attention_mask: Optional[torch.BoolTensor] = None,
        head_mask: Optional[torch.FloatTensor] = None,
        decoder_head_mask: Optional[torch.FloatTensor] = None,
        cross_attn_head_mask: Optional[torch.Tensor] = None,
        encoder_outputs: Optional[Tuple[Tuple[torch.FloatTensor]]] = None,
        past_key_values: Optional[Tuple[Tuple[torch.FloatTensor]]] = None,
        inputs_embeds: Optional[torch.Tensor] = None,
        decoder_inputs_embeds: Optional[torch.Tensor] = None,
        use_cache: Optional[bool] = None,
        output_attentions: Optional[bool] = None,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
    ) -> Union[Tuple[torch.FloatTensor], Seq2SeqModelOutput]:
        r"""
        Returns:
        Example:
        ```python
        >>> from transformers import T5Tokenizer, LongT5Model
        >>> tokenizer = T5Tokenizer.from_pretrained("google/long-t5-local-base")
        >>> model = LongT5Model.from_pretrained("google/long-t5-local-base")
        >>> # Let's try a very long encoder input.
        >>> input_ids = tokenizer(
        ...     100 * "Studies have been shown that owning a dog is good for you", return_tensors="pt"
        ... ).input_ids  # Batch size 1
        >>> decoder_input_ids = tokenizer("Studies show that", return_tensors="pt").input_ids  # Batch size 1
        >>> # forward pass
        >>> outputs = model(input_ids=input_ids, decoder_input_ids=decoder_input_ids)
        >>> last_hidden_states = outputs.last_hidden_state
        ```"""
        use_cache = use_cache if use_cache is not None else self.config.use_cache
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict
        # FutureWarning: head_mask was separated into two input args - head_mask, decoder_head_mask
        if head_mask is not None and decoder_head_mask is None:
            if self.config.num_layers == self.config.num_decoder_layers:
                warnings.warn(__HEAD_MASK_WARNING_MSG, FutureWarning)
                decoder_head_mask = head_mask
        # Encode if needed (training, first prediction pass)
        if encoder_outputs is None:
            encoder_outputs = self.encoder(
                input_ids=input_ids,
                attention_mask=attention_mask,
                inputs_embeds=inputs_embeds,
                head_mask=head_mask,
                output_attentions=output_attentions,
                output_hidden_states=output_hidden_states,
                return_dict=return_dict,
            )
        elif return_dict and not isinstance(encoder_outputs, BaseModelOutput):
            # Wrap a user-supplied tuple into the dataclass expected downstream.
            encoder_outputs = BaseModelOutput(
                last_hidden_state=encoder_outputs[0],
                hidden_states=encoder_outputs[1] if len(encoder_outputs) > 1 else None,
                attentions=encoder_outputs[2] if len(encoder_outputs) > 2 else None,
            )
        hidden_states = encoder_outputs[0]
        # Decode
        decoder_outputs = self.decoder(
            input_ids=decoder_input_ids,
            attention_mask=decoder_attention_mask,
            inputs_embeds=decoder_inputs_embeds,
            past_key_values=past_key_values,
            encoder_hidden_states=hidden_states,
            encoder_attention_mask=attention_mask,
            head_mask=decoder_head_mask,
            cross_attn_head_mask=cross_attn_head_mask,
            use_cache=use_cache,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
        )
        if not return_dict:
            return decoder_outputs + encoder_outputs
        return Seq2SeqModelOutput(
            last_hidden_state=decoder_outputs.last_hidden_state,
            past_key_values=decoder_outputs.past_key_values,
            decoder_hidden_states=decoder_outputs.hidden_states,
            decoder_attentions=decoder_outputs.attentions,
            cross_attentions=decoder_outputs.cross_attentions,
            encoder_last_hidden_state=encoder_outputs.last_hidden_state,
            encoder_hidden_states=encoder_outputs.hidden_states,
            encoder_attentions=encoder_outputs.attentions,
        )
@add_start_docstrings("""LONGT5 Model with a `language modeling` head on top.""", LONGT5_START_DOCSTRING)
class LongT5ForConditionalGeneration(LongT5PreTrainedModel):
    # Embedding / LM-head weights may be tied to `self.shared`, so their absence in a checkpoint is expected.
    _keys_to_ignore_on_load_missing = [
        r"encoder.embed_tokens.weight",
        r"decoder.embed_tokens.weight",
        r"lm_head.weight",
    ]
    _keys_to_ignore_on_load_unexpected = [
        r"decoder.block.0.layer.1.EncDecAttention.relative_attention_bias.weight",
    ]
    def __init__(self, config: LongT5Config):
        super().__init__(config)
        # `model_dim` is used to rescale decoder outputs when word embeddings are tied (see forward).
        self.model_dim = config.d_model
        self.shared = nn.Embedding(config.vocab_size, config.d_model)
        # Encoder and decoder get independent (deep-copied) configs with the appropriate flags.
        encoder_config = copy.deepcopy(config)
        encoder_config.is_decoder = False
        encoder_config.use_cache = False
        encoder_config.is_encoder_decoder = False
        self.encoder = LongT5Stack(encoder_config, self.shared)
        decoder_config = copy.deepcopy(config)
        decoder_config.is_decoder = True
        decoder_config.is_encoder_decoder = False
        decoder_config.num_layers = config.num_decoder_layers
        self.decoder = LongT5Stack(decoder_config, self.shared)
        # Projection from decoder hidden states to vocabulary logits.
        self.lm_head = nn.Linear(config.d_model, config.vocab_size, bias=False)
        # Initialize weights and apply final processing
        self.post_init()
    def get_input_embeddings(self):
        """Return the shared token embedding module."""
        return self.shared
    def set_input_embeddings(self, new_embeddings):
        """Replace the shared token embedding and propagate it to both stacks."""
        self.shared = new_embeddings
        self.encoder.set_input_embeddings(new_embeddings)
        self.decoder.set_input_embeddings(new_embeddings)
    def set_output_embeddings(self, new_embeddings):
        """Replace the LM head."""
        self.lm_head = new_embeddings
    def get_output_embeddings(self):
        """Return the LM head."""
        return self.lm_head
    def get_encoder(self):
        """Return the encoder stack."""
        return self.encoder
    def get_decoder(self):
        """Return the decoder stack."""
        return self.decoder
    @add_start_docstrings_to_model_forward(LONGT5_INPUTS_DOCSTRING)
    @replace_return_docstrings(output_type=Seq2SeqLMOutput, config_class=_CONFIG_FOR_DOC)
    def forward(
        self,
        input_ids: Optional[torch.LongTensor] = None,
        attention_mask: Optional[torch.FloatTensor] = None,
        decoder_input_ids: Optional[torch.LongTensor] = None,
        decoder_attention_mask: Optional[torch.BoolTensor] = None,
        head_mask: Optional[torch.FloatTensor] = None,
        decoder_head_mask: Optional[torch.FloatTensor] = None,
        cross_attn_head_mask: Optional[torch.Tensor] = None,
        encoder_outputs: Optional[Tuple[Tuple[torch.Tensor]]] = None,
        past_key_values: Optional[Tuple[Tuple[torch.Tensor]]] = None,
        inputs_embeds: Optional[torch.FloatTensor] = None,
        decoder_inputs_embeds: Optional[torch.FloatTensor] = None,
        labels: Optional[torch.LongTensor] = None,
        use_cache: Optional[bool] = None,
        output_attentions: Optional[bool] = None,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
    ) -> Union[Tuple[torch.FloatTensor], Seq2SeqLMOutput]:
        r"""
        labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
            Labels for computing the sequence classification/regression loss. Indices should be in `[-100, 0, ...,
            config.vocab_size - 1]`. All labels set to `-100` are ignored (masked), the loss is only computed for
            labels in `[0, ..., config.vocab_size]`
        Returns:
        Examples:
        ```python
        >>> from transformers import AutoTokenizer, LongT5ForConditionalGeneration
        >>> tokenizer = AutoTokenizer.from_pretrained("Stancld/longt5-tglobal-large-16384-pubmed-3k_steps")
        >>> model = LongT5ForConditionalGeneration.from_pretrained(
        ...     "Stancld/longt5-tglobal-large-16384-pubmed-3k_steps"
        ... )
        >>> # Let's try a very long input.
        >>> input_ids = tokenizer(
        ...     "summarize: " + 100 * "studies have shown that owning a dog is good for you ", return_tensors="pt"
        ... ).input_ids  # Batch size 1
        >>> outputs = model.generate(input_ids)
        >>> print(tokenizer.decode(outputs[0], skip_special_tokens=True))
        abstractthe aim of this article is to summarize the studies have shown that owning a dog
        ```"""
        use_cache = use_cache if use_cache is not None else self.config.use_cache
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict
        # FutureWarning: head_mask was separated into two input args - head_mask, decoder_head_mask
        if head_mask is not None and decoder_head_mask is None:
            if self.config.num_layers == self.config.num_decoder_layers:
                warnings.warn(__HEAD_MASK_WARNING_MSG, FutureWarning)
                decoder_head_mask = head_mask
        # Encode if needed (training, first prediction pass)
        if encoder_outputs is None:
            # Convert encoder inputs in embeddings if needed
            encoder_outputs = self.encoder(
                input_ids=input_ids,
                attention_mask=attention_mask,
                inputs_embeds=inputs_embeds,
                head_mask=head_mask,
                output_attentions=output_attentions,
                output_hidden_states=output_hidden_states,
                return_dict=return_dict,
            )
        elif return_dict and not isinstance(encoder_outputs, BaseModelOutput):
            # Wrap a user-supplied tuple into the dataclass expected downstream.
            encoder_outputs = BaseModelOutput(
                last_hidden_state=encoder_outputs[0],
                hidden_states=encoder_outputs[1] if len(encoder_outputs) > 1 else None,
                attentions=encoder_outputs[2] if len(encoder_outputs) > 2 else None,
            )
        hidden_states = encoder_outputs[0]
        if labels is not None and decoder_input_ids is None and decoder_inputs_embeds is None:
            # get decoder inputs from shifting lm labels to the right
            decoder_input_ids = self._shift_right(labels)
        # Decode
        decoder_outputs = self.decoder(
            input_ids=decoder_input_ids,
            attention_mask=decoder_attention_mask,
            inputs_embeds=decoder_inputs_embeds,
            past_key_values=past_key_values,
            encoder_hidden_states=hidden_states,
            encoder_attention_mask=attention_mask,
            head_mask=decoder_head_mask,
            cross_attn_head_mask=cross_attn_head_mask,
            use_cache=use_cache,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
        )
        sequence_output = decoder_outputs[0]
        if self.config.tie_word_embeddings:
            # Rescale output before projecting on vocab
            # See https://github.com/tensorflow/mesh/blob/fa19d69eafc9a482aff0b59ddd96b025c0cb207d/mesh_tensorflow/transformer/transformer.py#L586
            sequence_output = sequence_output * (self.model_dim**-0.5)
        lm_logits = self.lm_head(sequence_output)
        loss = None
        if labels is not None:
            # `-100` labels are ignored, matching the masking convention in `_shift_right`.
            loss_fct = CrossEntropyLoss(ignore_index=-100)
            loss = loss_fct(lm_logits.view(-1, lm_logits.size(-1)), labels.view(-1))
            # TODO(thom): Add z_loss https://github.com/tensorflow/mesh/blob/fa19d69eafc9a482aff0b59ddd96b025c0cb207d/mesh_tensorflow/layers.py#L666
        if not return_dict:
            output = (lm_logits,) + decoder_outputs[1:] + encoder_outputs
            return ((loss,) + output) if loss is not None else output
        return Seq2SeqLMOutput(
            loss=loss,
            logits=lm_logits,
            past_key_values=decoder_outputs.past_key_values,
            decoder_hidden_states=decoder_outputs.hidden_states,
            decoder_attentions=decoder_outputs.attentions,
            cross_attentions=decoder_outputs.cross_attentions,
            encoder_last_hidden_state=encoder_outputs.last_hidden_state,
            encoder_hidden_states=encoder_outputs.hidden_states,
            encoder_attentions=encoder_outputs.attentions,
        )
    def prepare_inputs_for_generation(
        self,
        input_ids,
        past=None,
        attention_mask=None,
        head_mask=None,
        decoder_head_mask=None,
        cross_attn_head_mask=None,
        use_cache=None,
        encoder_outputs=None,
        **kwargs
    ):
        """Assemble the keyword arguments for one decoding step inside `generate`."""
        # cut decoder_input_ids if past is used
        if past is not None:
            input_ids = input_ids[:, -1:]
        return {
            "decoder_input_ids": input_ids,
            "past_key_values": past,
            "encoder_outputs": encoder_outputs,
            "attention_mask": attention_mask,
            "head_mask": head_mask,
            "decoder_head_mask": decoder_head_mask,
            "cross_attn_head_mask": cross_attn_head_mask,
            "use_cache": use_cache,
        }
    def prepare_decoder_input_ids_from_labels(self, labels: torch.Tensor):
        """Build `decoder_input_ids` by right-shifting `labels` (see `_shift_right`)."""
        return self._shift_right(labels)
    def _reorder_cache(self, past, beam_idx):
        """Reorder the cached key/value states along the batch dimension to follow `beam_idx` during beam search."""
        # if decoder past is not included in output
        # speedy decoding is disabled and no need to reorder
        if past is None:
            logger.warning("You might want to consider setting `use_cache=True` to speed up decoding")
            return past
        reordered_decoder_past = ()
        for layer_past_states in past:
            # get the correct batch idx from layer past batch dim
            # batch dim of `past` is at 2nd position
            reordered_layer_past_states = ()
            for layer_past_state in layer_past_states:
                # need to set correct `past` for each of the four key / value states
                reordered_layer_past_states = reordered_layer_past_states + (
                    layer_past_state.index_select(0, beam_idx.to(layer_past_state.device)),
                )
            # Sanity checks: reordering must preserve shape and the number of cached states.
            assert reordered_layer_past_states[0].shape == layer_past_states[0].shape
            assert len(reordered_layer_past_states) == len(layer_past_states)
            reordered_decoder_past = reordered_decoder_past + (reordered_layer_past_states,)
        return reordered_decoder_past
@add_start_docstrings(
    "The bare LONGT5 Model transformer outputting encoder's raw hidden-states without any specific head on top.",
    LONGT5_START_DOCSTRING,
)
class LongT5EncoderModel(LongT5PreTrainedModel):
    # Encoder-side embedding weights may legitimately be absent from checkpoints.
    authorized_missing_keys = [
        r"encoder.embed_tokens.weight",
    ]

    def __init__(self, config: LongT5Config):
        super().__init__(config)
        # Token embedding table, shared with the encoder stack below.
        self.shared = nn.Embedding(config.vocab_size, config.d_model)

        enc_config = copy.deepcopy(config)
        enc_config.use_cache = False
        enc_config.is_encoder_decoder = False
        self.encoder = LongT5Stack(enc_config, self.shared)

        # Initialize weights and apply final processing
        self.post_init()

    def get_input_embeddings(self):
        return self.shared

    def set_input_embeddings(self, new_embeddings):
        # Keep the encoder stack in sync with the shared embedding table.
        self.shared = new_embeddings
        self.encoder.set_input_embeddings(new_embeddings)

    def get_encoder(self):
        return self.encoder

    def _prune_heads(self, heads_to_prune):
        """
        Prunes heads of the model. heads_to_prune: dict of {layer_num: list of heads to prune in this layer} See base
        class PreTrainedModel
        """
        for layer_index, head_list in heads_to_prune.items():
            self.encoder.layer[layer_index].attention.prune_heads(head_list)

    @add_start_docstrings_to_model_forward(LONGT5_ENCODER_INPUTS_DOCSTRING)
    @replace_return_docstrings(output_type=BaseModelOutput, config_class=_CONFIG_FOR_DOC)
    def forward(
        self,
        input_ids: Optional[torch.LongTensor] = None,
        attention_mask: Optional[torch.FloatTensor] = None,
        head_mask: Optional[torch.FloatTensor] = None,
        inputs_embeds: Optional[torch.FloatTensor] = None,
        output_attentions: Optional[bool] = None,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
    ) -> Union[Tuple[torch.FloatTensor], BaseModelOutput]:
        r"""
        Returns:

        Example:

        ```python
        >>> from transformers import AutoTokenizer, LongT5ForConditionalGeneration

        >>> tokenizer = AutoTokenizer.from_pretrained("google/long-t5-local-base")
        >>> model = LongT5EncoderModel.from_pretrained("google/long-t5-local-base")
        >>> input_ids = tokenizer(
        ...     100 * "Studies have been shown that owning a dog is good for you ", return_tensors="pt"
        ... ).input_ids  # Batch size 1
        >>> outputs = model(input_ids=input_ids)
        >>> last_hidden_states = outputs.last_hidden_state
        ```"""
        if return_dict is None:
            return_dict = self.config.use_return_dict

        # This model is encoder-only, so the encoder stack's output is the
        # model output.
        return self.encoder(
            input_ids=input_ids,
            attention_mask=attention_mask,
            inputs_embeds=inputs_embeds,
            head_mask=head_mask,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
        )
|
[
"torch.nn.Dropout",
"torch.nn.Embedding",
"torch.empty",
"torch.cat",
"torch.nn.functional.dropout",
"torch.full",
"torch.arange",
"torch.nn.functional.pad",
"torch.ones",
"torch.nn.Linear",
"torch.zeros",
"math.log",
"torch.matmul",
"copy.deepcopy",
"torch.where",
"torch.nn.ModuleList",
"torch.zeros_like",
"torch.einsum",
"torch.floor",
"torch.rsqrt",
"torch.max",
"torch.all",
"torch.ones_like",
"torch.eq",
"torch.full_like",
"torch.nn.CrossEntropyLoss",
"torch.cumsum",
"warnings.warn",
"torch.abs",
"torch.tensor",
"torch.logical_and"
] |
[((2298, 2361), 'torch.nn.functional.pad', 'nn.functional.pad', (['x'], {'pad': 'pad', 'mode': '"""constant"""', 'value': 'pad_value'}), "(x, pad=pad, mode='constant', value=pad_value)\n", (2315, 2361), False, 'from torch import nn\n'), ((3668, 3731), 'torch.nn.functional.pad', 'nn.functional.pad', (['x'], {'pad': 'pad', 'mode': '"""constant"""', 'value': 'pad_value'}), "(x, pad=pad, mode='constant', value=pad_value)\n", (3685, 3731), False, 'from torch import nn\n'), ((4192, 4232), 'torch.cat', 'torch.cat', (['blocks_list'], {'dim': 'sequence_dim'}), '(blocks_list, dim=sequence_dim)\n', (4201, 4232), False, 'import torch\n'), ((4395, 4441), 'torch.arange', 'torch.arange', (['(3 * block_len)'], {'dtype': 'torch.int32'}), '(3 * block_len, dtype=torch.int32)\n', (4407, 4441), False, 'import torch\n'), ((5149, 5203), 'torch.logical_and', 'torch.logical_and', (['local_attention_mask', 'locality_mask'], {}), '(local_attention_mask, locality_mask)\n', (5166, 5203), False, 'import torch\n'), ((5890, 5958), 'torch.logical_and', 'torch.logical_and', (['_blocked_attention_mask', '_3blocked_attention_mask'], {}), '(_blocked_attention_mask, _3blocked_attention_mask)\n', (5907, 5958), False, 'import torch\n'), ((7772, 7850), 'torch.tensor', 'torch.tensor', (['(-1)'], {'dtype': 'global_block_ids.dtype', 'device': 'global_block_ids.device'}), '(-1, dtype=global_block_ids.dtype, device=global_block_ids.device)\n', (7784, 7850), False, 'import torch\n'), ((7874, 7988), 'torch.where', 'torch.where', (['(global_block_ids > _global_block_ids_lower_bound)', 'global_block_ids', '_global_block_ids_lower_bound'], {}), '(global_block_ids > _global_block_ids_lower_bound,\n global_block_ids, _global_block_ids_lower_bound)\n', (7885, 7988), False, 'import torch\n'), ((8775, 8839), 'torch.where', 'torch.where', (['(global_segment_ids <= _sequence_block_ids_max)', '(1)', '(0)'], {}), '(global_segment_ids <= _sequence_block_ids_max, 1, 0)\n', (8786, 8839), False, 'import torch\n'), ((9280, 
9333), 'torch.arange', 'torch.arange', (['global_seq_len'], {'device': 'block_ids.device'}), '(global_seq_len, device=block_ids.device)\n', (9292, 9333), False, 'import torch\n'), ((2166, 2203), 'torch.zeros', 'torch.zeros', (['new_shape'], {'dtype': 'x.dtype'}), '(new_shape, dtype=x.dtype)\n', (2177, 2203), False, 'import torch\n'), ((3078, 3135), 'torch.empty', 'torch.empty', (['output_shape'], {'dtype': 'x.dtype', 'device': 'x.device'}), '(output_shape, dtype=x.dtype, device=x.device)\n', (3089, 3135), False, 'import torch\n'), ((4975, 5007), 'torch.abs', 'torch.abs', (['relative_position_ids'], {}), '(relative_position_ids)\n', (4984, 5007), False, 'import torch\n'), ((7131, 7176), 'torch.logical_and', 'torch.logical_and', (['block_ends', '(block_ids >= 0)'], {}), '(block_ends, block_ids >= 0)\n', (7148, 7176), False, 'import torch\n'), ((7283, 7343), 'torch.where', 'torch.where', (['(block_ids < full_blocks)', 'block_ids', 'full_blocks'], {}), '(block_ids < full_blocks, block_ids, full_blocks)\n', (7294, 7343), False, 'import torch\n'), ((7393, 7454), 'torch.ones_like', 'torch.ones_like', (['attention_mask'], {'device': 'attention_mask.device'}), '(attention_mask, device=attention_mask.device)\n', (7408, 7454), False, 'import torch\n'), ((7498, 7536), 'torch.cumsum', 'torch.cumsum', (['fixed_block_mask'], {'axis': '(1)'}), '(fixed_block_mask, axis=1)\n', (7510, 7536), False, 'import torch\n'), ((8482, 8575), 'torch.zeros', 'torch.zeros', (['batch_size', '(0)'], {'dtype': 'global_block_ids.dtype', 'device': 'global_block_ids.device'}), '(batch_size, 0, dtype=global_block_ids.dtype, device=\n global_block_ids.device)\n', (8493, 8575), False, 'import torch\n'), ((9766, 9842), 'torch.tensor', 'torch.tensor', (['global_seq_len'], {'dtype': 'block_ids.dtype', 'device': 'block_ids.device'}), '(global_seq_len, dtype=block_ids.dtype, device=block_ids.device)\n', (9778, 9842), False, 'import torch\n'), ((11850, 11900), 'torch.nn.Linear', 'nn.Linear', (['config.d_model', 
'config.d_ff'], {'bias': '(False)'}), '(config.d_model, config.d_ff, bias=False)\n', (11859, 11900), False, 'from torch import nn\n'), ((11919, 11969), 'torch.nn.Linear', 'nn.Linear', (['config.d_ff', 'config.d_model'], {'bias': '(False)'}), '(config.d_ff, config.d_model, bias=False)\n', (11928, 11969), False, 'from torch import nn\n'), ((11993, 12024), 'torch.nn.Dropout', 'nn.Dropout', (['config.dropout_rate'], {}), '(config.dropout_rate)\n', (12003, 12024), False, 'from torch import nn\n'), ((12558, 12608), 'torch.nn.Linear', 'nn.Linear', (['config.d_model', 'config.d_ff'], {'bias': '(False)'}), '(config.d_model, config.d_ff, bias=False)\n', (12567, 12608), False, 'from torch import nn\n'), ((12629, 12679), 'torch.nn.Linear', 'nn.Linear', (['config.d_model', 'config.d_ff'], {'bias': '(False)'}), '(config.d_model, config.d_ff, bias=False)\n', (12638, 12679), False, 'from torch import nn\n'), ((12698, 12748), 'torch.nn.Linear', 'nn.Linear', (['config.d_ff', 'config.d_model'], {'bias': '(False)'}), '(config.d_ff, config.d_model, bias=False)\n', (12707, 12748), False, 'from torch import nn\n'), ((12772, 12803), 'torch.nn.Dropout', 'nn.Dropout', (['config.dropout_rate'], {}), '(config.dropout_rate)\n', (12782, 12803), False, 'from torch import nn\n'), ((13646, 13677), 'torch.nn.Dropout', 'nn.Dropout', (['config.dropout_rate'], {}), '(config.dropout_rate)\n', (13656, 13677), False, 'from torch import nn\n'), ((14768, 14819), 'torch.nn.Linear', 'nn.Linear', (['self.d_model', 'self.inner_dim'], {'bias': '(False)'}), '(self.d_model, self.inner_dim, bias=False)\n', (14777, 14819), False, 'from torch import nn\n'), ((14837, 14888), 'torch.nn.Linear', 'nn.Linear', (['self.d_model', 'self.inner_dim'], {'bias': '(False)'}), '(self.d_model, self.inner_dim, bias=False)\n', (14846, 14888), False, 'from torch import nn\n'), ((14906, 14957), 'torch.nn.Linear', 'nn.Linear', (['self.d_model', 'self.inner_dim'], {'bias': '(False)'}), '(self.d_model, self.inner_dim, bias=False)\n', 
(14915, 14957), False, 'from torch import nn\n'), ((14975, 15026), 'torch.nn.Linear', 'nn.Linear', (['self.inner_dim', 'self.d_model'], {'bias': '(False)'}), '(self.inner_dim, self.d_model, bias=False)\n', (14984, 15026), False, 'from torch import nn\n'), ((18381, 18449), 'torch.where', 'torch.where', (['is_small', 'relative_position', 'relative_position_if_large'], {}), '(is_small, relative_position, relative_position_if_large)\n', (18392, 18449), False, 'import torch\n'), ((23759, 23834), 'torch.nn.functional.dropout', 'nn.functional.dropout', (['attn_weights'], {'p': 'self.dropout', 'training': 'self.training'}), '(attn_weights, p=self.dropout, training=self.training)\n', (23780, 23834), False, 'from torch import nn\n'), ((25345, 25396), 'torch.nn.Linear', 'nn.Linear', (['self.d_model', 'self.inner_dim'], {'bias': '(False)'}), '(self.d_model, self.inner_dim, bias=False)\n', (25354, 25396), False, 'from torch import nn\n'), ((25414, 25465), 'torch.nn.Linear', 'nn.Linear', (['self.d_model', 'self.inner_dim'], {'bias': '(False)'}), '(self.d_model, self.inner_dim, bias=False)\n', (25423, 25465), False, 'from torch import nn\n'), ((25483, 25534), 'torch.nn.Linear', 'nn.Linear', (['self.d_model', 'self.inner_dim'], {'bias': '(False)'}), '(self.d_model, self.inner_dim, bias=False)\n', (25492, 25534), False, 'from torch import nn\n'), ((25552, 25603), 'torch.nn.Linear', 'nn.Linear', (['self.inner_dim', 'self.d_model'], {'bias': '(False)'}), '(self.inner_dim, self.d_model, bias=False)\n', (25561, 25603), False, 'from torch import nn\n'), ((29126, 29194), 'torch.where', 'torch.where', (['is_small', 'relative_position', 'relative_position_if_large'], {}), '(is_small, relative_position, relative_position_if_large)\n', (29137, 29194), False, 'import torch\n'), ((29353, 29457), 'torch.arange', 'torch.arange', (['(3 * block_length)'], {'dtype': 'torch.long', 'device': 'self.relative_attention_bias.weight.device'}), '(3 * block_length, dtype=torch.long, device=self.\n 
relative_attention_bias.weight.device)\n', (29365, 29457), False, 'import torch\n'), ((31669, 31732), 'torch.einsum', 'torch.einsum', (['"""...qhd,...khd->...hqk"""', 'query_states', 'key_states'], {}), "('...qhd,...khd->...hqk', query_states, key_states)\n", (31681, 31732), False, 'import torch\n'), ((32985, 33060), 'torch.nn.functional.dropout', 'nn.functional.dropout', (['attn_weights'], {'p': 'self.dropout', 'training': 'self.training'}), '(attn_weights, p=self.dropout, training=self.training)\n', (33006, 33060), False, 'from torch import nn\n'), ((34606, 34657), 'torch.nn.Linear', 'nn.Linear', (['self.d_model', 'self.inner_dim'], {'bias': '(False)'}), '(self.d_model, self.inner_dim, bias=False)\n', (34615, 34657), False, 'from torch import nn\n'), ((34675, 34726), 'torch.nn.Linear', 'nn.Linear', (['self.d_model', 'self.inner_dim'], {'bias': '(False)'}), '(self.d_model, self.inner_dim, bias=False)\n', (34684, 34726), False, 'from torch import nn\n'), ((34744, 34795), 'torch.nn.Linear', 'nn.Linear', (['self.d_model', 'self.inner_dim'], {'bias': '(False)'}), '(self.d_model, self.inner_dim, bias=False)\n', (34753, 34795), False, 'from torch import nn\n'), ((34813, 34864), 'torch.nn.Linear', 'nn.Linear', (['self.inner_dim', 'self.d_model'], {'bias': '(False)'}), '(self.inner_dim, self.d_model, bias=False)\n', (34822, 34864), False, 'from torch import nn\n'), ((38718, 38786), 'torch.where', 'torch.where', (['is_small', 'relative_position', 'relative_position_if_large'], {}), '(is_small, relative_position, relative_position_if_large)\n', (38729, 38786), False, 'import torch\n'), ((38945, 39049), 'torch.arange', 'torch.arange', (['(3 * block_length)'], {'dtype': 'torch.long', 'device': 'self.relative_attention_bias.weight.device'}), '(3 * block_length, dtype=torch.long, device=self.\n relative_attention_bias.weight.device)\n', (38957, 39049), False, 'import torch\n'), ((40144, 40201), 'torch.where', 'torch.where', (['(side_attention_mask > 0)', '(0.0)', 
'(-10000000000.0)'], {}), '(side_attention_mask > 0, 0.0, -10000000000.0)\n', (40155, 40201), False, 'import torch\n'), ((43907, 43954), 'torch.cat', 'torch.cat', (['[key_states, side_key_states]'], {'dim': '(2)'}), '([key_states, side_key_states], dim=2)\n', (43916, 43954), False, 'import torch\n'), ((43978, 44029), 'torch.cat', 'torch.cat', (['[value_states, side_value_states]'], {'dim': '(2)'}), '([value_states, side_value_states], dim=2)\n', (43987, 44029), False, 'import torch\n'), ((44152, 44215), 'torch.einsum', 'torch.einsum', (['"""...qhd,...khd->...hqk"""', 'query_states', 'key_states'], {}), "('...qhd,...khd->...hqk', query_states, key_states)\n", (44164, 44215), False, 'import torch\n'), ((46533, 46608), 'torch.nn.functional.dropout', 'nn.functional.dropout', (['attn_weights'], {'p': 'self.dropout', 'training': 'self.training'}), '(attn_weights, p=self.dropout, training=self.training)\n', (46554, 46608), False, 'from torch import nn\n'), ((47666, 47697), 'torch.nn.Dropout', 'nn.Dropout', (['config.dropout_rate'], {}), '(config.dropout_rate)\n', (47676, 47697), False, 'from torch import nn\n'), ((48946, 48977), 'torch.nn.Dropout', 'nn.Dropout', (['config.dropout_rate'], {}), '(config.dropout_rate)\n', (48956, 48977), False, 'from torch import nn\n'), ((50236, 50267), 'torch.nn.Dropout', 'nn.Dropout', (['config.dropout_rate'], {}), '(config.dropout_rate)\n', (50246, 50267), False, 'from torch import nn\n'), ((51443, 51474), 'torch.nn.Dropout', 'nn.Dropout', (['config.dropout_rate'], {}), '(config.dropout_rate)\n', (51453, 51474), False, 'from torch import nn\n'), ((53200, 53215), 'torch.nn.ModuleList', 'nn.ModuleList', ([], {}), '()\n', (53213, 53215), False, 'from torch import nn\n'), ((57599, 57625), 'torch.tensor', 'torch.tensor', (['DUMMY_INPUTS'], {}), '(DUMMY_INPUTS)\n', (57611, 57625), False, 'import torch\n'), ((57647, 57671), 'torch.tensor', 'torch.tensor', (['DUMMY_MASK'], {}), '(DUMMY_MASK)\n', (57659, 57671), False, 'import torch\n'), ((63554, 
63585), 'torch.nn.Dropout', 'nn.Dropout', (['config.dropout_rate'], {}), '(config.dropout_rate)\n', (63564, 63585), False, 'from torch import nn\n'), ((83829, 83876), 'torch.nn.Embedding', 'nn.Embedding', (['config.vocab_size', 'config.d_model'], {}), '(config.vocab_size, config.d_model)\n', (83841, 83876), False, 'from torch import nn\n'), ((83903, 83924), 'copy.deepcopy', 'copy.deepcopy', (['config'], {}), '(config)\n', (83916, 83924), False, 'import copy\n'), ((84148, 84169), 'copy.deepcopy', 'copy.deepcopy', (['config'], {}), '(config)\n', (84161, 84169), False, 'import copy\n'), ((90411, 90458), 'torch.nn.Embedding', 'nn.Embedding', (['config.vocab_size', 'config.d_model'], {}), '(config.vocab_size, config.d_model)\n', (90423, 90458), False, 'from torch import nn\n'), ((90485, 90506), 'copy.deepcopy', 'copy.deepcopy', (['config'], {}), '(config)\n', (90498, 90506), False, 'import copy\n'), ((90730, 90751), 'copy.deepcopy', 'copy.deepcopy', (['config'], {}), '(config)\n', (90743, 90751), False, 'import copy\n'), ((90993, 91049), 'torch.nn.Linear', 'nn.Linear', (['config.d_model', 'config.vocab_size'], {'bias': '(False)'}), '(config.d_model, config.vocab_size, bias=False)\n', (91002, 91049), False, 'from torch import nn\n'), ((100372, 100419), 'torch.nn.Embedding', 'nn.Embedding', (['config.vocab_size', 'config.d_model'], {}), '(config.vocab_size, config.d_model)\n', (100384, 100419), False, 'from torch import nn\n'), ((100446, 100467), 'copy.deepcopy', 'copy.deepcopy', (['config'], {}), '(config)\n', (100459, 100467), False, 'import copy\n'), ((7567, 7615), 'torch.where', 'torch.where', (['(attention_mask != 0.0)', '(1.0)', '(-1000.0)'], {}), '(attention_mask != 0.0, 1.0, -1000.0)\n', (7578, 7615), False, 'import torch\n'), ((7666, 7708), 'torch.floor', 'torch.floor', (['(mask + fixed_block_mask - 1.0)'], {}), '(mask + fixed_block_mask - 1.0)\n', (7677, 7708), False, 'import torch\n'), ((8631, 8666), 'torch.ones', 'torch.ones', (['batch_size', 'num_globals'], 
{}), '(batch_size, num_globals)\n', (8641, 8666), False, 'import torch\n'), ((10401, 10424), 'torch.ones', 'torch.ones', (['hidden_size'], {}), '(hidden_size)\n', (10411, 10424), False, 'import torch\n'), ((10976, 11021), 'torch.rsqrt', 'torch.rsqrt', (['(variance + self.variance_epsilon)'], {}), '(variance + self.variance_epsilon)\n', (10987, 11021), False, 'import torch\n'), ((15116, 15179), 'torch.nn.Embedding', 'nn.Embedding', (['self.relative_attention_num_buckets', 'self.n_heads'], {}), '(self.relative_attention_num_buckets, self.n_heads)\n', (15128, 15179), False, 'from torch import nn\n'), ((17504, 17532), 'torch.abs', 'torch.abs', (['relative_position'], {}), '(relative_position)\n', (17513, 17532), False, 'import torch\n'), ((18281, 18341), 'torch.full_like', 'torch.full_like', (['relative_position_if_large', '(num_buckets - 1)'], {}), '(relative_position_if_large, num_buckets - 1)\n', (18296, 18341), False, 'import torch\n'), ((18720, 18779), 'torch.arange', 'torch.arange', (['query_length'], {'dtype': 'torch.long', 'device': 'device'}), '(query_length, dtype=torch.long, device=device)\n', (18732, 18779), False, 'import torch\n'), ((18815, 18872), 'torch.arange', 'torch.arange', (['key_length'], {'dtype': 'torch.long', 'device': 'device'}), '(key_length, dtype=torch.long, device=device)\n', (18827, 18872), False, 'import torch\n'), ((24071, 24111), 'torch.matmul', 'torch.matmul', (['attn_weights', 'value_states'], {}), '(attn_weights, value_states)\n', (24083, 24111), False, 'import torch\n'), ((25693, 25756), 'torch.nn.Embedding', 'nn.Embedding', (['self.relative_attention_num_buckets', 'self.n_heads'], {}), '(self.relative_attention_num_buckets, self.n_heads)\n', (25705, 25756), False, 'from torch import nn\n'), ((28249, 28277), 'torch.abs', 'torch.abs', (['relative_position'], {}), '(relative_position)\n', (28258, 28277), False, 'import torch\n'), ((29026, 29086), 'torch.full_like', 'torch.full_like', (['relative_position_if_large', '(num_buckets - 
1)'], {}), '(relative_position_if_large, num_buckets - 1)\n', (29041, 29086), False, 'import torch\n'), ((33286, 33351), 'torch.einsum', 'torch.einsum', (['"""...hqk,...khd->...qhd"""', 'attn_weights', 'value_states'], {}), "('...hqk,...khd->...qhd', attn_weights, value_states)\n", (33298, 33351), False, 'import torch\n'), ((34954, 35017), 'torch.nn.Embedding', 'nn.Embedding', (['self.relative_attention_num_buckets', 'self.n_heads'], {}), '(self.relative_attention_num_buckets, self.n_heads)\n', (34966, 35017), False, 'from torch import nn\n'), ((35261, 35324), 'torch.nn.Embedding', 'nn.Embedding', (['self.relative_attention_num_buckets', 'self.n_heads'], {}), '(self.relative_attention_num_buckets, self.n_heads)\n', (35273, 35324), False, 'from torch import nn\n'), ((37841, 37869), 'torch.abs', 'torch.abs', (['relative_position'], {}), '(relative_position)\n', (37850, 37869), False, 'import torch\n'), ((38618, 38678), 'torch.full_like', 'torch.full_like', (['relative_position_if_large', '(num_buckets - 1)'], {}), '(relative_position_if_large, num_buckets - 1)\n', (38633, 38678), False, 'import torch\n'), ((40042, 40099), 'torch.eq', 'torch.eq', (['mask[..., None]', 'global_segment_ids[:, None, :]'], {}), '(mask[..., None], global_segment_ids[:, None, :])\n', (40050, 40099), False, 'import torch\n'), ((44553, 44611), 'torch.where', 'torch.where', (['(local_attention_mask > 0)', '(0.0)', '(-10000000000.0)'], {}), '(local_attention_mask > 0, 0.0, -10000000000.0)\n', (44564, 44611), False, 'import torch\n'), ((46250, 46304), 'torch.cat', 'torch.cat', (['[position_bias, side_position_bias]'], {'dim': '(-1)'}), '([position_bias, side_position_bias], dim=-1)\n', (46259, 46304), False, 'import torch\n'), ((46834, 46899), 'torch.einsum', 'torch.einsum', (['"""...hqk,...khd->...qhd"""', 'attn_weights', 'value_states'], {}), "('...hqk,...khd->...qhd', attn_weights, value_states)\n", (46846, 46899), False, 'import torch\n'), ((62206, 62269), 'torch.full', 'torch.full', 
(['(input_ids.shape[:-1] + (1,))', 'decoder_start_token_id'], {}), '(input_ids.shape[:-1] + (1,), decoder_start_token_id)\n', (62216, 62269), False, 'import torch\n'), ((62302, 62361), 'torch.cat', 'torch.cat', (['[shifted_input_ids, input_ids[..., :-1]]'], {'dim': '(-1)'}), '([shifted_input_ids, input_ids[..., :-1]], dim=-1)\n', (62311, 62361), False, 'import torch\n'), ((66521, 66614), 'torch.ones', 'torch.ones', (['batch_size', 'encoder_seq_length'], {'device': 'inputs_embeds.device', 'dtype': 'torch.long'}), '(batch_size, encoder_seq_length, device=inputs_embeds.device,\n dtype=torch.long)\n', (66531, 66614), False, 'import torch\n'), ((96901, 96936), 'torch.nn.CrossEntropyLoss', 'CrossEntropyLoss', ([], {'ignore_index': '(-100)'}), '(ignore_index=-100)\n', (96917, 96936), False, 'from torch.nn import CrossEntropyLoss\n'), ((6984, 7005), 'torch.arange', 'torch.arange', (['seq_len'], {}), '(seq_len)\n', (6996, 7005), False, 'import torch\n'), ((22795, 22901), 'torch.zeros', 'torch.zeros', (['(1, self.n_heads, real_seq_length, key_length)'], {'device': 'scores.device', 'dtype': 'scores.dtype'}), '((1, self.n_heads, real_seq_length, key_length), device=scores.\n device, dtype=scores.dtype)\n', (22806, 22901), False, 'import torch\n'), ((32016, 32131), 'torch.zeros', 'torch.zeros', (['(1, 1, self.n_heads, self.block_len, 3 * self.block_len)'], {'device': 'scores.device', 'dtype': 'scores.dtype'}), '((1, 1, self.n_heads, self.block_len, 3 * self.block_len),\n device=scores.device, dtype=scores.dtype)\n', (32027, 32131), False, 'import torch\n'), ((32525, 32567), 'torch.where', 'torch.where', (['(mask > 0)', '(0.0)', '(-10000000000.0)'], {}), '(mask > 0, 0.0, -10000000000.0)\n', (32536, 32567), False, 'import torch\n'), ((41902, 41938), 'torch.ones', 'torch.ones', (['hidden_states.shape[:-1]'], {}), '(hidden_states.shape[:-1])\n', (41912, 41938), False, 'import torch\n'), ((44856, 44971), 'torch.zeros', 'torch.zeros', (['(1, 1, self.n_heads, self.block_len, 3 * 
self.block_len)'], {'device': 'scores.device', 'dtype': 'scores.dtype'}), '((1, 1, self.n_heads, self.block_len, 3 * self.block_len),\n device=scores.device, dtype=scores.dtype)\n', (44867, 44971), False, 'import torch\n'), ((45671, 45705), 'torch.ones', 'torch.ones', (['batch_size', 'seq_length'], {}), '(batch_size, seq_length)\n', (45681, 45705), False, 'import torch\n'), ((62834, 62867), 'torch.all', 'torch.all', (['(shifted_input_ids >= 0)'], {}), '(shifted_input_ids >= 0)\n', (62843, 62867), False, 'import torch\n'), ((68129, 68190), 'torch.ones', 'torch.ones', (['encoder_hidden_shape'], {'device': 'inputs_embeds.device'}), '(encoder_hidden_shape, device=inputs_embeds.device)\n', (68139, 68190), False, 'import torch\n'), ((87566, 87619), 'warnings.warn', 'warnings.warn', (['__HEAD_MASK_WARNING_MSG', 'FutureWarning'], {}), '(__HEAD_MASK_WARNING_MSG, FutureWarning)\n', (87579, 87619), False, 'import warnings\n'), ((94515, 94568), 'warnings.warn', 'warnings.warn', (['__HEAD_MASK_WARNING_MSG', 'FutureWarning'], {}), '(__HEAD_MASK_WARNING_MSG, FutureWarning)\n', (94528, 94568), False, 'import warnings\n'), ((17609, 17644), 'torch.zeros_like', 'torch.zeros_like', (['relative_position'], {}), '(relative_position)\n', (17625, 17644), False, 'import torch\n'), ((21797, 21846), 'torch.cat', 'torch.cat', (['[past_key_value, hidden_states]'], {'dim': '(2)'}), '([past_key_value, hidden_states], dim=2)\n', (21806, 21846), False, 'import torch\n'), ((28354, 28389), 'torch.zeros_like', 'torch.zeros_like', (['relative_position'], {}), '(relative_position)\n', (28370, 28389), False, 'import torch\n'), ((37946, 37981), 'torch.zeros_like', 'torch.zeros_like', (['relative_position'], {}), '(relative_position)\n', (37962, 37981), False, 'import torch\n'), ((66254, 66293), 'torch.ones', 'torch.ones', (['batch_size', 'mask_seq_length'], {}), '(batch_size, mask_seq_length)\n', (66264, 66293), False, 'import torch\n'), ((8356, 8391), 'torch.max', 'torch.max', (['global_block_ids'], 
{'dim': '(-1)'}), '(global_block_ids, dim=-1)\n', (8365, 8391), False, 'import torch\n'), ((18093, 18127), 'math.log', 'math.log', (['(max_distance / max_exact)'], {}), '(max_distance / max_exact)\n', (18101, 18127), False, 'import math\n'), ((28838, 28872), 'math.log', 'math.log', (['(max_distance / max_exact)'], {}), '(max_distance / max_exact)\n', (28846, 28872), False, 'import math\n'), ((38430, 38464), 'math.log', 'math.log', (['(max_distance / max_exact)'], {}), '(max_distance / max_exact)\n', (38438, 38464), False, 'import math\n')]
|
"""
demo05_gridsearch.py 网格搜索
"""
import numpy as np
import sklearn.model_selection as ms
import sklearn.svm as svm
import sklearn.metrics as sm
import matplotlib.pyplot as mp
data = np.loadtxt('../ml_data/multiple2.txt',
delimiter=',', dtype='f8')
x = data[:, :-1]
y = data[:, -1]
# 选择svm做分类
train_x, test_x, train_y, test_y = \
ms.train_test_split(x, y, test_size=0.25,
random_state=5)
model = svm.SVC(probability=True)
# 根据网格搜索选择最优模型
params = [{'kernel':['linear'],'C':[1, 10, 100, 1000]},
{'kernel':['poly'], 'C':[1], 'degree':[2, 3]},
{'kernel':['rbf'], 'C':[1,10,100,1000],
'gamma':[1, 0.1, 0.01, 0.001]}]
model = ms.GridSearchCV(model, params, cv=5)
model.fit(train_x, train_y)
print(model.best_params_)
print(model.best_score_)
print(model.best_estimator_)
# 输出每个超参数组合信息及其得分
for param, score in zip(
model.cv_results_['params'],
model.cv_results_['mean_test_score']):
print(param, '->', score)
pred_test_y = model.predict(test_x)
print(sm.classification_report(test_y, pred_test_y))
# 新增样本
prob_x = np.array([
[2, 1.5],
[8, 9],
[4.8, 5.2],
[4, 4],
[2.5, 7],
[7.6, 2],
[5.4, 5.9]])
pred_prob_y = model.predict(prob_x)
probs = model.predict_proba(prob_x)
print(probs)
# 绘制分类边界线
n = 500
l, r = x[:, 0].min() - 1, x[:, 0].max() + 1
b, t = x[:, 1].min() - 1, x[:, 1].max() + 1
grid_x = np.meshgrid(np.linspace(l, r, n),
np.linspace(b, t, n))
flat_x = np.column_stack((grid_x[0].ravel(), grid_x[1].ravel()))
flat_y = model.predict(flat_x)
grid_y = flat_y.reshape(grid_x[0].shape)
mp.figure('Probability', facecolor='lightgray')
mp.title('Probability', fontsize=20)
mp.xlabel('x', fontsize=14)
mp.ylabel('y', fontsize=14)
mp.tick_params(labelsize=10)
mp.pcolormesh(grid_x[0], grid_x[1], grid_y,
cmap='gray')
mp.scatter(test_x[:, 0], test_x[:, 1], c=test_y, cmap='brg', s=80)
mp.scatter(prob_x[:,0], prob_x[:,1], c=pred_prob_y,
cmap='jet_r', s=80, marker='D')
for i in range(len(probs)):
mp.annotate(
'{}% {}%'.format(
round(probs[i, 0] * 100, 2),
round(probs[i, 1] * 100, 2)),
xy=(prob_x[i, 0], prob_x[i, 1]),
xytext=(12, -12),
textcoords='offset points',
horizontalalignment='left',
verticalalignment='top',
fontsize=9,
bbox={'boxstyle': 'round,pad=0.6',
'fc': 'orange', 'alpha': 0.8})
mp.show()
|
[
"matplotlib.pyplot.title",
"sklearn.model_selection.GridSearchCV",
"matplotlib.pyplot.show",
"sklearn.model_selection.train_test_split",
"matplotlib.pyplot.scatter",
"sklearn.metrics.classification_report",
"matplotlib.pyplot.figure",
"numpy.array",
"numpy.loadtxt",
"matplotlib.pyplot.pcolormesh",
"sklearn.svm.SVC",
"matplotlib.pyplot.tick_params",
"matplotlib.pyplot.ylabel",
"numpy.linspace",
"matplotlib.pyplot.xlabel"
] |
[((185, 250), 'numpy.loadtxt', 'np.loadtxt', (['"""../ml_data/multiple2.txt"""'], {'delimiter': '""","""', 'dtype': '"""f8"""'}), "('../ml_data/multiple2.txt', delimiter=',', dtype='f8')\n", (195, 250), True, 'import numpy as np\n'), ((338, 395), 'sklearn.model_selection.train_test_split', 'ms.train_test_split', (['x', 'y'], {'test_size': '(0.25)', 'random_state': '(5)'}), '(x, y, test_size=0.25, random_state=5)\n', (357, 395), True, 'import sklearn.model_selection as ms\n'), ((410, 435), 'sklearn.svm.SVC', 'svm.SVC', ([], {'probability': '(True)'}), '(probability=True)\n', (417, 435), True, 'import sklearn.svm as svm\n'), ((649, 685), 'sklearn.model_selection.GridSearchCV', 'ms.GridSearchCV', (['model', 'params'], {'cv': '(5)'}), '(model, params, cv=5)\n', (664, 685), True, 'import sklearn.model_selection as ms\n'), ((1045, 1130), 'numpy.array', 'np.array', (['[[2, 1.5], [8, 9], [4.8, 5.2], [4, 4], [2.5, 7], [7.6, 2], [5.4, 5.9]]'], {}), '([[2, 1.5], [8, 9], [4.8, 5.2], [4, 4], [2.5, 7], [7.6, 2], [5.4, 5.9]]\n )\n', (1053, 1130), True, 'import numpy as np\n'), ((1576, 1623), 'matplotlib.pyplot.figure', 'mp.figure', (['"""Probability"""'], {'facecolor': '"""lightgray"""'}), "('Probability', facecolor='lightgray')\n", (1585, 1623), True, 'import matplotlib.pyplot as mp\n'), ((1624, 1660), 'matplotlib.pyplot.title', 'mp.title', (['"""Probability"""'], {'fontsize': '(20)'}), "('Probability', fontsize=20)\n", (1632, 1660), True, 'import matplotlib.pyplot as mp\n'), ((1661, 1688), 'matplotlib.pyplot.xlabel', 'mp.xlabel', (['"""x"""'], {'fontsize': '(14)'}), "('x', fontsize=14)\n", (1670, 1688), True, 'import matplotlib.pyplot as mp\n'), ((1689, 1716), 'matplotlib.pyplot.ylabel', 'mp.ylabel', (['"""y"""'], {'fontsize': '(14)'}), "('y', fontsize=14)\n", (1698, 1716), True, 'import matplotlib.pyplot as mp\n'), ((1717, 1745), 'matplotlib.pyplot.tick_params', 'mp.tick_params', ([], {'labelsize': '(10)'}), '(labelsize=10)\n', (1731, 1745), True, 'import matplotlib.pyplot as 
mp\n'), ((1746, 1802), 'matplotlib.pyplot.pcolormesh', 'mp.pcolormesh', (['grid_x[0]', 'grid_x[1]', 'grid_y'], {'cmap': '"""gray"""'}), "(grid_x[0], grid_x[1], grid_y, cmap='gray')\n", (1759, 1802), True, 'import matplotlib.pyplot as mp\n'), ((1804, 1870), 'matplotlib.pyplot.scatter', 'mp.scatter', (['test_x[:, 0]', 'test_x[:, 1]'], {'c': 'test_y', 'cmap': '"""brg"""', 's': '(80)'}), "(test_x[:, 0], test_x[:, 1], c=test_y, cmap='brg', s=80)\n", (1814, 1870), True, 'import matplotlib.pyplot as mp\n'), ((1871, 1960), 'matplotlib.pyplot.scatter', 'mp.scatter', (['prob_x[:, 0]', 'prob_x[:, 1]'], {'c': 'pred_prob_y', 'cmap': '"""jet_r"""', 's': '(80)', 'marker': '"""D"""'}), "(prob_x[:, 0], prob_x[:, 1], c=pred_prob_y, cmap='jet_r', s=80,\n marker='D')\n", (1881, 1960), True, 'import matplotlib.pyplot as mp\n'), ((2390, 2399), 'matplotlib.pyplot.show', 'mp.show', ([], {}), '()\n', (2397, 2399), True, 'import matplotlib.pyplot as mp\n'), ((981, 1026), 'sklearn.metrics.classification_report', 'sm.classification_report', (['test_y', 'pred_test_y'], {}), '(test_y, pred_test_y)\n', (1005, 1026), True, 'import sklearn.metrics as sm\n'), ((1369, 1389), 'numpy.linspace', 'np.linspace', (['l', 'r', 'n'], {}), '(l, r, n)\n', (1380, 1389), True, 'import numpy as np\n'), ((1412, 1432), 'numpy.linspace', 'np.linspace', (['b', 't', 'n'], {}), '(b, t, n)\n', (1423, 1432), True, 'import numpy as np\n')]
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.