code (string, lengths 22 to 1.05M) | apis (list, lengths 1 to 3.31k) | extract_api (string, lengths 75 to 3.25M) |
---|---|---|
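Each row pairs a Python source file (code) with the distinct fully qualified APIs it calls (apis) and a string listing every extracted call site (extract_api). The sketch below shows one way to read an extract_api cell; the field meanings are inferred from the sample rows that follow (call offsets into the code string, qualified API name, call expression, argument reprs, argument source text and span, what appears to be an import-alias flag, and the originating import statement), and the helper names are illustrative, not part of the dataset.

```python
import ast


def parse_extract_api(cell: str):
    """Parse one extract_api cell into a list of call-site records.

    Field order is inferred from the sample rows: (start, end) offsets of
    the call in the code string, fully qualified API name, call expression,
    (positional args, keyword args) as repr strings, argument source text,
    (start, end) offsets of the argument text, and trailing fields that
    appear to be an import-alias flag plus the originating import statement.
    """
    records = []
    for entry in ast.literal_eval(cell):
        call_span, api, call, args, arg_src, arg_span, *rest = entry
        records.append({
            "call_span": call_span,
            "api": api,
            "call": call,
            "args": args,
            "arg_src": arg_src,
            "arg_span": arg_span,
            "extra": rest,
        })
    return records


def apis_of(records):
    """Collect the distinct qualified API names, as the apis column does."""
    return sorted({r["api"] for r in records})


if __name__ == "__main__":
    # extract_api cell of the first row below, escaped as a Python string.
    cell = (
        "[((1871, 1887), '_TFL.TFL._Export', 'TFL._Export', "
        "(['\"\"\"*\"\"\"'], {}), \"('*')\\n\", (1882, 1887), False, "
        "'from _TFL import TFL\\n')]"
    )
    recs = parse_extract_api(cell)
    print(apis_of(recs))         # ['_TFL.TFL._Export']
    print(recs[0]["call_span"])  # (1871, 1887): span of the call in the code cell
```

Run against the first row's cell, apis_of() returns ['_TFL.TFL._Export'], which matches that row's apis column; this appears to be how the two columns relate.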
# -*- coding: utf-8 -*-
# Copyright (C) 2005-2013 Mag. <NAME>. All rights reserved
# Glasauergasse 32, A--1130 Wien, Austria. <EMAIL>
# ****************************************************************************
#
# This module is licensed under the terms of the BSD 3-Clause License
# <http://www.c-tanzer.at/license/bsd_3c.html>.
# ****************************************************************************
#
#++
# Name
# Apply_All
#
# Purpose
# Class transparently applying method calls to a set of objects
#
# Revision Dates
# 20-Feb-2005 (CT) Creation
# ««revision-date»»···
#--
from _TFL import TFL
import _TFL._Meta.Object
class Apply_All (TFL.Meta.Object) :
"""Class transparently applying method calls to a set of objects.
>>> l1 = list (range (5))
>>> l2 = ["f", "b", "c", "a"]
>>> all = Apply_All (l1, l2)
>>> all._receivers
([0, 1, 2, 3, 4], ['f', 'b', 'c', 'a'])
>>> all.sort ()
>>> all._receivers
([0, 1, 2, 3, 4], ['a', 'b', 'c', 'f'])
>>> all.count ("a")
[0, 1]
>>> all.reverse ()
>>> all._receivers
([4, 3, 2, 1, 0], ['f', 'c', 'b', 'a'])
>>> all.pop ()
[0, 'a']
>>> all._receivers
([4, 3, 2, 1], ['f', 'c', 'b'])
"""
def __init__ (self, * receivers) :
self._receivers = receivers
# end def __init__
def _apply (self, name, * args, ** kw) :
result = []
for r in self._receivers :
f = getattr (r, name)
r = f (* args, ** kw)
if r is not None :
result.append (r)
return result or None
# end def _apply
def __getattr__ (self, name) :
return lambda * args, ** kw : self._apply (name, * args, ** kw)
# end def __getattr__
# end class Apply_All
if __name__ != "__main__" :
TFL._Export ("*")
### __END__ Apply_All
|
[
"_TFL.TFL._Export"
] |
[((1871, 1887), '_TFL.TFL._Export', 'TFL._Export', (['"""*"""'], {}), "('*')\n", (1882, 1887), False, 'from _TFL import TFL\n')]
|
from NJ_tree_analysis_functions import start_gui_explorer
# new in omegaCen?
objs = [
140305003201095, 140305003201103, 140305003201185, 140307002601128, 140307002601147, 140311006101253,
140314005201008, 140608002501266, 150211004701104, 150428002601118, 150703002101192
]
# new in NGC6774
objs = [
140707002601170, 140707002601363, 140806003501357, 151009001601071, 160522005601187, 170506006401032,
170506006401321, 170506006401334, 170506006401352, 170506006401367, 170506006401373, 170506006401374,
170506006401392, 170802004301085, 170906002601139, 170907002601241, 140708005301211,150703005601230,161013001601131,161109002601048,170506005401371,170506006401241,170506006401303,170907003101232,170907003101274,170910003101093,170506006401009, 170506006401032, 170506006401039, 170506006401063, 170506006401095, 170506006401189, 170506006401265, 170506006401281, 170506006401321, 170506006401331, 170506006401334, 170506006401345, 170506006401352, 170506006401367, 170506006401373, 170506006401374, 170506006401392
]
objs = [
140308001401117,140308001401346,151229004001035,151229004001161,160327002601047,160327002601054,160327002601078,160327002601137,160327002601145,160327002601160,160327002601181,160327002601229,160327002601258,160327002601299,160327002601314,160327002601391,170407002101038
]
objs = [str(o) for o in objs]
start_gui_explorer(objs,
manual=True, initial_only=False, loose=True,
kinematics_source='ucac5')
# start_gui_explorer(objs,
# manual=False, initial_only=False, loose=True,
# kinematics_source='ucac5')
|
[
"NJ_tree_analysis_functions.start_gui_explorer"
] |
[((1358, 1458), 'NJ_tree_analysis_functions.start_gui_explorer', 'start_gui_explorer', (['objs'], {'manual': '(True)', 'initial_only': '(False)', 'loose': '(True)', 'kinematics_source': '"""ucac5"""'}), "(objs, manual=True, initial_only=False, loose=True,\n kinematics_source='ucac5')\n", (1376, 1458), False, 'from NJ_tree_analysis_functions import start_gui_explorer\n')]
|
import os
import asyncio
import discord
from discord.ext import commands
# Try to get the bot token from file, quit if it fails
try:
with open('token') as file:
token = file.readline()
except IOError:
print("Missing token file containing the bot's token")
quit()
# Create config and cog folders if they don't exist
if not os.path.exists('./config/'):
os.makedirs('./config/')
if not os.path.exists('./cogs/'):
os.makedirs('./cogs/')
# Create bot object
bot = commands.Bot(command_prefix="mhh",
strip_after_prefix=True,
owner_id=289887222310764545,
intents=discord.Intents.all())
# Load all .py files from 'cogs' directory
for filename in os.listdir('./cogs'):
if (filename.endswith('.py')):
asyncio.run(bot.load_extension(f'cogs.{filename[:-3]}'))
@bot.event
async def on_ready():
# Set the bot presence status
await bot.change_presence(status=discord.Status.online)
# Print a bunch of info about the bot
print ("\n--------------------------------\n")
print ("Bot Name:", bot.user.name)
print ("Bot ID:", bot.user.id)
print ("discord.py version:", discord.__version__)
print ("\n--------------------------------\n")
bot.run(token)
|
[
"os.makedirs",
"os.path.exists",
"os.listdir",
"discord.Intents.all"
] |
[((743, 763), 'os.listdir', 'os.listdir', (['"""./cogs"""'], {}), "('./cogs')\n", (753, 763), False, 'import os\n'), ((345, 372), 'os.path.exists', 'os.path.exists', (['"""./config/"""'], {}), "('./config/')\n", (359, 372), False, 'import os\n'), ((378, 402), 'os.makedirs', 'os.makedirs', (['"""./config/"""'], {}), "('./config/')\n", (389, 402), False, 'import os\n'), ((410, 435), 'os.path.exists', 'os.path.exists', (['"""./cogs/"""'], {}), "('./cogs/')\n", (424, 435), False, 'import os\n'), ((441, 463), 'os.makedirs', 'os.makedirs', (['"""./cogs/"""'], {}), "('./cogs/')\n", (452, 463), False, 'import os\n'), ((660, 681), 'discord.Intents.all', 'discord.Intents.all', ([], {}), '()\n', (679, 681), False, 'import discord\n')]
|
import hazel
import glob
import os
def test_file_generators():
tmp = hazel.tools.File_observation(mode='single')
tmp.set_size(n_lambda=128, n_pixel=1)
tmp.save('test')
tmp = hazel.tools.File_observation(mode='multi')
tmp.set_size(n_lambda=128, n_pixel=10)
tmp.save('test2')
tmp = hazel.tools.File_photosphere(mode='single')
tmp.set_default(n_pixel=1)
tmp.save('photosphere')
tmp = hazel.tools.File_photosphere(mode='multi')
tmp.set_default(n_pixel=10)
tmp.save('photosphere2')
tmp = hazel.tools.File_chromosphere(mode='single')
tmp.set_default(n_pixel=1)
tmp.save('chromosphere')
tmp = hazel.tools.File_chromosphere(mode='multi')
tmp.set_default(n_pixel=10)
tmp.save('chromosphere2')
try:
for f in glob.glob('test*.*'):
os.remove(f)
except:
pass
try:
for f in glob.glob('photosphere*.*'):
os.remove(f)
except:
pass
try:
for f in glob.glob('chromosphere*.*'):
os.remove(f)
except:
pass
|
[
"hazel.tools.File_chromosphere",
"os.remove",
"hazel.tools.File_observation",
"glob.glob",
"hazel.tools.File_photosphere"
] |
[((75, 118), 'hazel.tools.File_observation', 'hazel.tools.File_observation', ([], {'mode': '"""single"""'}), "(mode='single')\n", (103, 118), False, 'import hazel\n'), ((193, 235), 'hazel.tools.File_observation', 'hazel.tools.File_observation', ([], {'mode': '"""multi"""'}), "(mode='multi')\n", (221, 235), False, 'import hazel\n'), ((312, 355), 'hazel.tools.File_photosphere', 'hazel.tools.File_photosphere', ([], {'mode': '"""single"""'}), "(mode='single')\n", (340, 355), False, 'import hazel\n'), ((426, 468), 'hazel.tools.File_photosphere', 'hazel.tools.File_photosphere', ([], {'mode': '"""multi"""'}), "(mode='multi')\n", (454, 468), False, 'import hazel\n'), ((541, 585), 'hazel.tools.File_chromosphere', 'hazel.tools.File_chromosphere', ([], {'mode': '"""single"""'}), "(mode='single')\n", (570, 585), False, 'import hazel\n'), ((657, 700), 'hazel.tools.File_chromosphere', 'hazel.tools.File_chromosphere', ([], {'mode': '"""multi"""'}), "(mode='multi')\n", (686, 700), False, 'import hazel\n'), ((790, 810), 'glob.glob', 'glob.glob', (['"""test*.*"""'], {}), "('test*.*')\n", (799, 810), False, 'import glob\n'), ((889, 916), 'glob.glob', 'glob.glob', (['"""photosphere*.*"""'], {}), "('photosphere*.*')\n", (898, 916), False, 'import glob\n'), ((995, 1023), 'glob.glob', 'glob.glob', (['"""chromosphere*.*"""'], {}), "('chromosphere*.*')\n", (1004, 1023), False, 'import glob\n'), ((824, 836), 'os.remove', 'os.remove', (['f'], {}), '(f)\n', (833, 836), False, 'import os\n'), ((930, 942), 'os.remove', 'os.remove', (['f'], {}), '(f)\n', (939, 942), False, 'import os\n'), ((1037, 1049), 'os.remove', 'os.remove', (['f'], {}), '(f)\n', (1046, 1049), False, 'import os\n')]
|
import osmnx as ox
import networkx as nx
ox.config(use_cache=True, log_console=False)
graph = ox.graph_from_address('953 Danby Rd, Ithaca, New York', network_type='walk')
fig, ax = ox.plot_graph(graph)
|
[
"osmnx.graph_from_address",
"osmnx.config",
"osmnx.plot_graph"
] |
[((42, 86), 'osmnx.config', 'ox.config', ([], {'use_cache': '(True)', 'log_console': '(False)'}), '(use_cache=True, log_console=False)\n', (51, 86), True, 'import osmnx as ox\n'), ((96, 172), 'osmnx.graph_from_address', 'ox.graph_from_address', (['"""953 Danby Rd, Ithaca, New York"""'], {'network_type': '"""walk"""'}), "('953 Danby Rd, Ithaca, New York', network_type='walk')\n", (117, 172), True, 'import osmnx as ox\n'), ((184, 204), 'osmnx.plot_graph', 'ox.plot_graph', (['graph'], {}), '(graph)\n', (197, 204), True, 'import osmnx as ox\n')]
|
import pyspark
import pyspark.sql.functions as f
from airtunnel import PySparkDataAsset, PySparkDataAssetIO
def rebuild_for_store(asset: PySparkDataAsset, airflow_context):
spark_session = pyspark.sql.SparkSession.builder.getOrCreate()
student = PySparkDataAsset(name="student_pyspark")
programme = PySparkDataAsset(name="programme_pyspark")
enrollment = PySparkDataAsset(name="enrollment_pyspark")
student_df = student.retrieve_from_store(
airflow_context=airflow_context,
consuming_asset=asset,
spark_session=spark_session,
)
programme_df = programme.retrieve_from_store(
airflow_context=airflow_context,
consuming_asset=asset,
spark_session=spark_session,
)
enrollment_df = enrollment.retrieve_from_store(
airflow_context=airflow_context,
consuming_asset=asset,
spark_session=spark_session,
)
enrollment_summary: pyspark.sql.DataFrame = enrollment_df.join(
other=student_df, on=student.declarations.key_columns
).join(other=programme_df, on=programme.declarations.key_columns)
enrollment_summary = (
enrollment_summary.select(["student_major", "programme_name", "student_id"])
.groupby(["student_major", "programme_name"])
.agg(f.count("*").alias("count"))
)
PySparkDataAssetIO.write_data_asset(asset=asset, data=enrollment_summary)
spark_session.stop()
|
[
"airtunnel.PySparkDataAsset",
"airtunnel.PySparkDataAssetIO.write_data_asset",
"pyspark.sql.functions.count",
"pyspark.sql.SparkSession.builder.getOrCreate"
] |
[((196, 242), 'pyspark.sql.SparkSession.builder.getOrCreate', 'pyspark.sql.SparkSession.builder.getOrCreate', ([], {}), '()\n', (240, 242), False, 'import pyspark\n'), ((258, 298), 'airtunnel.PySparkDataAsset', 'PySparkDataAsset', ([], {'name': '"""student_pyspark"""'}), "(name='student_pyspark')\n", (274, 298), False, 'from airtunnel import PySparkDataAsset, PySparkDataAssetIO\n'), ((315, 357), 'airtunnel.PySparkDataAsset', 'PySparkDataAsset', ([], {'name': '"""programme_pyspark"""'}), "(name='programme_pyspark')\n", (331, 357), False, 'from airtunnel import PySparkDataAsset, PySparkDataAssetIO\n'), ((375, 418), 'airtunnel.PySparkDataAsset', 'PySparkDataAsset', ([], {'name': '"""enrollment_pyspark"""'}), "(name='enrollment_pyspark')\n", (391, 418), False, 'from airtunnel import PySparkDataAsset, PySparkDataAssetIO\n'), ((1335, 1408), 'airtunnel.PySparkDataAssetIO.write_data_asset', 'PySparkDataAssetIO.write_data_asset', ([], {'asset': 'asset', 'data': 'enrollment_summary'}), '(asset=asset, data=enrollment_summary)\n', (1370, 1408), False, 'from airtunnel import PySparkDataAsset, PySparkDataAssetIO\n'), ((1295, 1307), 'pyspark.sql.functions.count', 'f.count', (['"""*"""'], {}), "('*')\n", (1302, 1307), True, 'import pyspark.sql.functions as f\n')]
|
import cv2
import numpy as np
import math
# Func to calc Euclidean dist b/w 2 pts:
def euc_dst(x1, y1, x2, y2):
pt_a = (x1 - x2)**2
pt_b = (y1 - y2)**2
return math.sqrt(pt_a + pt_b)
cap = cv2.VideoCapture(0)
while(True):
ret, frame = cap.read()
gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
gray = cv2.medianBlur(gray, 5)
circles = cv2.HoughCircles(gray, cv2.HOUGH_GRADIENT, dp=1,
minDist=10, param1=100, param2=50, minRadius=0, maxRadius=500)
if circles is not None:
circles = np.uint16(np.around(circles))
x_cord = []
y_cord = []
rad = []
# Converting parameters of circle (center coordinates:x,y & radius)
for pt in circles[0, :]:
x, y, r = pt[0], pt[1], pt[2]
# Storing centers & radius of all circles
x_cord.append(x)
y_cord.append(y)
rad.append(r)
# Drawing outer circle
cv2.circle(frame, (x, y), r, (0, 255, 0), 2)
# Drawing circle center
cv2.circle(frame, (x, y), 1, (0, 0, 255), 3)
if len(rad) > 1:
for i in range(0, len(rad)):
x1 = x_cord[i]
y1 = y_cord[i]
for j in range(i+1, len(rad)):
x2 = x_cord[j]
y2 = y_cord[j]
cv2.line(frame, (x1, y1), (x2, y2), (0, 255, 0), 2)
mid_x = (x1+x2)/2
mid_y = (y1+y2)/2
dist = euc_dst(x1/25, y1/25, x2/25, y2/25)
cv2.putText(frame, "{:.1f}cm".format(dist), (int(mid_x), int(
mid_y - 10)), cv2.FONT_HERSHEY_SIMPLEX, 0.55, (0, 255, 0), 2)
cv2.imshow('video', frame)
if cv2.waitKey(1) == 27: # esc Key
break
cap.release()
cv2.destroyAllWindows()
|
[
"cv2.line",
"cv2.HoughCircles",
"cv2.circle",
"math.sqrt",
"cv2.medianBlur",
"cv2.cvtColor",
"cv2.waitKey",
"cv2.imshow",
"cv2.VideoCapture",
"numpy.around",
"cv2.destroyAllWindows"
] |
[((217, 236), 'cv2.VideoCapture', 'cv2.VideoCapture', (['(0)'], {}), '(0)\n', (233, 236), False, 'import cv2\n'), ((1899, 1922), 'cv2.destroyAllWindows', 'cv2.destroyAllWindows', ([], {}), '()\n', (1920, 1922), False, 'import cv2\n'), ((183, 205), 'math.sqrt', 'math.sqrt', (['(pt_a + pt_b)'], {}), '(pt_a + pt_b)\n', (192, 205), False, 'import math\n'), ((294, 333), 'cv2.cvtColor', 'cv2.cvtColor', (['frame', 'cv2.COLOR_BGR2GRAY'], {}), '(frame, cv2.COLOR_BGR2GRAY)\n', (306, 333), False, 'import cv2\n'), ((346, 369), 'cv2.medianBlur', 'cv2.medianBlur', (['gray', '(5)'], {}), '(gray, 5)\n', (360, 369), False, 'import cv2\n'), ((385, 500), 'cv2.HoughCircles', 'cv2.HoughCircles', (['gray', 'cv2.HOUGH_GRADIENT'], {'dp': '(1)', 'minDist': '(10)', 'param1': '(100)', 'param2': '(50)', 'minRadius': '(0)', 'maxRadius': '(500)'}), '(gray, cv2.HOUGH_GRADIENT, dp=1, minDist=10, param1=100,\n param2=50, minRadius=0, maxRadius=500)\n', (401, 500), False, 'import cv2\n'), ((1798, 1824), 'cv2.imshow', 'cv2.imshow', (['"""video"""', 'frame'], {}), "('video', frame)\n", (1808, 1824), False, 'import cv2\n'), ((1833, 1847), 'cv2.waitKey', 'cv2.waitKey', (['(1)'], {}), '(1)\n', (1844, 1847), False, 'import cv2\n'), ((589, 607), 'numpy.around', 'np.around', (['circles'], {}), '(circles)\n', (598, 607), True, 'import numpy as np\n'), ((1014, 1058), 'cv2.circle', 'cv2.circle', (['frame', '(x, y)', 'r', '(0, 255, 0)', '(2)'], {}), '(frame, (x, y), r, (0, 255, 0), 2)\n', (1024, 1058), False, 'import cv2\n'), ((1111, 1155), 'cv2.circle', 'cv2.circle', (['frame', '(x, y)', '(1)', '(0, 0, 255)', '(3)'], {}), '(frame, (x, y), 1, (0, 0, 255), 3)\n', (1121, 1155), False, 'import cv2\n'), ((1429, 1480), 'cv2.line', 'cv2.line', (['frame', '(x1, y1)', '(x2, y2)', '(0, 255, 0)', '(2)'], {}), '(frame, (x1, y1), (x2, y2), (0, 255, 0), 2)\n', (1437, 1480), False, 'import cv2\n')]
|
from scripts.utils.helpful_scripts import get_account, LOCAL_BLOCKCHAIN_ENVIRONMENTS
from scripts.simple_collectible.deploy_and_create import deploy_and_create
from brownie import network
import pytest
def network_checker():
if network.show_active() not in LOCAL_BLOCKCHAIN_ENVIRONMENTS:
pytest.skip()
def test_can_create__simple_collectible():
network_checker()
simple_collectible = deploy_and_create()
assert simple_collectible.ownerOf(0) == get_account()
|
[
"scripts.utils.helpful_scripts.get_account",
"scripts.simple_collectible.deploy_and_create.deploy_and_create",
"pytest.skip",
"brownie.network.show_active"
] |
[((408, 427), 'scripts.simple_collectible.deploy_and_create.deploy_and_create', 'deploy_and_create', ([], {}), '()\n', (425, 427), False, 'from scripts.simple_collectible.deploy_and_create import deploy_and_create\n'), ((234, 255), 'brownie.network.show_active', 'network.show_active', ([], {}), '()\n', (253, 255), False, 'from brownie import network\n'), ((302, 315), 'pytest.skip', 'pytest.skip', ([], {}), '()\n', (313, 315), False, 'import pytest\n'), ((472, 485), 'scripts.utils.helpful_scripts.get_account', 'get_account', ([], {}), '()\n', (483, 485), False, 'from scripts.utils.helpful_scripts import get_account, LOCAL_BLOCKCHAIN_ENVIRONMENTS\n')]
|
#!/usr/bin/env python
def simple():
from TestComponents import ComplexFacility
return ComplexFacility()
# End of file
|
[
"TestComponents.ComplexFacility"
] |
[((96, 113), 'TestComponents.ComplexFacility', 'ComplexFacility', ([], {}), '()\n', (111, 113), False, 'from TestComponents import ComplexFacility\n')]
|
#!/usr/local/bin/python
# -*- coding: utf-8 -*-
"""
Created on : Mon Jun 4 23:17:56 2018
@author : Sourabh
"""
# %%
import numpy as np
import pandas as pd
from sklearn.preprocessing import StandardScaler
from sklearn.svm import SVR
import matplotlib.pyplot as plt
# ============================================================================ #
np.set_printoptions(threshold=np.nan)
# constant properties that need changes according to the actual problem
Data_File = 'Position_Salaries.csv'
Dependent_Variable_Column = 2
Test_Data_Size = 0.2
# import the dataset & extract the feature and the dependent variable vectors
dataset = pd.read_csv(Data_File)
X = dataset.iloc[:, 1:Dependent_Variable_Column].values
y = dataset.iloc[:, Dependent_Variable_Column].values
# feature scaling: SVR does not support it automatically, we need to do it here
sc_X = StandardScaler()
sc_y = StandardScaler()
X_scaled = sc_X.fit_transform(X.reshape(-1, 1))
y_scaled = sc_y.fit_transform(y.reshape(-1, 1))
# ============================================================================ #
# creating and fitting the SVR model to the dataset
# as we know that our training data set is not linear, we should not use linear
# kernel here, it's better we use any of Polynomial or Gaussian kernel.
regressor = SVR(kernel='rbf')
regressor.fit(X_scaled, y_scaled)
# predicting a new result with SVR model
# the sample should also be a 1 x m matrix with m feature values
sampleValue = np.array([[6.5]])
y_pred = sc_y.inverse_transform(
regressor.predict(
sc_X.transform(sampleValue)
)
)
# ============================================================================ #
# visualising the SVR results
stepSize = 0.1
X_grid = np.arange(start=min(X), stop=max(X)+stepSize, step=stepSize)
X_grid = X_grid.reshape((len(X_grid), 1))
plt.scatter(X, y, color='red', marker='o', label='Samples')
plt.plot(X_grid,
sc_y.inverse_transform(regressor.predict(sc_X.transform(X_grid))),
color='blue',
label='SVR Model')
plt.title('Truth or Bluff (SVR)')
plt.xlabel('Position Level')
plt.ylabel('Salary')
plt.legend(loc='best')
plt.show()
|
[
"matplotlib.pyplot.title",
"sklearn.svm.SVR",
"numpy.set_printoptions",
"sklearn.preprocessing.StandardScaler",
"matplotlib.pyplot.show",
"pandas.read_csv",
"matplotlib.pyplot.scatter",
"matplotlib.pyplot.legend",
"numpy.array",
"matplotlib.pyplot.ylabel",
"matplotlib.pyplot.xlabel"
] |
[((356, 393), 'numpy.set_printoptions', 'np.set_printoptions', ([], {'threshold': 'np.nan'}), '(threshold=np.nan)\n', (375, 393), True, 'import numpy as np\n'), ((643, 665), 'pandas.read_csv', 'pd.read_csv', (['Data_File'], {}), '(Data_File)\n', (654, 665), True, 'import pandas as pd\n'), ((864, 880), 'sklearn.preprocessing.StandardScaler', 'StandardScaler', ([], {}), '()\n', (878, 880), False, 'from sklearn.preprocessing import StandardScaler\n'), ((888, 904), 'sklearn.preprocessing.StandardScaler', 'StandardScaler', ([], {}), '()\n', (902, 904), False, 'from sklearn.preprocessing import StandardScaler\n'), ((1300, 1317), 'sklearn.svm.SVR', 'SVR', ([], {'kernel': '"""rbf"""'}), "(kernel='rbf')\n", (1303, 1317), False, 'from sklearn.svm import SVR\n'), ((1473, 1490), 'numpy.array', 'np.array', (['[[6.5]]'], {}), '([[6.5]])\n', (1481, 1490), True, 'import numpy as np\n'), ((1871, 1930), 'matplotlib.pyplot.scatter', 'plt.scatter', (['X', 'y'], {'color': '"""red"""', 'marker': '"""o"""', 'label': '"""Samples"""'}), "(X, y, color='red', marker='o', label='Samples')\n", (1882, 1930), True, 'import matplotlib.pyplot as plt\n'), ((2075, 2108), 'matplotlib.pyplot.title', 'plt.title', (['"""Truth or Bluff (SVR)"""'], {}), "('Truth or Bluff (SVR)')\n", (2084, 2108), True, 'import matplotlib.pyplot as plt\n'), ((2109, 2137), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Position Level"""'], {}), "('Position Level')\n", (2119, 2137), True, 'import matplotlib.pyplot as plt\n'), ((2138, 2158), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Salary"""'], {}), "('Salary')\n", (2148, 2158), True, 'import matplotlib.pyplot as plt\n'), ((2159, 2181), 'matplotlib.pyplot.legend', 'plt.legend', ([], {'loc': '"""best"""'}), "(loc='best')\n", (2169, 2181), True, 'import matplotlib.pyplot as plt\n'), ((2182, 2192), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (2190, 2192), True, 'import matplotlib.pyplot as plt\n')]
|
from django.contrib import admin
from .models import Car
@admin.register(Car)
class CarAdmin(admin.ModelAdmin):
list_display = ['name', 'updated', 'user']
|
[
"django.contrib.admin.register"
] |
[((60, 79), 'django.contrib.admin.register', 'admin.register', (['Car'], {}), '(Car)\n', (74, 79), False, 'from django.contrib import admin\n')]
|
import pytest
from django_dynamic_fixture import G
from silver.models import Transaction, Proforma, Invoice, Customer
from silver import payment_processors
from silver_instamojo.models import InstamojoPaymentMethod
@pytest.fixture
def customer():
return G(Customer, currency='RON', address_1='9', address_2='9',
sales_tax_number=0)
@pytest.fixture
def payment_processor():
return payment_processors.get_instance('instamojo_manual')
@pytest.fixture
def payment_processor_triggered():
return payment_processors.get_instance('instamojo_triggered')
@pytest.fixture
def payment_method(customer, payment_processor):
return G(InstamojoPaymentMethod, customer=customer,
payment_processor=payment_processor.name)
@pytest.fixture
def proforma(customer):
return G(Proforma, state=Invoice.STATES.ISSUED, customer=customer,
transaction_currency='RON')
@pytest.fixture
def invoice(customer, proforma):
return G(Invoice, related_document=proforma, state=Invoice.STATES.ISSUED,
customer=customer, transaction_currency='RON')
@pytest.fixture
def transaction(customer, payment_processor, payment_method, proforma, invoice):
return G(Transaction, invoice=invoice, proforma=proforma, currency='RON',
amount=invoice.total, payment_method=payment_method)
@pytest.fixture
def transaction_triggered(customer, payment_processor_triggered,
payment_method, proforma, invoice):
return G(Transaction, invoice=invoice, proforma=proforma, currency='RON',
amount=invoice.total, payment_method=payment_method)
|
[
"silver.payment_processors.get_instance",
"django_dynamic_fixture.G"
] |
[((261, 338), 'django_dynamic_fixture.G', 'G', (['Customer'], {'currency': '"""RON"""', 'address_1': '"""9"""', 'address_2': '"""9"""', 'sales_tax_number': '(0)'}), "(Customer, currency='RON', address_1='9', address_2='9', sales_tax_number=0)\n", (262, 338), False, 'from django_dynamic_fixture import G\n'), ((406, 457), 'silver.payment_processors.get_instance', 'payment_processors.get_instance', (['"""instamojo_manual"""'], {}), "('instamojo_manual')\n", (437, 457), False, 'from silver import payment_processors\n'), ((522, 576), 'silver.payment_processors.get_instance', 'payment_processors.get_instance', (['"""instamojo_triggered"""'], {}), "('instamojo_triggered')\n", (553, 576), False, 'from silver import payment_processors\n'), ((655, 746), 'django_dynamic_fixture.G', 'G', (['InstamojoPaymentMethod'], {'customer': 'customer', 'payment_processor': 'payment_processor.name'}), '(InstamojoPaymentMethod, customer=customer, payment_processor=\n payment_processor.name)\n', (656, 746), False, 'from django_dynamic_fixture import G\n'), ((808, 899), 'django_dynamic_fixture.G', 'G', (['Proforma'], {'state': 'Invoice.STATES.ISSUED', 'customer': 'customer', 'transaction_currency': '"""RON"""'}), "(Proforma, state=Invoice.STATES.ISSUED, customer=customer,\n transaction_currency='RON')\n", (809, 899), False, 'from django_dynamic_fixture import G\n'), ((971, 1089), 'django_dynamic_fixture.G', 'G', (['Invoice'], {'related_document': 'proforma', 'state': 'Invoice.STATES.ISSUED', 'customer': 'customer', 'transaction_currency': '"""RON"""'}), "(Invoice, related_document=proforma, state=Invoice.STATES.ISSUED, customer\n =customer, transaction_currency='RON')\n", (972, 1089), False, 'from django_dynamic_fixture import G\n'), ((1208, 1332), 'django_dynamic_fixture.G', 'G', (['Transaction'], {'invoice': 'invoice', 'proforma': 'proforma', 'currency': '"""RON"""', 'amount': 'invoice.total', 'payment_method': 'payment_method'}), "(Transaction, invoice=invoice, proforma=proforma, currency='RON', amount=\n invoice.total, payment_method=payment_method)\n", (1209, 1332), False, 'from django_dynamic_fixture import G\n'), ((1497, 1621), 'django_dynamic_fixture.G', 'G', (['Transaction'], {'invoice': 'invoice', 'proforma': 'proforma', 'currency': '"""RON"""', 'amount': 'invoice.total', 'payment_method': 'payment_method'}), "(Transaction, invoice=invoice, proforma=proforma, currency='RON', amount=\n invoice.total, payment_method=payment_method)\n", (1498, 1621), False, 'from django_dynamic_fixture import G\n')]
|
# coding=utf-8
# *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from .. import _utilities
__all__ = [
'DefinitionAuthorizationArgs',
]
@pulumi.input_type
class DefinitionAuthorizationArgs:
def __init__(__self__, *,
principal_id: pulumi.Input[str],
role_definition_id: pulumi.Input[str],
delegated_role_definition_ids: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
principal_display_name: Optional[pulumi.Input[str]] = None):
"""
:param pulumi.Input[str] principal_id: Principal ID of the security group/service principal/user that would be assigned permissions to the projected subscription.
:param pulumi.Input[str] role_definition_id: The role definition identifier. This role will define the permissions that are granted to the principal. This cannot be an `Owner` role.
:param pulumi.Input[Sequence[pulumi.Input[str]]] delegated_role_definition_ids: The set of role definition ids which define all the permissions that the principal id can assign.
:param pulumi.Input[str] principal_display_name: The display name of the security group/service principal/user that would be assigned permissions to the projected subscription.
"""
pulumi.set(__self__, "principal_id", principal_id)
pulumi.set(__self__, "role_definition_id", role_definition_id)
if delegated_role_definition_ids is not None:
pulumi.set(__self__, "delegated_role_definition_ids", delegated_role_definition_ids)
if principal_display_name is not None:
pulumi.set(__self__, "principal_display_name", principal_display_name)
@property
@pulumi.getter(name="principalId")
def principal_id(self) -> pulumi.Input[str]:
"""
Principal ID of the security group/service principal/user that would be assigned permissions to the projected subscription.
"""
return pulumi.get(self, "principal_id")
@principal_id.setter
def principal_id(self, value: pulumi.Input[str]):
pulumi.set(self, "principal_id", value)
@property
@pulumi.getter(name="roleDefinitionId")
def role_definition_id(self) -> pulumi.Input[str]:
"""
The role definition identifier. This role will define the permissions that are granted to the principal. This cannot be an `Owner` role.
"""
return pulumi.get(self, "role_definition_id")
@role_definition_id.setter
def role_definition_id(self, value: pulumi.Input[str]):
pulumi.set(self, "role_definition_id", value)
@property
@pulumi.getter(name="delegatedRoleDefinitionIds")
def delegated_role_definition_ids(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:
"""
The set of role definition ids which define all the permissions that the principal id can assign.
"""
return pulumi.get(self, "delegated_role_definition_ids")
@delegated_role_definition_ids.setter
def delegated_role_definition_ids(self, value: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]):
pulumi.set(self, "delegated_role_definition_ids", value)
@property
@pulumi.getter(name="principalDisplayName")
def principal_display_name(self) -> Optional[pulumi.Input[str]]:
"""
The display name of the security group/service principal/user that would be assigned permissions to the projected subscription.
"""
return pulumi.get(self, "principal_display_name")
@principal_display_name.setter
def principal_display_name(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "principal_display_name", value)
|
[
"pulumi.get",
"pulumi.getter",
"pulumi.set"
] |
[((1946, 1979), 'pulumi.getter', 'pulumi.getter', ([], {'name': '"""principalId"""'}), "(name='principalId')\n", (1959, 1979), False, 'import pulumi\n'), ((2381, 2419), 'pulumi.getter', 'pulumi.getter', ([], {'name': '"""roleDefinitionId"""'}), "(name='roleDefinitionId')\n", (2394, 2419), False, 'import pulumi\n'), ((2864, 2912), 'pulumi.getter', 'pulumi.getter', ([], {'name': '"""delegatedRoleDefinitionIds"""'}), "(name='delegatedRoleDefinitionIds')\n", (2877, 2912), False, 'import pulumi\n'), ((3441, 3483), 'pulumi.getter', 'pulumi.getter', ([], {'name': '"""principalDisplayName"""'}), "(name='principalDisplayName')\n", (3454, 3483), False, 'import pulumi\n'), ((1523, 1573), 'pulumi.set', 'pulumi.set', (['__self__', '"""principal_id"""', 'principal_id'], {}), "(__self__, 'principal_id', principal_id)\n", (1533, 1573), False, 'import pulumi\n'), ((1582, 1644), 'pulumi.set', 'pulumi.set', (['__self__', '"""role_definition_id"""', 'role_definition_id'], {}), "(__self__, 'role_definition_id', role_definition_id)\n", (1592, 1644), False, 'import pulumi\n'), ((2200, 2232), 'pulumi.get', 'pulumi.get', (['self', '"""principal_id"""'], {}), "(self, 'principal_id')\n", (2210, 2232), False, 'import pulumi\n'), ((2321, 2360), 'pulumi.set', 'pulumi.set', (['self', '"""principal_id"""', 'value'], {}), "(self, 'principal_id', value)\n", (2331, 2360), False, 'import pulumi\n'), ((2659, 2697), 'pulumi.get', 'pulumi.get', (['self', '"""role_definition_id"""'], {}), "(self, 'role_definition_id')\n", (2669, 2697), False, 'import pulumi\n'), ((2798, 2843), 'pulumi.set', 'pulumi.set', (['self', '"""role_definition_id"""', 'value'], {}), "(self, 'role_definition_id', value)\n", (2808, 2843), False, 'import pulumi\n'), ((3158, 3207), 'pulumi.get', 'pulumi.get', (['self', '"""delegated_role_definition_ids"""'], {}), "(self, 'delegated_role_definition_ids')\n", (3168, 3207), False, 'import pulumi\n'), ((3364, 3420), 'pulumi.set', 'pulumi.set', (['self', '"""delegated_role_definition_ids"""', 'value'], {}), "(self, 'delegated_role_definition_ids', value)\n", (3374, 3420), False, 'import pulumi\n'), ((3728, 3770), 'pulumi.get', 'pulumi.get', (['self', '"""principal_display_name"""'], {}), "(self, 'principal_display_name')\n", (3738, 3770), False, 'import pulumi\n'), ((3889, 3938), 'pulumi.set', 'pulumi.set', (['self', '"""principal_display_name"""', 'value'], {}), "(self, 'principal_display_name', value)\n", (3899, 3938), False, 'import pulumi\n'), ((1711, 1799), 'pulumi.set', 'pulumi.set', (['__self__', '"""delegated_role_definition_ids"""', 'delegated_role_definition_ids'], {}), "(__self__, 'delegated_role_definition_ids',\n delegated_role_definition_ids)\n", (1721, 1799), False, 'import pulumi\n'), ((1855, 1925), 'pulumi.set', 'pulumi.set', (['__self__', '"""principal_display_name"""', 'principal_display_name'], {}), "(__self__, 'principal_display_name', principal_display_name)\n", (1865, 1925), False, 'import pulumi\n')]
|
# -*- coding: utf-8 -*-
# @File : api.py
# @Date : 2021/2/25
# @Desc :
import random
import string
def get_random_str(len):
value = ''.join(random.sample(string.ascii_letters + string.digits, len))
return value
def data_return(code=500, data=None,
msg_zh="服务器发生错误,请检查服务器",
msg_en="An error occurred on the server, please check the server."):
return {'code': code, 'data': data, 'msg_zh': msg_zh, "msg_en": msg_en}
|
[
"random.sample"
] |
[((149, 205), 'random.sample', 'random.sample', (['(string.ascii_letters + string.digits)', 'len'], {}), '(string.ascii_letters + string.digits, len)\n', (162, 205), False, 'import random\n')]
|
from pathlib import Path
from .config import Config
from .command import get_command, command_exist
def is_command_disabled(channel: str, cmd: str):
if channel in cfg_disabled_commands.data:
if command_exist(cmd):
cmd = get_command(cmd).fullname
return cmd in cfg_disabled_commands[channel]
return False
def disable_command(channel: str, cmd: str):
if channel not in cfg_disabled_commands.data:
cfg_disabled_commands[channel] = [cmd]
return
if command_exist(cmd):
cmd = get_command(cmd).fullname
if cmd in cfg_disabled_commands[channel]:
return
cfg_disabled_commands[channel].append(cmd)
cfg_disabled_commands.save()
def enable_command(channel: str, cmd: str):
if channel not in cfg_disabled_commands.data:
return
if command_exist(cmd):
cmd = get_command(cmd).fullname
if cmd in cfg_disabled_commands[channel]:
cfg_disabled_commands[channel].remove(cmd)
cfg_disabled_commands.save()
cfg_disabled_commands = Config(Path('configs', 'disabled_commands.json'))
|
[
"pathlib.Path"
] |
[((1061, 1102), 'pathlib.Path', 'Path', (['"""configs"""', '"""disabled_commands.json"""'], {}), "('configs', 'disabled_commands.json')\n", (1065, 1102), False, 'from pathlib import Path\n')]
|
#!/usr/bin/env python3
help_str = """
roll is a tool for computing die rolls
Pass any number of arguments of the form
<number>d<number>
The first number refers to the number of dice to roll;
The second refers to the number of sides on the die.
For example, to roll 5, 6-sided dice, pass '5d6'.
It also computes rolls with advantage or disadvantage:
each of these rolls 2 dice instead of one, then chooses
the greater for advantage and the lesser for disadvantage.
Use this option by adding the letter 'a' for advantage
or the letter 'd' for disadvantage to the end of the
argument. For example, passing 4d20d will roll 4 pairs
of 20-sided dice, and for each pair will return the lesser
of the two numbers rolled.
<NAME> - 2020
"""
import re
import sys
import random
from math import floor
def roll_x_y_sided_dice(x,y):
"""Rolls x, y-sided dice
Parameters:
x (int): the number of dice to roll
y (int): the number of sides on each die
Returns:
rolls (list): the value of each roll
"""
return [floor(random.random()*y)+1 for _ in range(x)]
def do_rolls(rolls):
"""accepts a list of 3 tuples, where the first is the number of dice
to roll, the second is the number of sides on the die, and the third
is either None, 'a' signifying advantage, or 'd' signifying
disadvantage
Parameters:
rolls (list): the list of rolls to do
Returns:
results (list): a list of 2 tuples containing the numbers rolled
and the total
total (int): the total for all the rolls
"""
# result variables
results = []
total = 0
# for each roll we need to do
for roll in rolls:
# if it's advantace, handle that
if roll[2] == 'a':
# take the max of 2 y-sided dice x times
result = [max(roll_x_y_sided_dice(2,int(roll[1]))) for _ in range(int(roll[0]))]
elif roll[2] == 'd':
# take the min of 2 y-sided dice x times
result = [min(roll_x_y_sided_dice(2,int(roll[1]))) for _ in range(int(roll[0]))]
else:
# take x, y-sided dice
result = roll_x_y_sided_dice(int(roll[0]), int(roll[1]))
# total them up, add to the running total and the results
s = sum(result)
total += s
results.append((result,s))
# return the generated rolls
return results, total
# if this is the main method
if __name__ == "__main__":
# check for a help message and print it
if len(sys.argv) == 1 or (len(sys.argv) == 2 and sys.argv[1] == 'help'):
print(help_str)
sys.exit(0)
# compile a pattern to match the die roll args
pattern = re.compile(r'^([1-9][0-9]*)d([1-9][0-9]*)(a|d)?$')
# a list of compiled matches
matches = []
# match each roll and get the groups
for arg in sys.argv[1:]:
match = pattern.match(arg)
# bad arg, complain
if not match:
print(f"Bad argument: {arg}")
print(help_str)
sys.exit(1)
matches.append(match.groups())
# do the hard work
results, grand_total = do_rolls(matches)
# print results
for roll, (res, total) in zip(sys.argv[1:], results):
print(f"{roll:<7}: {total}")
print(res)
if len(sys.argv) > 2:
print()
# print grand total
if len(sys.argv) > 2:
print(f"Total: {grand_total}")
|
[
"random.random",
"sys.exit",
"re.compile"
] |
[((2697, 2746), 're.compile', 're.compile', (['"""^([1-9][0-9]*)d([1-9][0-9]*)(a|d)?$"""'], {}), "('^([1-9][0-9]*)d([1-9][0-9]*)(a|d)?$')\n", (2707, 2746), False, 'import re\n'), ((2619, 2630), 'sys.exit', 'sys.exit', (['(0)'], {}), '(0)\n', (2627, 2630), False, 'import sys\n'), ((3037, 3048), 'sys.exit', 'sys.exit', (['(1)'], {}), '(1)\n', (3045, 3048), False, 'import sys\n'), ((1056, 1071), 'random.random', 'random.random', ([], {}), '()\n', (1069, 1071), False, 'import random\n')]
|
"""
Utilities for working with command line strings and arguments
"""
import re
from typing import List, Dict, Optional
DOUBLE_QUOTED_GROUPS = re.compile(r"(\".+?\")")
DOUBLE_QUOTED_STRING = re.compile(r"^\".+\"?")
def argsplit(cmd: str) -> List[str]:
"""
Split a command line string on spaces into an argument list
that can be passed to subprocess.run()
Use doublequotes to preserve spaces.
>>> argsplit(" word1 word2")
['word1', 'word2']
>>> argsplit('word1 word2 "blah blah"')
['word1', 'word2', 'blah blah']
"""
# Strip string of whitespace and remove repeated spaces
cmd = cmd.strip()
# Split into quoted and unquoted chunks
# (This trips up on escaped doublequotes!)
args = []
chunks = DOUBLE_QUOTED_GROUPS.split(cmd)
for chunk in chunks:
if chunk:
if DOUBLE_QUOTED_STRING.fullmatch(chunk):
# Strip then add quoted chunks
args.append(chunk.strip('"'))
else:
# Clean unquoted chunks and further split on spaces
chunk = re.sub(r" +", " ", chunk).strip()
if chunk:
args += chunk.split(" ")
return args
def argbuild(
cmd: str, mapping: Dict[str, str], append_missing_field: Optional[str] = None
) -> List[str]:
"""
Turn a command template string into list of args
suitable for subprocess.run() by replacing fields with values
using Python's str.format_map() function.
:param cmd: command to be turned into list of args
:param mapping: fields and their replacements
:param append_missing_field: if this field wasn't used in cmd, pass it as last arg
:returns: list of args
If `append_missing_field` is specified, it must be in `mapping`
Examples:
>>> argbuild('gedit --new-window', {'fn': '/foo/bar', 'ln': 12})
['gedit', '--new-window']
>>> argbuild('gedit --new-window {fn} {ln}', {'fn': '/foo/bar', 'ln': 12})
['gedit', '--new-window', '/foo/bar', '12']
>>> argbuild('gedit {ln}', {'fn': '/foo/bar', 'ln': 12}, append_missing_field='fn')
['gedit', '12', '/foo/bar']
"""
append_field_used = False
if append_missing_field:
append_map = dict((k, "{" + k + "}") for k, v in mapping.items())
append_map[append_missing_field] = mapping[append_missing_field]
args = []
for arg in argsplit(cmd):
# Track if append_missing_field was used
if append_missing_field and not append_field_used:
# Try replacing the append field and see if string changes
append_field_used = arg != arg.format_map(append_map)
args.append(arg.format_map(mapping))
if append_missing_field and not append_field_used:
args.append(mapping[append_missing_field])
return args
|
[
"re.sub",
"re.compile"
] |
[((145, 170), 're.compile', 're.compile', (['"""(\\\\".+?\\\\")"""'], {}), '(\'(\\\\".+?\\\\")\')\n', (155, 170), False, 'import re\n'), ((193, 217), 're.compile', 're.compile', (['"""^\\\\".+\\\\"?"""'], {}), '(\'^\\\\".+\\\\"?\')\n', (203, 217), False, 'import re\n'), ((1095, 1119), 're.sub', 're.sub', (['""" +"""', '""" """', 'chunk'], {}), "(' +', ' ', chunk)\n", (1101, 1119), False, 'import re\n')]
|
import unittest
from coldtype.pens.cairopen import CairoPen
from pathlib import Path
from coldtype.color import hsl
from coldtype.geometry import Rect
from coldtype.text.composer import StSt, Font
from coldtype.pens.datpen import DATPen, DATPens
from PIL import Image
import drawBot as db
import imagehash
import contextlib
co = Font.Cacheable("assets/ColdtypeObviously-VF.ttf")
renders = Path("test/renders/cairo")
renders.mkdir(parents=True, exist_ok=True)
def hash_img(path):
if path.exists():
return (
imagehash.colorhash(Image.open(path)),
imagehash.average_hash(Image.open(path)))
else:
return -1
@contextlib.contextmanager
def test_image(test:unittest.TestCase, path, rect=Rect(300, 300)):
img = (renders / path)
hash_before = hash_img(img)
if img.exists():
img.unlink()
yield(img, rect)
hash_after = hash_img(img)
test.assertEqual(hash_after, hash_before)
test.assertEqual(img.exists(), True)
class TestCairoPen(unittest.TestCase):
def test_cairo_pdf(self):
r = Rect(300, 300)
pdf = renders / "test_cairo.pdf"
dp = (StSt("CDEL", co, 100, wdth=0.5)
.pens()
.align(r))
CairoPen.Composite(dp, r, pdf)
self.assertEqual(len(dp), 4)
self.assertEqual(type(dp), DATPens)
def test_cairo_png(self):
with test_image(self, "test_cairo.png") as (i, r):
rr = Rect(0, 0, 100, 100)
dp = (DATPen()
.define(r=rr, c=75)
.gs("$r↗ $r↓|↘|$c $r↖|↙|$c")
.align(r)
.scale(1.2)
.rotate(180)
.f(hsl(0.5, a=0.1))
.s(hsl(0.9))
.sw(5))
CairoPen.Composite(dp, r, i)
self.assertEqual(len(dp.value), 4)
self.assertEqual(type(dp), DATPen)
if __name__ == "__main__":
unittest.main()
|
[
"unittest.main",
"coldtype.text.composer.Font.Cacheable",
"coldtype.color.hsl",
"coldtype.pens.cairopen.CairoPen.Composite",
"PIL.Image.open",
"coldtype.pens.datpen.DATPen",
"pathlib.Path",
"coldtype.geometry.Rect",
"coldtype.text.composer.StSt"
] |
[((332, 381), 'coldtype.text.composer.Font.Cacheable', 'Font.Cacheable', (['"""assets/ColdtypeObviously-VF.ttf"""'], {}), "('assets/ColdtypeObviously-VF.ttf')\n", (346, 381), False, 'from coldtype.text.composer import StSt, Font\n'), ((393, 419), 'pathlib.Path', 'Path', (['"""test/renders/cairo"""'], {}), "('test/renders/cairo')\n", (397, 419), False, 'from pathlib import Path\n'), ((735, 749), 'coldtype.geometry.Rect', 'Rect', (['(300)', '(300)'], {}), '(300, 300)\n', (739, 749), False, 'from coldtype.geometry import Rect\n'), ((1928, 1943), 'unittest.main', 'unittest.main', ([], {}), '()\n', (1941, 1943), False, 'import unittest\n'), ((1080, 1094), 'coldtype.geometry.Rect', 'Rect', (['(300)', '(300)'], {}), '(300, 300)\n', (1084, 1094), False, 'from coldtype.geometry import Rect\n'), ((1233, 1263), 'coldtype.pens.cairopen.CairoPen.Composite', 'CairoPen.Composite', (['dp', 'r', 'pdf'], {}), '(dp, r, pdf)\n', (1251, 1263), False, 'from coldtype.pens.cairopen import CairoPen\n'), ((1456, 1476), 'coldtype.geometry.Rect', 'Rect', (['(0)', '(0)', '(100)', '(100)'], {}), '(0, 0, 100, 100)\n', (1460, 1476), False, 'from coldtype.geometry import Rect\n'), ((1769, 1797), 'coldtype.pens.cairopen.CairoPen.Composite', 'CairoPen.Composite', (['dp', 'r', 'i'], {}), '(dp, r, i)\n', (1787, 1797), False, 'from coldtype.pens.cairopen import CairoPen\n'), ((555, 571), 'PIL.Image.open', 'Image.open', (['path'], {}), '(path)\n', (565, 571), False, 'from PIL import Image\n'), ((610, 626), 'PIL.Image.open', 'Image.open', (['path'], {}), '(path)\n', (620, 626), False, 'from PIL import Image\n'), ((1150, 1181), 'coldtype.text.composer.StSt', 'StSt', (['"""CDEL"""', 'co', '(100)'], {'wdth': '(0.5)'}), "('CDEL', co, 100, wdth=0.5)\n", (1154, 1181), False, 'from coldtype.text.composer import StSt, Font\n'), ((1723, 1731), 'coldtype.color.hsl', 'hsl', (['(0.9)'], {}), '(0.9)\n', (1726, 1731), False, 'from coldtype.color import hsl\n'), ((1687, 1702), 'coldtype.color.hsl', 'hsl', (['(0.5)'], {'a': '(0.1)'}), '(0.5, a=0.1)\n', (1690, 1702), False, 'from coldtype.color import hsl\n'), ((1495, 1503), 'coldtype.pens.datpen.DATPen', 'DATPen', ([], {}), '()\n', (1501, 1503), False, 'from coldtype.pens.datpen import DATPen, DATPens\n')]
|
from sklearn.base import BaseEstimator, TransformerMixin
from sklearn.decomposition import PCA, TruncatedSVD, FastICA
from sklearn.random_projection import GaussianRandomProjection, SparseRandomProjection
import abc
class ColumnBasedFeatureGenerationStrategyAbstract(BaseEstimator, TransformerMixin):
"""Provides abstraction for features generation"""
__metaclass__ = abc.ABCMeta
@abc.abstractmethod
def fit(self, train):
"""Required Method"""
@abc.abstractmethod
def transform(self, train):
"""Required Method"""
@abc.abstractmethod
def featurename(self, colname1, colname2):
"""Required Method"""
@abc.abstractmethod
def equivalent_featurenames(self, colname1, colname2):
"""Required Method. Used to reflect commutativity."""
class SumFeatureGenerationStrategy(ColumnBasedFeatureGenerationStrategyAbstract):
def fit(self, train, val, test, colname1, colname2):
train[self.featurename(colname1, colname2)] = train[[colname1, colname2]].sum(axis=1)
val[self.featurename(colname1, colname2)] = val[[colname1, colname2]].sum(axis=1)
test[self.featurename(colname1, colname2)] = test[[colname1, colname2]].sum(axis=1)
return (train, val, test)
def featurename(self, colname1, colname2):
return "{}_sum_{}".format(colname1, colname2)
def equivalent_featurenames(self, colname1, colname2):
return [self.featurename(colname1, colname2), self.featurename(colname2, colname1)]
class DiffFeatureGenerationStrategy(ColumnBasedFeatureGenerationStrategyAbstract):
def generate(self, train, val, test, colname1, colname2):
train[self.featurename(colname1, colname2)]=train[colname1]-train[colname2]
val[self.featurename(colname1, colname2)]=train[colname1]-val[colname2]
test[self.featurename(colname1, colname2)]=test[colname1]-test[colname2]
return (train, val, test)
def featurename(self, colname1, colname2):
return "{}_diff_{}".format(colname1, colname2)
def equivalent_featurenames(self, colname1, colname2):
return [self.featurename(colname1, colname2)]
class ProdFeatureGenerationStrategy(ColumnBasedFeatureGenerationStrategyAbstract):
def generate(self, train, val, test, colname1, colname2):
train[self.featurename(colname1, colname2)]=train[colname1]*train[colname2]
val[self.featurename(colname1, colname2)]=val[colname1]*val[colname2]
test[self.featurename(colname1, colname2)]=test[colname1]*test[colname2]
return (train, val, test)
def featurename(self, colname1, colname2):
return "{}_prod_{}".format(colname1, colname2)
def equivalent_featurenames(self, colname1, colname2):
return [self.featurename(colname1, colname2), self.featurename(colname2, colname1)]
class DivFeatureGenerationStrategy(ColumnBasedFeatureGenerationStrategyAbstract):
def generate(self, train, val, test, colname1, colname2):
train[self.featurename(colname1, colname2)]=train[colname1]/train[colname2]
val[self.featurename(colname1, colname2)]=val[colname1]/val[colname2]
test[self.featurename(colname1, colname2)]=test[colname1]/test[colname2]
return (train, val, test)
def featurename(self, colname1, colname2):
return "{}_div_{}".format(colname1, colname2)
def equivalent_featurenames(self, colname1, colname2):
return [self.featurename(colname1, colname2)]
class AvgFeatureGenerationStrategy(ColumnBasedFeatureGenerationStrategyAbstract):
def generate(self, train, val, test, colname1, colname2):
train[self.featurename(colname1, colname2)]=train[[colname1, colname2]].mean(axis=1)
val[self.featurename(colname1, colname2)]=val[[colname1, colname2]].mean(axis=1)
test[self.featurename(colname1, colname2)]=test[[colname1, colname2]].mean(axis=1)
return (train, val, test)
def featurename(self, colname1, colname2):
return "{}_avg_{}".format(colname1, colname2)
def equivalent_featurenames(self, colname1, colname2):
return [self.featurename(colname1, colname2), self.featurename(colname2, colname1)]
class MaxFeatureGenerationStrategy(ColumnBasedFeatureGenerationStrategyAbstract):
def generate(self, train, val, test, colname1, colname2):
train[self.featurename(colname1, colname2)]=train[[colname1, colname2]].max(axis=1)
val[self.featurename(colname1, colname2)]=val[[colname1, colname2]].max(axis=1)
test[self.featurename(colname1, colname2)]=test[[colname1, colname2]].max(axis=1)
return (train, val, test)
def featurename(self, colname1, colname2):
return "{}_max_{}".format(colname1, colname2)
def equivalent_featurenames(self, colname1, colname2):
return [self.featurename(colname1, colname2), self.featurename(colname2, colname1)]
class MinFeatureGenerationStrategy(ColumnBasedFeatureGenerationStrategyAbstract):
def generate(self, train, val, test, colname1, colname2):
train[self.featurename(colname1, colname2)]=train[[colname1, colname2]].min(axis=1)
val[self.featurename(colname1, colname2)]=val[[colname1, colname2]].min(axis=1)
test[self.featurename(colname1, colname2)]=test[[colname1, colname2]].min(axis=1)
return (train, val, test)
def featurename(self, colname1, colname2):
return "{}_min_{}".format(colname1, colname2)
def equivalent_featurenames(self, colname1, colname2):
return [self.featurename(colname1, colname2), self.featurename(colname2, colname1)]
# Features based on decomposition methods
class DecompositionBasedFeatureGenerationStrategyAbstract(object):
"""Provides abstraction for features generation"""
__metaclass__ = abc.ABCMeta
@abc.abstractmethod
def generate(self, train, val, test):
"""Required Method"""
@abc.abstractmethod
def featurename(self, idx):
"""Required Method"""
@abc.abstractmethod
def equivalent_featurenames(self, idx):
"""Required Method. Used to reflect commutativity."""
class PCAFeatureGenerationStrategy(DecompositionBasedFeatureGenerationStrategyAbstract):
def generate(self, train, val, test, n_comps):
decomposer = PCA(n_components=n_comps, random_state=1234)
results_train = decomposer.fit_transform(train)
results_val = decomposer.fit_transform(val)
results_test = decomposer.transform(test)
for i in range(1, n_comps + 1):
train[self.featurename(i)] = results_train[:, i - 1]
val[self.featurename(i)] = results_val[:, i - 1]
test[self.featurename(i)] = results_test[:, i - 1]
return (train, val, test)
def featurename(self, idx):
return "pca_{}".format(str(idx))
def equivalent_featurenames(self, idx):
return [self.featurename(idx)]
class TSVDFeatureGenerationStrategy(DecompositionBasedFeatureGenerationStrategyAbstract):
def generate(self, train, val, test, n_comps):
decomposer = TruncatedSVD(n_components=n_comps, random_state=1234)
results_train = decomposer.fit_transform(train)
results_val = decomposer.fit_transform(val)
results_test = decomposer.transform(test)
for i in range(1, n_comps + 1):
train[self.featurename(i)] = results_train[:, i - 1]
val[self.featurename(i)] = results_val[:, i - 1]
test[self.featurename(i)] = results_test[:, i - 1]
return (train, val, test)
def featurename(self, idx):
return "tsvd_{}".format(str(idx))
def equivalent_featurenames(self, idx):
return [self.featurename(idx)]
class ICAFeatureGenerationStrategy(DecompositionBasedFeatureGenerationStrategyAbstract):
def generate(self, train, val, test, n_comps):
decomposer = FastICA(n_components=n_comps, random_state=1234)
results_train = decomposer.fit_transform(train)
results_val = decomposer.fit_transform(val)
results_test = decomposer.transform(test)
for i in range(1, n_comps + 1):
train[self.featurename(i)] = results_train[:, i - 1]
val[self.featurename(i)] = results_val[:, i - 1]
test[self.featurename(i)] = results_test[:, i - 1]
return (train, val, test)
def featurename(self, idx):
return "ica_{}".format(str(idx))
def equivalent_featurenames(self, idx):
return [self.featurename(idx)]
class GRPFeatureGenerationStrategy(DecompositionBasedFeatureGenerationStrategyAbstract):
def generate(self, train, val, test, n_comps):
decomposer = GaussianRandomProjection(n_components=n_comps, random_state=1234)
results_train = decomposer.fit_transform(train)
results_val = decomposer.fit_transform(val)
results_test = decomposer.transform(test)
for i in range(1, n_comps + 1):
train[self.featurename(i)] = results_train[:, i - 1]
val[self.featurename(i)] = results_val[:, i - 1]
test[self.featurename(i)] = results_test[:, i - 1]
return (train, val, test)
def featurename(self, idx):
return "grp_{}".format(str(idx))
def equivalent_featurenames(self, idx):
return [self.featurename(idx)]
class SRPFeatureGenerationStrategy(DecompositionBasedFeatureGenerationStrategyAbstract):
def generate(self, train, val, test, n_comps):
decomposer = SparseRandomProjection(n_components=n_comps, random_state=1234)
results_train = decomposer.fit_transform(train)
results_val = decomposer.fit_transform(val)
results_test = decomposer.transform(test)
for i in range(1, n_comps + 1):
train[self.featurename(i)] = results_train[:, i - 1]
val[self.featurename(i)] = results_val[:, i - 1]
test[self.featurename(i)] = results_test[:, i - 1]
return (train, val, test)
def featurename(self, idx):
return "grp_{}".format(str(idx))
def equivalent_featurenames(self, idx):
return [self.featurename(idx)]
|
[
"sklearn.decomposition.FastICA",
"sklearn.random_projection.GaussianRandomProjection",
"sklearn.decomposition.TruncatedSVD",
"sklearn.random_projection.SparseRandomProjection",
"sklearn.decomposition.PCA"
] |
[((6255, 6299), 'sklearn.decomposition.PCA', 'PCA', ([], {'n_components': 'n_comps', 'random_state': '(1234)'}), '(n_components=n_comps, random_state=1234)\n', (6258, 6299), False, 'from sklearn.decomposition import PCA, TruncatedSVD, FastICA\n'), ((7042, 7095), 'sklearn.decomposition.TruncatedSVD', 'TruncatedSVD', ([], {'n_components': 'n_comps', 'random_state': '(1234)'}), '(n_components=n_comps, random_state=1234)\n', (7054, 7095), False, 'from sklearn.decomposition import PCA, TruncatedSVD, FastICA\n'), ((7838, 7886), 'sklearn.decomposition.FastICA', 'FastICA', ([], {'n_components': 'n_comps', 'random_state': '(1234)'}), '(n_components=n_comps, random_state=1234)\n', (7845, 7886), False, 'from sklearn.decomposition import PCA, TruncatedSVD, FastICA\n'), ((8628, 8693), 'sklearn.random_projection.GaussianRandomProjection', 'GaussianRandomProjection', ([], {'n_components': 'n_comps', 'random_state': '(1234)'}), '(n_components=n_comps, random_state=1234)\n', (8652, 8693), False, 'from sklearn.random_projection import GaussianRandomProjection, SparseRandomProjection\n'), ((9435, 9498), 'sklearn.random_projection.SparseRandomProjection', 'SparseRandomProjection', ([], {'n_components': 'n_comps', 'random_state': '(1234)'}), '(n_components=n_comps, random_state=1234)\n', (9457, 9498), False, 'from sklearn.random_projection import GaussianRandomProjection, SparseRandomProjection\n')]
|
# Imports modules
import argparse
import torch
from torchvision import transforms,datasets,models
from PIL import Image
import numpy as np
def get_input_args_train():
parser = argparse.ArgumentParser()
parser.add_argument('--data_dir', type = str, default = 'flowers',
help='dataset directory')
parser.add_argument('--save_dir', type = str, default = '/home/workspace/ImageClassifier/',
help = 'path to the folder for saving checkpoints')
parser.add_argument('--arch',type = str, default = 'densenet',
help = 'NN Model Architecture vgg or densenet. default = densenet')
parser.add_argument('--learning_rate',type = float, default = 0.001,
help = 'value of learning rate')
parser.add_argument('--hidden_units',type = int, default = 512,
help = 'number of hidden units')
parser.add_argument('--epochs',type = int, default = 10,
help = 'number of iterations for training network')
parser.add_argument('--gpu', type = bool, default = 'False',
help='device to run your model : gpu or cpu. Default = False i.e cpu')
return parser.parse_args()
def get_input_args_predict():
parser = argparse.ArgumentParser()
parser.add_argument('--image_path', type = str, default = '/home/workspace/ImageClassifier/flowers/test/1/image_06743.jpg',
help = 'path to image')
parser.add_argument('--checkpoint',type = str, default = 'checkpoint.pth',
help = 'trained model checkpoint')
parser.add_argument('--top_k',type = int, default = 3,
help = 'number of classes with highest prob.')
parser.add_argument('--category_names', default = 'cat_to_name.json',
help = 'mapping of categories to real names file')
parser.add_argument('--gpu', type = bool, default = 'False',
help='device to run your model : gpu or cpu.Default = False i.e cpu')
return parser.parse_args()
def process_data(train_dir, test_dir, valid_dir):
train_transforms = transforms.Compose([transforms.RandomRotation(30),
transforms.RandomResizedCrop(224),
transforms.RandomHorizontalFlip(),
transforms.ToTensor(),
transforms.Normalize([0.485, 0.456, 0.406],
[0.229, 0.224, 0.225])])
test_transforms = transforms.Compose([transforms.Resize(255),
transforms.CenterCrop(224),
transforms.ToTensor(),
transforms.Normalize([0.485, 0.456, 0.406],
[0.229, 0.224, 0.225])])
trainsets = datasets.ImageFolder(train_dir, transform = train_transforms)
testsets = datasets.ImageFolder(test_dir, transform = test_transforms)
validsets = datasets.ImageFolder(valid_dir, transform = test_transforms)
trainloader = torch.utils.data.DataLoader(trainsets, batch_size=64, shuffle=True)
testloader = torch.utils.data.DataLoader(testsets, batch_size=64)
validloader = torch.utils.data.DataLoader(validsets, batch_size=64)
return trainloader, testloader, validloader, trainsets
def process_image(image):
''' Scales, crops, and normalizes a PIL image for a PyTorch model,
        returns a Numpy array
'''
image = Image.open(image)
if image.size[0] > image.size[1]:
aspect = image.size[1] / 256
new_size = (image.size[0] / aspect, 256)
else:
aspect = image.size[0] / 256
new_size = (256, image.size[1] / aspect)
image.thumbnail(new_size, Image.ANTIALIAS)
# crop out center of image
width, height = image.size # Get dimensions
left = (width - 224) / 2
top = (height - 224) / 2
right = (width + 224) / 2
bottom = (height + 224) / 2
image = image.crop((left, top, right, bottom))
np_image = np.array(image)
mean = np.array([0.485, 0.456, 0.406])
std = np.array([0.229, 0.224, 0.225])
np_image = np_image / 255.0
np_image = (np_image - mean)/std
np_image = np.transpose(np_image, (2, 0, 1))
return np_image
|
[
"argparse.ArgumentParser",
"torch.utils.data.DataLoader",
"torchvision.transforms.RandomHorizontalFlip",
"torchvision.transforms.RandomRotation",
"torchvision.transforms.Normalize",
"numpy.transpose",
"PIL.Image.open",
"torchvision.datasets.ImageFolder",
"numpy.array",
"torchvision.transforms.CenterCrop",
"torchvision.transforms.RandomResizedCrop",
"torchvision.transforms.Resize",
"torchvision.transforms.ToTensor"
] |
[((188, 213), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (211, 213), False, 'import argparse\n'), ((1301, 1326), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (1324, 1326), False, 'import argparse\n'), ((3097, 3156), 'torchvision.datasets.ImageFolder', 'datasets.ImageFolder', (['train_dir'], {'transform': 'train_transforms'}), '(train_dir, transform=train_transforms)\n', (3117, 3156), False, 'from torchvision import transforms, datasets, models\n'), ((3174, 3231), 'torchvision.datasets.ImageFolder', 'datasets.ImageFolder', (['test_dir'], {'transform': 'test_transforms'}), '(test_dir, transform=test_transforms)\n', (3194, 3231), False, 'from torchvision import transforms, datasets, models\n'), ((3250, 3308), 'torchvision.datasets.ImageFolder', 'datasets.ImageFolder', (['valid_dir'], {'transform': 'test_transforms'}), '(valid_dir, transform=test_transforms)\n', (3270, 3308), False, 'from torchvision import transforms, datasets, models\n'), ((3354, 3421), 'torch.utils.data.DataLoader', 'torch.utils.data.DataLoader', (['trainsets'], {'batch_size': '(64)', 'shuffle': '(True)'}), '(trainsets, batch_size=64, shuffle=True)\n', (3381, 3421), False, 'import torch\n'), ((3439, 3491), 'torch.utils.data.DataLoader', 'torch.utils.data.DataLoader', (['testsets'], {'batch_size': '(64)'}), '(testsets, batch_size=64)\n', (3466, 3491), False, 'import torch\n'), ((3510, 3563), 'torch.utils.data.DataLoader', 'torch.utils.data.DataLoader', (['validsets'], {'batch_size': '(64)'}), '(validsets, batch_size=64)\n', (3537, 3563), False, 'import torch\n'), ((3798, 3815), 'PIL.Image.open', 'Image.open', (['image'], {}), '(image)\n', (3808, 3815), False, 'from PIL import Image\n'), ((4374, 4389), 'numpy.array', 'np.array', (['image'], {}), '(image)\n', (4382, 4389), True, 'import numpy as np\n'), ((4401, 4432), 'numpy.array', 'np.array', (['[0.485, 0.456, 0.406]'], {}), '([0.485, 0.456, 0.406])\n', (4409, 4432), True, 'import numpy as np\n'), ((4443, 4474), 'numpy.array', 'np.array', (['[0.229, 0.224, 0.225]'], {}), '([0.229, 0.224, 0.225])\n', (4451, 4474), True, 'import numpy as np\n'), ((4568, 4601), 'numpy.transpose', 'np.transpose', (['np_image', '(2, 0, 1)'], {}), '(np_image, (2, 0, 1))\n', (4580, 4601), True, 'import numpy as np\n'), ((2276, 2305), 'torchvision.transforms.RandomRotation', 'transforms.RandomRotation', (['(30)'], {}), '(30)\n', (2301, 2305), False, 'from torchvision import transforms, datasets, models\n'), ((2350, 2383), 'torchvision.transforms.RandomResizedCrop', 'transforms.RandomResizedCrop', (['(224)'], {}), '(224)\n', (2378, 2383), False, 'from torchvision import transforms, datasets, models\n'), ((2428, 2461), 'torchvision.transforms.RandomHorizontalFlip', 'transforms.RandomHorizontalFlip', ([], {}), '()\n', (2459, 2461), False, 'from torchvision import transforms, datasets, models\n'), ((2506, 2527), 'torchvision.transforms.ToTensor', 'transforms.ToTensor', ([], {}), '()\n', (2525, 2527), False, 'from torchvision import transforms, datasets, models\n'), ((2572, 2638), 'torchvision.transforms.Normalize', 'transforms.Normalize', (['[0.485, 0.456, 0.406]', '[0.229, 0.224, 0.225]'], {}), '([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])\n', (2592, 2638), False, 'from torchvision import transforms, datasets, models\n'), ((2747, 2769), 'torchvision.transforms.Resize', 'transforms.Resize', (['(255)'], {}), '(255)\n', (2764, 2769), False, 'from torchvision import transforms, datasets, models\n'), ((2813, 2839), 
'torchvision.transforms.CenterCrop', 'transforms.CenterCrop', (['(224)'], {}), '(224)\n', (2834, 2839), False, 'from torchvision import transforms, datasets, models\n'), ((2883, 2904), 'torchvision.transforms.ToTensor', 'transforms.ToTensor', ([], {}), '()\n', (2902, 2904), False, 'from torchvision import transforms, datasets, models\n'), ((2948, 3014), 'torchvision.transforms.Normalize', 'transforms.Normalize', (['[0.485, 0.456, 0.406]', '[0.229, 0.224, 0.225]'], {}), '([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])\n', (2968, 3014), False, 'from torchvision import transforms, datasets, models\n')]
|
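For context, a hedged sketch of how the helpers in the row above are usually chained together; the image path is a placeholder and the classifier is assumed to be loaded elsewhere.
import torch

np_image = process_image('example.jpg')                     # placeholder path
tensor = torch.from_numpy(np_image).float().unsqueeze(0)    # add a batch dimension
# with torch.no_grad():
#     logits = model(tensor)                              # 'model' assumed to be a loaded network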
from pyjoystick.sdl2 import sdl2, Key, Joystick, ControllerEventLoop, get_mapping, set_mapping
if __name__ == '__main__':
import time
import argparse
devices = Joystick.get_joysticks()
print("Devices:", devices)
monitor = devices[0]
monitor_keytypes = [Key.AXIS]
for k, v in get_mapping(monitor).items():
print(k, ":", v)
set_mapping(monitor, {'lefttrigger': Key(Key.BUTTON, 0), 'righttrigger': Key(Key.BUTTON, 1),
'a': Key(Key.AXIS, 2), 'b': Key(Key.AXIS, 5)})
print()
print("New mapping:")
for k, v in get_mapping(monitor).items():
print(k, ":", v)
#################################
def print_add(joy):
print('Added', joy)
def print_remove(joy):
print('Removed', joy)
def key_received(key):
# Make joystick key and event key values match
monitor.update_key(key)
# Get mapping name
key_name = key.joystick.key_mapping.get(key, None)
if not key_name:
return
if key_name == 'a':
# A button pressed do action
print('Action on button A')
else:
print('Key:', key_name, 'Value:', key.value, 'Joystick:', key.joystick)
ControllerEventLoop(print_add, print_remove, key_received).run()
|
[
"pyjoystick.sdl2.ControllerEventLoop",
"pyjoystick.sdl2.Joystick.get_joysticks",
"pyjoystick.sdl2.Key",
"pyjoystick.sdl2.get_mapping"
] |
[((174, 198), 'pyjoystick.sdl2.Joystick.get_joysticks', 'Joystick.get_joysticks', ([], {}), '()\n', (196, 198), False, 'from pyjoystick.sdl2 import sdl2, Key, Joystick, ControllerEventLoop, get_mapping, set_mapping\n'), ((1183, 1241), 'pyjoystick.sdl2.ControllerEventLoop', 'ControllerEventLoop', (['print_add', 'print_remove', 'key_received'], {}), '(print_add, print_remove, key_received)\n', (1202, 1241), False, 'from pyjoystick.sdl2 import sdl2, Key, Joystick, ControllerEventLoop, get_mapping, set_mapping\n'), ((307, 327), 'pyjoystick.sdl2.get_mapping', 'get_mapping', (['monitor'], {}), '(monitor)\n', (318, 327), False, 'from pyjoystick.sdl2 import sdl2, Key, Joystick, ControllerEventLoop, get_mapping, set_mapping\n'), ((404, 422), 'pyjoystick.sdl2.Key', 'Key', (['Key.BUTTON', '(0)'], {}), '(Key.BUTTON, 0)\n', (407, 422), False, 'from pyjoystick.sdl2 import sdl2, Key, Joystick, ControllerEventLoop, get_mapping, set_mapping\n'), ((440, 458), 'pyjoystick.sdl2.Key', 'Key', (['Key.BUTTON', '(1)'], {}), '(Key.BUTTON, 1)\n', (443, 458), False, 'from pyjoystick.sdl2 import sdl2, Key, Joystick, ControllerEventLoop, get_mapping, set_mapping\n'), ((491, 507), 'pyjoystick.sdl2.Key', 'Key', (['Key.AXIS', '(2)'], {}), '(Key.AXIS, 2)\n', (494, 507), False, 'from pyjoystick.sdl2 import sdl2, Key, Joystick, ControllerEventLoop, get_mapping, set_mapping\n'), ((514, 530), 'pyjoystick.sdl2.Key', 'Key', (['Key.AXIS', '(5)'], {}), '(Key.AXIS, 5)\n', (517, 530), False, 'from pyjoystick.sdl2 import sdl2, Key, Joystick, ControllerEventLoop, get_mapping, set_mapping\n'), ((588, 608), 'pyjoystick.sdl2.get_mapping', 'get_mapping', (['monitor'], {}), '(monitor)\n', (599, 608), False, 'from pyjoystick.sdl2 import sdl2, Key, Joystick, ControllerEventLoop, get_mapping, set_mapping\n')]
|
import unittest
from fp.traindata_samplers import CompleteData
from fp.missingvalue_handlers import CompleteCaseAnalysis
from fp.scalers import NamedStandardScaler
from fp.learners import NonTunedLogisticRegression, NonTunedDecisionTree
from fp.pre_processors import NoPreProcessing
from fp.post_processors import NoPostProcessing
from fp.dataset_experiments import AdultDatasetWhiteMaleExperiment, AdultDatasetMaleExperiment, AdultDatasetWhiteExperiment
from fp.dataset_experiments import PropublicaDatasetWhiteExperiment, GermanCreditDatasetSexExperiment, RicciRaceExperiment
from fp.dataset_experiments import GiveMeSomeCreditExperiment
class TestSuiteDatasets(unittest.TestCase):
def test_AdultDatasetWhiteMaleExperiment(self):
self.experiment = AdultDatasetWhiteMaleExperiment(
fixed_random_seed = 0xabcd,
train_data_sampler = CompleteData(),
missing_value_handler = CompleteCaseAnalysis(),
numeric_attribute_scaler = NamedStandardScaler(),
learners = [NonTunedLogisticRegression(), NonTunedDecisionTree()],
pre_processors = [NoPreProcessing()],
post_processors = [NoPostProcessing()]
)
self.experiment.run()
def test_AdultDatasetMaleExperiment(self):
self.experiment = AdultDatasetMaleExperiment(
fixed_random_seed = 0xabcd,
train_data_sampler = CompleteData(),
missing_value_handler = CompleteCaseAnalysis(),
numeric_attribute_scaler = NamedStandardScaler(),
learners = [NonTunedLogisticRegression(), NonTunedDecisionTree()],
pre_processors = [NoPreProcessing()],
post_processors = [NoPostProcessing()]
)
self.experiment.run()
def test_AdultDatasetWhiteExperiment(self):
self.experiment = AdultDatasetWhiteExperiment(
fixed_random_seed = 0xabcd,
train_data_sampler = CompleteData(),
missing_value_handler = CompleteCaseAnalysis(),
numeric_attribute_scaler = NamedStandardScaler(),
learners = [NonTunedLogisticRegression(), NonTunedDecisionTree()],
pre_processors = [NoPreProcessing()],
post_processors = [NoPostProcessing()]
)
self.experiment.run()
def test_PropublicaDatasetWhiteExperiment(self):
self.experiment = PropublicaDatasetWhiteExperiment(
fixed_random_seed = 0xabcd,
train_data_sampler = CompleteData(),
missing_value_handler = CompleteCaseAnalysis(),
numeric_attribute_scaler = NamedStandardScaler(),
learners = [NonTunedLogisticRegression(), NonTunedDecisionTree()],
pre_processors = [NoPreProcessing()],
post_processors = [NoPostProcessing()]
)
self.experiment.run()
def test_GermanCreditDatasetSexExperiment(self):
self.experiment = GermanCreditDatasetSexExperiment(
fixed_random_seed = 0xabcd,
train_data_sampler = CompleteData(),
missing_value_handler = CompleteCaseAnalysis(),
numeric_attribute_scaler = NamedStandardScaler(),
learners = [NonTunedLogisticRegression(), NonTunedDecisionTree()],
pre_processors = [NoPreProcessing()],
post_processors = [NoPostProcessing()]
)
self.experiment.run()
def test_RicciRaceExperiment(self):
self.experiment = RicciRaceExperiment(
fixed_random_seed = 0xabcd,
train_data_sampler = CompleteData(),
missing_value_handler = CompleteCaseAnalysis(),
numeric_attribute_scaler = NamedStandardScaler(),
learners = [NonTunedLogisticRegression(), NonTunedDecisionTree()],
pre_processors = [NoPreProcessing()],
post_processors = [NoPostProcessing()]
)
self.experiment.run()
def test_GiveMeSomeCreditExperiment(self):
self.experiment = GiveMeSomeCreditExperiment(
fixed_random_seed = 0xabcd,
train_data_sampler = CompleteData(),
missing_value_handler = CompleteCaseAnalysis(),
numeric_attribute_scaler = NamedStandardScaler(),
learners = [NonTunedLogisticRegression(), NonTunedDecisionTree()],
pre_processors = [NoPreProcessing()],
post_processors = [NoPostProcessing()]
)
self.experiment.run()
if __name__ == '__main__':
unittest.main()
|
[
"unittest.main",
"fp.missingvalue_handlers.CompleteCaseAnalysis",
"fp.learners.NonTunedLogisticRegression",
"fp.scalers.NamedStandardScaler",
"fp.learners.NonTunedDecisionTree",
"fp.post_processors.NoPostProcessing",
"fp.pre_processors.NoPreProcessing",
"fp.traindata_samplers.CompleteData"
] |
[((4582, 4597), 'unittest.main', 'unittest.main', ([], {}), '()\n', (4595, 4597), False, 'import unittest\n'), ((892, 906), 'fp.traindata_samplers.CompleteData', 'CompleteData', ([], {}), '()\n', (904, 906), False, 'from fp.traindata_samplers import CompleteData\n'), ((945, 967), 'fp.missingvalue_handlers.CompleteCaseAnalysis', 'CompleteCaseAnalysis', ([], {}), '()\n', (965, 967), False, 'from fp.missingvalue_handlers import CompleteCaseAnalysis\n'), ((1009, 1030), 'fp.scalers.NamedStandardScaler', 'NamedStandardScaler', ([], {}), '()\n', (1028, 1030), False, 'from fp.scalers import NamedStandardScaler\n'), ((1437, 1451), 'fp.traindata_samplers.CompleteData', 'CompleteData', ([], {}), '()\n', (1449, 1451), False, 'from fp.traindata_samplers import CompleteData\n'), ((1490, 1512), 'fp.missingvalue_handlers.CompleteCaseAnalysis', 'CompleteCaseAnalysis', ([], {}), '()\n', (1510, 1512), False, 'from fp.missingvalue_handlers import CompleteCaseAnalysis\n'), ((1554, 1575), 'fp.scalers.NamedStandardScaler', 'NamedStandardScaler', ([], {}), '()\n', (1573, 1575), False, 'from fp.scalers import NamedStandardScaler\n'), ((1984, 1998), 'fp.traindata_samplers.CompleteData', 'CompleteData', ([], {}), '()\n', (1996, 1998), False, 'from fp.traindata_samplers import CompleteData\n'), ((2037, 2059), 'fp.missingvalue_handlers.CompleteCaseAnalysis', 'CompleteCaseAnalysis', ([], {}), '()\n', (2057, 2059), False, 'from fp.missingvalue_handlers import CompleteCaseAnalysis\n'), ((2101, 2122), 'fp.scalers.NamedStandardScaler', 'NamedStandardScaler', ([], {}), '()\n', (2120, 2122), False, 'from fp.scalers import NamedStandardScaler\n'), ((2541, 2555), 'fp.traindata_samplers.CompleteData', 'CompleteData', ([], {}), '()\n', (2553, 2555), False, 'from fp.traindata_samplers import CompleteData\n'), ((2594, 2616), 'fp.missingvalue_handlers.CompleteCaseAnalysis', 'CompleteCaseAnalysis', ([], {}), '()\n', (2614, 2616), False, 'from fp.missingvalue_handlers import CompleteCaseAnalysis\n'), ((2658, 2679), 'fp.scalers.NamedStandardScaler', 'NamedStandardScaler', ([], {}), '()\n', (2677, 2679), False, 'from fp.scalers import NamedStandardScaler\n'), ((3098, 3112), 'fp.traindata_samplers.CompleteData', 'CompleteData', ([], {}), '()\n', (3110, 3112), False, 'from fp.traindata_samplers import CompleteData\n'), ((3151, 3173), 'fp.missingvalue_handlers.CompleteCaseAnalysis', 'CompleteCaseAnalysis', ([], {}), '()\n', (3171, 3173), False, 'from fp.missingvalue_handlers import CompleteCaseAnalysis\n'), ((3215, 3236), 'fp.scalers.NamedStandardScaler', 'NamedStandardScaler', ([], {}), '()\n', (3234, 3236), False, 'from fp.scalers import NamedStandardScaler\n'), ((3629, 3643), 'fp.traindata_samplers.CompleteData', 'CompleteData', ([], {}), '()\n', (3641, 3643), False, 'from fp.traindata_samplers import CompleteData\n'), ((3682, 3704), 'fp.missingvalue_handlers.CompleteCaseAnalysis', 'CompleteCaseAnalysis', ([], {}), '()\n', (3702, 3704), False, 'from fp.missingvalue_handlers import CompleteCaseAnalysis\n'), ((3746, 3767), 'fp.scalers.NamedStandardScaler', 'NamedStandardScaler', ([], {}), '()\n', (3765, 3767), False, 'from fp.scalers import NamedStandardScaler\n'), ((4182, 4196), 'fp.traindata_samplers.CompleteData', 'CompleteData', ([], {}), '()\n', (4194, 4196), False, 'from fp.traindata_samplers import CompleteData\n'), ((4235, 4257), 'fp.missingvalue_handlers.CompleteCaseAnalysis', 'CompleteCaseAnalysis', ([], {}), '()\n', (4255, 4257), False, 'from fp.missingvalue_handlers import CompleteCaseAnalysis\n'), ((4299, 4320), 
'fp.scalers.NamedStandardScaler', 'NamedStandardScaler', ([], {}), '()\n', (4318, 4320), False, 'from fp.scalers import NamedStandardScaler\n'), ((1057, 1085), 'fp.learners.NonTunedLogisticRegression', 'NonTunedLogisticRegression', ([], {}), '()\n', (1083, 1085), False, 'from fp.learners import NonTunedLogisticRegression, NonTunedDecisionTree\n'), ((1087, 1109), 'fp.learners.NonTunedDecisionTree', 'NonTunedDecisionTree', ([], {}), '()\n', (1107, 1109), False, 'from fp.learners import NonTunedLogisticRegression, NonTunedDecisionTree\n'), ((1143, 1160), 'fp.pre_processors.NoPreProcessing', 'NoPreProcessing', ([], {}), '()\n', (1158, 1160), False, 'from fp.pre_processors import NoPreProcessing\n'), ((1195, 1213), 'fp.post_processors.NoPostProcessing', 'NoPostProcessing', ([], {}), '()\n', (1211, 1213), False, 'from fp.post_processors import NoPostProcessing\n'), ((1602, 1630), 'fp.learners.NonTunedLogisticRegression', 'NonTunedLogisticRegression', ([], {}), '()\n', (1628, 1630), False, 'from fp.learners import NonTunedLogisticRegression, NonTunedDecisionTree\n'), ((1632, 1654), 'fp.learners.NonTunedDecisionTree', 'NonTunedDecisionTree', ([], {}), '()\n', (1652, 1654), False, 'from fp.learners import NonTunedLogisticRegression, NonTunedDecisionTree\n'), ((1688, 1705), 'fp.pre_processors.NoPreProcessing', 'NoPreProcessing', ([], {}), '()\n', (1703, 1705), False, 'from fp.pre_processors import NoPreProcessing\n'), ((1740, 1758), 'fp.post_processors.NoPostProcessing', 'NoPostProcessing', ([], {}), '()\n', (1756, 1758), False, 'from fp.post_processors import NoPostProcessing\n'), ((2149, 2177), 'fp.learners.NonTunedLogisticRegression', 'NonTunedLogisticRegression', ([], {}), '()\n', (2175, 2177), False, 'from fp.learners import NonTunedLogisticRegression, NonTunedDecisionTree\n'), ((2179, 2201), 'fp.learners.NonTunedDecisionTree', 'NonTunedDecisionTree', ([], {}), '()\n', (2199, 2201), False, 'from fp.learners import NonTunedLogisticRegression, NonTunedDecisionTree\n'), ((2235, 2252), 'fp.pre_processors.NoPreProcessing', 'NoPreProcessing', ([], {}), '()\n', (2250, 2252), False, 'from fp.pre_processors import NoPreProcessing\n'), ((2287, 2305), 'fp.post_processors.NoPostProcessing', 'NoPostProcessing', ([], {}), '()\n', (2303, 2305), False, 'from fp.post_processors import NoPostProcessing\n'), ((2706, 2734), 'fp.learners.NonTunedLogisticRegression', 'NonTunedLogisticRegression', ([], {}), '()\n', (2732, 2734), False, 'from fp.learners import NonTunedLogisticRegression, NonTunedDecisionTree\n'), ((2736, 2758), 'fp.learners.NonTunedDecisionTree', 'NonTunedDecisionTree', ([], {}), '()\n', (2756, 2758), False, 'from fp.learners import NonTunedLogisticRegression, NonTunedDecisionTree\n'), ((2792, 2809), 'fp.pre_processors.NoPreProcessing', 'NoPreProcessing', ([], {}), '()\n', (2807, 2809), False, 'from fp.pre_processors import NoPreProcessing\n'), ((2844, 2862), 'fp.post_processors.NoPostProcessing', 'NoPostProcessing', ([], {}), '()\n', (2860, 2862), False, 'from fp.post_processors import NoPostProcessing\n'), ((3263, 3291), 'fp.learners.NonTunedLogisticRegression', 'NonTunedLogisticRegression', ([], {}), '()\n', (3289, 3291), False, 'from fp.learners import NonTunedLogisticRegression, NonTunedDecisionTree\n'), ((3293, 3315), 'fp.learners.NonTunedDecisionTree', 'NonTunedDecisionTree', ([], {}), '()\n', (3313, 3315), False, 'from fp.learners import NonTunedLogisticRegression, NonTunedDecisionTree\n'), ((3349, 3366), 'fp.pre_processors.NoPreProcessing', 'NoPreProcessing', ([], {}), '()\n', (3364, 3366), 
False, 'from fp.pre_processors import NoPreProcessing\n'), ((3401, 3419), 'fp.post_processors.NoPostProcessing', 'NoPostProcessing', ([], {}), '()\n', (3417, 3419), False, 'from fp.post_processors import NoPostProcessing\n'), ((3794, 3822), 'fp.learners.NonTunedLogisticRegression', 'NonTunedLogisticRegression', ([], {}), '()\n', (3820, 3822), False, 'from fp.learners import NonTunedLogisticRegression, NonTunedDecisionTree\n'), ((3824, 3846), 'fp.learners.NonTunedDecisionTree', 'NonTunedDecisionTree', ([], {}), '()\n', (3844, 3846), False, 'from fp.learners import NonTunedLogisticRegression, NonTunedDecisionTree\n'), ((3880, 3897), 'fp.pre_processors.NoPreProcessing', 'NoPreProcessing', ([], {}), '()\n', (3895, 3897), False, 'from fp.pre_processors import NoPreProcessing\n'), ((3932, 3950), 'fp.post_processors.NoPostProcessing', 'NoPostProcessing', ([], {}), '()\n', (3948, 3950), False, 'from fp.post_processors import NoPostProcessing\n'), ((4347, 4375), 'fp.learners.NonTunedLogisticRegression', 'NonTunedLogisticRegression', ([], {}), '()\n', (4373, 4375), False, 'from fp.learners import NonTunedLogisticRegression, NonTunedDecisionTree\n'), ((4377, 4399), 'fp.learners.NonTunedDecisionTree', 'NonTunedDecisionTree', ([], {}), '()\n', (4397, 4399), False, 'from fp.learners import NonTunedLogisticRegression, NonTunedDecisionTree\n'), ((4433, 4450), 'fp.pre_processors.NoPreProcessing', 'NoPreProcessing', ([], {}), '()\n', (4448, 4450), False, 'from fp.pre_processors import NoPreProcessing\n'), ((4485, 4503), 'fp.post_processors.NoPostProcessing', 'NoPostProcessing', ([], {}), '()\n', (4501, 4503), False, 'from fp.post_processors import NoPostProcessing\n')]
|
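The seven test methods above repeat the same constructor arguments verbatim; a sketch, using only names already imported in that snippet, of factoring them into one helper.
def default_experiment_kwargs():
    # shared keyword arguments used by every experiment in the test suite above
    return dict(
        fixed_random_seed=0xabcd,
        train_data_sampler=CompleteData(),
        missing_value_handler=CompleteCaseAnalysis(),
        numeric_attribute_scaler=NamedStandardScaler(),
        learners=[NonTunedLogisticRegression(), NonTunedDecisionTree()],
        pre_processors=[NoPreProcessing()],
        post_processors=[NoPostProcessing()],
    )

# e.g. AdultDatasetWhiteMaleExperiment(**default_experiment_kwargs()).run()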
# -*- coding: utf-8 -*-
"""antimarkdown.handlers -- Element handlers for converting HTML Elements/subtrees to Markdown text.
"""
from collections import deque
from antimarkdown import nodes
def render(*domtrees):
if not domtrees:
return ''
root = nodes.Root()
for dom in domtrees:
build_render_tree(root, dom)
lines = str(root).rstrip().splitlines()
# Strip leading empty lines
while lines and not lines[0].strip():
lines.pop(0)
return nodes.normalize('\n'.join(lines))
def build_render_tree(root, domtree):
"""Process an ElementTree domtree and build a render tree.
"""
opened = set()
stack = deque([domtree])
blackboard = {}
render_tree = root
current_node = render_tree
while stack:
domtree = stack.pop()
if domtree not in opened:
# Open the domtree
# Build the render node.
node_class = getattr(nodes, domtree.tag.upper(), nodes.Node)
current_node = node_class(current_node, domtree, blackboard)
stack.append(domtree)
# Queue children
for el in reversed(domtree):
stack.append(el)
opened.add(domtree)
else:
# Close the domtree
current_node = current_node.parent
return root
|
[
"antimarkdown.nodes.Root",
"collections.deque"
] |
[((267, 279), 'antimarkdown.nodes.Root', 'nodes.Root', ([], {}), '()\n', (277, 279), False, 'from antimarkdown import nodes\n'), ((670, 686), 'collections.deque', 'deque', (['[domtree]'], {}), '([domtree])\n', (675, 686), False, 'from collections import deque\n')]
|
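The explicit stack plus `opened` set in build_render_tree is a generic pre/post-order traversal; a self-contained sketch of the same pattern on a plain xml.etree.ElementTree tree, independent of antimarkdown's node classes.
import xml.etree.ElementTree as ET
from collections import deque

def walk(root):
    # first visit "opens" an element and queues it again; second visit "closes" it
    stack, opened = deque([root]), set()
    depth = 0
    while stack:
        el = stack.pop()
        if el not in opened:
            print('  ' * depth + 'open <%s>' % el.tag)
            opened.add(el)
            stack.append(el)                  # revisit later to close it
            for child in reversed(el):        # reversed so children pop in document order
                stack.append(child)
            depth += 1
        else:
            depth -= 1
            print('  ' * depth + 'close </%s>' % el.tag)

walk(ET.fromstring('<p>hi <em>there</em></p>'))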
"""
Fits PSPL model with parallax using EMCEE sampler.
"""
import os
import sys
import numpy as np
try:
import emcee
except ImportError as err:
print(err)
print("\nEMCEE could not be imported.")
print("Get it from: http://dfm.io/emcee/current/user/install/")
print("and re-run the script")
sys.exit(1)
import matplotlib.pyplot as plt
import MulensModel as mm
# Define likelihood functions
def ln_like(theta, event, parameters_to_fit):
""" likelihood function """
for key, val in enumerate(parameters_to_fit):
setattr(event.model.parameters, val, theta[key])
return -0.5 * event.get_chi2()
def ln_prior(theta, parameters_to_fit):
"""priors - we only reject obviously wrong models"""
if theta[parameters_to_fit.index("t_E")] < 0.:
return -np.inf
return 0.0
def ln_prob(theta, event, parameters_to_fit):
""" combines likelihood and priors"""
ln_prior_ = ln_prior(theta, parameters_to_fit)
if not np.isfinite(ln_prior_):
return -np.inf
ln_like_ = ln_like(theta, event, parameters_to_fit)
# In the cases that source fluxes are negative we want to return
# these as if they were not in priors.
if np.isnan(ln_like_):
return -np.inf
return ln_prior_ + ln_like_
# Read the data
file_name = os.path.join(
mm.DATA_PATH, "photometry_files", "OB05086",
"starBLG234.6.I.218982.dat")
my_data = mm.MulensData(file_name=file_name, add_2450000=True)
coords = "18:04:45.71 -26:59:15.2"
# Starting parameters:
params = dict()
params['t_0'] = 2453628.3
params['t_0_par'] = 2453628.
params['u_0'] = 0.37 # Change sign of u_0 to find the other solution.
params['t_E'] = 100.
params['pi_E_N'] = 0.
params['pi_E_E'] = 0.
my_model = mm.Model(params, coords=coords)
my_event = mm.Event(datasets=my_data, model=my_model)
# Which parameters we want to fit?
parameters_to_fit = ["t_0", "u_0", "t_E", "pi_E_N", "pi_E_E"]
# And remember to provide dispersions to draw starting set of points
sigmas = [0.01, 0.001, 0.1, 0.01, 0.01]
# Initializations for EMCEE
n_dim = len(parameters_to_fit)
n_walkers = 40
n_steps = 500
n_burn = 150
# Including the set of n_walkers starting points:
start_1 = [params[p] for p in parameters_to_fit]
start = [start_1 + np.random.randn(n_dim) * sigmas
for i in range(n_walkers)]
# Run emcee (this can take some time):
sampler = emcee.EnsembleSampler(
n_walkers, n_dim, ln_prob, args=(my_event, parameters_to_fit))
sampler.run_mcmc(start, n_steps)
# Remove burn-in samples and reshape:
samples = sampler.chain[:, n_burn:, :].reshape((-1, n_dim))
# Results:
results = np.percentile(samples, [16, 50, 84], axis=0)
print("Fitted parameters:")
for i in range(n_dim):
r = results[1, i]
print("{:.5f} {:.5f} {:.5f}".format(r, results[2, i]-r, r-results[0, i]))
# We extract best model parameters and chi2 from my_event:
print("\nSmallest chi2 model:")
best = [my_event.best_chi2_parameters[p] for p in parameters_to_fit]
print(*[repr(b) if isinstance(b, float) else b.value for b in best])
print(my_event.best_chi2)
# Now let's plot 3 models
plt.figure()
model_0 = mm.Model({'t_0': 2453628.29062, 'u_0': 0.37263, 't_E': 102.387105})
model_1 = mm.Model(
{'t_0': 2453630.35507, 'u_0': 0.488817, 't_E': 93.611301,
'pi_E_N': 0.2719, 'pi_E_E': 0.1025, 't_0_par': params['t_0_par']},
coords=coords)
model_2 = mm.Model(
{'t_0': 2453630.67778, 'u_0': -0.415677, 't_E': 110.120755,
'pi_E_N': -0.2972, 'pi_E_E': 0.1103, 't_0_par': params['t_0_par']},
coords=coords)
model_0.set_datasets([my_data])
model_1.set_datasets([my_data])
model_2.set_datasets([my_data])
t_1 = 2453200.
t_2 = 2453950.
plot_params = {'lw': 2.5, 'alpha': 0.3, 'subtract_2450000': True,
't_start': t_1, 't_stop': t_2}
my_event.plot_data(subtract_2450000=True)
model_0.plot_lc(label='no pi_E', **plot_params)
model_1.plot_lc(label='pi_E, u_0>0', **plot_params)
model_2.plot_lc(label='pi_E, u_0<0', color='black', ls='dashed', **plot_params)
plt.xlim(t_1-2450000., t_2-2450000.)
plt.legend(loc='best')
plt.title('Data and 3 fitted models')
plt.show()
|
[
"matplotlib.pyplot.title",
"matplotlib.pyplot.xlim",
"MulensModel.MulensData",
"MulensModel.Model",
"matplotlib.pyplot.show",
"numpy.random.randn",
"emcee.EnsembleSampler",
"matplotlib.pyplot.legend",
"numpy.isfinite",
"numpy.isnan",
"numpy.percentile",
"matplotlib.pyplot.figure",
"sys.exit",
"os.path.join",
"MulensModel.Event"
] |
[((1306, 1396), 'os.path.join', 'os.path.join', (['mm.DATA_PATH', '"""photometry_files"""', '"""OB05086"""', '"""starBLG234.6.I.218982.dat"""'], {}), "(mm.DATA_PATH, 'photometry_files', 'OB05086',\n 'starBLG234.6.I.218982.dat')\n", (1318, 1396), False, 'import os\n'), ((1412, 1464), 'MulensModel.MulensData', 'mm.MulensData', ([], {'file_name': 'file_name', 'add_2450000': '(True)'}), '(file_name=file_name, add_2450000=True)\n', (1425, 1464), True, 'import MulensModel as mm\n'), ((1743, 1774), 'MulensModel.Model', 'mm.Model', (['params'], {'coords': 'coords'}), '(params, coords=coords)\n', (1751, 1774), True, 'import MulensModel as mm\n'), ((1786, 1828), 'MulensModel.Event', 'mm.Event', ([], {'datasets': 'my_data', 'model': 'my_model'}), '(datasets=my_data, model=my_model)\n', (1794, 1828), True, 'import MulensModel as mm\n'), ((2374, 2462), 'emcee.EnsembleSampler', 'emcee.EnsembleSampler', (['n_walkers', 'n_dim', 'ln_prob'], {'args': '(my_event, parameters_to_fit)'}), '(n_walkers, n_dim, ln_prob, args=(my_event,\n parameters_to_fit))\n', (2395, 2462), False, 'import emcee\n'), ((2618, 2662), 'numpy.percentile', 'np.percentile', (['samples', '[16, 50, 84]'], {'axis': '(0)'}), '(samples, [16, 50, 84], axis=0)\n', (2631, 2662), True, 'import numpy as np\n'), ((3097, 3109), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (3107, 3109), True, 'import matplotlib.pyplot as plt\n'), ((3120, 3187), 'MulensModel.Model', 'mm.Model', (["{'t_0': 2453628.29062, 'u_0': 0.37263, 't_E': 102.387105}"], {}), "({'t_0': 2453628.29062, 'u_0': 0.37263, 't_E': 102.387105})\n", (3128, 3187), True, 'import MulensModel as mm\n'), ((3198, 3350), 'MulensModel.Model', 'mm.Model', (["{'t_0': 2453630.35507, 'u_0': 0.488817, 't_E': 93.611301, 'pi_E_N': 0.2719,\n 'pi_E_E': 0.1025, 't_0_par': params['t_0_par']}"], {'coords': 'coords'}), "({'t_0': 2453630.35507, 'u_0': 0.488817, 't_E': 93.611301, 'pi_E_N':\n 0.2719, 'pi_E_E': 0.1025, 't_0_par': params['t_0_par']}, coords=coords)\n", (3206, 3350), True, 'import MulensModel as mm\n'), ((3371, 3530), 'MulensModel.Model', 'mm.Model', (["{'t_0': 2453630.67778, 'u_0': -0.415677, 't_E': 110.120755, 'pi_E_N': -\n 0.2972, 'pi_E_E': 0.1103, 't_0_par': params['t_0_par']}"], {'coords': 'coords'}), "({'t_0': 2453630.67778, 'u_0': -0.415677, 't_E': 110.120755,\n 'pi_E_N': -0.2972, 'pi_E_E': 0.1103, 't_0_par': params['t_0_par']},\n coords=coords)\n", (3379, 3530), True, 'import MulensModel as mm\n'), ((4000, 4042), 'matplotlib.pyplot.xlim', 'plt.xlim', (['(t_1 - 2450000.0)', '(t_2 - 2450000.0)'], {}), '(t_1 - 2450000.0, t_2 - 2450000.0)\n', (4008, 4042), True, 'import matplotlib.pyplot as plt\n'), ((4037, 4059), 'matplotlib.pyplot.legend', 'plt.legend', ([], {'loc': '"""best"""'}), "(loc='best')\n", (4047, 4059), True, 'import matplotlib.pyplot as plt\n'), ((4060, 4097), 'matplotlib.pyplot.title', 'plt.title', (['"""Data and 3 fitted models"""'], {}), "('Data and 3 fitted models')\n", (4069, 4097), True, 'import matplotlib.pyplot as plt\n'), ((4098, 4108), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (4106, 4108), True, 'import matplotlib.pyplot as plt\n'), ((1200, 1218), 'numpy.isnan', 'np.isnan', (['ln_like_'], {}), '(ln_like_)\n', (1208, 1218), True, 'import numpy as np\n'), ((315, 326), 'sys.exit', 'sys.exit', (['(1)'], {}), '(1)\n', (323, 326), False, 'import sys\n'), ((977, 999), 'numpy.isfinite', 'np.isfinite', (['ln_prior_'], {}), '(ln_prior_)\n', (988, 999), True, 'import numpy as np\n'), ((2256, 2278), 'numpy.random.randn', 'np.random.randn', (['n_dim'], {}), 
'(n_dim)\n', (2271, 2278), True, 'import numpy as np\n')]
|
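The walker initialisation and the percentile summary in the script above are plain numpy; a tiny standalone sketch with made-up parameter values shows what those two steps do.
import numpy as np

start_1 = [2453628.3, 0.37, 100.0, 0.0, 0.0]     # illustrative starting point
sigmas = [0.01, 0.001, 0.1, 0.01, 0.01]
n_walkers, n_dim = 40, len(start_1)
# perturb the starting point into n_walkers nearby points
start = [start_1 + np.random.randn(n_dim) * sigmas for _ in range(n_walkers)]

samples = np.array(start)                          # stand-in for sampler.chain after burn-in
results = np.percentile(samples, [16, 50, 84], axis=0)
for i in range(n_dim):
    r = results[1, i]
    print("{:.5f} +{:.5f} -{:.5f}".format(r, results[2, i] - r, r - results[0, i]))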
import errno
import mimetypes
from datetime import datetime
import os
import six
from passlib.apps import django10_context as pwd_context
try:
import ujson as json
except ImportError:
    import json as json
def mkdir(path):
try:
os.makedirs(path)
except OSError as exc:
if exc.errno == errno.EEXIST and os.path.isdir(path):
pass
else:
raise
def to_int(value, default=0):
if type(value) == list:
if len(value) > 0:
value = value[0]
else:
return default
try:
value = int(value)
except:
value = default
return value
def json_dumps(data):
return json.dumps(data)
def json_loads(data):
if isinstance(data, str):
return json.loads(data)
else:
return None
def now_date():
return datetime(datetime.now().year, datetime.now().month, datetime.now().day)
def unique_list(target):
seen = set()
return [x for x in target if not (x in seen or seen.add(x))]
def encode_multipart_formdata(fields=None, files=None):
if fields is None:
fields = {}
if files is None:
files = {}
BOUNDARY = '----------ThIs_Is_tHe_bouNdaRY_$'
CRLF = '\r\n' if six.PY2 else b'\r\n'
L = []
for (key, value) in fields.items():
L.append('--' + BOUNDARY)
L.append('Content-Disposition: form-data; name="%s"' % key)
L.append('')
L.append(value)
for (key, filename, value) in files:
if six.PY2:
filename = filename.encode("utf8")
L.append('--' + BOUNDARY)
L.append(
'Content-Disposition: form-data; name="%s"; filename="%s"' % (
key, filename
)
)
L.append('Content-Type: %s' % get_content_type(filename))
L.append('')
L.append(value)
L.append('--' + BOUNDARY + '--')
L.append('')
if six.PY3:
for i in range(len(L)):
if isinstance(L[i], int):
L[i] = str(L[i])
if isinstance(L[i], str):
L[i] = str.encode(L[i])
body = CRLF.join(L)
content_type = 'multipart/form-data; boundary=%s' % BOUNDARY
return content_type, body
def get_content_type(filename):
return mimetypes.guess_type(filename)[0] or 'application/octet-stream'
class PasswordHelper(object):
@staticmethod
def get_hash(text):
return pwd_context.encrypt(text)
@staticmethod
def verify_hash(text, hashed_text):
try:
return pwd_context.verify(text, hashed_text)
except:
return False
class FileHelper(object):
@staticmethod
def file_ext(filename):
split = filename.rsplit('.', 1)
if len(split) > 1:
extension = str(split[1])
return extension.lower()
return ""
class InvalidArgumentException(Exception):
message = ''
def __init__(self, message):
super().__init__()
self.message = message
|
[
"os.makedirs",
"json.loads",
"os.path.isdir",
"passlib.apps.django10_context.encrypt",
"json.dumps",
"passlib.apps.django10_context.verify",
"datetime.datetime.now",
"mimetypes.guess_type"
] |
[((675, 691), 'json.dumps', 'json.dumps', (['data'], {}), '(data)\n', (685, 691), True, 'import json as json\n'), ((238, 255), 'os.makedirs', 'os.makedirs', (['path'], {}), '(path)\n', (249, 255), False, 'import os\n'), ((761, 777), 'json.loads', 'json.loads', (['data'], {}), '(data)\n', (771, 777), True, 'import json as json\n'), ((2425, 2450), 'passlib.apps.django10_context.encrypt', 'pwd_context.encrypt', (['text'], {}), '(text)\n', (2444, 2450), True, 'from passlib.apps import django10_context as pwd_context\n'), ((846, 860), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (858, 860), False, 'from datetime import datetime\n'), ((867, 881), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (879, 881), False, 'from datetime import datetime\n'), ((889, 903), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (901, 903), False, 'from datetime import datetime\n'), ((2272, 2302), 'mimetypes.guess_type', 'mimetypes.guess_type', (['filename'], {}), '(filename)\n', (2292, 2302), False, 'import mimetypes\n'), ((2542, 2579), 'passlib.apps.django10_context.verify', 'pwd_context.verify', (['text', 'hashed_text'], {}), '(text, hashed_text)\n', (2560, 2579), True, 'from passlib.apps import django10_context as pwd_context\n'), ((324, 343), 'os.path.isdir', 'os.path.isdir', (['path'], {}), '(path)\n', (337, 343), False, 'import os\n')]
|
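A hedged usage sketch for the multipart helper in the row above; the URL is a placeholder and the requests library is only assumed to be available, it is not used by the original module.
content_type, body = encode_multipart_formdata(
    fields={'title': 'example'},
    files=[('upload', 'notes.txt', 'hello world')],
)
# import requests                                  # assumed to be installed
# resp = requests.post('https://example.invalid/upload', data=body,
#                      headers={'Content-Type': content_type})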
import board
import displayio
from adafruit_display_shapes.circle import Circle
import time
from pong_helpers import AutoPaddle, ManualBall
# width and height variables used to know where the bottom and right edges of the screen are.
SCREEN_WIDTH = 160
SCREEN_HEIGHT = 128
# FPS (Frames per second) setting, raise or lower this to make the game faster or slower
FPS = 60
# what fraction of a second to wait in order to achieve the desired FPS setting
FPS_DELAY = 1 / FPS
# Make the display context
splash = displayio.Group(max_size=10)
board.DISPLAY.show(splash)
# Make a background color fill
color_bitmap = displayio.Bitmap(SCREEN_WIDTH, SCREEN_HEIGHT, 1)
color_palette = displayio.Palette(1)
color_palette[0] = 0xFFFFFF
bg_sprite = displayio.TileGrid(color_bitmap, x=0, y=0, pixel_shader=color_palette)
splash.append(bg_sprite)
# hold the time we last updated the game state.
# Also represents the last executed "frame" for our FPS setting.
last_update_time = 0
# create left paddle object
# width: 5, height: 30
# x: 1, y: 0
left_paddle = AutoPaddle(5,30,1,0)
# add it to screen group
splash.append(left_paddle.rect)
# create right paddle object
# width: 5, height: 30
# x: 6 pixels inside the right edge
# y: 36 pixels above the bottom edge.
# 30 because it is the paddle height, 6 because it's "a few more" to move away from the edge.
right_paddle = AutoPaddle(5,30,SCREEN_WIDTH-6,SCREEN_HEIGHT-30-6)
# add it to screen group
splash.append(right_paddle.rect)
# create ball
# diameter: 3
# x: center of the screen
# y: center of the screen
ball = ManualBall(3, int(SCREEN_WIDTH/2), int(SCREEN_HEIGHT/2))
# add it to screen group
splash.append(ball.circle)
# variable to hold current time
now = 0
# debug variable to count loops in between updates/frames
loops_since_update = 0
# update() function will get called from main loop
# at an appropriate interval to match FPS setting.
def update():
# call update on all game objects
left_paddle.update()
right_paddle.update()
ball.update(left_paddle, right_paddle)
while True:
# update time variable
now = time.monotonic()
# check if the delay time has passed since the last game update
if last_update_time + FPS_DELAY <= now:
# call update
update()
# set the last update time to now
last_update_time = now
#print(loops_since_update)
# reset debug loop counter
loops_since_update = 0
else:
# update debug loop counter
loops_since_update += 1
|
[
"displayio.Group",
"displayio.Bitmap",
"displayio.Palette",
"board.DISPLAY.show",
"displayio.TileGrid",
"pong_helpers.AutoPaddle",
"time.monotonic"
] |
[((512, 540), 'displayio.Group', 'displayio.Group', ([], {'max_size': '(10)'}), '(max_size=10)\n', (527, 540), False, 'import displayio\n'), ((541, 567), 'board.DISPLAY.show', 'board.DISPLAY.show', (['splash'], {}), '(splash)\n', (559, 567), False, 'import board\n'), ((615, 663), 'displayio.Bitmap', 'displayio.Bitmap', (['SCREEN_WIDTH', 'SCREEN_HEIGHT', '(1)'], {}), '(SCREEN_WIDTH, SCREEN_HEIGHT, 1)\n', (631, 663), False, 'import displayio\n'), ((680, 700), 'displayio.Palette', 'displayio.Palette', (['(1)'], {}), '(1)\n', (697, 700), False, 'import displayio\n'), ((741, 811), 'displayio.TileGrid', 'displayio.TileGrid', (['color_bitmap'], {'x': '(0)', 'y': '(0)', 'pixel_shader': 'color_palette'}), '(color_bitmap, x=0, y=0, pixel_shader=color_palette)\n', (759, 811), False, 'import displayio\n'), ((1052, 1075), 'pong_helpers.AutoPaddle', 'AutoPaddle', (['(5)', '(30)', '(1)', '(0)'], {}), '(5, 30, 1, 0)\n', (1062, 1075), False, 'from pong_helpers import AutoPaddle, ManualBall\n'), ((1374, 1433), 'pong_helpers.AutoPaddle', 'AutoPaddle', (['(5)', '(30)', '(SCREEN_WIDTH - 6)', '(SCREEN_HEIGHT - 30 - 6)'], {}), '(5, 30, SCREEN_WIDTH - 6, SCREEN_HEIGHT - 30 - 6)\n', (1384, 1433), False, 'from pong_helpers import AutoPaddle, ManualBall\n'), ((2112, 2128), 'time.monotonic', 'time.monotonic', ([], {}), '()\n', (2126, 2128), False, 'import time\n')]
|
import numpy as np
import cv2
import heapq
import statistics
import math
def get_norm(t1 , t2):
(xa, ya, za) = t1
(xb, yb, zb) = t2
    # ** is exponentiation (^ is bitwise XOR); cast to int to avoid uint8 wrap-around
    return math.sqrt((int(xa) - int(xb)) ** 2 + (int(ya) - int(yb)) ** 2 + (int(za) - int(zb)) ** 2)
def popularity(image,k):
(m,n,_) = image.shape
d = {}
for i in range(m):
for j in range(n):
t = tuple(image[i,j])
if t in d:
d[t] += 1
else:
d[t] = 1
    top_k_colors = heapq.nlargest(k, d, key=d.get)
return top_k_colors
def popularity_quant(image, k):
finalImage = image.copy()
color_map = popularity(image, k)
(m,n,_) = image.shape
for i in range(m):
for j in range(n):
t = tuple(image[i,j])
min_dist = 100000000.0
for col in color_map:
dist = get_norm(t, col)
if min_dist > dist :
min_dist = dist
min_col = col
finalImage[i,j] = np.asarray(min_col)
return finalImage
test_image = cv2.imread('test1.png')
img = popularity_quant(test_image, 10)
cv2.imshow('Popularity Cut image',img)
cv2.waitKey()
cv2.destroyAllWindows()
cv2.imwrite('popularity_test1.png', img)
|
[
"math.sqrt",
"cv2.waitKey",
"cv2.destroyAllWindows",
"cv2.imwrite",
"numpy.asarray",
"heapq.nlargest",
"cv2.imread",
"cv2.imshow"
] |
[((1057, 1080), 'cv2.imread', 'cv2.imread', (['"""test1.png"""'], {}), "('test1.png')\n", (1067, 1080), False, 'import cv2\n'), ((1121, 1160), 'cv2.imshow', 'cv2.imshow', (['"""Popularity Cut image"""', 'img'], {}), "('Popularity Cut image', img)\n", (1131, 1160), False, 'import cv2\n'), ((1160, 1173), 'cv2.waitKey', 'cv2.waitKey', ([], {}), '()\n', (1171, 1173), False, 'import cv2\n'), ((1174, 1197), 'cv2.destroyAllWindows', 'cv2.destroyAllWindows', ([], {}), '()\n', (1195, 1197), False, 'import cv2\n'), ((1199, 1239), 'cv2.imwrite', 'cv2.imwrite', (['"""popularity_test1.png"""', 'img'], {}), "('popularity_test1.png', img)\n", (1210, 1239), False, 'import cv2\n'), ((152, 206), 'math.sqrt', 'math.sqrt', (['(xa - xb ^ 2 + (ya - yb) ^ 2 + (za - zb) ^ 2)'], {}), '(xa - xb ^ 2 + (ya - yb) ^ 2 + (za - zb) ^ 2)\n', (161, 206), False, 'import math\n'), ((455, 486), 'heapq.nlargest', 'heapq.nlargest', (['k', 'd'], {'key': 'd.get'}), '(k, d, key=d.get)\n', (469, 486), False, 'import heapq\n'), ((1001, 1020), 'numpy.asarray', 'np.asarray', (['min_col'], {}), '(min_col)\n', (1011, 1020), True, 'import numpy as np\n')]
|
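The per-pixel Python loop in popularity_quant is O(m*n*k); a hedged numpy-only sketch of the same nearest-popular-colour mapping, vectorised with broadcasting (memory grows with pixels*k, so this is a sketch rather than a drop-in replacement).
import numpy as np

def popularity_quant_vectorised(image, palette):
    # image: HxWx3 uint8 array; palette: the top-k colours, e.g. from popularity()
    pixels = image.reshape(-1, 3).astype(np.int32)           # (m*n, 3)
    pal = np.asarray(palette, dtype=np.int32)               # (k, 3)
    # squared distance between every pixel and every palette colour
    d2 = ((pixels[:, None, :] - pal[None, :, :]) ** 2).sum(axis=2)
    nearest = d2.argmin(axis=1)                               # index of closest palette colour
    return pal[nearest].reshape(image.shape).astype(image.dtype)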
import boto3
import csv
import logging
import io
import os
import requests
import scrapy
from datetime import date, datetime, timedelta
from jailscraper import app_config, utils
from jailscraper.models import InmatePage
# Quiet down, Boto!
logging.getLogger('boto3').setLevel(logging.CRITICAL)
logging.getLogger('botocore').setLevel(logging.CRITICAL)
logging.getLogger('s3transfer').setLevel(logging.CRITICAL)
ONE_DAY = timedelta(days=1)
class InmatesSpider(scrapy.Spider):
name = "inmates"
def __init__(self, category=None, *args, **kwargs):
super(InmatesSpider, self).__init__(*args, **kwargs)
if app_config.USE_S3_STORAGE:
s3 = boto3.resource('s3')
self._bucket = s3.Bucket(app_config.S3_BUCKET)
self._today = datetime.combine(date.today(), datetime.min.time())
self._yesterday = self._today - ONE_DAY
def start_requests(self):
for url in self._generate_urls():
yield scrapy.Request(url=url, callback=self.parse)
def parse(self, response):
inmate = InmatePage(response.body)
if app_config.USE_LOCAL_STORAGE:
self._save_local(response, inmate)
if app_config.USE_S3_STORAGE:
self._save_to_s3(response, inmate)
yield {
'Age_At_Booking': inmate.age_at_booking,
'Bail_Amount': inmate.bail_amount,
'Booking_Date': inmate.booking_date,
'Booking_Id': inmate.booking_id,
'Charges': inmate.charges,
'Court_Date': inmate.court_date,
'Court_Location': inmate.court_location,
'Gender': inmate.gender,
'Inmate_Hash': inmate.inmate_hash,
'Height': inmate.height,
'Housing_Location': inmate.housing_location,
'Race': inmate.race,
'Weight': inmate.weight,
'Incomplete': self._is_complete_record(inmate)
}
def _generate_urls(self):
"""Make URLs."""
f = self._get_seed_file()
data = list(csv.DictReader(f))
urls = [app_config.INMATE_URL_TEMPLATE.format(row['Booking_Id']) for row in data]
dates = [datetime.strptime(row['Booking_Date'], '%Y-%m-%d') for row in data]
last_date = max(dates) + ONE_DAY
self._start_date = last_date
# Scan the universe of URLs
while last_date < self._today:
next_query = last_date.strftime('%Y-%m%d')
for num in range(1, app_config.MAX_DEFAULT_JAIL_NUMBER + 1):
jailnumber = '{0}{1:03d}'.format(next_query, num)
urls.append(app_config.INMATE_URL_TEMPLATE.format(jailnumber))
last_date = last_date + ONE_DAY
return urls
def _get_seed_file(self):
"""Returns data from seed file as array of lines."""
if app_config.USE_S3_STORAGE:
return self._get_s3_seed_file()
else:
return self._get_local_seed_file()
def _get_s3_seed_file(self):
"""Get seed file from S3. Return file-like object."""
urls = utils.get_manifest()
seed_url = urls.pop()
seed_response = requests.get(seed_url)
return io.StringIO(seed_response.text)
def _get_local_seed_file(self):
"""Get seed file from local file system. Return file-like object."""
try:
files = sorted(os.listdir('data/daily'))
except FileNotFoundError:
files = []
if not len(files):
self.log('No seed file found.')
return app_config.FALLBACK_START_DATE, []
last_file = os.path.join('data/daily', files[-1])
f = open(last_file)
self.log('Used {0} from local file system to seed scrape.'.format(last_file))
return f
def _save_local(self, response, inmate):
"""Save scraped page to local filesystem."""
os.makedirs('data/raw', exist_ok=True)
filepath = os.path.join('data/raw', self._generate_page_filename(inmate))
with open(filepath, 'wb') as f:
f.write(response.body)
self.log('Wrote {0} to local file system'.format(filepath))
def _save_to_s3(self, response, inmate):
"""Save scraped page to s3."""
key = '{0}/raw/{1}'.format(app_config.TARGET, self._generate_page_filename(inmate))
if key.startswith('/'):
key = key[1:]
f = io.BytesIO(response.body)
self._bucket.upload_fileobj(f, key)
self.log('Uploaded s3://{0}/{1}'.format(app_config.S3_BUCKET, key))
def _generate_page_filename(self, inmate):
"""Make a scraped page filename."""
name = '{0}-{1}.html'.format(self._today.strftime('%Y-%m-%d'), inmate.booking_id)
return name
def _is_complete_record(self, inmate):
"""Was this scrape run daily?"""
booking_date = datetime.strptime(inmate.booking_date, '%Y-%m-%d')
return booking_date < self._yesterday
|
[
"io.StringIO",
"io.BytesIO",
"os.makedirs",
"scrapy.Request",
"csv.DictReader",
"jailscraper.models.InmatePage",
"datetime.date.today",
"datetime.datetime.strptime",
"boto3.resource",
"datetime.timedelta",
"requests.get",
"datetime.datetime.min.time",
"jailscraper.app_config.INMATE_URL_TEMPLATE.format",
"os.path.join",
"os.listdir",
"logging.getLogger",
"jailscraper.utils.get_manifest"
] |
[((423, 440), 'datetime.timedelta', 'timedelta', ([], {'days': '(1)'}), '(days=1)\n', (432, 440), False, 'from datetime import date, datetime, timedelta\n'), ((242, 268), 'logging.getLogger', 'logging.getLogger', (['"""boto3"""'], {}), "('boto3')\n", (259, 268), False, 'import logging\n'), ((296, 325), 'logging.getLogger', 'logging.getLogger', (['"""botocore"""'], {}), "('botocore')\n", (313, 325), False, 'import logging\n'), ((353, 384), 'logging.getLogger', 'logging.getLogger', (['"""s3transfer"""'], {}), "('s3transfer')\n", (370, 384), False, 'import logging\n'), ((1060, 1085), 'jailscraper.models.InmatePage', 'InmatePage', (['response.body'], {}), '(response.body)\n', (1070, 1085), False, 'from jailscraper.models import InmatePage\n'), ((3070, 3090), 'jailscraper.utils.get_manifest', 'utils.get_manifest', ([], {}), '()\n', (3088, 3090), False, 'from jailscraper import app_config, utils\n'), ((3145, 3167), 'requests.get', 'requests.get', (['seed_url'], {}), '(seed_url)\n', (3157, 3167), False, 'import requests\n'), ((3183, 3214), 'io.StringIO', 'io.StringIO', (['seed_response.text'], {}), '(seed_response.text)\n', (3194, 3214), False, 'import io\n'), ((3600, 3637), 'os.path.join', 'os.path.join', (['"""data/daily"""', 'files[-1]'], {}), "('data/daily', files[-1])\n", (3612, 3637), False, 'import os\n'), ((3876, 3914), 'os.makedirs', 'os.makedirs', (['"""data/raw"""'], {'exist_ok': '(True)'}), "('data/raw', exist_ok=True)\n", (3887, 3914), False, 'import os\n'), ((4387, 4412), 'io.BytesIO', 'io.BytesIO', (['response.body'], {}), '(response.body)\n', (4397, 4412), False, 'import io\n'), ((4843, 4893), 'datetime.datetime.strptime', 'datetime.strptime', (['inmate.booking_date', '"""%Y-%m-%d"""'], {}), "(inmate.booking_date, '%Y-%m-%d')\n", (4860, 4893), False, 'from datetime import date, datetime, timedelta\n'), ((673, 693), 'boto3.resource', 'boto3.resource', (['"""s3"""'], {}), "('s3')\n", (687, 693), False, 'import boto3\n'), ((792, 804), 'datetime.date.today', 'date.today', ([], {}), '()\n', (802, 804), False, 'from datetime import date, datetime, timedelta\n'), ((806, 825), 'datetime.datetime.min.time', 'datetime.min.time', ([], {}), '()\n', (823, 825), False, 'from datetime import date, datetime, timedelta\n'), ((2036, 2053), 'csv.DictReader', 'csv.DictReader', (['f'], {}), '(f)\n', (2050, 2053), False, 'import csv\n'), ((2072, 2128), 'jailscraper.app_config.INMATE_URL_TEMPLATE.format', 'app_config.INMATE_URL_TEMPLATE.format', (["row['Booking_Id']"], {}), "(row['Booking_Id'])\n", (2109, 2128), False, 'from jailscraper import app_config, utils\n'), ((2163, 2213), 'datetime.datetime.strptime', 'datetime.strptime', (["row['Booking_Date']", '"""%Y-%m-%d"""'], {}), "(row['Booking_Date'], '%Y-%m-%d')\n", (2180, 2213), False, 'from datetime import date, datetime, timedelta\n'), ((966, 1010), 'scrapy.Request', 'scrapy.Request', ([], {'url': 'url', 'callback': 'self.parse'}), '(url=url, callback=self.parse)\n', (980, 1010), False, 'import scrapy\n'), ((3370, 3394), 'os.listdir', 'os.listdir', (['"""data/daily"""'], {}), "('data/daily')\n", (3380, 3394), False, 'import os\n'), ((2608, 2657), 'jailscraper.app_config.INMATE_URL_TEMPLATE.format', 'app_config.INMATE_URL_TEMPLATE.format', (['jailnumber'], {}), '(jailnumber)\n', (2645, 2657), False, 'from jailscraper import app_config, utils\n')]
|
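A standalone sketch of the date fan-out performed by _generate_urls in the spider above; TEMPLATE and MAX_NUM are illustrative stand-ins for the app_config values, not the real endpoint or limit.
from datetime import datetime, timedelta

TEMPLATE = 'https://example.invalid/inmate/{0}'   # placeholder, not the real URL template
MAX_NUM = 5                                       # stands in for MAX_DEFAULT_JAIL_NUMBER

def candidate_urls(last_seen_date, today):
    # try every default jail number for every day after the last seen booking date
    urls = []
    day = last_seen_date + timedelta(days=1)
    while day < today:
        prefix = day.strftime('%Y-%m%d')
        for num in range(1, MAX_NUM + 1):
            urls.append(TEMPLATE.format('{0}{1:03d}'.format(prefix, num)))
        day += timedelta(days=1)
    return urls

print(candidate_urls(datetime(2017, 1, 1), datetime(2017, 1, 3))[:3])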
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import argparse
import numpy as np
import os
import tensorflow as tf
import zipfile as zp
import subprocess
import glob
import json
from PIL import Image
from collections import OrderedDict
import shutil
import stat
import sys
def convert_to_dlc(script_path, frozen_model_file, snpe_root, input_node='input', output_node='output', image_size=224):
print('converting ' + frozen_model_file + ' to snpe dlc format')
sys.stdout.flush()
model_name_ = os.path.splitext(os.path.split(frozen_model_file)[1])[0]
dlc_path = 'models/{}.dlc'.format(model_name_)
dlc_full_path = os.path.join(snpe_root, 'benchmarks', dlc_path)
# if os.path.exists(dlc_full_path):
# return dlc_path
if not os.path.exists(os.path.dirname(dlc_full_path)):
os.makedirs(os.path.dirname(dlc_full_path))
cmd = [script_path,
'--graph', os.path.abspath(frozen_model_file),
'--input_dim', input_node, '1,{0},{0},3'.format(image_size),
'--out_node', output_node,
'--allow_unconsumed_nodes',
'--dlc', dlc_full_path]
subprocess.call(cmd)
print()
sys.stdout.flush()
return dlc_path
# print('INFO: Creating ' + DLC_QUANTIZED_FILENAME + ' quantized model')
# data_cropped_dir = os.path.join(os.path.join(model_dir, 'data'), 'cropped')
# cmd = ['snpe-dlc-quantize',
# '--input_dlc', os.path.join(dlc_dir, DLC_FILENAME),
# '--input_list', os.path.join(data_cropped_dir, RAW_LIST_FILE),
# '--output_dlc', os.path.join(dlc_dir, DLC_QUANTIZED_FILENAME)]
# subprocess.call(cmd)
def __get_img_raw(img_file):
img_file = os.path.abspath(img_file)
img = Image.open(img_file)
img_ndarray = np.array(img) # read it
if len(img_ndarray.shape) != 3:
raise RuntimeError('Image shape' + str(img_ndarray.shape))
if img_ndarray.shape[2] != 3:
raise RuntimeError('Require image with rgb but channel is %d' % img_ndarray.shape[2])
# reverse last dimension: rgb -> bgr
return img_ndarray
def __create_mean_raw(img_raw, mean_rgb):
if img_raw.shape[2] != 3:
raise RuntimeError('Require image with rgb but channel is %d' % img_raw.shape[2])
img_dim = (img_raw.shape[0], img_raw.shape[1])
mean_raw_r = np.empty(img_dim)
mean_raw_r.fill(mean_rgb[0])
mean_raw_g = np.empty(img_dim)
mean_raw_g.fill(mean_rgb[1])
mean_raw_b = np.empty(img_dim)
mean_raw_b.fill(mean_rgb[2])
# create with c, h, w shape first
tmp_transpose_dim = (img_raw.shape[2], img_raw.shape[0], img_raw.shape[1])
mean_raw = np.empty(tmp_transpose_dim)
mean_raw[0] = mean_raw_r
mean_raw[1] = mean_raw_g
mean_raw[2] = mean_raw_b
# back to h, w, c
mean_raw = np.transpose(mean_raw, (1, 2, 0))
return mean_raw.astype(np.float32)
def __create_raw_img(img_file, mean_rgb, div, req_bgr_raw, save_uint8):
img_raw = __get_img_raw(img_file)
mean_raw = __create_mean_raw(img_raw, mean_rgb)
snpe_raw = img_raw - mean_raw
snpe_raw = snpe_raw.astype(np.float32)
# scalar data divide
snpe_raw /= div
if req_bgr_raw:
snpe_raw = snpe_raw[..., ::-1]
if save_uint8:
snpe_raw = snpe_raw.astype(np.uint8)
else:
snpe_raw = snpe_raw.astype(np.float32)
img_file = os.path.abspath(img_file)
filename, ext = os.path.splitext(img_file)
snpe_raw_filename = filename
snpe_raw_filename += '.raw'
snpe_raw.tofile(snpe_raw_filename)
return 0
def __resize_square_to_jpg(src, dst, size):
src_img = Image.open(src)
# If black and white image, convert to rgb (all 3 channels the same)
if len(np.shape(src_img)) == 2: src_img = src_img.convert(mode='RGB')
# center crop to square
width, height = src_img.size
short_dim = min(height, width)
crop_coord = (
(width - short_dim) / 2,
(height - short_dim) / 2,
(width + short_dim) / 2,
(height + short_dim) / 2
)
img = src_img.crop(crop_coord)
# resize to alexnet size
dst_img = img.resize((size, size), Image.ANTIALIAS)
# save output - save determined from file extension
dst_img.save(dst)
return 0
def convert_img(src, dest, size):
print("converting images...")
for root, dirs, files in os.walk(src):
for jpgs in files:
src_image = os.path.join(root, jpgs)
if '.jpg' in src_image:
print(src_image)
dest_image = os.path.join(dest, jpgs)
__resize_square_to_jpg(src_image, dest_image, size)
for root, dirs, files in os.walk(dest):
for jpgs in files:
src_image = os.path.join(root, jpgs)
print(src_image)
mean_rgb = (128, 128, 128)
__create_raw_img(src_image, mean_rgb, 128, False, False)
def create_file_list(input_dir, output_filename, ext_pattern, print_out=True, rel_path=True):
input_dir = os.path.abspath(input_dir)
output_filename = os.path.abspath(output_filename)
output_dir = os.path.dirname(output_filename)
if not os.path.isdir(input_dir):
raise RuntimeError('input_dir %s is not a directory' % input_dir)
if not os.path.isdir(output_dir):
raise RuntimeError('output_filename %s directory does not exist' % output_dir)
glob_path = os.path.join(input_dir, ext_pattern)
file_list = glob.glob(glob_path)
if rel_path:
file_list = [os.path.relpath(file_path, output_dir) for file_path in file_list]
if len(file_list) <= 0:
if print_out:
print('no results with %s' % glob_path)
else:
with open(output_filename, 'w') as f:
f.write('\n'.join(file_list))
if print_out:
print('%s created listing %d files.' % (output_filename, len(file_list)))
def prepare_data_images(image_size, snpe_root):
# make a copy of the image files from the alex net model data dir
image_dir_relative_path = 'models/alexnet/data'
image_dir = os.path.join(snpe_root, image_dir_relative_path)
data_cropped_dir = os.path.join(image_dir, 'cropped_%s' % image_size)
raw_list = os.path.join(image_dir, 'target_raw_list_%s.txt' % image_size)
if not os.path.exists(raw_list):
os.makedirs(data_cropped_dir)
print('creating inception style raw image data')
convert_img(image_dir, data_cropped_dir, image_size)
print('Create file lists')
create_file_list(data_cropped_dir, raw_list, '*.raw')
print()
sys.stdout.flush()
return data_cropped_dir, raw_list
# generate bench config json file
def gen_config(dlc_path, input_list_file, input_data, processors_, runs):
name = os.path.splitext(os.path.basename(dlc_path))[0]
config = OrderedDict()
config['Name'] = name
config['HostRootPath'] = name
config['HostResultsDir'] = os.path.join(name, 'results')
config['DevicePath'] = '/data/local/tmp/snpebm'
config['Devices'] = ["123"]
config['Runs'] = runs
model = OrderedDict()
model['Name'] = name
model['Dlc'] = dlc_path
model['InputList'] = input_list_file
model['Data'] = [input_data]
config['Model'] = model
config['Runtimes'] = processors_
config['Measurements'] = ['timing'] # ['timing', 'mem']
return config
def write_config(config, save_path):
with open(save_path, 'w') as f:
json.dump(config, f, indent=4)
def check_processor_arg(processor_str):
default = "GPU,DSP,CPU,GPU_FP16"
processor_list = default.split(',')
parsed_processors = []
for p in processor_str.split(','):
if p not in processor_list:
print("please use either GPU, DSP or CPU or any combination of them, seperated by comma(',')")
print("e.g. -p GPU,DSP means running on GPU and DSP; -p CPU means only running on CPU")
exit(-1)
else:
parsed_processors.append(p)
return parsed_processors
"""
caution1: rename data/snpe-1.31.0.522 to data/snpe-1.31.0
caution2: manually change executable permission on the phone through adb:
adb shell "chmod a+x /data/local/tmp/snpebm/artifacts/arm-android-clang6.0/bin/snpe*"
python snpe/run_snpe.py --model data/resnet_v1_50/resnet_v1_50.frozen.pb --snpe_sdk data/snpe-1.31.0.zip 2>&1 | tee run_resnet50.log
(test for pip install tensorflow==1.14)
"""
def parse_args():
parser = argparse.ArgumentParser()
parser.add_argument("-sdk", "--snpe_sdk", type=str, default="data/snpe-1.15.0.zip",
help="path to snpe sdk zip file")
parser.add_argument("-p", "--processors", type=check_processor_arg, default="GPU,DSP,CPU",
help="processor to use, use GPU,DSP,CPU or any combination of them (separated by comma)")
parser.add_argument("-n", "--runs", type=int, default=10,
help="number of times to repeat the run")
parser.add_argument("-ndk", "--android_ndk", type=str,
help="path to android ndk")
parser.add_argument("-m", "--model", type=str, default="data/mobilenet_v1/mobilenet_v1_1.0_224.frozen.pb",
help="frozen tensorflow model")
parser.add_argument("-s", "--image_size", type=int, default=224,
help="input image size")
parser.add_argument("-i", "--input_node", type=str, default='input',
help="input node name in the model")
parser.add_argument("-o", "--output_node", type=str, default='output',
help="output node name in the model")
parser.add_argument("-t", "--show_time", action='store_true',
help="show time in csv")
return parser.parse_args()
if __name__ == '__main__':
web_url = "https://developer.qualcomm.com/software/snapdragon-neural-processing-engine-ai"
tf_path = os.path.dirname(tf.__file__)
args = parse_args()
snpe_sdk_file = args.snpe_sdk
snpe_dir = os.path.dirname(snpe_sdk_file)
snpe_sdk_path = os.path.abspath(os.path.splitext(snpe_sdk_file)[0])
snpe_name = os.path.basename(snpe_sdk_path)
if not os.path.exists(snpe_sdk_file):
print("please download SNPE SDK from:", web_url)
exit(-1)
elif not os.path.exists(snpe_sdk_path):
print("extracting snpe to:", snpe_sdk_path, "...")
zp_ref = zp.ZipFile(snpe_sdk_file, 'r')
zp_ref.extractall(snpe_dir)
zp_ref.close()
print("snpe sdk extraction done.")
else:
print("found snpe sdk at:", snpe_sdk_path)
sys.stdout.flush()
print()
sys.stdout.flush()
ndk_path = os.environ.get("ANDROID_NDK", None) or args.android_ndk
if not ndk_path:
print("please set ndk path either by specify -ndk or set 'export ANDROID_NDK=path/to/android-ndk'")
exit(-1)
# may install pkg deps
if not os.path.exists('/tmp/{}_deps_checked'.format(snpe_name)):
# print("copying libs from ndk to snpe sdk...")
#
# shutil.copy('{}/sources/cxx-stl/gnu-libstdc++/4.9/libs/arm64-v8a/libgnustl_shared.so'.format(ndk_path),
# '{}/lib/aarch64-linux-gcc4.9'.format(snpe_sdk_path))
#
# shutil.copy('{}/sources/cxx-stl/gnu-libstdc++/4.9/libs/armeabi-v7a/libgnustl_shared.so'.format(ndk_path),
# '{}/lib/arm-android-gcc4.9'.format(snpe_sdk_path))
# print("gcc libs copied.")
# print()
# sys.stdout.flush()
print("checking package dependencies...")
check_cmd = 'yes | bash {}/bin/dependencies.sh'.format(snpe_sdk_path)
subprocess.call(check_cmd, shell=True)
print("checking python dependencies...")
check_cmd = 'yes | bash {}/bin/check_python_depends.sh'.format(snpe_sdk_path)
subprocess.call(check_cmd, shell=True)
for os_type in ["arm-android-gcc4.9", "arm-android-clang6.0", "x86_64-linux-clang"]:
bin_dir = "{}/bin/{}".format(snpe_sdk_path, os_type)
if not os.path.exists(bin_dir):
continue
for bin_file in os.listdir(bin_dir):
script_file_path = os.path.join("{}/bin/{}".format(snpe_sdk_path, os_type), bin_file)
print('set script:', script_file_path, ' to executable')
sys.stdout.flush()
st = os.stat(script_file_path)
os.chmod(script_file_path, st.st_mode | stat.S_IEXEC)
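        # Touch a sentinel file so the dependency and permission checks above
        # run only once per extracted SDK version.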
open('/tmp/{}_deps_checked'.format(snpe_name), 'a').close()
os.environ["SNPE_ROOT"] = snpe_sdk_path
py_path = os.environ.get("PYTHONPATH", "")
os.environ["PYTHONPATH"] = "{0}/lib/python:{1}".format(snpe_sdk_path, py_path)
os.environ["TENSORFLOW_HOME"] = tf_path
bin_path = os.environ.get("PATH", "")
os.environ["PATH"] = "{}/bin/x86_64-linux-clang:{}".format(snpe_sdk_path, bin_path)
model_file = args.model
if not os.path.exists(model_file):
print(model_file, "not exist!")
exit(-1)
convert_dlc_script = "{}/bin/x86_64-linux-clang/snpe-tensorflow-to-dlc".format(snpe_sdk_path)
dlc_file = convert_to_dlc(convert_dlc_script, model_file, snpe_sdk_path,
args.input_node, args.output_node, args.image_size)
data_dir, raw_file_list = prepare_data_images(args.image_size, snpe_sdk_path)
print('generating benchmark configuration...')
sys.stdout.flush()
config = gen_config(dlc_file, raw_file_list, data_dir, args.processors, args.runs)
model_name = os.path.splitext(os.path.split(model_file)[1])[0]
config_path = os.path.join('{}/benchmarks'.format(snpe_sdk_path), "{}.json".format(model_name))
write_config(config, config_path)
print('benchmark configuration generated.')
print()
sys.stdout.flush()
print('running benchmark on {}...'.format(' '.join(args.processors)))
print()
sys.stdout.flush()
bench_cmd = ['python', 'snpe_bench.py', '-c', config_path, '-a']
subprocess.call(bench_cmd, cwd='{}/benchmarks'.format(snpe_sdk_path))
stats_file = model_file.replace('.pb', '.csv')
shutil.copy('{0}/benchmarks/{1}/results/latest_results/benchmark_stats_{1}.csv'.format(snpe_sdk_path, model_name),
stats_file)
print('benchmark results saved to:', stats_file)
if args.show_time:
import csv
with open(stats_file, 'r') as f, open('{}.txt'.format(stats_file), 'w') as f2:
reader = csv.reader(f)
next(reader)
for row in reader:
if 'Total Inference Time' in row:
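                    # Column positions below assume the layout of the
                    # benchmark_stats CSV written by snpe_bench.py: GPU total in
                    # column 3, DSP in 9, CPU in 18; the 1/1000 factor rescales
                    # the reported values for printing.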
gpu_time = float(row[3])/1000
dsp_time = float(row[9])/1000
cpu_time = float(row[18])/1000
header = 'GPU, DSP, CPU'
print(header)
f2.write(header + '\n')
time_str = '{:4.2f}, {:4.2f}, {:4.2f}'.format(gpu_time, dsp_time, cpu_time)
print(time_str)
f2.write(time_str + '\n')
break
print('all done.')
|
[
"csv.reader",
"argparse.ArgumentParser",
"numpy.empty",
"os.walk",
"numpy.shape",
"sys.stdout.flush",
"glob.glob",
"os.path.join",
"os.path.abspath",
"os.path.dirname",
"numpy.transpose",
"os.path.exists",
"json.dump",
"os.chmod",
"os.stat",
"os.path.basename",
"subprocess.call",
"os.listdir",
"zipfile.ZipFile",
"os.makedirs",
"os.path.isdir",
"PIL.Image.open",
"os.environ.get",
"numpy.array",
"os.path.splitext",
"os.path.relpath",
"collections.OrderedDict",
"os.path.split"
] |
[((532, 550), 'sys.stdout.flush', 'sys.stdout.flush', ([], {}), '()\n', (548, 550), False, 'import sys\n'), ((697, 744), 'os.path.join', 'os.path.join', (['snpe_root', '"""benchmarks"""', 'dlc_path'], {}), "(snpe_root, 'benchmarks', dlc_path)\n", (709, 744), False, 'import os\n'), ((1193, 1213), 'subprocess.call', 'subprocess.call', (['cmd'], {}), '(cmd)\n', (1208, 1213), False, 'import subprocess\n'), ((1230, 1248), 'sys.stdout.flush', 'sys.stdout.flush', ([], {}), '()\n', (1246, 1248), False, 'import sys\n'), ((1752, 1777), 'os.path.abspath', 'os.path.abspath', (['img_file'], {}), '(img_file)\n', (1767, 1777), False, 'import os\n'), ((1788, 1808), 'PIL.Image.open', 'Image.open', (['img_file'], {}), '(img_file)\n', (1798, 1808), False, 'from PIL import Image\n'), ((1827, 1840), 'numpy.array', 'np.array', (['img'], {}), '(img)\n', (1835, 1840), True, 'import numpy as np\n'), ((2379, 2396), 'numpy.empty', 'np.empty', (['img_dim'], {}), '(img_dim)\n', (2387, 2396), True, 'import numpy as np\n'), ((2447, 2464), 'numpy.empty', 'np.empty', (['img_dim'], {}), '(img_dim)\n', (2455, 2464), True, 'import numpy as np\n'), ((2515, 2532), 'numpy.empty', 'np.empty', (['img_dim'], {}), '(img_dim)\n', (2523, 2532), True, 'import numpy as np\n'), ((2698, 2725), 'numpy.empty', 'np.empty', (['tmp_transpose_dim'], {}), '(tmp_transpose_dim)\n', (2706, 2725), True, 'import numpy as np\n'), ((2850, 2883), 'numpy.transpose', 'np.transpose', (['mean_raw', '(1, 2, 0)'], {}), '(mean_raw, (1, 2, 0))\n', (2862, 2883), True, 'import numpy as np\n'), ((3408, 3433), 'os.path.abspath', 'os.path.abspath', (['img_file'], {}), '(img_file)\n', (3423, 3433), False, 'import os\n'), ((3454, 3480), 'os.path.splitext', 'os.path.splitext', (['img_file'], {}), '(img_file)\n', (3470, 3480), False, 'import os\n'), ((3659, 3674), 'PIL.Image.open', 'Image.open', (['src'], {}), '(src)\n', (3669, 3674), False, 'from PIL import Image\n'), ((4386, 4398), 'os.walk', 'os.walk', (['src'], {}), '(src)\n', (4393, 4398), False, 'import os\n'), ((4697, 4710), 'os.walk', 'os.walk', (['dest'], {}), '(dest)\n', (4704, 4710), False, 'import os\n'), ((5037, 5063), 'os.path.abspath', 'os.path.abspath', (['input_dir'], {}), '(input_dir)\n', (5052, 5063), False, 'import os\n'), ((5086, 5118), 'os.path.abspath', 'os.path.abspath', (['output_filename'], {}), '(output_filename)\n', (5101, 5118), False, 'import os\n'), ((5136, 5168), 'os.path.dirname', 'os.path.dirname', (['output_filename'], {}), '(output_filename)\n', (5151, 5168), False, 'import os\n'), ((5424, 5460), 'os.path.join', 'os.path.join', (['input_dir', 'ext_pattern'], {}), '(input_dir, ext_pattern)\n', (5436, 5460), False, 'import os\n'), ((5477, 5497), 'glob.glob', 'glob.glob', (['glob_path'], {}), '(glob_path)\n', (5486, 5497), False, 'import glob\n'), ((6109, 6157), 'os.path.join', 'os.path.join', (['snpe_root', 'image_dir_relative_path'], {}), '(snpe_root, image_dir_relative_path)\n', (6121, 6157), False, 'import os\n'), ((6182, 6232), 'os.path.join', 'os.path.join', (['image_dir', "('cropped_%s' % image_size)"], {}), "(image_dir, 'cropped_%s' % image_size)\n", (6194, 6232), False, 'import os\n'), ((6248, 6310), 'os.path.join', 'os.path.join', (['image_dir', "('target_raw_list_%s.txt' % image_size)"], {}), "(image_dir, 'target_raw_list_%s.txt' % image_size)\n", (6260, 6310), False, 'import os\n'), ((6619, 6637), 'sys.stdout.flush', 'sys.stdout.flush', ([], {}), '()\n', (6635, 6637), False, 'import sys\n'), ((6858, 6871), 'collections.OrderedDict', 'OrderedDict', ([], {}), '()\n', (6869, 
6871), False, 'from collections import OrderedDict\n'), ((6963, 6992), 'os.path.join', 'os.path.join', (['name', '"""results"""'], {}), "(name, 'results')\n", (6975, 6992), False, 'import os\n'), ((7116, 7129), 'collections.OrderedDict', 'OrderedDict', ([], {}), '()\n', (7127, 7129), False, 'from collections import OrderedDict\n'), ((8487, 8512), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (8510, 8512), False, 'import argparse\n'), ((9947, 9975), 'os.path.dirname', 'os.path.dirname', (['tf.__file__'], {}), '(tf.__file__)\n', (9962, 9975), False, 'import os\n'), ((10052, 10082), 'os.path.dirname', 'os.path.dirname', (['snpe_sdk_file'], {}), '(snpe_sdk_file)\n', (10067, 10082), False, 'import os\n'), ((10171, 10202), 'os.path.basename', 'os.path.basename', (['snpe_sdk_path'], {}), '(snpe_sdk_path)\n', (10187, 10202), False, 'import os\n'), ((10677, 10695), 'sys.stdout.flush', 'sys.stdout.flush', ([], {}), '()\n', (10693, 10695), False, 'import sys\n'), ((12639, 12671), 'os.environ.get', 'os.environ.get', (['"""PYTHONPATH"""', '""""""'], {}), "('PYTHONPATH', '')\n", (12653, 12671), False, 'import os\n'), ((12815, 12841), 'os.environ.get', 'os.environ.get', (['"""PATH"""', '""""""'], {}), "('PATH', '')\n", (12829, 12841), False, 'import os\n'), ((13453, 13471), 'sys.stdout.flush', 'sys.stdout.flush', ([], {}), '()\n', (13469, 13471), False, 'import sys\n'), ((13830, 13848), 'sys.stdout.flush', 'sys.stdout.flush', ([], {}), '()\n', (13846, 13848), False, 'import sys\n'), ((13940, 13958), 'sys.stdout.flush', 'sys.stdout.flush', ([], {}), '()\n', (13956, 13958), False, 'import sys\n'), ((969, 1003), 'os.path.abspath', 'os.path.abspath', (['frozen_model_file'], {}), '(frozen_model_file)\n', (984, 1003), False, 'import os\n'), ((5181, 5205), 'os.path.isdir', 'os.path.isdir', (['input_dir'], {}), '(input_dir)\n', (5194, 5205), False, 'import os\n'), ((5293, 5318), 'os.path.isdir', 'os.path.isdir', (['output_dir'], {}), '(output_dir)\n', (5306, 5318), False, 'import os\n'), ((6323, 6347), 'os.path.exists', 'os.path.exists', (['raw_list'], {}), '(raw_list)\n', (6337, 6347), False, 'import os\n'), ((6357, 6386), 'os.makedirs', 'os.makedirs', (['data_cropped_dir'], {}), '(data_cropped_dir)\n', (6368, 6386), False, 'import os\n'), ((7486, 7516), 'json.dump', 'json.dump', (['config', 'f'], {'indent': '(4)'}), '(config, f, indent=4)\n', (7495, 7516), False, 'import json\n'), ((10215, 10244), 'os.path.exists', 'os.path.exists', (['snpe_sdk_file'], {}), '(snpe_sdk_file)\n', (10229, 10244), False, 'import os\n'), ((10712, 10747), 'os.environ.get', 'os.environ.get', (['"""ANDROID_NDK"""', 'None'], {}), "('ANDROID_NDK', None)\n", (10726, 10747), False, 'import os\n'), ((11685, 11723), 'subprocess.call', 'subprocess.call', (['check_cmd'], {'shell': '(True)'}), '(check_cmd, shell=True)\n', (11700, 11723), False, 'import subprocess\n'), ((11868, 11906), 'subprocess.call', 'subprocess.call', (['check_cmd'], {'shell': '(True)'}), '(check_cmd, shell=True)\n', (11883, 11906), False, 'import subprocess\n'), ((12970, 12996), 'os.path.exists', 'os.path.exists', (['model_file'], {}), '(model_file)\n', (12984, 12996), False, 'import os\n'), ((838, 868), 'os.path.dirname', 'os.path.dirname', (['dlc_full_path'], {}), '(dlc_full_path)\n', (853, 868), False, 'import os\n'), ((891, 921), 'os.path.dirname', 'os.path.dirname', (['dlc_full_path'], {}), '(dlc_full_path)\n', (906, 921), False, 'import os\n'), ((3759, 3776), 'numpy.shape', 'np.shape', (['src_img'], {}), '(src_img)\n', (3767, 3776), True, 
'import numpy as np\n'), ((4451, 4475), 'os.path.join', 'os.path.join', (['root', 'jpgs'], {}), '(root, jpgs)\n', (4463, 4475), False, 'import os\n'), ((4763, 4787), 'os.path.join', 'os.path.join', (['root', 'jpgs'], {}), '(root, jpgs)\n', (4775, 4787), False, 'import os\n'), ((5537, 5575), 'os.path.relpath', 'os.path.relpath', (['file_path', 'output_dir'], {}), '(file_path, output_dir)\n', (5552, 5575), False, 'import os\n'), ((6814, 6840), 'os.path.basename', 'os.path.basename', (['dlc_path'], {}), '(dlc_path)\n', (6830, 6840), False, 'import os\n'), ((10119, 10150), 'os.path.splitext', 'os.path.splitext', (['snpe_sdk_file'], {}), '(snpe_sdk_file)\n', (10135, 10150), False, 'import os\n'), ((10333, 10362), 'os.path.exists', 'os.path.exists', (['snpe_sdk_path'], {}), '(snpe_sdk_path)\n', (10347, 10362), False, 'import os\n'), ((10440, 10470), 'zipfile.ZipFile', 'zp.ZipFile', (['snpe_sdk_file', '"""r"""'], {}), "(snpe_sdk_file, 'r')\n", (10450, 10470), True, 'import zipfile as zp\n'), ((10642, 10660), 'sys.stdout.flush', 'sys.stdout.flush', ([], {}), '()\n', (10658, 10660), False, 'import sys\n'), ((12163, 12182), 'os.listdir', 'os.listdir', (['bin_dir'], {}), '(bin_dir)\n', (12173, 12182), False, 'import os\n'), ((14505, 14518), 'csv.reader', 'csv.reader', (['f'], {}), '(f)\n', (14515, 14518), False, 'import csv\n'), ((586, 618), 'os.path.split', 'os.path.split', (['frozen_model_file'], {}), '(frozen_model_file)\n', (599, 618), False, 'import os\n'), ((4574, 4598), 'os.path.join', 'os.path.join', (['dest', 'jpgs'], {}), '(dest, jpgs)\n', (4586, 4598), False, 'import os\n'), ((12085, 12108), 'os.path.exists', 'os.path.exists', (['bin_dir'], {}), '(bin_dir)\n', (12099, 12108), False, 'import os\n'), ((12375, 12393), 'sys.stdout.flush', 'sys.stdout.flush', ([], {}), '()\n', (12391, 12393), False, 'import sys\n'), ((12415, 12440), 'os.stat', 'os.stat', (['script_file_path'], {}), '(script_file_path)\n', (12422, 12440), False, 'import os\n'), ((12457, 12510), 'os.chmod', 'os.chmod', (['script_file_path', '(st.st_mode | stat.S_IEXEC)'], {}), '(script_file_path, st.st_mode | stat.S_IEXEC)\n', (12465, 12510), False, 'import os\n'), ((13594, 13619), 'os.path.split', 'os.path.split', (['model_file'], {}), '(model_file)\n', (13607, 13619), False, 'import os\n')]
|
#!/usr/bin/env python3
"""hybrid-analysis.com worker for the ACT platform
Copyright 2021 the ACT project <<EMAIL>>
Permission to use, copy, modify, and/or distribute this software for any
purpose with or without fee is hereby granted, provided that the above
copyright notice and this permission notice appear in all copies.
THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES WITH
REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND
FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT,
INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR
OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
PERFORMANCE OF THIS SOFTWARE.
"""
import argparse
import contextlib
import json
import sys
import traceback
import warnings
from functools import partialmethod
from logging import error, info
from typing import Any, Dict, Generator, List, Optional, Text
import requests
import act.api
from act.api.libs import cli
from act.workers.libs import worker
def parseargs() -> argparse.ArgumentParser:
"""Extract command lines argument"""
parser = worker.parseargs("ACT hybrid-analysis.com Client")
parser.add_argument(
"--feed", action="store_true", help="Download the public feed only, no lookup"
)
parser.add_argument(
"--apikey", default="", help="community apikey for hybrid-analysis.com"
)
parser.add_argument(
"--user-agent", default="Falcon Sandbox", help="User agent while talking to API"
)
parser.add_argument(
"--no-check-certificate",
action="store_true",
help="Do not check SSL certificate",
)
return parser
def download_feed(
user_agent: Text, proxies: Optional[Dict[Text, Text]], verify_ssl: bool = True
) -> Dict[Text, Any]:
"""Download the public feed and return a dictionary"""
url = "https://hybrid-analysis.com/feed?json"
with ssl_verification(verify=verify_ssl):
headers = {"User-Agent": user_agent}
response = requests.get(url, proxies=proxies, headers=headers)
if response.status_code != 200:
raise CommunicationError(
f"hybrid_analysis_feed.download_feed() could not download public feed, "
f"error calling {url}: Status = {response.status_code}"
)
try:
data: Dict[Text, Any] = response.json()
except json.decoder.JSONDecodeError as err:
raise CommunicationError(
f"hybrid_analysis_feed.download_feed() could not load public feed, "
f"error decoding json result from {url}: {err}"
)
return data
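# Note on the payload shape (inferred from how handle_feed() consumes it below,
# not from any API documentation): the decoded JSON is expected to carry a
# "data" list of report dicts, each holding at least "sha256" and optionally
# "isinteresting" / "threatlevel".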
def handle_feed(
actapi: act.api.Act,
user_agent: Text,
proxies: Optional[Dict[Text, Text]] = None,
verify_ssl: bool = True,
output_format: Text = "json",
) -> None:
"""Download, parse and provide facts from the public feed of hybrid-analysis.com"""
feed = download_feed(user_agent, proxies, verify_ssl)
feeds_facts: List[act.api.fact.Fact] = []
for report in feed["data"]:
if not (report.get("isinteresting", False) or report.get("threatlevel", 0)):
continue
# store data if threatlevel > 0 or report is interesting
if "sha256" not in report:
continue
feeds_facts += handle_report(actapi, report)
for fact in feeds_facts:
act.api.helpers.handle_fact(fact, output_format=output_format)
def handle_hosts(
actapi: act.api.Act, content: Text, hosts: List[Text]
) -> List[act.api.fact.Fact]:
"""handle the hosts part of a hybrid-analysis report"""
feeds_facts: List[act.api.fact.Fact] = []
for host in hosts:
(ip_type, ip) = act.api.helpers.ip_obj(host)
chain = []
chain.append(
actapi.fact("connectsTo").source("content", content).destination("uri", "*")
)
chain.append(
actapi.fact("resolvesTo").source("fqdn", "*").destination(ip_type, ip)
)
chain.append(
actapi.fact("componentOf").source("fqdn", "*").destination("uri", "*")
)
feeds_facts += act.api.fact.fact_chain(*chain)
return feeds_facts
def handle_domains(
actapi: act.api.Act, content: Text, domains: List[Text]
) -> List[act.api.fact.Fact]:
"""Handle the domains part of a hybrid-analysis report"""
feeds_facts: List[act.api.fact.Fact] = []
for domain in domains:
chain = []
chain.append(
actapi.fact("connectsTo").source("content", content).destination("uri", "*")
)
chain.append(
actapi.fact("componentOf").source("fqdn", domain).destination("uri", "*")
)
feeds_facts += act.api.fact.fact_chain(*chain)
return feeds_facts
def handle_extracted_files(
actapi: act.api.Act, content: Text, extracted_files: List[Dict]
) -> List[act.api.fact.Fact]:
"""Handle the extracted_files part of a hybrid_analysis report"""
feeds_facts: List[act.api.fact.Fact] = []
for file in extracted_files:
chain = []
if "sha256" not in file:
continue
if not file["file_path"]:
info(f"{file} is missing file_path using name instead")
path = file["file_path"] if file["file_path"] else file["name"]
chain.append(
actapi.fact("componentOf").source("path", path).destination("uri", "*")
)
chain.append(
actapi.fact("at").source("content", file["sha256"]).destination("uri", "*")
)
feeds_facts += act.api.fact.fact_chain(*chain)
for hash_type in ["md5", "sha1", "sha256"]:
feeds_facts.append(
actapi.fact("represents")
.source("hash", file[hash_type])
.destination("content", file["sha256"])
)
feeds_facts.append(
actapi.fact("category", hash_type).source("hash", file[hash_type])
)
if (
content != file["sha256"]
): # the act platform does not accept same object on source and destination for write
feeds_facts.append(
actapi.fact("writes")
.source("content", content)
.destination("content", file["sha256"])
)
return feeds_facts
def handle_classification_tags(
actapi: act.api.Act, content: Text, classification_tags: List[Text]
) -> List[act.api.fact.Fact]:
"""handle the classification_tags part or a hybrid_analysis report"""
feeds_facts: List[act.api.fact.Fact] = []
for tag in classification_tags:
feeds_facts.append(
actapi.fact("classifiedAs")
.source("content", content)
.destination("tool", tag)
)
return feeds_facts
def handle_mitre_attcks(
actapi: act.api.Act, content: Text, mitre_attcks: List[Dict]
) -> List[act.api.fact.Fact]:
"""Handle the MITRE Att&ck part of the hybrid analysis report"""
feeds_facts: List[act.api.fact.Fact] = []
for attck in mitre_attcks:
chain = []
chain.append(
actapi.fact("classifiedAs")
.source("content", content)
.destination("tool", "*")
)
chain.append(
actapi.fact("implements")
.source("tool", "*")
.destination("technique", attck["technique"])
)
feeds_facts += act.api.fact.fact_chain(*chain)
return feeds_facts
def handle_process_list(
actapi: act.api.Act, content: Text, process_list: List[Dict]
) -> List[act.api.fact.Fact]:
"""Handle the process list part of the hybrid analysis report"""
feeds_facts: List[act.api.fact.Fact] = []
for proc in process_list:
chain = []
path = proc["normalizedpath"] if "normalizedpath" in proc else proc["name"]
chain.append(
actapi.fact("executes").source("content", content).destination("uri", "*")
)
chain.append(
actapi.fact("componentOf").source("path", path).destination("uri", "*")
)
feeds_facts += act.api.fact.fact_chain(*chain)
return feeds_facts
def handle_report(
actapi: act.api.Act, report: Dict[Text, Any]
) -> List[act.api.fact.Fact]:
"""Create facts from a report"""
feeds_facts: List[act.api.fact.Fact] = []
content = report["sha256"]
for hash_type in ["md5", "sha1", "sha256", "ssdeep", "imphash", "sha512"]:
if (
hash_type not in report
or not report[hash_type]
or report[hash_type] == "Unknown"
):
info(f"{hash_type} not set for content {content}")
continue
feeds_facts.append(
actapi.fact("represents")
.source("hash", report[hash_type])
.destination("content", content)
)
feeds_facts.append(
actapi.fact("category", hash_type).source("hash", report[hash_type])
)
feeds_facts += handle_hosts(actapi, content, report.get("hosts", []))
feeds_facts += handle_domains(actapi, content, report.get("domains", []))
feeds_facts += handle_extracted_files(
actapi, content, report.get("extracted_files", [])
)
feeds_facts += handle_classification_tags(
actapi, content, report.get("classification_tags", [])
)
# DISABLED DUE TO EXCESSIVE FACT CHAIN OBJECT. TO BE DISCUSSED
# feeds_facts += handle_mitre_attcks(actapi, content, report.get("mitre_attcks", []))
feeds_facts += handle_process_list(actapi, content, report.get("process_list", []))
return feeds_facts
def handle_hash(
actapi: act.api.Act,
apikey: Text,
hashdigest: Text,
user_agent: Text,
proxies: Optional[Dict[Text, Text]] = None,
verify_ssl: bool = True,
output_format: Text = "json",
) -> None:
"""Download, parse and provide facts from the public feed of hybrid-analysis.com"""
data = search_hash(apikey, hashdigest, user_agent, proxies, verify_ssl)
for report in data:
for fact in handle_report(actapi, report):
act.api.helpers.handle_fact(fact, output_format=output_format)
def search_hash(
apikey: Text,
hashdigest: Text,
user_agent: Text,
proxies: Optional[Dict[Text, Text]] = None,
verify_ssl: bool = True,
) -> List[Dict[Text, Any]]:
"""Search the hybrid-analysis api for a specific hash"""
url = "https://www.hybrid-analysis.com/api/v2/search/hash"
with ssl_verification(verify=verify_ssl):
headers = {
"User-Agent": user_agent,
"accept": "application/json",
"api-key": apikey,
"Content-Type": "application/x-www-form-urlencoded",
}
form_data = {"hash": hashdigest}
response = requests.post(url, proxies=proxies, headers=headers, data=form_data)
if response.status_code != 200:
print(response.text)
raise CommunicationError(
f"hybrid_analysis_feed.search_hash() could not search community API, "
f"error calling {url}: Status = {response.status_code}"
)
try:
data: List[Dict[Text, Any]] = response.json()
except json.decoder.JSONDecodeError as err:
raise CommunicationError(
f"hybrid_analysis_feed.search_hash() could not load search result, "
f"error decoding json result from {url}: {err}"
)
return data
def main() -> None:
"""main function"""
# Look for default ini file in "/etc/actworkers.ini" and ~/config/actworkers/actworkers.ini
# (or replace .config with $XDG_CONFIG_DIR if set)
args = cli.handle_args(parseargs())
actapi = worker.init_act(args)
# if not args.apikey:
# cli.fatal("You must specify --apikey on command line or in config file")
proxies = (
{"http": args.proxy_string, "https": args.proxy_string}
if args.proxy_string
else None
)
params = {
"actapi": actapi,
"user_agent": args.user_agent,
"proxies": proxies,
"verify_ssl": args.no_check_certificate,
"output_format": args.output_format,
}
if args.feed:
handle_feed(**params)
else:
params["apikey"] = args.apikey
for line in sys.stdin:
params["hashdigest"] = line.strip()
handle_hash(**params)
@contextlib.contextmanager
def ssl_verification(verify: bool = True) -> Generator[None, None, None]:
"""Monkey patch request to manage ssl verification. Can be used 'around' code
that uses requests internally"""
old_request = requests.Session.request
requests.Session.request = partialmethod(old_request, verify=verify) # type: ignore
warnings.filterwarnings("ignore", "Unverified HTTPS request")
yield
warnings.resetwarnings()
requests.Session.request = old_request # type: ignore
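# Hedged usage sketch (the URL is illustrative, not part of the worker): the
# context manager above is meant to wrap any code that talks HTTP through
# requests, e.g.
#
#   with ssl_verification(verify=False):
#       requests.get("https://self-signed.example.invalid/")
#
# Inside the block every requests.Session.request call is forced to the given
# verify setting; the original method is restored when the block exits.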
class CommunicationError(Exception):
"""CommunicationError is used to gather all communication errors into one"""
...
def main_log_error() -> None:
"""Main entry point, catching and logging errors"""
try:
main()
except Exception:
error("Unhandled exception: {}".format(traceback.format_exc()))
raise
if __name__ == "__main__":
main_log_error()
|
[
"warnings.filterwarnings",
"act.workers.libs.worker.parseargs",
"functools.partialmethod",
"warnings.resetwarnings",
"act.workers.libs.worker.init_act",
"logging.info",
"traceback.format_exc",
"requests.get",
"requests.post"
] |
[((1239, 1289), 'act.workers.libs.worker.parseargs', 'worker.parseargs', (['"""ACT hybrid-analysis.com Client"""'], {}), "('ACT hybrid-analysis.com Client')\n", (1255, 1289), False, 'from act.workers.libs import worker\n'), ((11795, 11816), 'act.workers.libs.worker.init_act', 'worker.init_act', (['args'], {}), '(args)\n', (11810, 11816), False, 'from act.workers.libs import worker\n'), ((12777, 12818), 'functools.partialmethod', 'partialmethod', (['old_request'], {'verify': 'verify'}), '(old_request, verify=verify)\n', (12790, 12818), False, 'from functools import partialmethod\n'), ((12840, 12901), 'warnings.filterwarnings', 'warnings.filterwarnings', (['"""ignore"""', '"""Unverified HTTPS request"""'], {}), "('ignore', 'Unverified HTTPS request')\n", (12863, 12901), False, 'import warnings\n'), ((12916, 12940), 'warnings.resetwarnings', 'warnings.resetwarnings', ([], {}), '()\n', (12938, 12940), False, 'import warnings\n'), ((2148, 2199), 'requests.get', 'requests.get', (['url'], {'proxies': 'proxies', 'headers': 'headers'}), '(url, proxies=proxies, headers=headers)\n', (2160, 2199), False, 'import requests\n'), ((10899, 10967), 'requests.post', 'requests.post', (['url'], {'proxies': 'proxies', 'headers': 'headers', 'data': 'form_data'}), '(url, proxies=proxies, headers=headers, data=form_data)\n', (10912, 10967), False, 'import requests\n'), ((5274, 5329), 'logging.info', 'info', (['f"""{file} is missing file_path using name instead"""'], {}), "(f'{file} is missing file_path using name instead')\n", (5278, 5329), False, 'from logging import error, info\n'), ((8722, 8772), 'logging.info', 'info', (['f"""{hash_type} not set for content {content}"""'], {}), "(f'{hash_type} not set for content {content}')\n", (8726, 8772), False, 'from logging import error, info\n'), ((13312, 13334), 'traceback.format_exc', 'traceback.format_exc', ([], {}), '()\n', (13332, 13334), False, 'import traceback\n')]
|
from sss_beneficiarios_hospitales.data import DataBeneficiariosSSSHospital
def test_query_afiliado():
dbh = DataBeneficiariosSSSHospital(user='FAKE', password='<PASSWORD>')
res = dbh.query(dni='full-afiliado')
assert res['ok']
data = res['resultados']
assert data['title'] == "Superintendencia de Servicios de Salud"
assert data["afiliado"]
assert len(data['tablas']) == 2
for d in data['tablas']:
assert "name" in d
is_afiliacion = "AFILIACION" in [v for k, v in d.items() if k == 'name']
is_persona = "AFILIADO" in [v for k, v in d.items() if k == 'name']
assert is_afiliacion or is_persona
if is_afiliacion:
assert d['data']["Parentesco"] == "TITULAR"
assert d['data']["CUIL"] == "27-1XXXXX3-6"
assert d['data']["Tipo de documento"] == "DOCUMENTO UNICO"
assert d['data']["N\u00famero de documento"] == "1XXXXX3"
assert d['data']["Apellido y nombre"] == "FXXXL MARIA"
assert d['data']["Provincia"] == "CORDOBA"
assert d['data']["Fecha de nacimiento"] == "09-09-1961"
assert d['data']["Sexo"] == "Femenino"
if is_persona:
assert d['data']["CUIL titular"] == "27-1XXXXX3-6"
assert d['data']["CUIT de empleador"] == "33-63761744-9"
assert d['data']["CUIL titular"] == "27-1XXXXX3-6"
assert d['data']["Tipo de beneficiario"] == "JUBILADOS Y PENSIONADOS DE PAMI"
assert d['data']["C\u00f3digo de Obra Social"] == "5-0080-7"
assert d['data']["Denominaci\u00f3n Obra Social"] == "INSTITUTO NACIONAL DE SERVICIOS SOCIALES PARA JUBILADOS Y PENSIONADOS"
assert d['data']["Fecha Alta Obra Social"] == "01-08-2012"
def test_query_afiliado_con_empleador():
dbh = DataBeneficiariosSSSHospital(user='FAKE', password='<PASSWORD>')
res = dbh.query(dni='full-afiliado-con-empleador')
assert res['ok']
data = res['resultados']
assert data['title'] == "Superintendencia de Servicios de Salud"
assert data["afiliado"]
for d in data['tablas']:
assert "name" in d
is_afiliacion = "AFILIACION" in [v for k, v in d.items() if k == 'name']
is_persona = "AFILIADO" in [v for k, v in d.items() if k == 'name']
is_declarado = "DECLARADO_POR_EMPLEADOR" in [v for k, v in d.items() if k == 'name']
assert is_afiliacion or is_persona or is_declarado
if is_afiliacion:
assert d['data']["Parentesco"] == "TITULAR"
assert d['data']["CUIL"] == "27-1XXXXX3-6"
assert d['data']["Tipo de documento"] == "DOCUMENTO UNICO"
assert d['data']["N\u00famero de documento"] == "1XXXXX3"
assert d['data']["Apellido y nombre"] == "<NAME>"
assert d['data']["Provincia"] == "CAPITAL FEDERAL"
assert d['data']["Fecha de nacimiento"] == "25-05-1977"
assert d['data']["Sexo"] == "Masculino"
if is_persona:
assert d['data']["CUIL titular"] == "27-1XXXXX3-6"
assert d['data']["CUIT de empleador"] == "30-70818659-3"
assert d['data']["CUIL titular"] == "27-1XXXXX3-6"
assert d['data']["Tipo de beneficiario"] == "RELACION DE DEPENDENCIA"
assert d['data']["C\u00f3digo de Obra Social"] == "4-0080-0"
assert d['data']["Denominaci\u00f3n Obra Social"] == "OBRA SOCIAL DE EJECUTIVOS Y DEL PERSONAL DE DIRECCION DE EMPRESAS"
assert d['data']["Fecha Alta Obra Social"] == "01-06-1931"
if is_declarado:
assert d['data']["Tipo Beneficiario Declarado"] == "RELACION DE DEPENDENCIA (DDJJ SIJP)"
assert d['data']["Ultimo Per\u00edodo Declarado"] == "02-2020"
def test_query_no_afiliado():
dbh = DataBeneficiariosSSSHospital(user='FAKE', password='<PASSWORD>')
res = dbh.query(dni='full-sin-datos')
assert res['ok']
data = res['resultados']
assert data['title'] == "Superintendencia de Servicios de Salud"
assert data["afiliado"] == False
for d in data['tablas']:
assert "name" in d
is_persona = "NO_AFILIADO" in [v for k, v in d.items() if k == 'name']
assert is_persona
assert d['data']["Apellido y Nombre"] == "BXXXXXXXXS FXXXL <NAME>"
assert d['data']["Tipo Documento"] == "DU"
assert d['data']["Nro Documento"] == "2XXXXX1"
assert d['data']["CUIL"] == "202XXXXX18"
|
[
"sss_beneficiarios_hospitales.data.DataBeneficiariosSSSHospital"
] |
[((114, 178), 'sss_beneficiarios_hospitales.data.DataBeneficiariosSSSHospital', 'DataBeneficiariosSSSHospital', ([], {'user': '"""FAKE"""', 'password': '"""<PASSWORD>"""'}), "(user='FAKE', password='<PASSWORD>')\n", (142, 178), False, 'from sss_beneficiarios_hospitales.data import DataBeneficiariosSSSHospital\n'), ((1828, 1892), 'sss_beneficiarios_hospitales.data.DataBeneficiariosSSSHospital', 'DataBeneficiariosSSSHospital', ([], {'user': '"""FAKE"""', 'password': '"""<PASSWORD>"""'}), "(user='FAKE', password='<PASSWORD>')\n", (1856, 1892), False, 'from sss_beneficiarios_hospitales.data import DataBeneficiariosSSSHospital\n'), ((3819, 3883), 'sss_beneficiarios_hospitales.data.DataBeneficiariosSSSHospital', 'DataBeneficiariosSSSHospital', ([], {'user': '"""FAKE"""', 'password': '"""<PASSWORD>"""'}), "(user='FAKE', password='<PASSWORD>')\n", (3847, 3883), False, 'from sss_beneficiarios_hospitales.data import DataBeneficiariosSSSHospital\n')]
|
# Generated by the protocol buffer compiler. DO NOT EDIT!
# source: users/user.proto
import sys
_b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1'))
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
from common import originator_pb2 as common_dot_originator__pb2
DESCRIPTOR = _descriptor.FileDescriptor(
name='users/user.proto',
package='contracts.users',
syntax='proto3',
serialized_options=_b('Z1github.com/makkalot/eskit/generated/grpc/go/users'),
serialized_pb=_b('\n\x10users/user.proto\x12\x0f\x63ontracts.users\x1a\x17\x63ommon/originator.proto\"\x92\x01\n\x04User\x12\x30\n\noriginator\x18\x01 \x01(\x0b\x32\x1c.contracts.common.Originator\x12\r\n\x05\x65mail\x18\x02 \x01(\t\x12\x12\n\nfirst_name\x18\x03 \x01(\t\x12\x11\n\tlast_name\x18\x04 \x01(\t\x12\x0e\n\x06\x61\x63tive\x18\x05 \x01(\x08\x12\x12\n\nworkspaces\x18\x06 \x03(\tB3Z1github.com/makkalot/eskit/generated/grpc/go/usersb\x06proto3')
,
dependencies=[common_dot_originator__pb2.DESCRIPTOR,])
_USER = _descriptor.Descriptor(
name='User',
full_name='contracts.users.User',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='originator', full_name='contracts.users.User.originator', index=0,
number=1, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='email', full_name='contracts.users.User.email', index=1,
number=2, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='first_name', full_name='contracts.users.User.first_name', index=2,
number=3, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='last_name', full_name='contracts.users.User.last_name', index=3,
number=4, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='active', full_name='contracts.users.User.active', index=4,
number=5, type=8, cpp_type=7, label=1,
has_default_value=False, default_value=False,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='workspaces', full_name='contracts.users.User.workspaces', index=5,
number=6, type=9, cpp_type=9, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=63,
serialized_end=209,
)
_USER.fields_by_name['originator'].message_type = common_dot_originator__pb2._ORIGINATOR
DESCRIPTOR.message_types_by_name['User'] = _USER
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
User = _reflection.GeneratedProtocolMessageType('User', (_message.Message,), dict(
DESCRIPTOR = _USER,
__module__ = 'users.user_pb2'
# @@protoc_insertion_point(class_scope:contracts.users.User)
))
_sym_db.RegisterMessage(User)
DESCRIPTOR._options = None
# @@protoc_insertion_point(module_scope)
|
[
"google.protobuf.symbol_database.Default",
"google.protobuf.descriptor.FieldDescriptor"
] |
[((440, 466), 'google.protobuf.symbol_database.Default', '_symbol_database.Default', ([], {}), '()\n', (464, 466), True, 'from google.protobuf import symbol_database as _symbol_database\n'), ((1412, 1758), 'google.protobuf.descriptor.FieldDescriptor', '_descriptor.FieldDescriptor', ([], {'name': '"""originator"""', 'full_name': '"""contracts.users.User.originator"""', 'index': '(0)', 'number': '(1)', 'type': '(11)', 'cpp_type': '(10)', 'label': '(1)', 'has_default_value': '(False)', 'default_value': 'None', 'message_type': 'None', 'enum_type': 'None', 'containing_type': 'None', 'is_extension': '(False)', 'extension_scope': 'None', 'serialized_options': 'None', 'file': 'DESCRIPTOR'}), "(name='originator', full_name=\n 'contracts.users.User.originator', index=0, number=1, type=11, cpp_type\n =10, label=1, has_default_value=False, default_value=None, message_type\n =None, enum_type=None, containing_type=None, is_extension=False,\n extension_scope=None, serialized_options=None, file=DESCRIPTOR)\n", (1439, 1758), True, 'from google.protobuf import descriptor as _descriptor\n'), ((2928, 3264), 'google.protobuf.descriptor.FieldDescriptor', '_descriptor.FieldDescriptor', ([], {'name': '"""active"""', 'full_name': '"""contracts.users.User.active"""', 'index': '(4)', 'number': '(5)', 'type': '(8)', 'cpp_type': '(7)', 'label': '(1)', 'has_default_value': '(False)', 'default_value': '(False)', 'message_type': 'None', 'enum_type': 'None', 'containing_type': 'None', 'is_extension': '(False)', 'extension_scope': 'None', 'serialized_options': 'None', 'file': 'DESCRIPTOR'}), "(name='active', full_name=\n 'contracts.users.User.active', index=4, number=5, type=8, cpp_type=7,\n label=1, has_default_value=False, default_value=False, message_type=\n None, enum_type=None, containing_type=None, is_extension=False,\n extension_scope=None, serialized_options=None, file=DESCRIPTOR)\n", (2955, 3264), True, 'from google.protobuf import descriptor as _descriptor\n'), ((3289, 3631), 'google.protobuf.descriptor.FieldDescriptor', '_descriptor.FieldDescriptor', ([], {'name': '"""workspaces"""', 'full_name': '"""contracts.users.User.workspaces"""', 'index': '(5)', 'number': '(6)', 'type': '(9)', 'cpp_type': '(9)', 'label': '(3)', 'has_default_value': '(False)', 'default_value': '[]', 'message_type': 'None', 'enum_type': 'None', 'containing_type': 'None', 'is_extension': '(False)', 'extension_scope': 'None', 'serialized_options': 'None', 'file': 'DESCRIPTOR'}), "(name='workspaces', full_name=\n 'contracts.users.User.workspaces', index=5, number=6, type=9, cpp_type=\n 9, label=3, has_default_value=False, default_value=[], message_type=\n None, enum_type=None, containing_type=None, is_extension=False,\n extension_scope=None, serialized_options=None, file=DESCRIPTOR)\n", (3316, 3631), True, 'from google.protobuf import descriptor as _descriptor\n')]
|
import pandas as pd
file_romeo = open("./data/romeoandjuliet.csv", "r")
file_moby = open("./data/mobydick.csv", "r")
file_gatsby = open("./data/greatgatsby.csv", "r")
file_hamlet = open("./data/hamlet.csv", "r")
romeo = file_romeo.read()
moby = file_moby.read()
gatsby = file_gatsby.read()
hamlet = file_hamlet.read()
print(type(romeo))
print(romeo)
the_set = []
df_romeo = pd.read_csv("./data/romeoandjuliet.csv", sep=",")
print(df_romeo)
df_moby = pd.read_csv("./data/mobydick.csv", sep=",")
print(df_moby)
df_gatsby = pd.read_csv("./data/greatgatsby.csv", sep=",")
print(df_gatsby)
df_hamlet = pd.read_csv("./data/hamlet.csv", sep=",")
print(df_hamlet)
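# The two pairwise outer merges below, followed by a final outer merge on
# 'Word', keep every word that appears in any of the four books; fillna(0)
# afterwards replaces the missing counts with zeros before the combined table
# is written to ./data/matrix.csv.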
romeo_moby = pd.merge(df_romeo, df_moby, how="outer", on=["Word"], suffixes=('_Romeo', '_Moby'))
print(romeo_moby)
gatsby_hamlet = pd.merge(df_gatsby, df_hamlet, how="outer", on=["Word"], suffixes=('_Gatsby', '_Hamlet'))
print(gatsby_hamlet)
full = pd.merge(romeo_moby, gatsby_hamlet, how="outer", on=["Word"])
print(full)
pd.set_option("display.max_rows", None, "display.max_columns", None)
full = full.fillna(0)
print(full)
full.to_csv(path_or_buf="./data/matrix.csv")
#the_set = romeo.union(moby, gatsby, hamlet)
file_romeo.close()
file_moby.close()
file_gatsby.close()
file_hamlet.close()
|
[
"pandas.read_csv",
"pandas.merge",
"pandas.set_option"
] |
[((382, 431), 'pandas.read_csv', 'pd.read_csv', (['"""./data/romeoandjuliet.csv"""'], {'sep': '""","""'}), "('./data/romeoandjuliet.csv', sep=',')\n", (393, 431), True, 'import pandas as pd\n'), ((460, 503), 'pandas.read_csv', 'pd.read_csv', (['"""./data/mobydick.csv"""'], {'sep': '""","""'}), "('./data/mobydick.csv', sep=',')\n", (471, 503), True, 'import pandas as pd\n'), ((532, 578), 'pandas.read_csv', 'pd.read_csv', (['"""./data/greatgatsby.csv"""'], {'sep': '""","""'}), "('./data/greatgatsby.csv', sep=',')\n", (543, 578), True, 'import pandas as pd\n'), ((608, 649), 'pandas.read_csv', 'pd.read_csv', (['"""./data/hamlet.csv"""'], {'sep': '""","""'}), "('./data/hamlet.csv', sep=',')\n", (619, 649), True, 'import pandas as pd\n'), ((681, 768), 'pandas.merge', 'pd.merge', (['df_romeo', 'df_moby'], {'how': '"""outer"""', 'on': "['Word']", 'suffixes': "('_Romeo', '_Moby')"}), "(df_romeo, df_moby, how='outer', on=['Word'], suffixes=('_Romeo',\n '_Moby'))\n", (689, 768), True, 'import pandas as pd\n'), ((801, 895), 'pandas.merge', 'pd.merge', (['df_gatsby', 'df_hamlet'], {'how': '"""outer"""', 'on': "['Word']", 'suffixes': "('_Gatsby', '_Hamlet')"}), "(df_gatsby, df_hamlet, how='outer', on=['Word'], suffixes=(\n '_Gatsby', '_Hamlet'))\n", (809, 895), True, 'import pandas as pd\n'), ((920, 981), 'pandas.merge', 'pd.merge', (['romeo_moby', 'gatsby_hamlet'], {'how': '"""outer"""', 'on': "['Word']"}), "(romeo_moby, gatsby_hamlet, how='outer', on=['Word'])\n", (928, 981), True, 'import pandas as pd\n'), ((996, 1064), 'pandas.set_option', 'pd.set_option', (['"""display.max_rows"""', 'None', '"""display.max_columns"""', 'None'], {}), "('display.max_rows', None, 'display.max_columns', None)\n", (1009, 1064), True, 'import pandas as pd\n')]
|
import connexion
from openapi_server.annotator.phi_types import PhiType
from openapi_server.get_annotations import get_annotations
from openapi_server.models.error import Error # noqa: E501
from openapi_server.models.text_location_annotation_request import TextLocationAnnotationRequest # noqa: E501
from openapi_server.models.text_location_annotation_response import TextLocationAnnotationResponse # noqa: E501
def create_text_location_annotations(): # noqa: E501
"""Annotate locations in a clinical note
Return the location annotations found in a clinical note # noqa: E501
:param text_location_annotation_request:
:type text_location_annotation_request: dict | bytes
:rtype: TextLocationAnnotationResponse
"""
res = None
status = None
if connexion.request.is_json:
try:
annotation_request = TextLocationAnnotationRequest.from_dict(connexion.request.get_json()) # noqa: E501
note = annotation_request.note
annotations = get_annotations(
note, phi_type=PhiType.LOCATION)
res = TextLocationAnnotationResponse(annotations)
status = 200
except Exception as error:
status = 500
res = Error("Internal error", status, str(error))
return res, status
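# Flow summary (a descriptive note inferred from the code above rather than the
# OpenAPI spec): the JSON body is deserialized into a TextLocationAnnotationRequest,
# its .note field is passed to get_annotations() with PhiType.LOCATION, and the
# annotations are wrapped in a TextLocationAnnotationResponse with HTTP 200; any
# exception is reported as a 500 Error.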
|
[
"connexion.request.get_json",
"openapi_server.models.text_location_annotation_response.TextLocationAnnotationResponse",
"openapi_server.get_annotations.get_annotations"
] |
[((1012, 1060), 'openapi_server.get_annotations.get_annotations', 'get_annotations', (['note'], {'phi_type': 'PhiType.LOCATION'}), '(note, phi_type=PhiType.LOCATION)\n', (1027, 1060), False, 'from openapi_server.get_annotations import get_annotations\n'), ((1097, 1140), 'openapi_server.models.text_location_annotation_response.TextLocationAnnotationResponse', 'TextLocationAnnotationResponse', (['annotations'], {}), '(annotations)\n', (1127, 1140), False, 'from openapi_server.models.text_location_annotation_response import TextLocationAnnotationResponse\n'), ((899, 927), 'connexion.request.get_json', 'connexion.request.get_json', ([], {}), '()\n', (925, 927), False, 'import connexion\n')]
|
#!/usr/bin/env python
"""
Southern California Earthquake Center Broadband Platform
Copyright 2010-2016 Southern California Earthquake Center
"""
from __future__ import division, print_function
# Import Python modules
import os
import sys
import shutil
import matplotlib as mpl
mpl.use('AGG', warn=False)
import pylab
import numpy as np
# Import Broadband modules
import bband_utils
import install_cfg
from station_list import StationList
# Import plot config file
import plot_config
def create_boore_asc2smc(control_file, input_file,
data_column, num_headers,
extension_string):
"""
This function creates the control file for the asc2smc converter tool
"""
ctl_file = open(control_file, 'w')
ctl_file.write("!Control file for ASC2SMC ! first line\n")
ctl_file.write("! Revision of program involving a change in the "
"control file on this date:\n")
ctl_file.write(" 02/02/12\n")
ctl_file.write("!Name of summary file:\n")
ctl_file.write(" asc2smc.sum\n")
ctl_file.write("!n2skip (-1=headers preceded by !; 0=no headers; "
"otherwise number of headers to skip)\n")
ctl_file.write(" %d\n" % (num_headers))
ctl_file.write("!write headers to smc file "
"(even if n2skip > 0)? (Y/N)\n")
ctl_file.write(" Y\n")
ctl_file.write("!sps (0.0 = obtain from input file)\n")
ctl_file.write(" 0\n")
ctl_file.write("!N columns to read, column number for "
"time and data columns \n")
ctl_file.write("! (for files made using blpadflt, period is in "
"column 1 and sd, pv, pa, rv, \n")
ctl_file.write("! aa are in columns 2, 3, 4, 5, 6, respectively)\n")
ctl_file.write("! Note: if sps .ne. 0.0, then column number for time "
"is ignored (but a placeholder is\n")
ctl_file.write("! still needed--e.g., 1 1 1 (read one column, which "
"contains the data; 1 20 1 would be the same)\n")
ctl_file.write("! But note: if the data are not in the first column, "
"but only the data column is to be read\n")
ctl_file.write("! (because sps will be used to establish "
"the time values),\n")
ctl_file.write("! then ncolumns must be the column corresponding to "
"the data. For example, assume that\n")
ctl_file.write("! the data are in column 3 and that columns 1 and 2 "
"contain time and some other variable, but\n")
ctl_file.write("! the time column is not to be used (perhaps because "
"accumulated error in creating the column\n")
ctl_file.write("! leads to a slight shift in the time values). "
"Then the input line should be:\n")
ctl_file.write("! 3 1 3\n")
ctl_file.write("!\n")
ctl_file.write("! This program assumes one data point per row; if "
"there are more points (as, for example,\n")
ctl_file.write("! in files with N points per line), "
"use the program wrapped2asc).\n")
ctl_file.write("!\n")
ctl_file.write(" 3 1 %d\n" % (data_column))
ctl_file.write("!Xfactr\n")
ctl_file.write(" 1.0\n")
ctl_file.write("!Read input format (used if the format is such that "
"the values are not separated by spaces,\n")
ctl_file.write("!in which case a free format cannot be "
"used for input)?\n")
ctl_file.write(" N\n")
ctl_file.write("!If yes, specify a format; if not, "
"still need a placeholder\n")
ctl_file.write(" (3e13.5)\n")
ctl_file.write("!For output, use old (standard) smc format or new\n")
ctl_file.write('!higher precision format. Specify "high" for\n')
ctl_file.write("!high precision; any other word defaults to standard\n")
ctl_file.write("!precision (but some word is needed as "
"a placeholder, even if\n")
ctl_file.write("!standard precision is desired).\n")
ctl_file.write(" high\n")
ctl_file.write("!String to append to input file name "
"for the output filename.\n")
ctl_file.write(" %s\n" % (extension_string))
ctl_file.write('!Input file name (time,data pairs; "stop" in any '
'column to quit):\n')
ctl_file.write("%s\n" % (input_file))
ctl_file.write("STOP\n")
ctl_file.close()
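# Hedged example call (the file names are assumed; only the keyword meanings
# come from the function above):
#
#   create_boore_asc2smc("asc2smc_ns.ctl", "10000000.sta01.acc.bbp",
#                        data_column=2, num_headers=0,
#                        extension_string=".smc8.ns")
#
# The asc2smc utility then reads asc2smc_ns.ctl and rewrites column 2 of the
# input time series in SMC format, appending the requested suffix to the name.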
def create_boore_smc2fs2(control_file, input_file, name_string):
"""
This function creates the control file for the smc2fs2 FAS tool
"""
ctl_file = open(control_file, 'w')
ctl_file.write('!Control file for program SMC2FS2\n')
ctl_file.write('! Revision of program involving a change in the control '
'file on this date:\n')
ctl_file.write(' 03/10/10\n')
ctl_file.write('! As many comment lines as desired, each '
'starting with "!"\n')
ctl_file.write('! The string "pp:" indicates a new set '
'of processing parameters\n')
ctl_file.write('! to be applied to the following smc files. '
'The parameters are given on the\n')
ctl_file.write('! lines following "pp:", until the next "pp:" line '
'or until "stop" is \n')
ctl_file.write('! encountered.\n')
ctl_file.write('! NOTE: Use the tapers with caution, '
'choosing them so that important signal\n')
ctl_file.write('! is not reduced by the tapering. '
'This can be particularly a problem with \n')
ctl_file.write('! analog data from relatively small earthquakes '
'that triggered near the \n')
ctl_file.write('! S-wave arrival. \n')
ctl_file.write('!\n')
ctl_file.write('! -----------------------------------------'
'------------------------------------\n')
ctl_file.write('!\n')
ctl_file.write('! Meaning of smoothing input parameters\n')
ctl_file.write('!\n')
ctl_file.write('! NO SMOOTHING\n')
ctl_file.write('! itype = 0\n')
ctl_file.write('! SMOOTHING OVER EQUALLY SPACED FREQUENCIES\n')
ctl_file.write('! itype = 1: box weighting function\n')
ctl_file.write('! smooth_param = width of box weighting function (Hz)\n')
ctl_file.write('! itype = 2: triangular weighting function\n')
ctl_file.write('! smooth_param = width of triangular '
'weighting function (Hz)\n')
ctl_file.write('! SMOOTHING OVER LOGARITHMICALLY SPACED FREQUENCIES\n')
ctl_file.write('! itype = 3: box weighting function\n')
ctl_file.write('! smooth_param = xi, which is the fraction of '
'a decade for the\n')
ctl_file.write('! box weighting function \n')
ctl_file.write('! itype = 4: triangular weighting function\n')
ctl_file.write('! smooth_param = xi, which is the fraction of '
'a decade for the\n')
ctl_file.write('! triangular weighting function \n')
ctl_file.write('! itype = 5: Konno and Ohmachi weighting function '
'(see BSSA 88, 228-241)\n')
ctl_file.write('! smooth_param = xi, which is the fraction '
'of a decade for which\n')
ctl_file.write('! the Konno and Ohmachi weighting '
'function is greater\n')
ctl_file.write('! than 0.043.(it is related to\n')
ctl_file.write('! their smoothing parameter b '
'by the equation\n')
ctl_file.write('! b = 4.0/smooth_param, so we have '
'this correspondence between\n')
ctl_file.write('! b and smooth_param\n')
ctl_file.write('! b smooth_param \n')
ctl_file.write('! 10 0.40\n')
ctl_file.write('! 20 0.20\n')
ctl_file.write('! 40 0.10\n')
ctl_file.write('! \n')
ctl_file.write('! b = 40 seems to be commonly used, '
'but I do not think that it\n')
ctl_file.write('! gives enough smoothing; '
'I PREFER SMOOTH_PARAM = 0.2, \n')
ctl_file.write('! corresponding to b = 20. \n')
ctl_file.write('!\n')
ctl_file.write('! ipow = power of FAS to be smoothed '
'(2 = smoothing energy spectrum)\n')
ctl_file.write('!\n')
ctl_file.write('! df_smooth: Note: need df_smooth for '
'linearly-spaced smoothers, \n')
ctl_file.write('! and generally it should be the df from the fft. '
'For general x data, it is\n')
ctl_file.write('! the spacing between x values, assumed to be constant, '
'The reason for\n')
ctl_file.write('! including it as an input parameter is to "fool" the\n')
ctl_file.write('! program to do smoothing over a specified '
'number of points by\n')
ctl_file.write('! setting df_smooth = 1 and smooth_param = number '
'of points (including \n')
ctl_file.write('! points with zero weight at ends; e.g., '
'smooth_param = 5 will \n')
ctl_file.write('! give a smoother with weights 0, 1/4, 2/4, 1/4, 0; '
'smooth_param\n')
ctl_file.write('! should be odd).\n')
ctl_file.write('!\n')
ctl_file.write('! ------------------------------------'
'-----------------------------------------\n')
ctl_file.write('! Meaning of frequency specification parameters:\n')
ctl_file.write('!\n')
ctl_file.write('!SPECIFY_FREQUENCIES? (y/n):\n')
ctl_file.write('! <enter Y or N>\n')
ctl_file.write('!FREQUENCY SPECIFICATION: \n')
ctl_file.write('! If specify_frequencies = Y, then enter the \n')
ctl_file.write('! number of frequencies, freq(1), freq(2)..., '
'freq(nfreq)\n')
ctl_file.write('! If specify_frequencies = N, then enter \n')
ctl_file.write('! f_low, f_high, log-spaced (0=N, 1=Y), freq_param\n')
ctl_file.write('! if freq_param = 0.0, there is no interpolation, '
'and the FFT frequencies \n')
ctl_file.write('! are used between f_low and f_high '
'(log-spaced is ignored).\n')
ctl_file.write('! if freq_param /= 0.0 and log-spaced = 0, '
'then freq_param is the spacing of the\n')
ctl_file.write('! interpolated frequencies '
'between f_low and f_high\n')
ctl_file.write('! if freq_param /= 0.0 and log-spaced = 1, '
'then freq_param is the number of \n')
ctl_file.write('! interpolated frequencies between f_low and '
'f_high (NOTE: f_low must be > 0.0)\n')
ctl_file.write('! ---------------------------------------'
'--------------------------------------\n')
ctl_file.write('!\n')
ctl_file.write('!Name of summary file:\n')
ctl_file.write(' smc2fs2.sum\n')
ctl_file.write('PP: new set of parameters\n')
ctl_file.write('!tskip, tlength\n')
ctl_file.write(' 0.0 2000.0\n')
ctl_file.write('!dc_remove?\n')
ctl_file.write(' .true. \n')
ctl_file.write('!Length of taper at beginning and end of time series, '
'before adding zeros\n')
ctl_file.write('! to make the number of points in '
'the record a power of two.\n')
ctl_file.write(' 0.0 0.0\n')
ctl_file.write('!signnpw2(<0, backup for npw2, no zpad):\n')
ctl_file.write(' +1.0\n')
ctl_file.write('!smoothing: itype, ipow, df_smooth '
'(0 = FFT df), smooth_param\n')
ctl_file.write('! (see above for the meaning of these input parameters):\n')
ctl_file.write(' 0 1 0.0 0.20\n')
ctl_file.write('!SPECIFY_FREQUENCIES? (y/n):\n')
ctl_file.write(' N\n')
ctl_file.write('!FREQUENCY SPECIFICATION\n')
ctl_file.write(' 0.01 100.0 0 0.0 \n')
ctl_file.write('!character string to append to filename:\n')
ctl_file.write(' %s\n' % (name_string))
ctl_file.write('!Output in smc format (Y,N)?\n')
ctl_file.write('! ***IMPORTANT NOTE: Output cannot be in smc '
'format if use log-spaced \n')
ctl_file.write('! frequencies because programs such as smc2asc '
'have not been modified\n')
ctl_file.write('! to deal with log-spaced frequency.\n')
ctl_file.write(' n\n')
ctl_file.write('!Files to process:\n')
ctl_file.write('%s\n' % (input_file))
ctl_file.write('stop\n')
ctl_file.close()
def read_fas_file(fas_file):
"""
Reads FAS file and returns freq and fas arrays
"""
freqs = []
fas = []
# Read input file
input_file = open(fas_file, 'r')
# Skip headers
for line in input_file:
line = line.strip()
# skip blank lines
if not line:
continue
if line.startswith("freq"):
break
for line in input_file:
line = line.strip()
# skip blank lines
if not line:
continue
pieces = line.split()
pieces = [float(piece) for piece in pieces]
freqs.append(pieces[0])
fas.append(pieces[1])
# All done!
input_file.close()
return freqs, fas
def plot_fas(freqs, ns_data, ew_data, eas_smoothed_data, fas_plot, station):
"""
Create a plot of both FAS components
"""
# Generate plot
# Set plot dims
pylab.gcf().set_size_inches(11, 8.5)
pylab.gcf().clf()
    # Set the plot title
t = pylab.title("Station: %s" % (station), size=12)
pylab.plot(freqs, ns_data, 'b', lw=0.75, label="NS")
pylab.plot(freqs, ew_data, 'r', lw=0.75, label="EW")
pylab.plot(freqs, eas_smoothed_data, 'k', lw=1.25, label="Smoothed EAS")
pylab.legend(loc='upper right')
pylab.xscale('log')
pylab.yscale('log')
pylab.ylabel('Fourier Amplitude (cm/s)')
pylab.xlabel('Frequency (Hz)')
pylab.axis([0.01, 100, 0.001, 1000])
pylab.grid(True)
pylab.grid(b=True, which='major', linestyle='-', color='lightgray')
pylab.grid(b=True, which='minor', linewidth=0.5, color='gray')
# Save plot
pylab.savefig(fas_plot, format="png",
transparent=False, dpi=plot_config.dpi)
pylab.close()
def ko98_smoothing(freqs, data, delta_freq, bexp):
"""
# ** smoothing of a function y (equally-spaced, dx) with the "Konno-Ohmachi"
# ** function sin (alog10(f/fc)^exp) / alog10(f/fc)^exp) ^^4
# ** where fc is the frequency around which the smoothing is performed
# ** exp determines the exponent 10^(1/exp) is the half-width of the peak
# ** cf Konno & Ohmachi, 1998, BSSA 88-1, pp. 228-241
"""
nx = len(freqs)
data_smooth = np.zeros(nx)
fratio = np.power(10., (2.5 / bexp))
data_smooth[0] = data[0]
for index in range(1, nx):
freq = freqs[index]
# Added check to avoid division by zero later and NaNs in the output file
if freq == 0.0:
data_smooth[index] = data[index]
continue
fc1 = freq / fratio
fc2 = freq * fratio
index1 = int(fc1 / delta_freq)
index2 = int((fc2 / delta_freq) + 1)
if index1 <= 1:
index1 = 0
if index2 >= nx:
index2 = nx
a1 = 0.0
a2 = 0.0
for j in range(index1, index2):
if j != index:
# Extra check to avoid NaNs in output file
if freqs[j] == 0.0:
data_smooth[index] = data[index]
break
c1 = bexp * np.log10(freqs[j] / freq)
c1 = np.power(np.sin(c1) / c1, 4.0)
a2 = a2 + c1
a1 = a1 + c1 * data[j]
else:
a2 = a2 + 1.0
a1 = a1 + data[index]
data_smooth[index] = a1 / a2
return data_smooth
def calculate_smoothed_eas(ns_file, ew_file, output_file=None):
"""
Calculates the smoothed EAS at the same frequencies as specified in
the input files
"""
b_param = 188.5 # cm/s
# Read data
freqs, ns_data = read_fas_file(ns_file)
_, ew_data = read_fas_file(ew_file)
eas_data = []
# Calculate EAS
for ns_comp, ew_comp in zip(ns_data, ew_data):
eas_data.append(np.sqrt(0.5*(pow(ns_comp, 2) + pow(ew_comp, 2))))
# Calculate Smoothed EAS
smoothed_eas = ko98_smoothing(freqs, eas_data,
freqs[1]-freqs[0],
b_param)
# Write data file if output_file is provided
if output_file is not None:
out_file = open(output_file, 'w')
out_file.write("# Freq(Hz)\t FAS H1 (cm/s)\t FAS H2 (cm/s)\t "
"EAS (cm/s)\t Smoothed EAS, b=%f (cm/s)\n" %
(b_param))
for freq, fas_h1, fas_h2, eas, s_eas in zip(freqs, ns_data,
ew_data, eas_data,
smoothed_eas):
out_file.write("%2.7E\t%2.7E\t%2.7E\t%2.7E\t%2.7E\n" %
(freq, fas_h1, fas_h2, eas, s_eas))
out_file.close()
# All done!
return freqs, ns_data, ew_data, eas_data, smoothed_eas
class FAS(object):
"""
    Implement FAS analysis for the Broadband Platform
"""
def __init__(self, i_r_stations, sim_id=0):
"""
Initializes class variables
"""
self.sim_id = sim_id
self.r_stations = i_r_stations
def run(self):
"""
Run FAS analysis codes
"""
print("FAS Calculation".center(80, '-'))
install = install_cfg.InstallCfg.getInstance()
sim_id = self.sim_id
sta_base = os.path.basename(os.path.splitext(self.r_stations)[0])
self.log = os.path.join(install.A_OUT_LOG_DIR, str(sim_id),
"%d.fas_%s.log" % (sim_id, sta_base))
a_statfile = os.path.join(install.A_IN_DATA_DIR,
str(sim_id),
self.r_stations)
a_tmpdir = os.path.join(install.A_TMP_DATA_DIR, str(sim_id))
a_outdir = os.path.join(install.A_OUT_DATA_DIR, str(sim_id))
a_outdir_fas = os.path.join(a_outdir, "FAS")
#
# Make sure the tmp and out directories exist
#
bband_utils.mkdirs([a_tmpdir, a_outdir, a_outdir_fas], print_cmd=False)
slo = StationList(a_statfile)
site_list = slo.getStationList()
# Save current directory
old_cwd = os.getcwd()
os.chdir(a_tmpdir)
for site in site_list:
print("==> Processing station: %s" % (site.scode))
# Copy acc file to tmpdata
acc_file = "%d.%s.acc.bbp" % (sim_id, site.scode)
shutil.copy2(os.path.join(a_outdir, acc_file),
os.path.join(a_tmpdir, acc_file))
asc2smc_control_file = "asc2smc.ctl"
smc2fs2_control_file = "smc2fs2.ctl"
header_lines = bband_utils.count_header_lines(os.path.join(a_tmpdir,
acc_file))
# Work on both NS and EW components
for comp, data_column in zip(["NS", "EW"], [2, 3]):
# First we convert from BBP to SMC format
create_boore_asc2smc(os.path.join(a_tmpdir,
asc2smc_control_file),
acc_file, data_column, header_lines,
".smc8.%s" % (comp))
cmd = ("%s << END >> %s 2>&1\n" %
(os.path.join(install.A_USGS_BIN_DIR, "asc2smc"),
self.log) +
"%s\n" % (asc2smc_control_file) +
"END\n")
bband_utils.runprog(cmd, False, abort_on_error=True)
# Then, we run the smc2fs2 FAS tool
smc_file = "%s.smc8.%s" % (acc_file, comp)
create_boore_smc2fs2(os.path.join(a_tmpdir,
smc2fs2_control_file),
smc_file, ".no_smooth.fs.col")
cmd = ("%s >> %s 2>&1\n" %
(os.path.join(install.A_USGS_BIN_DIR, "smc2fs2"),
self.log))
bband_utils.runprog(cmd, False, abort_on_error=True)
# Calculate EAS and smoothed EAS
ns_file = os.path.join(a_tmpdir,
"%s.smc8.NS.no_smooth.fs.col" % (acc_file))
ew_file = os.path.join(a_tmpdir,
"%s.smc8.EW.no_smooth.fs.col" % (acc_file))
output_file = os.path.join(a_outdir_fas,
"%s.smc8.smooth.fs.col" % (acc_file))
(freqs, ns_fas,
ew_fas, eas, smoothed_eas) = calculate_smoothed_eas(ns_file,
ew_file,
output_file)
# Create plot
fas_plot = os.path.join(a_outdir_fas,
"%d.%s.fas.png" % (sim_id, site.scode))
plot_fas(freqs, ns_fas, ew_fas, smoothed_eas, fas_plot, site.scode)
# All done, restore working directory
os.chdir(old_cwd)
print("FAS Calculation Completed".center(80, '-'))
if __name__ == '__main__':
if len(sys.argv) < 3:
print("Usage: %s station_list sim_id" % (os.path.basename(sys.argv[0])))
sys.exit(1)
print("Testing Module: %s" % (os.path.basename(sys.argv[0])))
ME = FAS(sys.argv[1], sim_id=int(sys.argv[2]))
ME.run()
sys.exit(0)
|
[
"pylab.close",
"numpy.sin",
"pylab.gcf",
"os.path.join",
"os.chdir",
"pylab.title",
"numpy.power",
"pylab.ylabel",
"pylab.xlabel",
"bband_utils.mkdirs",
"numpy.log10",
"pylab.legend",
"os.path.basename",
"pylab.grid",
"pylab.xscale",
"pylab.savefig",
"matplotlib.use",
"install_cfg.InstallCfg.getInstance",
"sys.exit",
"pylab.axis",
"os.getcwd",
"bband_utils.runprog",
"numpy.zeros",
"os.path.splitext",
"pylab.yscale",
"pylab.plot",
"station_list.StationList"
] |
[((278, 304), 'matplotlib.use', 'mpl.use', (['"""AGG"""'], {'warn': '(False)'}), "('AGG', warn=False)\n", (285, 304), True, 'import matplotlib as mpl\n'), ((13765, 13810), 'pylab.title', 'pylab.title', (["('Station: %s' % station)"], {'size': '(12)'}), "('Station: %s' % station, size=12)\n", (13776, 13810), False, 'import pylab\n'), ((13818, 13870), 'pylab.plot', 'pylab.plot', (['freqs', 'ns_data', '"""b"""'], {'lw': '(0.75)', 'label': '"""NS"""'}), "(freqs, ns_data, 'b', lw=0.75, label='NS')\n", (13828, 13870), False, 'import pylab\n'), ((13875, 13927), 'pylab.plot', 'pylab.plot', (['freqs', 'ew_data', '"""r"""'], {'lw': '(0.75)', 'label': '"""EW"""'}), "(freqs, ew_data, 'r', lw=0.75, label='EW')\n", (13885, 13927), False, 'import pylab\n'), ((13932, 14004), 'pylab.plot', 'pylab.plot', (['freqs', 'eas_smoothed_data', '"""k"""'], {'lw': '(1.25)', 'label': '"""Smoothed EAS"""'}), "(freqs, eas_smoothed_data, 'k', lw=1.25, label='Smoothed EAS')\n", (13942, 14004), False, 'import pylab\n'), ((14009, 14040), 'pylab.legend', 'pylab.legend', ([], {'loc': '"""upper right"""'}), "(loc='upper right')\n", (14021, 14040), False, 'import pylab\n'), ((14045, 14064), 'pylab.xscale', 'pylab.xscale', (['"""log"""'], {}), "('log')\n", (14057, 14064), False, 'import pylab\n'), ((14069, 14088), 'pylab.yscale', 'pylab.yscale', (['"""log"""'], {}), "('log')\n", (14081, 14088), False, 'import pylab\n'), ((14093, 14133), 'pylab.ylabel', 'pylab.ylabel', (['"""Fourier Amplitude (cm/s)"""'], {}), "('Fourier Amplitude (cm/s)')\n", (14105, 14133), False, 'import pylab\n'), ((14138, 14168), 'pylab.xlabel', 'pylab.xlabel', (['"""Frequency (Hz)"""'], {}), "('Frequency (Hz)')\n", (14150, 14168), False, 'import pylab\n'), ((14173, 14209), 'pylab.axis', 'pylab.axis', (['[0.01, 100, 0.001, 1000]'], {}), '([0.01, 100, 0.001, 1000])\n', (14183, 14209), False, 'import pylab\n'), ((14214, 14230), 'pylab.grid', 'pylab.grid', (['(True)'], {}), '(True)\n', (14224, 14230), False, 'import pylab\n'), ((14235, 14302), 'pylab.grid', 'pylab.grid', ([], {'b': '(True)', 'which': '"""major"""', 'linestyle': '"""-"""', 'color': '"""lightgray"""'}), "(b=True, which='major', linestyle='-', color='lightgray')\n", (14245, 14302), False, 'import pylab\n'), ((14307, 14369), 'pylab.grid', 'pylab.grid', ([], {'b': '(True)', 'which': '"""minor"""', 'linewidth': '(0.5)', 'color': '"""gray"""'}), "(b=True, which='minor', linewidth=0.5, color='gray')\n", (14317, 14369), False, 'import pylab\n'), ((14391, 14468), 'pylab.savefig', 'pylab.savefig', (['fas_plot'], {'format': '"""png"""', 'transparent': '(False)', 'dpi': 'plot_config.dpi'}), "(fas_plot, format='png', transparent=False, dpi=plot_config.dpi)\n", (14404, 14468), False, 'import pylab\n'), ((14491, 14504), 'pylab.close', 'pylab.close', ([], {}), '()\n', (14502, 14504), False, 'import pylab\n'), ((14969, 14981), 'numpy.zeros', 'np.zeros', (['nx'], {}), '(nx)\n', (14977, 14981), True, 'import numpy as np\n'), ((14995, 15021), 'numpy.power', 'np.power', (['(10.0)', '(2.5 / bexp)'], {}), '(10.0, 2.5 / bexp)\n', (15003, 15021), True, 'import numpy as np\n'), ((22046, 22057), 'sys.exit', 'sys.exit', (['(0)'], {}), '(0)\n', (22054, 22057), False, 'import sys\n'), ((17905, 17941), 'install_cfg.InstallCfg.getInstance', 'install_cfg.InstallCfg.getInstance', ([], {}), '()\n', (17939, 17941), False, 'import install_cfg\n'), ((18499, 18528), 'os.path.join', 'os.path.join', (['a_outdir', '"""FAS"""'], {}), "(a_outdir, 'FAS')\n", (18511, 18528), False, 'import os\n'), ((18612, 18683), 'bband_utils.mkdirs', 
'bband_utils.mkdirs', (['[a_tmpdir, a_outdir, a_outdir_fas]'], {'print_cmd': '(False)'}), '([a_tmpdir, a_outdir, a_outdir_fas], print_cmd=False)\n', (18630, 18683), False, 'import bband_utils\n'), ((18699, 18722), 'station_list.StationList', 'StationList', (['a_statfile'], {}), '(a_statfile)\n', (18710, 18722), False, 'from station_list import StationList\n'), ((18816, 18827), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (18825, 18827), False, 'import os\n'), ((18836, 18854), 'os.chdir', 'os.chdir', (['a_tmpdir'], {}), '(a_tmpdir)\n', (18844, 18854), False, 'import os\n'), ((21679, 21696), 'os.chdir', 'os.chdir', (['old_cwd'], {}), '(old_cwd)\n', (21687, 21696), False, 'import os\n'), ((21900, 21911), 'sys.exit', 'sys.exit', (['(1)'], {}), '(1)\n', (21908, 21911), False, 'import sys\n'), ((13667, 13678), 'pylab.gcf', 'pylab.gcf', ([], {}), '()\n', (13676, 13678), False, 'import pylab\n'), ((13708, 13719), 'pylab.gcf', 'pylab.gcf', ([], {}), '()\n', (13717, 13719), False, 'import pylab\n'), ((20782, 20846), 'os.path.join', 'os.path.join', (['a_tmpdir', "('%s.smc8.NS.no_smooth.fs.col' % acc_file)"], {}), "(a_tmpdir, '%s.smc8.NS.no_smooth.fs.col' % acc_file)\n", (20794, 20846), False, 'import os\n'), ((20906, 20970), 'os.path.join', 'os.path.join', (['a_tmpdir', "('%s.smc8.EW.no_smooth.fs.col' % acc_file)"], {}), "(a_tmpdir, '%s.smc8.EW.no_smooth.fs.col' % acc_file)\n", (20918, 20970), False, 'import os\n'), ((21034, 21096), 'os.path.join', 'os.path.join', (['a_outdir_fas', "('%s.smc8.smooth.fs.col' % acc_file)"], {}), "(a_outdir_fas, '%s.smc8.smooth.fs.col' % acc_file)\n", (21046, 21096), False, 'import os\n'), ((21441, 21507), 'os.path.join', 'os.path.join', (['a_outdir_fas', "('%d.%s.fas.png' % (sim_id, site.scode))"], {}), "(a_outdir_fas, '%d.%s.fas.png' % (sim_id, site.scode))\n", (21453, 21507), False, 'import os\n'), ((21946, 21975), 'os.path.basename', 'os.path.basename', (['sys.argv[0]'], {}), '(sys.argv[0])\n', (21962, 21975), False, 'import os\n'), ((18007, 18040), 'os.path.splitext', 'os.path.splitext', (['self.r_stations'], {}), '(self.r_stations)\n', (18023, 18040), False, 'import os\n'), ((19076, 19108), 'os.path.join', 'os.path.join', (['a_outdir', 'acc_file'], {}), '(a_outdir, acc_file)\n', (19088, 19108), False, 'import os\n'), ((19135, 19167), 'os.path.join', 'os.path.join', (['a_tmpdir', 'acc_file'], {}), '(a_tmpdir, acc_file)\n', (19147, 19167), False, 'import os\n'), ((19325, 19357), 'os.path.join', 'os.path.join', (['a_tmpdir', 'acc_file'], {}), '(a_tmpdir, acc_file)\n', (19337, 19357), False, 'import os\n'), ((20129, 20181), 'bband_utils.runprog', 'bband_utils.runprog', (['cmd', '(False)'], {'abort_on_error': '(True)'}), '(cmd, False, abort_on_error=True)\n', (20148, 20181), False, 'import bband_utils\n'), ((20661, 20713), 'bband_utils.runprog', 'bband_utils.runprog', (['cmd', '(False)'], {'abort_on_error': '(True)'}), '(cmd, False, abort_on_error=True)\n', (20680, 20713), False, 'import bband_utils\n'), ((21860, 21889), 'os.path.basename', 'os.path.basename', (['sys.argv[0]'], {}), '(sys.argv[0])\n', (21876, 21889), False, 'import os\n'), ((15823, 15848), 'numpy.log10', 'np.log10', (['(freqs[j] / freq)'], {}), '(freqs[j] / freq)\n', (15831, 15848), True, 'import numpy as np\n'), ((19637, 19681), 'os.path.join', 'os.path.join', (['a_tmpdir', 'asc2smc_control_file'], {}), '(a_tmpdir, asc2smc_control_file)\n', (19649, 19681), False, 'import os\n'), ((20330, 20374), 'os.path.join', 'os.path.join', (['a_tmpdir', 'smc2fs2_control_file'], {}), '(a_tmpdir, 
smc2fs2_control_file)\n', (20342, 20374), False, 'import os\n'), ((15879, 15889), 'numpy.sin', 'np.sin', (['c1'], {}), '(c1)\n', (15885, 15889), True, 'import numpy as np\n'), ((20561, 20608), 'os.path.join', 'os.path.join', (['install.A_USGS_BIN_DIR', '"""smc2fs2"""'], {}), "(install.A_USGS_BIN_DIR, 'smc2fs2')\n", (20573, 20608), False, 'import os\n'), ((19939, 19986), 'os.path.join', 'os.path.join', (['install.A_USGS_BIN_DIR', '"""asc2smc"""'], {}), "(install.A_USGS_BIN_DIR, 'asc2smc')\n", (19951, 19986), False, 'import os\n')]
|
""" module utils to method to files """
import logging
import hashlib
logger = logging.getLogger(__name__)
def write_file(path: str, source: str, mode="w") -> None:
""" write file in file system in unicode """
logger.debug("Gravando arquivo: %s", path)
with open(path, mode, encoding="utf-8") as f:
f.write(source)
def write_file_binary(path: str, source: bytes) -> None:
""" write file in file system in bytes """
logger.debug("Gravando arquivo binario: %s", path)
with open(path, "wb") as f:
f.write(source)
def sha1(mensagem: str) -> str:
""" generate sha1 hash """
_sum = hashlib.sha1()
_sum.update(mensagem.encode("utf-8"))
return _sum.hexdigest()
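# Example usage (illustrative; the paths are hypothetical):
#   write_file("/tmp/example.txt", "hello")
#   write_file_binary("/tmp/example.bin", b"\x00\x01")
#   digest = sha1("hello")  # 40-character hexadecimal digest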
|
[
"hashlib.sha1",
"logging.getLogger"
] |
[((81, 108), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (98, 108), False, 'import logging\n'), ((632, 646), 'hashlib.sha1', 'hashlib.sha1', ([], {}), '()\n', (644, 646), False, 'import hashlib\n')]
|
import itertools as it
TEST1 = """
20
15
10
5
5
"""
INPUT = open('input17.txt').read()
# def count_ways(containers, total=150):
# ways = 0
# containers = sorted(containers, reverse=True)
# def count(containers, used, stack=0):
# print(containers, used, stack)
# for i in range(len(containers)):
# c = containers.pop(0)
# used.append(c)
# if sum(used) == total:
# ways += 1
# used.pop()
# print(containers, used, stack)
# return
# elif sum(used) < total:
# count(containers, used, stack=stack+1)
# elif sum(used) > total:
# containers.append(used.pop())
# print(containers, used, stack)
# return
# count(containers, [])
# return ways
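# Simpler approach below: enumerate every subset of 2..len(containers)-1
# containers with itertools.combinations and count the subsets whose volumes
# sum exactly to the target.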
def count_ways(containers, volume):
return sum((1 for c in range(2, len(containers)) for i in it.combinations(containers, c) if sum(i) == volume))
def find_min_ways(containers, volume):
for c in range(2, len(containers)):
ways = 0
for i in it.combinations(containers, c):
if sum(i) == volume:
ways += 1
if ways > 0:
return c, ways
print(count_ways([int(l) for l in TEST1.splitlines() if l], 25), 25)
print(count_ways([int(l) for l in INPUT.splitlines() if l], 150), 150)
print(find_min_ways([int(l) for l in INPUT.splitlines() if l], 150), 150)
|
[
"itertools.combinations"
] |
[((1133, 1163), 'itertools.combinations', 'it.combinations', (['containers', 'c'], {}), '(containers, c)\n', (1148, 1163), True, 'import itertools as it\n'), ((965, 995), 'itertools.combinations', 'it.combinations', (['containers', 'c'], {}), '(containers, c)\n', (980, 995), True, 'import itertools as it\n')]
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Sun Dec 23 21:14:48 2018
@author: ahmed
"""
# IMPORTATION
from pylab import *
#plt.style.use('dark_background')
#plt.style.use('ggplot')
import ephem as ep
# two additional functions from the datetime module are needed
from datetime import datetime , timedelta
# OBSERVER
obs = ep.Observer()
# COORDINATES OF TUNIS
obs.lon, obs.lat, obs.elev = '10.08', '36.4', 100.0
obs.name = "SAT-TUNIS"
# MARS
mr = ep.Mars()
plt.figure(figsize=(10, 5))
for i in range (0 , 181):
    # advance the date one day at a time over six months
    dt = datetime(2018, 5, 1) + timedelta(i)
ds = "%d/%02d/%02d"%(dt.year, dt.month, dt.day)
print(" jour de l'année: ", i +1 , ds)
    # set the observer's date and compute the coordinates
obs.date = ds
mr.compute(obs)
ra = degrees(float(repr(mr.ra)))
de = degrees(float(repr(mr.dec)))
    # plot the objects
plot([ra], [de], c = "red", marker = "o", alpha =.5)
    # add a date label roughly every 10 days
if (dt.day % 10) == 0: text(ra, de, ds, fontsize =8)
# convert RA given in degrees
# to hour, minute and second format
def RAd2hms (x, loc):
h = x//15
m = int(((x - h * 15.0) / 15.0) * 60.0)
s = ((x - h *15 - m / 4.0) / 15.0) * 3600.0
return "%02dh%02dm%02ds"%(h, m, s)
# convert declination given in degrees
# to degree, arcminute and arcsecond format
def DEd2dms (x , loc ):
d = int(fabs(x))
m = int((fabs(x) - d)*60)
s = (fabs(x) - d - m /60.0)*3600.0
if x <0: d = -1 * d
return " %02dd%02dm%02ds"%(d, m, s)
# chart labels and title
xlabel("ascension droite " + r"$\alpha$")
gca().xaxis.set_major_formatter(FuncFormatter(RAd2hms))
ylabel(" déclinaison " + r"$\delta$")
gca().yaxis.set_major_formatter(FuncFormatter(DEd2dms))
title("Mouvement retrograde de Mars - 6 mois en 2018 \n"+obs.name, fontweight='bold')
savefig("../figs/retrogradeMars.pdf"); savefig("../figs/retrogradeMars.png")
show()
|
[
"ephem.Observer",
"ephem.Mars",
"datetime.timedelta",
"datetime.datetime"
] |
[((352, 365), 'ephem.Observer', 'ep.Observer', ([], {}), '()\n', (363, 365), True, 'import ephem as ep\n'), ((477, 486), 'ephem.Mars', 'ep.Mars', ([], {}), '()\n', (484, 486), True, 'import ephem as ep\n'), ((606, 626), 'datetime.datetime', 'datetime', (['(2018)', '(5)', '(1)'], {}), '(2018, 5, 1)\n', (614, 626), False, 'from datetime import datetime, timedelta\n'), ((630, 642), 'datetime.timedelta', 'timedelta', (['i'], {}), '(i)\n', (639, 642), False, 'from datetime import datetime, timedelta\n')]
|
import os
import tempfile
import pytest
import subprocess
TEST_DIRECTORY = os.path.abspath(__file__+"/../")
DATA_DIRECTORY = os.path.join(TEST_DIRECTORY,"data")
GIT_TEST_REPOSITORY = DATA_DIRECTORY + "/test_repository/d3py.tar.gz"
|
[
"os.path.abspath",
"os.path.join"
] |
[((76, 110), 'os.path.abspath', 'os.path.abspath', (["(__file__ + '/../')"], {}), "(__file__ + '/../')\n", (91, 110), False, 'import os\n'), ((126, 162), 'os.path.join', 'os.path.join', (['TEST_DIRECTORY', '"""data"""'], {}), "(TEST_DIRECTORY, 'data')\n", (138, 162), False, 'import os\n')]
|
import pygame
from pygame.sprite import Sprite
class BoyLife(Sprite):
def __init__(self):
"""Инициализирует графическое отображение жизней."""
super().__init__()
self.image = pygame.image.load('img/heart.png')
self.width = self.image.get_width()
self.height = self.image.get_height()
self.image = pygame.transform.scale(self.image, (self.width // 30, self.height // 30))
self.rect = self.image.get_rect()
|
[
"pygame.image.load",
"pygame.transform.scale"
] |
[((205, 239), 'pygame.image.load', 'pygame.image.load', (['"""img/heart.png"""'], {}), "('img/heart.png')\n", (222, 239), False, 'import pygame\n'), ((351, 424), 'pygame.transform.scale', 'pygame.transform.scale', (['self.image', '(self.width // 30, self.height // 30)'], {}), '(self.image, (self.width // 30, self.height // 30))\n', (373, 424), False, 'import pygame\n')]
|
import pprint
import random
chessBoard = [[0 for j in range(8)] for i in range(8)]
chessBoard[0][0] = "R"
pprint.pprint(chessBoard)
# Rook: move randomly along a row or a column from the current square.
def move():
x = 0
y = 0
getPosition = [0,0]
chessBoard[0][0] = 0
if random.uniform(0, 2) < 1:
x = int(random.uniform(0, 7))
else:
y = int(random.uniform(0, 7))
newPosition = (x,y)
chessBoard[x][y] = "R"
pprint.pprint(chessBoard)
move()
|
[
"pprint.pprint",
"random.uniform"
] |
[((106, 131), 'pprint.pprint', 'pprint.pprint', (['chessBoard'], {}), '(chessBoard)\n', (119, 131), False, 'import pprint\n'), ((394, 419), 'pprint.pprint', 'pprint.pprint', (['chessBoard'], {}), '(chessBoard)\n', (407, 419), False, 'import pprint\n'), ((227, 247), 'random.uniform', 'random.uniform', (['(0)', '(2)'], {}), '(0, 2)\n', (241, 247), False, 'import random\n'), ((269, 289), 'random.uniform', 'random.uniform', (['(0)', '(7)'], {}), '(0, 7)\n', (283, 289), False, 'import random\n'), ((317, 337), 'random.uniform', 'random.uniform', (['(0)', '(7)'], {}), '(0, 7)\n', (331, 337), False, 'import random\n')]
|
# coding: utf-8
"""
Mailchimp Marketing API
No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen) # noqa: E501
OpenAPI spec version: 3.0.74
Contact: <EMAIL>
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
import pprint
import re # noqa: F401
import six
class InlineResponse2001Exports(object):
"""NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
"""
Attributes:
swagger_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
swagger_types = {
'export_id': 'int',
'started': 'datetime',
'finished': 'datetime',
'size_in_bytes': 'int',
'download_url': 'str',
'links': 'list[ResourceLink]'
}
attribute_map = {
'export_id': 'export_id',
'started': 'started',
'finished': 'finished',
'size_in_bytes': 'size_in_bytes',
'download_url': 'download_url',
'links': '_links'
}
def __init__(self, export_id=None, started=None, finished=None, size_in_bytes=None, download_url=None, links=None): # noqa: E501
"""InlineResponse2001Exports - a model defined in Swagger""" # noqa: E501
self._export_id = None
self._started = None
self._finished = None
self._size_in_bytes = None
self._download_url = None
self._links = None
self.discriminator = None
if export_id is not None:
self.export_id = export_id
if started is not None:
self.started = started
if finished is not None:
self.finished = finished
if size_in_bytes is not None:
self.size_in_bytes = size_in_bytes
if download_url is not None:
self.download_url = download_url
if links is not None:
self.links = links
@property
def export_id(self):
"""Gets the export_id of this InlineResponse2001Exports. # noqa: E501
The ID for the export. # noqa: E501
:return: The export_id of this InlineResponse2001Exports. # noqa: E501
:rtype: int
"""
return self._export_id
@export_id.setter
def export_id(self, export_id):
"""Sets the export_id of this InlineResponse2001Exports.
The ID for the export. # noqa: E501
:param export_id: The export_id of this InlineResponse2001Exports. # noqa: E501
:type: int
"""
self._export_id = export_id
@property
def started(self):
"""Gets the started of this InlineResponse2001Exports. # noqa: E501
Start time for the export. # noqa: E501
:return: The started of this InlineResponse2001Exports. # noqa: E501
:rtype: datetime
"""
return self._started
@started.setter
def started(self, started):
"""Sets the started of this InlineResponse2001Exports.
Start time for the export. # noqa: E501
:param started: The started of this InlineResponse2001Exports. # noqa: E501
:type: datetime
"""
self._started = started
@property
def finished(self):
"""Gets the finished of this InlineResponse2001Exports. # noqa: E501
If finished, the finish time for the export. # noqa: E501
:return: The finished of this InlineResponse2001Exports. # noqa: E501
:rtype: datetime
"""
return self._finished
@finished.setter
def finished(self, finished):
"""Sets the finished of this InlineResponse2001Exports.
If finished, the finish time for the export. # noqa: E501
:param finished: The finished of this InlineResponse2001Exports. # noqa: E501
:type: datetime
"""
self._finished = finished
@property
def size_in_bytes(self):
"""Gets the size_in_bytes of this InlineResponse2001Exports. # noqa: E501
The size of the uncompressed export in bytes. # noqa: E501
:return: The size_in_bytes of this InlineResponse2001Exports. # noqa: E501
:rtype: int
"""
return self._size_in_bytes
@size_in_bytes.setter
def size_in_bytes(self, size_in_bytes):
"""Sets the size_in_bytes of this InlineResponse2001Exports.
The size of the uncompressed export in bytes. # noqa: E501
:param size_in_bytes: The size_in_bytes of this InlineResponse2001Exports. # noqa: E501
:type: int
"""
self._size_in_bytes = size_in_bytes
@property
def download_url(self):
"""Gets the download_url of this InlineResponse2001Exports. # noqa: E501
If the export is finished, the download URL for an export. URLs are only valid for 90 days after the export completes. # noqa: E501
:return: The download_url of this InlineResponse2001Exports. # noqa: E501
:rtype: str
"""
return self._download_url
@download_url.setter
def download_url(self, download_url):
"""Sets the download_url of this InlineResponse2001Exports.
If the export is finished, the download URL for an export. URLs are only valid for 90 days after the export completes. # noqa: E501
:param download_url: The download_url of this InlineResponse2001Exports. # noqa: E501
:type: str
"""
self._download_url = download_url
@property
def links(self):
"""Gets the links of this InlineResponse2001Exports. # noqa: E501
A list of link types and descriptions for the API schema documents. # noqa: E501
:return: The links of this InlineResponse2001Exports. # noqa: E501
:rtype: list[ResourceLink]
"""
return self._links
@links.setter
def links(self, links):
"""Sets the links of this InlineResponse2001Exports.
A list of link types and descriptions for the API schema documents. # noqa: E501
:param links: The links of this InlineResponse2001Exports. # noqa: E501
:type: list[ResourceLink]
"""
self._links = links
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
if issubclass(InlineResponse2001Exports, dict):
for key, value in self.items():
result[key] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, InlineResponse2001Exports):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""Returns true if both objects are not equal"""
return not self == other
|
[
"six.iteritems"
] |
[((6494, 6527), 'six.iteritems', 'six.iteritems', (['self.swagger_types'], {}), '(self.swagger_types)\n', (6507, 6527), False, 'import six\n')]
|
import pytest
import numpy as np
import zmq
import h5py
import struct
import itertools
from .. import Writer
from .. import chunk_api
from ...messages import array as array_api
from .conftest import assert_chunk_allclose, assert_h5py_allclose
from zeeko.conftest import assert_canrecv
from ...tests.test_helpers import ZeekoTestBase, ZeekoMappingTests, OrderedDict
from ...messages.tests.test_receiver import ReceiverTests, ReceiverTestBase
@pytest.fixture
def notify(address2, context):
"""Notification socket"""
s = context.socket(zmq.PUSH)
s.bind(address2)
with s:
yield s
@pytest.fixture
def rnotify(address2, context, notify):
"""Recieve notifications."""
s = context.socket(zmq.PULL)
s.connect(address2)
with s:
yield s
@pytest.fixture
def n():
"""Number of arrays to publish."""
return 3
@pytest.fixture
def metadata_callback():
"""Return a metadata callback."""
def callback():
return {'meta':'data', 'n':5}
return callback
def test_writer_construction(filename):
"""Test construction"""
w = Writer(filename)
class WriterTestsBase(ReceiverTestBase):
"""Base class items for Writers."""
pytestmark = pytest.mark.usefixtures("rnotify")
cls = Writer
@pytest.fixture
def arrays(self, n, name, chunk_array, chunk_mask):
"""Return a list of chunks"""
cs = OrderedDict()
for i in range(n):
c = chunk_api.PyChunk("{0:s}{1:d}".format(name, i), np.random.randn(*chunk_array.shape), chunk_mask)
cs[c.name] = c
return cs
@pytest.fixture
def receiver(self, filename, metadata_callback):
"""The receiver object"""
obj = self.cls()
obj.metadata_callback = metadata_callback
with h5py.File(filename) as obj.file:
yield obj
@pytest.fixture
def writer(self, receiver):
"""Return a receiver"""
return receiver
def send_arrays(self, socket, arrays, framecount):
"""Send arrays."""
assert socket.poll(timeout=100, flags=zmq.POLLOUT)
array_api.send_array_packet_header(socket, "arrays", len(arrays), framecount, flags=zmq.SNDMORE)
chunks = list(arrays.values())
for chunk in chunks[:-1]:
chunk.send(socket, flags=zmq.SNDMORE)
chunks[-1].send(socket)
def recv_arrays(self, receiver, socket, arrays, flags=zmq.NOBLOCK):
"""Wrapper around receiving arrays."""
assert_canrecv(socket)
receiver.receive(socket, flags=flags)
for key in arrays:
assert receiver.event(key).is_set()
assert len(receiver) == len(arrays)
def send_unbundled_arrays(self, socket, arrays):
"""Send arrays as individual messages."""
array_api.send_array_packet_header(socket, "arrays", len(arrays), flags=zmq.SNDMORE)
chunks = list(arrays.values())
for chunk in chunks[:-1]:
chunk.send(socket, flags=zmq.SNDMORE)
chunks[-1].send(socket)
def recv_unbundled_arrays(self, receiver, socket, arrays, flags=zmq.NOBLOCK):
"""Receive unbundled arrays"""
count = 0
while socket.poll(timeout=100, flags=zmq.POLLIN):
assert_canrecv(socket)
receiver.receive(socket, flags=flags)
count += 1
for key in arrays:
assert receiver.event(key).is_set()
# assert count == len(arrays)
def make_modified_arrays(self, arrays):
"""Make modified arrays."""
return OrderedDict((cs.name, chunk_api.PyChunk(cs.name, cs.array * 2.0, cs.mask)) for cs in arrays.values())
def assert_receiver_arrays_allclose(self, receiver, arrays):
"""Assert receiver and arrays are all close."""
assert len(receiver) == len(arrays)
assert set(receiver.keys()) == set(arrays.keys())
for i, key in enumerate(receiver):
chunk = receiver[key]
assert_chunk_allclose(chunk, arrays[key])
class TestWriter(ReceiverTests, WriterTestsBase):
"""Test case for recorders."""
pass
class TestWriterMapping(ZeekoMappingTests, WriterTestsBase):
"""Test recorder behavior as a mapping."""
cls = Writer
@pytest.fixture
def mapping(self, chunksize, push, pull, arrays, framecount, filename, metadata_callback):
"""A client, set up for use as a mapping."""
obj = self.cls()
obj.metadata_callback = metadata_callback
with h5py.File(filename) as obj.file:
self.send_arrays(push, arrays, framecount)
self.recv_arrays(obj, pull, arrays)
yield obj
@pytest.fixture
def keys(self, arrays):
"""Return keys which should be availalbe."""
return arrays.keys()
|
[
"zeeko.conftest.assert_canrecv",
"h5py.File",
"pytest.mark.usefixtures",
"numpy.random.randn"
] |
[((1220, 1254), 'pytest.mark.usefixtures', 'pytest.mark.usefixtures', (['"""rnotify"""'], {}), "('rnotify')\n", (1243, 1254), False, 'import pytest\n'), ((2517, 2539), 'zeeko.conftest.assert_canrecv', 'assert_canrecv', (['socket'], {}), '(socket)\n', (2531, 2539), False, 'from zeeko.conftest import assert_canrecv\n'), ((1803, 1822), 'h5py.File', 'h5py.File', (['filename'], {}), '(filename)\n', (1812, 1822), False, 'import h5py\n'), ((3279, 3301), 'zeeko.conftest.assert_canrecv', 'assert_canrecv', (['socket'], {}), '(socket)\n', (3293, 3301), False, 'from zeeko.conftest import assert_canrecv\n'), ((4558, 4577), 'h5py.File', 'h5py.File', (['filename'], {}), '(filename)\n', (4567, 4577), False, 'import h5py\n'), ((1509, 1544), 'numpy.random.randn', 'np.random.randn', (['*chunk_array.shape'], {}), '(*chunk_array.shape)\n', (1524, 1544), True, 'import numpy as np\n')]
|
"""
Role tests
"""
import os
import pytest
from testinfra.utils.ansible_runner import AnsibleRunner
testinfra_hosts = AnsibleRunner(
os.environ['MOLECULE_INVENTORY_FILE']).get_hosts('all')
@pytest.mark.parametrize('name', [
('python-dev'),
('python-virtualenv'),
])
def test_packages(host, name):
"""
Test installed packages
"""
assert host.package(name).is_installed
|
[
"pytest.mark.parametrize",
"testinfra.utils.ansible_runner.AnsibleRunner"
] |
[((199, 267), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""name"""', "['python-dev', 'python-virtualenv']"], {}), "('name', ['python-dev', 'python-virtualenv'])\n", (222, 267), False, 'import pytest\n'), ((121, 173), 'testinfra.utils.ansible_runner.AnsibleRunner', 'AnsibleRunner', (["os.environ['MOLECULE_INVENTORY_FILE']"], {}), "(os.environ['MOLECULE_INVENTORY_FILE'])\n", (134, 173), False, 'from testinfra.utils.ansible_runner import AnsibleRunner\n')]
|
# SPDX-FileCopyrightText: 2021 <NAME>
# SPDX-License-Identifier: MIT
import board
from adafruit_led_animation.animation.sparkle import Sparkle
from adafruit_led_animation.color import PURPLE
from adafruit_led_animation.sequence import AnimationSequence
from adafruit_is31fl3741.adafruit_ledglasses import MUST_BUFFER, LED_Glasses
from adafruit_is31fl3741.led_glasses_animation import LED_Glasses_Animation
glasses = LED_Glasses(board.I2C(), allocate=MUST_BUFFER)
glasses.set_led_scaling(255)
glasses.global_current = 0xFE
glasses.enable = True
pixels = LED_Glasses_Animation(glasses)
anim2 = Sparkle(pixels, 0.05, PURPLE)
group = AnimationSequence(
anim2, advance_interval=5, auto_reset=True, auto_clear=True
)
while True:
group.animate()
|
[
"adafruit_led_animation.sequence.AnimationSequence",
"adafruit_led_animation.animation.sparkle.Sparkle",
"board.I2C",
"adafruit_is31fl3741.led_glasses_animation.LED_Glasses_Animation"
] |
[((557, 587), 'adafruit_is31fl3741.led_glasses_animation.LED_Glasses_Animation', 'LED_Glasses_Animation', (['glasses'], {}), '(glasses)\n', (578, 587), False, 'from adafruit_is31fl3741.led_glasses_animation import LED_Glasses_Animation\n'), ((598, 627), 'adafruit_led_animation.animation.sparkle.Sparkle', 'Sparkle', (['pixels', '(0.05)', 'PURPLE'], {}), '(pixels, 0.05, PURPLE)\n', (605, 627), False, 'from adafruit_led_animation.animation.sparkle import Sparkle\n'), ((637, 715), 'adafruit_led_animation.sequence.AnimationSequence', 'AnimationSequence', (['anim2'], {'advance_interval': '(5)', 'auto_reset': '(True)', 'auto_clear': '(True)'}), '(anim2, advance_interval=5, auto_reset=True, auto_clear=True)\n', (654, 715), False, 'from adafruit_led_animation.sequence import AnimationSequence\n'), ((431, 442), 'board.I2C', 'board.I2C', ([], {}), '()\n', (440, 442), False, 'import board\n')]
|
from pylearn2.blocks import Block
from pylearn2.utils.rng import make_theano_rng
from pylearn2.space import Conv2DSpace, VectorSpace
import theano
from theano.compile.mode import get_default_mode
class ScaleAugmentation(Block):
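    """Augmentation block that rescales each example in a batch.

    A per-example scale factor is drawn from a normal distribution with the
    given mean and std and broadcast over the remaining axes of the batch
    (see create_theano_function below).
    """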
def __init__(self, space, seed=20150111, mean=1., std=.05, cpu_only=True):
self.rng = make_theano_rng(seed, which_method=['normal'])
self.mean = mean
self.std = std
self.space = space
self.cpu_only = cpu_only
super(ScaleAugmentation, self).__init__()
def create_theano_function(self):
if hasattr(self, 'f'):
return self.f
else:
X = self.space.make_theano_batch()
dim = X.ndim
arg = (dim-1)*('x',)
scale = self.rng.normal(size=[X.shape[0]], avg=self.mean, std=self.std)
scale = scale.dimshuffle(0,*arg)
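            # dimshuffle(0, 'x', ..., 'x') reshapes the per-example scale from
            # shape (batch,) to (batch, 1, ..., 1) so it broadcasts over the
            # remaining axes of X in the elementwise product below.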
out = X*scale
if self.cpu_only:
mode = get_default_mode().excluding('gpu')
else:
mode = get_default_mode()
return theano.function([X], out, mode=mode)
def perform(self, X):
f = self.create_theano_function()
return f(X)
def get_input_space(self):
return self.space
def get_output_space(self):
return self.space
|
[
"pylearn2.utils.rng.make_theano_rng",
"theano.compile.mode.get_default_mode",
"theano.function"
] |
[((328, 374), 'pylearn2.utils.rng.make_theano_rng', 'make_theano_rng', (['seed'], {'which_method': "['normal']"}), "(seed, which_method=['normal'])\n", (343, 374), False, 'from pylearn2.utils.rng import make_theano_rng\n'), ((1071, 1107), 'theano.function', 'theano.function', (['[X]', 'out'], {'mode': 'mode'}), '([X], out, mode=mode)\n', (1086, 1107), False, 'import theano\n'), ((1033, 1051), 'theano.compile.mode.get_default_mode', 'get_default_mode', ([], {}), '()\n', (1049, 1051), False, 'from theano.compile.mode import get_default_mode\n'), ((956, 974), 'theano.compile.mode.get_default_mode', 'get_default_mode', ([], {}), '()\n', (972, 974), False, 'from theano.compile.mode import get_default_mode\n')]
|
"""parses [PREDICT] section of config"""
import os
from pathlib import Path
import attr
from attr import converters, validators
from attr.validators import instance_of
from .validators import is_a_directory, is_a_file, is_valid_model_name
from .. import device
from ..converters import comma_separated_list, expanded_user_path
@attr.s
class PredictConfig:
"""class that represents [PREDICT] section of config.toml file
Attributes
----------
csv_path : str
path to where dataset was saved as a csv.
checkpoint_path : str
path to directory with checkpoint files saved by Torch, to reload model
labelmap_path : str
path to 'labelmap.json' file.
models : list
of model names. e.g., 'models = TweetyNet, GRUNet, ConvNet'
batch_size : int
number of samples per batch presented to models during training.
num_workers : int
Number of processes to use for parallel loading of data.
Argument to torch.DataLoader. Default is 2.
device : str
Device on which to work with model + data.
Defaults to 'cuda' if torch.cuda.is_available is True.
spect_scaler_path : str
path to a saved SpectScaler object used to normalize spectrograms.
If spectrograms were normalized and this is not provided, will give
incorrect results.
annot_csv_filename : str
name of .csv file containing predicted annotations.
Default is None, in which case the name of the dataset .csv
is used, with '.annot.csv' appended to it.
output_dir : str
path to location where .csv containing predicted annotation
should be saved. Defaults to current working directory.
min_segment_dur : float
minimum duration of segment, in seconds. If specified, then
any segment with a duration less than min_segment_dur is
removed from lbl_tb. Default is None, in which case no
segments are removed.
majority_vote : bool
if True, transform segments containing multiple labels
into segments with a single label by taking a "majority vote",
i.e. assign all time bins in the segment the most frequently
occurring label in the segment. This transform can only be
applied if the labelmap contains an 'unlabeled' label,
because unlabeled segments makes it possible to identify
the labeled segments. Default is False.
save_net_outputs : bool
if True, save 'raw' outputs of neural networks
before they are converted to annotations. Default is False.
Typically the output will be "logits"
to which a softmax transform might be applied.
For each item in the dataset--each row in the `csv_path` .csv--
the output will be saved in a separate file in `output_dir`,
with the extension `{MODEL_NAME}.output.npz`. E.g., if the input is a
spectrogram with `spect_path` filename `gy6or6_032312_081416.npz`,
and the network is `TweetyNet`, then the net output file
will be `gy6or6_032312_081416.tweetynet.output.npz`.
"""
# required, external files
checkpoint_path = attr.ib(converter=expanded_user_path, validator=is_a_file)
labelmap_path = attr.ib(converter=expanded_user_path, validator=is_a_file)
# required, model / dataloader
models = attr.ib(
converter=comma_separated_list,
validator=[instance_of(list), is_valid_model_name],
)
batch_size = attr.ib(converter=int, validator=instance_of(int))
# csv_path is actually 'required' but we can't enforce that here because cli.prep looks at
# what sections are defined to figure out where to add csv_path after it creates the csv
csv_path = attr.ib(
converter=converters.optional(expanded_user_path),
validator=validators.optional(is_a_file),
default=None,
)
# optional, transform
spect_scaler_path = attr.ib(
converter=converters.optional(expanded_user_path),
validator=validators.optional(is_a_file),
default=None,
)
# optional, data loader
num_workers = attr.ib(validator=instance_of(int), default=2)
device = attr.ib(validator=instance_of(str), default=device.get_default())
annot_csv_filename = attr.ib(
validator=validators.optional(instance_of(str)), default=None
)
output_dir = attr.ib(
converter=expanded_user_path,
validator=is_a_directory,
default=Path(os.getcwd()),
)
min_segment_dur = attr.ib(
validator=validators.optional(instance_of(float)), default=None
)
majority_vote = attr.ib(validator=instance_of(bool), default=True)
save_net_outputs = attr.ib(validator=instance_of(bool), default=False)
|
[
"attr.validators.instance_of",
"os.getcwd",
"attr.ib",
"attr.converters.optional",
"attr.validators.optional"
] |
[((3224, 3282), 'attr.ib', 'attr.ib', ([], {'converter': 'expanded_user_path', 'validator': 'is_a_file'}), '(converter=expanded_user_path, validator=is_a_file)\n', (3231, 3282), False, 'import attr\n'), ((3303, 3361), 'attr.ib', 'attr.ib', ([], {'converter': 'expanded_user_path', 'validator': 'is_a_file'}), '(converter=expanded_user_path, validator=is_a_file)\n', (3310, 3361), False, 'import attr\n'), ((3576, 3592), 'attr.validators.instance_of', 'instance_of', (['int'], {}), '(int)\n', (3587, 3592), False, 'from attr.validators import instance_of\n'), ((3825, 3864), 'attr.converters.optional', 'converters.optional', (['expanded_user_path'], {}), '(expanded_user_path)\n', (3844, 3864), False, 'from attr import converters, validators\n'), ((3884, 3914), 'attr.validators.optional', 'validators.optional', (['is_a_file'], {}), '(is_a_file)\n', (3903, 3914), False, 'from attr import converters, validators\n'), ((4022, 4061), 'attr.converters.optional', 'converters.optional', (['expanded_user_path'], {}), '(expanded_user_path)\n', (4041, 4061), False, 'from attr import converters, validators\n'), ((4081, 4111), 'attr.validators.optional', 'validators.optional', (['is_a_file'], {}), '(is_a_file)\n', (4100, 4111), False, 'from attr import converters, validators\n'), ((4206, 4222), 'attr.validators.instance_of', 'instance_of', (['int'], {}), '(int)\n', (4217, 4222), False, 'from attr.validators import instance_of\n'), ((4266, 4282), 'attr.validators.instance_of', 'instance_of', (['str'], {}), '(str)\n', (4277, 4282), False, 'from attr.validators import instance_of\n'), ((4711, 4728), 'attr.validators.instance_of', 'instance_of', (['bool'], {}), '(bool)\n', (4722, 4728), False, 'from attr.validators import instance_of\n'), ((4785, 4802), 'attr.validators.instance_of', 'instance_of', (['bool'], {}), '(bool)\n', (4796, 4802), False, 'from attr.validators import instance_of\n'), ((3479, 3496), 'attr.validators.instance_of', 'instance_of', (['list'], {}), '(list)\n', (3490, 3496), False, 'from attr.validators import instance_of\n'), ((4387, 4403), 'attr.validators.instance_of', 'instance_of', (['str'], {}), '(str)\n', (4398, 4403), False, 'from attr.validators import instance_of\n'), ((4544, 4555), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (4553, 4555), False, 'import os\n'), ((4633, 4651), 'attr.validators.instance_of', 'instance_of', (['float'], {}), '(float)\n', (4644, 4651), False, 'from attr.validators import instance_of\n')]
|
# -*- coding: utf-8 -*-
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding model 'FeedType'
db.create_table('syndication_feedtype', (
('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('name', self.gf('django.db.models.fields.CharField')(unique=True, max_length=100)),
('template', self.gf('django.db.models.fields.files.FileField')(max_length=100)),
('content_type', self.gf('django.db.models.fields.CharField')(default='Content-type: application/xml', unique=True, max_length=100)),
))
db.send_create_signal('syndication', ['FeedType'])
# Adding model 'Feed'
db.create_table('syndication_feed', (
('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('feed_type', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['syndication.FeedType'])),
))
db.send_create_signal('syndication', ['Feed'])
# Adding M2M table for field site on 'Feed'
db.create_table('syndication_feed_site', (
('id', models.AutoField(verbose_name='ID', primary_key=True, auto_created=True)),
('feed', models.ForeignKey(orm['syndication.feed'], null=False)),
('site', models.ForeignKey(orm['sites.site'], null=False))
))
db.create_unique('syndication_feed_site', ['feed_id', 'site_id'])
def backwards(self, orm):
# Deleting model 'FeedType'
db.delete_table('syndication_feedtype')
# Deleting model 'Feed'
db.delete_table('syndication_feed')
# Removing M2M table for field site on 'Feed'
db.delete_table('syndication_feed_site')
models = {
'sites.site': {
'Meta': {'ordering': "('domain',)", 'object_name': 'Site', 'db_table': "'django_site'"},
'domain': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
'syndication.feed': {
'Meta': {'object_name': 'Feed'},
'feed_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['syndication.FeedType']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'site': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['sites.Site']", 'symmetrical': 'False'})
},
'syndication.feedtype': {
'Meta': {'object_name': 'FeedType'},
'content_type': ('django.db.models.fields.CharField', [], {'default': "'Content-type: application/xml'", 'unique': 'True', 'max_length': '100'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '100'}),
'template': ('django.db.models.fields.files.FileField', [], {'max_length': '100'})
}
}
complete_apps = ['syndication']
|
[
"south.db.db.delete_table",
"south.db.db.create_unique",
"django.db.models.ForeignKey",
"django.db.models.AutoField",
"south.db.db.send_create_signal"
] |
[((720, 770), 'south.db.db.send_create_signal', 'db.send_create_signal', (['"""syndication"""', "['FeedType']"], {}), "('syndication', ['FeedType'])\n", (741, 770), False, 'from south.db import db\n'), ((1065, 1111), 'south.db.db.send_create_signal', 'db.send_create_signal', (['"""syndication"""', "['Feed']"], {}), "('syndication', ['Feed'])\n", (1086, 1111), False, 'from south.db import db\n'), ((1478, 1543), 'south.db.db.create_unique', 'db.create_unique', (['"""syndication_feed_site"""', "['feed_id', 'site_id']"], {}), "('syndication_feed_site', ['feed_id', 'site_id'])\n", (1494, 1543), False, 'from south.db import db\n'), ((1620, 1659), 'south.db.db.delete_table', 'db.delete_table', (['"""syndication_feedtype"""'], {}), "('syndication_feedtype')\n", (1635, 1659), False, 'from south.db import db\n'), ((1701, 1736), 'south.db.db.delete_table', 'db.delete_table', (['"""syndication_feed"""'], {}), "('syndication_feed')\n", (1716, 1736), False, 'from south.db import db\n'), ((1800, 1840), 'south.db.db.delete_table', 'db.delete_table', (['"""syndication_feed_site"""'], {}), "('syndication_feed_site')\n", (1815, 1840), False, 'from south.db import db\n'), ((1235, 1307), 'django.db.models.AutoField', 'models.AutoField', ([], {'verbose_name': '"""ID"""', 'primary_key': '(True)', 'auto_created': '(True)'}), "(verbose_name='ID', primary_key=True, auto_created=True)\n", (1251, 1307), False, 'from django.db import models\n'), ((1331, 1385), 'django.db.models.ForeignKey', 'models.ForeignKey', (["orm['syndication.feed']"], {'null': '(False)'}), "(orm['syndication.feed'], null=False)\n", (1348, 1385), False, 'from django.db import models\n'), ((1409, 1457), 'django.db.models.ForeignKey', 'models.ForeignKey', (["orm['sites.site']"], {'null': '(False)'}), "(orm['sites.site'], null=False)\n", (1426, 1457), False, 'from django.db import models\n')]
|
"""Version 0.68.007
Revision ID: <KEY>
Revises: <PASSWORD>
Create Date: 2021-10-22 06:59:47.134546
"""
from alembic import op
import sqlalchemy as sa
from sqlalchemy.dialects import mysql
from sqlalchemy import Enum
# revision identifiers, used by Alembic.
revision = '<KEY>'
down_revision = 'e3<PASSWORD>4da580'
branch_labels = None
depends_on = None
def upgrade():
op.add_column('airflow_tasks', sa.Column('sensor_soft_fail', sa.Integer(), nullable=True, comment='Setting this to 1 will add soft_fail=True on sensor'))
op.execute("ALTER TABLE `airflow_tasks` CHANGE COLUMN `sensor_soft_fail` `sensor_soft_fail` INTEGER NULL COMMENT 'Setting this to 1 will add soft_fail=True on sensor' AFTER `sensor_timeout_minutes`")
op.add_column('airflow_dag_sensors', sa.Column('sensor_soft_fail', sa.Integer(), nullable=True, comment='Setting this to 1 will add soft_fail=True on sensor'))
op.alter_column('airflow_custom_dags', 'schedule_interval',
existing_type=mysql.VARCHAR(length=32),
type_=mysql.VARCHAR(length=128),
comment='Time to execute dag',
existing_nullable=False)
op.alter_column('airflow_etl_dags', 'schedule_interval',
existing_type=mysql.VARCHAR(length=32),
type_=mysql.VARCHAR(length=128),
comment='Time to execute dag',
existing_nullable=False)
op.alter_column('airflow_export_dags', 'schedule_interval',
existing_type=mysql.VARCHAR(length=32),
type_=mysql.VARCHAR(length=128),
comment='Time to execute dag',
existing_nullable=False)
op.alter_column('airflow_import_dags', 'schedule_interval',
existing_type=mysql.VARCHAR(length=32),
type_=mysql.VARCHAR(length=128),
comment='Time to execute dag',
existing_nullable=False)
def downgrade():
op.drop_column('airflow_tasks', 'sensor_soft_fail')
op.drop_column('airflow_dag_sensors', 'sensor_soft_fail')
op.alter_column('airflow_custom_dags', 'schedule_interval',
existing_type=mysql.VARCHAR(length=128),
type_=mysql.VARCHAR(length=32),
comment='Time to execute dag',
existing_nullable=False)
op.alter_column('airflow_etl_dags', 'schedule_interval',
existing_type=mysql.VARCHAR(length=128),
type_=mysql.VARCHAR(length=32),
comment='Time to execute dag',
existing_nullable=False)
op.alter_column('airflow_export_dags', 'schedule_interval',
existing_type=mysql.VARCHAR(length=128),
type_=mysql.VARCHAR(length=32),
comment='Time to execute dag',
existing_nullable=False)
op.alter_column('airflow_import_dags', 'schedule_interval',
existing_type=mysql.VARCHAR(length=128),
type_=mysql.VARCHAR(length=32),
comment='Time to execute dag',
existing_nullable=False)
|
[
"sqlalchemy.dialects.mysql.VARCHAR",
"alembic.op.drop_column",
"alembic.op.execute",
"sqlalchemy.Integer"
] |
[((529, 738), 'alembic.op.execute', 'op.execute', (['"""ALTER TABLE `airflow_tasks` CHANGE COLUMN `sensor_soft_fail` `sensor_soft_fail` INTEGER NULL COMMENT \'Setting this to 1 will add soft_fail=True on sensor\' AFTER `sensor_timeout_minutes`"""'], {}), '(\n "ALTER TABLE `airflow_tasks` CHANGE COLUMN `sensor_soft_fail` `sensor_soft_fail` INTEGER NULL COMMENT \'Setting this to 1 will add soft_fail=True on sensor\' AFTER `sensor_timeout_minutes`"\n )\n', (539, 738), False, 'from alembic import op\n'), ((1703, 1754), 'alembic.op.drop_column', 'op.drop_column', (['"""airflow_tasks"""', '"""sensor_soft_fail"""'], {}), "('airflow_tasks', 'sensor_soft_fail')\n", (1717, 1754), False, 'from alembic import op\n'), ((1756, 1813), 'alembic.op.drop_column', 'op.drop_column', (['"""airflow_dag_sensors"""', '"""sensor_soft_fail"""'], {}), "('airflow_dag_sensors', 'sensor_soft_fail')\n", (1770, 1813), False, 'from alembic import op\n'), ((435, 447), 'sqlalchemy.Integer', 'sa.Integer', ([], {}), '()\n', (445, 447), True, 'import sqlalchemy as sa\n'), ((798, 810), 'sqlalchemy.Integer', 'sa.Integer', ([], {}), '()\n', (808, 810), True, 'import sqlalchemy as sa\n'), ((969, 993), 'sqlalchemy.dialects.mysql.VARCHAR', 'mysql.VARCHAR', ([], {'length': '(32)'}), '(length=32)\n', (982, 993), False, 'from sqlalchemy.dialects import mysql\n'), ((1003, 1028), 'sqlalchemy.dialects.mysql.VARCHAR', 'mysql.VARCHAR', ([], {'length': '(128)'}), '(length=128)\n', (1016, 1028), False, 'from sqlalchemy.dialects import mysql\n'), ((1165, 1189), 'sqlalchemy.dialects.mysql.VARCHAR', 'mysql.VARCHAR', ([], {'length': '(32)'}), '(length=32)\n', (1178, 1189), False, 'from sqlalchemy.dialects import mysql\n'), ((1199, 1224), 'sqlalchemy.dialects.mysql.VARCHAR', 'mysql.VARCHAR', ([], {'length': '(128)'}), '(length=128)\n', (1212, 1224), False, 'from sqlalchemy.dialects import mysql\n'), ((1364, 1388), 'sqlalchemy.dialects.mysql.VARCHAR', 'mysql.VARCHAR', ([], {'length': '(32)'}), '(length=32)\n', (1377, 1388), False, 'from sqlalchemy.dialects import mysql\n'), ((1398, 1423), 'sqlalchemy.dialects.mysql.VARCHAR', 'mysql.VARCHAR', ([], {'length': '(128)'}), '(length=128)\n', (1411, 1423), False, 'from sqlalchemy.dialects import mysql\n'), ((1563, 1587), 'sqlalchemy.dialects.mysql.VARCHAR', 'mysql.VARCHAR', ([], {'length': '(32)'}), '(length=32)\n', (1576, 1587), False, 'from sqlalchemy.dialects import mysql\n'), ((1597, 1622), 'sqlalchemy.dialects.mysql.VARCHAR', 'mysql.VARCHAR', ([], {'length': '(128)'}), '(length=128)\n', (1610, 1622), False, 'from sqlalchemy.dialects import mysql\n'), ((1892, 1917), 'sqlalchemy.dialects.mysql.VARCHAR', 'mysql.VARCHAR', ([], {'length': '(128)'}), '(length=128)\n', (1905, 1917), False, 'from sqlalchemy.dialects import mysql\n'), ((1927, 1951), 'sqlalchemy.dialects.mysql.VARCHAR', 'mysql.VARCHAR', ([], {'length': '(32)'}), '(length=32)\n', (1940, 1951), False, 'from sqlalchemy.dialects import mysql\n'), ((2088, 2113), 'sqlalchemy.dialects.mysql.VARCHAR', 'mysql.VARCHAR', ([], {'length': '(128)'}), '(length=128)\n', (2101, 2113), False, 'from sqlalchemy.dialects import mysql\n'), ((2123, 2147), 'sqlalchemy.dialects.mysql.VARCHAR', 'mysql.VARCHAR', ([], {'length': '(32)'}), '(length=32)\n', (2136, 2147), False, 'from sqlalchemy.dialects import mysql\n'), ((2287, 2312), 'sqlalchemy.dialects.mysql.VARCHAR', 'mysql.VARCHAR', ([], {'length': '(128)'}), '(length=128)\n', (2300, 2312), False, 'from sqlalchemy.dialects import mysql\n'), ((2322, 2346), 'sqlalchemy.dialects.mysql.VARCHAR', 'mysql.VARCHAR', ([], 
{'length': '(32)'}), '(length=32)\n', (2335, 2346), False, 'from sqlalchemy.dialects import mysql\n'), ((2486, 2511), 'sqlalchemy.dialects.mysql.VARCHAR', 'mysql.VARCHAR', ([], {'length': '(128)'}), '(length=128)\n', (2499, 2511), False, 'from sqlalchemy.dialects import mysql\n'), ((2521, 2545), 'sqlalchemy.dialects.mysql.VARCHAR', 'mysql.VARCHAR', ([], {'length': '(32)'}), '(length=32)\n', (2534, 2545), False, 'from sqlalchemy.dialects import mysql\n')]
|
from __future__ import print_function, division, absolute_import
import pickle
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
import numpy as np
import pickle
def visualize_vertices(vertices:np.ndarray, bones:np.ndarray = None):
fig = plt.figure()
ax = Axes3D(fig)
ax.scatter(vertices[:-1:5,0], vertices[:-1:5,1], vertices[:-1:5,2], c='b')
print('%f to %f' % (vertices[:,2].min(), vertices[:,2].max()))
if bones is not None:
joints = []
for bone in bones:
joint = np.linalg.inv(bone['offset_matrix'])[0:3, 3]
joints.append(np.expand_dims(joint, axis=0))
joints = np.vstack(joints)
ax.scatter(joints[:,0], joints[:,1], joints[:,2], c='r')
print('%f to %f' % (joints[:,2].min(), joints[:,2].max()))
plt.show()
if __name__ == '__main__':
with open('mesh/model/preprocessed_right_hand.pkl', 'rb') as f:
mesh = pickle.load(f)
visualize_vertices(mesh['vertices'], mesh['bones'])
|
[
"matplotlib.pyplot.show",
"mpl_toolkits.mplot3d.Axes3D",
"numpy.expand_dims",
"matplotlib.pyplot.figure",
"pickle.load",
"numpy.linalg.inv",
"numpy.vstack"
] |
[((265, 277), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (275, 277), True, 'import matplotlib.pyplot as plt\n'), ((287, 298), 'mpl_toolkits.mplot3d.Axes3D', 'Axes3D', (['fig'], {}), '(fig)\n', (293, 298), False, 'from mpl_toolkits.mplot3d import Axes3D\n'), ((824, 834), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (832, 834), True, 'import matplotlib.pyplot as plt\n'), ((659, 676), 'numpy.vstack', 'np.vstack', (['joints'], {}), '(joints)\n', (668, 676), True, 'import numpy as np\n'), ((946, 960), 'pickle.load', 'pickle.load', (['f'], {}), '(f)\n', (957, 960), False, 'import pickle\n'), ((540, 576), 'numpy.linalg.inv', 'np.linalg.inv', (["bone['offset_matrix']"], {}), "(bone['offset_matrix'])\n", (553, 576), True, 'import numpy as np\n'), ((611, 640), 'numpy.expand_dims', 'np.expand_dims', (['joint'], {'axis': '(0)'}), '(joint, axis=0)\n', (625, 640), True, 'import numpy as np\n')]
|
"""
======================
Comparing CCA Variants
======================
A comparison of Kernel Canonical Correlation Analysis (KCCA) with three
different types of kernel to Deep Canonical Correlation Analysis (DCCA).
Each learns and computes kernels suitable for different situations. The point
of this tutorial is to illustrate, in toy examples, the rough intuition as to
when such methods work well and generate linearly correlated projections.
The simulated latent data has two signal dimensions drawn from independent
Gaussians. Two views of data were derived from this.
- View 1: The latent data.
- View 2: A transformation of the latent data.
To each view, two additional independent Gaussian noise dimensions were added.
Each 2x2 grid of subplots in the figure corresponds to a transformation and
either the raw data or a CCA variant. The x-axes are the data from view 1
and the y-axes are the data from view 2. Plotted are the correlations between
the signal dimensions of the raw views and the top two components of each
view after a CCA variant transformation. Linearly correlated plots on the
diagonals of the 2x2 grids indicate that the CCA method was able to
successfully learn the underlying functional relationship between the two
views.
"""
from mvlearn.embed import KCCA, DCCA
from mvlearn.datasets import GaussianMixture
import numpy as np
import matplotlib.pyplot as plt
import matplotlib
import seaborn as sns
# Make Latents
n_samples = 200
centers = [[0, 1], [0, -1]]
covariances = 2*np.array([np.eye(2), np.eye(2)])
gm_train = GaussianMixture(n_samples, centers, covariances)
# Test
gm_test = GaussianMixture(n_samples, centers, covariances)
# Make 2 views
n_noise = 2
transforms = ['linear', 'poly', 'sin']
Xs_train = []
Xs_test = []
for transform in transforms:
gm_train.sample_views(transform=transform, n_noise=n_noise)
gm_test.sample_views(transform=transform, n_noise=n_noise)
Xs_train.append(gm_train.get_Xy()[0])
Xs_test.append(gm_test.get_Xy()[0])
# Plotting parameters
labels = gm_test.latent_[:, 0]
cmap = matplotlib.colors.ListedColormap(
sns.diverging_palette(240, 10, n=len(labels), center='light').as_hex())
cmap = 'coolwarm'
method_labels = \
['Raw Views', 'Linear KCCA', 'Polynomial KCCA', 'Gaussian KCCA', 'DCCA']
transform_labels = \
['Linear Transform', 'Polynomial Transform', 'Sinusoidal Transform']
input_size1, input_size2 = Xs_train[0][0].shape[1], Xs_train[0][1].shape[1]
outdim_size = min(Xs_train[0][0].shape[1], 2)
layer_sizes1 = [256, 256, outdim_size]
layer_sizes2 = [256, 256, outdim_size]
methods = [
KCCA(ktype='linear', reg=0.1, degree=2.0, constant=0.1, n_components=2),
KCCA(ktype='poly', reg=0.1, degree=2.0, constant=0.1, n_components=2),
KCCA(ktype='gaussian', reg=1.0, sigma=2.0, n_components=2),
DCCA(input_size1, input_size2, outdim_size, layer_sizes1, layer_sizes2,
epoch_num=400)
]
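# Each CCA variant is fit on the training views and applied to the held-out
# test views; the first 2x2 block of every row shows the raw test views.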
fig, axes = plt.subplots(3 * 2, 5 * 2, figsize=(20, 12))
sns.set_context('notebook')
for r, transform in enumerate(transforms):
axs = axes[2 * r:2 * r + 2, :2]
for i, ax in enumerate(axs.flatten()):
dim2 = int(i / 2)
dim1 = i % 2
ax.scatter(
Xs_test[r][0][:, dim1],
Xs_test[r][1][:, dim2],
cmap=cmap,
c=labels,
)
ax.set_xticks([], [])
ax.set_yticks([], [])
if dim1 == 0:
ax.set_ylabel(f"View 2 Dim {dim2+1}")
if dim1 == 0 and dim2 == 0:
ax.text(-0.5, -0.1, transform_labels[r], transform=ax.transAxes,
fontsize=18, rotation=90, verticalalignment='center')
if dim2 == 1 and r == len(transforms)-1:
ax.set_xlabel(f"View 1 Dim {dim1+1}")
if i == 0 and r == 0:
ax.set_title(method_labels[r],
{'position': (1.11, 1), 'fontsize': 18})
for c, method in enumerate(methods):
axs = axes[2*r: 2*r+2, 2*c+2:2*c+4]
Xs = method.fit(Xs_train[r]).transform(Xs_test[r])
for i, ax in enumerate(axs.flatten()):
dim2 = int(i / 2)
dim1 = i % 2
ax.scatter(
Xs[0][:, dim1],
Xs[1][:, dim2],
cmap=cmap,
c=labels,
)
if dim2 == 1 and r == len(transforms)-1:
ax.set_xlabel(f"View 1 Dim {dim1+1}")
if i == 0 and r == 0:
ax.set_title(method_labels[c + 1], {'position': (1.11, 1),
'fontsize': 18})
ax.axis("equal")
ax.set_xticks([], [])
ax.set_yticks([], [])
|
[
"mvlearn.embed.KCCA",
"mvlearn.embed.DCCA",
"mvlearn.datasets.GaussianMixture",
"numpy.eye",
"matplotlib.pyplot.subplots",
"seaborn.set_context"
] |
[((1558, 1606), 'mvlearn.datasets.GaussianMixture', 'GaussianMixture', (['n_samples', 'centers', 'covariances'], {}), '(n_samples, centers, covariances)\n', (1573, 1606), False, 'from mvlearn.datasets import GaussianMixture\n'), ((1625, 1673), 'mvlearn.datasets.GaussianMixture', 'GaussianMixture', (['n_samples', 'centers', 'covariances'], {}), '(n_samples, centers, covariances)\n', (1640, 1673), False, 'from mvlearn.datasets import GaussianMixture\n'), ((2932, 2976), 'matplotlib.pyplot.subplots', 'plt.subplots', (['(3 * 2)', '(5 * 2)'], {'figsize': '(20, 12)'}), '(3 * 2, 5 * 2, figsize=(20, 12))\n', (2944, 2976), True, 'import matplotlib.pyplot as plt\n'), ((2977, 3004), 'seaborn.set_context', 'sns.set_context', (['"""notebook"""'], {}), "('notebook')\n", (2992, 3004), True, 'import seaborn as sns\n'), ((2605, 2676), 'mvlearn.embed.KCCA', 'KCCA', ([], {'ktype': '"""linear"""', 'reg': '(0.1)', 'degree': '(2.0)', 'constant': '(0.1)', 'n_components': '(2)'}), "(ktype='linear', reg=0.1, degree=2.0, constant=0.1, n_components=2)\n", (2609, 2676), False, 'from mvlearn.embed import KCCA, DCCA\n'), ((2682, 2751), 'mvlearn.embed.KCCA', 'KCCA', ([], {'ktype': '"""poly"""', 'reg': '(0.1)', 'degree': '(2.0)', 'constant': '(0.1)', 'n_components': '(2)'}), "(ktype='poly', reg=0.1, degree=2.0, constant=0.1, n_components=2)\n", (2686, 2751), False, 'from mvlearn.embed import KCCA, DCCA\n'), ((2757, 2815), 'mvlearn.embed.KCCA', 'KCCA', ([], {'ktype': '"""gaussian"""', 'reg': '(1.0)', 'sigma': '(2.0)', 'n_components': '(2)'}), "(ktype='gaussian', reg=1.0, sigma=2.0, n_components=2)\n", (2761, 2815), False, 'from mvlearn.embed import KCCA, DCCA\n'), ((2821, 2911), 'mvlearn.embed.DCCA', 'DCCA', (['input_size1', 'input_size2', 'outdim_size', 'layer_sizes1', 'layer_sizes2'], {'epoch_num': '(400)'}), '(input_size1, input_size2, outdim_size, layer_sizes1, layer_sizes2,\n epoch_num=400)\n', (2825, 2911), False, 'from mvlearn.embed import KCCA, DCCA\n'), ((1524, 1533), 'numpy.eye', 'np.eye', (['(2)'], {}), '(2)\n', (1530, 1533), True, 'import numpy as np\n'), ((1535, 1544), 'numpy.eye', 'np.eye', (['(2)'], {}), '(2)\n', (1541, 1544), True, 'import numpy as np\n')]
|
import processing.uploaders as uploaders
PREFIX_UPLOADERS = [
{
'context_url': 'registry.local:5000/context-dir',
'prefix': 'registry.local:5000',
'mangle': True,
'expected_target_ref': 'registry.local:5000/registry-source_local:1.2.3',
},
{
'context_url': 'registry.local/context-dir',
'prefix': 'registry.local',
'mangle': False,
'expected_target_ref': 'registry.local/registry-source.local:1.2.3',
},
]
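# mangle=True replaces the dots in the source repository name, which is why the
# first case expects 'registry-source_local' instead of 'registry-source.local'.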
def test_prefix_uploader(job, oci_img):
img1 = oci_img(name='image_name', version='1.2.3', ref='registry-source.local:1.2.3')
job1 = job(oci_img=img1)
results = []
for uploader in PREFIX_UPLOADERS:
examinee = uploaders.PrefixUploader(
context_url=uploader['context_url'],
prefix=uploader['prefix'],
mangle=uploader['mangle'],
)
result = examinee.process(job1, target_as_source=False)
assert result.upload_request.target_ref == uploader['expected_target_ref']
results.append(result)
return results
def test_tag_suffix_uploader(job, oci_img):
for j in test_prefix_uploader(job, oci_img):
examinee = uploaders.TagSuffixUploader(
suffix='mod1',
separator='-',
)
result = examinee.process(j, target_as_source=True)
assert result.upload_request.target_ref == j.upload_request.target_ref + '-mod1'
|
[
"processing.uploaders.TagSuffixUploader",
"processing.uploaders.PrefixUploader"
] |
[((724, 844), 'processing.uploaders.PrefixUploader', 'uploaders.PrefixUploader', ([], {'context_url': "uploader['context_url']", 'prefix': "uploader['prefix']", 'mangle': "uploader['mangle']"}), "(context_url=uploader['context_url'], prefix=\n uploader['prefix'], mangle=uploader['mangle'])\n", (748, 844), True, 'import processing.uploaders as uploaders\n'), ((1200, 1257), 'processing.uploaders.TagSuffixUploader', 'uploaders.TagSuffixUploader', ([], {'suffix': '"""mod1"""', 'separator': '"""-"""'}), "(suffix='mod1', separator='-')\n", (1227, 1257), True, 'import processing.uploaders as uploaders\n')]
|
#!/usr/bin/env python3
import argparse
import shlex
import sys
from subprocess import run
from typing import TextIO
def find_common_ancestor_distance(
taxon: str, other_taxon: str, taxonomy_db_path: str, only_canonical: bool
):
canonical = "--only_canonical" if only_canonical else ""
cmd_str = f"taxonomy_util -d {taxonomy_db_path} common_ancestor_distance {canonical} '{other_taxon}' '{taxon}'"
cmd = shlex.split(cmd_str)
proc = run(cmd, encoding="utf8", capture_output=True)
return proc
def find_distances(gnm2tab_file: TextIO, taxon: str, taxonomy_db_path: str):
cmd = ["taxonomy_util", "-d", taxonomy_db_path, "get_id", taxon]
proc = run(cmd, capture_output=True, encoding="utf8")
if "not found in" in proc.stderr:
exit("Error: " + proc.stderr.strip())
for line in gnm2tab_file:
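        # Each gnm2tab line is tab-separated; the first three fields are the
        # species code, settings, and the other taxon's name.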
fields = line.split("\t")
(species_code, settings, other_taxon) = map(lambda el: el.strip(), fields[:3])
proc = find_common_ancestor_distance(taxon, other_taxon, taxonomy_db_path, True)
ancestor_info = proc.stdout.rstrip()
if proc.stderr != "":
print("Warning:", other_taxon, proc.stderr.rstrip(), file=sys.stderr)
else:
proc = find_common_ancestor_distance(
taxon, other_taxon, taxonomy_db_path, False
)
non_canonical_distance = proc.stdout.split("\t")[0]
print(
non_canonical_distance,
ancestor_info,
species_code,
settings,
other_taxon,
sep="\t",
)
if __name__ == "__main__":
parser = argparse.ArgumentParser(description="Find distance to common ancestor")
parser.add_argument(
"--taxonomy_db", required=True, help="NCBI Taxonomy database (SQLite format)"
)
parser.add_argument(
"--gnm2tab_file",
required=True,
type=argparse.FileType(),
help="gnm2tab file from spal",
)
parser.add_argument("taxon")
args = parser.parse_args()
find_distances(args.gnm2tab_file, args.taxon, args.taxonomy_db)
|
[
"subprocess.run",
"shlex.split",
"argparse.ArgumentParser",
"argparse.FileType"
] |
[((422, 442), 'shlex.split', 'shlex.split', (['cmd_str'], {}), '(cmd_str)\n', (433, 442), False, 'import shlex\n'), ((454, 500), 'subprocess.run', 'run', (['cmd'], {'encoding': '"""utf8"""', 'capture_output': '(True)'}), "(cmd, encoding='utf8', capture_output=True)\n", (457, 500), False, 'from subprocess import run\n'), ((676, 722), 'subprocess.run', 'run', (['cmd'], {'capture_output': '(True)', 'encoding': '"""utf8"""'}), "(cmd, capture_output=True, encoding='utf8')\n", (679, 722), False, 'from subprocess import run\n'), ((1663, 1734), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""Find distance to common ancestor"""'}), "(description='Find distance to common ancestor')\n", (1686, 1734), False, 'import argparse\n'), ((1939, 1958), 'argparse.FileType', 'argparse.FileType', ([], {}), '()\n', (1956, 1958), False, 'import argparse\n')]
|
import psutil #Library to get System details
import time
import pyttsx3 # Library for text to speech Offline
from win10toast import ToastNotifier # also need to install win32api (This is for Notifications)
import threading # To make notification and speech work at same time
toaster = ToastNotifier()
x=pyttsx3.init()
x.setProperty('rate',130)
x.setProperty('volume',8)
count = 0
def show_notification(show_text):
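    # Show a Windows toast and keep this worker thread alive until it is dismissed.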
toaster.show_toast(show_text,
icon_path='battery.ico',
duration=10)
# loop the toaster over some period of time
while toaster.notification_active():
time.sleep(0.1)
def monitor():
    global count
    while True:
time.sleep(10)
battery = psutil.sensors_battery()
plugged = battery.power_plugged
percent = int(battery.percent)
if percent == 100:
if plugged == True:
processThread = threading.Thread(target=show_notification, args=("Laptop Fully Charged",)) # <- note extra ','
processThread.start()
x.say("Laptop is Fully Charged Please plug out the cable")
x.runAndWait()
elif percent == 90:
if plugged == True:
if count == 0:
processThread = threading.Thread(target=show_notification, args=("Your Battery at 90% Please plug out the cable",)) # <- note extra ','
processThread.start()
x.say("Your battery at 90% ")
x.runAndWait()
count = count + 1
if __name__ == "__main__":
monitor()
|
[
"threading.Thread",
"pyttsx3.init",
"psutil.sensors_battery",
"time.sleep",
"win10toast.ToastNotifier"
] |
[((286, 301), 'win10toast.ToastNotifier', 'ToastNotifier', ([], {}), '()\n', (299, 301), False, 'from win10toast import ToastNotifier\n'), ((304, 318), 'pyttsx3.init', 'pyttsx3.init', ([], {}), '()\n', (316, 318), False, 'import pyttsx3\n'), ((626, 641), 'time.sleep', 'time.sleep', (['(0.1)'], {}), '(0.1)\n', (636, 641), False, 'import time\n'), ((681, 695), 'time.sleep', 'time.sleep', (['(10)'], {}), '(10)\n', (691, 695), False, 'import time\n'), ((712, 736), 'psutil.sensors_battery', 'psutil.sensors_battery', ([], {}), '()\n', (734, 736), False, 'import psutil\n'), ((895, 969), 'threading.Thread', 'threading.Thread', ([], {'target': 'show_notification', 'args': "('Laptop Fully Charged',)"}), "(target=show_notification, args=('Laptop Fully Charged',))\n", (911, 969), False, 'import threading\n'), ((1236, 1340), 'threading.Thread', 'threading.Thread', ([], {'target': 'show_notification', 'args': "('Your Battery at 90% Please plug out the cable',)"}), "(target=show_notification, args=(\n 'Your Battery at 90% Please plug out the cable',))\n", (1252, 1340), False, 'import threading\n')]
|
# Copyright 2014 DreamHost, LLC
#
# Author: DreamHost, LLC
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import logging
import netaddr
from akanda.router.drivers import base
from akanda.router import utils
LOG = logging.getLogger(__name__)
class PingManager(base.Manager):
"""
    A class which provides a facade to the system ping utility. Supports both
IPv4 and IPv6.
"""
exe_map = {
4: '/bin/ping',
6: '/bin/ping6'
}
def __init__(self, root_helper='sudo'):
"""
Initializes PingManager class.
:type root_helper: str
:param root_helper: System utility to escalate privileges.
"""
super(PingManager, self).__init__(root_helper)
def do(self, ip):
"""
Sends a single ICMP packet to <ip> using the systems ping utility.
:type ip: str
:param ip: The IP address to send ICMP packets to.
:rtype: bool. If <ip> responds to the ICMP packet, returns True else,
returns False
"""
version = netaddr.IPAddress(ip).version
args = ['-c', '1', ip]
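        # exe_map picks /bin/ping or /bin/ping6 based on the address family;
        # '-c 1' sends a single ICMP echo request.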
try:
utils.execute([self.exe_map.get(version)] + args)
return True
except RuntimeError:
return False
|
[
"netaddr.IPAddress",
"logging.getLogger"
] |
[((719, 746), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (736, 746), False, 'import logging\n'), ((1557, 1578), 'netaddr.IPAddress', 'netaddr.IPAddress', (['ip'], {}), '(ip)\n', (1574, 1578), False, 'import netaddr\n')]
|
import os
code_lines = list()
notation_lines = list()
blank_lines = list()
def process_file(filename):
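    # Classify each stripped line of the file as blank, comment ('#'), or code.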
global code_lines
global notation_lines
global blank_lines
with open(filename, 'r') as file:
for line in file.readlines():
_line = line.strip()
if not _line:
blank_lines.append(_line)
elif _line.startswith('#'):
notation_lines.append(_line)
else:
code_lines.append(_line)
def show_result():
global code_lines
global notation_lines
global blank_lines
print('-'*20)
print('code:', len(code_lines))
for line in code_lines:
print(line)
print('-' * 20)
print('notation:', len(notation_lines))
for line in notation_lines:
print(line)
print('-' * 20)
print('blank:', len(blank_lines))
code_lines.clear()
notation_lines.clear()
blank_lines.clear()
def process_files(path='../6'):
files = os.listdir(path)
for file in files:
if file.endswith('.py'):
print('='*30)
print('current file:', os.path.join(path, file))
process_file(os.path.join(path, file))
show_result()
process_files()
|
[
"os.path.join",
"os.listdir"
] |
[((1028, 1044), 'os.listdir', 'os.listdir', (['path'], {}), '(path)\n', (1038, 1044), False, 'import os\n'), ((1166, 1190), 'os.path.join', 'os.path.join', (['path', 'file'], {}), '(path, file)\n', (1178, 1190), False, 'import os\n'), ((1218, 1242), 'os.path.join', 'os.path.join', (['path', 'file'], {}), '(path, file)\n', (1230, 1242), False, 'import os\n')]
|
from ocr import OCR
ocr=OCR(image_folder="test/")
if __name__ == "__main__":
ocr.keras_ocr_works()
ocr.easyocr_model_works()
ocr.pytesseract_model_works()
|
[
"ocr.OCR"
] |
[((25, 50), 'ocr.OCR', 'OCR', ([], {'image_folder': '"""test/"""'}), "(image_folder='test/')\n", (28, 50), False, 'from ocr import OCR\n')]
|
import os
from setuptools import setup
def read(fname):
"""
Read README.md as long description if found.
Otherwise just return short description.
"""
try:
return open(os.path.join(os.path.dirname(__file__), fname)).read()
except IOError:
return "Simple git management application to be used in Kapsi hosting."
setup(
name="kapsi_git_manager",
version="0.1.0",
author="<NAME>",
author_email="<EMAIL>",
description=("Simple git management application to be used in Kapsi hosting."),
license="MIT",
keywords="git management kapsi",
url="http://packages.python.org/kapsi_git_manager",
packages=['kapsi_git_manager', ],
package_data={'kapsi_git_manager': ['license.txt', 'templates/*.html']},
include_package_data=True,
long_description=read('README.md'),
classifiers=[
"Development Status :: 3 - Alpha",
"Topic :: Utilities",
"License :: OSI Approved :: MIT License",
],
install_requires=[
'Flask',
'flup<=1.0.2',
'Flask-HTTPAuth',
'GitPython',
'passlib'
],
)
|
[
"os.path.dirname"
] |
[((210, 235), 'os.path.dirname', 'os.path.dirname', (['__file__'], {}), '(__file__)\n', (225, 235), False, 'import os\n')]
|
# -*- coding: utf-8 -*-
"""
Created on Fri Feb 7 10:57:53 2020
@author: pnter
"""
import torch
import gpytorch
from gpytorch.utils.memoize import add_to_cache, is_in_cache
from gpytorch.lazy.root_lazy_tensor import RootLazyTensor
import copy
from UtilityFunctions import updateInverseCovarWoodbury
from math import inf
'''
Implements the Local Gaussian Process Regression Model as described by Nguyen-tuong et al.
Note that the original paper, Local Gaussian Process Regression for Real Time Online Model Learning, uses the RBF kernel
Parameters:
likelihoodFn: The function which, when called, instantiates a new likelihood of the type which should be used for all child models
kernel: The kernel function used to construct the covariances matrices
w_gen: The threshold distance for generation of a new child model
'''
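# A minimal usage sketch (illustrative only: train_x, train_y and test_x are
# placeholder tensors, and the kernel/likelihood choices are assumptions):
#
#   model = LocalGPModel(gpytorch.likelihoods.GaussianLikelihood,
#                        gpytorch.kernels.RBFKernel(), w_gen=0.5, M=3)
#   model.update(train_x, train_y)
#   mean = model.predict(test_x)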
class LocalGPModel:
def __init__(self, likelihoodFn, kernel, inheritKernel=True, **kwargs):
#Initialize a list to contain local child models
self.children = []
self.w_gen = kwargs['w_gen'] if 'w_gen' in kwargs else .5
self.covar_module = kernel
self.mean_module = kwargs['mean'] if 'mean' in kwargs else gpytorch.means.ConstantMean
self.likelihood = likelihoodFn
self.inheritKernel = inheritKernel
#Number of training iterations used each time child model is updated
#This should be roughly proportional to the number of observations.
#By default, we will use 30. As number of data goes up, this may increase
self.training_iter = 30
#Default output dimension is 1 (scalar)
self.outputDim = 1 if 'outputDim' not in kwargs else kwargs['outputDim']
#If numInducingInputs is given, use variational GP models for child models
if 'numInducingPoints' in kwargs:
self.numInducingPoints = kwargs['numInducingPoints']
assert(type(self.numInducingPoints)==int)
assert(self.numInducingPoints>0)
self.objectiveFunctionClass = gpytorch.mlls.VariationalELBO
else:
self.numInducingPoints = None
#If maxChildren in kwargs, set self.maxChildren. Else, set to inf
if 'maxChildren' in kwargs:
self.maxChildren = kwargs['maxChildren']
else:
self.maxChildren = inf
#If M=# of closest models for prediction is given, set parameter
if 'M' in kwargs:
self.M = kwargs['M']
else:
self.M = None
'''
Update the LocalGPModel with a pair {x,y}.
'''
def update(self, x, y):
        #If no child models have been created yet, instantiate a new child with {x,y} and record the output dimension
if len(self.children)==0:
self.createChild(x,y)
self.outputDim = int(y.shape[-1])
#If child models exist, find the the child whose center is closest to x
else:
closestChildIndex,minDist = self.getClosestChild(x)
#Get the mask of any points for which the closest model is not similar enough
genNewModelIndices = (minDist < self.w_gen) if minDist.dim()>0 else (minDist < self.w_gen).unsqueeze(0)
x_gen = x[genNewModelIndices,:]
y_gen = y[genNewModelIndices]
#Now generate a new model, if needed.
if x_gen.shape[0] > 0:
self.createChild(x_gen[0,:].unsqueeze(0), y_gen[0].unsqueeze(0))
#We then recursively call update() without the point which generated
#the model and return, in case some points would be assigned the newly generated model
if x.shape[0] > 1:
x_minus = torch.cat([x[0:genNewModelIndices[0]], x[genNewModelIndices[0]:]])
y_minus = torch.cat([y[0:genNewModelIndices[0]], y[genNewModelIndices[0]:]])
self.update(x_minus,y_minus)
return
#Get points where we are not generating a new model
x_assign = x[genNewModelIndices.bitwise_not()]
y_assign = y[genNewModelIndices.bitwise_not()]
closestIndex_assign = closestChildIndex[genNewModelIndices.bitwise_not()]\
if closestChildIndex.dim()>0 else closestChildIndex.unsqueeze(0)[genNewModelIndices.bitwise_not()]
#loop over children and assign them the new data points
for childIndex in range(len(self.children)):
#Get the data which are closest to the current child
x_child = x_assign[closestIndex_assign==childIndex].squeeze(0)
y_child = y_assign[closestIndex_assign==childIndex].squeeze(0)
#If new data is a singleton, unsqueeze the 0th dim
if x_child.dim() == 1:
x_child,y_child = x_child.unsqueeze(0),y_child.unsqueeze(0)
#Only proceed if there are some data in the batch assigned to the child
if x_child.shape[0] > 0:
closestChildModel = self.children[childIndex]
#Create new model(s) which additionally incorporates the pair {x,y}. This will return more than one model
#if a split occurs.
newChildModel = closestChildModel.update(x_child,y_child)
#Replace the existing model with the new model which incorporates new data
                    self.children[childIndex] = newChildModel
'''
Instantiate a new child model using the training pair {x,y}
Note that the likelihood used to instantiate the child model is distinct
from each other child model, as opposed to the kernel which is shared
between the children.
'''
def createChild(self,x,y):
#Create new child model, then train
if self.numInducingPoints is None:
newChildModel = LocalGPChild(x,y,self,self.inheritKernel)
else:
newChildModel = ApproximateGPChild(x,y,self,self.inheritKernel)
#Set other children to not be last updated.
self.setChildLastUpdated(newChildModel)
#Add to the list of child models
self.children.append(newChildModel)
def setChildLastUpdated(self,child):
for _child in self.children:
_child.lastUpdated = False
child.lastUpdated = True
'''
Return a pytorch tensor of the centers of all child models.
'''
def getCenters(self):
#Get the center of each child model
centersList = list(map(lambda x:x.center.reshape((x.center.shape[0])),self.children))
#Return the centers after stacking in new dimension
return torch.stack(centersList,dim=0)
'''
Returns the index of the closest child model to the point x, as well as the distance
between the model's center and x.
'''
def getClosestChild(self,x):
#Compute distances between new input x and existing inputs
distances = self.getDistanceToCenters(x)
#Get the single minimum distance from the tensor (max covar)
minResults = torch.max(distances,1) if distances.dim()>1 else torch.max(distances,0)
return minResults[1],minResults[0]
'''
Compute the distances from the point x to each center
'''
def getDistanceToCenters(self,x,returnPowers=False):
centers = self.getCenters()
x,centers = x.double(),centers.double()
distances = self.covar_module(x,centers).evaluate()
powers = torch.zeros(distances.shape)
#Switch to double precision for this calculation
'''
vec = ((x-centers.repeat(x.shape[0],1))/self.covar_module.lengthscale).double().repeat(x.shape[0],1)
powers = .5*torch.sum(vec**2,dim=1)
distances = torch.exp(-powers)
'''
if returnPowers:
return distances.squeeze(0),powers
else:
return distances.squeeze(0)
'''
Make a prediction at the point(s) x. This method is a wrapper which handles the messy case of multidimensional inputs.
    The actual prediction is done in the predict_Helper method. If no M is given, the default is used
'''
def predict(self,x,individualPredictions=False,getVar=False):
return self.predict_Helper(x,self.M,individualPredictions,getVar)
'''
Make a prediction at the point(s) x. This method is a wrapper which handles the messy case of multidimensional inputs.
    The actual prediction is done in the predict_Helper method
'''
def predict_Helper(self,x,M,individualPredictions,getVar):
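        #Each child predicts at x; the M children whose centers have the highest
        #kernel covariance with x are blended with weights proportional to that
        #covariance.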
if M is None:
M = len(self.children)
else:
M = min(M,len(self.children))
#Update all of the covar modules to the most recent
if self.inheritKernel:
for child in self.children:
child.covar_module = self.covar_module
#If not inheriting kernel, then average the lengthscale params of child kernels
else:
lengthscales = [child.covar_module.lengthscale for child in self.children]
self.covar_module.lengthscale = torch.mean(torch.stack(lengthscales),dim=0)
mean_predictions = []
var_predictions = []
#Get the predictions of each child at each point
for child in self.children:
prediction = child.predict(x)
mean_predictions.append(prediction.mean)
var_predictions.append(prediction.variance)
#Concatenate into pytorch tensors
mean_predictions = torch.stack(mean_predictions).transpose(0,1)
var_predictions = torch.stack(var_predictions).transpose(0,1)
#Squeeze out any extra dims that may have accumulated
if mean_predictions.dim()>2:
mean_predictions = mean_predictions.squeeze()
var_predictions = var_predictions.squeeze()
#if the predictions are done at a single point, we need to unsqueeze in dim 0
if mean_predictions.dim()<2:
mean_predictions = mean_predictions.unsqueeze(-1)
var_predictions = var_predictions.unsqueeze(-1)
#Transpose to agree with minIndices dims
#Note: This only needs to be done for the incremental experiments where we track memory usage.
#Leave this commented out otherwise
'''
mean_predictions = mean_predictions.transpose(0,1)
var_predictions = var_predictions.transpose(0,1)
'''
#We don't need this weighting procedure if there is only one child
if mean_predictions.shape[-1]>1:
#Get the covar matrix
distances = self.getDistanceToCenters(x)
#Get the M closest child models. Need to squeeze out extra dims of 1.
sortResults = torch.sort(distances.squeeze(-1).squeeze(-1),descending=True)
#Get the minDists for weighting predictions
#minDists = sortResults[0][:,:M].squeeze(-1) if sortResults[0].dim()>1 else sortResults[0].unsqueeze(0)
minDists = sortResults[0][:,:M] if sortResults[0].dim()>1 else sortResults[0].unsqueeze(0)
#Get the min indices for selecting the correct predictions. If dim==1, then there is only one child, so no need to take up to M predictions
minIndices = sortResults[1][:,:M] if sortResults[1].dim()>1 else sortResults[1].unsqueeze(0)
#Get the associate predictions
gatherDim = 1 if mean_predictions.dim()>1 else 0
mean_predictions = mean_predictions.gather(gatherDim,minIndices)
var_predictions = var_predictions.gather(gatherDim,minIndices)
#Compute weights for the predictions. Switch to double precision for this somewhat unstable computation
minDists = minDists.double()
#If we have M=1, we need to unsqueeze for the summation
if minDists.dim() == 1:
minDists = minDists.unsqueeze(-1)
#Sum the m smallest distances for each prediction point to normalize
denominator = torch.sum(minDists,dim=1).unsqueeze(-1).repeat((1,minDists.shape[1]))
weights = minDists/denominator
#Compute weighted predictions.
#IMPORTANT: the weighted variance predictions are highly negatively biased since we do not account for the covariance between models
weighted_mean_predictions = torch.sum(weights * mean_predictions,dim=1)
weighted_var_predictions = torch.sum(weights**2 * var_predictions,dim=1)
else:
weighted_mean_predictions = mean_predictions
weighted_var_predictions = var_predictions
if getVar:
return weighted_mean_predictions,weighted_var_predictions
elif individualPredictions:
return weighted_mean_predictions,mean_predictions,weights,minDists
else:
return weighted_mean_predictions
'''
Make a prediction at the point x by finding the M closest child models and
computing a weighted average of their predictions. By default M is the number
of child models. If M < number of child models, use all of them.
THIS METHOD IS NOW DEPRECATED. DO NOT RELY ON THIS.
'''
def predictAtPoint(self,x,M=None,individualPredictions=False):
if M is None:
M = len(self.children)
else:
M = min(M,len(self.children))
#Compute distances between new input x and existing inputs
distances,powers = self.getDistanceToCenters(x,True)
#Get the M closest child models. Need to squeeze out extra dims of 1.
sortResults = torch.sort(distances.squeeze(-1).squeeze(-1),descending=True)
minDists = sortResults[0][:M].squeeze(-1) if sortResults[0].dim()>0 else sortResults[0].unsqueeze(0)
minIndices = sortResults[1][:M] if sortResults[1].dim()>0 else sortResults[1].unsqueeze(0)
closestChildren = [self.children[i] for i in minIndices]
'''
Get a posterior distribution for each child model. Note each will be
multivariate normal. Then compute weighted average of the means of the
posterior distributions.
'''
posteriorMeans = []
for child in closestChildren:
posterior = child.predict(x)
posteriorMeans.append(posterior.mean)
'''
TODO: It would be better to instead compute the weighted average of the
posterior distributions so we have access to variance as well.
'''
posteriorMeans = torch.stack(posteriorMeans)
#We need to be careful with this computation. If the covariances are very small, we may end up with a nan value here.
nonZeroDists = minDists[minDists>0.0]
#Address the case where we are predicting very far away from all models. Take unweighted mean of all predictions
if nonZeroDists.shape[-1]==0:
weights = 1.0/(powers+1.0)
weights = weights/torch.sum(weights)
else:
minDists = minDists
weights = minDists/torch.sum(minDists)
weightedAverageMean = torch.dot(weights,posteriorMeans.squeeze(-1).double()).float()
if individualPredictions:
return weightedAverageMean,posteriorMeans,weights,minDists
else:
return weightedAverageMean
class LocalGPChild(gpytorch.models.ExactGP):
def __init__(self, train_x, train_y, parent, inheritKernel=True, **kwargs):
#Track if the child was created by splitting
self.isSplittingChild = True if 'split' in kwargs and kwargs['split'] else False
#Handle prior likelihood
if 'priorLik' in kwargs and kwargs['priorLik'] is not None:
priorLik = kwargs['priorLik']
else:
#If no prior is provided, use the default of the parent
priorLik = parent.likelihood()
#In this case, we reset the isSplittingChild flag to false in order for the new likelihood to be trained
self.isSplittingChild = False
super(LocalGPChild, self).__init__(train_x, train_y, priorLik)
#Set to double mode
self.double()
self.likelihood.double()
self.parent = parent
if 'priorMean' in kwargs and kwargs['priorMean'] is not None:
#If given, take a prior for the mean. Used for splitting models.
self.mean_module = copy.deepcopy(kwargs['priorMean'])
else:
self.mean_module = parent.mean_module()
'''
If inheritKernel is set to True, then the same Kernel function (including the same hyperparameters)
will be used in all of the child models. Otherwise, a separate instance of the same kernel function
is used for each child model.
'''
if inheritKernel:
self.covar_module = parent.covar_module
else:
self.covar_module = parent.covar_module.__class__(ard_num_dims=train_x.shape[1] if train_x.dim()>1 else 1)
self.lastUpdated = True
'''
Compute the center as the mean of the training data
'''
self.center = torch.mean(train_x,dim=0)
if self.center.dim()==0:
self.center = self.center.unsqueeze(0)
self.train_x = train_x
self.train_y = train_y
self.trained = False
self.initTraining()
def forward(self,x):
mean_x = self.mean_module(x)
covar_x = self.covar_module(x)
return gpytorch.distributions.MultivariateNormal(mean_x, covar_x)
def update(self,x,y):
#Sync covar
if self.parent.inheritKernel:
self.covar_module = self.parent.covar_module
#Update train_x, train_y
self.train_x = torch.cat([self.train_x, x])
self.train_y = torch.cat([self.train_y, y])
#Update the data which can be used for optimizing
self.train_inputs = (self.train_x,)
self.train_targets = self.train_y
#Flag the child as not having been trained.
self.trained = False
#Update center
self.center = torch.mean(self.train_x,dim=0)
if self.center.dim()==0:
self.center = self.center.unsqueeze(0)
return self
'''
Perform a rank-one update of the child model's inverse covariance matrix cache.
'''
def updateInvCovarCache(self,update=False):
lazy_covar = self.prediction_strategy.lik_train_train_covar
if is_in_cache(lazy_covar,"root_inv_decomposition"):
if update:
#Get the old cached inverse covar matrix
K_0inv = lazy_covar.root_inv_decomposition()
#Get the new covar matrix by calling the covar module on the training data
K = self.covar_module(self.train_x)
#Compute the update
Kinv = updateInverseCovarWoodbury(K_0inv, K)
#Store updated inverse covar matrix in cache
add_to_cache(lazy_covar, "root_inv_decomposition", RootLazyTensor(torch.sqrt(Kinv)))
else:
#This is a bit dirty, but here we will simply delete the root/root_inv from cache. This forces
#GPyTorch to recompute them.
lazy_covar._memoize_cache = {}
self.prediction_strategy._memoize_cache = {}
'''
Setup optimizer and perform initial training
'''
def initTraining(self):
#Switch to training mode
self.train()
self.likelihood.train()
#We only train on instantiation if the child model is not a result of a split
if not self.isSplittingChild:
#Setup optimizer
self.optimizer = torch.optim.Adam(self.parameters(), lr=0.1)
mll = gpytorch.mlls.ExactMarginalLogLikelihood(self.likelihood, self)
mll.double()
#Perform training iterations
training_iter = self.parent.training_iter
for i in range(training_iter):
self.optimizer.zero_grad()
output = self(self.train_x)
loss = -mll(output, self.train_y)
loss.backward()
self.optimizer.step()
self.trained = True
'''
Retrain model after new data is obtained
'''
def retrain(self):
#Switch to training mode
self.train()
self.likelihood.train()
mll = gpytorch.mlls.ExactMarginalLogLikelihood(self.likelihood, self)
#Perform training iterations
training_iter = self.parent.training_iter
for i in range(training_iter):
self.optimizer.zero_grad()
output = self(self.train_x)
loss = -mll(output, self.train_y)
loss.backward()
self.optimizer.step()
self.trained = True
'''
Evaluate the child model to get the predictive posterior distribution
'''
def predict(self,x):
if not self.trained:
self.retrain()
#Switch to eval/prediction mode
self.eval()
self.likelihood.eval()
with torch.no_grad(), gpytorch.settings.fast_pred_var():
prediction = self.likelihood(self(x))
return prediction
|
[
"torch.mean",
"copy.deepcopy",
"UtilityFunctions.updateInverseCovarWoodbury",
"torch.stack",
"gpytorch.distributions.MultivariateNormal",
"gpytorch.mlls.ExactMarginalLogLikelihood",
"torch.sqrt",
"torch.cat",
"gpytorch.settings.fast_pred_var",
"torch.max",
"torch.zeros",
"torch.no_grad",
"torch.sum"
] |
[((7082, 7113), 'torch.stack', 'torch.stack', (['centersList'], {'dim': '(0)'}), '(centersList, dim=0)\n', (7093, 7113), False, 'import torch\n'), ((7921, 7949), 'torch.zeros', 'torch.zeros', (['distances.shape'], {}), '(distances.shape)\n', (7932, 7949), False, 'import torch\n'), ((15212, 15239), 'torch.stack', 'torch.stack', (['posteriorMeans'], {}), '(posteriorMeans)\n', (15223, 15239), False, 'import torch\n'), ((17908, 17934), 'torch.mean', 'torch.mean', (['train_x'], {'dim': '(0)'}), '(train_x, dim=0)\n', (17918, 17934), False, 'import torch\n'), ((18289, 18347), 'gpytorch.distributions.MultivariateNormal', 'gpytorch.distributions.MultivariateNormal', (['mean_x', 'covar_x'], {}), '(mean_x, covar_x)\n', (18330, 18347), False, 'import gpytorch\n'), ((18559, 18587), 'torch.cat', 'torch.cat', (['[self.train_x, x]'], {}), '([self.train_x, x])\n', (18568, 18587), False, 'import torch\n'), ((18611, 18639), 'torch.cat', 'torch.cat', (['[self.train_y, y]'], {}), '([self.train_y, y])\n', (18620, 18639), False, 'import torch\n'), ((18937, 18968), 'torch.mean', 'torch.mean', (['self.train_x'], {'dim': '(0)'}), '(self.train_x, dim=0)\n', (18947, 18968), False, 'import torch\n'), ((21362, 21425), 'gpytorch.mlls.ExactMarginalLogLikelihood', 'gpytorch.mlls.ExactMarginalLogLikelihood', (['self.likelihood', 'self'], {}), '(self.likelihood, self)\n', (21402, 21425), False, 'import gpytorch\n'), ((7500, 7523), 'torch.max', 'torch.max', (['distances', '(1)'], {}), '(distances, 1)\n', (7509, 7523), False, 'import torch\n'), ((7549, 7572), 'torch.max', 'torch.max', (['distances', '(0)'], {}), '(distances, 0)\n', (7558, 7572), False, 'import torch\n'), ((13019, 13063), 'torch.sum', 'torch.sum', (['(weights * mean_predictions)'], {'dim': '(1)'}), '(weights * mean_predictions, dim=1)\n', (13028, 13063), False, 'import torch\n'), ((13102, 13150), 'torch.sum', 'torch.sum', (['(weights ** 2 * var_predictions)'], {'dim': '(1)'}), '(weights ** 2 * var_predictions, dim=1)\n', (13111, 13150), False, 'import torch\n'), ((17162, 17196), 'copy.deepcopy', 'copy.deepcopy', (["kwargs['priorMean']"], {}), "(kwargs['priorMean'])\n", (17175, 17196), False, 'import copy\n'), ((20655, 20718), 'gpytorch.mlls.ExactMarginalLogLikelihood', 'gpytorch.mlls.ExactMarginalLogLikelihood', (['self.likelihood', 'self'], {}), '(self.likelihood, self)\n', (20695, 20718), False, 'import gpytorch\n'), ((22084, 22099), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (22097, 22099), False, 'import torch\n'), ((22101, 22134), 'gpytorch.settings.fast_pred_var', 'gpytorch.settings.fast_pred_var', ([], {}), '()\n', (22132, 22134), False, 'import gpytorch\n'), ((9574, 9599), 'torch.stack', 'torch.stack', (['lengthscales'], {}), '(lengthscales)\n', (9585, 9599), False, 'import torch\n'), ((10023, 10052), 'torch.stack', 'torch.stack', (['mean_predictions'], {}), '(mean_predictions)\n', (10034, 10052), False, 'import torch\n'), ((10094, 10122), 'torch.stack', 'torch.stack', (['var_predictions'], {}), '(var_predictions)\n', (10105, 10122), False, 'import torch\n'), ((15649, 15667), 'torch.sum', 'torch.sum', (['weights'], {}), '(weights)\n', (15658, 15667), False, 'import torch\n'), ((15745, 15764), 'torch.sum', 'torch.sum', (['minDists'], {}), '(minDists)\n', (15754, 15764), False, 'import torch\n'), ((19707, 19744), 'UtilityFunctions.updateInverseCovarWoodbury', 'updateInverseCovarWoodbury', (['K_0inv', 'K'], {}), '(K_0inv, K)\n', (19733, 19744), False, 'from UtilityFunctions import updateInverseCovarWoodbury\n'), ((3860, 3926), 'torch.cat', 
'torch.cat', (['[x[0:genNewModelIndices[0]], x[genNewModelIndices[0]:]]'], {}), '([x[0:genNewModelIndices[0]], x[genNewModelIndices[0]:]])\n', (3869, 3926), False, 'import torch\n'), ((3957, 4023), 'torch.cat', 'torch.cat', (['[y[0:genNewModelIndices[0]], y[genNewModelIndices[0]:]]'], {}), '([y[0:genNewModelIndices[0]], y[genNewModelIndices[0]:]])\n', (3966, 4023), False, 'import torch\n'), ((19888, 19904), 'torch.sqrt', 'torch.sqrt', (['Kinv'], {}), '(Kinv)\n', (19898, 19904), False, 'import torch\n'), ((12651, 12677), 'torch.sum', 'torch.sum', (['minDists'], {'dim': '(1)'}), '(minDists, dim=1)\n', (12660, 12677), False, 'import torch\n')]
|
from pathlib import Path
import environ
env = environ.Env(
# set casting, default value
DEBUG=(bool, False)
)
environ.Env.read_env()
# Build paths inside the project like this: BASE_DIR / 'subdir'.
BASE_DIR = Path(__file__).resolve().parent.parent
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = env('SECRET_KEY')
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = env('DEBUG')
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
'speechrecognition.apps.SpeechrecognitionConfig',
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'flags',
'rest_framework',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'smartteddydashboard.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
REST_FRAMEWORK = {
# Use Django's standard `django.contrib.auth` permissions,
# or allow read-only access for unauthenticated users.
'DEFAULT_PERMISSION_CLASSES': [
# TODO Use a more restrictive permission, because allow any can be insecure.
'rest_framework.permissions.AllowAny',
# 'rest_framework.permissions.DjangoModelPermissionsOrAnonReadOnly'
],
'DEFAULT_PARSER_CLASSES': [
'rest_framework.parsers.JSONParser',
]
}
WSGI_APPLICATION = 'smartteddydashboard.wsgi.application'
# Database
# https://docs.djangoproject.com/en/3.1/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': BASE_DIR / 'db.sqlite3',
}
}
# Password validation
# https://docs.djangoproject.com/en/3.1/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/3.1/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/3.1/howto/static-files/
STATIC_URL = '/static/'
# Feature toggles that are pre-defined
# Django-flags https://cfpb.github.io/django-flags/
FLAGS = {
'FLAG_WITH_EMPTY_CONDITIONS': [],
'FLAG_WITH_ANY_CONDITIONS': [],
'FLAG_WITH_REQUIRED_CONDITIONS': [],
}
|
[
"pathlib.Path",
"environ.Env.read_env",
"environ.Env"
] |
[((47, 79), 'environ.Env', 'environ.Env', ([], {'DEBUG': '(bool, False)'}), '(DEBUG=(bool, False))\n', (58, 79), False, 'import environ\n'), ((119, 141), 'environ.Env.read_env', 'environ.Env.read_env', ([], {}), '()\n', (139, 141), False, 'import environ\n'), ((219, 233), 'pathlib.Path', 'Path', (['__file__'], {}), '(__file__)\n', (223, 233), False, 'from pathlib import Path\n')]
|
from typing import TYPE_CHECKING
import requests
if TYPE_CHECKING:
from undergen.lib.data import Character
url = "https://api.15.ai/app/getAudioFile5"
cdn_url = "https://cdn.15.ai/audio/"
headers = {'authority': 'api.15.ai',
'access-control-allow-origin': '*',
'accept': 'application/json, text/plain, */*',
'dnt': '1',
'user-agent': 'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/94.0.4606.71 Safari/537.36',
'content-type': 'application/json;charset=UTF-8',
'sec-gpc': '1',
'origin': 'https://15.ai',
'sec-fetch-site': 'same-site',
'sec-fetch-mode': 'cors',
'sec-fetch-dest': 'empty',
'referer': 'https://15.ai/',
'accept-language': 'en-US,en;q=0.9'}
def get_sound(character: "Character", text: str):
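    # POST the text to the 15.ai API, then download the generated WAV from the CDN.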
character_name = character.sound_name
emotion = "Normal"
print(f"Getting audio for {character_name} '{text}'...")
response = requests.post(url, json = {
"character": character_name,
"emotion": emotion,
"text": text
}, headers = headers)
if response.status_code != 200:
raise RuntimeError(f"15.ai responded with code {response.status_code}.")
data_json = response.json()
wav_name = data_json["wavNames"][0]
second_response = requests.get(cdn_url + wav_name)
if second_response.status_code != 200:
raise RuntimeError(f"15.ai CDN responded with code {second_response.status_code}.")
print("Audio success!")
return second_response.content
|
[
"requests.post",
"requests.get"
] |
[((1019, 1128), 'requests.post', 'requests.post', (['url'], {'json': "{'character': character_name, 'emotion': emotion, 'text': text}", 'headers': 'headers'}), "(url, json={'character': character_name, 'emotion': emotion,\n 'text': text}, headers=headers)\n", (1032, 1128), False, 'import requests\n'), ((1373, 1405), 'requests.get', 'requests.get', (['(cdn_url + wav_name)'], {}), '(cdn_url + wav_name)\n', (1385, 1405), False, 'import requests\n')]
|
from django.contrib.auth import get_user_model
from rest_framework import serializers
from apps.basics.op_drf.serializers import CustomModelSerializer
from apps.projects.efficiency.models import Efficiency
from apps.projects.efficiency.models import Module
UserProfile = get_user_model()
class EfficiencySerializer(CustomModelSerializer):
"""
    Simple menu serializer
"""
class Meta:
model = Efficiency
# fields = '__all__'
exclude = ('description', 'creator', 'modifier')
class ModuleSerializer(CustomModelSerializer):
"""
    Module management: simple serializer
"""
parentId = serializers.IntegerField(source="parentId.id", default=0)
class Meta:
model = Module
exclude = ('description', 'creator', 'modifier')
class ModuleCreateUpdateSerializer(CustomModelSerializer):
"""
    Module management: serializer used for create/update
"""
def validate(self, attrs: dict):
return super().validate(attrs)
class Meta:
model = Module
fields = '__all__'
class ModuleTreeSerializer(serializers.ModelSerializer):
"""
    Module tree serializer: recursively serializes child modules at all depths
"""
label = serializers.CharField(source='name', default='')
parentId = serializers.IntegerField(source="parentId.id", default=0)
class Meta:
model = Module
fields = ('id', 'label', 'parentId', 'status')
|
[
"rest_framework.serializers.IntegerField",
"django.contrib.auth.get_user_model",
"rest_framework.serializers.CharField"
] |
[((274, 290), 'django.contrib.auth.get_user_model', 'get_user_model', ([], {}), '()\n', (288, 290), False, 'from django.contrib.auth import get_user_model\n'), ((598, 655), 'rest_framework.serializers.IntegerField', 'serializers.IntegerField', ([], {'source': '"""parentId.id"""', 'default': '(0)'}), "(source='parentId.id', default=0)\n", (622, 655), False, 'from rest_framework import serializers\n'), ((1110, 1158), 'rest_framework.serializers.CharField', 'serializers.CharField', ([], {'source': '"""name"""', 'default': '""""""'}), "(source='name', default='')\n", (1131, 1158), False, 'from rest_framework import serializers\n'), ((1174, 1231), 'rest_framework.serializers.IntegerField', 'serializers.IntegerField', ([], {'source': '"""parentId.id"""', 'default': '(0)'}), "(source='parentId.id', default=0)\n", (1198, 1231), False, 'from rest_framework import serializers\n')]
|
"""
This file stores all the possible configurations for the Flask app.
Settings like the secret key or the database URL should be
stored as environment variables and imported using the 'os'
library in Python.
"""
import os
class BaseConfig:
SQLALCHEMY_TRACK_MODIFICATIONS = False
SSL = os.getenv('POSTGRESQL_SSL', True)
if isinstance(SSL, str):
SSL = SSL.lower() in ['true', '1', 'yes', "t"]
DATABASE = os.getenv('POSTGRESQL_DATABASE', 'postgres')
HOST = os.getenv('POSTGRESQL_HOST', 'localhost')
    PORT = int(os.getenv('POSTGRESQL_PORT', 5432))
USERNAME = os.getenv('POSTGRESQL_USERNAME', 'root')
PASSWORD = os.getenv('POSTGRESQL_PASSWORD', '<PASSWORD>')
COLLECT_METRICS_INTERVAL_SEC = int(
os.getenv('COLLECT_METRICS_INTERVAL_SEC', 120))
DEBUG = False
TESTING = False
class TestingConfig(BaseConfig):
DEBUG = True
TESTING = True
class DevelopmentConfig(BaseConfig):
DEBUG = True
class ProductionConfig(BaseConfig):
DEBUG = False
|
[
"os.getenv"
] |
[((313, 346), 'os.getenv', 'os.getenv', (['"""POSTGRESQL_SSL"""', '(True)'], {}), "('POSTGRESQL_SSL', True)\n", (322, 346), False, 'import os\n'), ((446, 490), 'os.getenv', 'os.getenv', (['"""POSTGRESQL_DATABASE"""', '"""postgres"""'], {}), "('POSTGRESQL_DATABASE', 'postgres')\n", (455, 490), False, 'import os\n'), ((502, 543), 'os.getenv', 'os.getenv', (['"""POSTGRESQL_HOST"""', '"""localhost"""'], {}), "('POSTGRESQL_HOST', 'localhost')\n", (511, 543), False, 'import os\n'), ((555, 589), 'os.getenv', 'os.getenv', (['"""POSTGRESQL_PORT"""', '(5432)'], {}), "('POSTGRESQL_PORT', 5432)\n", (564, 589), False, 'import os\n'), ((606, 646), 'os.getenv', 'os.getenv', (['"""POSTGRESQL_USERNAME"""', '"""root"""'], {}), "('POSTGRESQL_USERNAME', 'root')\n", (615, 646), False, 'import os\n'), ((662, 708), 'os.getenv', 'os.getenv', (['"""POSTGRESQL_PASSWORD"""', '"""<PASSWORD>"""'], {}), "('POSTGRESQL_PASSWORD', '<PASSWORD>')\n", (671, 708), False, 'import os\n'), ((757, 803), 'os.getenv', 'os.getenv', (['"""COLLECT_METRICS_INTERVAL_SEC"""', '(120)'], {}), "('COLLECT_METRICS_INTERVAL_SEC', 120)\n", (766, 803), False, 'import os\n')]
|
import os, sys
sys.path.append(os.path.dirname(os.path.abspath(os.path.dirname(__file__))))
import configparser
import env
from envs.seoul_env import SeoulEnv, SeoulController
import numpy as np
import matplotlib
ilds_map ={'1_l', '1_r', '2_l', '2_r', '3_u', '3_d'}
class SeoulCounterEnv(SeoulEnv):
def __init__(self, config, port=0, output_path='', is_record=False, record_stat=False, sumo_config=None):
self.sumo_config = sumo_config
self.ilds_map = ilds_map
self.counts_map = dict()
self.vehicles_in_lane = dict()
        # ilds_map is a plain set of detector ids, so the per-detector vehicle
        # lists are keyed by detector id directly.
        for ild in self.ilds_map:
            self.vehicles_in_lane[ild] = list()
super().__init__(config, output_path, is_record, record_stat, port=port)
def _init_sim_config(self, seed=None):
return self.sumo_config
def step(self, action):
self.count()
super().step(action)
def count(self):
        for ild in self.ilds_map:
            vid = self.sim.lanearea.getLastStepVehicleIDs(ild)
class TrafficCounter:
def __init__(self, config, base_dir, sumo_config):
self.config = config
self.base_dir = base_dir
self.sumo_config = sumo_config
if not os.path.exists(self.base_dir):
os.mkdir(self.base_dir)
self.env = SeoulCounterEnv(self.config['ENV_CONFIG'], 2,self.base_dir, is_record=True, record_stat=True, sumo_config=self.sumo_config)
        self.ob = self.env.reset()
self.controller = SeoulController(self.env.node_names, self.env.nodes)
def exploreGreedy(self):
        it = 0
        while True:
            it += 1
next_ob, _, done, reward = self.env.step(self.controller.forward(self.ob))
if done:
break
self.ob = next_ob
self.env.terminate()
def run(self):
self.exploreGreedy()
if __name__ == '__main__':
config = configparser.ConfigParser()
config.read('./config/config_greedy_seoul.ini')
base_dir = './output_result/'
counter = TrafficCounter(config, base_dir)
counter.run()
|
[
"env.reset",
"os.mkdir",
"configparser.ConfigParser",
"os.path.dirname",
"os.path.exists",
"envs.seoul_env.SeoulController"
] |
[((1956, 1983), 'configparser.ConfigParser', 'configparser.ConfigParser', ([], {}), '()\n', (1981, 1983), False, 'import configparser\n'), ((1444, 1455), 'env.reset', 'env.reset', ([], {}), '()\n', (1453, 1455), False, 'import env\n'), ((1482, 1534), 'envs.seoul_env.SeoulController', 'SeoulController', (['self.env.node_names', 'self.env.nodes'], {}), '(self.env.node_names, self.env.nodes)\n', (1497, 1534), False, 'from envs.seoul_env import SeoulEnv, SeoulController\n'), ((63, 88), 'os.path.dirname', 'os.path.dirname', (['__file__'], {}), '(__file__)\n', (78, 88), False, 'import os, sys\n'), ((1216, 1245), 'os.path.exists', 'os.path.exists', (['self.base_dir'], {}), '(self.base_dir)\n', (1230, 1245), False, 'import os, sys\n'), ((1259, 1282), 'os.mkdir', 'os.mkdir', (['self.base_dir'], {}), '(self.base_dir)\n', (1267, 1282), False, 'import os, sys\n')]
|
# BSD 3-Clause License; see https://github.com/scikit-hep/awkward-1.0/blob/main/LICENSE
from __future__ import absolute_import
import awkward as ak
np = ak.nplike.NumpyMetadata.instance()
def is_none(array, axis=0, highlevel=True, behavior=None):
raise NotImplementedError
# """
# Args:
# array: Data to check for missing values (None).
# axis (int): The dimension at which this operation is applied. The
# outermost dimension is `0`, followed by `1`, etc., and negative
# values count backward from the innermost: `-1` is the innermost
# dimension, `-2` is the next level up, etc.
# highlevel (bool): If True, return an #ak.Array; otherwise, return
# a low-level #ak.layout.Content subclass.
# behavior (None or dict): Custom #ak.behavior for the output array, if
# high-level.
# Returns an array whose value is True where an element of `array` is None;
# False otherwise (at a given `axis` depth).
# """
# def getfunction(layout, depth, posaxis):
# posaxis = layout.axis_wrap_if_negative(posaxis)
# if posaxis == depth - 1:
# nplike = ak.nplike.of(layout)
# if isinstance(layout, ak._v2._util.optiontypes):
# return lambda: ak._v2.contents.NumpyArray(
# nplike.asarray(layout.bytemask()).view(np.bool_)
# )
# elif isinstance(
# layout,
# (
# ak._v2._util.unknowntypes,
# ak._v2._util.listtypes,
# ak._v2._util.recordtypes,
# ak._v2.contents.NumpyArray,
# ),
# ):
# return lambda: ak._v2.contents.NumpyArray(
# nplike.zeros(len(layout), dtype=np.bool_)
# )
# else:
# return posaxis
# else:
# return posaxis
# layout = ak._v2.operations.convert.to_layout(array)
# out = ak._v2._util.recursively_apply(
# layout, getfunction, pass_depth=True, pass_user=True, user=axis
# )
# return ak._v2._util.maybe_wrap_like(out, array, behavior, highlevel)
|
[
"awkward.nplike.NumpyMetadata.instance"
] |
[((156, 190), 'awkward.nplike.NumpyMetadata.instance', 'ak.nplike.NumpyMetadata.instance', ([], {}), '()\n', (188, 190), True, 'import awkward as ak\n')]
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright 2016 <NAME>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import tensorflow as tf
from nparser.neural.recur_cells.base_cell import BaseCell
from nparser.neural.linalg import linear
#***************************************************************
class RNNCell(BaseCell):
""" """
#=============================================================
def __call__(self, inputs, state, scope=None):
""" """
with tf.variable_scope(scope or type(self).__name__):
inputs_list = [inputs, state]
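      # Vanilla RNN recurrence: h_t = f(W [x_t, h_{t-1}] + b), where f is
      # self.recur_func and the affine map is computed by linear().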
hidden_act = linear(inputs_list,
self.output_size,
add_bias=True,
moving_params=self.moving_params)
hidden = self.recur_func(hidden_act)
return hidden, hidden
#=============================================================
@property
def state_size(self):
return self.output_size
|
[
"nparser.neural.linalg.linear"
] |
[((1097, 1188), 'nparser.neural.linalg.linear', 'linear', (['inputs_list', 'self.output_size'], {'add_bias': '(True)', 'moving_params': 'self.moving_params'}), '(inputs_list, self.output_size, add_bias=True, moving_params=self.\n moving_params)\n', (1103, 1188), False, 'from nparser.neural.linalg import linear\n')]
|
import arrow
import json
import requests
def kanban_webhook(event, context):
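    # The incoming webhook carries a Kanban board action; card creations/moves
    # into the DOING, BREAK or DONE lists are forwarded to Slack, anything else
    # returns 400.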
input_body = json.loads(event['body'])
print(event['body'])
action = input_body["action"]
action_type = action["type"]
if action_type == "createCard":
list_name, card_name = get_create_card(action["data"])
    elif action_type == "updateCard":
        list_name, card_name = get_update_card(action["data"])
    else:
        list_name, card_name = None, None
kanban_list = ["DOING", "BREAK", "DONE"]
if list_name in kanban_list:
payload = make_payload(action=list_name, msg=card_name)
r = send_to_kino({"text": payload})
response = {
"statusCode": r.status_code
}
response = {
"statusCode": 400
}
return response
def get_create_card(action_data):
list_name = action_data["list"]["name"].upper()
card_name = action_data["card"]["name"]
return list_name, card_name
def get_update_card(action_data):
list_name = action_data["listAfter"]["name"].upper()
card_name = action_data["card"]["name"]
return list_name, card_name
def make_payload(action=None, msg=None, time=None):
if time is None:
now = arrow.now()
        time = now.format("MMMM D, YYYY") + " at " + now.format("HH:mmA")  # "D" = day of month in arrow
payload = {
"action": "KANBAN_" + action,
"msg": msg,
"time": time
}
return json.dumps(payload)
def send_to_kino(data):
return requests.post("https://hooks.slack.com/services/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", data=json.dumps(data))
|
[
"arrow.now",
"json.loads",
"json.dumps"
] |
[((97, 122), 'json.loads', 'json.loads', (["event['body']"], {}), "(event['body'])\n", (107, 122), False, 'import json\n'), ((1364, 1383), 'json.dumps', 'json.dumps', (['payload'], {}), '(payload)\n', (1374, 1383), False, 'import json\n'), ((1165, 1176), 'arrow.now', 'arrow.now', ([], {}), '()\n', (1174, 1176), False, 'import arrow\n'), ((1520, 1536), 'json.dumps', 'json.dumps', (['data'], {}), '(data)\n', (1530, 1536), False, 'import json\n')]
|
# Generated by Django 3.2.6 on 2022-02-06 17:17
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('home', '0008_auto_20220202_1858'),
]
operations = [
migrations.RemoveField(
model_name='student',
name='activities',
),
migrations.AddField(
model_name='student',
name='activities',
field=models.ManyToManyField(blank=True, null=True, related_name='fils', to='home.Activity'),
),
]
|
[
"django.db.migrations.RemoveField",
"django.db.models.ManyToManyField"
] |
[((232, 295), 'django.db.migrations.RemoveField', 'migrations.RemoveField', ([], {'model_name': '"""student"""', 'name': '"""activities"""'}), "(model_name='student', name='activities')\n", (254, 295), False, 'from django.db import migrations, models\n'), ((444, 535), 'django.db.models.ManyToManyField', 'models.ManyToManyField', ([], {'blank': '(True)', 'null': '(True)', 'related_name': '"""fils"""', 'to': '"""home.Activity"""'}), "(blank=True, null=True, related_name='fils', to=\n 'home.Activity')\n", (466, 535), False, 'from django.db import migrations, models\n')]
|
import pathlib
bib = pathlib.Path(__file__).parent.absolute() / 'bibliography.bib'
del pathlib
|
[
"pathlib.Path"
] |
[((22, 44), 'pathlib.Path', 'pathlib.Path', (['__file__'], {}), '(__file__)\n', (34, 44), False, 'import pathlib\n')]
|
# Copyright 2021 IBM Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import connexion
import json
import traceback
from swagger_server.models import ApiCatalogUploadError
from swagger_server.models.api_catalog_upload import ApiCatalogUpload # noqa: E501
from swagger_server.models.api_catalog_upload_response import ApiCatalogUploadResponse
from swagger_server.models.api_catalog_upload_item import ApiCatalogUploadItem
from swagger_server.models.api_list_catalog_items_response import ApiListCatalogItemsResponse # noqa: E501
from swagger_server.controllers_impl.component_service_controller_impl import list_components, upload_component_from_url
from swagger_server.controllers_impl.dataset_service_controller_impl import list_datasets, upload_dataset_from_url
from swagger_server.controllers_impl.model_service_controller_impl import list_models, upload_model_from_url
from swagger_server.controllers_impl.notebook_service_controller_impl import list_notebooks, upload_notebook_from_url
from swagger_server.controllers_impl.pipeline_service_controller_impl import list_pipelines, upload_pipeline_from_url
from swagger_server.util import ApiError
def list_all_assets(page_token=None, page_size=None, sort_by=None, filter=None): # noqa: E501
"""list_all_assets
:param page_token:
:type page_token: str
:param page_size:
:type page_size: int
:param sort_by: Can be format of \"field_name\", \"field_name asc\" or \"field_name desc\" Ascending by default.
:type sort_by: str
:param filter: A string-serialized JSON dictionary with key-value pairs that correspond to the ApiComponent's attribute names and their respective values to be filtered for.
:type filter: str
:rtype: ApiListCatalogItemsResponse
"""
if page_size == 0:
return {}, 204
# TODO: do not mis-use page_token as MySQL result offset
offset = int(page_token) if page_token and page_token.isdigit() else 0
if page_size or page_token:
print(f"WARNING: page_size and page_token are not implemented on {__file__}#list_all_assets()")
list_methods = {
"components": list_components,
"datasets": list_datasets,
"models": list_models,
"notebooks": list_notebooks,
"pipelines": list_pipelines
}
api_response = ApiListCatalogItemsResponse(
components=[], datasets=[], models=[], notebooks=[], pipelines=[],
total_size=0)
for asset_type, list_method in list_methods.items():
asset_list_response, status = list_method(filter=filter, sort_by=sort_by)
if 200 <= status < 300:
asset_list = asset_list_response.__getattribute__(asset_type)
api_response.__getattribute__(asset_type).extend(asset_list)
# TODO: return filtered size or total number of all assets
# api_response.total_size += asset_list_response.total_size
api_response.total_size += len(asset_list)
return api_response, 200
def upload_multiple_assets(body: ApiCatalogUpload): # noqa: E501
"""upload_multiple_assets
:param body:
:type body: ApiCatalogUpload
:rtype: ApiCatalogUploadResponse
"""
if connexion.request.is_json:
body = ApiCatalogUpload.from_dict(connexion.request.get_json()) # noqa: E501
def get_access_token_for_url(url: str) -> str:
for api_access_token in body.api_access_tokens or []:
if api_access_token.url_host in url:
return api_access_token.api_token
return None
upload_methods = {
"components": upload_component_from_url,
"datasets": upload_dataset_from_url,
"models": upload_model_from_url,
"notebooks": upload_notebook_from_url,
"pipelines": upload_pipeline_from_url
}
api_response = ApiCatalogUploadResponse(
components=[], datasets=[], models=[], notebooks=[], pipelines=[],
total_created=0, errors=[], total_errors=0)
for asset_type, upload_method in upload_methods.items():
for asset in body.__getattribute__(asset_type) or []:
try:
api_object, status = upload_method(
url=asset.url, name=asset.name,
access_token=get_access_token_for_url(asset.url))
if 200 <= status < 300:
api_response.__getattribute__(asset_type).append(api_object)
api_response.total_created += 1
else:
# TODO: remove this?
api_error = ApiCatalogUploadError(**asset.to_dict(),
error_message=f"THIS SHOULD NOT HAPPEN: {str(api_object).strip()}",
status_code=500)
api_response.errors.append(api_error)
print(f"THIS SHOULD NOT HAPPEN: {api_error}")
print(traceback.format_exc())
except ApiError as e:
api_error = ApiCatalogUploadError(**asset.to_dict(),
error_message=e.message,
status_code=e.http_status_code)
api_response.errors.append(api_error)
except Exception as e:
api_error = ApiCatalogUploadError(**asset.to_dict(),
error_message=str(e),
status_code=500)
api_response.errors.append(api_error)
print(traceback.format_exc())
api_response.total_errors = len(api_response.errors)
response_status = \
201 if api_response.total_created > 0 and api_response.total_errors == 0 else \
207 if api_response.total_created > 0 and api_response.total_errors > 0 else \
max([e.status_code for e in api_response.errors])
return api_response, response_status
|
[
"connexion.request.get_json",
"swagger_server.models.api_catalog_upload_response.ApiCatalogUploadResponse",
"traceback.format_exc",
"swagger_server.models.api_list_catalog_items_response.ApiListCatalogItemsResponse"
] |
[((2853, 2965), 'swagger_server.models.api_list_catalog_items_response.ApiListCatalogItemsResponse', 'ApiListCatalogItemsResponse', ([], {'components': '[]', 'datasets': '[]', 'models': '[]', 'notebooks': '[]', 'pipelines': '[]', 'total_size': '(0)'}), '(components=[], datasets=[], models=[],\n notebooks=[], pipelines=[], total_size=0)\n', (2880, 2965), False, 'from swagger_server.models.api_list_catalog_items_response import ApiListCatalogItemsResponse\n'), ((4356, 4496), 'swagger_server.models.api_catalog_upload_response.ApiCatalogUploadResponse', 'ApiCatalogUploadResponse', ([], {'components': '[]', 'datasets': '[]', 'models': '[]', 'notebooks': '[]', 'pipelines': '[]', 'total_created': '(0)', 'errors': '[]', 'total_errors': '(0)'}), '(components=[], datasets=[], models=[], notebooks=[\n ], pipelines=[], total_created=0, errors=[], total_errors=0)\n', (4380, 4496), False, 'from swagger_server.models.api_catalog_upload_response import ApiCatalogUploadResponse\n'), ((3801, 3829), 'connexion.request.get_json', 'connexion.request.get_json', ([], {}), '()\n', (3827, 3829), False, 'import connexion\n'), ((5476, 5498), 'traceback.format_exc', 'traceback.format_exc', ([], {}), '()\n', (5496, 5498), False, 'import traceback\n'), ((6135, 6157), 'traceback.format_exc', 'traceback.format_exc', ([], {}), '()\n', (6155, 6157), False, 'import traceback\n')]
|
"""Training GCMC model on the MovieLens data set.
The script loads the full graph to the training device.
"""
import os, time
import argparse
import logging
import random
import string
import dgl
import scipy.sparse as sp
import pandas as pd
import numpy as np
import torch as th
import torch.nn as nn
import torch.nn.functional as F
from data import DataSetLoader
#from data_custom import DataSetLoader
from model import BiDecoder, GCMCLayer, MLPDecoder
from utils import get_activation, get_optimizer, torch_total_param_num, torch_net_info, MetricLogger
from utils import to_etype_name
from sklearn.metrics import ndcg_score
#f1 = open(os.path.join(DATA_ROOT, 'EHCF.txt'), 'w')
def sample_negative(ratings, sample_rate, item_set):
"""
input:
1. training rating ::pd.frame
2. sample number::int
3. item_set:a set of item::set
"""
#"""return all negative items & 100 sampled negative items"""
interact_status = ratings.groupby('user_id')['movie_id'].apply(set).reset_index().rename(columns={'itemId': 'interacted_items'})
#print(interact_status)
#item_list = set(item_list)
interact_status['negative_items'] = interact_status['movie_id'].apply(lambda x: item_set - x)
#print(interact_status['negative_items'])
interact_status['negative_samples'] = interact_status['negative_items'].apply(lambda x: random.sample(x, sample_rate))
return interact_status[['user_id', 'negative_items', 'negative_samples']]
def generate_pair(user_list, num_movie):
    # Inputs: user_list and num_movie,
    # where num_movie is the total number of movies.
rating_pairs = (np.array(np.array([[ele] * num_movie for ele in user_list]).flatten(),
dtype=np.int64),
np.array(np.array([[np.arange(num_movie)] * len(user_list)]).flatten(),
dtype=np.int64))
return rating_pairs
def generate_dec_graph(rating_pairs, num_user, num_movie):
#print(rating_pairs)
#print("***:",len(rating_pairs), num_user, num_movie)
ones = np.ones_like(rating_pairs[0])
user_movie_ratings_coo = sp.coo_matrix(
(ones, rating_pairs),
shape=(num_user, num_movie), dtype=np.float32)
g = dgl.bipartite_from_scipy(user_movie_ratings_coo, utype='_U', etype='_E', vtype='_V')
return dgl.heterograph({('user', 'rate', 'movie'): g.edges()},
num_nodes_dict={'user': num_user, 'movie': num_movie})
class Net(nn.Module):
def __init__(self, args):
super(Net, self).__init__()
self._act = get_activation(args.model_activation)
self.encoder = nn.ModuleList()
self.encoder.append(GCMCLayer(args.rating_vals,
args.src_in_units,
args.dst_in_units,
args.gcn_agg_units,
args.gcn_out_units,
args.gcn_dropout,
args.gcn_agg_accum,
agg_act=self._act,
share_user_item_param=args.share_param,
device=args.device))
self.gcn_agg_accum = args.gcn_agg_accum
self.rating_vals = args.rating_vals
self.device = args.device
self.gcn_agg_units = args.gcn_agg_units
self.src_in_units = args.src_in_units
for i in range(1, args.layers):
if args.gcn_agg_accum == 'stack':
gcn_out_units = args.gcn_out_units * len(args.rating_vals)
else:
gcn_out_units = args.gcn_out_units
self.encoder.append(GCMCLayer(args.rating_vals,
args.gcn_out_units,
args.gcn_out_units,
gcn_out_units,
args.gcn_out_units,
args.gcn_dropout - i*0.1,
args.gcn_agg_accum,
agg_act=self._act,
share_user_item_param=args.share_param,
ini = False,
device=args.device))
if args.decoder == "Bi":
self.decoder = BiDecoder(in_units= args.gcn_out_units, #* args.layers,
num_classes=len(args.rating_vals),
num_basis=args.gen_r_num_basis_func)
'''
self.decoder2 = MLPDecoder(in_units= args.gcn_out_units * 2,
num_classes=len(args.rating_vals),
num_basis=args.gen_r_num_basis_func)
'''
elif args.decoder == "MLP":
if args.loss_func == "CE":
num_classes = len(args.rating_vals)
else:
num_classes = 1
self.decoder = MLPDecoder(in_units= args.gcn_out_units * args.layers,
num_classes=num_classes,
num_basis=args.gen_r_num_basis_func)
self.rating_vals = args.rating_vals
def forward(self, enc_graph, dec_graph, ufeat, ifeat, Two_Stage = False):
user_out = []
movie_out = []
for i in range(0, args.layers):
user_o, movie_o = self.encoder[i](
enc_graph,
ufeat,
ifeat,
Two_Stage)
if i == 0:
user_out = user_o
movie_out = movie_o
else:
user_out += user_o / float(i + 1)
movie_out += movie_o /float(i + 1)
#user_out.append(user_o)
#movie_out.append(movie_o)
ufeat = user_o
ifeat = movie_o
#pred_ratings = self.decoder2(dec_graph, th.cat([user_out[0], user_out[1]], 1), th.cat([movie_out[1], movie_out[0]], 1))
#user_out = th.cat(user_out, 1)
#movie_out = th.cat(movie_out, 1)
#print("user_out:", user_out[0])
#print("movie_out:", movie_out[0])
pred_ratings = self.decoder(dec_graph, user_out, movie_out)
W_r_last = None
reg_loss = 0.0
'''
for rating in self.rating_vals:
rating = to_etype_name(rating)
if W_r_last is not None:
reg_loss += th.sum((self.encoder[0].W_r[rating] - W_r_last)**2)
W_r_last = self.encoder[0].W_r[rating]
#W_r_last_2 = self.encoder_2.W_r[rating]
'''
W = th.matmul(self.encoder[0].att, self.encoder[0].basis.view(self.encoder[0].basis_units, -1))
W = W.view(len(self.rating_vals), self.src_in_units, -1)
for i, rating in enumerate(self.rating_vals):
rating = to_etype_name(rating)
if i != 0:
reg_loss += -th.sum(th.cosine_similarity(W[i,:,:], W[i-1,:,:], dim=1))
return pred_ratings, reg_loss, user_out, movie_out, W
def train(args):
print(args)
dataset = DataSetLoader(args.data_name, args.device,
use_one_hot_fea=args.use_one_hot_fea,
symm=args.gcn_agg_norm_symm,
test_ratio=args.data_test_ratio,
valid_ratio=args.data_valid_ratio,
sample_rate = args.sample_rate)
print("Loading data finished ...\n")
args.src_in_units = dataset.user_feature_shape[1]
args.dst_in_units = dataset.movie_feature_shape[1]
args.rating_vals = dataset.possible_rating_values
### build the net
net = Net(args=args)
net = net.to(args.device)
nd_possible_rating_values = th.FloatTensor(dataset.possible_rating_values).to(args.device)
rating_loss_net = nn.CrossEntropyLoss()
learning_rate = args.train_lr
optimizer = get_optimizer(args.train_optimizer)(net.parameters(), lr=learning_rate)
print("Loading network finished ...\n")
### perpare training data
train_gt_labels = dataset.train_labels
train_gt_ratings = dataset.train_truths
### prepare the logger
NDCG_logger = MetricLogger(['recall50', 'recall100', 'recall200','ndcg50', 'ndcg100', 'ndcg200'], ['%.4f', '%.4f', '%.4f','%.4f', '%.4f', '%.4f'], os.path.join(args.save_dir, 'NDCG.csv'))
### declare the loss information
best_valid_rmse = np.inf
best_valid_ndcg = -np.inf
best_test_ndcg = []
no_better_valid = 0
best_iter = -1
count_rmse = 0
count_num = 0
count_loss = 0
dataset.train_enc_graph = dataset.train_enc_graph.int().to(args.device)
dataset.train_dec_graph = dataset.train_dec_graph.int().to(args.device)
dataset.valid_enc_graph = dataset.train_enc_graph
dataset.valid_dec_graph = dataset.valid_dec_graph.int().to(args.device)
dataset.test_enc_graph = dataset.test_enc_graph.int().to(args.device)
dataset.test_dec_graph = dataset.test_dec_graph.int().to(args.device)
train_m = dataset.train_m
test_m = dataset.test_m
tset = dataset.tset
user_num ,item_num = train_m.shape[0], train_m.shape[1]
#dataset.valid_recall_dec_graph = dataset.valid_recall_dec_graph.to(args.device)
#dataset.test_recall_dec_graph = dataset.test_recall_dec_graph.to(args.device)
print("Start training ...")
train_rating_pairs, train_rating_values = dataset._generate_pair_value(dataset.train_rating_info)
def update_encode_graph(dataset, train_rating_pairs, train_rating_values, sampled_data):
train_rating_pairs_zeros, train_rating_values_zeros = dataset._generate_pair_value_for_zero(dataset.train_rating_info, sampled_data)
train_rating_pairs = (np.append(train_rating_pairs[0], train_rating_pairs_zeros[0]), np.append(train_rating_pairs[1], train_rating_pairs_zeros[1]))
train_rating_values = np.append(train_rating_values, train_rating_values_zeros)
dataset.train_enc_graph = dataset._generate_enc_graph(train_rating_pairs, train_rating_values, add_support = True)
dataset.train_enc_graph = dataset.train_enc_graph.int().to(args.device)
dataset.valid_enc_graph = dataset.train_enc_graph
return dataset.train_enc_graph
def sample_data(interact_status, random_number, sample_rate):
random.seed(random_number)
interact_status['negative_samples'] = interact_status['negative_items'].apply(lambda x: random.sample(x, sample_rate))
return interact_status[['user_id', 'negative_items', 'negative_samples']]
seed_list = np.random.randint(0, 10000, (args.train_max_iter,))
Two_Stage = False
#sampled_data = sample_data(negitive_all, random_number = seed_list[iter_idx], sample_rate = 3)
negitive_all = dataset.negative_all(dataset.train_rating_info)
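    # Sample 99 negative (unrated) items per user once up front; update_encode_graph
    # below presumably adds them to the encoder graph as zero-valued rating edges.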
sampled_data = sample_data(negitive_all, random_number = 1, sample_rate = 99)
dataset.train_enc_graph = update_encode_graph(dataset, train_rating_pairs, train_rating_values, sampled_data)
dataset.valid_enc_graph = dataset.train_enc_graph
for iter_idx in range(1, args.train_max_iter):
#sampled_data = sample_data(negitive_all, random_number = 1, sample_rate = 3)
#dataset.train_enc_graph = update_encode_graph(dataset, train_rating_pairs, train_rating_values, sampled_data)
print("iter:",iter_idx)
net.train()
pred_ratings, reg_loss, user_out, movie_out, W = net(dataset.train_enc_graph, dataset.train_dec_graph,
dataset.user_feature, dataset.movie_feature, Two_Stage)
loss = rating_loss_net(pred_ratings, train_gt_labels).mean() + args.ARR * reg_loss
count_loss += loss.item()
optimizer.zero_grad()
loss.backward(retain_graph=True)
nn.utils.clip_grad_norm_(net.parameters(), args.train_grad_clip)
optimizer.step()
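        # Expected rating: probability-weighted sum of the possible rating values
        # under the softmax over rating classes.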
real_pred_ratings = (th.softmax(pred_ratings, dim=1) * nd_possible_rating_values.view(1, -1)).sum(dim=1)
#print(real_pred_ratings.shape)
        # evaluate the current predictions
if iter_idx < 100:
if iter_idx % 10 == 0:
recall50_, recall100_, recall200_, ndcg50_, ndcg100_, ndcg200_ = \
dev_step(tset, train_m, test_m, net, dataset, args, nd_possible_rating_values)
#dev_cold(u_train,i_train, tset, train_m, test_m)
NDCG_logger.log(recall50 = recall50_, recall100 = recall100_, recall200 = recall200_, ndcg50 = ndcg50_, ndcg100 = ndcg100_, ndcg200 = ndcg200_)
if iter_idx >= 500:
recall50, recall100, recall200, ndcg50, ndcg100, ndcg200 = \
dev_step(tset, train_m, test_m, net, dataset, args ,nd_possible_rating_values)
NDCG_logger.log(recall50 = recall50_, recall100 = recall100_, recall200 = recall200_, ndcg50 = ndcg50_, ndcg100 = ndcg100_, ndcg200 = ndcg200_)
#dev_cold(u_train,i_train, tset, train_m, test_m)
NDCG_logger.close()
def dev_step(tset, train_m, test_m, net, dataset, args, nd_possible_rating_values):
"""
Evaluates model on a dev set
"""
batch_size = 128
#print("tset:",tset)
user_te = np.array(list(tset.keys()))
#print("user_te:",user_te)
user_te2 = user_te[:, np.newaxis]
#user_te2 = user_te
ll = int(len(user_te) / batch_size) + 1
recall50 = []
recall100 = []
recall200 = []
ndcg50 = []
ndcg100 = []
ndcg200 = []
for batch_num in range(ll):
print(batch_num/ll*100,"%")
start_index = batch_num * batch_size
end_index = min((batch_num + 1) * batch_size, len(user_te))
        # u_batch is the list of users in this batch
u_batch = user_te2[start_index:end_index]
        # batch_users is the number of users in this batch
batch_users = end_index - start_index
        num_user = train_m.shape[0]  # total number of users
        num_movie = train_m.shape[1]  # total number of items
user_list = user_te[start_index:end_index]
batch_rating_pairs = generate_pair(user_list, num_movie)
batch_dec_graph = generate_dec_graph(batch_rating_pairs, num_user, num_movie).to(args.device)
Two_Stage = False
pred_ratings, reg_loss, user_out, movie_out, W = net(dataset.train_enc_graph, batch_dec_graph, dataset.user_feature, dataset.movie_feature, Two_Stage)
real_pred_ratings = (th.softmax(pred_ratings, dim=1) * nd_possible_rating_values.view(1, -1)).sum(dim=1)
u_b = user_te[start_index:end_index]
real_pred_ratings = real_pred_ratings.cpu()
#print("pred_shape:", real_pred_ratings.shape)
pre = real_pred_ratings.reshape(batch_users, -1)
#print("pred_shape:", pre.shape)
#pre = np.reshape(real_pred_ratings, (batch_users, num_movie))
pre = pre.detach().numpy()
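        # Mask out items the user already interacted with during training so they
        # cannot be ranked again at evaluation time.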
idx = np.zeros_like(pre, dtype=bool)
idx[train_m[u_b].nonzero()] = True
pre[idx] = -np.inf
recall = []
for kj in [50, 100, 200]:
idx_topk_part = np.argpartition(-pre, kj, 1)
# print pre[np.arange(batch_users)[:, np.newaxis], idx_topk_part[:, :kj]]
# print idx_topk_part
pre_bin = np.zeros_like(pre, dtype=bool)
pre_bin[np.arange(batch_users)[:, np.newaxis], idx_topk_part[:, :kj]] = True
# print pre_bin
true_bin = np.zeros_like(pre, dtype=bool)
true_bin[test_m[u_b].nonzero()] = True
tmp = (np.logical_and(true_bin, pre_bin).sum(axis=1)).astype(np.float32)
#print("tmp:",tmp)
recall.append(tmp / np.minimum(kj, true_bin.sum(axis=1)))
#print("recall:",tmp / np.minimum(kj, true_bin.sum(axis=1)))
# print tmp
#print("recall:",recall)
ndcg = []
for kj in [20, 40, 80]:
            # get the approximate indices of the top-kj elements (argpartition does not fully sort)
idx_topk_part = np.argpartition(-pre, kj, 1)
#print("pre:",pre.shape)
#
#print("idx_topk_part[:, :kj]:",idx_topk_part[:, :kj])
            # get the indices of each user's top-kj predictions
topk_part = pre[np.arange(batch_users)[:, np.newaxis], idx_topk_part[:, :kj]]
#print("topk_part:",topk_part[0:2])
idx_part = np.argsort(-topk_part, axis=1)
            # sort the predicted scores and output their indices from largest to smallest
#print("idx_part:",idx_part[0:2])
idx_topk = idx_topk_part[np.arange(end_index - start_index)[:, np.newaxis], idx_part]
            # recover the corresponding indices in the original item ordering
#print("idx_topk:",idx_topk[0:2])
tp = np.log(2) / np.log(np.arange(2, kj + 2))
test_batch = test_m[u_b]
#print("test_batch:",test_batch)
DCG = (test_batch[np.arange(batch_users)[:, np.newaxis], idx_topk].toarray() * tp).sum(axis=1)
            # compute DCG using only the ranks at which the true items appear in the predictions
#print("tp:",tp)
#print("DCG:",DCG)
IDCG = np.array([(tp[:min(n, kj)]).sum()
for n in test_batch.getnnz(axis=1)])
#print("IDCG:",np.array([(tp[:min(n, kj)]).sum()
# for n in test_batch.getnnz(axis=1)]))
ndcg.append(DCG / IDCG)
#print("ndcg:",ndcg)
recall50.append(recall[0])
recall100.append(recall[1])
recall200.append(recall[2])
ndcg50.append(ndcg[0])
ndcg100.append(ndcg[1])
ndcg200.append(ndcg[2])
recall50 = np.hstack(recall50)
recall100 = np.hstack(recall100)
recall200 = np.hstack(recall200)
ndcg50 = np.hstack(ndcg50)
ndcg100 = np.hstack(ndcg100)
ndcg200 = np.hstack(ndcg200)
print("recall50:",recall50[0:10])
print("ndcg50:", ndcg50.shape)
print("recall50:", np.mean(recall50), "ndcg50:",np.mean(ndcg50))
print("recall100:",np.mean(recall100),"ndcg100:", np.mean(ndcg100))
print("recall200:",np.mean(recall200), "ndcg200:",np.mean(ndcg200))
#f1.write(str(np.mean(recall100)) + ' ' + str(np.mean(ndcg100)) + '\n')
#f1.flush()
return np.mean(recall50), np.mean(recall100), np.mean(recall200), np.mean(ndcg50), np.mean(ndcg100), np.mean(ndcg200)
def config():
parser = argparse.ArgumentParser(description='PGMC')
parser.add_argument('--seed', default=125, type=int) #123
parser.add_argument('--device', default='1', type=int,
help='Running device. E.g `--device 0`, if using cpu, set `--device -1`')
parser.add_argument('--save_dir', type=str, help='The saving directory')
parser.add_argument('--save_id', type=int, help='The saving log id')
parser.add_argument('--silent', action='store_true')
parser.add_argument('--data_name', default='yahoo_music', type=str,
help='The dataset name: ml-100k, ml-1m, ml-10m, flixster, douban, yahoo_music')
parser.add_argument('--data_test_ratio', type=float, default=0.1) ## for ml-100k the test ration is 0.2
parser.add_argument('--data_valid_ratio', type=float, default=0.05)
parser.add_argument('--use_one_hot_fea', action='store_true', default=False)
parser.add_argument('--model_activation', type=str, default="leaky")
parser.add_argument('--sample_rate', type=int, default=1)
parser.add_argument('--gcn_dropout', type=float, default=0.7)
parser.add_argument('--gcn_agg_norm_symm', type=bool, default=True)
parser.add_argument('--gcn_agg_units', type=int, default=1800)
parser.add_argument('--gcn_agg_accum', type=str, default="sum")
parser.add_argument('--gcn_out_units', type=int, default=75)
parser.add_argument('--gen_r_num_basis_func', type=int, default=2)
parser.add_argument('--train_max_iter', type=int, default=50000)
parser.add_argument('--train_log_interval', type=int, default=1)
parser.add_argument('--train_valid_interval', type=int, default=1)
parser.add_argument('--train_optimizer', type=str, default="adam")
parser.add_argument('--decoder', type=str, default="Bi")
parser.add_argument('--train_grad_clip', type=float, default=1.0)
parser.add_argument('--train_lr', type=float, default=0.01)
parser.add_argument('--train_min_lr', type=float, default=0.001)
parser.add_argument('--train_lr_decay_factor', type=float, default=0.5)
parser.add_argument('--train_decay_patience', type=int, default=50)
parser.add_argument('--layers', type=int, default=1)
parser.add_argument('--train_early_stopping_patience', type=int, default=200)
parser.add_argument('--share_param', default=True, action='store_true')
parser.add_argument('--ARR', type=float, default='0.000004')
parser.add_argument('--loss_func', type=str, default='CE')
parser.add_argument('--sparse_ratio', type=float, default=0.0)
args = parser.parse_args()
args.device = th.device(args.device) if args.device >= 0 else th.device('cpu')
### configure save_fir to save all the info
now = int(round(time.time()*1000))
now02 = time.strftime('%Y-%m-%d %H:%M:%S',time.localtime(now/1000))
if args.save_dir is None:
args.save_dir = args.data_name+"_" + ''.join(now02)
if args.save_id is None:
args.save_id = np.random.randint(20)
args.save_dir = os.path.join("log", args.save_dir)
if not os.path.isdir(args.save_dir):
os.makedirs(args.save_dir)
return args
if __name__ == '__main__':
'''
ml_1m : param, ARR = 0.0000004, gcn_agg_units = 1000, gcn_agg_accum = sum, tmse = 0.8322, valid_ratio = 0.05
ml_100k : param, ARR = 0.000001, gcn_agg_units = 500, gcn_agg_accum = sum, tmse = 0.9046, valid_ratio = 0.05
1lyaer ml_1m : param, ARR = 0.0000005, gcn_agg_units = 2400, gcn_agg_accum = sum, tmse = 0.8305, valid_ratio = 0.05, gcn_out_units = 75
1layer ml_100k : param, pos_emb, ARR = 0.000005, gcn_agg_units = 750, gcn_agg_accum = sum, tmse = 0.8974, valid_ratio = 0.05, gcn_out_units = 75
2layer ml_100k : param, pos_emb, ARR = 0.000005, gcn_agg_units = 750, gcn_agg_accum = sum, tmse = 0.8969, valid_ratio = 0.05, gcn_out_units = 75
2lyaer ml_1m : param, ARR = 0.0000004, gcn_agg_units = 1800, gcn_agg_accum = sum, tmse = 0.8319, valid_ratio = 0.05, gcn_out_units = 75
'''
args = config()
np.random.seed(args.seed)
th.manual_seed(args.seed)
if th.cuda.is_available():
th.cuda.manual_seed_all(args.seed)
train(args)
|
[
"numpy.random.seed",
"argparse.ArgumentParser",
"random.sample",
"utils.get_activation",
"numpy.argsort",
"numpy.argpartition",
"numpy.random.randint",
"numpy.mean",
"utils.to_etype_name",
"numpy.arange",
"torch.device",
"model.MLPDecoder",
"os.path.join",
"numpy.zeros_like",
"torch.FloatTensor",
"torch.softmax",
"numpy.append",
"scipy.sparse.coo_matrix",
"random.seed",
"time.localtime",
"numpy.ones_like",
"torch.nn.ModuleList",
"torch.manual_seed",
"numpy.hstack",
"torch.cuda.is_available",
"dgl.bipartite_from_scipy",
"utils.get_optimizer",
"os.makedirs",
"numpy.log",
"numpy.logical_and",
"os.path.isdir",
"model.GCMCLayer",
"data.DataSetLoader",
"torch.nn.CrossEntropyLoss",
"time.time",
"torch.cuda.manual_seed_all",
"numpy.array",
"torch.cosine_similarity"
] |
[((2008, 2037), 'numpy.ones_like', 'np.ones_like', (['rating_pairs[0]'], {}), '(rating_pairs[0])\n', (2020, 2037), True, 'import numpy as np\n'), ((2067, 2154), 'scipy.sparse.coo_matrix', 'sp.coo_matrix', (['(ones, rating_pairs)'], {'shape': '(num_user, num_movie)', 'dtype': 'np.float32'}), '((ones, rating_pairs), shape=(num_user, num_movie), dtype=np.\n float32)\n', (2080, 2154), True, 'import scipy.sparse as sp\n'), ((2175, 2263), 'dgl.bipartite_from_scipy', 'dgl.bipartite_from_scipy', (['user_movie_ratings_coo'], {'utype': '"""_U"""', 'etype': '"""_E"""', 'vtype': '"""_V"""'}), "(user_movie_ratings_coo, utype='_U', etype='_E',\n vtype='_V')\n", (2199, 2263), False, 'import dgl\n'), ((7122, 7344), 'data.DataSetLoader', 'DataSetLoader', (['args.data_name', 'args.device'], {'use_one_hot_fea': 'args.use_one_hot_fea', 'symm': 'args.gcn_agg_norm_symm', 'test_ratio': 'args.data_test_ratio', 'valid_ratio': 'args.data_valid_ratio', 'sample_rate': 'args.sample_rate'}), '(args.data_name, args.device, use_one_hot_fea=args.\n use_one_hot_fea, symm=args.gcn_agg_norm_symm, test_ratio=args.\n data_test_ratio, valid_ratio=args.data_valid_ratio, sample_rate=args.\n sample_rate)\n', (7135, 7344), False, 'from data import DataSetLoader\n'), ((7812, 7833), 'torch.nn.CrossEntropyLoss', 'nn.CrossEntropyLoss', ([], {}), '()\n', (7831, 7833), True, 'import torch.nn as nn\n'), ((10558, 10609), 'numpy.random.randint', 'np.random.randint', (['(0)', '(10000)', '(args.train_max_iter,)'], {}), '(0, 10000, (args.train_max_iter,))\n', (10575, 10609), True, 'import numpy as np\n'), ((17375, 17394), 'numpy.hstack', 'np.hstack', (['recall50'], {}), '(recall50)\n', (17384, 17394), True, 'import numpy as np\n'), ((17411, 17431), 'numpy.hstack', 'np.hstack', (['recall100'], {}), '(recall100)\n', (17420, 17431), True, 'import numpy as np\n'), ((17448, 17468), 'numpy.hstack', 'np.hstack', (['recall200'], {}), '(recall200)\n', (17457, 17468), True, 'import numpy as np\n'), ((17482, 17499), 'numpy.hstack', 'np.hstack', (['ndcg50'], {}), '(ndcg50)\n', (17491, 17499), True, 'import numpy as np\n'), ((17514, 17532), 'numpy.hstack', 'np.hstack', (['ndcg100'], {}), '(ndcg100)\n', (17523, 17532), True, 'import numpy as np\n'), ((17547, 17565), 'numpy.hstack', 'np.hstack', (['ndcg200'], {}), '(ndcg200)\n', (17556, 17565), True, 'import numpy as np\n'), ((18096, 18139), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""PGMC"""'}), "(description='PGMC')\n", (18119, 18139), False, 'import argparse\n'), ((21107, 21141), 'os.path.join', 'os.path.join', (['"""log"""', 'args.save_dir'], {}), "('log', args.save_dir)\n", (21119, 21141), False, 'import os, time\n'), ((22107, 22132), 'numpy.random.seed', 'np.random.seed', (['args.seed'], {}), '(args.seed)\n', (22121, 22132), True, 'import numpy as np\n'), ((22137, 22162), 'torch.manual_seed', 'th.manual_seed', (['args.seed'], {}), '(args.seed)\n', (22151, 22162), True, 'import torch as th\n'), ((22170, 22192), 'torch.cuda.is_available', 'th.cuda.is_available', ([], {}), '()\n', (22190, 22192), True, 'import torch as th\n'), ((2520, 2557), 'utils.get_activation', 'get_activation', (['args.model_activation'], {}), '(args.model_activation)\n', (2534, 2557), False, 'from utils import get_activation, get_optimizer, torch_total_param_num, torch_net_info, MetricLogger\n'), ((2581, 2596), 'torch.nn.ModuleList', 'nn.ModuleList', ([], {}), '()\n', (2594, 2596), True, 'import torch.nn as nn\n'), ((7884, 7919), 'utils.get_optimizer', 'get_optimizer', (['args.train_optimizer'], 
{}), '(args.train_optimizer)\n', (7897, 7919), False, 'from utils import get_activation, get_optimizer, torch_total_param_num, torch_net_info, MetricLogger\n'), ((8298, 8337), 'os.path.join', 'os.path.join', (['args.save_dir', '"""NDCG.csv"""'], {}), "(args.save_dir, 'NDCG.csv')\n", (8310, 8337), False, 'import os, time\n'), ((9867, 9924), 'numpy.append', 'np.append', (['train_rating_values', 'train_rating_values_zeros'], {}), '(train_rating_values, train_rating_values_zeros)\n', (9876, 9924), True, 'import numpy as np\n'), ((10304, 10330), 'random.seed', 'random.seed', (['random_number'], {}), '(random_number)\n', (10315, 10330), False, 'import random\n'), ((14794, 14824), 'numpy.zeros_like', 'np.zeros_like', (['pre'], {'dtype': 'bool'}), '(pre, dtype=bool)\n', (14807, 14824), True, 'import numpy as np\n'), ((17663, 17680), 'numpy.mean', 'np.mean', (['recall50'], {}), '(recall50)\n', (17670, 17680), True, 'import numpy as np\n'), ((17692, 17707), 'numpy.mean', 'np.mean', (['ndcg50'], {}), '(ndcg50)\n', (17699, 17707), True, 'import numpy as np\n'), ((17732, 17750), 'numpy.mean', 'np.mean', (['recall100'], {}), '(recall100)\n', (17739, 17750), True, 'import numpy as np\n'), ((17763, 17779), 'numpy.mean', 'np.mean', (['ndcg100'], {}), '(ndcg100)\n', (17770, 17779), True, 'import numpy as np\n'), ((17804, 17822), 'numpy.mean', 'np.mean', (['recall200'], {}), '(recall200)\n', (17811, 17822), True, 'import numpy as np\n'), ((17835, 17851), 'numpy.mean', 'np.mean', (['ndcg200'], {}), '(ndcg200)\n', (17842, 17851), True, 'import numpy as np\n'), ((17957, 17974), 'numpy.mean', 'np.mean', (['recall50'], {}), '(recall50)\n', (17964, 17974), True, 'import numpy as np\n'), ((17976, 17994), 'numpy.mean', 'np.mean', (['recall100'], {}), '(recall100)\n', (17983, 17994), True, 'import numpy as np\n'), ((17996, 18014), 'numpy.mean', 'np.mean', (['recall200'], {}), '(recall200)\n', (18003, 18014), True, 'import numpy as np\n'), ((18016, 18031), 'numpy.mean', 'np.mean', (['ndcg50'], {}), '(ndcg50)\n', (18023, 18031), True, 'import numpy as np\n'), ((18033, 18049), 'numpy.mean', 'np.mean', (['ndcg100'], {}), '(ndcg100)\n', (18040, 18049), True, 'import numpy as np\n'), ((18051, 18067), 'numpy.mean', 'np.mean', (['ndcg200'], {}), '(ndcg200)\n', (18058, 18067), True, 'import numpy as np\n'), ((20698, 20720), 'torch.device', 'th.device', (['args.device'], {}), '(args.device)\n', (20707, 20720), True, 'import torch as th\n'), ((20746, 20762), 'torch.device', 'th.device', (['"""cpu"""'], {}), "('cpu')\n", (20755, 20762), True, 'import torch as th\n'), ((20897, 20923), 'time.localtime', 'time.localtime', (['(now / 1000)'], {}), '(now / 1000)\n', (20911, 20923), False, 'import os, time\n'), ((21065, 21086), 'numpy.random.randint', 'np.random.randint', (['(20)'], {}), '(20)\n', (21082, 21086), True, 'import numpy as np\n'), ((21153, 21181), 'os.path.isdir', 'os.path.isdir', (['args.save_dir'], {}), '(args.save_dir)\n', (21166, 21181), False, 'import os, time\n'), ((21191, 21217), 'os.makedirs', 'os.makedirs', (['args.save_dir'], {}), '(args.save_dir)\n', (21202, 21217), False, 'import os, time\n'), ((22202, 22236), 'torch.cuda.manual_seed_all', 'th.cuda.manual_seed_all', (['args.seed'], {}), '(args.seed)\n', (22225, 22236), True, 'import torch as th\n'), ((1355, 1384), 'random.sample', 'random.sample', (['x', 'sample_rate'], {}), '(x, sample_rate)\n', (1368, 1384), False, 'import random\n'), ((2625, 2861), 'model.GCMCLayer', 'GCMCLayer', (['args.rating_vals', 'args.src_in_units', 'args.dst_in_units', 
'args.gcn_agg_units', 'args.gcn_out_units', 'args.gcn_dropout', 'args.gcn_agg_accum'], {'agg_act': 'self._act', 'share_user_item_param': 'args.share_param', 'device': 'args.device'}), '(args.rating_vals, args.src_in_units, args.dst_in_units, args.\n gcn_agg_units, args.gcn_out_units, args.gcn_dropout, args.gcn_agg_accum,\n agg_act=self._act, share_user_item_param=args.share_param, device=args.\n device)\n', (2634, 2861), False, 'from model import BiDecoder, GCMCLayer, MLPDecoder\n'), ((6880, 6901), 'utils.to_etype_name', 'to_etype_name', (['rating'], {}), '(rating)\n', (6893, 6901), False, 'from utils import to_etype_name\n'), ((7727, 7773), 'torch.FloatTensor', 'th.FloatTensor', (['dataset.possible_rating_values'], {}), '(dataset.possible_rating_values)\n', (7741, 7773), True, 'import torch as th\n'), ((9711, 9772), 'numpy.append', 'np.append', (['train_rating_pairs[0]', 'train_rating_pairs_zeros[0]'], {}), '(train_rating_pairs[0], train_rating_pairs_zeros[0])\n', (9720, 9772), True, 'import numpy as np\n'), ((9774, 9835), 'numpy.append', 'np.append', (['train_rating_pairs[1]', 'train_rating_pairs_zeros[1]'], {}), '(train_rating_pairs[1], train_rating_pairs_zeros[1])\n', (9783, 9835), True, 'import numpy as np\n'), ((14985, 15013), 'numpy.argpartition', 'np.argpartition', (['(-pre)', 'kj', '(1)'], {}), '(-pre, kj, 1)\n', (15000, 15013), True, 'import numpy as np\n'), ((15156, 15186), 'numpy.zeros_like', 'np.zeros_like', (['pre'], {'dtype': 'bool'}), '(pre, dtype=bool)\n', (15169, 15186), True, 'import numpy as np\n'), ((15329, 15359), 'numpy.zeros_like', 'np.zeros_like', (['pre'], {'dtype': 'bool'}), '(pre, dtype=bool)\n', (15342, 15359), True, 'import numpy as np\n'), ((15836, 15864), 'numpy.argpartition', 'np.argpartition', (['(-pre)', 'kj', '(1)'], {}), '(-pre, kj, 1)\n', (15851, 15864), True, 'import numpy as np\n'), ((16180, 16210), 'numpy.argsort', 'np.argsort', (['(-topk_part)'], {'axis': '(1)'}), '(-topk_part, axis=1)\n', (16190, 16210), True, 'import numpy as np\n'), ((3629, 3883), 'model.GCMCLayer', 'GCMCLayer', (['args.rating_vals', 'args.gcn_out_units', 'args.gcn_out_units', 'gcn_out_units', 'args.gcn_out_units', '(args.gcn_dropout - i * 0.1)', 'args.gcn_agg_accum'], {'agg_act': 'self._act', 'share_user_item_param': 'args.share_param', 'ini': '(False)', 'device': 'args.device'}), '(args.rating_vals, args.gcn_out_units, args.gcn_out_units,\n gcn_out_units, args.gcn_out_units, args.gcn_dropout - i * 0.1, args.\n gcn_agg_accum, agg_act=self._act, share_user_item_param=args.\n share_param, ini=False, device=args.device)\n', (3638, 3883), False, 'from model import BiDecoder, GCMCLayer, MLPDecoder\n'), ((4990, 5110), 'model.MLPDecoder', 'MLPDecoder', ([], {'in_units': '(args.gcn_out_units * args.layers)', 'num_classes': 'num_classes', 'num_basis': 'args.gen_r_num_basis_func'}), '(in_units=args.gcn_out_units * args.layers, num_classes=\n num_classes, num_basis=args.gen_r_num_basis_func)\n', (5000, 5110), False, 'from model import BiDecoder, GCMCLayer, MLPDecoder\n'), ((10427, 10456), 'random.sample', 'random.sample', (['x', 'sample_rate'], {}), '(x, sample_rate)\n', (10440, 10456), False, 'import random\n'), ((16487, 16496), 'numpy.log', 'np.log', (['(2)'], {}), '(2)\n', (16493, 16496), True, 'import numpy as np\n'), ((20832, 20843), 'time.time', 'time.time', ([], {}), '()\n', (20841, 20843), False, 'import os, time\n'), ((1586, 1638), 'numpy.array', 'np.array', (['[([ele] * num_movie) for ele in user_list]'], {}), '([([ele] * num_movie) for ele in user_list])\n', (1594, 1638), True, 
'import numpy as np\n'), ((11887, 11918), 'torch.softmax', 'th.softmax', (['pred_ratings'], {'dim': '(1)'}), '(pred_ratings, dim=1)\n', (11897, 11918), True, 'import torch as th\n'), ((14331, 14362), 'torch.softmax', 'th.softmax', (['pred_ratings'], {'dim': '(1)'}), '(pred_ratings, dim=1)\n', (14341, 14362), True, 'import torch as th\n'), ((16506, 16526), 'numpy.arange', 'np.arange', (['(2)', '(kj + 2)'], {}), '(2, kj + 2)\n', (16515, 16526), True, 'import numpy as np\n'), ((6961, 7016), 'torch.cosine_similarity', 'th.cosine_similarity', (['W[i, :, :]', 'W[i - 1, :, :]'], {'dim': '(1)'}), '(W[i, :, :], W[i - 1, :, :], dim=1)\n', (6981, 7016), True, 'import torch as th\n'), ((15207, 15229), 'numpy.arange', 'np.arange', (['batch_users'], {}), '(batch_users)\n', (15216, 15229), True, 'import numpy as np\n'), ((16047, 16069), 'numpy.arange', 'np.arange', (['batch_users'], {}), '(batch_users)\n', (16056, 16069), True, 'import numpy as np\n'), ((16332, 16366), 'numpy.arange', 'np.arange', (['(end_index - start_index)'], {}), '(end_index - start_index)\n', (16341, 16366), True, 'import numpy as np\n'), ((15431, 15464), 'numpy.logical_and', 'np.logical_and', (['true_bin', 'pre_bin'], {}), '(true_bin, pre_bin)\n', (15445, 15464), True, 'import numpy as np\n'), ((1733, 1753), 'numpy.arange', 'np.arange', (['num_movie'], {}), '(num_movie)\n', (1742, 1753), True, 'import numpy as np\n'), ((16644, 16666), 'numpy.arange', 'np.arange', (['batch_users'], {}), '(batch_users)\n', (16653, 16666), True, 'import numpy as np\n')]
|
from django.db import models
# Create your models here.
class Page(models.Model):
STATUS_CHOICES = (
(1, 'Active'),
(2, 'Inactive'),
)
PAGE_CHOICES = (
(1, 'Home'),
(2, 'About Us'),
)
page = models.PositiveSmallIntegerField(choices=PAGE_CHOICES,unique=True)
title = models.CharField(max_length = 255, null=True, blank=True)
heading = models.CharField(max_length = 255, null=True, blank=True)
short_description = models.CharField(max_length = 255, null=True, blank=True)
long_description = models.CharField(max_length = 255, null=True, blank=True)
image = models.ImageField(upload_to='static/uploads/cms/', null=True, blank=True)
background_image = models.ImageField(upload_to='static/uploads/cms/', null=True, blank=True)
status = models.PositiveSmallIntegerField(choices=STATUS_CHOICES, null=True, blank=True)
def __str__(self): # __unicode__ for Python 2
return self.title + " (" + str(self.page) + ")"
|
[
"django.db.models.CharField",
"django.db.models.ImageField",
"django.db.models.PositiveSmallIntegerField"
] |
[((246, 313), 'django.db.models.PositiveSmallIntegerField', 'models.PositiveSmallIntegerField', ([], {'choices': 'PAGE_CHOICES', 'unique': '(True)'}), '(choices=PAGE_CHOICES, unique=True)\n', (278, 313), False, 'from django.db import models\n'), ((325, 380), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(255)', 'null': '(True)', 'blank': '(True)'}), '(max_length=255, null=True, blank=True)\n', (341, 380), False, 'from django.db import models\n'), ((397, 452), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(255)', 'null': '(True)', 'blank': '(True)'}), '(max_length=255, null=True, blank=True)\n', (413, 452), False, 'from django.db import models\n'), ((479, 534), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(255)', 'null': '(True)', 'blank': '(True)'}), '(max_length=255, null=True, blank=True)\n', (495, 534), False, 'from django.db import models\n'), ((560, 615), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(255)', 'null': '(True)', 'blank': '(True)'}), '(max_length=255, null=True, blank=True)\n', (576, 615), False, 'from django.db import models\n'), ((630, 703), 'django.db.models.ImageField', 'models.ImageField', ([], {'upload_to': '"""static/uploads/cms/"""', 'null': '(True)', 'blank': '(True)'}), "(upload_to='static/uploads/cms/', null=True, blank=True)\n", (647, 703), False, 'from django.db import models\n'), ((727, 800), 'django.db.models.ImageField', 'models.ImageField', ([], {'upload_to': '"""static/uploads/cms/"""', 'null': '(True)', 'blank': '(True)'}), "(upload_to='static/uploads/cms/', null=True, blank=True)\n", (744, 800), False, 'from django.db import models\n'), ((814, 893), 'django.db.models.PositiveSmallIntegerField', 'models.PositiveSmallIntegerField', ([], {'choices': 'STATUS_CHOICES', 'null': '(True)', 'blank': '(True)'}), '(choices=STATUS_CHOICES, null=True, blank=True)\n', (846, 893), False, 'from django.db import models\n')]
|
"""
ShadeSketch
https://github.com/qyzdao/ShadeSketch
Learning to Shadow Hand-drawn Sketches
<NAME>, <NAME>, <NAME>
Copyright (C) 2020 The respective authors and Project HAT. All rights reserved.
Licensed under MIT license.
"""
import tensorflow as tf
# import keras
keras = tf.keras
K = keras.backend
Layer = keras.layers.Layer
Conv2D = keras.layers.Conv2D
InputSpec = keras.layers.InputSpec
image_data_format = K.image_data_format
activations = keras.activations
initializers = keras.initializers
regularizers = keras.regularizers
constraints = keras.constraints
class Composite(Layer):
def __init__(self,
data_format='channels_last',
**kwargs):
self.data_format = data_format
super(Composite, self).__init__(**kwargs)
def call(self, inputs):
line_inputs, shade_inputs = inputs
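        # shade_inputs is presumably in [-1, 1]; (shade + 1) * 0.25 rescales it to
        # [0, 0.5] before it is added onto the line drawing.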
return line_inputs + (shade_inputs + 1) * 0.25
def compute_output_shape(self, input_shape):
return input_shape[0]
class PixelwiseConcat(Layer):
def __init__(self,
data_format='channels_last',
**kwargs):
self.data_format = data_format
super(PixelwiseConcat, self).__init__(**kwargs)
def call(self, inputs):
pixel_inputs, unit_inputs = inputs
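        # Tile the per-sample unit vector over every spatial position of pixel_inputs,
        # then concatenate it along the channel axis.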
if self.data_format == 'channels_first':
repeated_unit_inputs = tf.tile(
K.expand_dims(K.expand_dims(unit_inputs, 2), 2),
[1, K.shape(pixel_inputs)[2], K.shape(pixel_inputs)[3], 1]
)
elif self.data_format == 'channels_last':
repeated_unit_inputs = tf.tile(
K.expand_dims(K.expand_dims(unit_inputs, 1), 1),
[1, K.shape(pixel_inputs)[1], K.shape(pixel_inputs)[2], 1]
)
return K.concatenate([pixel_inputs, repeated_unit_inputs])
def compute_output_shape(self, input_shape):
if self.data_format == 'channels_first':
return (input_shape[0][0], input_shape[0][1] + input_shape[1][1], input_shape[0][2], input_shape[0][3])
elif self.data_format == 'channels_last':
return (input_shape[0][0], input_shape[0][1], input_shape[0][2], input_shape[0][3] + input_shape[1][1])
class SubPixelConv2D(Conv2D):
def __init__(self,
filters,
kernel_size,
r,
padding='same',
data_format=None,
strides=(1, 1),
activation=None,
use_bias=True,
kernel_initializer='glorot_uniform',
bias_initializer='zeros',
kernel_regularizer=None,
bias_regularizer=None,
activity_regularizer=None,
kernel_constraint=None,
bias_constraint=None,
**kwargs):
super(SubPixelConv2D, self).__init__(
filters=r * r * filters,
kernel_size=kernel_size,
strides=strides,
padding=padding,
data_format=data_format,
activation=activation,
use_bias=use_bias,
kernel_initializer=kernel_initializer,
bias_initializer=bias_initializer,
kernel_regularizer=kernel_regularizer,
bias_regularizer=bias_regularizer,
activity_regularizer=activity_regularizer,
kernel_constraint=kernel_constraint,
bias_constraint=bias_constraint,
**kwargs)
self.r = r
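        # Sub-pixel (pixel-shuffle) upsampling: the convolution outputs r*r*filters
        # channels, and depth_to_space rearranges them into an r-times larger map.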
if hasattr(tf.nn, 'depth_to_space'):
self.depth_to_space = tf.nn.depth_to_space
else:
self.depth_to_space = tf.depth_to_space
def phase_shift(self, I):
if self.data_format == 'channels_first':
return self.depth_to_space(I, self.r, data_format="NCHW")
elif self.data_format == 'channels_last':
return self.depth_to_space(I, self.r, data_format="NHWC")
def call(self, inputs):
return self.phase_shift(super(SubPixelConv2D, self).call(inputs))
def compute_output_shape(self, input_shape):
if self.data_format == 'channels_first':
n, c, h, w = super(SubPixelConv2D, self).compute_output_shape(input_shape)
elif self.data_format == 'channels_last':
n, h, w, c = super(SubPixelConv2D, self).compute_output_shape(input_shape)
if h is not None:
h = int(self.r * h)
if w is not None:
w = int(self.r * w)
c = int(c / (self.r * self.r))
if self.data_format == 'channels_first':
return (n, c, h, w)
elif self.data_format == 'channels_last':
return (n, h, w, c)
def get_config(self):
config = super(Conv2D, self).get_config()
config.pop('rank')
config.pop('dilation_rate')
config['filters'] /= self.r * self.r
config['r'] = self.r
return config
class SelfAttention(Layer):
def __init__(self,
data_format='channels_last',
activation=None,
use_bias=True,
kernel_initializer='glorot_uniform',
bias_initializer='zeros',
kernel_regularizer=None,
bias_regularizer=None,
activity_regularizer=None,
kernel_constraint=None,
bias_constraint=None,
**kwargs):
super(SelfAttention, self).__init__(**kwargs)
self.data_format = data_format
self.activation = activations.get(activation)
self.use_bias = use_bias
self.kernel_initializer = initializers.get(kernel_initializer)
self.bias_initializer = initializers.get(bias_initializer)
self.kernel_regularizer = regularizers.get(kernel_regularizer)
self.bias_regularizer = regularizers.get(bias_regularizer)
self.activity_regularizer = regularizers.get(activity_regularizer)
self.kernel_constraint = constraints.get(kernel_constraint)
self.bias_constraint = constraints.get(bias_constraint)
def build(self, input_shape):
if self.data_format == 'channels_first':
channel_axis = 1
else:
channel_axis = -1
kernel_size = (1, 1)
self.filters = int(input_shape[channel_axis])
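        # SAGAN-style self-attention: f and g project to C/8 channels to form the
        # attention map, while h keeps all C channels as the value projection.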
self.kernel_f = self.add_weight(shape=kernel_size + (self.filters, self.filters // 8),
initializer=self.kernel_initializer,
name='kernel_f',
regularizer=self.kernel_regularizer,
constraint=self.kernel_constraint)
self.kernel_g = self.add_weight(shape=kernel_size + (self.filters, self.filters // 8),
initializer=self.kernel_initializer,
name='kernel_g',
regularizer=self.kernel_regularizer,
constraint=self.kernel_constraint)
self.kernel_h = self.add_weight(shape=kernel_size + (self.filters, self.filters),
initializer=self.kernel_initializer,
name='kernel_h',
regularizer=self.kernel_regularizer,
constraint=self.kernel_constraint)
if self.use_bias:
self.bias_f = self.add_weight(shape=(self.filters // 8,),
initializer=self.bias_initializer,
name='bias_f',
regularizer=self.bias_regularizer,
constraint=self.bias_constraint)
self.bias_g = self.add_weight(shape=(self.filters // 8,),
initializer=self.bias_initializer,
name='bias_g',
regularizer=self.bias_regularizer,
constraint=self.bias_constraint)
self.bias_h = self.add_weight(shape=(self.filters,),
initializer=self.bias_initializer,
name='bias_h',
regularizer=self.bias_regularizer,
constraint=self.bias_constraint)
else:
self.bias_f = None
self.bias_g = None
self.bias_h = None
self.gamma = self.add_weight(
name='gamma',
shape=(1,),
initializer=initializers.Constant(0)
)
super(SelfAttention, self).build(input_shape)
def call(self, inputs):
f = K.conv2d(inputs,
self.kernel_f,
data_format=self.data_format,
strides=(1, 1),
dilation_rate=(1, 1)) # [bs, h, w, c']
g = K.conv2d(inputs,
self.kernel_g,
data_format=self.data_format,
strides=(1, 1),
dilation_rate=(1, 1)) # [bs, h, w, c']
h = K.conv2d(inputs,
self.kernel_h,
data_format=self.data_format,
strides=(1, 1),
dilation_rate=(1, 1)) # [bs, h, w, c]
if self.use_bias:
f = K.bias_add(f, self.bias_f, data_format=self.data_format) # [bs, h, w, c']
g = K.bias_add(g, self.bias_g, data_format=self.data_format) # [bs, h, w, c']
h = K.bias_add(h, self.bias_h, data_format=self.data_format) # [bs, h, w, c]
# N = h * w
s = K.dot(K.batch_flatten(g), K.transpose(K.batch_flatten(f))) # # [bs, N, N]
beta = K.softmax(s) # attention map
o = K.dot(beta, K.batch_flatten(h)) # [bs, N, C]
o = K.reshape(o, K.shape(inputs)) # [bs, h, w, C]
return self.activation(self.gamma * o + inputs)
def compute_output_shape(self, input_shape):
return input_shape
def get_config(self):
config = {
'activation': activations.serialize(self.activation),
'data_format': self.data_format,
'use_bias': self.use_bias,
'kernel_initializer': initializers.serialize(self.kernel_initializer),
'bias_initializer': initializers.serialize(self.bias_initializer),
'kernel_regularizer': regularizers.serialize(self.kernel_regularizer),
'bias_regularizer': regularizers.serialize(self.bias_regularizer),
'activity_regularizer': regularizers.serialize(self.activity_regularizer),
'kernel_constraint': constraints.serialize(self.kernel_constraint),
'bias_constraint': constraints.serialize(self.bias_constraint)
}
base_config = super(SelfAttention, self).get_config()
return dict(list(base_config.items()) + list(config.items()))
"""
Implementation of Coordinate Channel
keras-coordconv
MIT License
Copyright (c) 2018 <NAME>
https://github.com/titu1994/keras-coordconv/blob/master/coord.py
"""
class _CoordinateChannel(Layer):
""" Adds Coordinate Channels to the input tensor.
# Arguments
rank: An integer, the rank of the input data-uniform,
e.g. "2" for 2D convolution.
use_radius: Boolean flag to determine whether the
radius coordinate should be added for 2D rank
inputs or not.
data_format: A string,
one of `"channels_last"` or `"channels_first"`.
The ordering of the dimensions in the inputs.
`"channels_last"` corresponds to inputs with shape
`(batch, ..., channels)` while `"channels_first"` corresponds to
inputs with shape `(batch, channels, ...)`.
It defaults to the `image_data_format` value found in your
Keras config file at `~/.keras/keras.json`.
If you never set it, then it will be "channels_last".
# Input shape
ND tensor with shape:
`(samples, channels, *)`
if `data_format` is `"channels_first"`
or ND tensor with shape:
`(samples, *, channels)`
if `data_format` is `"channels_last"`.
# Output shape
ND tensor with shape:
`(samples, channels + 2, *)`
if `data_format` is `"channels_first"`
or 5D tensor with shape:
`(samples, *, channels + 2)`
if `data_format` is `"channels_last"`.
# References:
- [An Intriguing Failing of Convolutional Neural Networks and the CoordConv Solution](https://arxiv.org/abs/1807.03247)
"""
def __init__(self, rank,
use_radius=False,
data_format='channels_last',
**kwargs):
super(_CoordinateChannel, self).__init__(**kwargs)
if data_format not in [None, 'channels_first', 'channels_last']:
raise ValueError('`data_format` must be either "channels_last", "channels_first" '
'or None.')
self.rank = rank
self.use_radius = use_radius
self.data_format = data_format
self.axis = 1 if image_data_format() == 'channels_first' else -1
self.input_spec = InputSpec(min_ndim=2)
self.supports_masking = True
def build(self, input_shape):
assert len(input_shape) >= 2
input_dim = input_shape[self.axis]
self.input_spec = InputSpec(min_ndim=self.rank + 2,
axes={self.axis: input_dim})
self.built = True
def call(self, inputs, training=None, mask=None):
input_shape = K.shape(inputs)
if self.rank == 1:
input_shape = [input_shape[i] for i in range(3)]
batch_shape, dim, channels = input_shape
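            # Append one coordinate channel that ramps linearly from -1 to 1 along
            # the length dimension.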
xx_range = tf.tile(K.expand_dims(K.arange(0, dim), axis=0),
K.stack([batch_shape, 1]))
xx_range = K.expand_dims(xx_range, axis=-1)
xx_channels = K.cast(xx_range, K.floatx())
xx_channels = xx_channels / K.cast(dim - 1, K.floatx())
xx_channels = (xx_channels * 2) - 1.
outputs = K.concatenate([inputs, xx_channels], axis=-1)
if self.rank == 2:
if self.data_format == 'channels_first':
inputs = K.permute_dimensions(inputs, [0, 2, 3, 1])
input_shape = K.shape(inputs)
input_shape = [input_shape[i] for i in range(4)]
batch_shape, dim1, dim2, channels = input_shape
xx_ones = tf.ones(K.stack([batch_shape, dim2]), dtype='int32')
xx_ones = K.expand_dims(xx_ones, axis=-1)
xx_range = tf.tile(K.expand_dims(K.arange(0, dim1), axis=0),
K.stack([batch_shape, 1]))
xx_range = K.expand_dims(xx_range, axis=1)
xx_channels = K.batch_dot(xx_ones, xx_range, axes=[2, 1])
xx_channels = K.expand_dims(xx_channels, axis=-1)
xx_channels = K.permute_dimensions(xx_channels, [0, 2, 1, 3])
yy_ones = tf.ones(K.stack([batch_shape, dim1]), dtype='int32')
yy_ones = K.expand_dims(yy_ones, axis=1)
yy_range = tf.tile(K.expand_dims(K.arange(0, dim2), axis=0),
K.stack([batch_shape, 1]))
yy_range = K.expand_dims(yy_range, axis=-1)
yy_channels = K.batch_dot(yy_range, yy_ones, axes=[2, 1])
yy_channels = K.expand_dims(yy_channels, axis=-1)
yy_channels = K.permute_dimensions(yy_channels, [0, 2, 1, 3])
xx_channels = K.cast(xx_channels, K.floatx())
xx_channels = xx_channels / K.cast(dim1 - 1, K.floatx())
xx_channels = (xx_channels * 2) - 1.
yy_channels = K.cast(yy_channels, K.floatx())
yy_channels = yy_channels / K.cast(dim2 - 1, K.floatx())
yy_channels = (yy_channels * 2) - 1.
outputs = K.concatenate([inputs, xx_channels, yy_channels], axis=-1)
if self.use_radius:
rr = K.sqrt(K.square(xx_channels - 0.5) +
K.square(yy_channels - 0.5))
outputs = K.concatenate([outputs, rr], axis=-1)
if self.data_format == 'channels_first':
outputs = K.permute_dimensions(outputs, [0, 3, 1, 2])
if self.rank == 3:
if self.data_format == 'channels_first':
inputs = K.permute_dimensions(inputs, [0, 2, 3, 4, 1])
input_shape = K.shape(inputs)
input_shape = [input_shape[i] for i in range(5)]
batch_shape, dim1, dim2, dim3, channels = input_shape
xx_ones = tf.ones(K.stack([batch_shape, dim3]), dtype='int32')
xx_ones = K.expand_dims(xx_ones, axis=-1)
xx_range = tf.tile(K.expand_dims(K.arange(0, dim2), axis=0),
K.stack([batch_shape, 1]))
xx_range = K.expand_dims(xx_range, axis=1)
xx_channels = K.batch_dot(xx_ones, xx_range, axes=[2, 1])
xx_channels = K.expand_dims(xx_channels, axis=-1)
xx_channels = K.permute_dimensions(xx_channels, [0, 2, 1, 3])
xx_channels = K.expand_dims(xx_channels, axis=1)
xx_channels = tf.tile(xx_channels,
[1, dim1, 1, 1, 1])
yy_ones = tf.ones(K.stack([batch_shape, dim2]), dtype='int32')
yy_ones = K.expand_dims(yy_ones, axis=1)
yy_range = tf.tile(K.expand_dims(K.arange(0, dim3), axis=0),
K.stack([batch_shape, 1]))
yy_range = K.expand_dims(yy_range, axis=-1)
yy_channels = K.batch_dot(yy_range, yy_ones, axes=[2, 1])
yy_channels = K.expand_dims(yy_channels, axis=-1)
yy_channels = K.permute_dimensions(yy_channels, [0, 2, 1, 3])
yy_channels = K.expand_dims(yy_channels, axis=1)
yy_channels = tf.tile(yy_channels,
[1, dim1, 1, 1, 1])
zz_range = tf.tile(K.expand_dims(K.arange(0, dim1), axis=0),
K.stack([batch_shape, 1]))
zz_range = K.expand_dims(zz_range, axis=-1)
zz_range = K.expand_dims(zz_range, axis=-1)
zz_channels = tf.tile(zz_range,
[1, 1, dim2, dim3])
zz_channels = K.expand_dims(zz_channels, axis=-1)
xx_channels = K.cast(xx_channels, K.floatx())
xx_channels = xx_channels / K.cast(dim2 - 1, K.floatx())
xx_channels = xx_channels * 2 - 1.
yy_channels = K.cast(yy_channels, K.floatx())
yy_channels = yy_channels / K.cast(dim3 - 1, K.floatx())
yy_channels = yy_channels * 2 - 1.
zz_channels = K.cast(zz_channels, K.floatx())
zz_channels = zz_channels / K.cast(dim1 - 1, K.floatx())
zz_channels = zz_channels * 2 - 1.
outputs = K.concatenate([inputs, zz_channels, xx_channels, yy_channels],
axis=-1)
if self.data_format == 'channels_first':
outputs = K.permute_dimensions(outputs, [0, 4, 1, 2, 3])
return outputs
def compute_output_shape(self, input_shape):
assert input_shape and len(input_shape) >= 2
assert input_shape[self.axis]
if self.use_radius and self.rank == 2:
channel_count = 3
else:
channel_count = self.rank
output_shape = list(input_shape)
output_shape[self.axis] = input_shape[self.axis] + channel_count
return tuple(output_shape)
def get_config(self):
config = {
'rank': self.rank,
'use_radius': self.use_radius,
'data_format': self.data_format
}
base_config = super(_CoordinateChannel, self).get_config()
return dict(list(base_config.items()) + list(config.items()))
class CoordinateChannel1D(_CoordinateChannel):
""" Adds Coordinate Channels to the input tensor of rank 1.
# Arguments
data_format: A string,
one of `"channels_last"` or `"channels_first"`.
The ordering of the dimensions in the inputs.
`"channels_last"` corresponds to inputs with shape
`(batch, ..., channels)` while `"channels_first"` corresponds to
inputs with shape `(batch, channels, ...)`.
It defaults to the `image_data_format` value found in your
Keras config file at `~/.keras/keras.json`.
If you never set it, then it will be "channels_last".
# Input shape
3D tensor with shape: `(batch_size, steps, input_dim)`
# Output shape
        3D tensor with shape: `(batch_size, steps, input_dim + 1)`
# References:
- [An Intriguing Failing of Convolutional Neural Networks and the CoordConv Solution](https://arxiv.org/abs/1807.03247)
"""
def __init__(self, data_format=None, **kwargs):
super(CoordinateChannel1D, self).__init__(
rank=1,
use_radius=False,
data_format=data_format,
**kwargs
)
def get_config(self):
config = super(CoordinateChannel1D, self).get_config()
config.pop('rank')
config.pop('use_radius')
return config
class CoordinateChannel2D(_CoordinateChannel):
""" Adds Coordinate Channels to the input tensor.
# Arguments
use_radius: Boolean flag to determine whether the
radius coordinate should be added for 2D rank
inputs or not.
data_format: A string,
one of `"channels_last"` or `"channels_first"`.
The ordering of the dimensions in the inputs.
`"channels_last"` corresponds to inputs with shape
`(batch, ..., channels)` while `"channels_first"` corresponds to
inputs with shape `(batch, channels, ...)`.
It defaults to the `image_data_format` value found in your
Keras config file at `~/.keras/keras.json`.
If you never set it, then it will be "channels_last".
# Input shape
4D tensor with shape:
`(samples, channels, rows, cols)`
if `data_format` is `"channels_first"`
or 4D tensor with shape:
`(samples, rows, cols, channels)`
if `data_format` is `"channels_last"`.
# Output shape
4D tensor with shape:
`(samples, channels + 2/3, rows, cols)`
if `data_format` is `"channels_first"`
or 4D tensor with shape:
`(samples, rows, cols, channels + 2/3)`
if `data_format` is `"channels_last"`.
        If `use_radius` is set, then 3 additional filters will be added,
        else only 2 additional filters will be added.
# References:
- [An Intriguing Failing of Convolutional Neural Networks and the CoordConv Solution](https://arxiv.org/abs/1807.03247)
"""
def __init__(self, use_radius=False,
data_format=None,
**kwargs):
super(CoordinateChannel2D, self).__init__(
rank=2,
use_radius=use_radius,
data_format=data_format,
**kwargs
)
def get_config(self):
config = super(CoordinateChannel2D, self).get_config()
config.pop('rank')
return config
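# Hedged usage sketch (editor's addition, not part of the original module): wiring the
# layers defined above into a small model. The `keras.layers`/`keras.models` imports are
# an assumption about the package layout; swap in `tensorflow.keras` if that is what the
# surrounding backend imports resolve to.
if __name__ == "__main__":
    from keras.layers import Conv2D, Input
    from keras.models import Model
    inp = Input(shape=(64, 64, 3))                    # channels_last image input
    x = CoordinateChannel2D(use_radius=True)(inp)     # appends xx, yy and rr -> 6 channels
    x = Conv2D(16, (3, 3), padding='same')(x)
    model = Model(inp, x)
    model.summary()
    seq = Input(shape=(100, 8))                      # rank-1 case: (batch, steps, features)
    out = CoordinateChannel1D()(seq)                 # appends one coordinate channel -> 9 features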
|
[
"tensorflow.tile"
] |
[((17561, 17601), 'tensorflow.tile', 'tf.tile', (['xx_channels', '[1, dim1, 1, 1, 1]'], {}), '(xx_channels, [1, dim1, 1, 1, 1])\n', (17568, 17601), True, 'import tensorflow as tf\n'), ((18248, 18288), 'tensorflow.tile', 'tf.tile', (['yy_channels', '[1, dim1, 1, 1, 1]'], {}), '(yy_channels, [1, dim1, 1, 1, 1])\n', (18255, 18288), True, 'import tensorflow as tf\n'), ((18594, 18631), 'tensorflow.tile', 'tf.tile', (['zz_range', '[1, 1, dim2, dim3]'], {}), '(zz_range, [1, 1, dim2, dim3])\n', (18601, 18631), True, 'import tensorflow as tf\n')]
|
import json
class JsonFormatter:
def __init__(self):
pass
def format(self, message):
return json.dumps(message)
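# Hedged usage sketch (editor's addition): the formatter just serialises any
# JSON-compatible message; the sample payload below is illustrative.
if __name__ == "__main__":
    formatter = JsonFormatter()
    print(formatter.format({"level": "info", "msg": "hello"}))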
|
[
"json.dumps"
] |
[((119, 138), 'json.dumps', 'json.dumps', (['message'], {}), '(message)\n', (129, 138), False, 'import json\n')]
|
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
file="/Users/spanta/Documents/batch_aeneas_scripts/batch_directory/QC_data/BMQBSMN2DA_epo_eng_plot_cdf.csv"
data_req = pd.read_table(file, sep=",")
arr = data_req.values
arr.sort(axis=0)
data_req = pd.DataFrame(arr, index=data_req.index, columns=data_req.columns)
#sort values per column
sorted_values = data_req.apply(lambda x: x.sort_values())
fig, ax = plt.subplots()
for col in sorted_values.columns:
y = np.linspace(0.,1., len(sorted_values[col].dropna()))
ax.plot(sorted_values[col].dropna(), y,label=col)
legend = ax.legend(loc='lower right', shadow=True, fontsize='medium')
plt.xlim([0, 5])
filename=(file.split('/')[-1]).split('_')[0]
plt.savefig('/Users/spanta/Documents/batch_aeneas_scripts/batch_directory/QC_data/'+filename+'.png')
#plt.show()
|
[
"pandas.DataFrame",
"matplotlib.pyplot.xlim",
"pandas.read_table",
"matplotlib.pyplot.subplots",
"matplotlib.pyplot.savefig"
] |
[((191, 219), 'pandas.read_table', 'pd.read_table', (['file'], {'sep': '""","""'}), "(file, sep=',')\n", (204, 219), True, 'import pandas as pd\n'), ((272, 337), 'pandas.DataFrame', 'pd.DataFrame', (['arr'], {'index': 'data_req.index', 'columns': 'data_req.columns'}), '(arr, index=data_req.index, columns=data_req.columns)\n', (284, 337), True, 'import pandas as pd\n'), ((433, 447), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {}), '()\n', (445, 447), True, 'import matplotlib.pyplot as plt\n'), ((673, 689), 'matplotlib.pyplot.xlim', 'plt.xlim', (['[0, 5]'], {}), '([0, 5])\n', (681, 689), True, 'import matplotlib.pyplot as plt\n'), ((735, 848), 'matplotlib.pyplot.savefig', 'plt.savefig', (["('/Users/spanta/Documents/batch_aeneas_scripts/batch_directory/QC_data/' +\n filename + '.png')"], {}), "(\n '/Users/spanta/Documents/batch_aeneas_scripts/batch_directory/QC_data/' +\n filename + '.png')\n", (746, 848), True, 'import matplotlib.pyplot as plt\n')]
|
from abc import ABC, abstractmethod
from decimal import Decimal
import stripe
from django.conf import settings
class PaymentGateway(ABC):
@classmethod
@abstractmethod
def generate_checkout_session_id(
cls,
name: str,
description: str,
price: float,
) -> str:
pass
@classmethod
@abstractmethod
def get_session_id_from_event(cls, event: dict) -> str:
pass
@classmethod
@abstractmethod
def get_customer_email_from_event(cls, event: dict) -> str:
pass
class StripePaymentGateway(PaymentGateway):
@classmethod
def generate_checkout_session_id(
cls,
name: str,
description: str,
price: Decimal
) -> str:
stripe.api_key = settings.STRIPE_SECRET_KEY
session = stripe.checkout.Session.create(
payment_method_types=['card'],
line_items=[
{
'name': name,
'description': description,
'amount': cls._format_price(price),
'currency': 'eur',
'quantity': 1,
}
],
success_url=(
settings.STRIPE_REDIRECT_URL
+ '?session_id={CHECKOUT_SESSION_ID}'
),
cancel_url=settings.STRIPE_REDIRECT_URL,
)
return session.id
@classmethod
def get_session_id_from_event(cls, event: dict) -> str:
return event['data']['object']['id']
@classmethod
def get_customer_email_from_event(cls, event: dict) -> str:
customer_id = event['data']['object']['customer']
stripe.api_key = settings.STRIPE_SECRET_KEY
customer = stripe.Customer.retrieve(customer_id)
return customer['email']
@staticmethod
def _format_price(price):
return int(float(price) * 100)
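# Hedged usage sketch (editor's addition, illustrative only): creating a checkout
# session through the concrete gateway. This performs a real Stripe API call and
# needs STRIPE_SECRET_KEY / STRIPE_REDIRECT_URL in the Django settings; the product
# details below are made up.
if __name__ == "__main__":
    session_id = StripePaymentGateway.generate_checkout_session_id(
        name="Pro plan",
        description="Monthly subscription",
        price=Decimal("9.99"),
    )
    print(session_id)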
|
[
"stripe.Customer.retrieve"
] |
[((1741, 1778), 'stripe.Customer.retrieve', 'stripe.Customer.retrieve', (['customer_id'], {}), '(customer_id)\n', (1765, 1778), False, 'import stripe\n')]
|
# Copyright 2013 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import time
import unittest
from telemetry import decorators
from telemetry.internal.backends.chrome_inspector import tracing_backend
from telemetry.internal.backends.chrome_inspector.tracing_backend import _DevToolsStreamReader
from telemetry.testing import fakes
from telemetry.testing import simple_mock
from telemetry.testing import tab_test_case
from telemetry.timeline import model as model_module
from telemetry.timeline import tracing_config
class TracingBackendTest(tab_test_case.TabTestCase):
# Number of consecutively requested memory dumps.
_REQUESTED_DUMP_COUNT = 3
@classmethod
def CustomizeBrowserOptions(cls, options):
options.AppendExtraBrowserArgs([
# Memory maps currently cannot be retrieved on sandboxed processes.
# See crbug.com/461788.
'--no-sandbox',
# Workaround to disable periodic memory dumps. See crbug.com/513692.
'--enable-memory-benchmarking'
])
def setUp(self):
super(TracingBackendTest, self).setUp()
self._tracing_controller = self._browser.platform.tracing_controller
if not self._tracing_controller.IsChromeTracingSupported():
self.skipTest('Browser does not support tracing, skipping test.')
if not self._browser.supports_memory_dumping:
self.skipTest('Browser does not support memory dumping, skipping test.')
@decorators.Disabled('win') # crbug.com/570955
def testDumpMemorySuccess(self):
# Check that dumping memory before tracing starts raises an exception.
self.assertRaises(Exception, self._browser.DumpMemory)
# Start tracing with memory dumps enabled.
config = tracing_config.TracingConfig()
config.tracing_category_filter.AddDisabledByDefault(
'disabled-by-default-memory-infra')
config.enable_chrome_trace = True
self._tracing_controller.StartTracing(config)
# Request several memory dumps in a row and test that they were all
# successfully created with unique IDs.
expected_dump_ids = []
for _ in xrange(self._REQUESTED_DUMP_COUNT):
dump_id = self._browser.DumpMemory()
self.assertIsNotNone(dump_id)
self.assertNotIn(dump_id, expected_dump_ids)
expected_dump_ids.append(dump_id)
trace_data = self._tracing_controller.StopTracing()
# Check that dumping memory after tracing stopped raises an exception.
self.assertRaises(Exception, self._browser.DumpMemory)
# Test that trace data is parsable.
model = model_module.TimelineModel(trace_data)
self.assertGreater(len(model.processes), 0)
# Test that the resulting model contains the requested memory dumps in the
# correct order (and nothing more).
actual_dump_ids = [d.dump_id for d in model.IterGlobalMemoryDumps()]
self.assertEqual(actual_dump_ids, expected_dump_ids)
@decorators.Disabled('win') # crbug.com/570955
def testDumpMemoryFailure(self):
# Check that dumping memory before tracing starts raises an exception.
self.assertRaises(Exception, self._browser.DumpMemory)
# Start tracing with memory dumps disabled.
config = tracing_config.TracingConfig()
config.enable_chrome_trace = True
self._tracing_controller.StartTracing(config)
# Check that the method returns None if the dump was not successful.
self.assertIsNone(self._browser.DumpMemory())
trace_data = self._tracing_controller.StopTracing()
# Check that dumping memory after tracing stopped raises an exception.
self.assertRaises(Exception, self._browser.DumpMemory)
# Test that trace data is parsable.
model = model_module.TimelineModel(trace_data)
self.assertGreater(len(model.processes), 0)
# Test that the resulting model contains no memory dumps.
self.assertEqual(len(list(model.IterGlobalMemoryDumps())), 0)
class TracingBackendUnitTest(unittest.TestCase):
def setUp(self):
self._mock_timer = simple_mock.MockTimer(tracing_backend)
self._inspector_socket = fakes.FakeInspectorWebsocket(self._mock_timer)
def tearDown(self):
self._mock_timer.Restore()
def testCollectTracingDataTimeout(self):
self._inspector_socket.AddEvent(
'Tracing.dataCollected', {'value': [{'ph': 'B'}]}, 9)
self._inspector_socket.AddEvent(
'Tracing.dataCollected', {'value': [{'ph': 'E'}]}, 19)
self._inspector_socket.AddEvent('Tracing.tracingComplete', {}, 35)
backend = tracing_backend.TracingBackend(self._inspector_socket)
# The third response is 16 seconds after the second response, so we expect
# a TracingTimeoutException.
with self.assertRaises(tracing_backend.TracingTimeoutException):
backend._CollectTracingData(10)
self.assertEqual(2, len(backend._trace_events))
self.assertFalse(backend._has_received_all_tracing_data)
def testCollectTracingDataNoTimeout(self):
self._inspector_socket.AddEvent(
'Tracing.dataCollected', {'value': [{'ph': 'B'}]}, 9)
self._inspector_socket.AddEvent(
'Tracing.dataCollected', {'value': [{'ph': 'E'}]}, 14)
self._inspector_socket.AddEvent('Tracing.tracingComplete', {}, 19)
backend = tracing_backend.TracingBackend(self._inspector_socket)
backend._CollectTracingData(10)
self.assertEqual(2, len(backend._trace_events))
self.assertTrue(backend._has_received_all_tracing_data)
def testCollectTracingDataFromStream(self):
self._inspector_socket.AddEvent(
'Tracing.tracingComplete', {'stream': '42'}, 1)
self._inspector_socket.AddAsyncResponse(
'IO.read', {'data': '[{},{},{'}, 2)
self._inspector_socket.AddAsyncResponse(
'IO.read', {'data': '},{},{}]', 'eof': True}, 3)
backend = tracing_backend.TracingBackend(self._inspector_socket)
backend._CollectTracingData(10)
self.assertEqual(5, len(backend._trace_events))
self.assertTrue(backend._has_received_all_tracing_data)
def testDumpMemorySuccess(self):
self._inspector_socket.AddResponseHandler(
'Tracing.requestMemoryDump',
lambda req: {'result': {'success': True, 'dumpGuid': '42abc'}})
backend = tracing_backend.TracingBackend(self._inspector_socket)
self.assertEqual(backend.DumpMemory(), '42abc')
def testDumpMemoryFailure(self):
self._inspector_socket.AddResponseHandler(
'Tracing.requestMemoryDump',
lambda req: {'result': {'success': False, 'dumpGuid': '42abc'}})
backend = tracing_backend.TracingBackend(self._inspector_socket)
self.assertIsNone(backend.DumpMemory())
class DevToolsStreamPerformanceTest(unittest.TestCase):
def setUp(self):
self._mock_timer = simple_mock.MockTimer(tracing_backend)
self._inspector_socket = fakes.FakeInspectorWebsocket(self._mock_timer)
def _MeasureReadTime(self, count):
mock_time = self._mock_timer.time() + 1
payload = ','.join(['{}'] * 5000)
self._inspector_socket.AddAsyncResponse('IO.read', {'data': '[' + payload},
mock_time)
startClock = time.clock()
done = {'done': False}
def mark_done(data):
del data # unused
done['done'] = True
reader = _DevToolsStreamReader(self._inspector_socket, 'dummy')
reader.Read(mark_done)
while not done['done']:
mock_time += 1
if count > 0:
self._inspector_socket.AddAsyncResponse('IO.read', {'data': payload},
mock_time)
elif count == 0:
self._inspector_socket.AddAsyncResponse('IO.read',
{'data': payload + ']', 'eof': True}, mock_time)
count -= 1
self._inspector_socket.DispatchNotifications(10)
return time.clock() - startClock
def testReadTime(self):
t1k = self._MeasureReadTime(1000)
t10k = self._MeasureReadTime(10000)
    # Time is an illusion, CPU time is doubly so; allow a great deal of tolerance.
toleranceFactor = 5
self.assertLess(t10k / t1k, 10000 / 1000 * toleranceFactor)
|
[
"telemetry.timeline.tracing_config.TracingConfig",
"telemetry.testing.fakes.FakeInspectorWebsocket",
"telemetry.testing.simple_mock.MockTimer",
"telemetry.decorators.Disabled",
"telemetry.internal.backends.chrome_inspector.tracing_backend._DevToolsStreamReader",
"time.clock",
"telemetry.internal.backends.chrome_inspector.tracing_backend.TracingBackend",
"telemetry.timeline.model.TimelineModel"
] |
[((1510, 1536), 'telemetry.decorators.Disabled', 'decorators.Disabled', (['"""win"""'], {}), "('win')\n", (1529, 1536), False, 'from telemetry import decorators\n'), ((2956, 2982), 'telemetry.decorators.Disabled', 'decorators.Disabled', (['"""win"""'], {}), "('win')\n", (2975, 2982), False, 'from telemetry import decorators\n'), ((1787, 1817), 'telemetry.timeline.tracing_config.TracingConfig', 'tracing_config.TracingConfig', ([], {}), '()\n', (1815, 1817), False, 'from telemetry.timeline import tracing_config\n'), ((2615, 2653), 'telemetry.timeline.model.TimelineModel', 'model_module.TimelineModel', (['trace_data'], {}), '(trace_data)\n', (2641, 2653), True, 'from telemetry.timeline import model as model_module\n'), ((3234, 3264), 'telemetry.timeline.tracing_config.TracingConfig', 'tracing_config.TracingConfig', ([], {}), '()\n', (3262, 3264), False, 'from telemetry.timeline import tracing_config\n'), ((3722, 3760), 'telemetry.timeline.model.TimelineModel', 'model_module.TimelineModel', (['trace_data'], {}), '(trace_data)\n', (3748, 3760), True, 'from telemetry.timeline import model as model_module\n'), ((4032, 4070), 'telemetry.testing.simple_mock.MockTimer', 'simple_mock.MockTimer', (['tracing_backend'], {}), '(tracing_backend)\n', (4053, 4070), False, 'from telemetry.testing import simple_mock\n'), ((4100, 4146), 'telemetry.testing.fakes.FakeInspectorWebsocket', 'fakes.FakeInspectorWebsocket', (['self._mock_timer'], {}), '(self._mock_timer)\n', (4128, 4146), False, 'from telemetry.testing import fakes\n'), ((4529, 4583), 'telemetry.internal.backends.chrome_inspector.tracing_backend.TracingBackend', 'tracing_backend.TracingBackend', (['self._inspector_socket'], {}), '(self._inspector_socket)\n', (4559, 4583), False, 'from telemetry.internal.backends.chrome_inspector import tracing_backend\n'), ((5247, 5301), 'telemetry.internal.backends.chrome_inspector.tracing_backend.TracingBackend', 'tracing_backend.TracingBackend', (['self._inspector_socket'], {}), '(self._inspector_socket)\n', (5277, 5301), False, 'from telemetry.internal.backends.chrome_inspector import tracing_backend\n'), ((5796, 5850), 'telemetry.internal.backends.chrome_inspector.tracing_backend.TracingBackend', 'tracing_backend.TracingBackend', (['self._inspector_socket'], {}), '(self._inspector_socket)\n', (5826, 5850), False, 'from telemetry.internal.backends.chrome_inspector import tracing_backend\n'), ((6206, 6260), 'telemetry.internal.backends.chrome_inspector.tracing_backend.TracingBackend', 'tracing_backend.TracingBackend', (['self._inspector_socket'], {}), '(self._inspector_socket)\n', (6236, 6260), False, 'from telemetry.internal.backends.chrome_inspector import tracing_backend\n'), ((6521, 6575), 'telemetry.internal.backends.chrome_inspector.tracing_backend.TracingBackend', 'tracing_backend.TracingBackend', (['self._inspector_socket'], {}), '(self._inspector_socket)\n', (6551, 6575), False, 'from telemetry.internal.backends.chrome_inspector import tracing_backend\n'), ((6720, 6758), 'telemetry.testing.simple_mock.MockTimer', 'simple_mock.MockTimer', (['tracing_backend'], {}), '(tracing_backend)\n', (6741, 6758), False, 'from telemetry.testing import simple_mock\n'), ((6788, 6834), 'telemetry.testing.fakes.FakeInspectorWebsocket', 'fakes.FakeInspectorWebsocket', (['self._mock_timer'], {}), '(self._mock_timer)\n', (6816, 6834), False, 'from telemetry.testing import fakes\n'), ((7107, 7119), 'time.clock', 'time.clock', ([], {}), '()\n', (7117, 7119), False, 'import time\n'), ((7238, 7292), 
'telemetry.internal.backends.chrome_inspector.tracing_backend._DevToolsStreamReader', '_DevToolsStreamReader', (['self._inspector_socket', '"""dummy"""'], {}), "(self._inspector_socket, 'dummy')\n", (7259, 7292), False, 'from telemetry.internal.backends.chrome_inspector.tracing_backend import _DevToolsStreamReader\n'), ((7716, 7728), 'time.clock', 'time.clock', ([], {}), '()\n', (7726, 7728), False, 'import time\n')]
|
from functools import partial
from catalyst import dl, SETTINGS
E2E = {
"de": dl.DeviceEngine,
"dp": dl.DataParallelEngine,
"ddp": dl.DistributedDataParallelEngine,
}
if SETTINGS.amp_required:
E2E.update(
{"amp-dp": dl.DataParallelAMPEngine, "amp-ddp": dl.DistributedDataParallelAMPEngine}
)
if SETTINGS.apex_required:
E2E.update(
{"apex-dp": dl.DataParallelAPEXEngine, "apex-ddp": dl.DistributedDataParallelAPEXEngine}
)
if SETTINGS.deepspeed_required:
E2E.update({"ds-ddp": dl.DistributedDataParallelDeepSpeedEngine})
if SETTINGS.fairscale_required:
E2E.update(
{
"fs-pp": dl.PipelineParallelFairScaleEngine,
"fs-ddp": dl.SharedDataParallelFairScaleEngine,
"fs-ddp-amp": dl.SharedDataParallelFairScaleAMPEngine,
            # for some reason we can hit a bug with the FairScale flatten wrapper here, so...
"fs-fddp": partial(
dl.FullySharedDataParallelFairScaleEngine, ddp_kwargs={"flatten_parameters": False}
),
}
)
if SETTINGS.xla_required:
E2E.update({"xla": dl.XLAEngine, "xla-ddp": dl.DistributedXLAEngine})
|
[
"functools.partial"
] |
[((933, 1030), 'functools.partial', 'partial', (['dl.FullySharedDataParallelFairScaleEngine'], {'ddp_kwargs': "{'flatten_parameters': False}"}), "(dl.FullySharedDataParallelFairScaleEngine, ddp_kwargs={\n 'flatten_parameters': False})\n", (940, 1030), False, 'from functools import partial\n')]
|
#!/usr/bin/env python
# Copyright 2018 by <NAME>
#
# https://github.com/martinmoene/kalman-estimator
#
# Distributed under the Boost Software License, Version 1.0.
# (See accompanying file LICENSE.txt or copy at http://www.boost.org/LICENSE_1_0.txt)
import os
nt = 'double'
nt = 'fp32_t'
std = 'c++17'
opt = '-O2'
mcu = 'atmega328p'
fcpu = '16000000L'
filename = 'avr-kalman-sim.cpp'
verbose = '-vv'
include = '../../include'
tpl = 'python ../../script/avr-gcc.py {verbose} -std={std} {opt} -mmcu={mcu} -fcpu={fcpu} -DKE_NUMERIC_TYPE={nt} -I{include} {filename}'
cmd = tpl.format(nt=nt, verbose=verbose, std=std, opt=opt, mcu=mcu, fcpu=fcpu, include=include, filename=filename)
print( cmd )
os.system( cmd )
|
[
"os.system"
] |
[((734, 748), 'os.system', 'os.system', (['cmd'], {}), '(cmd)\n', (743, 748), False, 'import os\n')]
|
from django.contrib.auth.models import Permission
def assign_perm(perm, group):
"""
Assigns a permission to a group
"""
if not isinstance(perm, Permission):
try:
app_label, codename = perm.split('.', 1)
except ValueError:
raise ValueError("For global permissions, first argument must be in"
" format: 'app_label.codename' (is %r)" % perm)
perm = Permission.objects.get(content_type__app_label=app_label, codename=codename)
group.permissions.add(perm)
return perm
def remove_perm(perm, group):
"""
Removes a permission from a group
"""
if not isinstance(perm, Permission):
try:
app_label, codename = perm.split('.', 1)
except ValueError:
raise ValueError("For global permissions, first argument must be in"
" format: 'app_label.codename' (is %r)" % perm)
perm = Permission.objects.get(content_type__app_label=app_label, codename=codename)
group.permissions.remove(perm)
return
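# Hedged usage sketch (editor's addition, illustrative only): granting and revoking
# a global permission on a group by its "app_label.codename" string. Kept as a
# comment because it touches a real database; the permission and group names are made up.
#
#     from django.contrib.auth.models import Group
#     editors = Group.objects.get(name="editors")
#     assign_perm("blog.change_post", editors)
#     remove_perm("blog.change_post", editors)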
|
[
"django.contrib.auth.models.Permission.objects.get"
] |
[((441, 517), 'django.contrib.auth.models.Permission.objects.get', 'Permission.objects.get', ([], {'content_type__app_label': 'app_label', 'codename': 'codename'}), '(content_type__app_label=app_label, codename=codename)\n', (463, 517), False, 'from django.contrib.auth.models import Permission\n'), ((960, 1036), 'django.contrib.auth.models.Permission.objects.get', 'Permission.objects.get', ([], {'content_type__app_label': 'app_label', 'codename': 'codename'}), '(content_type__app_label=app_label, codename=codename)\n', (982, 1036), False, 'from django.contrib.auth.models import Permission\n')]
|
"""Definition for mockerena schema
.. codeauthor:: <NAME> <<EMAIL>>
"""
from copy import deepcopy
SCHEMA = {
"item_title": "schema",
"schema": {
"schema": {
"type": "string",
"minlength": 3,
"maxlength": 64,
"unique": True,
"required": True
},
"num_rows": {
"type": "integer",
"min": 1,
"default": 1000
},
"file_format": {
"type": "string",
"required": True
},
"file_name": {
"type": "string",
"minlength": 3,
"maxlength": 64,
"unique": True,
"required": True
},
"include_header": {"type": "boolean"},
"exclude_null": {"type": "boolean"},
"is_nested": {"type": "boolean"},
"delimiter": {"type": "string"},
"key_separator": {"type": "string"},
"quote_character": {"type": "string"},
"template": {"type": "string"},
"root_node": {"type": "string"},
"table_name": {
"type": "string"
},
"columns": {
"type": "list",
"schema": {
"type": "dict",
"schema": {
"type": {"type": "string"},
"name": {"type": "string"},
"format": {"type": "string"},
"args": {"type": "dict"},
"percent_empty": {
"type": "float",
"min": 0,
"max": 1
},
"truncate": {"type": "boolean"},
"function": {"type": "string"},
"description": {"type": "string"}
}
}
},
"responses": {
"type": "list",
"items": [
{
"type": "dict",
"schema": {
"status_code": {
"type": "integer",
"min": 100,
"max": 599
},
"headers": {"type": "dict", "allow_unknown": True},
"content_type": {"type": "string"},
"data": {"type": "string"},
"weight": {
"type": "integer",
"min": 1
}
}
}
]
}
},
"additional_lookup": {
"url": 'regex("[\\w]+")',
"field": "schema"
},
}
# Build a schema for custom_schema route
CUSTOM_SCHEMA = deepcopy(SCHEMA["schema"])
del CUSTOM_SCHEMA["schema"]["unique"]
del CUSTOM_SCHEMA["file_name"]["unique"]
|
[
"copy.deepcopy"
] |
[((2742, 2768), 'copy.deepcopy', 'deepcopy', (["SCHEMA['schema']"], {}), "(SCHEMA['schema'])\n", (2750, 2768), False, 'from copy import deepcopy\n')]
|
# Copyright 2020 Stanford University, Los Alamos National Laboratory
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from flexflow.keras.models import Model, Sequential
from flexflow.keras.layers import Input, Flatten, Dense, Activation, Conv2D, MaxPooling2D, Concatenate, concatenate
import flexflow.keras.optimizers
from flexflow.keras.datasets import mnist
from flexflow.keras.datasets import cifar10
from flexflow.keras import losses
from flexflow.keras import metrics
from flexflow.keras.callbacks import Callback, VerifyMetrics, EpochVerifyMetrics
from accuracy import ModelAccuracy
import flexflow.core as ff
import numpy as np
import argparse
import gc
from PIL import Image
def top_level_task():
num_samples = 10000
(x_train, y_train), (x_test, y_test) = cifar10.load_data(num_samples)
full_input_np = np.zeros((num_samples, 3, 229, 229), dtype=np.float32)
for i in range(0, num_samples):
image = x_train[i, :, :, :]
image = image.transpose(1, 2, 0)
pil_image = Image.fromarray(image)
pil_image = pil_image.resize((229,229), Image.NEAREST)
image = np.array(pil_image, dtype=np.float32)
image = image.transpose(2, 0, 1)
full_input_np[i, :, :, :] = image
if (i == 0):
print(image)
full_input_np /= 255
y_train = y_train.astype('int32')
full_label_np = y_train
input_tensor = Input(shape=(3, 229, 229), dtype="float32")
output = Conv2D(filters=64, input_shape=(3,229,229), kernel_size=(11,11), strides=(4,4), padding=(2,2), activation="relu")(input_tensor)
output = MaxPooling2D(pool_size=(3,3), strides=(2,2), padding="valid")(output)
output = Conv2D(filters=192, kernel_size=(5,5), strides=(1,1), padding=(2,2), activation="relu")(output)
output = MaxPooling2D(pool_size=(3,3), strides=(2,2), padding="valid")(output)
output = Conv2D(filters=384, kernel_size=(3,3), strides=(1,1), padding=(1,1), activation="relu")(output)
output = Conv2D(filters=256, kernel_size=(3,3), strides=(1,1), padding=(1,1), activation="relu")(output)
output = Conv2D(filters=256, kernel_size=(3,3), strides=(1,1), padding=(1,1), activation="relu")(output)
output = MaxPooling2D(pool_size=(3,3), strides=(2,2), padding="valid")(output)
output = Flatten()(output)
output = Dense(4096, activation="relu")(output)
output = Dense(4096, activation="relu")(output)
output = Dense(10)(output)
output = Activation("softmax")(output)
model = Model(input_tensor, output)
opt = flexflow.keras.optimizers.SGD(learning_rate=0.01)
model.compile(optimizer=opt, loss='sparse_categorical_crossentropy', metrics=['accuracy', 'sparse_categorical_crossentropy'])
print(model.summary())
model.fit(full_input_np, full_label_np, epochs=40, callbacks=[VerifyMetrics(ModelAccuracy.CIFAR10_ALEXNET), EpochVerifyMetrics(ModelAccuracy.CIFAR10_ALEXNET)])
if __name__ == "__main__":
print("Functional API, cifar10 alexnet")
top_level_task()
gc.collect()
|
[
"flexflow.keras.datasets.cifar10.load_data",
"flexflow.keras.models.Model",
"flexflow.keras.layers.Dense",
"flexflow.keras.callbacks.VerifyMetrics",
"flexflow.keras.layers.MaxPooling2D",
"numpy.zeros",
"flexflow.keras.layers.Input",
"flexflow.keras.layers.Flatten",
"gc.collect",
"flexflow.keras.layers.Activation",
"numpy.array",
"flexflow.keras.callbacks.EpochVerifyMetrics",
"PIL.Image.fromarray",
"flexflow.keras.layers.Conv2D"
] |
[((1277, 1307), 'flexflow.keras.datasets.cifar10.load_data', 'cifar10.load_data', (['num_samples'], {}), '(num_samples)\n', (1294, 1307), False, 'from flexflow.keras.datasets import cifar10\n'), ((1327, 1381), 'numpy.zeros', 'np.zeros', (['(num_samples, 3, 229, 229)'], {'dtype': 'np.float32'}), '((num_samples, 3, 229, 229), dtype=np.float32)\n', (1335, 1381), True, 'import numpy as np\n'), ((1856, 1899), 'flexflow.keras.layers.Input', 'Input', ([], {'shape': '(3, 229, 229)', 'dtype': '"""float32"""'}), "(shape=(3, 229, 229), dtype='float32')\n", (1861, 1899), False, 'from flexflow.keras.layers import Input, Flatten, Dense, Activation, Conv2D, MaxPooling2D, Concatenate, concatenate\n'), ((2925, 2952), 'flexflow.keras.models.Model', 'Model', (['input_tensor', 'output'], {}), '(input_tensor, output)\n', (2930, 2952), False, 'from flexflow.keras.models import Model, Sequential\n'), ((3424, 3436), 'gc.collect', 'gc.collect', ([], {}), '()\n', (3434, 3436), False, 'import gc\n'), ((1501, 1523), 'PIL.Image.fromarray', 'Image.fromarray', (['image'], {}), '(image)\n', (1516, 1523), False, 'from PIL import Image\n'), ((1595, 1632), 'numpy.array', 'np.array', (['pil_image'], {'dtype': 'np.float32'}), '(pil_image, dtype=np.float32)\n', (1603, 1632), True, 'import numpy as np\n'), ((1914, 2037), 'flexflow.keras.layers.Conv2D', 'Conv2D', ([], {'filters': '(64)', 'input_shape': '(3, 229, 229)', 'kernel_size': '(11, 11)', 'strides': '(4, 4)', 'padding': '(2, 2)', 'activation': '"""relu"""'}), "(filters=64, input_shape=(3, 229, 229), kernel_size=(11, 11), strides\n =(4, 4), padding=(2, 2), activation='relu')\n", (1920, 2037), False, 'from flexflow.keras.layers import Input, Flatten, Dense, Activation, Conv2D, MaxPooling2D, Concatenate, concatenate\n'), ((2053, 2116), 'flexflow.keras.layers.MaxPooling2D', 'MaxPooling2D', ([], {'pool_size': '(3, 3)', 'strides': '(2, 2)', 'padding': '"""valid"""'}), "(pool_size=(3, 3), strides=(2, 2), padding='valid')\n", (2065, 2116), False, 'from flexflow.keras.layers import Input, Flatten, Dense, Activation, Conv2D, MaxPooling2D, Concatenate, concatenate\n'), ((2134, 2228), 'flexflow.keras.layers.Conv2D', 'Conv2D', ([], {'filters': '(192)', 'kernel_size': '(5, 5)', 'strides': '(1, 1)', 'padding': '(2, 2)', 'activation': '"""relu"""'}), "(filters=192, kernel_size=(5, 5), strides=(1, 1), padding=(2, 2),\n activation='relu')\n", (2140, 2228), False, 'from flexflow.keras.layers import Input, Flatten, Dense, Activation, Conv2D, MaxPooling2D, Concatenate, concatenate\n'), ((2241, 2304), 'flexflow.keras.layers.MaxPooling2D', 'MaxPooling2D', ([], {'pool_size': '(3, 3)', 'strides': '(2, 2)', 'padding': '"""valid"""'}), "(pool_size=(3, 3), strides=(2, 2), padding='valid')\n", (2253, 2304), False, 'from flexflow.keras.layers import Input, Flatten, Dense, Activation, Conv2D, MaxPooling2D, Concatenate, concatenate\n'), ((2322, 2416), 'flexflow.keras.layers.Conv2D', 'Conv2D', ([], {'filters': '(384)', 'kernel_size': '(3, 3)', 'strides': '(1, 1)', 'padding': '(1, 1)', 'activation': '"""relu"""'}), "(filters=384, kernel_size=(3, 3), strides=(1, 1), padding=(1, 1),\n activation='relu')\n", (2328, 2416), False, 'from flexflow.keras.layers import Input, Flatten, Dense, Activation, Conv2D, MaxPooling2D, Concatenate, concatenate\n'), ((2429, 2523), 'flexflow.keras.layers.Conv2D', 'Conv2D', ([], {'filters': '(256)', 'kernel_size': '(3, 3)', 'strides': '(1, 1)', 'padding': '(1, 1)', 'activation': '"""relu"""'}), "(filters=256, kernel_size=(3, 3), strides=(1, 1), padding=(1, 1),\n 
activation='relu')\n", (2435, 2523), False, 'from flexflow.keras.layers import Input, Flatten, Dense, Activation, Conv2D, MaxPooling2D, Concatenate, concatenate\n'), ((2536, 2630), 'flexflow.keras.layers.Conv2D', 'Conv2D', ([], {'filters': '(256)', 'kernel_size': '(3, 3)', 'strides': '(1, 1)', 'padding': '(1, 1)', 'activation': '"""relu"""'}), "(filters=256, kernel_size=(3, 3), strides=(1, 1), padding=(1, 1),\n activation='relu')\n", (2542, 2630), False, 'from flexflow.keras.layers import Input, Flatten, Dense, Activation, Conv2D, MaxPooling2D, Concatenate, concatenate\n'), ((2643, 2706), 'flexflow.keras.layers.MaxPooling2D', 'MaxPooling2D', ([], {'pool_size': '(3, 3)', 'strides': '(2, 2)', 'padding': '"""valid"""'}), "(pool_size=(3, 3), strides=(2, 2), padding='valid')\n", (2655, 2706), False, 'from flexflow.keras.layers import Input, Flatten, Dense, Activation, Conv2D, MaxPooling2D, Concatenate, concatenate\n'), ((2724, 2733), 'flexflow.keras.layers.Flatten', 'Flatten', ([], {}), '()\n', (2731, 2733), False, 'from flexflow.keras.layers import Input, Flatten, Dense, Activation, Conv2D, MaxPooling2D, Concatenate, concatenate\n'), ((2753, 2783), 'flexflow.keras.layers.Dense', 'Dense', (['(4096)'], {'activation': '"""relu"""'}), "(4096, activation='relu')\n", (2758, 2783), False, 'from flexflow.keras.layers import Input, Flatten, Dense, Activation, Conv2D, MaxPooling2D, Concatenate, concatenate\n'), ((2803, 2833), 'flexflow.keras.layers.Dense', 'Dense', (['(4096)'], {'activation': '"""relu"""'}), "(4096, activation='relu')\n", (2808, 2833), False, 'from flexflow.keras.layers import Input, Flatten, Dense, Activation, Conv2D, MaxPooling2D, Concatenate, concatenate\n'), ((2853, 2862), 'flexflow.keras.layers.Dense', 'Dense', (['(10)'], {}), '(10)\n', (2858, 2862), False, 'from flexflow.keras.layers import Input, Flatten, Dense, Activation, Conv2D, MaxPooling2D, Concatenate, concatenate\n'), ((2882, 2903), 'flexflow.keras.layers.Activation', 'Activation', (['"""softmax"""'], {}), "('softmax')\n", (2892, 2903), False, 'from flexflow.keras.layers import Input, Flatten, Dense, Activation, Conv2D, MaxPooling2D, Concatenate, concatenate\n'), ((3234, 3278), 'flexflow.keras.callbacks.VerifyMetrics', 'VerifyMetrics', (['ModelAccuracy.CIFAR10_ALEXNET'], {}), '(ModelAccuracy.CIFAR10_ALEXNET)\n', (3247, 3278), False, 'from flexflow.keras.callbacks import Callback, VerifyMetrics, EpochVerifyMetrics\n'), ((3280, 3329), 'flexflow.keras.callbacks.EpochVerifyMetrics', 'EpochVerifyMetrics', (['ModelAccuracy.CIFAR10_ALEXNET'], {}), '(ModelAccuracy.CIFAR10_ALEXNET)\n', (3298, 3329), False, 'from flexflow.keras.callbacks import Callback, VerifyMetrics, EpochVerifyMetrics\n')]
|
#!/usr/bin/env python
import asyncio
import logging
import hummingbot.connector.exchange.huobi.huobi_constants as CONSTANTS
from collections import defaultdict
from typing import (
Any,
Dict,
List,
Optional,
)
from hummingbot.connector.exchange.huobi.huobi_order_book import HuobiOrderBook
from hummingbot.connector.exchange.huobi.huobi_utils import (
convert_from_exchange_trading_pair,
convert_to_exchange_trading_pair,
build_api_factory,
)
from hummingbot.core.data_type.order_book import OrderBook
from hummingbot.core.data_type.order_book_message import OrderBookMessage
from hummingbot.core.data_type.order_book_tracker_data_source import OrderBookTrackerDataSource
from hummingbot.core.web_assistant.connections.data_types import RESTMethod, RESTRequest, RESTResponse, WSRequest
from hummingbot.core.web_assistant.rest_assistant import RESTAssistant
from hummingbot.core.web_assistant.web_assistants_factory import WebAssistantsFactory
from hummingbot.core.web_assistant.ws_assistant import WSAssistant
from hummingbot.logger import HummingbotLogger
class HuobiAPIOrderBookDataSource(OrderBookTrackerDataSource):
MESSAGE_TIMEOUT = 30.0
PING_TIMEOUT = 10.0
HEARTBEAT_INTERVAL = 30.0 # seconds
ORDER_BOOK_SNAPSHOT_DELAY = 60 * 60 # expressed in seconds
TRADE_CHANNEL_SUFFIX = "trade.detail"
ORDERBOOK_CHANNEL_SUFFIX = "depth.step0"
_haobds_logger: Optional[HummingbotLogger] = None
@classmethod
def logger(cls) -> HummingbotLogger:
if cls._haobds_logger is None:
cls._haobds_logger = logging.getLogger(__name__)
return cls._haobds_logger
def __init__(self,
trading_pairs: List[str],
api_factory: Optional[WebAssistantsFactory] = None,
):
super().__init__(trading_pairs)
self._api_factory = api_factory or build_api_factory()
self._rest_assistant: Optional[RESTAssistant] = None
self._ws_assistant: Optional[WSAssistant] = None
self._message_queue: Dict[str, asyncio.Queue] = defaultdict(asyncio.Queue)
async def _get_rest_assistant(self) -> RESTAssistant:
if self._rest_assistant is None:
self._rest_assistant = await self._api_factory.get_rest_assistant()
return self._rest_assistant
async def _get_ws_assistant(self) -> WSAssistant:
if self._ws_assistant is None:
self._ws_assistant = await self._api_factory.get_ws_assistant()
return self._ws_assistant
@classmethod
async def get_last_traded_prices(cls, trading_pairs: List[str]) -> Dict[str, float]:
api_factory = build_api_factory()
rest_assistant = await api_factory.get_rest_assistant()
url = CONSTANTS.REST_URL + CONSTANTS.TICKER_URL
request = RESTRequest(method=RESTMethod.GET,
url=url)
response: RESTResponse = await rest_assistant.call(request=request)
results = dict()
resp_json = await response.json()
for trading_pair in trading_pairs:
resp_record = [o for o in resp_json["data"] if o["symbol"] == convert_to_exchange_trading_pair(trading_pair)][0]
results[trading_pair] = float(resp_record["close"])
return results
@staticmethod
async def fetch_trading_pairs() -> List[str]:
try:
api_factory = build_api_factory()
rest_assistant = await api_factory.get_rest_assistant()
url = CONSTANTS.REST_URL + CONSTANTS.API_VERSION + CONSTANTS.SYMBOLS_URL
request = RESTRequest(method=RESTMethod.GET,
url=url)
response: RESTResponse = await rest_assistant.call(request=request)
if response.status == 200:
all_symbol_infos: Dict[str, Any] = await response.json()
return [f"{symbol_info['base-currency']}-{symbol_info['quote-currency']}".upper()
for symbol_info in all_symbol_infos["data"]
if symbol_info["state"] == "online"]
except Exception:
# Do nothing if the request fails -- there will be no autocomplete for huobi trading pairs
pass
return []
async def get_snapshot(self, trading_pair: str) -> Dict[str, Any]:
rest_assistant = await self._get_rest_assistant()
url = CONSTANTS.REST_URL + CONSTANTS.DEPTH_URL
# when type is set to "step0", the default value of "depth" is 150
params: Dict = {"symbol": convert_to_exchange_trading_pair(trading_pair), "type": "step0"}
request = RESTRequest(method=RESTMethod.GET,
url=url,
params=params)
response: RESTResponse = await rest_assistant.call(request=request)
if response.status != 200:
raise IOError(f"Error fetching Huobi market snapshot for {trading_pair}. "
f"HTTP status is {response.status}.")
snapshot_data: Dict[str, Any] = await response.json()
return snapshot_data
async def get_new_order_book(self, trading_pair: str) -> OrderBook:
snapshot: Dict[str, Any] = await self.get_snapshot(trading_pair)
timestamp = snapshot["tick"]["ts"]
snapshot_msg: OrderBookMessage = HuobiOrderBook.snapshot_message_from_exchange(
msg=snapshot,
timestamp=timestamp,
metadata={"trading_pair": trading_pair},
)
order_book: OrderBook = self.order_book_create_function()
order_book.apply_snapshot(snapshot_msg.bids, snapshot_msg.asks, snapshot_msg.update_id)
return order_book
async def _subscribe_channels(self, ws: WSAssistant):
try:
for trading_pair in self._trading_pairs:
subscribe_orderbook_request: WSRequest = WSRequest({
"sub": f"market.{convert_to_exchange_trading_pair(trading_pair)}.depth.step0",
"id": convert_to_exchange_trading_pair(trading_pair)
})
subscribe_trade_request: WSRequest = WSRequest({
"sub": f"market.{convert_to_exchange_trading_pair(trading_pair)}.trade.detail",
"id": convert_to_exchange_trading_pair(trading_pair)
})
await ws.send(subscribe_orderbook_request)
await ws.send(subscribe_trade_request)
self.logger().info("Subscribed to public orderbook and trade channels...")
except asyncio.CancelledError:
raise
except Exception:
self.logger().error(
"Unexpected error occurred subscribing to order book trading and delta streams...", exc_info=True
)
raise
async def listen_for_subscriptions(self):
ws = None
while True:
try:
ws: WSAssistant = await self._get_ws_assistant()
await ws.connect(ws_url=CONSTANTS.WS_PUBLIC_URL, ping_timeout=self.HEARTBEAT_INTERVAL)
await self._subscribe_channels(ws)
async for ws_response in ws.iter_messages():
data = ws_response.data
if "subbed" in data:
continue
if "ping" in data:
ping_request = WSRequest(payload={
"pong": data["ping"]
})
await ws.send(request=ping_request)
channel = data.get("ch", "")
if channel.endswith(self.TRADE_CHANNEL_SUFFIX):
self._message_queue[self.TRADE_CHANNEL_SUFFIX].put_nowait(data)
if channel.endswith(self.ORDERBOOK_CHANNEL_SUFFIX):
self._message_queue[self.ORDERBOOK_CHANNEL_SUFFIX].put_nowait(data)
except asyncio.CancelledError:
raise
except Exception:
self.logger().error(
"Unexpected error occurred when listening to order book streams. Retrying in 5 seconds...",
exc_info=True,
)
await self._sleep(5.0)
finally:
ws and await ws.disconnect()
async def listen_for_trades(self, ev_loop: asyncio.BaseEventLoop, output: asyncio.Queue):
message_queue = self._message_queue[self.TRADE_CHANNEL_SUFFIX]
while True:
try:
msg: Dict[str, Any] = await message_queue.get()
trading_pair = msg["ch"].split(".")[1]
timestamp = msg["tick"]["ts"]
for data in msg["tick"]["data"]:
trade_message: OrderBookMessage = HuobiOrderBook.trade_message_from_exchange(
msg=data,
timestamp=timestamp,
metadata={"trading_pair": convert_from_exchange_trading_pair(trading_pair)}
)
output.put_nowait(trade_message)
except asyncio.CancelledError:
raise
except Exception:
self.logger().error("Unexpected error with WebSocket connection. Retrying after 30 seconds...",
exc_info=True)
await self._sleep(30.0)
async def listen_for_order_book_diffs(self, ev_loop: asyncio.BaseEventLoop, output: asyncio.Queue):
message_queue = self._message_queue[self.ORDERBOOK_CHANNEL_SUFFIX]
while True:
try:
msg: Dict[str, Any] = await message_queue.get()
timestamp = msg["tick"]["ts"]
order_book_message: OrderBookMessage = HuobiOrderBook.diff_message_from_exchange(
msg=msg,
timestamp=timestamp
)
output.put_nowait(order_book_message)
except asyncio.CancelledError:
raise
except Exception:
self.logger().error("Unexpected error with WebSocket connection. Retrying after 30 seconds...",
exc_info=True)
await self._sleep(30.0)
async def listen_for_order_book_snapshots(self, ev_loop: asyncio.BaseEventLoop, output: asyncio.Queue):
while True:
await self._sleep(self.ORDER_BOOK_SNAPSHOT_DELAY)
try:
for trading_pair in self._trading_pairs:
snapshot: Dict[str, Any] = await self.get_snapshot(trading_pair)
snapshot_message: OrderBookMessage = HuobiOrderBook.snapshot_message_from_exchange(
snapshot,
timestamp=snapshot["tick"]["ts"],
metadata={"trading_pair": trading_pair},
)
output.put_nowait(snapshot_message)
self.logger().debug(f"Saved order book snapshot for {trading_pair}")
except asyncio.CancelledError:
raise
except Exception:
self.logger().error("Unexpected error listening for orderbook snapshots. Retrying in 5 secs...", exc_info=True)
await self._sleep(5.0)
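# Hedged usage sketch (editor's addition, illustrative only): the public REST helpers
# are classmethods, so a one-off price lookup only needs an event loop. Kept as a
# comment because it performs live network calls; the trading pair is an example.
#
#     import asyncio
#     prices = asyncio.get_event_loop().run_until_complete(
#         HuobiAPIOrderBookDataSource.get_last_traded_prices(["BTC-USDT"])
#     )
#     print(prices)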
|
[
"hummingbot.connector.exchange.huobi.huobi_order_book.HuobiOrderBook.diff_message_from_exchange",
"hummingbot.core.web_assistant.connections.data_types.RESTRequest",
"hummingbot.connector.exchange.huobi.huobi_utils.convert_to_exchange_trading_pair",
"hummingbot.connector.exchange.huobi.huobi_order_book.HuobiOrderBook.snapshot_message_from_exchange",
"collections.defaultdict",
"hummingbot.connector.exchange.huobi.huobi_utils.convert_from_exchange_trading_pair",
"hummingbot.core.web_assistant.connections.data_types.WSRequest",
"hummingbot.connector.exchange.huobi.huobi_utils.build_api_factory",
"logging.getLogger"
] |
[((2082, 2108), 'collections.defaultdict', 'defaultdict', (['asyncio.Queue'], {}), '(asyncio.Queue)\n', (2093, 2108), False, 'from collections import defaultdict\n'), ((2658, 2677), 'hummingbot.connector.exchange.huobi.huobi_utils.build_api_factory', 'build_api_factory', ([], {}), '()\n', (2675, 2677), False, 'from hummingbot.connector.exchange.huobi.huobi_utils import convert_from_exchange_trading_pair, convert_to_exchange_trading_pair, build_api_factory\n'), ((2817, 2860), 'hummingbot.core.web_assistant.connections.data_types.RESTRequest', 'RESTRequest', ([], {'method': 'RESTMethod.GET', 'url': 'url'}), '(method=RESTMethod.GET, url=url)\n', (2828, 2860), False, 'from hummingbot.core.web_assistant.connections.data_types import RESTMethod, RESTRequest, RESTResponse, WSRequest\n'), ((4635, 4693), 'hummingbot.core.web_assistant.connections.data_types.RESTRequest', 'RESTRequest', ([], {'method': 'RESTMethod.GET', 'url': 'url', 'params': 'params'}), '(method=RESTMethod.GET, url=url, params=params)\n', (4646, 4693), False, 'from hummingbot.core.web_assistant.connections.data_types import RESTMethod, RESTRequest, RESTResponse, WSRequest\n'), ((5338, 5464), 'hummingbot.connector.exchange.huobi.huobi_order_book.HuobiOrderBook.snapshot_message_from_exchange', 'HuobiOrderBook.snapshot_message_from_exchange', ([], {'msg': 'snapshot', 'timestamp': 'timestamp', 'metadata': "{'trading_pair': trading_pair}"}), "(msg=snapshot, timestamp=\n timestamp, metadata={'trading_pair': trading_pair})\n", (5383, 5464), False, 'from hummingbot.connector.exchange.huobi.huobi_order_book import HuobiOrderBook\n'), ((1587, 1614), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (1604, 1614), False, 'import logging\n'), ((1888, 1907), 'hummingbot.connector.exchange.huobi.huobi_utils.build_api_factory', 'build_api_factory', ([], {}), '()\n', (1905, 1907), False, 'from hummingbot.connector.exchange.huobi.huobi_utils import convert_from_exchange_trading_pair, convert_to_exchange_trading_pair, build_api_factory\n'), ((3398, 3417), 'hummingbot.connector.exchange.huobi.huobi_utils.build_api_factory', 'build_api_factory', ([], {}), '()\n', (3415, 3417), False, 'from hummingbot.connector.exchange.huobi.huobi_utils import convert_from_exchange_trading_pair, convert_to_exchange_trading_pair, build_api_factory\n'), ((3594, 3637), 'hummingbot.core.web_assistant.connections.data_types.RESTRequest', 'RESTRequest', ([], {'method': 'RESTMethod.GET', 'url': 'url'}), '(method=RESTMethod.GET, url=url)\n', (3605, 3637), False, 'from hummingbot.core.web_assistant.connections.data_types import RESTMethod, RESTRequest, RESTResponse, WSRequest\n'), ((4552, 4598), 'hummingbot.connector.exchange.huobi.huobi_utils.convert_to_exchange_trading_pair', 'convert_to_exchange_trading_pair', (['trading_pair'], {}), '(trading_pair)\n', (4584, 4598), False, 'from hummingbot.connector.exchange.huobi.huobi_utils import convert_from_exchange_trading_pair, convert_to_exchange_trading_pair, build_api_factory\n'), ((9755, 9826), 'hummingbot.connector.exchange.huobi.huobi_order_book.HuobiOrderBook.diff_message_from_exchange', 'HuobiOrderBook.diff_message_from_exchange', ([], {'msg': 'msg', 'timestamp': 'timestamp'}), '(msg=msg, timestamp=timestamp)\n', (9796, 9826), False, 'from hummingbot.connector.exchange.huobi.huobi_order_book import HuobiOrderBook\n'), ((10644, 10779), 'hummingbot.connector.exchange.huobi.huobi_order_book.HuobiOrderBook.snapshot_message_from_exchange', 'HuobiOrderBook.snapshot_message_from_exchange', (['snapshot'], 
{'timestamp': "snapshot['tick']['ts']", 'metadata': "{'trading_pair': trading_pair}"}), "(snapshot, timestamp=snapshot[\n 'tick']['ts'], metadata={'trading_pair': trading_pair})\n", (10689, 10779), False, 'from hummingbot.connector.exchange.huobi.huobi_order_book import HuobiOrderBook\n'), ((6014, 6060), 'hummingbot.connector.exchange.huobi.huobi_utils.convert_to_exchange_trading_pair', 'convert_to_exchange_trading_pair', (['trading_pair'], {}), '(trading_pair)\n', (6046, 6060), False, 'from hummingbot.connector.exchange.huobi.huobi_utils import convert_from_exchange_trading_pair, convert_to_exchange_trading_pair, build_api_factory\n'), ((6271, 6317), 'hummingbot.connector.exchange.huobi.huobi_utils.convert_to_exchange_trading_pair', 'convert_to_exchange_trading_pair', (['trading_pair'], {}), '(trading_pair)\n', (6303, 6317), False, 'from hummingbot.connector.exchange.huobi.huobi_utils import convert_from_exchange_trading_pair, convert_to_exchange_trading_pair, build_api_factory\n'), ((7379, 7420), 'hummingbot.core.web_assistant.connections.data_types.WSRequest', 'WSRequest', ([], {'payload': "{'pong': data['ping']}"}), "(payload={'pong': data['ping']})\n", (7388, 7420), False, 'from hummingbot.core.web_assistant.connections.data_types import RESTMethod, RESTRequest, RESTResponse, WSRequest\n'), ((3152, 3198), 'hummingbot.connector.exchange.huobi.huobi_utils.convert_to_exchange_trading_pair', 'convert_to_exchange_trading_pair', (['trading_pair'], {}), '(trading_pair)\n', (3184, 3198), False, 'from hummingbot.connector.exchange.huobi.huobi_utils import convert_from_exchange_trading_pair, convert_to_exchange_trading_pair, build_api_factory\n'), ((5926, 5972), 'hummingbot.connector.exchange.huobi.huobi_utils.convert_to_exchange_trading_pair', 'convert_to_exchange_trading_pair', (['trading_pair'], {}), '(trading_pair)\n', (5958, 5972), False, 'from hummingbot.connector.exchange.huobi.huobi_utils import convert_from_exchange_trading_pair, convert_to_exchange_trading_pair, build_api_factory\n'), ((6182, 6228), 'hummingbot.connector.exchange.huobi.huobi_utils.convert_to_exchange_trading_pair', 'convert_to_exchange_trading_pair', (['trading_pair'], {}), '(trading_pair)\n', (6214, 6228), False, 'from hummingbot.connector.exchange.huobi.huobi_utils import convert_from_exchange_trading_pair, convert_to_exchange_trading_pair, build_api_factory\n'), ((8950, 8998), 'hummingbot.connector.exchange.huobi.huobi_utils.convert_from_exchange_trading_pair', 'convert_from_exchange_trading_pair', (['trading_pair'], {}), '(trading_pair)\n', (8984, 8998), False, 'from hummingbot.connector.exchange.huobi.huobi_utils import convert_from_exchange_trading_pair, convert_to_exchange_trading_pair, build_api_factory\n')]
|
#!/usr/bin/env python3
import argparse
import logging
import varifier
def main(args=None):
parser = argparse.ArgumentParser(
prog="varifier",
usage="varifier <command> <options>",
description="varifier: variant call adjudication",
)
parser.add_argument("--version", action="version", version=varifier.__version__)
parser.add_argument(
"--debug",
help="More verbose logging, and less file cleaning",
action="store_true",
)
subparsers = parser.add_subparsers(title="Available commands", help="", metavar="")
# ---------------------- make_truth_vcf ------------------------------------
subparser_make_truth_vcf = subparsers.add_parser(
"make_truth_vcf",
help="Make truth VCF file",
usage="varifier make_truth_vcf [options] <truth_fasta> <ref_fasta> <outdir>",
description="Make truth VCF file",
)
subparser_make_truth_vcf.add_argument(
"truth_fasta", help="FASTA file of truth genome"
)
subparser_make_truth_vcf.add_argument(
"ref_fasta", help="FASTA file of reference genome"
)
subparser_make_truth_vcf.add_argument(
"--snps_only", help="Output SNPs only",
action="store_true",
)
subparser_make_truth_vcf.add_argument(
"--output_probes_in_VCF", help="If REF and ALT probes should be output in VCF",
action="store_true",
)
subparser_make_truth_vcf.add_argument(
"--detailed_VCF", help="Outputs all fields computed by varifier in the final VCF, instead of only GT",
action="store_true",
)
subparser_make_truth_vcf.add_argument(
"--max_recall_ref_len",
help="Do not include variants where REF length is more than this number. Default is no limit",
type=int,
metavar="INT",
)
subparser_make_truth_vcf.add_argument(
"--flank_length",
help="Length of sequence to add either side of variant when making probe sequences [%(default)s]",
type=int,
default=100,
metavar="INT",
)
subparser_make_truth_vcf.add_argument(
"--truth_mask",
help="BED file of truth genome regions to mask. Any variants in the VCF matching to the mask are flagged and will not count towards precision or recall if the output VCF is used with vcf_eval",
metavar="FILENAME",
)
subparser_make_truth_vcf.add_argument("outdir", help="Name of output directory")
subparser_make_truth_vcf.set_defaults(func=varifier.tasks.make_truth_vcf.run)
# ------------------------ vcf_eval ----------------------------------------
subparser_vcf_eval = subparsers.add_parser(
"vcf_eval",
help="Evaluate VCF file",
usage="varifier vcf_eval [options] <truth_fasta> <vcf_fasta> <vcf_file> <outdir>",
description="Evaluate VCF file",
)
subparser_vcf_eval.add_argument(
"--flank_length",
help="Length of sequence to add either side of variant when making probe sequences [%(default)s]",
type=int,
default=100,
metavar="INT",
)
subparser_vcf_eval.add_argument(
"--force", help="Replace outdir if it already exists", action="store_true"
)
subparser_vcf_eval.add_argument(
"--ref_mask",
help="BED file of ref regions to mask. Any variants in the VCF overlapping the mask are removed at the start of the pipeline",
metavar="FILENAME",
)
subparser_vcf_eval.add_argument(
"--truth_mask",
help="BED file of truth genome regions to mask. Any variants in the VCF matching to the mask are flagged and do not count towards precision or recall",
metavar="FILENAME",
)
subparser_vcf_eval.add_argument(
"--truth_vcf",
help="VCF file of variant calls between vcf_fasta and truth_fasta, where reference of this VCF file is truth_fasta. If provided, used to calculate recall",
metavar="FILENAME",
)
subparser_vcf_eval.add_argument(
"--max_recall_ref_len",
help="For recall, do not look for expected variants where REF length is more than this number. Default is no limit. This option will not work if you use --truth_vcf",
type=int,
metavar="INT",
)
subparser_vcf_eval.add_argument(
"--use_ref_calls",
help="Include 0/0 genotype calls when calculating TPs and precision. By default they are ignored",
action="store_true",
)
subparser_vcf_eval.add_argument("truth_fasta", help="FASTA file of truth genome")
subparser_vcf_eval.add_argument(
"vcf_fasta", help="FASTA file corresponding to vcf_file"
)
subparser_vcf_eval.add_argument("vcf_in", help="VCF file to evaluate")
subparser_vcf_eval.add_argument("outdir", help="Name of output directory")
subparser_vcf_eval.set_defaults(func=varifier.tasks.vcf_eval.run)
args = parser.parse_args()
log = logging.getLogger()
if args.debug:
log.setLevel(logging.DEBUG)
else:
log.setLevel(logging.INFO)
if hasattr(args, "func"):
args.func(args)
else:
parser.print_help()
if __name__ == "__main__":
main()
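# Hedged usage sketch (editor's addition): typical shell invocations of the CLI defined
# above; all file and directory names are placeholders.
#
#     varifier make_truth_vcf --snps_only truth.fasta ref.fasta truth_out/
#     varifier vcf_eval --flank_length 150 truth.fasta sample.fasta calls.vcf eval_out/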
|
[
"argparse.ArgumentParser",
"logging.getLogger"
] |
[((107, 246), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'prog': '"""varifier"""', 'usage': '"""varifier <command> <options>"""', 'description': '"""varifier: variant call adjudication"""'}), "(prog='varifier', usage=\n 'varifier <command> <options>', description=\n 'varifier: variant call adjudication')\n", (130, 246), False, 'import argparse\n'), ((4943, 4962), 'logging.getLogger', 'logging.getLogger', ([], {}), '()\n', (4960, 4962), False, 'import logging\n')]
|
# Generated by Django 3.0.2 on 2020-01-13 19:00
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('account', '0001_initial'),
]
operations = [
migrations.AddField(
model_name='account',
name='statut',
field=models.CharField(choices=[('PROFESSOR', 'PROFESSOR'), ('STUDENT', 'STUDENT')], default='STUDENT', max_length=10),
),
]
|
[
"django.db.models.CharField"
] |
[((324, 441), 'django.db.models.CharField', 'models.CharField', ([], {'choices': "[('PROFESSOR', 'PROFESSOR'), ('STUDENT', 'STUDENT')]", 'default': '"""STUDENT"""', 'max_length': '(10)'}), "(choices=[('PROFESSOR', 'PROFESSOR'), ('STUDENT', 'STUDENT'\n )], default='STUDENT', max_length=10)\n", (340, 441), False, 'from django.db import migrations, models\n')]
|
import pytest
from decharges.parametre.models import ParametresDApplication
pytestmark = pytest.mark.django_db
def test_instanciate_parameters():
params = ParametresDApplication.objects.create()
assert f"{params}" == "Paramètres de l'application"
|
[
"decharges.parametre.models.ParametresDApplication.objects.create"
] |
[((163, 202), 'decharges.parametre.models.ParametresDApplication.objects.create', 'ParametresDApplication.objects.create', ([], {}), '()\n', (200, 202), False, 'from decharges.parametre.models import ParametresDApplication\n')]
|
import numpy as np
import torch
import torch.nn as nn
# Adapted from https://github.com/gpeyre/SinkhornAutoDiff
# Adapted from https://github.com/gpeyre/SinkhornAutoDiff/blob/master/sinkhorn_pointcloud.py
class GTOT(nn.Module):
r"""
    GTOT implementation: an entropy-regularised (Sinkhorn) optimal transport
    distance between two sets of node features, optionally restricted by an
    adjacency matrix A and a padding mask.
"""
def __init__(self, eps=0.1, thresh=0.1, max_iter=100, reduction='none'):
super(GTOT, self).__init__()
self.eps = eps
self.max_iter = max_iter
self.reduction = reduction
self.thresh = thresh
self.mask_matrix = None
def marginal_prob_unform(self, N_s=None, N_t=None, mask=None, ):
if mask is not None:
mask = mask.float()
# uniform distribution
mask_mean = (1 / mask.sum(1)).unsqueeze(1)
mu = mask * mask_mean # 1/n
# mu = mu.unsqueeze(2)
else:
mu = torch.ones(self.bs, N_s) / N_s
nu = mu.clone().detach()
return mu, nu
def forward(self, x, y, C=None, A=None, mask=None):
# The Sinkhorn algorithm takes as input three variables :
if C is None:
C = self._cost_matrix(x, y) # Wasserstein cost function
C = C / C.max()
if A is not None:
if A.type().startswith('torch.cuda.sparse'):
self.sparse = True
C = A.to_dense() * C
else:
self.sparse = False
C = A * C
N_s = x.shape[-2]
N_t = y.shape[-2]
if x.dim() == 2:
self.bs = 1
else:
self.bs = x.shape[0]
# both marginals are fixed with equal weights
if mask is None:
mu = torch.empty(self.bs, N_s, dtype=torch.float, device=C.device,
requires_grad=False).fill_(1.0 / N_s).squeeze()
nu = torch.empty(self.bs, N_t, dtype=torch.float, device=C.device,
requires_grad=False).fill_(1.0 / N_t).squeeze()
else:
mu, nu = self.marginal_prob_unform(N_s=N_s, N_t=N_t, mask=mask)
u = torch.zeros_like(mu)
v = torch.zeros_like(nu)
# To check if algorithm terminates because of threshold
# or max iterations reached
actual_nits = 0
# Stopping criterion
thresh = self.thresh
# Sinkhorn iterations
for i in range(self.max_iter):
u1 = u # useful to check the update
if mask is None:
u = self.eps * (torch.log(mu + 1e-8) - self.log_sum(self.exp_M(C, u, v, A=A), dim=-1)) + u
v = self.eps * (
torch.log(nu + 1e-8) - self.log_sum(self.exp_M(C, u, v, A=A).transpose(-2, -1), dim=-1)) + v
else:
u = self.eps * (torch.log(mu + 1e-8) - self.log_sum(self.exp_M(C, u, v, A=A), dim=-1)) + u
u = mask * u
v = self.eps * (
torch.log(nu + 1e-8) - self.log_sum(self.exp_M(C, u, v, A=A).transpose(-2, -1), dim=-1)) + v
v = mask * v
# err = (u - u1).abs().sum(-1).mean()
err = (u - u1).abs().sum(-1).max()
actual_nits += 1
if err.item() < thresh:
break
U, V = u, v
pi = self.exp_M(C, U, V, A=A)
cost = torch.sum(pi * C, dim=(-2, -1))
if self.reduction == 'mean':
cost = cost.mean()
elif self.reduction == 'sum':
cost = cost.sum()
if torch.isnan(cost.sum()):
print(pi)
raise
return cost, pi, C
def M(self, C, u, v, A=None):
"Modified cost for logarithmic updates"
"$M_{ij} = (-c_{ij} + u_i + v_j) / \epsilon$"
S = (-C + u.unsqueeze(-1) + v.unsqueeze(-2)) / self.eps
return S
def exp_M(self, C, u, v, A=None):
if A is not None:
if self.sparse:
a = A.to_dense()
S = torch.exp(self.M(C, u, v)).masked_fill(mask = (1-a).to(torch.bool),value=0)
else:
S = torch.exp(self.M(C, u, v)).masked_fill(mask = (1-A).to(torch.bool),value=0)
return S
elif self.mask_matrix is not None:
return self.mask_matrix * torch.exp(self.M(C, u, v))
else:
return torch.exp(self.M(C, u, v))
def log_sum(self, input_tensor, dim=-1, mask=None):
s = torch.sum(input_tensor, dim=dim)
out = torch.log(1e-8 + s)
if torch.isnan(out.sum()):
raise
if mask is not None:
out = mask * out
return out
def cost_matrix_batch_torch(self, x, y, mask=None):
"Returns the cosine distance batchwise"
# x is the source feature: bs * d * m
# y is the target feature: bs * d * m
# return: bs * n * m
# print(x.size())
bs = list(x.size())[0]
D = x.size(1)
assert (x.size(1) == y.size(1))
x = x.contiguous().view(bs, D, -1) # bs * d * m
x = x.div(torch.norm(x, p=2, dim=1, keepdim=True) + 1e-12)
y = y.div(torch.norm(y, p=2, dim=1, keepdim=True) + 1e-12)
cos_dis = torch.bmm(torch.transpose(x, 1, 2), y) # .transpose(1,2)
cos_dis = 1 - cos_dis # to minimize this value
# cos_dis = - cos_dis
if mask is not None:
mask0 = mask.unsqueeze(2).clone().float()
self.mask_matrix = torch.bmm(mask0, (mask0.transpose(2, 1))) # torch.ones_like(C)
cos_dis = cos_dis * self.mask_matrix
if torch.isnan(cos_dis.sum()):
raise
return cos_dis.transpose(2, 1)
def cost_matrix_torch(self, x, y):
"Returns the cosine distance"
# x is the image embedding
# y is the text embedding
D = x.size(0)
x = x.view(D, -1)
assert (x.size(0) == y.size(0))
x = x.div(torch.norm(x, p=2, dim=0, keepdim=True) + 1e-12)
y = y.div(torch.norm(y, p=2, dim=0, keepdim=True) + 1e-12)
cos_dis = torch.mm(torch.transpose(y, 0, 1), x) # .t()
cos_dis = 1 - cos_dis # to minimize this value
return cos_dis
@staticmethod
def _cost_matrix(x, y, p=2):
"Returns the matrix of $|x_i-y_j|^p$."
x_col = x.unsqueeze(-2)
y_lin = y.unsqueeze(-3)
C = torch.sum((torch.abs(x_col - y_lin)) ** p, -1)
return C
@staticmethod
def ave(u, u1, tau):
"Barycenter subroutine, used by kinetic acceleration through extrapolation."
return tau * u + (1 - tau) * u1
if __name__ == '__main__':
def random_A(n, dense_rate=0.5):
d = n
rand_mat = torch.rand(n, d)
        k = round(dense_rate * d)  # roughly dense_rate * d entries per row are kept (set to 1)
k_th_quant = torch.topk(rand_mat, k, largest=False)[0][:, -1:]
bool_tensor = rand_mat <= k_th_quant
desired_tensor = torch.where(bool_tensor, torch.tensor(1), torch.tensor(0))
return desired_tensor
n = 5
batch_size = 2
a = np.array([[[i, 0] for i in range(n)] for b in range(batch_size)])
b = np.array([[[i, b + 1] for i in range(n)] for b in range(batch_size)])
# Wrap with torch tensors
x = torch.tensor(a, dtype=torch.float)
y = torch.tensor(b, dtype=torch.float)
x = x.cuda()
y = y.cuda()
for i in np.array(range(2, 11)) * 0.1:
dense_rate = i
print('Adjacent matrix dense_rate', dense_rate, end=' ')
A = random_A(n, dense_rate=dense_rate)
A[range(A.shape[0]), range(A.shape[0])] = 1
# A = torch.eye(n)
print(A)
A = A.repeat(batch_size, 1, 1)
A = A.cuda().to_sparse()
# A=None
sinkhorn = GTOT(eps=0.1, max_iter=100, reduction=None)
dist, P, C = sinkhorn(x, y, A=A)
print("Sinkhorn distances: ", dist)
|
[
"torch.ones",
"torch.topk",
"torch.zeros_like",
"torch.norm",
"torch.empty",
"torch.abs",
"torch.rand",
"torch.sum",
"torch.log",
"torch.tensor",
"torch.transpose"
] |
[((7204, 7238), 'torch.tensor', 'torch.tensor', (['a'], {'dtype': 'torch.float'}), '(a, dtype=torch.float)\n', (7216, 7238), False, 'import torch\n'), ((7247, 7281), 'torch.tensor', 'torch.tensor', (['b'], {'dtype': 'torch.float'}), '(b, dtype=torch.float)\n', (7259, 7281), False, 'import torch\n'), ((2077, 2097), 'torch.zeros_like', 'torch.zeros_like', (['mu'], {}), '(mu)\n', (2093, 2097), False, 'import torch\n'), ((2110, 2130), 'torch.zeros_like', 'torch.zeros_like', (['nu'], {}), '(nu)\n', (2126, 2130), False, 'import torch\n'), ((3314, 3345), 'torch.sum', 'torch.sum', (['(pi * C)'], {'dim': '(-2, -1)'}), '(pi * C, dim=(-2, -1))\n', (3323, 3345), False, 'import torch\n'), ((4401, 4433), 'torch.sum', 'torch.sum', (['input_tensor'], {'dim': 'dim'}), '(input_tensor, dim=dim)\n', (4410, 4433), False, 'import torch\n'), ((4448, 4468), 'torch.log', 'torch.log', (['(1e-08 + s)'], {}), '(1e-08 + s)\n', (4457, 4468), False, 'import torch\n'), ((6639, 6655), 'torch.rand', 'torch.rand', (['n', 'd'], {}), '(n, d)\n', (6649, 6655), False, 'import torch\n'), ((5162, 5186), 'torch.transpose', 'torch.transpose', (['x', '(1)', '(2)'], {}), '(x, 1, 2)\n', (5177, 5186), False, 'import torch\n'), ((6015, 6039), 'torch.transpose', 'torch.transpose', (['y', '(0)', '(1)'], {}), '(y, 0, 1)\n', (6030, 6039), False, 'import torch\n'), ((6919, 6934), 'torch.tensor', 'torch.tensor', (['(1)'], {}), '(1)\n', (6931, 6934), False, 'import torch\n'), ((6936, 6951), 'torch.tensor', 'torch.tensor', (['(0)'], {}), '(0)\n', (6948, 6951), False, 'import torch\n'), ((871, 895), 'torch.ones', 'torch.ones', (['self.bs', 'N_s'], {}), '(self.bs, N_s)\n', (881, 895), False, 'import torch\n'), ((5018, 5057), 'torch.norm', 'torch.norm', (['x'], {'p': '(2)', 'dim': '(1)', 'keepdim': '(True)'}), '(x, p=2, dim=1, keepdim=True)\n', (5028, 5057), False, 'import torch\n'), ((5085, 5124), 'torch.norm', 'torch.norm', (['y'], {'p': '(2)', 'dim': '(1)', 'keepdim': '(True)'}), '(y, p=2, dim=1, keepdim=True)\n', (5095, 5124), False, 'import torch\n'), ((5872, 5911), 'torch.norm', 'torch.norm', (['x'], {'p': '(2)', 'dim': '(0)', 'keepdim': '(True)'}), '(x, p=2, dim=0, keepdim=True)\n', (5882, 5911), False, 'import torch\n'), ((5939, 5978), 'torch.norm', 'torch.norm', (['y'], {'p': '(2)', 'dim': '(0)', 'keepdim': '(True)'}), '(y, p=2, dim=0, keepdim=True)\n', (5949, 5978), False, 'import torch\n'), ((6317, 6341), 'torch.abs', 'torch.abs', (['(x_col - y_lin)'], {}), '(x_col - y_lin)\n', (6326, 6341), False, 'import torch\n'), ((6774, 6812), 'torch.topk', 'torch.topk', (['rand_mat', 'k'], {'largest': '(False)'}), '(rand_mat, k, largest=False)\n', (6784, 6812), False, 'import torch\n'), ((1679, 1766), 'torch.empty', 'torch.empty', (['self.bs', 'N_s'], {'dtype': 'torch.float', 'device': 'C.device', 'requires_grad': '(False)'}), '(self.bs, N_s, dtype=torch.float, device=C.device, requires_grad\n =False)\n', (1690, 1766), False, 'import torch\n'), ((1835, 1922), 'torch.empty', 'torch.empty', (['self.bs', 'N_t'], {'dtype': 'torch.float', 'device': 'C.device', 'requires_grad': '(False)'}), '(self.bs, N_t, dtype=torch.float, device=C.device, requires_grad\n =False)\n', (1846, 1922), False, 'import torch\n'), ((2494, 2515), 'torch.log', 'torch.log', (['(mu + 1e-08)'], {}), '(mu + 1e-08)\n', (2503, 2515), False, 'import torch\n'), ((2626, 2647), 'torch.log', 'torch.log', (['(nu + 1e-08)'], {}), '(nu + 1e-08)\n', (2635, 2647), False, 'import torch\n'), ((2769, 2790), 'torch.log', 'torch.log', (['(mu + 1e-08)'], {}), '(mu + 1e-08)\n', (2778, 2790), False, 
'import torch\n'), ((2930, 2951), 'torch.log', 'torch.log', (['(nu + 1e-08)'], {}), '(nu + 1e-08)\n', (2939, 2951), False, 'import torch\n')]
|
from admin_app_config import db
from models import (User, HazardSummary, HazardLocation)
from views.home_view import HomeView
from views.login_view import LoginView
from views.logout_view import LogoutView
from views.user_view import UserView
from views.mobile_view import (MobileLoginView, MobileView)
from views.user_dash_view import UserDashView
from views.business_dash_view import BusinessDashView
from views.hazard_summary_view import HazardSummaryView
from views.hazard_location_view import HazardLocationView
def add_admin_views(admin, app):
# Home View
admin.add_view(HomeView(name='Home', endpoint='home'))
# Mobile view handling
admin.add_view(MobileLoginView(
name='Mobile Login', endpoint='mobilelogin'))
admin.add_view(MobileView(name='Mobile', endpoint='mobile'))
# User dash view handling
admin.add_view(UserDashView(name='User Portal', endpoint='userdash',
app=app))
admin.add_view(BusinessDashView(name='Business Portal',
endpoint='businessdash', app=app))
# Admin portal views
admin.add_view(UserView(User, db.session, name='Users'))
admin.add_view(HazardSummaryView(
HazardSummary, db.session, name='Hazard Summary'))
admin.add_view(HazardLocationView(
HazardLocation, db.session, name='Hazard Locations'))
# Login and Logout views
admin.add_view(LoginView(name='Login', endpoint='login'))
admin.add_view(LogoutView(name='Logout', endpoint='logout'))
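# A minimal wiring sketch, not part of the original module; it assumes a
# standard Flask + Flask-Admin setup and that the local imports above resolve.
# The admin title below is a hypothetical name.
if __name__ == '__main__':
    from flask import Flask
    from flask_admin import Admin
    app = Flask(__name__)
    admin = Admin(app, name='Hazard Admin')  # hypothetical site title
    add_admin_views(admin, app)
    app.run(debug=True)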
|
[
"views.mobile_view.MobileLoginView",
"views.mobile_view.MobileView",
"views.hazard_summary_view.HazardSummaryView",
"views.business_dash_view.BusinessDashView",
"views.home_view.HomeView",
"views.login_view.LoginView",
"views.user_view.UserView",
"views.logout_view.LogoutView",
"views.user_dash_view.UserDashView",
"views.hazard_location_view.HazardLocationView"
] |
[((587, 625), 'views.home_view.HomeView', 'HomeView', ([], {'name': '"""Home"""', 'endpoint': '"""home"""'}), "(name='Home', endpoint='home')\n", (595, 625), False, 'from views.home_view import HomeView\n'), ((674, 734), 'views.mobile_view.MobileLoginView', 'MobileLoginView', ([], {'name': '"""Mobile Login"""', 'endpoint': '"""mobilelogin"""'}), "(name='Mobile Login', endpoint='mobilelogin')\n", (689, 734), False, 'from views.mobile_view import MobileLoginView, MobileView\n'), ((764, 808), 'views.mobile_view.MobileView', 'MobileView', ([], {'name': '"""Mobile"""', 'endpoint': '"""mobile"""'}), "(name='Mobile', endpoint='mobile')\n", (774, 808), False, 'from views.mobile_view import MobileLoginView, MobileView\n'), ((860, 922), 'views.user_dash_view.UserDashView', 'UserDashView', ([], {'name': '"""User Portal"""', 'endpoint': '"""userdash"""', 'app': 'app'}), "(name='User Portal', endpoint='userdash', app=app)\n", (872, 922), False, 'from views.user_dash_view import UserDashView\n'), ((962, 1036), 'views.business_dash_view.BusinessDashView', 'BusinessDashView', ([], {'name': '"""Business Portal"""', 'endpoint': '"""businessdash"""', 'app': 'app'}), "(name='Business Portal', endpoint='businessdash', app=app)\n", (978, 1036), False, 'from views.business_dash_view import BusinessDashView\n'), ((1119, 1159), 'views.user_view.UserView', 'UserView', (['User', 'db.session'], {'name': '"""Users"""'}), "(User, db.session, name='Users')\n", (1127, 1159), False, 'from views.user_view import UserView\n'), ((1180, 1247), 'views.hazard_summary_view.HazardSummaryView', 'HazardSummaryView', (['HazardSummary', 'db.session'], {'name': '"""Hazard Summary"""'}), "(HazardSummary, db.session, name='Hazard Summary')\n", (1197, 1247), False, 'from views.hazard_summary_view import HazardSummaryView\n'), ((1277, 1348), 'views.hazard_location_view.HazardLocationView', 'HazardLocationView', (['HazardLocation', 'db.session'], {'name': '"""Hazard Locations"""'}), "(HazardLocation, db.session, name='Hazard Locations')\n", (1295, 1348), False, 'from views.hazard_location_view import HazardLocationView\n'), ((1408, 1449), 'views.login_view.LoginView', 'LoginView', ([], {'name': '"""Login"""', 'endpoint': '"""login"""'}), "(name='Login', endpoint='login')\n", (1417, 1449), False, 'from views.login_view import LoginView\n'), ((1470, 1514), 'views.logout_view.LogoutView', 'LogoutView', ([], {'name': '"""Logout"""', 'endpoint': '"""logout"""'}), "(name='Logout', endpoint='logout')\n", (1480, 1514), False, 'from views.logout_view import LogoutView\n')]
|
import os
from plugin import connection
from cloudify import ctx
from cloudify.exceptions import NonRecoverableError
from cloudify.decorators import operation
# TODO Are methods like `_get_path_to_key_file()` necessary, since we do not
# save keys on the local filesystem?
@operation
def creation_validation(**_):
""" This validates all nodes before bootstrap.
"""
key_file = _get_path_to_key_file()
key_file_in_filesystem = _search_for_key_file(key_file)
if ctx.node.properties['use_external_resource']:
if not key_file_in_filesystem:
raise NonRecoverableError(
'External resource, but the key file does not exist locally.')
try:
_get_key_pair_by_id(ctx.node.properties['resource_id'])
except NonRecoverableError as e:
raise NonRecoverableError(
'External resource, '
'but the key pair does not exist in the account: '
'{0}'.format(str(e)))
else:
if key_file_in_filesystem:
raise NonRecoverableError(
'Not external resource, '
'but the key file exists locally.')
try:
_get_key_pair_by_id(ctx.node.properties['resource_id'])
except NonRecoverableError:
pass
else:
raise NonRecoverableError(
'Not external resource, '
'but the key pair exists in the account.')
@operation
def create(**kwargs):
"""Creates a keypair."""
conn = connection.MistConnectionClient()
if _create_external_keypair():
return
key_pair_name = get_resource_id()
ctx.instance.runtime_properties["key"] = key_pair_name
ctx.instance.runtime_properties["mist_type"] = "keypair"
kp = conn.client.keys(search=key_pair_name)
if len(kp):
kp = kp[0]
return # if key already in mist.io, skip
else:
key_pair_name = ctx.node.properties["key_name"] # commented out in plugin?
private = conn.client.generate_key()
conn.client.add_key(key_name=key_pair_name, private=private)
conn.client.update_keys()
kp = conn.client.keys(search=key_pair_name)[0]
_save_key_pair(kp)
@operation
def delete(**kwargs):
"""Deletes a keypair."""
conn = connection.MistConnectionClient()
key_pair_name = get_external_resource_id_or_raise('delete key pair')
if _delete_external_keypair():
return
if key_pair_name:
try:
conn.client.keys(search=key_pair_name)[0].delete()
except Exception as exc:
raise NonRecoverableError('{0}'.format(str(exc)))
unassign_runtime_property_from_resource('mist_resource_id')
_delete_key_file()
ctx.logger.info('Deleted key pair: {0}'.format(key_pair_name))
else:
ctx.logger.info('Not deleting key pair from account')
def _create_external_keypair():
"""If use_external_resource is True, this will set the runtime_properties,
and then exit.
:param ctx: The Cloudify context.
:return False: Cloudify resource. Continue operation.
:return True: External resource. Set runtime_properties. Ignore operation.
:raises NonRecoverableError: If unable to locate the existing key file.
"""
if not use_external_resource(ctx.node.properties):
return False
ctx.instance.runtime_properties["mist_type"] = "keypair"
key_pair_name = ctx.node.properties['resource_id']
key_pair_in_account = _get_key_pair_by_id(key_pair_name)
key_path_in_filesystem = _get_path_to_key_file()
ctx.logger.debug(
'Path to key file: {0}.'.format(key_path_in_filesystem))
if not key_pair_in_account:
raise NonRecoverableError(
'External resource, but the key pair is not in the account.')
if not _search_for_key_file(key_path_in_filesystem):
_save_key_pair(key_pair_in_account)
ctx.instance.runtime_properties["key_id"] = key_pair_name
set_external_resource_id(key_pair_name)
return True
def _delete_external_keypair():
"""If use_external_resource is True, this will delete the runtime_properties,
and then exit.
:param ctx: The Cloudify context.
:return False: Cloudify resource. Continue operation.
:return True: External resource. Unset runtime_properties.
Ignore operation.
"""
if not use_external_resource(ctx.node.properties):
return False
ctx.logger.info('External resource. Not deleting keypair.')
unassign_runtime_property_from_resource(
"mist_resource_id")
return True
def _delete_key_file():
""" Deletes the key pair in the file specified in the blueprint.
:param ctx: The Cloudify context.
:raises NonRecoverableError: If unable to delete the local key file.
"""
key_path = _get_path_to_key_file()
if _search_for_key_file(key_path):
try:
os.remove(key_path)
except OSError as e:
raise NonRecoverableError(
'Unable to delete key pair: {0}.'
.format(str(e)))
def _save_key_pair(key_pair_object):
"""Saves a keypair to the filesystem.
:param key_pair_object: The key pair object as returned from create.
:param ctx: The Cloudify Context.
:raises NonRecoverableError: If private_key_path node property not set.
:raises NonRecoverableError: If Unable to save key file locally.
"""
ctx.logger.debug('Attempting to save the key_pair_object.')
if not key_pair_object.private:
raise NonRecoverableError(
'Cannot save key. KeyPair contains no private key.')
file_path = _get_path_to_key_file()
if not file_path:
return
if os.path.exists(file_path):
raise NonRecoverableError(
'{0} already exists, it will not be overwritten.'.format(
file_path))
fp = open(file_path, 'wb')
fp.write(key_pair_object.private)
fp.close()
_set_key_file_permissions(file_path)
def _set_key_file_permissions(key_file):
if os.access(key_file, os.W_OK):
os.chmod(key_file, 0o600)
else:
ctx.logger.error(
'Unable to set permissions key file: {0}.'.format(key_file))
def _get_key_pair_by_id(key_pair_id):
"""Returns the key pair object for a given key pair id.
:param key_pair_id: The ID of a keypair.
    :returns The mist keypair object, or None if Mist finds no matching key pair.
"""
conn = connection.MistConnectionClient()
key_pairs = conn.client.keys(search=key_pair_id)
return key_pairs[0] if key_pairs else None
def _get_path_to_key_file():
"""Gets the path to the key file.
:param ctx: The Cloudify context.
:returns key_path: Path to the key file.
:raises NonRecoverableError: If private_key_path is not set.
"""
if not ctx.node.properties['private_key_path']:
ctx.logger.error('No private_key_path supplied. Moving on...')
return
return os.path.expanduser(ctx.node.properties['private_key_path'])
def _search_for_key_file(path_to_key_file):
""" Checks if the key_path exists in the local filesystem.
:param key_path: The path to the key pair file.
:return boolean if key_path exists (True) or not.
"""
return True if os.path.exists(path_to_key_file) else False
def get_resource_id():
"""Returns the resource id, if the user doesn't provide one,
this will create one for them.
:param node_properties: The node properties dictionary.
:return resource_id: A string.
"""
if ctx.node.properties['resource_id']:
return ctx.node.properties['resource_id']
elif ctx.node.properties['private_key_path']:
directory_path, filename = \
os.path.split(ctx.node.properties['private_key_path'])
resource_id = filename.split('.')[0]
return resource_id
def get_external_resource_id_or_raise(operation):
"""Checks if the EXTERNAL_RESOURCE_ID runtime_property is set and returns it.
:param operation: A string representing what is happening.
:param ctx_instance: The CTX Node-Instance Context.
:param ctx: The Cloudify ctx context.
    :returns The EXTERNAL_RESOURCE_ID runtime_property for a CTX Instance,
    or None (after logging an error) if it has not been set.
"""
ctx.logger.debug(
'Checking if {0} in instance runtime_properties, for {0} operation.'
.format("mist_resource_id", operation))
if "mist_resource_id" not in ctx.instance.runtime_properties:
ctx.logger.error('Cannot {0}, because {1} is not assigned.'.format(
operation, "mist_resource_id"))
return
return ctx.instance.runtime_properties["mist_resource_id"]
def unassign_runtime_property_from_resource(property_name):
"""Pops a runtime_property and reports to debug.
:param property_name: The runtime_property to remove.
:param ctx_instance: The CTX Node-Instance Context.
:param ctx: The Cloudify ctx context.
"""
value = ctx.instance.runtime_properties.pop(property_name)
ctx.logger.debug(
'Unassigned {0} runtime property: {1}'.format(property_name, value))
def is_external_resource(properties):
return is_external_resource_by_properties(properties)
def is_external_resource_by_properties(properties):
return 'use_external_resource' in properties and \
properties['use_external_resource']
def use_external_resource(properties):
if not properties.get('use_external_resource'):
return None
if not "resource_id" in properties or not properties["resource_id"]:
raise NonRecoverableError(
'External resource, but resource not set.')
ctx.logger.debug(
'Resource Id: {0}'.format(properties["resource_id"]))
return True
def set_external_resource_id(value):
"""Sets the EXTERNAL_RESOURCE_ID runtime_property for a Node-Instance.
"""
ctx.instance.runtime_properties["mist_resource_id"] = value
|
[
"os.remove",
"cloudify.exceptions.NonRecoverableError",
"os.chmod",
"cloudify.ctx.logger.error",
"cloudify.ctx.logger.debug",
"os.path.exists",
"cloudify.ctx.logger.info",
"cloudify.ctx.instance.runtime_properties.pop",
"os.path.split",
"os.path.expanduser",
"os.access",
"plugin.connection.MistConnectionClient"
] |
[((1535, 1568), 'plugin.connection.MistConnectionClient', 'connection.MistConnectionClient', ([], {}), '()\n', (1566, 1568), False, 'from plugin import connection\n'), ((2309, 2342), 'plugin.connection.MistConnectionClient', 'connection.MistConnectionClient', ([], {}), '()\n', (2340, 2342), False, 'from plugin import connection\n'), ((4458, 4517), 'cloudify.ctx.logger.info', 'ctx.logger.info', (['"""External resource. Not deleting keypair."""'], {}), "('External resource. Not deleting keypair.')\n", (4473, 4517), False, 'from cloudify import ctx\n'), ((5447, 5506), 'cloudify.ctx.logger.debug', 'ctx.logger.debug', (['"""Attempting to save the key_pair_object."""'], {}), "('Attempting to save the key_pair_object.')\n", (5463, 5506), False, 'from cloudify import ctx\n'), ((5729, 5754), 'os.path.exists', 'os.path.exists', (['file_path'], {}), '(file_path)\n', (5743, 5754), False, 'import os\n'), ((6066, 6094), 'os.access', 'os.access', (['key_file', 'os.W_OK'], {}), '(key_file, os.W_OK)\n', (6075, 6094), False, 'import os\n'), ((6511, 6544), 'plugin.connection.MistConnectionClient', 'connection.MistConnectionClient', ([], {}), '()\n', (6542, 6544), False, 'from plugin import connection\n'), ((7023, 7082), 'os.path.expanduser', 'os.path.expanduser', (["ctx.node.properties['private_key_path']"], {}), "(ctx.node.properties['private_key_path'])\n", (7041, 7082), False, 'import os\n'), ((9079, 9129), 'cloudify.ctx.instance.runtime_properties.pop', 'ctx.instance.runtime_properties.pop', (['property_name'], {}), '(property_name)\n', (9114, 9129), False, 'from cloudify import ctx\n'), ((2846, 2899), 'cloudify.ctx.logger.info', 'ctx.logger.info', (['"""Not deleting key pair from account"""'], {}), "('Not deleting key pair from account')\n", (2861, 2899), False, 'from cloudify import ctx\n'), ((3730, 3816), 'cloudify.exceptions.NonRecoverableError', 'NonRecoverableError', (['"""External resource, but the key pair is not in the account."""'], {}), "(\n 'External resource, but the key pair is not in the account.')\n", (3749, 3816), False, 'from cloudify.exceptions import NonRecoverableError\n'), ((5558, 5630), 'cloudify.exceptions.NonRecoverableError', 'NonRecoverableError', (['"""Cannot save key. KeyPair contains no private key."""'], {}), "('Cannot save key. KeyPair contains no private key.')\n", (5577, 5630), False, 'from cloudify.exceptions import NonRecoverableError\n'), ((6104, 6127), 'os.chmod', 'os.chmod', (['key_file', '(384)'], {}), '(key_file, 384)\n', (6112, 6127), False, 'import os\n'), ((6933, 6995), 'cloudify.ctx.logger.error', 'ctx.logger.error', (['"""No private_key_path supplied. Moving on..."""'], {}), "('No private_key_path supplied. 
Moving on...')\n", (6949, 6995), False, 'from cloudify import ctx\n'), ((7326, 7358), 'os.path.exists', 'os.path.exists', (['path_to_key_file'], {}), '(path_to_key_file)\n', (7340, 7358), False, 'import os\n'), ((9681, 9744), 'cloudify.exceptions.NonRecoverableError', 'NonRecoverableError', (['"""External resource, but resource not set."""'], {}), "('External resource, but resource not set.')\n", (9700, 9744), False, 'from cloudify.exceptions import NonRecoverableError\n'), ((589, 676), 'cloudify.exceptions.NonRecoverableError', 'NonRecoverableError', (['"""External resource, but the key file does not exist locally."""'], {}), "(\n 'External resource, but the key file does not exist locally.')\n", (608, 676), False, 'from cloudify.exceptions import NonRecoverableError\n'), ((1056, 1134), 'cloudify.exceptions.NonRecoverableError', 'NonRecoverableError', (['"""Not external resource, but the key file exists locally."""'], {}), "('Not external resource, but the key file exists locally.')\n", (1075, 1134), False, 'from cloudify.exceptions import NonRecoverableError\n'), ((1337, 1427), 'cloudify.exceptions.NonRecoverableError', 'NonRecoverableError', (['"""Not external resource, but the key pair exists in the account."""'], {}), "(\n 'Not external resource, but the key pair exists in the account.')\n", (1356, 1427), False, 'from cloudify.exceptions import NonRecoverableError\n'), ((4926, 4945), 'os.remove', 'os.remove', (['key_path'], {}), '(key_path)\n', (4935, 4945), False, 'import os\n'), ((7791, 7845), 'os.path.split', 'os.path.split', (["ctx.node.properties['private_key_path']"], {}), "(ctx.node.properties['private_key_path'])\n", (7804, 7845), False, 'import os\n')]
|
import numpy as np
import sys
from collections import Counter
class CFeval(object):
"""Classification evaluator class"""
def __init__(self, metrics, reshapeDims, classes):
"""
# Arguments
metrics: dictionary of metrics to be evaluated, currently supports only classification accuracy
reshapeDims: list of the reshape dimensions of the image
classes: integer representing the number of classes
"""
super(CFeval, self).__init__()
self.metrics = metrics
self.avgAcc = []
self.runThrough = False
def reset(self):
self.avgAcc = []
def evaluate(self, remoteOut, classValues):
"""Evaluates the predictions produced by the model in the cloud.
# Arguments
remoteOut: numpy ndarray containing the predictions of the model in the cloud
classValues: numpy array containing the ground truth labels
"""
predictions = np.argmax(remoteOut, axis=1)
self.avgAcc.append(np.sum(np.equal(predictions, classValues))/classValues.shape[0])
def simRes(self):
"""Returns the mean of the classification accuracies over all batches of predictions.
"""
self.avgAcc = np.array(self.avgAcc)
return [np.mean(self.avgAcc)]
class ODeval(object):
"""Object detection evaluator class."""
def __init__(self, metrics, reshapeDims, classes):
"""
# Arguments
metrics: dictionary of metrics to be evaluated, currently supports only mean average precision
reshapeDims: list of the reshape dimensions of the image
classes: integer representing the number of classes
"""
super(ODeval, self).__init__()
self.metrics = metrics
self.iou = metrics['map']['iou'] #iterate through for loop for multiple values
self.reshapeDims = reshapeDims
self.n_classes = classes
self.pred_format = {'class_id': 0, 'conf': 1, 'xmin': 2, 'ymin': 3, 'xmax': 4, 'ymax': 5}
self.gt_format = {'class_id': 0, 'xmin': 1, 'ymin': 2, 'xmax': 3, 'ymax': 4}
#pred format: class id, conf, xmin, ymin, xmax, ymax
#ground truth: class id, xmin, ymin, xmax, ymax
# The following lists all contain per-class data, i.e. all list have the length `n_classes + 1`,
# where one element is for the background class, i.e. that element is just a dummy entry.
self.prediction_results = [list() for _ in range(self.n_classes + 1)]
self.num_gt_per_class = None
self.groundTruth = []
self.imageId = []
self.runThrough = False
def reset(self):
self.prediction_results = [list() for _ in range(self.n_classes + 1)]
def evaluate(self, remoteOut, labels):
"""Evaluates the output of the predictions of the model in the cloud.
# Arguments
remoteOut: numpy ndarray containing the predictions of the model in the cloud
labels: ground truth labels corresponding to each image
"""
groundTruth = labels[1]
imageId = labels[0]
if not self.runThrough:
self.groundTruth+= list(groundTruth)
[self.imageId.append(i) for i in imageId]
self.predictOnBatch( remoteOut, imageId)
def simRes(self):
"""Evaluates the results of the simulation over all the iou values and returns a list
containing iou and corresponding mAp values.
"""
userRes = {}
# print(self.iou)
for i in self.iou:
# print(i)
userRes[i] = self.iterateOverIOU(self.prediction_results, i, self.imageId)
return np.array(list(userRes.items()))
def iterateOverIOU(self, preds, iou, imageId):
"""Calculates the desired metrics over all iou values.
# Arguments
preds: list containing per class prediction results of the model in the cloud
iou: IOU value for which the mAp has to be evaluated
            imageId: list containing the image IDs of the images in the test set
# Returns
Mean Average Precision calculated over all classes
"""
return self.calcmAp(self.groundTruth, self.prediction_results, iou, imageId, self.n_classes)
def predictOnBatch(self, remoteOut, imageId):
"""Generates per batch predictions.
# Arguments
remoteOut: numpy ndarray representing the prediction of the model in the cloud
            imageId: list containing the image IDs of all images in the batch
"""
class_id_pred = self.pred_format['class_id']
conf_pred = self.pred_format['conf']
xmin_pred = self.pred_format['xmin']
ymin_pred = self.pred_format['ymin']
xmax_pred = self.pred_format['xmax']
ymax_pred = self.pred_format['ymax']
y_pred_filtered = []
for i in range(len(remoteOut)):
y_pred_filtered.append(remoteOut[i][remoteOut[i, :, 0] !=0])
remoteOut = y_pred_filtered
for k, batch_item in enumerate(remoteOut):
image_id = imageId[k]
for box in batch_item:
class_id = int(box[class_id_pred])
confidence = box[conf_pred]
xmin = round(box[xmin_pred], 1)
ymin = round(box[ymin_pred], 1)
xmax = round(box[xmax_pred], 1)
ymax = round(box[ymax_pred], 1)
prediction = (image_id, confidence, xmin, ymin, xmax, ymax)
self.prediction_results[class_id].append(prediction)
def calcmAp(self, labels, predictions, IOUThreshold, imageIds, n_classes):
"""Calculate the mean average precision over all classes for a given IOU thershold.
# Arguments
labels: array containing the ground truth labels
predictions: list containing per class predictions
IOUThreshold: float value that represents the IOU threshold to be considered
            imageIds: list containing image IDs of all images in the test set
n_classes: number of classes
# Returns
The mean average precision calculated over all classes
"""
groundTruths = []
detections = predictions
ret = []
num_classes = 0
gtsPerClass = [0]
for i in range(len(imageIds)):
imageBoxes = labels[i]
for j in range(len(imageBoxes)):
boxes = imageBoxes[j]
b = list(boxes)
b.insert(0, imageIds[i])
b.insert(2, 1)
groundTruths.append(b)
for c in range(1, n_classes+1):
dects = detections[c]
#pred format: image_id, confidence, xmin, ymin, xmax, ymax
#gt format: image_id, 'class_id', conf, 'xmin', 'ymin', 'xmax', 'ymax'
gts = []
[gts.append(g) for g in groundTruths if g[1]==c]
npos = len(gts)
gtsPerClass.append(npos)
if npos!=0:
num_classes+=1
dects = sorted(dects, key=lambda conf: conf[1], reverse=True)
TP = np.zeros(len(dects))
FP = np.zeros(len(dects))
det = Counter([cc[0] for cc in gts])
for key, val in det.items():
det[key] = np.zeros(val)
for d in range(len(dects)):
gt = [gt for gt in gts if gt[0]==dects[d][0]]
iouMax = sys.float_info.min
for j in range(len(gt)):
iou = evalIOU(dects[d][2:], gt[j][3:])
if iou>iouMax:
iouMax = iou
jmax = j
if iouMax>=IOUThreshold:
if det[dects[d][0]][jmax] == 0:
TP[d] = 1
det[dects[d][0]][jmax] = 1
else:
FP[d] = 1
acc_FP = np.cumsum(FP)
acc_TP = np.cumsum(TP)
rec = acc_TP/npos
prec = np.divide(acc_TP,(acc_FP+acc_TP))
[ap, mpre, mrec, ii] = CalculateAveragePrecision(rec, prec)
# print(ap)
ret.append(ap)
# tot = len(ret)
print(gtsPerClass)
print(ret)
return np.nansum(ret)/num_classes
def evalIOU(boxes1, boxes2):
"""Computes the intersection over union for the given pair of boxes.
# Arguments
boxes1: list containing the corner locations of the bounding boxes in the format
<xmin, ymin, xmax, ymax>
boxes2: list containing the corner locations of the bounding boxes in the format
<xmin, ymin, xmax, ymax>
# Returns
The intersection over union of the regions under the boxes
"""
boxes1 = np.array(boxes1)
boxes2 = np.array(boxes2)
if boxes1.ndim == 1: boxes1 = np.expand_dims(boxes1, axis=0)
if boxes2.ndim == 1: boxes2 = np.expand_dims(boxes2, axis=0)
xmin = 0
ymin = 1
xmax = 2
ymax = 3
intersection_areas = intersection_area_(boxes1, boxes2)
boxes1_areas = (boxes1[:, xmax] - boxes1[:, xmin] + 1) * (boxes1[:, ymax] - boxes1[:, ymin] + 1)
boxes2_areas = (boxes2[:, xmax] - boxes2[:, xmin] + 1) * (boxes2[:, ymax] - boxes2[:, ymin] + 1)
union_areas = boxes1_areas + boxes2_areas - intersection_areas
return intersection_areas / union_areas
def intersection_area_(boxes1, boxes2):
"""Computes the intersection areas of the two boxes.
# Arguments
boxes1: array containing the corner locations of the bounding boxes in the format
<xmin, ymin, xmax, ymax>
boxes2: array containing the corner locations of the bounding boxes in the format
<xmin, ymin, xmax, ymax>
# Returns
The area common to both the boxes
"""
xmin = 0
ymin = 1
xmax = 2
ymax = 3
min_xy = np.maximum(boxes1[:,[xmin,ymin]], boxes2[:,[xmin,ymin]])
max_xy = np.minimum(boxes1[:,[xmax,ymax]], boxes2[:,[xmax,ymax]])
# Compute the side lengths of the intersection rectangles.
side_lengths = np.maximum(0, max_xy - min_xy + 1)
return side_lengths[:,0] * side_lengths[:,1]
def CalculateAveragePrecision(rec, prec):
"""Compute the average precision for a particular class
# Arguments
rec: cumulative recall of the class under consideration
prec: cumulative precision of the class under consideration
# Returns
Average precision per class
"""
mrec = []
mrec.append(0)
[mrec.append(e) for e in rec]
mrec.append(1)
mpre = []
mpre.append(0)
[mpre.append(e) for e in prec]
mpre.append(0)
for i in range(len(mpre)-1, 0, -1):
mpre[i-1]=max(mpre[i-1],mpre[i])
ii = []
for i in range(len(mrec)-1):
if mrec[1:][i]!=mrec[0:-1][i]:
ii.append(i+1)
ap = 0
for i in ii:
ap = ap + np.sum((mrec[i]-mrec[i-1])*mpre[i])
# return [ap, mpre[1:len(mpre)-1], mrec[1:len(mpre)-1], ii]
return [ap, mpre[0:len(mpre)-1], mrec[0:len(mpre)-1], ii]
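if __name__ == '__main__':
    # Hedged sanity check, not part of the original module: two 10x10 boxes
    # offset by 5 pixels in x give an IOU of 66/176 = 0.375 under the
    # +1 pixel side-length convention used in intersection_area_.
    box_a = [0.0, 0.0, 10.0, 10.0]  # <xmin, ymin, xmax, ymax>
    box_b = [5.0, 0.0, 15.0, 10.0]
    print('IOU:', evalIOU(box_a, box_b))  # -> [0.375]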
|
[
"numpy.divide",
"numpy.nansum",
"numpy.minimum",
"numpy.maximum",
"numpy.sum",
"numpy.argmax",
"numpy.zeros",
"numpy.expand_dims",
"numpy.equal",
"numpy.cumsum",
"numpy.mean",
"numpy.array",
"collections.Counter"
] |
[((7449, 7465), 'numpy.array', 'np.array', (['boxes1'], {}), '(boxes1)\n', (7457, 7465), True, 'import numpy as np\n'), ((7476, 7492), 'numpy.array', 'np.array', (['boxes2'], {}), '(boxes2)\n', (7484, 7492), True, 'import numpy as np\n'), ((8460, 8520), 'numpy.maximum', 'np.maximum', (['boxes1[:, [xmin, ymin]]', 'boxes2[:, [xmin, ymin]]'], {}), '(boxes1[:, [xmin, ymin]], boxes2[:, [xmin, ymin]])\n', (8470, 8520), True, 'import numpy as np\n'), ((8527, 8587), 'numpy.minimum', 'np.minimum', (['boxes1[:, [xmax, ymax]]', 'boxes2[:, [xmax, ymax]]'], {}), '(boxes1[:, [xmax, ymax]], boxes2[:, [xmax, ymax]])\n', (8537, 8587), True, 'import numpy as np\n'), ((8661, 8695), 'numpy.maximum', 'np.maximum', (['(0)', '(max_xy - min_xy + 1)'], {}), '(0, max_xy - min_xy + 1)\n', (8671, 8695), True, 'import numpy as np\n'), ((852, 880), 'numpy.argmax', 'np.argmax', (['remoteOut'], {'axis': '(1)'}), '(remoteOut, axis=1)\n', (861, 880), True, 'import numpy as np\n'), ((1097, 1118), 'numpy.array', 'np.array', (['self.avgAcc'], {}), '(self.avgAcc)\n', (1105, 1118), True, 'import numpy as np\n'), ((7525, 7555), 'numpy.expand_dims', 'np.expand_dims', (['boxes1'], {'axis': '(0)'}), '(boxes1, axis=0)\n', (7539, 7555), True, 'import numpy as np\n'), ((7587, 7617), 'numpy.expand_dims', 'np.expand_dims', (['boxes2'], {'axis': '(0)'}), '(boxes2, axis=0)\n', (7601, 7617), True, 'import numpy as np\n'), ((1129, 1149), 'numpy.mean', 'np.mean', (['self.avgAcc'], {}), '(self.avgAcc)\n', (1136, 1149), True, 'import numpy as np\n'), ((6246, 6276), 'collections.Counter', 'Counter', (['[cc[0] for cc in gts]'], {}), '([cc[0] for cc in gts])\n', (6253, 6276), False, 'from collections import Counter\n'), ((6732, 6745), 'numpy.cumsum', 'np.cumsum', (['FP'], {}), '(FP)\n', (6741, 6745), True, 'import numpy as np\n'), ((6758, 6771), 'numpy.cumsum', 'np.cumsum', (['TP'], {}), '(TP)\n', (6767, 6771), True, 'import numpy as np\n'), ((6804, 6838), 'numpy.divide', 'np.divide', (['acc_TP', '(acc_FP + acc_TP)'], {}), '(acc_TP, acc_FP + acc_TP)\n', (6813, 6838), True, 'import numpy as np\n'), ((6996, 7010), 'numpy.nansum', 'np.nansum', (['ret'], {}), '(ret)\n', (7005, 7010), True, 'import numpy as np\n'), ((9369, 9410), 'numpy.sum', 'np.sum', (['((mrec[i] - mrec[i - 1]) * mpre[i])'], {}), '((mrec[i] - mrec[i - 1]) * mpre[i])\n', (9375, 9410), True, 'import numpy as np\n'), ((6325, 6338), 'numpy.zeros', 'np.zeros', (['val'], {}), '(val)\n', (6333, 6338), True, 'import numpy as np\n'), ((909, 943), 'numpy.equal', 'np.equal', (['predictions', 'classValues'], {}), '(predictions, classValues)\n', (917, 943), True, 'import numpy as np\n')]
|
import contextlib
import os
from subprocess import check_call, CalledProcessError
import sys
from pynt import task
__license__ = "MIT License"
__contact__ = "http://rags.github.com/pynt-contrib/"
@contextlib.contextmanager
def safe_cd(path):
"""
Changes to a directory, yields, and changes back.
Additionally any error will also change the directory back.
Usage:
>>> with safe_cd('some/repo'):
... call('git status')
"""
starting_directory = os.getcwd()
try:
os.chdir(path)
yield
finally:
os.chdir(starting_directory)
@task()
def execute(script, *args, **kwargs):
"""
    Executes a command (no shell is involved); each argument is passed separately. Usage: execute('grep', 'TODO', '*')
    NOTE: Any kwargs will be converted to args in the destination command.
    E.g. execute('grep', 'TODO', '*', **{'--before-context': 5}) runs: grep TODO * --before-context=5
"""
popen_args = [script] + list(args)
if kwargs:
popen_args.extend(_kwargs_to_execute_args(kwargs))
try:
return check_call(popen_args, shell=False)
except CalledProcessError as ex:
_print(ex)
sys.exit(ex.returncode)
except Exception as ex:
_print('Error: {} with script: {} and args {}'.format(ex, script, args))
sys.exit(1)
def _kwargs_to_execute_args(kwargs):
args = ['='.join([str(key), str(value)]) for key, value in kwargs.items()]
return args
def _print(*args):
print(args)
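# Hedged example task, not part of the original build file: it shows how a
# kwarg such as {'--before-context': 5} is turned into '--before-context=5'
# by _kwargs_to_execute_args before being handed to check_call.
@task()
def find_todos():
    """Greps the current directory for TODO markers (illustrative only)."""
    execute('grep', '-r', 'TODO', '.', **{'--before-context': 5})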
|
[
"subprocess.check_call",
"pynt.task",
"os.getcwd",
"os.chdir",
"sys.exit"
] |
[((592, 598), 'pynt.task', 'task', ([], {}), '()\n', (596, 598), False, 'from pynt import task\n'), ((481, 492), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (490, 492), False, 'import os\n'), ((510, 524), 'os.chdir', 'os.chdir', (['path'], {}), '(path)\n', (518, 524), False, 'import os\n'), ((560, 588), 'os.chdir', 'os.chdir', (['starting_directory'], {}), '(starting_directory)\n', (568, 588), False, 'import os\n'), ((1082, 1117), 'subprocess.check_call', 'check_call', (['popen_args'], {'shell': '(False)'}), '(popen_args, shell=False)\n', (1092, 1117), False, 'from subprocess import check_call, CalledProcessError\n'), ((1182, 1205), 'sys.exit', 'sys.exit', (['ex.returncode'], {}), '(ex.returncode)\n', (1190, 1205), False, 'import sys\n'), ((1323, 1334), 'sys.exit', 'sys.exit', (['(1)'], {}), '(1)\n', (1331, 1334), False, 'import sys\n')]
|
"""
Copyright (C) 2022 <NAME>
Released under MIT License. See the file LICENSE for details.
Module for some classes that describe sequences of images.
If your custom dataset stores images in some other way,
create a subclass of ImageSequence and use it.
"""
from typing import List
import numpy as np
from pathlib import Path
import imageio as iio
class ImageSequence:
def load(self, im_num:int) -> np.ndarray:
pass
def number_of_frames(self) -> int:
pass
def start_frame(self) -> int:
pass
class FolderSequence(ImageSequence):
def __init__(self, folder:Path):
self.images = folder.glob('*.jpg')
self.images = list(self.images)
self.images.sort()
def load(self, im_num:int) -> np.ndarray:
return iio.imread(self.images[im_num])
def number_of_frames(self) -> int:
return len(self.images)
def start_frame(self) -> int:
return 0
class VideoSequence(ImageSequence):
def __init__(self, vid_file:Path):
assert vid_file.is_file()
self.vid = iio.get_reader(vid_file)
self.frame_count = None
def __del__(self):
# Attempt to clean up
self.vid.close()
def load(self, im_num:int) -> np.ndarray:
return self.vid.get_data(im_num)
def number_of_frames(self) -> int:
if self.frame_count is None:
self.frame_count = self.vid.count_frames()
return self.frame_count
def start_frame(self) -> int:
return 0
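if __name__ == '__main__':
    # Minimal usage sketch, not part of the original module; 'frames/' is a
    # hypothetical folder of .jpg images.
    seq = FolderSequence(Path('frames/'))
    for i in range(seq.start_frame(), seq.start_frame() + seq.number_of_frames()):
        frame = seq.load(i)
        print(i, frame.shape)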
|
[
"imageio.imread",
"imageio.get_reader"
] |
[((806, 837), 'imageio.imread', 'iio.imread', (['self.images[im_num]'], {}), '(self.images[im_num])\n', (816, 837), True, 'import imageio as iio\n'), ((1096, 1120), 'imageio.get_reader', 'iio.get_reader', (['vid_file'], {}), '(vid_file)\n', (1110, 1120), True, 'import imageio as iio\n')]
|
from time import time
import hashlib
import json
from urllib.parse import urlparse
import requests
# Class definition of our shellchain (Blockchain-like) object
class Shellchain:
def __init__(self): # constructor
self.current_transactions = []
self.chain = []
self.rivers = set()
# the first seashell has to be created
self.dig_shell(previous_hash='1', proof=100) # first seashell in chain
# mine a new block
def dig_shell(self, proof, previous_hash):
shell = {
# point in chain that shell occurs at
'index': len(self.chain) + 1,
'timestamp': time(), # current timestamp using date/time library
'transactions': self.current_transactions,
'proof': proof,
'previous_hash': previous_hash or self.hash(self.chain[-1])
}
# reset current list of transactions
self.current_transactions = []
# add new shell to shellchain
self.chain.append(shell)
return shell
# sale of fish based on certain garbage amount removed
def fish_sale(self, sender, amount, garbageAmount):
self.current_transactions.append({
'type': 'fish_sale',
'sender': sender,
'amount': amount,
'garbageAmount': garbageAmount
})
# return index of new transaction
return self.last_shell['index'] + 1
# trades a number of fish between rivers
def fish_trade(self, sender, recipient, amount):
self.current_transactions.append({
'type': 'fish_trade',
'sender': sender,
'recipient': recipient,
'amount': amount
})
# return index of new transaction
return self.last_shell['index'] + 1
# a crab is caught thieving
def crab_catch(self, sender, mafia, amount, garbageAmount):
self.current_transactions.append({
'type': 'crab_catch',
'sender': sender,
'mafia': mafia,
'amount': amount,
'garbageAmount': garbageAmount
})
# return index of new transaction
return self.last_shell['index'] + 1
@property
def last_shell(self):
return self.chain[-1]
@staticmethod
def hash(shell):
shell_string = json.dumps(shell, sort_keys=True).encode()
return hashlib.sha256(shell_string).hexdigest()
def proof_of_work(self, last_shell):
# find a number p' such that hash(pp') contains 4 leading zeroes
# p is previous proof, p' is new proof
last_proof = last_shell['proof']
last_hash = self.hash(last_shell) # hashes last shell's proof of work
proof = 0
# checks every proof value until true
while self.valid_proof(last_proof, proof, last_hash) is False:
proof += 1
return proof
@staticmethod
def valid_proof(last_proof, proof, last_hash):
guess = f'{last_proof}{proof}{last_hash}'.encode()
guess_hash = hashlib.sha256(guess).hexdigest()
return guess_hash[:4] == "0000"
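if __name__ == '__main__':
    # Hedged usage sketch, not part of the original module: record a sale,
    # solve proof of work for the latest shell, then dig the next one.
    shellchain = Shellchain()
    shellchain.fish_sale(sender='river-1', amount=3, garbageAmount=12)
    last = shellchain.last_shell
    proof = shellchain.proof_of_work(last)
    shellchain.dig_shell(proof, shellchain.hash(last))
    print(len(shellchain.chain))  # 2: the genesis shell plus the one just dug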
|
[
"hashlib.sha256",
"json.dumps",
"time.time"
] |
[((645, 651), 'time.time', 'time', ([], {}), '()\n', (649, 651), False, 'from time import time\n'), ((2341, 2374), 'json.dumps', 'json.dumps', (['shell'], {'sort_keys': '(True)'}), '(shell, sort_keys=True)\n', (2351, 2374), False, 'import json\n'), ((2399, 2427), 'hashlib.sha256', 'hashlib.sha256', (['shell_string'], {}), '(shell_string)\n', (2413, 2427), False, 'import hashlib\n'), ((3054, 3075), 'hashlib.sha256', 'hashlib.sha256', (['guess'], {}), '(guess)\n', (3068, 3075), False, 'import hashlib\n')]
|
"""\
Acora - a multi-keyword search engine based on Aho-Corasick trees.
Usage::
>>> from acora import AcoraBuilder
Collect some keywords::
>>> builder = AcoraBuilder('ab', 'bc', 'de')
>>> builder.add('a', 'b')
Generate the Acora search engine::
>>> ac = builder.build()
Search a string for all occurrences::
>>> ac.findall('abc')
[('a', 0), ('ab', 0), ('b', 1), ('bc', 1)]
>>> ac.findall('abde')
[('a', 0), ('ab', 0), ('b', 1), ('de', 2)]
"""
from __future__ import absolute_import
import sys
IS_PY3 = sys.version_info[0] >= 3
if IS_PY3:
unicode = str
FILE_BUFFER_SIZE = 32 * 1024
class PyAcora(object):
"""A simple (and very slow) Python implementation of the Acora
search engine.
"""
transitions = None
def __init__(self, machine, transitions=None):
if transitions is not None:
# old style format
start_state = machine
self.transitions = dict([
((state.id, char), (target_state.id, target_state.matches))
for ((state, char), target_state) in transitions.items()])
else:
# new style Machine format
start_state = machine.start_state
ignore_case = machine.ignore_case
self.transitions = transitions = {}
child_states = machine.child_states
child_targets = {}
state_matches = {}
needs_bytes_conversion = None
for state in child_states:
state_id = state.id
child_targets[state_id], state_matches[state_id] = (
_merge_targets(state, ignore_case))
if needs_bytes_conversion is None and state_matches[state_id]:
if IS_PY3:
needs_bytes_conversion = any(
isinstance(s, bytes) for s in state_matches[state_id])
elif any(isinstance(s, unicode) for s in state_matches[state_id]):
# in Py2, some keywords might be str even though we're processing unicode
needs_bytes_conversion = False
if needs_bytes_conversion is None and not IS_PY3:
needs_bytes_conversion = True
if needs_bytes_conversion:
if IS_PY3:
convert = ord
else:
from codecs import latin_1_encode
def convert(s):
return latin_1_encode(s)[0]
else:
convert = None
get_child_targets = child_targets.get
get_matches = state_matches.get
state_id = start_state.id
for ch, child in _merge_targets(start_state, ignore_case)[0].items():
child_id = child.id
if convert is not None:
ch = convert(ch)
transitions[(state_id, ch)] = (child_id, get_matches(child_id))
for state in child_states:
state_id = state.id
for ch, child in get_child_targets(state_id).items():
child_id = child.id
if convert is not None:
ch = convert(ch)
transitions[(state_id, ch)] = (child_id, get_matches(child_id))
self.start_state = start_state.id
def finditer(self, s):
"""Iterate over all occurrences of any keyword in the string.
Returns (keyword, offset) pairs.
"""
state = self.start_state
start_state = (state, [])
next_state = self.transitions.get
pos = 0
for char in s:
pos += 1
state, matches = next_state((state, char), start_state)
if matches:
for match in matches:
yield (match, pos-len(match))
def findall(self, s):
"""Find all occurrences of any keyword in the string.
Returns a list of (keyword, offset) pairs.
"""
return list(self.finditer(s))
def filefind(self, f):
"""Iterate over all occurrences of any keyword in a file.
Returns (keyword, offset) pairs.
"""
opened = False
if not hasattr(f, 'read'):
f = open(f, 'rb')
opened = True
try:
state = self.start_state
start_state = (state, ())
next_state = self.transitions.get
pos = 0
while 1:
data = f.read(FILE_BUFFER_SIZE)
if not data:
break
for char in data:
pos += 1
state, matches = next_state((state, char), start_state)
if matches:
for match in matches:
yield (match, pos-len(match))
finally:
if opened:
f.close()
def filefindall(self, f):
"""Find all occurrences of any keyword in a file.
Returns a list of (keyword, offset) pairs.
"""
return list(self.filefind(f))
# import from shared Python/Cython module
from acora._acora import (
insert_bytes_keyword, insert_unicode_keyword,
build_trie as _build_trie, build_MachineState as _MachineState, merge_targets as _merge_targets)
# import from Cython module if available
try:
from acora._cacora import (
UnicodeAcora, BytesAcora, insert_bytes_keyword, insert_unicode_keyword)
except ImportError:
# C module not there ...
UnicodeAcora = BytesAcora = PyAcora
class AcoraBuilder(object):
"""The main builder class for an Acora search engine.
Add keywords by calling ``.add(*keywords)`` or by passing them
into the constructor. Then build the search engine by calling
``.build()``.
Builds a case insensitive search engine when passing
``ignore_case=True``, and a case sensitive engine otherwise.
"""
ignore_case = False
def __init__(self, *keywords, **kwargs):
if kwargs:
self.ignore_case = kwargs.pop('ignore_case', False)
if kwargs:
raise TypeError(
"%s() got unexpected keyword argument %s" % (
self.__class__.__name__, next(iter(kwargs))))
if len(keywords) == 1 and isinstance(keywords[0], (list, tuple)):
keywords = keywords[0]
self.for_unicode = None
self.state_counter = 1
self.keywords = set()
self.tree = _MachineState(0)
if keywords:
self.update(keywords)
def __update(self, keywords):
"""Add more keywords to the search engine builder.
Adding keywords does not impact previously built search
engines.
"""
if not keywords:
return
self.tree = None
self.keywords.update(keywords)
if self.for_unicode is None:
for keyword in keywords:
if isinstance(keyword, unicode):
self.for_unicode = True
elif isinstance(keyword, bytes):
self.for_unicode = False
else:
raise TypeError(
"keywords must be either bytes or unicode, not mixed (got %s)" %
type(keyword))
break
# validate input string types
marker = object()
if self.for_unicode:
for keyword in keywords:
if not isinstance(keyword, unicode):
break
else:
keyword = marker
else:
for keyword in keywords:
if not isinstance(keyword, bytes):
break
else:
keyword = marker
if keyword is not marker:
raise TypeError(
"keywords must be either bytes or unicode, not mixed (got %s)" %
type(keyword))
def add(self, *keywords):
"""Add more keywords to the search engine builder.
Adding keywords does not impact previously built search
engines.
"""
if keywords:
self.update(keywords)
def build(self, ignore_case=None, acora=None):
"""Build a search engine from the aggregated keywords.
Builds a case insensitive search engine when passing
``ignore_case=True``, and a case sensitive engine otherwise.
"""
if acora is None:
if self.for_unicode:
acora = UnicodeAcora
else:
acora = BytesAcora
if self.for_unicode == False and ignore_case:
import sys
if sys.version_info[0] >= 3:
raise ValueError(
"Case insensitive search is not supported for byte strings in Python 3")
if ignore_case is not None and ignore_case != self.ignore_case:
# must rebuild tree
builder = type(self)(ignore_case=ignore_case)
builder.update(self.keywords)
return builder.build(acora=acora)
return acora(_build_trie(self.tree, ignore_case=self.ignore_case))
def update(self, keywords):
for_unicode = self.for_unicode
ignore_case = self.ignore_case
insert_keyword = insert_unicode_keyword if for_unicode else insert_bytes_keyword
for keyword in keywords:
if for_unicode is None:
for_unicode = self.for_unicode = isinstance(keyword, unicode)
insert_keyword = (
insert_unicode_keyword if for_unicode else insert_bytes_keyword)
elif for_unicode != isinstance(keyword, unicode):
raise TypeError(
"keywords must be either bytes or unicode, not mixed (got %s)" %
type(keyword))
self.state_counter = insert_keyword(
self.tree, keyword, self.state_counter, ignore_case)
self.keywords.update(keywords)
### convenience functions
def search(s, *keywords):
"""Convenience function to search a string for keywords.
"""
acora = AcoraBuilder(keywords).build()
return acora.findall(s)
def search_ignore_case(s, *keywords):
"""Convenience function to search a string for keywords. Case
insensitive version.
"""
acora = AcoraBuilder(keywords, ignore_case=True).build()
return acora.findall(s)
|
[
"acora._acora.merge_targets",
"acora._acora.build_trie",
"codecs.latin_1_encode",
"acora._acora.build_MachineState"
] |
[((6550, 6566), 'acora._acora.build_MachineState', '_MachineState', (['(0)'], {}), '(0)\n', (6563, 6566), True, 'from acora._acora import insert_bytes_keyword, insert_unicode_keyword, build_trie as _build_trie, build_MachineState as _MachineState, merge_targets as _merge_targets\n'), ((9167, 9219), 'acora._acora.build_trie', '_build_trie', (['self.tree'], {'ignore_case': 'self.ignore_case'}), '(self.tree, ignore_case=self.ignore_case)\n', (9178, 9219), True, 'from acora._acora import insert_bytes_keyword, insert_unicode_keyword, build_trie as _build_trie, build_MachineState as _MachineState, merge_targets as _merge_targets\n'), ((1623, 1657), 'acora._acora.merge_targets', '_merge_targets', (['state', 'ignore_case'], {}), '(state, ignore_case)\n', (1637, 1657), True, 'from acora._acora import insert_bytes_keyword, insert_unicode_keyword, build_trie as _build_trie, build_MachineState as _MachineState, merge_targets as _merge_targets\n'), ((2732, 2772), 'acora._acora.merge_targets', '_merge_targets', (['start_state', 'ignore_case'], {}), '(start_state, ignore_case)\n', (2746, 2772), True, 'from acora._acora import insert_bytes_keyword, insert_unicode_keyword, build_trie as _build_trie, build_MachineState as _MachineState, merge_targets as _merge_targets\n'), ((2499, 2516), 'codecs.latin_1_encode', 'latin_1_encode', (['s'], {}), '(s)\n', (2513, 2516), False, 'from codecs import latin_1_encode\n')]
|
# -*- coding: utf-8 -*-
from south.utils import datetime_utils as datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Deleting model 'CountryCode'
db.delete_table('iss_countrycode')
def backwards(self, orm):
# Adding model 'CountryCode'
db.create_table('iss_countrycode', (
('country_name', self.gf('django.db.models.fields.CharField')(max_length=255, unique=True)),
('iso_country_code', self.gf('django.db.models.fields.CharField')(max_length=2, unique=True)),
('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
))
db.send_create_signal('iss', ['CountryCode'])
models = {
'iss.membership': {
'Meta': {'object_name': 'Membership'},
'current_dues_amount': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'expiration_date': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.CharField', [], {'max_length': '255', 'primary_key': 'True'}),
'join_date': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}),
'last_modified_date': ('django.db.models.fields.DateField', [], {}),
'membership_directory_opt_out': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'owner': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['iss.Organization']", 'null': 'True'}),
'product': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['iss.MembershipProduct']"}),
'receives_membership_benefits': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'renewal_date': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}),
'status': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'termination_date': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}),
'type': ('django.db.models.fields.CharField', [], {'max_length': '255'})
},
'iss.membershipproduct': {
'Meta': {'object_name': 'MembershipProduct'},
'id': ('django.db.models.fields.CharField', [], {'max_length': '255', 'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '255'})
},
'iss.organization': {
'Meta': {'object_name': 'Organization'},
'account_num': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'business_member_level': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'carnegie_class': ('django.db.models.fields.TextField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'city': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'class_profile': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'country': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'country_iso': ('django.db.models.fields.CharField', [], {'max_length': '3', 'null': 'True', 'blank': 'True'}),
'enrollment_fte': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'exclude_from_website': ('django.db.models.fields.IntegerField', [], {}),
'is_defunct': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'is_member': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'is_signatory': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'latitude': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'longitude': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'member_type': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'membersuite_account_num': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'membersuite_id': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'org_name': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'org_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['iss.OrganizationType']", 'null': 'True'}),
'picklist_name': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'pilot_participant': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'postal_code': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'primary_email': ('django.db.models.fields.CharField', [], {'max_length': '256', 'null': 'True', 'blank': 'True'}),
'salesforce_id': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'sector': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'setting': ('django.db.models.fields.CharField', [], {'max_length': '33', 'null': 'True', 'blank': 'True'}),
'stars_participant_status': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'state': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'street1': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'street2': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'sustainability_website': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'website': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'})
},
'iss.organizationtype': {
'Meta': {'object_name': 'OrganizationType'},
'id': ('django.db.models.fields.CharField', [], {'max_length': '255', 'primary_key': 'True'}),
'name': ('django.db.models.fields.TextField', [], {})
}
}
complete_apps = ['iss']
|
[
"south.db.db.delete_table",
"south.db.db.send_create_signal"
] |
[((278, 312), 'south.db.db.delete_table', 'db.delete_table', (['"""iss_countrycode"""'], {}), "('iss_countrycode')\n", (293, 312), False, 'from south.db import db\n'), ((742, 787), 'south.db.db.send_create_signal', 'db.send_create_signal', (['"""iss"""', "['CountryCode']"], {}), "('iss', ['CountryCode'])\n", (763, 787), False, 'from south.db import db\n')]
|
import sys, os
path = os.path.dirname(__file__)
path = os.path.join(path, '..', 'protein_inference')
if path not in sys.path:
sys.path.append(path)
|
[
"sys.path.append",
"os.path.dirname",
"os.path.join"
] |
[((23, 48), 'os.path.dirname', 'os.path.dirname', (['__file__'], {}), '(__file__)\n', (38, 48), False, 'import sys, os\n'), ((56, 101), 'os.path.join', 'os.path.join', (['path', '""".."""', '"""protein_inference"""'], {}), "(path, '..', 'protein_inference')\n", (68, 101), False, 'import sys, os\n'), ((131, 152), 'sys.path.append', 'sys.path.append', (['path'], {}), '(path)\n', (146, 152), False, 'import sys, os\n')]
|
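# A pathlib-based equivalent of the path setup above; a minimal sketch that
# assumes the same on-disk layout (a "protein_inference" directory that is a
# sibling of this file's directory).
import sys
from pathlib import Path

pkg_dir = Path(__file__).resolve().parent.parent / "protein_inference"
if str(pkg_dir) not in sys.path:
    sys.path.append(str(pkg_dir))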
from re import compile, MULTILINE
import telethon as tg
from pyrobud.util.bluscream import UserStr, telegram_uid_regex
from .. import command, module
class DebugModuleAddon(module.Module):
name = "Debug Extensions"
@command.desc("Dump all the data of a message to your cloud")
@command.alias("mdp")
async def cmd_mdumpprivate(self, msg: tg.custom.Message):
if not msg.is_reply: return
reply_msg = await msg.get_reply_message()
await msg.delete()
data = f"```{reply_msg.stringify()}```"
await self.bot.client.send_message("me", data)
@command.desc("Convert all tg uids to profile links")
@command.alias("idlink", "linkids", "linkid")
async def cmd_idlinks(self, msg: tg.custom.Message):
if not msg.is_reply: return
reply_msg = await msg.get_reply_message()
matches = telegram_uid_regex.finditer(reply_msg.text, MULTILINE)
uids = list()
for matchNum, match in enumerate(matches, start=1):
if not match.group() in uids:
uids.append(match.group())
if len(uids) < 1: return "No UIDs found in the given message."
ret = f"Found **{len(uids)}** UIDs:\n"
for uid in uids:
try:
user = await self.bot.client.get_entity(int(uid))
ret += f"\n - {UserStr(user, True)}"
except: ret += f"\n - [{uid}](tg://user?id={uid})"""
return ret
|
[
"pyrobud.util.bluscream.UserStr",
"pyrobud.util.bluscream.telegram_uid_regex.finditer"
] |
[((889, 943), 'pyrobud.util.bluscream.telegram_uid_regex.finditer', 'telegram_uid_regex.finditer', (['reply_msg.text', 'MULTILINE'], {}), '(reply_msg.text, MULTILINE)\n', (916, 943), False, 'from pyrobud.util.bluscream import UserStr, telegram_uid_regex\n'), ((1378, 1397), 'pyrobud.util.bluscream.UserStr', 'UserStr', (['user', '(True)'], {}), '(user, True)\n', (1385, 1397), False, 'from pyrobud.util.bluscream import UserStr, telegram_uid_regex\n')]
|
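# Stand-alone sketch of the UID-extraction logic in cmd_idlinks above.
# telegram_uid_regex is project-specific, so a plain "6-12 digit number"
# pattern is assumed here purely for illustration.
import re

ASSUMED_UID_PATTERN = re.compile(r"\b\d{6,12}\b")

def extract_unique_uids(text: str) -> list:
    """Return matched UIDs in order of first appearance, without duplicates."""
    uids = []
    for match in ASSUMED_UID_PATTERN.finditer(text):
        if match.group() not in uids:
            uids.append(match.group())
    return uids

sample = "ping 123456789 and 123456789, also 987654321"
links = [f"[{uid}](tg://user?id={uid})" for uid in extract_unique_uids(sample)]
print("\n".join(links))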
# coding: utf-8
# In[ ]:
import cv2
from keras.models import load_model
import numpy as np
from collections import deque
from keras.preprocessing import image
import keras
import os
# In[ ]:
model1 = load_model('mob_logo_model.h5')
val = ['Adidas','Apple','BMW','Citroen','Fedex','HP','Mcdonalds','Nike','none','Pepsi','Puma']
pred_class = 8
# In[ ]:
def nothing(x):
pass
cap = cv2.VideoCapture(0)
cv2.namedWindow("Trackbars")
cv2.createTrackbar("L - H", "Trackbars", 0, 179, nothing)
cv2.createTrackbar("L - S", "Trackbars", 0, 255, nothing)
cv2.createTrackbar("L - V", "Trackbars", 0, 255, nothing)
cv2.createTrackbar("U - H", "Trackbars", 179, 179, nothing)
cv2.createTrackbar("U - S", "Trackbars", 255, 255, nothing)
cv2.createTrackbar("U - V", "Trackbars", 255, 255, nothing)
# In[ ]:
def main():
logos = get_logos()
cap = cv2.VideoCapture(0)
Lower_green = np.array([10,130,130])
Upper_green = np.array([40,255,255])
pts = deque(maxlen=512)
blackboard = np.zeros((480, 640, 3), dtype=np.uint8)
value = np.zeros((224,224,3), dtype = np.uint8)
#print(blackboard)
digit = np.zeros((200, 200, 3), dtype=np.uint8)
pred_class = 8
while (cap.isOpened()):
ret, img = cap.read()
img = cv2.flip(img, 1)
cv2.rectangle(img,(400,250),(624,474),(255,0,255),5)
hsv = cv2.cvtColor(img, cv2.COLOR_BGR2HSV)
l_h = cv2.getTrackbarPos("L - H", "Trackbars")
l_s = cv2.getTrackbarPos("L - S", "Trackbars")
l_v = cv2.getTrackbarPos("L - V", "Trackbars")
u_h = cv2.getTrackbarPos("U - H", "Trackbars")
u_s = cv2.getTrackbarPos("U - S", "Trackbars")
u_v = cv2.getTrackbarPos("U - V", "Trackbars")
Lower_green= np.array([l_h, l_s, l_v]) # use the trackbars to customize the colour to track to make the doodles
Upper_green = np.array([u_v, u_s, u_v]) #0,131,157 179,255,255 (orange color settings)
kernel = np.ones((5, 5), np.uint8)
mask = cv2.inRange(hsv, Lower_green, Upper_green)
mask = cv2.erode(mask, kernel, iterations=2)
mask = cv2.morphologyEx(mask, cv2.MORPH_OPEN, kernel)
mask=cv2.morphologyEx(mask,cv2.MORPH_CLOSE,kernel)
mask = cv2.dilate(mask, kernel, iterations=1)
res = cv2.bitwise_and(img, img, mask=mask)
cnts, heir = cv2.findContours(mask.copy(), cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)[-2:]
center = None
if len(cnts) >= 1:
cnt = max(cnts, key=cv2.contourArea)
#print(cnt)
if cv2.contourArea(cnt) > 200:
((x, y), radius) = cv2.minEnclosingCircle(cnt)
cv2.circle(img, (int(x), int(y)), int(radius), (0, 255, 255), 2)
cv2.circle(img, center, 5, (0, 0, 255), -1)
M = cv2.moments(cnt)
center = (int(M['m10'] / M['m00']), int(M['m01'] / M['m00']))
pts.appendleft(center)
for i in range(1, len(pts)):
if pts[i - 1] is None or pts[i] is None:
continue
cv2.line(blackboard, pts[i - 1], pts[i], (255, 255, 255), 7)
cv2.line(img, pts[i - 1], pts[i], (0, 0, 255), 2)
elif len(cnts) == 0:
if len(pts) != []:
blackboard_gray = cv2.cvtColor(blackboard, cv2.COLOR_BGR2GRAY)
blur1 = cv2.medianBlur(blackboard_gray, 15)
blur1 = cv2.GaussianBlur(blur1, (5, 5), 0)
thresh1 = cv2.threshold(blur1, 0, 255, cv2.THRESH_BINARY + cv2.THRESH_OTSU)[1]
blackboard_cnts = cv2.findContours(thresh1.copy(), cv2.RETR_TREE, cv2.CHAIN_APPROX_NONE)[1]
if len(blackboard_cnts) >= 1:
cnt = max(blackboard_cnts, key=cv2.contourArea)
#print(cv2.contourArea(cnt))
if cv2.contourArea(cnt) > 2000:
value = blackboard[250:474, 400:624]
pred_probab, pred_class = keras_predict(model1, value)
print(val[pred_class], pred_probab)
pts = deque(maxlen=512)
blackboard = np.zeros((480, 640, 3), dtype=np.uint8)
img = overlay(img, logos[pred_class])
cv2.imshow("Frame", img)
cv2.imshow("Res", res)
cv2.imshow("mask", mask)
k = cv2.waitKey(10)
if k == 27:
break
# In[ ]:
def keras_predict(model, image):
processed = keras_process_image(image)
print("processed: " + str(processed.shape))
pred_probab = model1.predict(processed)[0]
pred_class = list(pred_probab).index(max(pred_probab))
return max(pred_probab), pred_class
# In[ ]:
def keras_process_image(img):
img_array = image.img_to_array(img)
img_array_expanded_dims = np.expand_dims(img_array, axis = 0)
return keras.applications.mobilenet.preprocess_input(img_array_expanded_dims)
# In[ ]:
def get_logos():
logos_folder = "../logo/"
logos = []
for logo in range(len(os.listdir(logos_folder))):
logos.append(cv2.imread(logos_folder + str(logo) + '.png', cv2.IMREAD_UNCHANGED))
print(logos)
return logos
# In[ ]:
def overlay(image, logo):
x,y,z = logo.shape
#try:
image[0:x, 0:y] = blend_transparent(image[0:x, 0:y ], logo)
#except:
#pass
return image
# In[ ]:
def blend_transparent(face_img, overlay_t_img):
# Split out the transparency mask from the colour info
overlay_img = overlay_t_img[:, :, :3] # Grab the BRG planes
overlay_mask = overlay_t_img[:, :, 3:] # And the alpha plane
# Again calculate the inverse mask
background_mask = 255 - overlay_mask
# Turn the masks into three channel, so we can use them as weights
overlay_mask = cv2.cvtColor(overlay_mask, cv2.COLOR_GRAY2BGR)
background_mask = cv2.cvtColor(background_mask, cv2.COLOR_GRAY2BGR)
# Create a masked out face image, and masked out overlay
# We convert the images to floating point in range 0.0 - 1.0
face_part = (face_img * (1 / 255.0)) * (background_mask * (1 / 255.0))
overlay_part = (overlay_img * (1 / 255.0)) * (overlay_mask * (1 / 255.0))
# And finally just add them together, and rescale it back to an 8bit integer image
return np.uint8(cv2.addWeighted(face_part, 255.0, overlay_part, 255.0, 0.0))
# In[ ]:
keras_predict(model1, np.zeros((224, 224, 3), dtype=np.uint8))
main()
|
[
"keras.models.load_model",
"cv2.GaussianBlur",
"cv2.bitwise_and",
"cv2.medianBlur",
"numpy.ones",
"keras.preprocessing.image.img_to_array",
"cv2.rectangle",
"cv2.erode",
"cv2.imshow",
"cv2.inRange",
"collections.deque",
"cv2.line",
"cv2.contourArea",
"cv2.dilate",
"cv2.cvtColor",
"cv2.getTrackbarPos",
"cv2.createTrackbar",
"cv2.circle",
"cv2.minEnclosingCircle",
"cv2.waitKey",
"cv2.morphologyEx",
"cv2.addWeighted",
"cv2.flip",
"keras.applications.mobilenet.preprocess_input",
"os.listdir",
"cv2.threshold",
"cv2.moments",
"numpy.zeros",
"numpy.expand_dims",
"cv2.VideoCapture",
"numpy.array",
"cv2.namedWindow"
] |
[((210, 241), 'keras.models.load_model', 'load_model', (['"""mob_logo_model.h5"""'], {}), "('mob_logo_model.h5')\n", (220, 241), False, 'from keras.models import load_model\n'), ((399, 418), 'cv2.VideoCapture', 'cv2.VideoCapture', (['(0)'], {}), '(0)\n', (415, 418), False, 'import cv2\n'), ((419, 447), 'cv2.namedWindow', 'cv2.namedWindow', (['"""Trackbars"""'], {}), "('Trackbars')\n", (434, 447), False, 'import cv2\n'), ((450, 507), 'cv2.createTrackbar', 'cv2.createTrackbar', (['"""L - H"""', '"""Trackbars"""', '(0)', '(179)', 'nothing'], {}), "('L - H', 'Trackbars', 0, 179, nothing)\n", (468, 507), False, 'import cv2\n'), ((508, 565), 'cv2.createTrackbar', 'cv2.createTrackbar', (['"""L - S"""', '"""Trackbars"""', '(0)', '(255)', 'nothing'], {}), "('L - S', 'Trackbars', 0, 255, nothing)\n", (526, 565), False, 'import cv2\n'), ((566, 623), 'cv2.createTrackbar', 'cv2.createTrackbar', (['"""L - V"""', '"""Trackbars"""', '(0)', '(255)', 'nothing'], {}), "('L - V', 'Trackbars', 0, 255, nothing)\n", (584, 623), False, 'import cv2\n'), ((624, 683), 'cv2.createTrackbar', 'cv2.createTrackbar', (['"""U - H"""', '"""Trackbars"""', '(179)', '(179)', 'nothing'], {}), "('U - H', 'Trackbars', 179, 179, nothing)\n", (642, 683), False, 'import cv2\n'), ((684, 743), 'cv2.createTrackbar', 'cv2.createTrackbar', (['"""U - S"""', '"""Trackbars"""', '(255)', '(255)', 'nothing'], {}), "('U - S', 'Trackbars', 255, 255, nothing)\n", (702, 743), False, 'import cv2\n'), ((744, 803), 'cv2.createTrackbar', 'cv2.createTrackbar', (['"""U - V"""', '"""Trackbars"""', '(255)', '(255)', 'nothing'], {}), "('U - V', 'Trackbars', 255, 255, nothing)\n", (762, 803), False, 'import cv2\n'), ((864, 883), 'cv2.VideoCapture', 'cv2.VideoCapture', (['(0)'], {}), '(0)\n', (880, 883), False, 'import cv2\n'), ((902, 926), 'numpy.array', 'np.array', (['[10, 130, 130]'], {}), '([10, 130, 130])\n', (910, 926), True, 'import numpy as np\n'), ((943, 967), 'numpy.array', 'np.array', (['[40, 255, 255]'], {}), '([40, 255, 255])\n', (951, 967), True, 'import numpy as np\n'), ((976, 993), 'collections.deque', 'deque', ([], {'maxlen': '(512)'}), '(maxlen=512)\n', (981, 993), False, 'from collections import deque\n'), ((1011, 1050), 'numpy.zeros', 'np.zeros', (['(480, 640, 3)'], {'dtype': 'np.uint8'}), '((480, 640, 3), dtype=np.uint8)\n', (1019, 1050), True, 'import numpy as np\n'), ((1063, 1102), 'numpy.zeros', 'np.zeros', (['(224, 224, 3)'], {'dtype': 'np.uint8'}), '((224, 224, 3), dtype=np.uint8)\n', (1071, 1102), True, 'import numpy as np\n'), ((1143, 1182), 'numpy.zeros', 'np.zeros', (['(200, 200, 3)'], {'dtype': 'np.uint8'}), '((200, 200, 3), dtype=np.uint8)\n', (1151, 1182), True, 'import numpy as np\n'), ((5020, 5043), 'keras.preprocessing.image.img_to_array', 'image.img_to_array', (['img'], {}), '(img)\n', (5038, 5043), False, 'from keras.preprocessing import image\n'), ((5079, 5112), 'numpy.expand_dims', 'np.expand_dims', (['img_array'], {'axis': '(0)'}), '(img_array, axis=0)\n', (5093, 5112), True, 'import numpy as np\n'), ((5131, 5201), 'keras.applications.mobilenet.preprocess_input', 'keras.applications.mobilenet.preprocess_input', (['img_array_expanded_dims'], {}), '(img_array_expanded_dims)\n', (5176, 5201), False, 'import keras\n'), ((6116, 6162), 'cv2.cvtColor', 'cv2.cvtColor', (['overlay_mask', 'cv2.COLOR_GRAY2BGR'], {}), '(overlay_mask, cv2.COLOR_GRAY2BGR)\n', (6128, 6162), False, 'import cv2\n'), ((6185, 6234), 'cv2.cvtColor', 'cv2.cvtColor', (['background_mask', 'cv2.COLOR_GRAY2BGR'], {}), '(background_mask, 
cv2.COLOR_GRAY2BGR)\n', (6197, 6234), False, 'import cv2\n'), ((6719, 6758), 'numpy.zeros', 'np.zeros', (['(224, 224, 3)'], {'dtype': 'np.uint8'}), '((224, 224, 3), dtype=np.uint8)\n', (6727, 6758), True, 'import numpy as np\n'), ((1275, 1291), 'cv2.flip', 'cv2.flip', (['img', '(1)'], {}), '(img, 1)\n', (1283, 1291), False, 'import cv2\n'), ((1300, 1360), 'cv2.rectangle', 'cv2.rectangle', (['img', '(400, 250)', '(624, 474)', '(255, 0, 255)', '(5)'], {}), '(img, (400, 250), (624, 474), (255, 0, 255), 5)\n', (1313, 1360), False, 'import cv2\n'), ((1367, 1403), 'cv2.cvtColor', 'cv2.cvtColor', (['img', 'cv2.COLOR_BGR2HSV'], {}), '(img, cv2.COLOR_BGR2HSV)\n', (1379, 1403), False, 'import cv2\n'), ((1418, 1458), 'cv2.getTrackbarPos', 'cv2.getTrackbarPos', (['"""L - H"""', '"""Trackbars"""'], {}), "('L - H', 'Trackbars')\n", (1436, 1458), False, 'import cv2\n'), ((1473, 1513), 'cv2.getTrackbarPos', 'cv2.getTrackbarPos', (['"""L - S"""', '"""Trackbars"""'], {}), "('L - S', 'Trackbars')\n", (1491, 1513), False, 'import cv2\n'), ((1528, 1568), 'cv2.getTrackbarPos', 'cv2.getTrackbarPos', (['"""L - V"""', '"""Trackbars"""'], {}), "('L - V', 'Trackbars')\n", (1546, 1568), False, 'import cv2\n'), ((1583, 1623), 'cv2.getTrackbarPos', 'cv2.getTrackbarPos', (['"""U - H"""', '"""Trackbars"""'], {}), "('U - H', 'Trackbars')\n", (1601, 1623), False, 'import cv2\n'), ((1638, 1678), 'cv2.getTrackbarPos', 'cv2.getTrackbarPos', (['"""U - S"""', '"""Trackbars"""'], {}), "('U - S', 'Trackbars')\n", (1656, 1678), False, 'import cv2\n'), ((1693, 1733), 'cv2.getTrackbarPos', 'cv2.getTrackbarPos', (['"""U - V"""', '"""Trackbars"""'], {}), "('U - V', 'Trackbars')\n", (1711, 1733), False, 'import cv2\n'), ((1764, 1789), 'numpy.array', 'np.array', (['[l_h, l_s, l_v]'], {}), '([l_h, l_s, l_v])\n', (1772, 1789), True, 'import numpy as np\n'), ((1885, 1910), 'numpy.array', 'np.array', (['[u_v, u_s, u_v]'], {}), '([u_v, u_s, u_v])\n', (1893, 1910), True, 'import numpy as np\n'), ((1979, 2004), 'numpy.ones', 'np.ones', (['(5, 5)', 'np.uint8'], {}), '((5, 5), np.uint8)\n', (1986, 2004), True, 'import numpy as np\n'), ((2020, 2062), 'cv2.inRange', 'cv2.inRange', (['hsv', 'Lower_green', 'Upper_green'], {}), '(hsv, Lower_green, Upper_green)\n', (2031, 2062), False, 'import cv2\n'), ((2078, 2115), 'cv2.erode', 'cv2.erode', (['mask', 'kernel'], {'iterations': '(2)'}), '(mask, kernel, iterations=2)\n', (2087, 2115), False, 'import cv2\n'), ((2131, 2177), 'cv2.morphologyEx', 'cv2.morphologyEx', (['mask', 'cv2.MORPH_OPEN', 'kernel'], {}), '(mask, cv2.MORPH_OPEN, kernel)\n', (2147, 2177), False, 'import cv2\n'), ((2191, 2238), 'cv2.morphologyEx', 'cv2.morphologyEx', (['mask', 'cv2.MORPH_CLOSE', 'kernel'], {}), '(mask, cv2.MORPH_CLOSE, kernel)\n', (2207, 2238), False, 'import cv2\n'), ((2252, 2290), 'cv2.dilate', 'cv2.dilate', (['mask', 'kernel'], {'iterations': '(1)'}), '(mask, kernel, iterations=1)\n', (2262, 2290), False, 'import cv2\n'), ((2305, 2341), 'cv2.bitwise_and', 'cv2.bitwise_and', (['img', 'img'], {'mask': 'mask'}), '(img, img, mask=mask)\n', (2320, 2341), False, 'import cv2\n'), ((4500, 4524), 'cv2.imshow', 'cv2.imshow', (['"""Frame"""', 'img'], {}), "('Frame', img)\n", (4510, 4524), False, 'import cv2\n'), ((4533, 4555), 'cv2.imshow', 'cv2.imshow', (['"""Res"""', 'res'], {}), "('Res', res)\n", (4543, 4555), False, 'import cv2\n'), ((4564, 4588), 'cv2.imshow', 'cv2.imshow', (['"""mask"""', 'mask'], {}), "('mask', mask)\n", (4574, 4588), False, 'import cv2\n'), ((4619, 4634), 'cv2.waitKey', 'cv2.waitKey', (['(10)'], {}), 
'(10)\n', (4630, 4634), False, 'import cv2\n'), ((6623, 6682), 'cv2.addWeighted', 'cv2.addWeighted', (['face_part', '(255.0)', 'overlay_part', '(255.0)', '(0.0)'], {}), '(face_part, 255.0, overlay_part, 255.0, 0.0)\n', (6638, 6682), False, 'import cv2\n'), ((5318, 5342), 'os.listdir', 'os.listdir', (['logos_folder'], {}), '(logos_folder)\n', (5328, 5342), False, 'import os\n'), ((2620, 2640), 'cv2.contourArea', 'cv2.contourArea', (['cnt'], {}), '(cnt)\n', (2635, 2640), False, 'import cv2\n'), ((2700, 2727), 'cv2.minEnclosingCircle', 'cv2.minEnclosingCircle', (['cnt'], {}), '(cnt)\n', (2722, 2727), False, 'import cv2\n'), ((2825, 2868), 'cv2.circle', 'cv2.circle', (['img', 'center', '(5)', '(0, 0, 255)', '(-1)'], {}), '(img, center, 5, (0, 0, 255), -1)\n', (2835, 2868), False, 'import cv2\n'), ((2889, 2905), 'cv2.moments', 'cv2.moments', (['cnt'], {}), '(cnt)\n', (2900, 2905), False, 'import cv2\n'), ((4309, 4326), 'collections.deque', 'deque', ([], {'maxlen': '(512)'}), '(maxlen=512)\n', (4314, 4326), False, 'from collections import deque\n'), ((4378, 4417), 'numpy.zeros', 'np.zeros', (['(480, 640, 3)'], {'dtype': 'np.uint8'}), '((480, 640, 3), dtype=np.uint8)\n', (4386, 4417), True, 'import numpy as np\n'), ((3182, 3242), 'cv2.line', 'cv2.line', (['blackboard', 'pts[i - 1]', 'pts[i]', '(255, 255, 255)', '(7)'], {}), '(blackboard, pts[i - 1], pts[i], (255, 255, 255), 7)\n', (3190, 3242), False, 'import cv2\n'), ((3263, 3312), 'cv2.line', 'cv2.line', (['img', 'pts[i - 1]', 'pts[i]', '(0, 0, 255)', '(2)'], {}), '(img, pts[i - 1], pts[i], (0, 0, 255), 2)\n', (3271, 3312), False, 'import cv2\n'), ((3458, 3502), 'cv2.cvtColor', 'cv2.cvtColor', (['blackboard', 'cv2.COLOR_BGR2GRAY'], {}), '(blackboard, cv2.COLOR_BGR2GRAY)\n', (3470, 3502), False, 'import cv2\n'), ((3553, 3588), 'cv2.medianBlur', 'cv2.medianBlur', (['blackboard_gray', '(15)'], {}), '(blackboard_gray, 15)\n', (3567, 3588), False, 'import cv2\n'), ((3613, 3647), 'cv2.GaussianBlur', 'cv2.GaussianBlur', (['blur1', '(5, 5)', '(0)'], {}), '(blur1, (5, 5), 0)\n', (3629, 3647), False, 'import cv2\n'), ((3674, 3739), 'cv2.threshold', 'cv2.threshold', (['blur1', '(0)', '(255)', '(cv2.THRESH_BINARY + cv2.THRESH_OTSU)'], {}), '(blur1, 0, 255, cv2.THRESH_BINARY + cv2.THRESH_OTSU)\n', (3687, 3739), False, 'import cv2\n'), ((4037, 4057), 'cv2.contourArea', 'cv2.contourArea', (['cnt'], {}), '(cnt)\n', (4052, 4057), False, 'import cv2\n')]
|
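# Minimal, self-contained sketch of the HSV-mask + largest-contour step used
# in main() above, run on a synthetic frame instead of a webcam capture.
# Requires OpenCV and NumPy; the colour bounds are arbitrary example values.
import cv2
import numpy as np

frame = np.zeros((240, 320, 3), dtype=np.uint8)
cv2.circle(frame, (160, 120), 30, (0, 165, 255), -1)   # draw an orange blob

hsv = cv2.cvtColor(frame, cv2.COLOR_BGR2HSV)
mask = cv2.inRange(hsv, np.array([10, 130, 130]), np.array([40, 255, 255]))

# findContours returns 2 or 3 values depending on the OpenCV version;
# slicing the last two keeps this compatible, as the row above does.
cnts, _ = cv2.findContours(mask.copy(), cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)[-2:]
if cnts:
    cnt = max(cnts, key=cv2.contourArea)
    (x, y), radius = cv2.minEnclosingCircle(cnt)
    print("blob centre:", (round(x), round(y)), "radius:", round(radius))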
import json
from decimal import Decimal
from pymongo import MongoClient
from appkernel import PropertyRequiredException
from appkernel.configuration import config
from appkernel.repository import mongo_type_converter_to_dict, mongo_type_converter_from_dict
from .utils import *
import pytest
from jsonschema import validate
def setup_module(module):
config.mongo_database = MongoClient(host='localhost')['appkernel']
def setup_function(function):
""" executed before each method call
"""
print('\n\nSETUP ==> ')
Project.delete_all()
User.delete_all()
def test_required_field():
project = Project()
with pytest.raises(PropertyRequiredException):
project.finalise_and_validate()
with pytest.raises(PropertyRequiredException):
project.update(name=None)
project.finalise_and_validate()
project.update(name='some_name')
project.finalise_and_validate()
def test_append_to_non_existing_non_defined_element():
project = Project().update(name='strange project')
project.append_to(users=Task().update(name='some_task', description='some description'))
project.finalise_and_validate()
assert 'users' in project.__dict__
assert len(project.users) == 1
assert isinstance(project.users[0], Task)
print(('{}'.format(project)))
def test_append_to_non_existing_element():
project = Project().update(name='strange project')
project.append_to(tasks=Task().update(name='some_task', description='some description'))
project.finalise_and_validate()
assert 'tasks' in project.__dict__
assert len(project.tasks) == 1
assert isinstance(project.tasks[0], Task)
print(('{}'.format(project)))
def test_remove_non_existing_element():
with pytest.raises(AttributeError):
project = Project().update(name='strange project')
project.remove_from(tasks=Task())
with pytest.raises(AttributeError):
project = Project().update(name='strange project')
project.remove_from(tasks=None)
with pytest.raises(AttributeError):
project = Project().update(name='strange project')
project.remove_from(somehtings=Task())
def test_remove_existing_defined_element():
task1 = Task().update(name='some_task', description='some description')
task2 = Task().update(name='some_other_task', description='some other description')
task3 = Task().update(name='a third task', description='some third description')
project = Project().update(name='strange project')
project.append_to(tasks=[task1, task2])
project.finalise_and_validate()
assert len(project.tasks) == 2
project.append_to(tasks=task3)
project.finalise_and_validate()
assert len(project.tasks) == 3
print(('{}'.format(project)))
project.remove_from(tasks=task1)
assert len(project.tasks) == 2
print(('{}'.format(project)))
def test_generator():
task = Task()
task.name = 'some task name'
task.description = 'some task description'
task.finalise_and_validate()
print(('\nTask:\n {}'.format(task)))
assert task.id is not None and task.id.startswith('U')
def test_converter():
user = create_and_save_a_user('test user', 'test password', 'test description')
print(('\n{}'.format(user.dumps(pretty_print=True))))
assert user.password.startswith('<PASSWORD>')
hash1 = user.password
user.save()
assert user.password.startswith('<PASSWORD>')
assert hash1 == user.password
def test_nested_object_serialisation():
portfolio = create_a_portfolion_with_owner()
print((portfolio.dumps(pretty_print=True)))
check_portfolio(portfolio)
def test_describe_model():
user_spec = User.get_parameter_spec()
print(User.get_paramater_spec_as_json())
assert 'name' in user_spec
assert user_spec.get('name').get('required')
assert user_spec.get('name').get('type') == 'str'
assert len(user_spec.get('name').get('validators')) == 2
for validator in user_spec.get('name').get('validators'):
if validator.get('type') == 'Regexp':
assert validator.get('value') == '[A-Za-z0-9-_]'
assert user_spec.get('roles').get('sub_type') == 'str'
def test_describe_rich_model():
project_spec = Project.get_parameter_spec()
print(Project.get_paramater_spec_as_json())
assert project_spec.get('created').get('required')
assert project_spec.get('created').get('type') == 'datetime'
assert project_spec.get('name').get('required')
assert project_spec.get('name').get('type') == 'str'
name_validators = project_spec.get('name').get('validators')
assert len(name_validators) == 1
assert name_validators[0].get('type') == 'NotEmpty'
assert name_validators[0].get('value') is None or 'null'
tasks = project_spec.get('tasks')
assert not tasks.get('required')
assert 'sub_type' in tasks
assert tasks.get('type') == 'list'
task = tasks.get('sub_type')
assert task.get('type') == 'Task'
assert 'props' in task
props = task.get('props')
assert not props.get('closed_date').get('required')
assert props.get('closed_date').get('type') == 'datetime'
assert props.get('closed_date').get('validators')[0].get('type') == 'Past'
def test_json_schema():
json_schema = Project.get_json_schema()
print('\n{}'.format(json.dumps(json_schema, indent=2)))
print('===========')
project = create_rich_project()
print(project.dumps(pretty_print=True))
assert json_schema.get('title') == 'Project Schema'
assert 'title' in json_schema
assert json_schema.get('type') == 'object'
assert 'name' in json_schema.get('required')
assert 'created' in json_schema.get('required')
assert 'definitions' in json_schema
assert json_schema.get('additionalProperties')
definitions = json_schema.get('definitions')
assert 'Task' in definitions
assert len(definitions.get('Task').get('required')) == 6
assert 'id' in definitions.get('Task').get('properties')
closed_date = definitions.get('Task').get('properties').get('closed_date')
assert 'string' in closed_date.get('type')
assert len(closed_date.get('type')) == 2
assert closed_date.get('format') == 'date-time'
completed = definitions.get('Task').get('properties').get('completed')
assert 'boolean' in completed.get('type')
assert len(completed.get('type')) == 1
validate(json.loads(project.dumps()), json_schema)
# todo: check the enum / make a negative test
# validator = Draft4Validator(json_schema)
# errors = sorted(validator.iter_errors(project.dumps()), key=lambda e: e.path)
# for error in errors:
# print('{}'.format(error.message, list(error.path)))
def test_json_schema_primitives_types():
json_schema = Stock.get_json_schema()
print(json.dumps(json_schema, indent=2))
props = json_schema.get('properties')
opentypes = props.get('open').get('type')
assert 'number' in opentypes
assert len(opentypes) == 1
item_types = props.get('history').get('items').get('type')
assert 'number' in item_types
len(item_types) == 1
stock = create_a_stock()
validate(json.loads(stock.dumps()), json_schema)
def test_json_schema_complex():
# print json.dumps(Portfolio.get_parameter_spec(True), indent=2)
json_schema = Portfolio.get_json_schema()
print(json.dumps(json_schema, indent=2))
stock_definition = json_schema.get('definitions').get('Stock')
assert stock_definition.get('properties').get('updated').get('format') == 'date-time'
assert stock_definition.get('properties').get('code').get('pattern') == '[A-Za-z0-9-_]'
assert stock_definition.get('properties').get('code').get('maxLength') == 4
assert stock_definition.get('properties').get('open').get('minimum') == 0
open_types = stock_definition.get('properties').get('open').get('type')
assert 'number' in open_types
assert len(open_types) == 1
sequence_types = stock_definition.get('properties').get('sequence').get('type')
assert 'number' in sequence_types
assert len(sequence_types) == 2
assert stock_definition.get('properties').get('sequence').get('minimum') == 1
assert stock_definition.get('properties').get('sequence').get('maximum') == 100
assert stock_definition.get('properties').get('sequence').get('multipleOf') == 1.0
history_types = stock_definition.get('properties').get('history').get('type')
assert 'array' in history_types
assert len(history_types) == 2
portfolio = create_portfolio('My Portfolio')
validate(json.loads(portfolio.dumps()), json_schema)
def test_json_schema_in_mongo_compat_mode():
json_schema = Project.get_json_schema(mongo_compatibility=True)
print('\n\n{}'.format(json.dumps(json_schema, indent=2)))
print('===========')
task_spec = json_schema.get('properties').get('tasks')
assert len(task_spec.get('items').get('required')) == 5
priority_spec = task_spec.get('items').get('properties').get('priority')
assert len(priority_spec.get('enum')) == 3
closed_date_spec = task_spec.get('items').get('properties').get('closed_date')
assert len(closed_date_spec.get('bsonType')) == 2
assert 'bsonType' in json_schema
assert 'id' not in json_schema
assert '$schema' not in json_schema
assert 'definitions' not in json_schema
for prop in json_schema.get('properties').items():
assert 'format' not in prop[1]
assert 'bsonType' in prop[1]
for prop in task_spec.get('items').get('properties').items():
assert 'format' not in prop[1]
assert 'bsonType' or 'enum' in prop[1]
project = create_rich_project()
print(project.dumps(pretty_print=True))
validate(json.loads(project.dumps()), json_schema)
def __assert_product_dict(product_dict: dict):
assert 'name' in product_dict
assert 'description' in product_dict
assert 'size' in product_dict
assert product_dict.get('size') == 'M'
assert 'price' in product_dict
assert isinstance(product_dict.get('price'), dict)
price_dict = product_dict.get('price')
assert '_type' in price_dict
assert price_dict.get('_type') == 'money.money.Money'
assert price_dict.get('currency') == 'EUR'
def test_custom_object_marshalling():
product = Product(code='TRX', name='White T-Shirt', description='a stylish white shirt', size=ProductSize.M,
price=Money(10.50, 'EUR'))
product_dict = Model.to_dict(product)
__assert_product_dict(product_dict)
amount = product_dict.get('price').get('amount')
assert isinstance(amount, Decimal)
assert amount == 10.5
product_json = product.dumps(pretty_print=True)
print('JSON: \n{}'.format(product_json))
reloaded_product = Product.loads(product_json)
assert reloaded_product is not None and isinstance(reloaded_product, Product)
assert reloaded_product.name == product.name
assert reloaded_product.description == product.description
assert reloaded_product.size == product.size
assert isinstance(reloaded_product.price, Money)
assert reloaded_product.price == product.price
def test_custom_converter_function():
product = Product(code='TRX', name='White T-Shirt', description='a stylish white shirt', size=ProductSize.M,
price=Money(10.50, 'EUR'))
product_dict = Model.to_dict(product, converter_func=mongo_type_converter_to_dict)
__assert_product_dict(product_dict)
amount = product_dict.get('price').get('amount')
assert isinstance(amount, float)
product_json = product.dumps(pretty_print=True)
print('JSON: \n{}'.format(product_json))
reloaded_product = Model.from_dict(product_dict, Product, converter_func=mongo_type_converter_from_dict)
assert isinstance(reloaded_product.price, Money)
assert isinstance(reloaded_product.price.amount, Decimal)
|
[
"pymongo.MongoClient",
"pytest.raises",
"json.dumps"
] |
[((380, 409), 'pymongo.MongoClient', 'MongoClient', ([], {'host': '"""localhost"""'}), "(host='localhost')\n", (391, 409), False, 'from pymongo import MongoClient\n'), ((641, 681), 'pytest.raises', 'pytest.raises', (['PropertyRequiredException'], {}), '(PropertyRequiredException)\n', (654, 681), False, 'import pytest\n'), ((732, 772), 'pytest.raises', 'pytest.raises', (['PropertyRequiredException'], {}), '(PropertyRequiredException)\n', (745, 772), False, 'import pytest\n'), ((1750, 1779), 'pytest.raises', 'pytest.raises', (['AttributeError'], {}), '(AttributeError)\n', (1763, 1779), False, 'import pytest\n'), ((1892, 1921), 'pytest.raises', 'pytest.raises', (['AttributeError'], {}), '(AttributeError)\n', (1905, 1921), False, 'import pytest\n'), ((2032, 2061), 'pytest.raises', 'pytest.raises', (['AttributeError'], {}), '(AttributeError)\n', (2045, 2061), False, 'import pytest\n'), ((6814, 6847), 'json.dumps', 'json.dumps', (['json_schema'], {'indent': '(2)'}), '(json_schema, indent=2)\n', (6824, 6847), False, 'import json\n'), ((7364, 7397), 'json.dumps', 'json.dumps', (['json_schema'], {'indent': '(2)'}), '(json_schema, indent=2)\n', (7374, 7397), False, 'import json\n'), ((5332, 5365), 'json.dumps', 'json.dumps', (['json_schema'], {'indent': '(2)'}), '(json_schema, indent=2)\n', (5342, 5365), False, 'import json\n'), ((8759, 8792), 'json.dumps', 'json.dumps', (['json_schema'], {'indent': '(2)'}), '(json_schema, indent=2)\n', (8769, 8792), False, 'import json\n')]
|
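# Small stand-alone illustration of the jsonschema.validate() calls used in
# the tests above; the schema below is a made-up example, not appkernel output.
from jsonschema import validate, ValidationError

schema = {
    "type": "object",
    "required": ["name", "created"],
    "properties": {
        "name": {"type": "string", "pattern": "[A-Za-z0-9-_]"},
        "created": {"type": "string", "format": "date-time"},
        "tasks": {"type": "array", "items": {"type": "object"}},
    },
}

validate({"name": "strange_project", "created": "2018-01-01T00:00:00Z"}, schema)

try:
    validate({"created": "2018-01-01T00:00:00Z"}, schema)   # missing required "name"
except ValidationError as exc:
    print("rejected:", exc.message)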
from itertools import permutations
import numpy as np
import pytest
from pyomeca import Angles, Rototrans, Markers
SEQ = (
["".join(p) for i in range(1, 4) for p in permutations("xyz", i)]
+ ["zyzz"]
+ ["zxz"]
)
SEQ = [s for s in SEQ if s not in ["yxz"]]
EPSILON = 1e-12
ANGLES = Angles(np.random.rand(4, 1, 100))
@pytest.mark.parametrize("seq", SEQ)
def test_euler2rot_rot2euleur(seq, angles=ANGLES, epsilon=EPSILON):
if seq == "zyzz":
angles_to_test = angles[:3, ...]
else:
angles_to_test = angles[: len(seq), ...]
r = Rototrans.from_euler_angles(angles=angles_to_test, angle_sequence=seq)
a = Angles.from_rototrans(rototrans=r, angle_sequence=seq)
np.testing.assert_array_less((a - angles_to_test).meca.abs().sum(), epsilon)
def test_construct_rt():
eye = Rototrans()
np.testing.assert_equal(eye.time.size, 1)
np.testing.assert_equal(eye.sel(time=0), np.eye(4))
eye = Rototrans.from_euler_angles()
np.testing.assert_equal(eye.time.size, 1)
np.testing.assert_equal(eye.sel(time=0), np.eye(4))
# Test the way to create a rt, but not when providing bot angles and sequence
nb_frames = 10
random_vector = Angles(np.random.rand(3, 1, nb_frames))
# with angles
rt_random_angles = Rototrans.from_euler_angles(
angles=random_vector, angle_sequence="xyz"
)
np.testing.assert_equal(rt_random_angles.time.size, nb_frames)
np.testing.assert_equal(
rt_random_angles[:-1, -1:, :], np.zeros((3, 1, nb_frames))
) # Translation is 0
# with translation
rt_random_translation = Rototrans.from_euler_angles(translations=random_vector)
np.testing.assert_equal(rt_random_translation.time.size, nb_frames)
np.testing.assert_equal(
rt_random_translation[:3, :3, :],
np.repeat(np.eye(3)[:, :, np.newaxis], nb_frames, axis=2),
) # rotation is eye3
np.arange(0, rt_random_angles.time.size / 0.5, 1 / 0.5)
rt_with_time = Rototrans(
rt_random_angles, time=np.arange(0, rt_random_angles.time.size / 100, 1 / 100),
)
assert rt_with_time.time[-1] == 0.09
with pytest.raises(IndexError):
Rototrans(data=np.zeros(1))
with pytest.raises(IndexError):
Rototrans.from_euler_angles(
angles=random_vector[..., :5],
translations=random_vector,
angle_sequence="x",
)
with pytest.raises(IndexError):
Rototrans.from_euler_angles(angles=random_vector, angle_sequence="x")
with pytest.raises(ValueError):
Rototrans.from_euler_angles(angles=random_vector, angle_sequence="nop")
def test_rt_from_markers():
all_m = Markers.from_random_data()
rt_xy = Rototrans.from_markers(
origin=all_m.isel(channel=[0]),
axis_1=all_m.isel(channel=[0, 1]),
axis_2=all_m.isel(channel=[0, 2]),
axes_name="xy",
axis_to_recalculate="y",
)
rt_yx = Rototrans.from_markers(
origin=all_m.isel(channel=[0]),
axis_1=all_m.isel(channel=[0, 2]),
axis_2=all_m.isel(channel=[0, 1]),
axes_name="yx",
axis_to_recalculate="y",
)
rt_xy_x_recalc = Rototrans.from_markers(
origin=all_m.isel(channel=[0]),
axis_1=all_m.isel(channel=[0, 1]),
axis_2=all_m.isel(channel=[0, 2]),
axes_name="yx",
axis_to_recalculate="x",
)
rt_xy_x_recalc = rt_xy_x_recalc.isel(col=[1, 0, 2, 3])
rt_xy_x_recalc[:, 2, :] = -rt_xy_x_recalc[:, 2, :]
rt_yz = Rototrans.from_markers(
origin=all_m.isel(channel=[0]),
axis_1=all_m.isel(channel=[0, 1]),
axis_2=all_m.isel(channel=[0, 2]),
axes_name="yz",
axis_to_recalculate="z",
)
rt_zy = Rototrans.from_markers(
origin=all_m.isel(channel=[0]),
axis_1=all_m.isel(channel=[0, 2]),
axis_2=all_m.isel(channel=[0, 1]),
axes_name="zy",
axis_to_recalculate="z",
)
rt_xy_from_yz = rt_yz.isel(col=[1, 2, 0, 3])
rt_xz = Rototrans.from_markers(
origin=all_m.isel(channel=[0]),
axis_1=all_m.isel(channel=[0, 1]),
axis_2=all_m.isel(channel=[0, 2]),
axes_name="xz",
axis_to_recalculate="z",
)
rt_zx = Rototrans.from_markers(
origin=all_m.isel(channel=[0]),
axis_1=all_m.isel(channel=[0, 2]),
axis_2=all_m.isel(channel=[0, 1]),
axes_name="zx",
axis_to_recalculate="z",
)
rt_xy_from_zx = rt_xz.isel(col=[0, 2, 1, 3])
rt_xy_from_zx[:, 2, :] = -rt_xy_from_zx[:, 2, :]
np.testing.assert_array_equal(rt_xy, rt_xy_x_recalc)
np.testing.assert_array_equal(rt_xy, rt_yx)
np.testing.assert_array_equal(rt_yz, rt_zy)
np.testing.assert_array_equal(rt_xz, rt_zx)
np.testing.assert_array_equal(rt_xy, rt_xy_from_yz)
np.testing.assert_array_equal(rt_xy, rt_xy_from_zx)
# Produce one that we know the solution
ref_m = Markers(np.array(((1, 2, 3), (4, 5, 6), (6, 5, 4))).T[:, :, np.newaxis])
rt_xy_from_known_m = Rototrans.from_markers(
origin=ref_m.isel(channel=[0]),
axis_1=ref_m.isel(channel=[0, 1]),
axis_2=ref_m.isel(channel=[0, 2]),
axes_name="xy",
axis_to_recalculate="y",
)
rt_xy_expected = Rototrans(
np.array(
[
[0.5773502691896257, 0.7071067811865475, -0.408248290463863, 1.0],
[0.5773502691896257, 0.0, 0.816496580927726, 2.0],
[0.5773502691896257, -0.7071067811865475, -0.408248290463863, 3.0],
[0, 0, 0, 1.0],
]
)
)
np.testing.assert_array_equal(rt_xy_from_known_m, rt_xy_expected)
exception_default_params = dict(
origin=all_m.isel(channel=[0]),
axis_1=all_m.isel(channel=[0, 1]),
axis_2=all_m.isel(channel=[0, 2]),
axes_name="xy",
axis_to_recalculate="y",
)
with pytest.raises(ValueError):
Rototrans.from_markers(
**{**exception_default_params, **dict(origin=all_m.isel(channel=[0, 1]))}
)
with pytest.raises(ValueError):
Rototrans.from_markers(
**{**exception_default_params, **dict(axis_1=all_m.isel(channel=[0]))}
)
with pytest.raises(ValueError):
Rototrans.from_markers(
**{**exception_default_params, **dict(axis_2=all_m.isel(channel=[0]))}
)
with pytest.raises(ValueError):
Rototrans.from_markers(
**{
**exception_default_params,
**dict(axis_1=all_m.isel(channel=[0, 1], time=slice(None, 50))),
}
)
with pytest.raises(ValueError):
Rototrans.from_markers(**{**exception_default_params, **dict(axes_name="yyz")})
with pytest.raises(ValueError):
Rototrans.from_markers(**{**exception_default_params, **dict(axes_name="xxz")})
with pytest.raises(ValueError):
Rototrans.from_markers(**{**exception_default_params, **dict(axes_name="zzz")})
with pytest.raises(ValueError):
Rototrans.from_markers(
**{**exception_default_params, **dict(axis_to_recalculate="h")}
)
def test_rt_transpose():
n_frames = 10
angles = Angles.from_random_data(size=(3, 1, n_frames))
rt = Rototrans.from_euler_angles(angles, angle_sequence="xyz")
rt_t = Rototrans.from_transposed_rototrans(rt)
rt_t_expected = np.zeros((4, 4, n_frames))
rt_t_expected[3, 3, :] = 1
for row in range(rt.row.size):
for col in range(rt.col.size):
for frame in range(rt.time.size):
rt_t_expected[col, row, frame] = rt[row, col, frame]
for frame in range(rt.time.size):
rt_t_expected[:3, 3, frame] = -rt_t_expected[:3, :3, frame].dot(
rt[:3, 3, frame]
)
np.testing.assert_array_almost_equal(rt_t, rt_t_expected, decimal=10)
def test_average_rt():
# TODO: investigate why this does not work
# angles = Angles.from_random_data(size=(3, 1, 100))
# or
# angles = Angles(np.arange(300).reshape((3, 1, 100)))
angles = Angles(np.random.rand(3, 1, 100))
seq = "xyz"
rt = Rototrans.from_euler_angles(angles, seq)
rt_mean = Rototrans.from_averaged_rototrans(rt)
angles_mean = Angles.from_rototrans(rt_mean, seq).isel(time=0)
angles_mean_ref = Angles.from_rototrans(rt, seq).mean(dim="time")
np.testing.assert_array_almost_equal(angles_mean, angles_mean_ref, decimal=2)
|
[
"pyomeca.Markers.from_random_data",
"numpy.eye",
"pyomeca.Angles.from_rototrans",
"numpy.testing.assert_array_equal",
"itertools.permutations",
"pyomeca.Rototrans.from_euler_angles",
"numpy.zeros",
"pyomeca.Rototrans.from_averaged_rototrans",
"pytest.raises",
"pyomeca.Angles.from_random_data",
"numpy.arange",
"numpy.array",
"numpy.testing.assert_equal",
"pyomeca.Rototrans.from_transposed_rototrans",
"numpy.random.rand",
"pytest.mark.parametrize",
"numpy.testing.assert_array_almost_equal",
"pyomeca.Rototrans"
] |
[((332, 367), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""seq"""', 'SEQ'], {}), "('seq', SEQ)\n", (355, 367), False, 'import pytest\n'), ((302, 327), 'numpy.random.rand', 'np.random.rand', (['(4)', '(1)', '(100)'], {}), '(4, 1, 100)\n', (316, 327), True, 'import numpy as np\n'), ((566, 636), 'pyomeca.Rototrans.from_euler_angles', 'Rototrans.from_euler_angles', ([], {'angles': 'angles_to_test', 'angle_sequence': 'seq'}), '(angles=angles_to_test, angle_sequence=seq)\n', (593, 636), False, 'from pyomeca import Angles, Rototrans, Markers\n'), ((645, 699), 'pyomeca.Angles.from_rototrans', 'Angles.from_rototrans', ([], {'rototrans': 'r', 'angle_sequence': 'seq'}), '(rototrans=r, angle_sequence=seq)\n', (666, 699), False, 'from pyomeca import Angles, Rototrans, Markers\n'), ((819, 830), 'pyomeca.Rototrans', 'Rototrans', ([], {}), '()\n', (828, 830), False, 'from pyomeca import Angles, Rototrans, Markers\n'), ((835, 876), 'numpy.testing.assert_equal', 'np.testing.assert_equal', (['eye.time.size', '(1)'], {}), '(eye.time.size, 1)\n', (858, 876), True, 'import numpy as np\n'), ((944, 973), 'pyomeca.Rototrans.from_euler_angles', 'Rototrans.from_euler_angles', ([], {}), '()\n', (971, 973), False, 'from pyomeca import Angles, Rototrans, Markers\n'), ((978, 1019), 'numpy.testing.assert_equal', 'np.testing.assert_equal', (['eye.time.size', '(1)'], {}), '(eye.time.size, 1)\n', (1001, 1019), True, 'import numpy as np\n'), ((1280, 1351), 'pyomeca.Rototrans.from_euler_angles', 'Rototrans.from_euler_angles', ([], {'angles': 'random_vector', 'angle_sequence': '"""xyz"""'}), "(angles=random_vector, angle_sequence='xyz')\n", (1307, 1351), False, 'from pyomeca import Angles, Rototrans, Markers\n'), ((1370, 1432), 'numpy.testing.assert_equal', 'np.testing.assert_equal', (['rt_random_angles.time.size', 'nb_frames'], {}), '(rt_random_angles.time.size, nb_frames)\n', (1393, 1432), True, 'import numpy as np\n'), ((1607, 1662), 'pyomeca.Rototrans.from_euler_angles', 'Rototrans.from_euler_angles', ([], {'translations': 'random_vector'}), '(translations=random_vector)\n', (1634, 1662), False, 'from pyomeca import Angles, Rototrans, Markers\n'), ((1667, 1734), 'numpy.testing.assert_equal', 'np.testing.assert_equal', (['rt_random_translation.time.size', 'nb_frames'], {}), '(rt_random_translation.time.size, nb_frames)\n', (1690, 1734), True, 'import numpy as np\n'), ((1903, 1958), 'numpy.arange', 'np.arange', (['(0)', '(rt_random_angles.time.size / 0.5)', '(1 / 0.5)'], {}), '(0, rt_random_angles.time.size / 0.5, 1 / 0.5)\n', (1912, 1958), True, 'import numpy as np\n'), ((2671, 2697), 'pyomeca.Markers.from_random_data', 'Markers.from_random_data', ([], {}), '()\n', (2695, 2697), False, 'from pyomeca import Angles, Rototrans, Markers\n'), ((4559, 4611), 'numpy.testing.assert_array_equal', 'np.testing.assert_array_equal', (['rt_xy', 'rt_xy_x_recalc'], {}), '(rt_xy, rt_xy_x_recalc)\n', (4588, 4611), True, 'import numpy as np\n'), ((4616, 4659), 'numpy.testing.assert_array_equal', 'np.testing.assert_array_equal', (['rt_xy', 'rt_yx'], {}), '(rt_xy, rt_yx)\n', (4645, 4659), True, 'import numpy as np\n'), ((4664, 4707), 'numpy.testing.assert_array_equal', 'np.testing.assert_array_equal', (['rt_yz', 'rt_zy'], {}), '(rt_yz, rt_zy)\n', (4693, 4707), True, 'import numpy as np\n'), ((4712, 4755), 'numpy.testing.assert_array_equal', 'np.testing.assert_array_equal', (['rt_xz', 'rt_zx'], {}), '(rt_xz, rt_zx)\n', (4741, 4755), True, 'import numpy as np\n'), ((4760, 4811), 'numpy.testing.assert_array_equal', 
'np.testing.assert_array_equal', (['rt_xy', 'rt_xy_from_yz'], {}), '(rt_xy, rt_xy_from_yz)\n', (4789, 4811), True, 'import numpy as np\n'), ((4816, 4867), 'numpy.testing.assert_array_equal', 'np.testing.assert_array_equal', (['rt_xy', 'rt_xy_from_zx'], {}), '(rt_xy, rt_xy_from_zx)\n', (4845, 4867), True, 'import numpy as np\n'), ((5602, 5667), 'numpy.testing.assert_array_equal', 'np.testing.assert_array_equal', (['rt_xy_from_known_m', 'rt_xy_expected'], {}), '(rt_xy_from_known_m, rt_xy_expected)\n', (5631, 5667), True, 'import numpy as np\n'), ((7205, 7251), 'pyomeca.Angles.from_random_data', 'Angles.from_random_data', ([], {'size': '(3, 1, n_frames)'}), '(size=(3, 1, n_frames))\n', (7228, 7251), False, 'from pyomeca import Angles, Rototrans, Markers\n'), ((7261, 7318), 'pyomeca.Rototrans.from_euler_angles', 'Rototrans.from_euler_angles', (['angles'], {'angle_sequence': '"""xyz"""'}), "(angles, angle_sequence='xyz')\n", (7288, 7318), False, 'from pyomeca import Angles, Rototrans, Markers\n'), ((7331, 7370), 'pyomeca.Rototrans.from_transposed_rototrans', 'Rototrans.from_transposed_rototrans', (['rt'], {}), '(rt)\n', (7366, 7370), False, 'from pyomeca import Angles, Rototrans, Markers\n'), ((7392, 7418), 'numpy.zeros', 'np.zeros', (['(4, 4, n_frames)'], {}), '((4, 4, n_frames))\n', (7400, 7418), True, 'import numpy as np\n'), ((7795, 7864), 'numpy.testing.assert_array_almost_equal', 'np.testing.assert_array_almost_equal', (['rt_t', 'rt_t_expected'], {'decimal': '(10)'}), '(rt_t, rt_t_expected, decimal=10)\n', (7831, 7864), True, 'import numpy as np\n'), ((8135, 8175), 'pyomeca.Rototrans.from_euler_angles', 'Rototrans.from_euler_angles', (['angles', 'seq'], {}), '(angles, seq)\n', (8162, 8175), False, 'from pyomeca import Angles, Rototrans, Markers\n'), ((8190, 8227), 'pyomeca.Rototrans.from_averaged_rototrans', 'Rototrans.from_averaged_rototrans', (['rt'], {}), '(rt)\n', (8223, 8227), False, 'from pyomeca import Angles, Rototrans, Markers\n'), ((8371, 8448), 'numpy.testing.assert_array_almost_equal', 'np.testing.assert_array_almost_equal', (['angles_mean', 'angles_mean_ref'], {'decimal': '(2)'}), '(angles_mean, angles_mean_ref, decimal=2)\n', (8407, 8448), True, 'import numpy as np\n'), ((922, 931), 'numpy.eye', 'np.eye', (['(4)'], {}), '(4)\n', (928, 931), True, 'import numpy as np\n'), ((1065, 1074), 'numpy.eye', 'np.eye', (['(4)'], {}), '(4)\n', (1071, 1074), True, 'import numpy as np\n'), ((1205, 1236), 'numpy.random.rand', 'np.random.rand', (['(3)', '(1)', 'nb_frames'], {}), '(3, 1, nb_frames)\n', (1219, 1236), True, 'import numpy as np\n'), ((1501, 1528), 'numpy.zeros', 'np.zeros', (['(3, 1, nb_frames)'], {}), '((3, 1, nb_frames))\n', (1509, 1528), True, 'import numpy as np\n'), ((2135, 2160), 'pytest.raises', 'pytest.raises', (['IndexError'], {}), '(IndexError)\n', (2148, 2160), False, 'import pytest\n'), ((2208, 2233), 'pytest.raises', 'pytest.raises', (['IndexError'], {}), '(IndexError)\n', (2221, 2233), False, 'import pytest\n'), ((2243, 2354), 'pyomeca.Rototrans.from_euler_angles', 'Rototrans.from_euler_angles', ([], {'angles': 'random_vector[..., :5]', 'translations': 'random_vector', 'angle_sequence': '"""x"""'}), "(angles=random_vector[..., :5], translations=\n random_vector, angle_sequence='x')\n", (2270, 2354), False, 'from pyomeca import Angles, Rototrans, Markers\n'), ((2407, 2432), 'pytest.raises', 'pytest.raises', (['IndexError'], {}), '(IndexError)\n', (2420, 2432), False, 'import pytest\n'), ((2442, 2511), 'pyomeca.Rototrans.from_euler_angles', 
'Rototrans.from_euler_angles', ([], {'angles': 'random_vector', 'angle_sequence': '"""x"""'}), "(angles=random_vector, angle_sequence='x')\n", (2469, 2511), False, 'from pyomeca import Angles, Rototrans, Markers\n'), ((2522, 2547), 'pytest.raises', 'pytest.raises', (['ValueError'], {}), '(ValueError)\n', (2535, 2547), False, 'import pytest\n'), ((2557, 2628), 'pyomeca.Rototrans.from_euler_angles', 'Rototrans.from_euler_angles', ([], {'angles': 'random_vector', 'angle_sequence': '"""nop"""'}), "(angles=random_vector, angle_sequence='nop')\n", (2584, 2628), False, 'from pyomeca import Angles, Rototrans, Markers\n'), ((5277, 5497), 'numpy.array', 'np.array', (['[[0.5773502691896257, 0.7071067811865475, -0.408248290463863, 1.0], [\n 0.5773502691896257, 0.0, 0.816496580927726, 2.0], [0.5773502691896257, \n -0.7071067811865475, -0.408248290463863, 3.0], [0, 0, 0, 1.0]]'], {}), '([[0.5773502691896257, 0.7071067811865475, -0.408248290463863, 1.0],\n [0.5773502691896257, 0.0, 0.816496580927726, 2.0], [0.5773502691896257,\n -0.7071067811865475, -0.408248290463863, 3.0], [0, 0, 0, 1.0]])\n', (5285, 5497), True, 'import numpy as np\n'), ((5904, 5929), 'pytest.raises', 'pytest.raises', (['ValueError'], {}), '(ValueError)\n', (5917, 5929), False, 'import pytest\n'), ((6069, 6094), 'pytest.raises', 'pytest.raises', (['ValueError'], {}), '(ValueError)\n', (6082, 6094), False, 'import pytest\n'), ((6231, 6256), 'pytest.raises', 'pytest.raises', (['ValueError'], {}), '(ValueError)\n', (6244, 6256), False, 'import pytest\n'), ((6393, 6418), 'pytest.raises', 'pytest.raises', (['ValueError'], {}), '(ValueError)\n', (6406, 6418), False, 'import pytest\n'), ((6627, 6652), 'pytest.raises', 'pytest.raises', (['ValueError'], {}), '(ValueError)\n', (6640, 6652), False, 'import pytest\n'), ((6752, 6777), 'pytest.raises', 'pytest.raises', (['ValueError'], {}), '(ValueError)\n', (6765, 6777), False, 'import pytest\n'), ((6877, 6902), 'pytest.raises', 'pytest.raises', (['ValueError'], {}), '(ValueError)\n', (6890, 6902), False, 'import pytest\n'), ((7002, 7027), 'pytest.raises', 'pytest.raises', (['ValueError'], {}), '(ValueError)\n', (7015, 7027), False, 'import pytest\n'), ((8082, 8107), 'numpy.random.rand', 'np.random.rand', (['(3)', '(1)', '(100)'], {}), '(3, 1, 100)\n', (8096, 8107), True, 'import numpy as np\n'), ((2021, 2076), 'numpy.arange', 'np.arange', (['(0)', '(rt_random_angles.time.size / 100)', '(1 / 100)'], {}), '(0, rt_random_angles.time.size / 100, 1 / 100)\n', (2030, 2076), True, 'import numpy as np\n'), ((8246, 8281), 'pyomeca.Angles.from_rototrans', 'Angles.from_rototrans', (['rt_mean', 'seq'], {}), '(rt_mean, seq)\n', (8267, 8281), False, 'from pyomeca import Angles, Rototrans, Markers\n'), ((8318, 8348), 'pyomeca.Angles.from_rototrans', 'Angles.from_rototrans', (['rt', 'seq'], {}), '(rt, seq)\n', (8339, 8348), False, 'from pyomeca import Angles, Rototrans, Markers\n'), ((172, 194), 'itertools.permutations', 'permutations', (['"""xyz"""', 'i'], {}), "('xyz', i)\n", (184, 194), False, 'from itertools import permutations\n'), ((1824, 1833), 'numpy.eye', 'np.eye', (['(3)'], {}), '(3)\n', (1830, 1833), True, 'import numpy as np\n'), ((2185, 2196), 'numpy.zeros', 'np.zeros', (['(1)'], {}), '(1)\n', (2193, 2196), True, 'import numpy as np\n'), ((4933, 4976), 'numpy.array', 'np.array', (['((1, 2, 3), (4, 5, 6), (6, 5, 4))'], {}), '(((1, 2, 3), (4, 5, 6), (6, 5, 4)))\n', (4941, 4976), True, 'import numpy as np\n')]
|
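# Condensed round-trip check built only from calls that already appear in the
# tests above (Angles, Rototrans.from_euler_angles, Angles.from_rototrans);
# a sketch assuming pyomeca is installed, not an addition to the test suite.
import numpy as np
from pyomeca import Angles, Rototrans

angles = Angles(np.random.rand(3, 1, 10))
rt = Rototrans.from_euler_angles(angles=angles, angle_sequence="xyz")
recovered = Angles.from_rototrans(rototrans=rt, angle_sequence="xyz")
np.testing.assert_array_less((recovered - angles).meca.abs().sum(), 1e-12)
print("xyz round trip OK for", rt.time.size, "frames")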