code (stringlengths 22–1.05M) | apis (listlengths 1–3.31k) | extract_api (stringlengths 75–3.25M) |
---|---|---|
from .layer import SyncLayer
from yowsup.stacks import YowStackBuilder
from yowsup.layers import YowLayerEvent
from yowsup.layers.auth import YowAuthenticationProtocolLayer
from yowsup.layers.network import YowNetworkLayer
class YowsupSyncStack(object):
def __init__(self, profile, contacts):
"""
:param profile:
        :param contacts: list of jids
:return:
"""
stackBuilder = YowStackBuilder()
self._stack = stackBuilder \
.pushDefaultLayers() \
.push(SyncLayer) \
.build()
self._stack.setProp(SyncLayer.PROP_CONTACTS, contacts)
self._stack.setProp(YowAuthenticationProtocolLayer.PROP_PASSIVE, True)
self._stack.setProfile(profile)
def set_prop(self, key, val):
self._stack.setProp(key, val)
def start(self):
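        # connect the network layer and run the stack's main loop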
self._stack.broadcastEvent(YowLayerEvent(YowNetworkLayer.EVENT_STATE_CONNECT))
self._stack.loop()
|
[
"yowsup.stacks.YowStackBuilder",
"yowsup.layers.YowLayerEvent"
] |
[((429, 446), 'yowsup.stacks.YowStackBuilder', 'YowStackBuilder', ([], {}), '()\n', (444, 446), False, 'from yowsup.stacks import YowStackBuilder\n'), ((885, 935), 'yowsup.layers.YowLayerEvent', 'YowLayerEvent', (['YowNetworkLayer.EVENT_STATE_CONNECT'], {}), '(YowNetworkLayer.EVENT_STATE_CONNECT)\n', (898, 935), False, 'from yowsup.layers import YowLayerEvent\n')]
|
from setuptools import setup
import os
try:
with open(os.path.join(os.path.dirname(__file__), 'README.md')) as f:
long_description = f.read()
except Exception:
long_description = ''
setup(
name='seaborn-file',
version='1.1.1',
    description='Seaborn-File enables the manipulation of the '
                'directories of a computer within a program.',
    long_description=long_description,
author='<NAME>',
author_email='<EMAIL>',
url='https://github.com/SeabornGames/File',
download_url='https://github.com/SeabornGames/File'
'/tarball/download',
keywords=['os'],
install_requires=[
],
extras_require={},
packages=['seaborn_file'],
license='MIT License',
classifiers=[
'Intended Audience :: Developers',
'Natural Language :: English',
'License :: Other/Proprietary License',
'Operating System :: POSIX :: Linux',
'Programming Language :: Python',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3.6'],
)
|
[
"os.path.dirname",
"setuptools.setup"
] |
[((201, 949), 'setuptools.setup', 'setup', ([], {'name': '"""seaborn-file"""', 'version': '"""1.1.1"""', 'description': '"""Seaborn-File enables the manipulation of thedirectories of a computer within a program."""', 'long_description': '""""""', 'author': '"""<NAME>"""', 'author_email': '"""<EMAIL>"""', 'url': '"""https://github.com/SeabornGames/File"""', 'download_url': '"""https://github.com/SeabornGames/File/tarball/download"""', 'keywords': "['os']", 'install_requires': '[]', 'extras_require': '{}', 'packages': "['seaborn_file']", 'license': '"""MIT License"""', 'classifiers': "['Intended Audience :: Developers', 'Natural Language :: English',\n 'License :: Other/Proprietary License',\n 'Operating System :: POSIX :: Linux', 'Programming Language :: Python',\n 'Programming Language :: Python :: 2.7',\n 'Programming Language :: Python :: 3.6']"}), "(name='seaborn-file', version='1.1.1', description=\n 'Seaborn-File enables the manipulation of thedirectories of a computer within a program.'\n , long_description='', author='<NAME>', author_email='<EMAIL>', url=\n 'https://github.com/SeabornGames/File', download_url=\n 'https://github.com/SeabornGames/File/tarball/download', keywords=['os'\n ], install_requires=[], extras_require={}, packages=['seaborn_file'],\n license='MIT License', classifiers=['Intended Audience :: Developers',\n 'Natural Language :: English', 'License :: Other/Proprietary License',\n 'Operating System :: POSIX :: Linux', 'Programming Language :: Python',\n 'Programming Language :: Python :: 2.7',\n 'Programming Language :: Python :: 3.6'])\n", (206, 949), False, 'from setuptools import setup\n'), ((72, 97), 'os.path.dirname', 'os.path.dirname', (['__file__'], {}), '(__file__)\n', (87, 97), False, 'import os\n')]
|
from ble_lightsensor import BLELightSensor
from lightsensor import LightSensor
import time
import bluetooth
def main():
ble = bluetooth.BLE()
ble.active(True)
ble_light = BLELightSensor(ble)
light = LightSensor(36)
light_density = light.value()
i = 0
while True:
# Write every second, notify every 10 seconds.
i = (i + 1) % 10
ble_light.set_light(light_density, notify=i == 0)
print("Light Lux:", light_density)
light_density = light.value()
time.sleep_ms(1000)
if __name__ == "__main__":
main()
|
[
"lightsensor.LightSensor",
"time.sleep_ms",
"bluetooth.BLE",
"ble_lightsensor.BLELightSensor"
] |
[((131, 146), 'bluetooth.BLE', 'bluetooth.BLE', ([], {}), '()\n', (144, 146), False, 'import bluetooth\n'), ((184, 203), 'ble_lightsensor.BLELightSensor', 'BLELightSensor', (['ble'], {}), '(ble)\n', (198, 203), False, 'from ble_lightsensor import BLELightSensor\n'), ((217, 232), 'lightsensor.LightSensor', 'LightSensor', (['(36)'], {}), '(36)\n', (228, 232), False, 'from lightsensor import LightSensor\n'), ((522, 541), 'time.sleep_ms', 'time.sleep_ms', (['(1000)'], {}), '(1000)\n', (535, 541), False, 'import time\n')]
|
import logging
import os
from typing import List, Optional
import altair
from ps2_census.enums import PlayerState
from ps2_analysis.enums import DamageLocation
from ps2_analysis.fire_groups.cone_of_fire import ConeOfFire
from ps2_analysis.fire_groups.data_files import (
update_data_files as update_fire_groups_data_files,
)
from ps2_analysis.fire_groups.fire_mode import FireMode
from ps2_analysis.utils import CodeTimer
from ps2_analysis.weapons.infantry.data_files import (
update_data_files as update_infantry_weapons_data_files,
)
from ps2_analysis.weapons.infantry.generate import generate_all_infantry_weapons
from ps2_analysis.weapons.infantry.infantry_weapon import InfantryWeapon
logging.basicConfig(level=logging.INFO)
SERVICE_ID: Optional[str] = os.environ.get("CENSUS_SERVICE_ID")
DATAFILES_DIRECTORY: str = "../datafiles"
if not SERVICE_ID:
raise ValueError("CENSUS_SERVICE_ID envvar not found")
update_fire_groups_data_files(
directory=DATAFILES_DIRECTORY, service_id=SERVICE_ID,
)
update_infantry_weapons_data_files(
directory=DATAFILES_DIRECTORY, service_id=SERVICE_ID,
)
infantry_weapons: List[InfantryWeapon] = list(
generate_all_infantry_weapons(data_files_directory=DATAFILES_DIRECTORY)
)
print(f"Generated {len(infantry_weapons)} infantry weapons")
wp: InfantryWeapon = next(x for x in infantry_weapons if x.item_id == 43)
fm: FireMode = wp.fire_groups[0].fire_modes[1]
cof: ConeOfFire = fm.player_state_cone_of_fire[PlayerState.STANDING]
rttks: List[dict] = []
distance: int = 30
burst_length: int
for burst_length in range(0, int(round(fm.max_consecutive_shots / 4)) + 1, 1):
control_time: int
for control_time in range(
0, cof.recover_time(cof.min_cof_angle() + cof.bloom * burst_length * 2) + 10, 10
):
with CodeTimer(
f"{burst_length} length and {control_time}ms control time simulation"
):
ttk: int
timed_out_ratio: float
ttk, timed_out_ratio = fm.real_time_to_kill(
distance=distance,
runs=500,
control_time=control_time,
auto_burst_length=burst_length,
aim_location=DamageLocation.TORSO,
recoil_compensation=True,
)
rttks.append(
{
"distance": distance,
"control_time": control_time + fm.fire_timing.refire_time,
"burst_length": burst_length,
"ttk": ttk if timed_out_ratio < 0.20 else -1,
"timed_out_ratio": timed_out_ratio,
}
)
dataset = altair.Data(values=rttks)
chart = (
altair.Chart(dataset)
.mark_rect()
.encode(
x="burst_length:O",
y=altair.Y(
"control_time:O",
sort=altair.EncodingSortField("control_time", order="descending"),
),
color=altair.Color(
"ttk:Q", scale=altair.Scale(scheme="plasma"), sort="descending"
),
tooltip=["ttk:Q", "timed_out_ratio:Q"],
)
.properties(
title=f"{wp.name} TTK by burst length and control time at {distance}m",
height=900,
width=900,
)
.interactive()
)
chart.save("bursts_ttk_simulation.html")
|
[
"altair.Data",
"logging.basicConfig",
"altair.Scale",
"altair.Chart",
"ps2_analysis.fire_groups.data_files.update_data_files",
"os.environ.get",
"altair.EncodingSortField",
"ps2_analysis.utils.CodeTimer",
"ps2_analysis.weapons.infantry.data_files.update_data_files",
"ps2_analysis.weapons.infantry.generate.generate_all_infantry_weapons"
] |
[((701, 740), 'logging.basicConfig', 'logging.basicConfig', ([], {'level': 'logging.INFO'}), '(level=logging.INFO)\n', (720, 740), False, 'import logging\n'), ((770, 805), 'os.environ.get', 'os.environ.get', (['"""CENSUS_SERVICE_ID"""'], {}), "('CENSUS_SERVICE_ID')\n", (784, 805), False, 'import os\n'), ((928, 1016), 'ps2_analysis.fire_groups.data_files.update_data_files', 'update_fire_groups_data_files', ([], {'directory': 'DATAFILES_DIRECTORY', 'service_id': 'SERVICE_ID'}), '(directory=DATAFILES_DIRECTORY, service_id=\n SERVICE_ID)\n', (957, 1016), True, 'from ps2_analysis.fire_groups.data_files import update_data_files as update_fire_groups_data_files\n'), ((1020, 1112), 'ps2_analysis.weapons.infantry.data_files.update_data_files', 'update_infantry_weapons_data_files', ([], {'directory': 'DATAFILES_DIRECTORY', 'service_id': 'SERVICE_ID'}), '(directory=DATAFILES_DIRECTORY,\n service_id=SERVICE_ID)\n', (1054, 1112), True, 'from ps2_analysis.weapons.infantry.data_files import update_data_files as update_infantry_weapons_data_files\n'), ((2662, 2687), 'altair.Data', 'altair.Data', ([], {'values': 'rttks'}), '(values=rttks)\n', (2673, 2687), False, 'import altair\n'), ((1168, 1239), 'ps2_analysis.weapons.infantry.generate.generate_all_infantry_weapons', 'generate_all_infantry_weapons', ([], {'data_files_directory': 'DATAFILES_DIRECTORY'}), '(data_files_directory=DATAFILES_DIRECTORY)\n', (1197, 1239), False, 'from ps2_analysis.weapons.infantry.generate import generate_all_infantry_weapons\n'), ((1803, 1888), 'ps2_analysis.utils.CodeTimer', 'CodeTimer', (['f"""{burst_length} length and {control_time}ms control time simulation"""'], {}), "(f'{burst_length} length and {control_time}ms control time simulation'\n )\n", (1812, 1888), False, 'from ps2_analysis.utils import CodeTimer\n'), ((2703, 2724), 'altair.Chart', 'altair.Chart', (['dataset'], {}), '(dataset)\n', (2715, 2724), False, 'import altair\n'), ((2850, 2910), 'altair.EncodingSortField', 'altair.EncodingSortField', (['"""control_time"""'], {'order': '"""descending"""'}), "('control_time', order='descending')\n", (2874, 2910), False, 'import altair\n'), ((2978, 3007), 'altair.Scale', 'altair.Scale', ([], {'scheme': '"""plasma"""'}), "(scheme='plasma')\n", (2990, 3007), False, 'import altair\n')]
|
#coding: utf-8
class CommandUsage(object):
def __init__(self, profile_container):
self.pc = profile_container
def get_type_info(self):
r = []
for p in self.pc.profiles:
opts, args, input, output = self.profile_usage(p)
type_vars = ', '.join(map(lambda x:'<%s>' % x.var_name, self.pc.type_params))
if opts:
r.append('Usage: %s%s OPTION %s' % (self.pc.name, type_vars, args))
r.append('Option:\n%s' % self.indent(opts))
else:
if args == 'null':
r.append('Usage: %s%s' % (self.pc.name, type_vars))
else:
r.append('Usage: %s%s %s' % (self.pc.name, type_vars, args))
r.append('Input:\n%s' % self.indent(input))
r.append('Output:\n%s' % self.indent(output))
r.append('\n')
return u'\n'.join(r)
def get_usage(self):
return self.get_type_info() + 'Description:\n' + self.get_doc()
def get_doc(self):
return self.pc.doc
@property
def title(self):
return self.pc.doc.splitlines()[0].strip()
def indent(self, s):
r = []
for l in s.splitlines():
r.append(' ' + l)
return '\n'.join(r)
def profile_usage(self, prof):
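        # render the option/argument/input/output schemas as human-readable strings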
opt = TreeDumper().visit(prof.opts_schema)
arg = ArgDumper().visit(prof.args_schema)
inp = MiniDumper().visit(prof.in_schema)
out = MiniDumper().visit(prof.out_schema)
return opt, arg, inp, out
from caty.core.casm.cursor.dump import TreeDumper
class ArgDumper(TreeDumper):
def _process_option(self, node, buff):
if node.options:
items = [(k, v) for k, v in node.options.items() if k not in ('subName', 'minCount', 'maxCount')]
if 'subName' in node.options:
buff.append(' ' + node.options['subName'])
class MiniDumper(TreeDumper):
def _visit_root(self, node):
return node.name
|
[
"caty.core.casm.cursor.dump.TreeDumper"
] |
[((1339, 1351), 'caty.core.casm.cursor.dump.TreeDumper', 'TreeDumper', ([], {}), '()\n', (1349, 1351), False, 'from caty.core.casm.cursor.dump import TreeDumper\n')]
|
import os
from tempfile import NamedTemporaryFile
from unittest import TestCase
from mbq.client.storage import FileStorage
class FileStorageTestCase(TestCase):
def setUp(self):
self.test_filename = NamedTemporaryFile(delete=False).name
self.storage = FileStorage(self.test_filename)
def tearDown(self):
os.remove(self.test_filename)
def test_storage(self):
# When the file is empty, we should receive None for any key.
self.assertIsNone(self.storage.get('key1'))
# We should be able to write a key/value,
self.storage.set('key1', 'value1')
# retrieve it,
self.assertEqual(self.storage.get('key1'), 'value1')
# and still receive None for missing keys.
self.assertIsNone(self.storage.get('key2'))
# We should be able to write a 2nd key,
self.storage.set('key2', 'value2')
# retrieve it,
self.assertEqual(self.storage.get('key2'), 'value2')
# still retrieve the earlier key we wrote,
self.assertEqual(self.storage.get('key1'), 'value1')
# and still receive None for missing keys.
self.assertIsNone(self.storage.get('key3'))
# We should be able to update an existing key,
self.storage.set('key2', 'some-new-value')
# see the value change when retrieving,
self.assertEqual(self.storage.get('key2'), 'some-new-value')
# the other values should remain unchanged,
self.assertEqual(self.storage.get('key1'), 'value1')
# and we should still receive None for missing keys.
self.assertIsNone(self.storage.get('key3'))
# If we re-init the storage object with the same file,
self.storage = FileStorage(self.test_filename)
# all keys should be persisted.
self.assertEqual(self.storage.get('key2'), 'some-new-value')
self.assertEqual(self.storage.get('key1'), 'value1')
self.assertIsNone(self.storage.get('key3'))
|
[
"tempfile.NamedTemporaryFile",
"os.remove",
"mbq.client.storage.FileStorage"
] |
[((275, 306), 'mbq.client.storage.FileStorage', 'FileStorage', (['self.test_filename'], {}), '(self.test_filename)\n', (286, 306), False, 'from mbq.client.storage import FileStorage\n'), ((340, 369), 'os.remove', 'os.remove', (['self.test_filename'], {}), '(self.test_filename)\n', (349, 369), False, 'import os\n'), ((1730, 1761), 'mbq.client.storage.FileStorage', 'FileStorage', (['self.test_filename'], {}), '(self.test_filename)\n', (1741, 1761), False, 'from mbq.client.storage import FileStorage\n'), ((214, 246), 'tempfile.NamedTemporaryFile', 'NamedTemporaryFile', ([], {'delete': '(False)'}), '(delete=False)\n', (232, 246), False, 'from tempfile import NamedTemporaryFile\n')]
|
# -*- coding: utf-8 -*-
"""
Created on Wed Mar 7 08:38:14 2018
@author: <NAME>
compute how quickly soccer league tables converge to the final distribution
"""
import pandas as pd
import numpy as np
import glob
import matplotlib.pyplot as plt
from matplotlib import animation
from scipy.stats import entropy
from scipy.optimize import curve_fit
import seaborn as sns
sns.set()
# function to compute Jensen-Shannon divergence
def JSD(p, q):
r = 0.5 * (p + q)
return 0.5 * (entropy(p, r) + entropy(q, r))
# the data files have already been acquired and cleaned
# see get_football-data_data.py
# build a list of filenames
filenames = glob.glob('data/*.csv')
# initialize an array to hold JSD values
# each row will contain the JSD curve data for one season
jsds = np.zeros((len(filenames),500))
# initialize an array to hold final league tables
finals = np.zeros((len(filenames),25))
# initialize a season counter
season = 0
# list of columns needed from the data files
cols = ['Date','HomeTeam','AwayTeam','FTHG','FTAG']
for file in filenames:
# load the season data
df = pd.read_csv(file,index_col='Date',encoding = "ISO-8859-1",usecols=cols).dropna(axis=0,how='any')
# get the unique team names for that season
teams = list(df.HomeTeam.unique())
# set up array for league tables
# each column corresponds to a team
# each row corresponds to the league table after that number of games
tables = np.zeros((df.shape[0]+1,len(teams)))
# initialize game counter
num_games = 1
# loop through the season data game by game
for idx,row in df.iterrows():
# initialize the current league table to be the same as the last
tables[num_games,:] = tables[num_games-1,:]
        # get indices for the teams involved in this game
home_idx = teams.index(row['HomeTeam'])
away_idx = teams.index(row['AwayTeam'])
# compute home goals - away goals
goal_diff = row.FTHG - row.FTAG
# update the league table based on the result
if goal_diff > 0:
tables[num_games,home_idx] += 3
elif goal_diff < 0:
tables[num_games,away_idx] += 3
else:
tables[num_games,home_idx] += 1
tables[num_games,away_idx] += 1
# increment the game counter
num_games += 1
# delete first row of the table
tables = tables[1:,:]
# compute the probability distribution for the final league table
p = tables[-1,:]/np.sum(tables[-1,:])
# store p
for idx,team in enumerate(p):
finals[season,idx] = team
# for each of the running league tables, convert to a distribution
# and then compute the JSD
for i in range(len(tables[:,0])):
#if np.count_nonzero(tables[idx,:]) == len(tables[idx,:]):
q = tables[i,:]/np.sum(tables[i,:])
jsds[season,i] = JSD(p,q)
# increment the season counter
season += 1
# compute the average JSD curve
avg = np.sum(jsds,axis=0)/110
# array of x values for the games
xs = np.array([i for i in range(len(avg))])
# define function for curve-fitting
def f(x, a, b, c):
return a * np.exp(-b * x) + c
# perform the curve fit
popt, pcov = curve_fit(f, xs, avg)
# plot the individual JSD curves
for i in range(jsds.shape[0]):
plt.plot(jsds[i,:],alpha=.3,color='gray')
# add title and axis labels
plt.title('Convergence of league tables over time')
plt.xlabel('Number of games played')
plt.ylabel('JSD with final table')
# set axis limits, 461 most games in an individual season
axes = plt.gca()
axes.set_xlim([0,461])
plt.savefig('allseasons.png')
# zoom in on the first 100 games
axes.set_xlim([0,100])
plt.savefig('convbegin.png')
# zoom out again
axes.set_xlim([0,380])
# plot the average curve
plt.plot(xs,avg,'b-',label='average JSD')
# add a legend
plt.legend()
plt.savefig('convwithavg.png')
# plot the best-fit curve
plt.plot(xs, f(xs, *popt), 'r-',
label='fit: a=%5.3f, b=%5.3f, c=%5.3f' % tuple(popt))
# update the legend
plt.legend()
plt.savefig('conv.png')
plt.show()
plt.clf()
plt.cla()
plt.close()
# compute examples of final probability distributions
# spain 16-17
xd = [i for i in range(18)]
plt.bar(xd,np.sort(finals[5,:18]))
plt.title('La Liga 2016-2017')
plt.xticks([],'')
plt.xlabel('Ranked teams')
plt.ylabel('Point distribution')
plt.savefig('sp1617.png')
plt.clf()
plt.cla()
plt.close()
# italy 16-17
xd = [i for i in range(20)]
plt.bar(xd,np.sort(finals[27,:20]))
plt.title('Serie A 2016-2017')
plt.xticks([],'')
plt.xlabel('Ranked teams')
plt.ylabel('Point distribution')
plt.savefig('it1617.png')
plt.clf()
plt.cla()
plt.close()
# france 16-17
xd = [i for i in range(20)]
plt.bar(xd,np.sort(finals[49,:20]))
plt.title('Ligue 1 2016-2017')
plt.xticks([],'')
plt.xlabel('Ranked teams')
plt.ylabel('Point distribution')
plt.savefig('fr1617.png')
plt.clf()
plt.cla()
plt.close()
# england 16-17
xd = [i for i in range(20)]
plt.bar(xd,np.sort(finals[71,:20]))
plt.title('Premier League 2016-2017')
plt.xticks([],'')
plt.xlabel('Ranked teams')
plt.ylabel('Point distribution')
plt.savefig('en1617.png')
plt.clf()
plt.cla()
plt.close()
# germany 16-17
xd = [i for i in range(18)]
plt.bar(xd,np.sort(finals[93,:18]))
plt.title('Bundesliga 2016-2017')
plt.xticks([],'')
plt.xlabel('Ranked teams')
plt.ylabel('Point distribution')
plt.savefig('ge1617.png')
plt.clf()
plt.cla()
plt.close()
# generate animation
# code below based on an example by <NAME>:
# email: <EMAIL>
# website: http://jakevdp.github.com
# license: BSD
# set up the figure
fig = plt.figure()
# set up the axes
ax = plt.axes(xlim=(-1, 20), ylim=(0, .12))
line, = ax.plot([], [],'o',linestyle='None')
# add title, legend, etc.
plt.title('\'99-\'00 Premier League points distribution over time')
plt.xticks([],'')
plt.xlabel('Ranked teams')
plt.ylabel('Proportion of total points')
# draw the background
def init():
line.set_data([],[])
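    # `tables` still holds the league table of the last season processed in the loop above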
plt.bar([i for i in range(20)],np.sort(tables[-1,:]/np.sum(tables[-1,:])),alpha=.3)
return line,
# animation function, each frame draws a distribution after one more game
def animate(i):
xd = [i for i in range(20)]
y = np.sort(tables[i+40,:]/np.sum(tables[i+40,:]))
line.set_data(xd, y)
return line,
# animate
anim = animation.FuncAnimation(fig, animate, init_func=init,
frames=340, interval=20, blit=True,repeat_delay=1000)
# save the animation
anim.save('basic_animation.mp4', fps=50, extra_args=['-vcodec', 'libx264'])
plt.show()
|
[
"matplotlib.pyplot.title",
"numpy.sum",
"matplotlib.pyplot.clf",
"matplotlib.pyplot.axes",
"pandas.read_csv",
"matplotlib.animation.FuncAnimation",
"matplotlib.pyplot.figure",
"numpy.exp",
"matplotlib.pyplot.gca",
"glob.glob",
"matplotlib.pyplot.close",
"matplotlib.pyplot.cla",
"matplotlib.pyplot.xticks",
"seaborn.set",
"matplotlib.pyplot.show",
"matplotlib.pyplot.legend",
"scipy.optimize.curve_fit",
"numpy.sort",
"matplotlib.pyplot.ylabel",
"matplotlib.pyplot.plot",
"scipy.stats.entropy",
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.savefig"
] |
[((389, 398), 'seaborn.set', 'sns.set', ([], {}), '()\n', (396, 398), True, 'import seaborn as sns\n'), ((677, 700), 'glob.glob', 'glob.glob', (['"""data/*.csv"""'], {}), "('data/*.csv')\n", (686, 700), False, 'import glob\n'), ((3399, 3420), 'scipy.optimize.curve_fit', 'curve_fit', (['f', 'xs', 'avg'], {}), '(f, xs, avg)\n', (3408, 3420), False, 'from scipy.optimize import curve_fit\n'), ((3572, 3623), 'matplotlib.pyplot.title', 'plt.title', (['"""Convergence of league tables over time"""'], {}), "('Convergence of league tables over time')\n", (3581, 3623), True, 'import matplotlib.pyplot as plt\n'), ((3625, 3661), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Number of games played"""'], {}), "('Number of games played')\n", (3635, 3661), True, 'import matplotlib.pyplot as plt\n'), ((3663, 3697), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""JSD with final table"""'], {}), "('JSD with final table')\n", (3673, 3697), True, 'import matplotlib.pyplot as plt\n'), ((3767, 3776), 'matplotlib.pyplot.gca', 'plt.gca', ([], {}), '()\n', (3774, 3776), True, 'import matplotlib.pyplot as plt\n'), ((3804, 3833), 'matplotlib.pyplot.savefig', 'plt.savefig', (['"""allseasons.png"""'], {}), "('allseasons.png')\n", (3815, 3833), True, 'import matplotlib.pyplot as plt\n'), ((3895, 3923), 'matplotlib.pyplot.savefig', 'plt.savefig', (['"""convbegin.png"""'], {}), "('convbegin.png')\n", (3906, 3923), True, 'import matplotlib.pyplot as plt\n'), ((3997, 4041), 'matplotlib.pyplot.plot', 'plt.plot', (['xs', 'avg', '"""b-"""'], {'label': '"""average JSD"""'}), "(xs, avg, 'b-', label='average JSD')\n", (4005, 4041), True, 'import matplotlib.pyplot as plt\n'), ((4058, 4070), 'matplotlib.pyplot.legend', 'plt.legend', ([], {}), '()\n', (4068, 4070), True, 'import matplotlib.pyplot as plt\n'), ((4072, 4102), 'matplotlib.pyplot.savefig', 'plt.savefig', (['"""convwithavg.png"""'], {}), "('convwithavg.png')\n", (4083, 4102), True, 'import matplotlib.pyplot as plt\n'), ((4255, 4267), 'matplotlib.pyplot.legend', 'plt.legend', ([], {}), '()\n', (4265, 4267), True, 'import matplotlib.pyplot as plt\n'), ((4269, 4292), 'matplotlib.pyplot.savefig', 'plt.savefig', (['"""conv.png"""'], {}), "('conv.png')\n", (4280, 4292), True, 'import matplotlib.pyplot as plt\n'), ((4294, 4304), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (4302, 4304), True, 'import matplotlib.pyplot as plt\n'), ((4308, 4317), 'matplotlib.pyplot.clf', 'plt.clf', ([], {}), '()\n', (4315, 4317), True, 'import matplotlib.pyplot as plt\n'), ((4319, 4328), 'matplotlib.pyplot.cla', 'plt.cla', ([], {}), '()\n', (4326, 4328), True, 'import matplotlib.pyplot as plt\n'), ((4330, 4341), 'matplotlib.pyplot.close', 'plt.close', ([], {}), '()\n', (4339, 4341), True, 'import matplotlib.pyplot as plt\n'), ((4482, 4512), 'matplotlib.pyplot.title', 'plt.title', (['"""La Liga 2016-2017"""'], {}), "('La Liga 2016-2017')\n", (4491, 4512), True, 'import matplotlib.pyplot as plt\n'), ((4514, 4532), 'matplotlib.pyplot.xticks', 'plt.xticks', (['[]', '""""""'], {}), "([], '')\n", (4524, 4532), True, 'import matplotlib.pyplot as plt\n'), ((4533, 4559), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Ranked teams"""'], {}), "('Ranked teams')\n", (4543, 4559), True, 'import matplotlib.pyplot as plt\n'), ((4561, 4593), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Point distribution"""'], {}), "('Point distribution')\n", (4571, 4593), True, 'import matplotlib.pyplot as plt\n'), ((4595, 4620), 'matplotlib.pyplot.savefig', 'plt.savefig', (['"""sp1617.png"""'], {}), 
"('sp1617.png')\n", (4606, 4620), True, 'import matplotlib.pyplot as plt\n'), ((4622, 4631), 'matplotlib.pyplot.clf', 'plt.clf', ([], {}), '()\n', (4629, 4631), True, 'import matplotlib.pyplot as plt\n'), ((4633, 4642), 'matplotlib.pyplot.cla', 'plt.cla', ([], {}), '()\n', (4640, 4642), True, 'import matplotlib.pyplot as plt\n'), ((4644, 4655), 'matplotlib.pyplot.close', 'plt.close', ([], {}), '()\n', (4653, 4655), True, 'import matplotlib.pyplot as plt\n'), ((4740, 4770), 'matplotlib.pyplot.title', 'plt.title', (['"""Serie A 2016-2017"""'], {}), "('Serie A 2016-2017')\n", (4749, 4770), True, 'import matplotlib.pyplot as plt\n'), ((4772, 4790), 'matplotlib.pyplot.xticks', 'plt.xticks', (['[]', '""""""'], {}), "([], '')\n", (4782, 4790), True, 'import matplotlib.pyplot as plt\n'), ((4791, 4817), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Ranked teams"""'], {}), "('Ranked teams')\n", (4801, 4817), True, 'import matplotlib.pyplot as plt\n'), ((4819, 4851), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Point distribution"""'], {}), "('Point distribution')\n", (4829, 4851), True, 'import matplotlib.pyplot as plt\n'), ((4853, 4878), 'matplotlib.pyplot.savefig', 'plt.savefig', (['"""it1617.png"""'], {}), "('it1617.png')\n", (4864, 4878), True, 'import matplotlib.pyplot as plt\n'), ((4880, 4889), 'matplotlib.pyplot.clf', 'plt.clf', ([], {}), '()\n', (4887, 4889), True, 'import matplotlib.pyplot as plt\n'), ((4891, 4900), 'matplotlib.pyplot.cla', 'plt.cla', ([], {}), '()\n', (4898, 4900), True, 'import matplotlib.pyplot as plt\n'), ((4902, 4913), 'matplotlib.pyplot.close', 'plt.close', ([], {}), '()\n', (4911, 4913), True, 'import matplotlib.pyplot as plt\n'), ((4999, 5029), 'matplotlib.pyplot.title', 'plt.title', (['"""Ligue 1 2016-2017"""'], {}), "('Ligue 1 2016-2017')\n", (5008, 5029), True, 'import matplotlib.pyplot as plt\n'), ((5031, 5049), 'matplotlib.pyplot.xticks', 'plt.xticks', (['[]', '""""""'], {}), "([], '')\n", (5041, 5049), True, 'import matplotlib.pyplot as plt\n'), ((5050, 5076), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Ranked teams"""'], {}), "('Ranked teams')\n", (5060, 5076), True, 'import matplotlib.pyplot as plt\n'), ((5078, 5110), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Point distribution"""'], {}), "('Point distribution')\n", (5088, 5110), True, 'import matplotlib.pyplot as plt\n'), ((5112, 5137), 'matplotlib.pyplot.savefig', 'plt.savefig', (['"""fr1617.png"""'], {}), "('fr1617.png')\n", (5123, 5137), True, 'import matplotlib.pyplot as plt\n'), ((5139, 5148), 'matplotlib.pyplot.clf', 'plt.clf', ([], {}), '()\n', (5146, 5148), True, 'import matplotlib.pyplot as plt\n'), ((5150, 5159), 'matplotlib.pyplot.cla', 'plt.cla', ([], {}), '()\n', (5157, 5159), True, 'import matplotlib.pyplot as plt\n'), ((5161, 5172), 'matplotlib.pyplot.close', 'plt.close', ([], {}), '()\n', (5170, 5172), True, 'import matplotlib.pyplot as plt\n'), ((5259, 5296), 'matplotlib.pyplot.title', 'plt.title', (['"""Premier League 2016-2017"""'], {}), "('Premier League 2016-2017')\n", (5268, 5296), True, 'import matplotlib.pyplot as plt\n'), ((5298, 5316), 'matplotlib.pyplot.xticks', 'plt.xticks', (['[]', '""""""'], {}), "([], '')\n", (5308, 5316), True, 'import matplotlib.pyplot as plt\n'), ((5317, 5343), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Ranked teams"""'], {}), "('Ranked teams')\n", (5327, 5343), True, 'import matplotlib.pyplot as plt\n'), ((5345, 5377), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Point distribution"""'], {}), "('Point distribution')\n", (5355, 5377), 
True, 'import matplotlib.pyplot as plt\n'), ((5379, 5404), 'matplotlib.pyplot.savefig', 'plt.savefig', (['"""en1617.png"""'], {}), "('en1617.png')\n", (5390, 5404), True, 'import matplotlib.pyplot as plt\n'), ((5406, 5415), 'matplotlib.pyplot.clf', 'plt.clf', ([], {}), '()\n', (5413, 5415), True, 'import matplotlib.pyplot as plt\n'), ((5417, 5426), 'matplotlib.pyplot.cla', 'plt.cla', ([], {}), '()\n', (5424, 5426), True, 'import matplotlib.pyplot as plt\n'), ((5428, 5439), 'matplotlib.pyplot.close', 'plt.close', ([], {}), '()\n', (5437, 5439), True, 'import matplotlib.pyplot as plt\n'), ((5526, 5559), 'matplotlib.pyplot.title', 'plt.title', (['"""Bundesliga 2016-2017"""'], {}), "('Bundesliga 2016-2017')\n", (5535, 5559), True, 'import matplotlib.pyplot as plt\n'), ((5561, 5579), 'matplotlib.pyplot.xticks', 'plt.xticks', (['[]', '""""""'], {}), "([], '')\n", (5571, 5579), True, 'import matplotlib.pyplot as plt\n'), ((5580, 5606), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Ranked teams"""'], {}), "('Ranked teams')\n", (5590, 5606), True, 'import matplotlib.pyplot as plt\n'), ((5608, 5640), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Point distribution"""'], {}), "('Point distribution')\n", (5618, 5640), True, 'import matplotlib.pyplot as plt\n'), ((5642, 5667), 'matplotlib.pyplot.savefig', 'plt.savefig', (['"""ge1617.png"""'], {}), "('ge1617.png')\n", (5653, 5667), True, 'import matplotlib.pyplot as plt\n'), ((5669, 5678), 'matplotlib.pyplot.clf', 'plt.clf', ([], {}), '()\n', (5676, 5678), True, 'import matplotlib.pyplot as plt\n'), ((5680, 5689), 'matplotlib.pyplot.cla', 'plt.cla', ([], {}), '()\n', (5687, 5689), True, 'import matplotlib.pyplot as plt\n'), ((5691, 5702), 'matplotlib.pyplot.close', 'plt.close', ([], {}), '()\n', (5700, 5702), True, 'import matplotlib.pyplot as plt\n'), ((5875, 5887), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (5885, 5887), True, 'import matplotlib.pyplot as plt\n'), ((5915, 5954), 'matplotlib.pyplot.axes', 'plt.axes', ([], {'xlim': '(-1, 20)', 'ylim': '(0, 0.12)'}), '(xlim=(-1, 20), ylim=(0, 0.12))\n', (5923, 5954), True, 'import matplotlib.pyplot as plt\n'), ((6030, 6095), 'matplotlib.pyplot.title', 'plt.title', (['"""\'99-\'00 Premier League points distribution over time"""'], {}), '("\'99-\'00 Premier League points distribution over time")\n', (6039, 6095), True, 'import matplotlib.pyplot as plt\n'), ((6099, 6117), 'matplotlib.pyplot.xticks', 'plt.xticks', (['[]', '""""""'], {}), "([], '')\n", (6109, 6117), True, 'import matplotlib.pyplot as plt\n'), ((6118, 6144), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Ranked teams"""'], {}), "('Ranked teams')\n", (6128, 6144), True, 'import matplotlib.pyplot as plt\n'), ((6146, 6186), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Proportion of total points"""'], {}), "('Proportion of total points')\n", (6156, 6186), True, 'import matplotlib.pyplot as plt\n'), ((6607, 6720), 'matplotlib.animation.FuncAnimation', 'animation.FuncAnimation', (['fig', 'animate'], {'init_func': 'init', 'frames': '(340)', 'interval': '(20)', 'blit': '(True)', 'repeat_delay': '(1000)'}), '(fig, animate, init_func=init, frames=340, interval=\n 20, blit=True, repeat_delay=1000)\n', (6630, 6720), False, 'from matplotlib import animation\n'), ((6851, 6861), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (6859, 6861), True, 'import matplotlib.pyplot as plt\n'), ((3157, 3177), 'numpy.sum', 'np.sum', (['jsds'], {'axis': '(0)'}), '(jsds, axis=0)\n', (3163, 3177), True, 'import numpy as np\n'), ((3494, 
3539), 'matplotlib.pyplot.plot', 'plt.plot', (['jsds[i, :]'], {'alpha': '(0.3)', 'color': '"""gray"""'}), "(jsds[i, :], alpha=0.3, color='gray')\n", (3502, 3539), True, 'import matplotlib.pyplot as plt\n'), ((4457, 4480), 'numpy.sort', 'np.sort', (['finals[5, :18]'], {}), '(finals[5, :18])\n', (4464, 4480), True, 'import numpy as np\n'), ((4714, 4738), 'numpy.sort', 'np.sort', (['finals[27, :20]'], {}), '(finals[27, :20])\n', (4721, 4738), True, 'import numpy as np\n'), ((4973, 4997), 'numpy.sort', 'np.sort', (['finals[49, :20]'], {}), '(finals[49, :20])\n', (4980, 4997), True, 'import numpy as np\n'), ((5233, 5257), 'numpy.sort', 'np.sort', (['finals[71, :20]'], {}), '(finals[71, :20])\n', (5240, 5257), True, 'import numpy as np\n'), ((5500, 5524), 'numpy.sort', 'np.sort', (['finals[93, :18]'], {}), '(finals[93, :18])\n', (5507, 5524), True, 'import numpy as np\n'), ((2639, 2660), 'numpy.sum', 'np.sum', (['tables[-1, :]'], {}), '(tables[-1, :])\n', (2645, 2660), True, 'import numpy as np\n'), ((510, 523), 'scipy.stats.entropy', 'entropy', (['p', 'r'], {}), '(p, r)\n', (517, 523), False, 'from scipy.stats import entropy\n'), ((526, 539), 'scipy.stats.entropy', 'entropy', (['q', 'r'], {}), '(q, r)\n', (533, 539), False, 'from scipy.stats import entropy\n'), ((1146, 1218), 'pandas.read_csv', 'pd.read_csv', (['file'], {'index_col': '"""Date"""', 'encoding': '"""ISO-8859-1"""', 'usecols': 'cols'}), "(file, index_col='Date', encoding='ISO-8859-1', usecols=cols)\n", (1157, 1218), True, 'import pandas as pd\n'), ((2993, 3013), 'numpy.sum', 'np.sum', (['tables[i, :]'], {}), '(tables[i, :])\n', (2999, 3013), True, 'import numpy as np\n'), ((3339, 3353), 'numpy.exp', 'np.exp', (['(-b * x)'], {}), '(-b * x)\n', (3345, 3353), True, 'import numpy as np\n'), ((6518, 6543), 'numpy.sum', 'np.sum', (['tables[i + 40, :]'], {}), '(tables[i + 40, :])\n', (6524, 6543), True, 'import numpy as np\n'), ((6309, 6330), 'numpy.sum', 'np.sum', (['tables[-1, :]'], {}), '(tables[-1, :])\n', (6315, 6330), True, 'import numpy as np\n')]
|
import FWCore.ParameterSet.Config as cms
isolationInputParameters = cms.PSet(
barrelBasicCluster = cms.InputTag("islandBasicClusters","islandBarrelBasicClusters"),
endcapBasicCluster = cms.InputTag("islandBasicClusters","islandEndcapBasicClusters"),
horeco = cms.InputTag("horeco"),
hfreco = cms.InputTag("hfreco"),
hbhereco = cms.InputTag("hbhereco"),
track = cms.InputTag("hiGeneralTracks"),
photons = cms.InputTag("cleanPhotons")
)
|
[
"FWCore.ParameterSet.Config.InputTag"
] |
[((103, 167), 'FWCore.ParameterSet.Config.InputTag', 'cms.InputTag', (['"""islandBasicClusters"""', '"""islandBarrelBasicClusters"""'], {}), "('islandBasicClusters', 'islandBarrelBasicClusters')\n", (115, 167), True, 'import FWCore.ParameterSet.Config as cms\n'), ((192, 256), 'FWCore.ParameterSet.Config.InputTag', 'cms.InputTag', (['"""islandBasicClusters"""', '"""islandEndcapBasicClusters"""'], {}), "('islandBasicClusters', 'islandEndcapBasicClusters')\n", (204, 256), True, 'import FWCore.ParameterSet.Config as cms\n'), ((269, 291), 'FWCore.ParameterSet.Config.InputTag', 'cms.InputTag', (['"""horeco"""'], {}), "('horeco')\n", (281, 291), True, 'import FWCore.ParameterSet.Config as cms\n'), ((305, 327), 'FWCore.ParameterSet.Config.InputTag', 'cms.InputTag', (['"""hfreco"""'], {}), "('hfreco')\n", (317, 327), True, 'import FWCore.ParameterSet.Config as cms\n'), ((343, 367), 'FWCore.ParameterSet.Config.InputTag', 'cms.InputTag', (['"""hbhereco"""'], {}), "('hbhereco')\n", (355, 367), True, 'import FWCore.ParameterSet.Config as cms\n'), ((380, 411), 'FWCore.ParameterSet.Config.InputTag', 'cms.InputTag', (['"""hiGeneralTracks"""'], {}), "('hiGeneralTracks')\n", (392, 411), True, 'import FWCore.ParameterSet.Config as cms\n'), ((426, 454), 'FWCore.ParameterSet.Config.InputTag', 'cms.InputTag', (['"""cleanPhotons"""'], {}), "('cleanPhotons')\n", (438, 454), True, 'import FWCore.ParameterSet.Config as cms\n')]
|
import discord
from src.eventsHandler.on_message.commands.activate import disable, enable
from src.eventsHandler.on_message.commands.cancel import cancel_request
from src.eventsHandler.on_message.commands.end_request import end_request
from src.eventsHandler.on_message.commands.place import get_place
from src.eventsHandler.on_message.commands.request import make_request
class OnMessage:
@staticmethod
async def run(client: discord.Client, message: discord.Message):
if message.author.bot:
return
        if not message.content or message.content[0] != '!':
return
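        # split '!command arg1 arg2 ...' into the command name and its arguments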
command = message.content.split()[0][1:]
args = message.content.split()[1:]
if command == 'request':
await make_request(client, message, args)
elif command == 'cancel':
await cancel_request(client, message, args)
elif command == 'place':
await get_place(client, message, args)
elif command == 'close':
await end_request(client, message)
elif command == 'enable':
await enable(client, message, args)
elif command == 'disable':
await disable(client, message, args)
|
[
"src.eventsHandler.on_message.commands.cancel.cancel_request",
"src.eventsHandler.on_message.commands.activate.enable",
"src.eventsHandler.on_message.commands.end_request.end_request",
"src.eventsHandler.on_message.commands.place.get_place",
"src.eventsHandler.on_message.commands.activate.disable",
"src.eventsHandler.on_message.commands.request.make_request"
] |
[((753, 788), 'src.eventsHandler.on_message.commands.request.make_request', 'make_request', (['client', 'message', 'args'], {}), '(client, message, args)\n', (765, 788), False, 'from src.eventsHandler.on_message.commands.request import make_request\n'), ((841, 878), 'src.eventsHandler.on_message.commands.cancel.cancel_request', 'cancel_request', (['client', 'message', 'args'], {}), '(client, message, args)\n', (855, 878), False, 'from src.eventsHandler.on_message.commands.cancel import cancel_request\n'), ((930, 962), 'src.eventsHandler.on_message.commands.place.get_place', 'get_place', (['client', 'message', 'args'], {}), '(client, message, args)\n', (939, 962), False, 'from src.eventsHandler.on_message.commands.place import get_place\n'), ((1014, 1042), 'src.eventsHandler.on_message.commands.end_request.end_request', 'end_request', (['client', 'message'], {}), '(client, message)\n', (1025, 1042), False, 'from src.eventsHandler.on_message.commands.end_request import end_request\n'), ((1095, 1124), 'src.eventsHandler.on_message.commands.activate.enable', 'enable', (['client', 'message', 'args'], {}), '(client, message, args)\n', (1101, 1124), False, 'from src.eventsHandler.on_message.commands.activate import disable, enable\n'), ((1178, 1208), 'src.eventsHandler.on_message.commands.activate.disable', 'disable', (['client', 'message', 'args'], {}), '(client, message, args)\n', (1185, 1208), False, 'from src.eventsHandler.on_message.commands.activate import disable, enable\n')]
|
import torch.nn as nn
from ..builder import VQA_MODELS, build_backbone, build_encoder, build_head
@VQA_MODELS.register_module()
class VISDIALPRINCIPLES(nn.Module):
def __init__(self, vocabulary_len, word_embedding_size, encoder, backbone, head):
super().__init__()
self.embedding_model = nn.Embedding(vocabulary_len, word_embedding_size, padding_idx=0)
self.encoder_model = build_encoder(encoder)
self.backbone = build_backbone(backbone)
        self.head = build_head(head)  # includes classification head, generation head
def forward(self, data):
img = data['img_feat']
ques = data['ques']
his = data['hist']
batch_size, rnd, max_his_length = his.size()
cap = his[:, 0, :]
ques_len = data['ques_len']
hist_len = data['hist_len']
cap_len = hist_len[:, 0]
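        # embed question, caption and flattened history tokens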
ques_embed = self.embedding_model(ques)
cap_emb = self.embedding_model(cap.contiguous())
his = his.contiguous().view(-1, max_his_length)
his_embed = self.embedding_model(his)
q_output, c_output, his_feat = self.encoder_model(ques_embed, ques_len, cap_emb, cap_len, his_embed, hist_len)
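        # gather the encoder output at the last valid (unpadded) position of each question/caption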
ques_location = ques_len.view(-1).cpu().numpy() - 1
ques_encoded = q_output[range(batch_size), ques_location, :]
cap_location = cap_len.view(-1).cpu().numpy() - 1
cap_encoded = c_output[range(batch_size), cap_location, :]
his_feat = his_feat.view(batch_size, rnd, -1)
fuse_feat = self.backbone(ques_encoded, cap_encoded, his_feat, q_output, c_output, ques_len, cap_len,
ques_embed, cap_emb, img, batch_size)
scores = self.head(fuse_feat, data)
return scores
|
[
"torch.nn.Embedding"
] |
[((313, 377), 'torch.nn.Embedding', 'nn.Embedding', (['vocabulary_len', 'word_embedding_size'], {'padding_idx': '(0)'}), '(vocabulary_len, word_embedding_size, padding_idx=0)\n', (325, 377), True, 'import torch.nn as nn\n')]
|
# -*- coding: utf-8 -*-
"""
Forked in Hydra IMF from Hydra/MUSE on Feb 19, 2018
@author: <NAME>
Run pPXF in data
"""
import os
import yaml
import numpy as np
import matplotlib.pyplot as plt
from astropy.io import fits
from astropy import constants
from astropy.table import Table, vstack, hstack
from ppxf.ppxf import ppxf
from ppxf import ppxf_util
from spectres import spectres
import context
import misc
from der_snr import DER_SNR
def run_ppxf(specs, templates_file, outdir, velscale=None, redo=False, V0=None):
""" Running pPXF. """
velscale = context.velscale if velscale is None else velscale
V0 = context.V if V0 is None else V0
# Reading templates
ssp_templates = fits.getdata(templates_file, extname="SSPS").T
params = Table.read(templates_file, hdu=1)
nssps = ssp_templates.shape[1]
logwave_temp = Table.read(templates_file, hdu=2)["loglam"].data
wave_temp = np.exp(logwave_temp)
# Use first spectrum to set emission lines
start0 = [V0, 100., 0., 0.]
bounds0 = [[V0 - 2000., V0 + 2000], [velscale/10, 800.]]
for spec in specs:
print("Processing spectrum {}".format(spec))
name = spec.replace(".fits", "")
outyaml = os.path.join(outdir, "{}.yaml".format(name))
if os.path.exists(outyaml) and not redo:
continue
table = Table.read(spec)
wave_lin = table["wave"]
flux = table["flux"]
fluxerr = table["fluxerr"]
# Removing red part of the spectrum
idx = np.where(wave_lin < 7000)[0]
wave_lin = wave_lin[idx]
flux = flux[idx]
fluxerr = fluxerr[idx]
der_sn = misc.snr(flux)[2]
data_sn = np.nanmedian(flux / fluxerr)
###################################################################
# Rebinning the data to a logarithmic scale for ppxf
wave_range = [wave_lin[0], wave_lin[-1]]
logwave = ppxf_util.log_rebin(wave_range, flux, velscale=velscale)[1]
wave = np.exp(logwave)
wave = wave[(wave > wave_lin[0]) & (wave < wave_lin[-1])][1:-1]
flux, fluxerr = spectres(wave, wave_lin, flux, spec_errs=fluxerr)
####################################################################
# Setting up the gas templates
gas_templates, line_names, line_wave = \
ppxf_util.emission_lines(logwave_temp,
[wave_lin[0], wave_lin[-1]], 2.95)
ngas = gas_templates.shape[1]
####################################################################
# Masking bad pixels
skylines = np.array([4785, 5577, 5889, 6300, 6360, 6863])
goodpixels = np.arange(len(wave))
for line in skylines:
sky = np.argwhere((wave < line - 10) | (wave > line + 10)).ravel()
goodpixels = np.intersect1d(goodpixels, sky)
# Making goodpixels mask
goodpixels = np.intersect1d(goodpixels, np.where(np.isfinite(flux))[0])
goodpixels = np.intersect1d(goodpixels, np.where(np.isfinite(
fluxerr))[0])
# Cleaning input spectrum
fluxerr[~np.isfinite(fluxerr)] = np.nanmax(fluxerr)
flux[~np.isfinite(flux)] = 0.
########################################################################
# Preparing the fit
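        # velocity shift between the template and galaxy starting wavelengths (passed to ppxf as vsyst)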
dv = (logwave_temp[0] - logwave[0]) * \
constants.c.to("km/s").value
templates = np.column_stack((ssp_templates, gas_templates))
components = np.hstack((np.zeros(nssps), np.arange(ngas)+1)).astype(
np.int)
gas_component = components > 0
start = [start0[:2]] * (ngas + 1)
bounds = [bounds0] * (ngas + 1)
moments = [2] * (ngas + 1)
########################################################################
# Fitting with two components
pp = ppxf(templates, flux, fluxerr, velscale=velscale,
plot=True, moments=moments, start=start, vsyst=dv,
lam=wave, component=components, mdegree=-1,
gas_component=gas_component, gas_names=line_names,
quiet=False, degree=15, bounds=bounds, goodpixels=goodpixels)
plt.savefig(os.path.join(outdir, "{}.png".format(name)), dpi=250)
plt.close()
pp.name = name
# Saving results and plot
save(pp, outdir)
def save(pp, outdir):
""" Save results from pPXF into files excluding fitting arrays. """
array_keys = ["lam", "galaxy", "noise", "bestfit", "gas_bestfit",
"mpoly", "apoly"]
array_keys = [_ for _ in array_keys if isinstance(getattr(pp, _),
np.ndarray)]
table = Table([getattr(pp, key) for key in array_keys], names=array_keys)
table.write(os.path.join(outdir, "{}_bestfit.fits".format(pp.name)),
overwrite=True)
ppdict = {}
save_keys = ["name", "regul", "degree", "mdegree", "reddening", "clean",
"ncomp", "chi2"]
# Chi2 is a astropy.unit.quantity object, we have to make it a scalar
pp.chi2 = float(pp.chi2)
for key in save_keys:
ppdict[key] = getattr(pp, key)
klist = ["V", "sigma"]
for j, sol in enumerate(pp.sol):
for i in range(len(sol)):
ppdict["{}_{}".format(klist[i], j)] = float(sol[i])
ppdict["{}err_{}".format(klist[i], j)] = float(pp.error[j][i])
with open(os.path.join(outdir, "{}.yaml".format(pp.name)), "w") as f:
yaml.dump(ppdict, f, default_flow_style=False)
# Saving table with emission lines
gas = pp.gas_component
emtable = []
for j, comp in enumerate(pp.component[gas]):
t = Table()
t["name"] = [ pp.gas_names[j]]
t["flux"] = [pp.gas_flux[j]]
t["fluxerr"] = [pp.gas_flux_error[j]]
t["V"] = [pp.sol[comp][0]]
t["Verr"] = [pp.error[comp][0]]
t["sigma"] = [pp.sol[comp][1]]
t["sigmaerr"] = [pp.error[comp][1]]
emtable.append(t)
emtable = vstack(emtable)
emtable.write(os.path.join(outdir, "{}_emission_lines.fits".format(
pp.name)), overwrite=True)
def make_table(direc, output):
""" Read all yaml files in a ppf directory to one make table for all
bins. """
filenames = sorted([_ for _ in os.listdir(direc) if _.endswith(".yaml")])
keys = ["name", "V_0", "Verr_0", "sigma_0", "sigmaerr_0", "der_sn"]
names = {"name": "spec", "V_0": "V", "Verr_0": "Verr",
"sigma_0": "sigma", "sigmaerr_0": "sigmaerr", "der_sn": "SNR"}
outtable = []
for fname in filenames:
with open(os.path.join(direc, fname)) as f:
props = yaml.load(f)
data = Table([[props[k]] for k in keys], names=[names[k] for k in keys])
outtable.append(data)
outtable = vstack(outtable)
outtable.write(output, format="fits", overwrite=True)
if __name__ == '__main__':
targetSN = 100
sample = "kinematics"
velscale = context.velscale
tempfile = os.path.join(context.data_dir, "templates",
"emiles_vel{}_{}_fwhm2.95.fits".format(int(velscale), sample))
wdir = os.path.join(context.data_dir, "MUSE/sn{}/sci".format(targetSN))
os.chdir(wdir)
outdir = os.path.join(os.path.split(wdir)[0], "ppxf")
if not os.path.exists(outdir):
os.mkdir(outdir)
specs = sorted([_ for _ in os.listdir(".") if _.endswith(".fits")])
run_ppxf(specs, tempfile, outdir, redo=False)
|
[
"os.mkdir",
"yaml.load",
"numpy.nanmedian",
"yaml.dump",
"numpy.arange",
"numpy.exp",
"ppxf.ppxf.ppxf",
"os.path.join",
"os.chdir",
"astropy.constants.c.to",
"astropy.io.fits.getdata",
"matplotlib.pyplot.close",
"os.path.exists",
"numpy.isfinite",
"spectres.spectres",
"ppxf.ppxf_util.log_rebin",
"numpy.intersect1d",
"numpy.argwhere",
"os.listdir",
"ppxf.ppxf_util.emission_lines",
"numpy.nanmax",
"astropy.table.Table.read",
"astropy.table.Table",
"numpy.zeros",
"astropy.table.vstack",
"numpy.where",
"numpy.array",
"numpy.column_stack",
"misc.snr",
"os.path.split"
] |
[((790, 823), 'astropy.table.Table.read', 'Table.read', (['templates_file'], {'hdu': '(1)'}), '(templates_file, hdu=1)\n', (800, 823), False, 'from astropy.table import Table, vstack, hstack\n'), ((946, 966), 'numpy.exp', 'np.exp', (['logwave_temp'], {}), '(logwave_temp)\n', (952, 966), True, 'import numpy as np\n'), ((6153, 6168), 'astropy.table.vstack', 'vstack', (['emtable'], {}), '(emtable)\n', (6159, 6168), False, 'from astropy.table import Table, vstack, hstack\n'), ((6964, 6980), 'astropy.table.vstack', 'vstack', (['outtable'], {}), '(outtable)\n', (6970, 6980), False, 'from astropy.table import Table, vstack, hstack\n'), ((7371, 7385), 'os.chdir', 'os.chdir', (['wdir'], {}), '(wdir)\n', (7379, 7385), False, 'import os\n'), ((729, 773), 'astropy.io.fits.getdata', 'fits.getdata', (['templates_file'], {'extname': '"""SSPS"""'}), "(templates_file, extname='SSPS')\n", (741, 773), False, 'from astropy.io import fits\n'), ((1383, 1399), 'astropy.table.Table.read', 'Table.read', (['spec'], {}), '(spec)\n', (1393, 1399), False, 'from astropy.table import Table, vstack, hstack\n'), ((1736, 1764), 'numpy.nanmedian', 'np.nanmedian', (['(flux / fluxerr)'], {}), '(flux / fluxerr)\n', (1748, 1764), True, 'import numpy as np\n'), ((2049, 2064), 'numpy.exp', 'np.exp', (['logwave'], {}), '(logwave)\n', (2055, 2064), True, 'import numpy as np\n'), ((2163, 2212), 'spectres.spectres', 'spectres', (['wave', 'wave_lin', 'flux'], {'spec_errs': 'fluxerr'}), '(wave, wave_lin, flux, spec_errs=fluxerr)\n', (2171, 2212), False, 'from spectres import spectres\n'), ((2394, 2467), 'ppxf.ppxf_util.emission_lines', 'ppxf_util.emission_lines', (['logwave_temp', '[wave_lin[0], wave_lin[-1]]', '(2.95)'], {}), '(logwave_temp, [wave_lin[0], wave_lin[-1]], 2.95)\n', (2418, 2467), False, 'from ppxf import ppxf_util\n'), ((2673, 2719), 'numpy.array', 'np.array', (['[4785, 5577, 5889, 6300, 6360, 6863]'], {}), '([4785, 5577, 5889, 6300, 6360, 6863])\n', (2681, 2719), True, 'import numpy as np\n'), ((3222, 3240), 'numpy.nanmax', 'np.nanmax', (['fluxerr'], {}), '(fluxerr)\n', (3231, 3240), True, 'import numpy as np\n'), ((3504, 3551), 'numpy.column_stack', 'np.column_stack', (['(ssp_templates, gas_templates)'], {}), '((ssp_templates, gas_templates))\n', (3519, 3551), True, 'import numpy as np\n'), ((3946, 4217), 'ppxf.ppxf.ppxf', 'ppxf', (['templates', 'flux', 'fluxerr'], {'velscale': 'velscale', 'plot': '(True)', 'moments': 'moments', 'start': 'start', 'vsyst': 'dv', 'lam': 'wave', 'component': 'components', 'mdegree': '(-1)', 'gas_component': 'gas_component', 'gas_names': 'line_names', 'quiet': '(False)', 'degree': '(15)', 'bounds': 'bounds', 'goodpixels': 'goodpixels'}), '(templates, flux, fluxerr, velscale=velscale, plot=True, moments=\n moments, start=start, vsyst=dv, lam=wave, component=components, mdegree\n =-1, gas_component=gas_component, gas_names=line_names, quiet=False,\n degree=15, bounds=bounds, goodpixels=goodpixels)\n', (3950, 4217), False, 'from ppxf.ppxf import ppxf\n'), ((4364, 4375), 'matplotlib.pyplot.close', 'plt.close', ([], {}), '()\n', (4373, 4375), True, 'import matplotlib.pyplot as plt\n'), ((5620, 5666), 'yaml.dump', 'yaml.dump', (['ppdict', 'f'], {'default_flow_style': '(False)'}), '(ppdict, f, default_flow_style=False)\n', (5629, 5666), False, 'import yaml\n'), ((5816, 5823), 'astropy.table.Table', 'Table', ([], {}), '()\n', (5821, 5823), False, 'from astropy.table import Table, vstack, hstack\n'), ((6851, 6916), 'astropy.table.Table', 'Table', (['[[props[k]] for k in keys]'], {'names': 
'[names[k] for k in keys]'}), '([[props[k]] for k in keys], names=[names[k] for k in keys])\n', (6856, 6916), False, 'from astropy.table import Table, vstack, hstack\n'), ((7457, 7479), 'os.path.exists', 'os.path.exists', (['outdir'], {}), '(outdir)\n', (7471, 7479), False, 'import os\n'), ((7490, 7506), 'os.mkdir', 'os.mkdir', (['outdir'], {}), '(outdir)\n', (7498, 7506), False, 'import os\n'), ((880, 913), 'astropy.table.Table.read', 'Table.read', (['templates_file'], {'hdu': '(2)'}), '(templates_file, hdu=2)\n', (890, 913), False, 'from astropy.table import Table, vstack, hstack\n'), ((1306, 1329), 'os.path.exists', 'os.path.exists', (['outyaml'], {}), '(outyaml)\n', (1320, 1329), False, 'import os\n'), ((1560, 1585), 'numpy.where', 'np.where', (['(wave_lin < 7000)'], {}), '(wave_lin < 7000)\n', (1568, 1585), True, 'import numpy as np\n'), ((1699, 1713), 'misc.snr', 'misc.snr', (['flux'], {}), '(flux)\n', (1707, 1713), False, 'import misc\n'), ((1973, 2029), 'ppxf.ppxf_util.log_rebin', 'ppxf_util.log_rebin', (['wave_range', 'flux'], {'velscale': 'velscale'}), '(wave_range, flux, velscale=velscale)\n', (1992, 2029), False, 'from ppxf import ppxf_util\n'), ((2900, 2931), 'numpy.intersect1d', 'np.intersect1d', (['goodpixels', 'sky'], {}), '(goodpixels, sky)\n', (2914, 2931), True, 'import numpy as np\n'), ((6822, 6834), 'yaml.load', 'yaml.load', (['f'], {}), '(f)\n', (6831, 6834), False, 'import yaml\n'), ((7413, 7432), 'os.path.split', 'os.path.split', (['wdir'], {}), '(wdir)\n', (7426, 7432), False, 'import os\n'), ((3198, 3218), 'numpy.isfinite', 'np.isfinite', (['fluxerr'], {}), '(fluxerr)\n', (3209, 3218), True, 'import numpy as np\n'), ((3256, 3273), 'numpy.isfinite', 'np.isfinite', (['flux'], {}), '(flux)\n', (3267, 3273), True, 'import numpy as np\n'), ((3454, 3476), 'astropy.constants.c.to', 'constants.c.to', (['"""km/s"""'], {}), "('km/s')\n", (3468, 3476), False, 'from astropy import constants\n'), ((6447, 6464), 'os.listdir', 'os.listdir', (['direc'], {}), '(direc)\n', (6457, 6464), False, 'import os\n'), ((6767, 6793), 'os.path.join', 'os.path.join', (['direc', 'fname'], {}), '(direc, fname)\n', (6779, 6793), False, 'import os\n'), ((7539, 7554), 'os.listdir', 'os.listdir', (['"""."""'], {}), "('.')\n", (7549, 7554), False, 'import os\n'), ((2813, 2865), 'numpy.argwhere', 'np.argwhere', (['((wave < line - 10) | (wave > line + 10))'], {}), '((wave < line - 10) | (wave > line + 10))\n', (2824, 2865), True, 'import numpy as np\n'), ((3024, 3041), 'numpy.isfinite', 'np.isfinite', (['flux'], {}), '(flux)\n', (3035, 3041), True, 'import numpy as np\n'), ((3105, 3125), 'numpy.isfinite', 'np.isfinite', (['fluxerr'], {}), '(fluxerr)\n', (3116, 3125), True, 'import numpy as np\n'), ((3585, 3600), 'numpy.zeros', 'np.zeros', (['nssps'], {}), '(nssps)\n', (3593, 3600), True, 'import numpy as np\n'), ((3602, 3617), 'numpy.arange', 'np.arange', (['ngas'], {}), '(ngas)\n', (3611, 3617), True, 'import numpy as np\n')]
|
import glyphsLib
import importlib
import argparse
import sys
from glob import glob
parser = argparse.ArgumentParser(description='Filter a font file')
parser.add_argument('input', metavar='GLYPHS',
help='the Glyphs file')
parser.add_argument('filter',metavar='FILTER',
help='the filter to use')
args = parser.parse_args()
base_path = "NaNGlyphFilters"
sys.path.append(base_path)
glyphsLib.Glyphs.font = glyphsLib.GSFont(args.input)
filter_script = args.filter
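# register glyphsLib under the name GlyphsApp so that `import GlyphsApp` in the filter scripts resolves to glyphsLib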
sys.modules['GlyphsApp'] = glyphsLib
try:
i = importlib.import_module(filter_script)
except ModuleNotFoundError as e:
modules = [x[len(base_path)+1:-3] for x in sorted(glob(base_path+"/*.py")) if "/NaN" not in x]
print("Couldn't find filter '%s'.\nTry one of: %s" % (filter_script, ", ".join(modules)))
sys.exit(1)
save_file = args.input.replace(".glyphs", "-"+filter_script+".glyphs")
glyphsLib.Glyphs.font.save(save_file)
print("Saved on %s" % save_file)
|
[
"sys.path.append",
"glyphsLib.GSFont",
"argparse.ArgumentParser",
"importlib.import_module",
"glyphsLib.Glyphs.font.save",
"glob.glob",
"sys.exit"
] |
[((93, 150), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""Filter a font file"""'}), "(description='Filter a font file')\n", (116, 150), False, 'import argparse\n'), ((393, 419), 'sys.path.append', 'sys.path.append', (['base_path'], {}), '(base_path)\n', (408, 419), False, 'import sys\n'), ((444, 472), 'glyphsLib.GSFont', 'glyphsLib.GSFont', (['args.input'], {}), '(args.input)\n', (460, 472), False, 'import glyphsLib\n'), ((894, 931), 'glyphsLib.Glyphs.font.save', 'glyphsLib.Glyphs.font.save', (['save_file'], {}), '(save_file)\n', (920, 931), False, 'import glyphsLib\n'), ((550, 588), 'importlib.import_module', 'importlib.import_module', (['filter_script'], {}), '(filter_script)\n', (573, 588), False, 'import importlib\n'), ((810, 821), 'sys.exit', 'sys.exit', (['(1)'], {}), '(1)\n', (818, 821), False, 'import sys\n'), ((673, 698), 'glob.glob', 'glob', (["(base_path + '/*.py')"], {}), "(base_path + '/*.py')\n", (677, 698), False, 'from glob import glob\n')]
|
import tweepy
import time
import sys
auth = tweepy.OAuthHandler('FzQNofWMcCfK1ghaqpwM3sCJu', '<KEY>')
auth.set_access_token('<KEY>', '<KEY>')
api = tweepy.API(auth)
'''user=api.me()
print(user.name,user.screen_name,user.followers_count)
public_tweets = api.home_timeline()
for tweet in public_tweets:
print(tweet.text)
'''
def limit_handle(cursor):
try:
while True:
yield cursor.next()
except tweepy.RateLimitError:
        print("Rate limit exceeded. Sleeping for 10 seconds.")
time.sleep(10)
except StopIteration:
return
#Generous bot
for follower in limit_handle(tweepy.Cursor(api.followers).items()):
print(follower.name,follower.followers_count)
# search tweets by keyword
numberOfTweets=2
search_str='indiaforsale'
for tweet in tweepy.Cursor(api.search,search_str).items(numberOfTweets):
try:
tweet.favorite()
print('I liked the tweet')
except tweepy.TweepError as e:
print(e.reason)
except StopIteration:
break
|
[
"tweepy.OAuthHandler",
"tweepy.Cursor",
"tweepy.API",
"time.sleep"
] |
[((45, 102), 'tweepy.OAuthHandler', 'tweepy.OAuthHandler', (['"""FzQNofWMcCfK1ghaqpwM3sCJu"""', '"""<KEY>"""'], {}), "('FzQNofWMcCfK1ghaqpwM3sCJu', '<KEY>')\n", (64, 102), False, 'import tweepy\n'), ((150, 166), 'tweepy.API', 'tweepy.API', (['auth'], {}), '(auth)\n', (160, 166), False, 'import tweepy\n'), ((797, 834), 'tweepy.Cursor', 'tweepy.Cursor', (['api.search', 'search_str'], {}), '(api.search, search_str)\n', (810, 834), False, 'import tweepy\n'), ((522, 536), 'time.sleep', 'time.sleep', (['(10)'], {}), '(10)\n', (532, 536), False, 'import time\n'), ((627, 655), 'tweepy.Cursor', 'tweepy.Cursor', (['api.followers'], {}), '(api.followers)\n', (640, 655), False, 'import tweepy\n')]
|
import datetime
from flask import json
import msgpack
import pikka_bird_server
from pikka_bird_server.models.collection import Collection
from pikka_bird_server.models.machine import Machine
from pikka_bird_server.models.report import Report
from pikka_bird_server.models.service import Service
class TestCollections:
def assert_create_success(self, res, data):
assert res.status_code == 201
assert data == {}
assert Machine.query.count() == 1
machine = Machine.query.first()
assert isinstance(machine.created_at, datetime.datetime)
assert isinstance(machine.updated_at, datetime.datetime)
assert machine.address == '127.0.0.1'
assert machine.hostname == 'localhost'
assert Service.query.count() == 1
service = Service.query.first()
assert isinstance(service.created_at, datetime.datetime)
assert service.code == 'system'
assert Collection.query.count() == 1
collection = Collection.query.first()
assert isinstance(collection.created_at, datetime.datetime)
assert collection.collected_at == datetime.datetime(2015, 4, 4, 19, 32, 20, 616977)
assert collection.collecting_at == datetime.datetime(2015, 4, 4, 19, 33, 1, 424242)
assert collection.hostname == 'localhost'
assert collection.machine == machine
assert collection.pid == 42
assert collection.version_server == pikka_bird_server.__version__
assert collection.version_collector == '1.2.3'
assert Report.query.count() == 1
report = Report.query.first()
assert report.collection == collection
assert report.data == {'load': {'avg_15_min': 1.62939453125}}
assert report.service == service
def test_create_json(self, client, collection_valid):
res = client.post('/collections',
data=json.dumps(collection_valid),
headers={
'Content-Type': 'application/json'},
environ_base={
'REMOTE_ADDR': '127.0.0.1'})
data = json.loads(res.data)
self.assert_create_success(res, data)
def test_create_binary(self, client, collection_valid):
res = client.post('/collections',
data=msgpack.packb(collection_valid),
headers={
'Content-Type': 'application/octet-stream'},
environ_base={
'REMOTE_ADDR': '127.0.0.1'})
data = json.loads(res.data)
self.assert_create_success(res, data)
def test_create_no_content_type(self, client, collection_valid):
res = client.post('/collections',
data=json.dumps(collection_valid))
data = json.loads(res.data)
assert res.status_code == 415
assert data == {
'message': '415: Unsupported Media Type'}
assert Machine.query.count() == 0
assert Service.query.count() == 0
assert Collection.query.count() == 0
assert Report.query.count() == 0
def test_create_collection_empty(self, client):
res = client.post('/collections',
data=json.dumps({}),
headers={
'Content-Type': 'application/json'},
environ_base={
'REMOTE_ADDR': '127.0.0.1'})
data = json.loads(res.data)
assert res.status_code == 422
assert data == {
'message': '422: Unprocessable Entity'}
assert Machine.query.count() == 1
assert Service.query.count() == 0
assert Collection.query.count() == 0
assert Report.query.count() == 0
def test_create_collection_partial(self, client, collection_valid):
collection_invalid = collection_valid.copy()
del collection_invalid['environment']['hostname']
res = client.post('/collections',
data=json.dumps(collection_invalid),
headers={
'Content-Type': 'application/json'},
environ_base={
'REMOTE_ADDR': '127.0.0.1'})
data = json.loads(res.data)
assert res.status_code == 422
assert data == {
'message': '422: Unprocessable Entity'}
assert Machine.query.count() == 1
assert Service.query.count() == 0
assert Collection.query.count() == 0
assert Report.query.count() == 0
def test_create_collection_invalid_url(self, client, collection_valid):
res = client.post('/this-is-not-the-service-you-are-looking-for',
data=json.dumps(collection_valid),
headers={
'Content-Type': 'application/json'},
environ_base={
'REMOTE_ADDR': '127.0.0.1'})
data = json.loads(res.data)
assert res.status_code == 404
assert data == {
'message': '404: Not Found'}
assert Machine.query.count() == 0
assert Service.query.count() == 0
assert Collection.query.count() == 0
assert Report.query.count() == 0
|
[
"pikka_bird_server.models.machine.Machine.query.first",
"pikka_bird_server.models.report.Report.query.first",
"pikka_bird_server.models.service.Service.query.count",
"pikka_bird_server.models.report.Report.query.count",
"datetime.datetime",
"pikka_bird_server.models.collection.Collection.query.first",
"flask.json.dumps",
"pikka_bird_server.models.collection.Collection.query.count",
"msgpack.packb",
"flask.json.loads",
"pikka_bird_server.models.service.Service.query.first",
"pikka_bird_server.models.machine.Machine.query.count"
] |
[((507, 528), 'pikka_bird_server.models.machine.Machine.query.first', 'Machine.query.first', ([], {}), '()\n', (526, 528), False, 'from pikka_bird_server.models.machine import Machine\n'), ((821, 842), 'pikka_bird_server.models.service.Service.query.first', 'Service.query.first', ([], {}), '()\n', (840, 842), False, 'from pikka_bird_server.models.service import Service\n'), ((1023, 1047), 'pikka_bird_server.models.collection.Collection.query.first', 'Collection.query.first', ([], {}), '()\n', (1045, 1047), False, 'from pikka_bird_server.models.collection import Collection\n'), ((1627, 1647), 'pikka_bird_server.models.report.Report.query.first', 'Report.query.first', ([], {}), '()\n', (1645, 1647), False, 'from pikka_bird_server.models.report import Report\n'), ((2120, 2140), 'flask.json.loads', 'json.loads', (['res.data'], {}), '(res.data)\n', (2130, 2140), False, 'from flask import json\n'), ((2523, 2543), 'flask.json.loads', 'json.loads', (['res.data'], {}), '(res.data)\n', (2533, 2543), False, 'from flask import json\n'), ((2777, 2797), 'flask.json.loads', 'json.loads', (['res.data'], {}), '(res.data)\n', (2787, 2797), False, 'from flask import json\n'), ((3397, 3417), 'flask.json.loads', 'json.loads', (['res.data'], {}), '(res.data)\n', (3407, 3417), False, 'from flask import json\n'), ((4171, 4191), 'flask.json.loads', 'json.loads', (['res.data'], {}), '(res.data)\n', (4181, 4191), False, 'from flask import json\n'), ((4859, 4879), 'flask.json.loads', 'json.loads', (['res.data'], {}), '(res.data)\n', (4869, 4879), False, 'from flask import json\n'), ((462, 483), 'pikka_bird_server.models.machine.Machine.query.count', 'Machine.query.count', ([], {}), '()\n', (481, 483), False, 'from pikka_bird_server.models.machine import Machine\n'), ((776, 797), 'pikka_bird_server.models.service.Service.query.count', 'Service.query.count', ([], {}), '()\n', (795, 797), False, 'from pikka_bird_server.models.service import Service\n'), ((972, 996), 'pikka_bird_server.models.collection.Collection.query.count', 'Collection.query.count', ([], {}), '()\n', (994, 996), False, 'from pikka_bird_server.models.collection import Collection\n'), ((1158, 1207), 'datetime.datetime', 'datetime.datetime', (['(2015)', '(4)', '(4)', '(19)', '(32)', '(20)', '(616977)'], {}), '(2015, 4, 4, 19, 32, 20, 616977)\n', (1175, 1207), False, 'import datetime\n'), ((1251, 1299), 'datetime.datetime', 'datetime.datetime', (['(2015)', '(4)', '(4)', '(19)', '(33)', '(1)', '(424242)'], {}), '(2015, 4, 4, 19, 33, 1, 424242)\n', (1268, 1299), False, 'import datetime\n'), ((1584, 1604), 'pikka_bird_server.models.report.Report.query.count', 'Report.query.count', ([], {}), '()\n', (1602, 1604), False, 'from pikka_bird_server.models.report import Report\n'), ((2948, 2969), 'pikka_bird_server.models.machine.Machine.query.count', 'Machine.query.count', ([], {}), '()\n', (2967, 2969), False, 'from pikka_bird_server.models.machine import Machine\n'), ((2990, 3011), 'pikka_bird_server.models.service.Service.query.count', 'Service.query.count', ([], {}), '()\n', (3009, 3011), False, 'from pikka_bird_server.models.service import Service\n'), ((3032, 3056), 'pikka_bird_server.models.collection.Collection.query.count', 'Collection.query.count', ([], {}), '()\n', (3054, 3056), False, 'from pikka_bird_server.models.collection import Collection\n'), ((3077, 3097), 'pikka_bird_server.models.report.Report.query.count', 'Report.query.count', ([], {}), '()\n', (3095, 3097), False, 'from pikka_bird_server.models.report import Report\n'), ((3566, 3587), 
'pikka_bird_server.models.machine.Machine.query.count', 'Machine.query.count', ([], {}), '()\n', (3585, 3587), False, 'from pikka_bird_server.models.machine import Machine\n'), ((3608, 3629), 'pikka_bird_server.models.service.Service.query.count', 'Service.query.count', ([], {}), '()\n', (3627, 3629), False, 'from pikka_bird_server.models.service import Service\n'), ((3650, 3674), 'pikka_bird_server.models.collection.Collection.query.count', 'Collection.query.count', ([], {}), '()\n', (3672, 3674), False, 'from pikka_bird_server.models.collection import Collection\n'), ((3695, 3715), 'pikka_bird_server.models.report.Report.query.count', 'Report.query.count', ([], {}), '()\n', (3713, 3715), False, 'from pikka_bird_server.models.report import Report\n'), ((4340, 4361), 'pikka_bird_server.models.machine.Machine.query.count', 'Machine.query.count', ([], {}), '()\n', (4359, 4361), False, 'from pikka_bird_server.models.machine import Machine\n'), ((4382, 4403), 'pikka_bird_server.models.service.Service.query.count', 'Service.query.count', ([], {}), '()\n', (4401, 4403), False, 'from pikka_bird_server.models.service import Service\n'), ((4424, 4448), 'pikka_bird_server.models.collection.Collection.query.count', 'Collection.query.count', ([], {}), '()\n', (4446, 4448), False, 'from pikka_bird_server.models.collection import Collection\n'), ((4469, 4489), 'pikka_bird_server.models.report.Report.query.count', 'Report.query.count', ([], {}), '()\n', (4487, 4489), False, 'from pikka_bird_server.models.report import Report\n'), ((5017, 5038), 'pikka_bird_server.models.machine.Machine.query.count', 'Machine.query.count', ([], {}), '()\n', (5036, 5038), False, 'from pikka_bird_server.models.machine import Machine\n'), ((5059, 5080), 'pikka_bird_server.models.service.Service.query.count', 'Service.query.count', ([], {}), '()\n', (5078, 5080), False, 'from pikka_bird_server.models.service import Service\n'), ((5101, 5125), 'pikka_bird_server.models.collection.Collection.query.count', 'Collection.query.count', ([], {}), '()\n', (5123, 5125), False, 'from pikka_bird_server.models.collection import Collection\n'), ((5146, 5166), 'pikka_bird_server.models.report.Report.query.count', 'Report.query.count', ([], {}), '()\n', (5164, 5166), False, 'from pikka_bird_server.models.report import Report\n'), ((1928, 1956), 'flask.json.dumps', 'json.dumps', (['collection_valid'], {}), '(collection_valid)\n', (1938, 1956), False, 'from flask import json\n'), ((2320, 2351), 'msgpack.packb', 'msgpack.packb', (['collection_valid'], {}), '(collection_valid)\n', (2333, 2351), False, 'import msgpack\n'), ((2732, 2760), 'flask.json.dumps', 'json.dumps', (['collection_valid'], {}), '(collection_valid)\n', (2742, 2760), False, 'from flask import json\n'), ((3219, 3233), 'flask.json.dumps', 'json.dumps', (['{}'], {}), '({})\n', (3229, 3233), False, 'from flask import json\n'), ((3977, 4007), 'flask.json.dumps', 'json.dumps', (['collection_invalid'], {}), '(collection_invalid)\n', (3987, 4007), False, 'from flask import json\n'), ((4667, 4695), 'flask.json.dumps', 'json.dumps', (['collection_valid'], {}), '(collection_valid)\n', (4677, 4695), False, 'from flask import json\n')]
|
#!/usr/bin/env python
from flask import Flask, render_template, request
from flask_bootstrap import Bootstrap
from models import QuizForm
class Config(object):
SECRET_KEY = '<KEY>'
application = Flask(__name__)
application.config.from_object(Config)
Bootstrap(application)
@application.route('/', methods=['GET', 'POST'])
def take_test():
form = QuizForm(request.form)
if not form.validate_on_submit():
return render_template('take_quiz_template.html', form=form)
if request.method == 'POST':
return 'Submitted!'
if __name__ == '__main__':
application.run(host='0.0.0.0', debug=True)
|
[
"models.QuizForm",
"flask.Flask",
"flask_bootstrap.Bootstrap",
"flask.render_template"
] |
[((201, 216), 'flask.Flask', 'Flask', (['__name__'], {}), '(__name__)\n', (206, 216), False, 'from flask import Flask, render_template, request\n'), ((257, 279), 'flask_bootstrap.Bootstrap', 'Bootstrap', (['application'], {}), '(application)\n', (266, 279), False, 'from flask_bootstrap import Bootstrap\n'), ((358, 380), 'models.QuizForm', 'QuizForm', (['request.form'], {}), '(request.form)\n', (366, 380), False, 'from models import QuizForm\n'), ((434, 487), 'flask.render_template', 'render_template', (['"""take_quiz_template.html"""'], {'form': 'form'}), "('take_quiz_template.html', form=form)\n", (449, 487), False, 'from flask import Flask, render_template, request\n')]
|
from django.urls import path
from .views import (
HackathonListView, create_hackathon, update_hackathon, delete_hackathon, judging
)
urlpatterns = [
path('', HackathonListView.as_view(), name="hackathon-list"),
path("<int:hack_id>/team/<int:team_id>/judging/", judging, name="judging"),
path("create_hackathon", create_hackathon, name='create_hackathon'),
path("<int:hackathon_id>/update_hackathon", update_hackathon, name="update_hackathon"),
path("<int:hackathon_id>/delete_hackathon", delete_hackathon, name="delete_hackathon"),
]
|
[
"django.urls.path"
] |
[((224, 298), 'django.urls.path', 'path', (['"""<int:hack_id>/team/<int:team_id>/judging/"""', 'judging'], {'name': '"""judging"""'}), "('<int:hack_id>/team/<int:team_id>/judging/', judging, name='judging')\n", (228, 298), False, 'from django.urls import path\n'), ((304, 371), 'django.urls.path', 'path', (['"""create_hackathon"""', 'create_hackathon'], {'name': '"""create_hackathon"""'}), "('create_hackathon', create_hackathon, name='create_hackathon')\n", (308, 371), False, 'from django.urls import path\n'), ((377, 468), 'django.urls.path', 'path', (['"""<int:hackathon_id>/update_hackathon"""', 'update_hackathon'], {'name': '"""update_hackathon"""'}), "('<int:hackathon_id>/update_hackathon', update_hackathon, name=\n 'update_hackathon')\n", (381, 468), False, 'from django.urls import path\n'), ((469, 560), 'django.urls.path', 'path', (['"""<int:hackathon_id>/delete_hackathon"""', 'delete_hackathon'], {'name': '"""delete_hackathon"""'}), "('<int:hackathon_id>/delete_hackathon', delete_hackathon, name=\n 'delete_hackathon')\n", (473, 560), False, 'from django.urls import path\n')]
|
from django.conf import settings
from django.urls import reverse
from django.test import override_settings
from rest_framework import status
from rest_framework.test import APIClient
from freezegun import freeze_time
from main.tests.api import helpers
class TestAuth(helpers.BaseUserTestCase):
@override_settings(PASSWORD_HASHERS=('django.contrib.auth.hashers.MD5PasswordHasher',),
REST_FRAMEWORK_TEST_SETTINGS=helpers.REST_FRAMEWORK_TEST_SETTINGS)
def test_token_auth_end_point(self):
"""
Test that when hitting the auth_token end point we receive a token
:return:
"""
client = APIClient()
# request token
url = reverse('api:auth-token')
user = self.readonly_user
self.assertTrue(user.check_password('password'))
data = {
'username': "readonly",
"password": "password"
}
resp = client.post(url, data=data, format='json')
self.assertEqual(resp.status_code, status.HTTP_200_OK)
# check that we have a token
self.assertTrue('token' in resp.data)
token = resp.data.get('token')
self.assertTrue(token)
@override_settings(PASSWORD_HASHERS=('django.contrib.auth.hashers.MD5PasswordHasher',),
REST_FRAMEWORK_TEST_SETTINGS=helpers.REST_FRAMEWORK_TEST_SETTINGS)
def test_token_valid(self):
"""
Test that the token received can be used for authentication
:return:
"""
client = APIClient()
user = self.readonly_user
self.assertTrue(user.check_password('password'))
url = reverse('api:auth-token')
data = {
'username': user.username,
"password": "password"
}
resp = client.post(url, data=data, format='json')
token = resp.data.get('token')
self.assertTrue(token)
# can't get dataset list without token
url = reverse('api:dataset-list')
resp = client.get(url)
self.assertIn(resp.status_code, [status.HTTP_401_UNAUTHORIZED, status.HTTP_403_FORBIDDEN])
# set credential token
client.credentials(HTTP_AUTHORIZATION='Token ' + token)
resp = client.get(url)
self.assertEqual(resp.status_code, status.HTTP_200_OK)
class TestUserAuthThrottling(helpers.BaseUserTestCase):
"""
Use case: Prevent brute force authentication by preventing the API user issuing too many auth-token request
"""
def test_brute_force(self):
"""
test that a hacker sending auth request with wrong password will be blocked after n attempts
:return:
"""
rate = '6/hour'
drf_settings = settings.REST_FRAMEWORK
drf_settings['DEFAULT_THROTTLE_RATES']['auth'] = rate
with override_settings(REST_FRAMEWORK=drf_settings):
max_attempt = 6
client = APIClient()
# request token
url = reverse('api:auth-token')
user = self.readonly_user
self.assertTrue(user.check_password('password'))
data = {
'username': "readonly",
"password": "<PASSWORD>"
}
# Hacking attempt should return HTTP_400_BAD_REQUEST while attempts < throttle rate
with freeze_time("2018-05-29 12:00:00", tick=True):
for attempt in range(max_attempt):
resp = client.post(url, data=data, format='json')
self.assertEqual(resp.status_code, status.HTTP_400_BAD_REQUEST)
# next attempt should return a HTTP_429_TOO_MANY_REQUESTS
resp = client.post(url, data=data, format='json')
self.assertEqual(resp.status_code, status.HTTP_429_TOO_MANY_REQUESTS)
# let's simulate a 30 min jump in time. Should still return HTTP_429_TOO_MANY_REQUESTS
with freeze_time("2018-05-29 12:30:00", tick=True):
resp = client.post(url, data=data, format='json')
self.assertEqual(resp.status_code, status.HTTP_429_TOO_MANY_REQUESTS)
# let's jump more than one hour in time. Should be back at returning HTTP_400_BAD_REQUEST
with freeze_time("2018-05-29 13:00:05", tick=True):
resp = client.post(url, data=data, format='json')
self.assertEqual(resp.status_code, status.HTTP_400_BAD_REQUEST)
|
[
"django.urls.reverse",
"freezegun.freeze_time",
"rest_framework.test.APIClient",
"django.test.override_settings"
] |
[((303, 465), 'django.test.override_settings', 'override_settings', ([], {'PASSWORD_HASHERS': "('django.contrib.auth.hashers.MD5PasswordHasher',)", 'REST_FRAMEWORK_TEST_SETTINGS': 'helpers.REST_FRAMEWORK_TEST_SETTINGS'}), "(PASSWORD_HASHERS=(\n 'django.contrib.auth.hashers.MD5PasswordHasher',),\n REST_FRAMEWORK_TEST_SETTINGS=helpers.REST_FRAMEWORK_TEST_SETTINGS)\n", (320, 465), False, 'from django.test import override_settings\n'), ((1199, 1361), 'django.test.override_settings', 'override_settings', ([], {'PASSWORD_HASHERS': "('django.contrib.auth.hashers.MD5PasswordHasher',)", 'REST_FRAMEWORK_TEST_SETTINGS': 'helpers.REST_FRAMEWORK_TEST_SETTINGS'}), "(PASSWORD_HASHERS=(\n 'django.contrib.auth.hashers.MD5PasswordHasher',),\n REST_FRAMEWORK_TEST_SETTINGS=helpers.REST_FRAMEWORK_TEST_SETTINGS)\n", (1216, 1361), False, 'from django.test import override_settings\n'), ((654, 665), 'rest_framework.test.APIClient', 'APIClient', ([], {}), '()\n', (663, 665), False, 'from rest_framework.test import APIClient\n'), ((704, 729), 'django.urls.reverse', 'reverse', (['"""api:auth-token"""'], {}), "('api:auth-token')\n", (711, 729), False, 'from django.urls import reverse\n'), ((1534, 1545), 'rest_framework.test.APIClient', 'APIClient', ([], {}), '()\n', (1543, 1545), False, 'from rest_framework.test import APIClient\n'), ((1651, 1676), 'django.urls.reverse', 'reverse', (['"""api:auth-token"""'], {}), "('api:auth-token')\n", (1658, 1676), False, 'from django.urls import reverse\n'), ((1968, 1995), 'django.urls.reverse', 'reverse', (['"""api:dataset-list"""'], {}), "('api:dataset-list')\n", (1975, 1995), False, 'from django.urls import reverse\n'), ((2823, 2869), 'django.test.override_settings', 'override_settings', ([], {'REST_FRAMEWORK': 'drf_settings'}), '(REST_FRAMEWORK=drf_settings)\n', (2840, 2869), False, 'from django.test import override_settings\n'), ((2920, 2931), 'rest_framework.test.APIClient', 'APIClient', ([], {}), '()\n', (2929, 2931), False, 'from rest_framework.test import APIClient\n'), ((2978, 3003), 'django.urls.reverse', 'reverse', (['"""api:auth-token"""'], {}), "('api:auth-token')\n", (2985, 3003), False, 'from django.urls import reverse\n'), ((3332, 3377), 'freezegun.freeze_time', 'freeze_time', (['"""2018-05-29 12:00:00"""'], {'tick': '(True)'}), "('2018-05-29 12:00:00', tick=True)\n", (3343, 3377), False, 'from freezegun import freeze_time\n'), ((3927, 3972), 'freezegun.freeze_time', 'freeze_time', (['"""2018-05-29 12:30:00"""'], {'tick': '(True)'}), "('2018-05-29 12:30:00', tick=True)\n", (3938, 3972), False, 'from freezegun import freeze_time\n'), ((4247, 4292), 'freezegun.freeze_time', 'freeze_time', (['"""2018-05-29 13:00:05"""'], {'tick': '(True)'}), "('2018-05-29 13:00:05', tick=True)\n", (4258, 4292), False, 'from freezegun import freeze_time\n')]
|
# -*- coding: utf-8 -*-
from gorden_crawler.spiders.shiji_base import BaseSpider
from scrapy.selector import Selector
from gorden_crawler.items import BaseItem, ImageItem, Color, SkuItem
from scrapy import Request
from gorden_crawler.utils.item_field_handler import handle_price
import re
import execjs
class ShopbopEastdaneCommon(BaseSpider):
def parse_pages(self, response):
sel = Selector(response)
category = response.meta['category']
product_type = response.meta['product_type']
gender = response.meta['gender']
category_url = response.meta['category_url']
item_link_lis = sel.xpath('//li[contains(@class, "hproduct product")]')
if len(item_link_lis.extract())>0 :
for item_link_li in item_link_lis:
item_link_uri = item_link_li.xpath('./div/a/@href').extract()[0]
url = self.shopbop_base_url + item_link_uri
baseItem = BaseItem()
baseItem['type'] = 'base'
baseItem['category'] = category
baseItem['product_type'] = product_type
baseItem['url'] = url
baseItem['gender'] = gender
baseItem['brand'] = item_link_li.xpath('.//div[@class="brand"]/text()').extract()[0]
baseItem['title'] = item_link_li.xpath('.//div[@class="title"]/text()').extract()[0]
baseItem['cover'] = item_link_li.xpath('.//img/@src').extract()[0]
baseItem['list_price'] = handle_price(item_link_li.xpath('.//span[@class="retail-price"]/text()').extract()[0])
baseItem['current_price'] = handle_price(item_link_li.xpath('.//span[@class="sale-price-low"]/text()').extract()[0])
yield Request(url, callback=self.parse_item, meta={'baseItem' : baseItem})
next_page_link = sel.xpath('//span[@data-at="nextPage"]/@data-next-link').extract()
if len(next_page_link)>0 and (category_url[category] != next_page_link[0]):
url = self.shopbop_base_url + next_page_link[0]
yield Request(url, callback=self.parse_pages, meta={'category' : category, 'product_type' : product_type, 'gender' : gender, 'category_url' : category_url})
def parse_item(self, response):
baseItem = response.meta['baseItem']
return self.handle_parse_item(response, baseItem)
def handle_parse_item(self, response, baseItem):
product_detail_str="".join(re.findall(r"var\s+productDetail[^;]+", response.body))
if len(product_detail_str)>0:
context = execjs.compile('''
%s
function get_product_detail(){
return productDetail;
}
''' % (product_detail_str))
product_detail = context.call('get_product_detail')
sel = Selector(response)
product_id = sel.xpath('//div[@id="productId"]/text()').extract()[0]
skus = []
baseItem['from_site'] = self.name
baseItem['show_product_id'] = product_id
size_js_infos = product_detail['sizes']
size_infos = {}
size_values = []
for size_id in size_js_infos:
size_infos[size_js_infos[size_id]['sizeCode']] = size_id
size_values.append(size_id)
list_price = sel.xpath('//div[@id="productPrices"]//meta[@itemprop="price"]/@content').extract()[0]
color_price_blocks = sel.xpath('//div[@id="productPrices"]//div[@class="priceBlock"]')
color_price_mapping = {}
for color_price_block in color_price_blocks:
color_name = color_price_block.xpath('./span[@class="priceColors"]/text()').extract()
if len(color_name) > 0:
regular_price_span = color_price_block.xpath('./span[@class="regularPrice"]/text()').extract()
if len(regular_price_span) > 0:
color_price_mapping[color_name[0]] = regular_price_span[0]
else:
color_price_mapping[color_name[0]] = color_price_block.xpath('./span[@class="salePrice"]/text()').extract()[0]
image_items = product_detail['colors']
color_names = []
for key in image_items:
imageItems = image_items[key]['images']
color_name = image_items[key]['colorName'].strip()
color_names.append(color_name)
images=[]
tmp_images = []
for image_key in imageItems:
imageItem = ImageItem()
image = imageItems[image_key]
imageItem['thumbnail'] = image['thumbnail']
imageItem['image'] = image['zoom']
tmp_images.append((image['index'], imageItem))
tmp_images = sorted(tmp_images, key=lambda x:x[0])
for tmp_tuple in tmp_images:
images.append(tmp_tuple[1])
colorItem = Color()
colorItem['type'] = 'color'
colorItem['show_product_id'] = product_id
colorItem['from_site'] = self.name
colorItem['cover'] = image_items[key]['swatch']
colorItem['name'] = color_name
colorItem['images'] = images
yield colorItem
sizes = image_items[key]['sizes']
for size in sizes:
size_name = size_infos[size]
skuItem = SkuItem()
skuItem['type'] = 'sku'
skuItem['from_site'] = self.name
skuItem['color'] = color_name
skuItem['show_product_id'] = product_id
skuItem['id'] = key+"-"+size
skuItem['size'] = size_name
skuItem['list_price'] = list_price
if len(color_price_mapping)>0 and color_name in color_price_mapping.keys():
# skuItem['current_price'] = sale_price_span.re(r'\d+.?\d*')[0]
skuItem['current_price'] = color_price_mapping[colorItem['name']]
else:
skuItem['current_price'] = skuItem['list_price']
skuItem['is_outof_stock'] = False
skus.append(skuItem)
baseItem['sizes'] = size_values
baseItem['colors']= color_names
baseItem['skus'] = skus
size_fit_container = sel.xpath('//div[@id="sizeFitContainer"]')
if len(size_fit_container)>0:
size_fit = size_fit_container.extract()[0]
baseItem['desc'] = '<div>'+sel.xpath('//div[@itemprop="description"]').extract()[0]+size_fit+"</div>"
else:
baseItem['desc'] = sel.xpath('//div[@itemprop="description"]').extract()[0]
baseItem['dimensions'] = ['size', 'color']
yield baseItem
|
[
"gorden_crawler.items.SkuItem",
"scrapy.Request",
"scrapy.selector.Selector",
"execjs.compile",
"gorden_crawler.items.BaseItem",
"gorden_crawler.items.ImageItem",
"re.findall",
"gorden_crawler.items.Color"
] |
[((395, 413), 'scrapy.selector.Selector', 'Selector', (['response'], {}), '(response)\n', (403, 413), False, 'from scrapy.selector import Selector\n'), ((2839, 2857), 'scrapy.selector.Selector', 'Selector', (['response'], {}), '(response)\n', (2847, 2857), False, 'from scrapy.selector import Selector\n'), ((2460, 2514), 're.findall', 're.findall', (['"""var\\\\s+productDetail[^;]+"""', 'response.body'], {}), "('var\\\\s+productDetail[^;]+', response.body)\n", (2470, 2514), False, 'import re\n'), ((2576, 2772), 'execjs.compile', 'execjs.compile', (['("""\n %s\n function get_product_detail(){\n return productDetail;\n }\n """\n % product_detail_str)'], {}), '(\n """\n %s\n function get_product_detail(){\n return productDetail;\n }\n """\n % product_detail_str)\n', (2590, 2772), False, 'import execjs\n'), ((5012, 5019), 'gorden_crawler.items.Color', 'Color', ([], {}), '()\n', (5017, 5019), False, 'from gorden_crawler.items import BaseItem, ImageItem, Color, SkuItem\n'), ((945, 955), 'gorden_crawler.items.BaseItem', 'BaseItem', ([], {}), '()\n', (953, 955), False, 'from gorden_crawler.items import BaseItem, ImageItem, Color, SkuItem\n'), ((2076, 2230), 'scrapy.Request', 'Request', (['url'], {'callback': 'self.parse_pages', 'meta': "{'category': category, 'product_type': product_type, 'gender': gender,\n 'category_url': category_url}"}), "(url, callback=self.parse_pages, meta={'category': category,\n 'product_type': product_type, 'gender': gender, 'category_url':\n category_url})\n", (2083, 2230), False, 'from scrapy import Request\n'), ((4543, 4554), 'gorden_crawler.items.ImageItem', 'ImageItem', ([], {}), '()\n', (4552, 4554), False, 'from gorden_crawler.items import BaseItem, ImageItem, Color, SkuItem\n'), ((5537, 5546), 'gorden_crawler.items.SkuItem', 'SkuItem', ([], {}), '()\n', (5544, 5546), False, 'from gorden_crawler.items import BaseItem, ImageItem, Color, SkuItem\n'), ((1752, 1819), 'scrapy.Request', 'Request', (['url'], {'callback': 'self.parse_item', 'meta': "{'baseItem': baseItem}"}), "(url, callback=self.parse_item, meta={'baseItem': baseItem})\n", (1759, 1819), False, 'from scrapy import Request\n')]
|
import re # import regex module
# check if date is valid (yyyy-mm-dd)
def date_validation(date):
    # match yyyy-mm-dd; the original /.../ delimiters are JavaScript regex syntax and never match in Python
    if re.fullmatch(r"\d{4}-\d{2}-\d{2}", date):
        return True
    else:
        return False
date_validation("2022-02-29")  # True: matches the yyyy-mm-dd pattern (calendar validity is not checked)
|
[
"re.fullmatch"
] |
[((112, 158), 're.fullmatch', 're.fullmatch', (['"""/^\\\\d{4}-\\\\d{2}-\\\\d{2}$/"""', 'date'], {}), "('/^\\\\d{4}-\\\\d{2}-\\\\d{2}$/', date)\n", (124, 158), False, 'import re\n')]
|
import inflect
import json
import random
import re
infl = inflect.engine()
class MadLibber():
def make(self):
template = self.actions["template"]()
tokens = template.split(" ")
result = ""
for token in tokens:
action = re.match("\{\{(.+?)\}\}", token)
if(action):
if(action[1] in self.actions):
result += self.actions[action[1]]()
else:
result += action[0]
else:
result += token
result += " "
return result.strip()
class Complimenter(MadLibber):
def __init__(self):
with open("./data/respect/adjectives.json") as adf:
self.adjectives = json.load(adf)
with open("./data/respect/amounts.json") as amf:
self.amounts = json.load(amf)
with open("./data/respect/parts.json") as parf:
self.parts = json.load(parf)
with open("./data/respect/persons.json") as perf:
self.persons = json.load(perf)
with open("./data/respect/templates.json") as temf:
self.templates = json.load(temf)
with open("./data/respect/things.json") as thinf:
self.things = json.load(thinf)
self.actions = {
"adjective" : lambda : random.choice(self.adjectives),
"an_adjective" : lambda : infl.an(self.actions["adjective"]()),
"amount" : lambda : random.choice(self.amounts),
"an_amount" : lambda : infl.an(self.actions["amount"]()),
"parts" : lambda : random.choice(self.parts),
"person" : lambda : random.choice(self.persons),
"thing" : lambda : random.choice(self.things),
"template" : lambda : random.choice(self.templates)
}
class Prompter(MadLibber):
def __init__(self):
with open("./data/prompt/adjectives.json") as adf:
self.adjectives = json.load(adf)
with open("./data/prompt/nouns.json") as nf:
self.nouns = json.load(nf)
self.actions = {
"adjective" : lambda : random.choice(self.adjectives),
"noun" : lambda : random.choice(self.nouns),
"template" : lambda : r"{{adjective}} {{noun}}"
}
def addNoun(self, noun):
self.nouns.append(noun)
with open("./data/prompt/nouns.json", "w") as nf:
            json.dump(self.nouns, nf)  # write the updated noun list back to disk
def remNoun(self, noun):
if(noun in self.nouns):
self.nouns.remove(noun)
with open("./data/prompt/nouns.json", "w") as nf:
                json.dump(self.nouns, nf)  # write the updated noun list back to disk
def addAdjective(self, adjective):
self.adjectives.append(adjective)
with open("./data/prompt/adjectives.json", "w") as adf:
            json.dump(self.adjectives, adf)  # write the updated adjective list back to disk
def remAdjective(self, adjective):
if(adjective in self.adjectives):
self.adjectives.remove(adjective)
with open("./data/prompt/adjectives.json", "w") as adf:
                json.dump(self.adjectives, adf)  # write the updated adjective list back to disk
|
[
"inflect.engine",
"json.dump",
"json.load",
"random.choice",
"re.match"
] |
[((59, 75), 'inflect.engine', 'inflect.engine', ([], {}), '()\n', (73, 75), False, 'import inflect\n'), ((270, 306), 're.match', 're.match', (['"""\\\\{\\\\{(.+?)\\\\}\\\\}"""', 'token'], {}), "('\\\\{\\\\{(.+?)\\\\}\\\\}', token)\n", (278, 306), False, 'import re\n'), ((746, 760), 'json.load', 'json.load', (['adf'], {}), '(adf)\n', (755, 760), False, 'import json\n'), ((845, 859), 'json.load', 'json.load', (['amf'], {}), '(amf)\n', (854, 859), False, 'import json\n'), ((941, 956), 'json.load', 'json.load', (['parf'], {}), '(parf)\n', (950, 956), False, 'import json\n'), ((1042, 1057), 'json.load', 'json.load', (['perf'], {}), '(perf)\n', (1051, 1057), False, 'import json\n'), ((1147, 1162), 'json.load', 'json.load', (['temf'], {}), '(temf)\n', (1156, 1162), False, 'import json\n'), ((1247, 1263), 'json.load', 'json.load', (['thinf'], {}), '(thinf)\n', (1256, 1263), False, 'import json\n'), ((1957, 1971), 'json.load', 'json.load', (['adf'], {}), '(adf)\n', (1966, 1971), False, 'import json\n'), ((2050, 2063), 'json.load', 'json.load', (['nf'], {}), '(nf)\n', (2059, 2063), False, 'import json\n'), ((2416, 2429), 'json.dump', 'json.dump', (['nf'], {}), '(nf)\n', (2425, 2429), False, 'import json\n'), ((2778, 2792), 'json.dump', 'json.dump', (['adf'], {}), '(adf)\n', (2787, 2792), False, 'import json\n'), ((1325, 1355), 'random.choice', 'random.choice', (['self.adjectives'], {}), '(self.adjectives)\n', (1338, 1355), False, 'import random\n'), ((1465, 1492), 'random.choice', 'random.choice', (['self.amounts'], {}), '(self.amounts)\n', (1478, 1492), False, 'import random\n'), ((1595, 1620), 'random.choice', 'random.choice', (['self.parts'], {}), '(self.parts)\n', (1608, 1620), False, 'import random\n'), ((1654, 1681), 'random.choice', 'random.choice', (['self.persons'], {}), '(self.persons)\n', (1667, 1681), False, 'import random\n'), ((1714, 1740), 'random.choice', 'random.choice', (['self.things'], {}), '(self.things)\n', (1727, 1740), False, 'import random\n'), ((1776, 1805), 'random.choice', 'random.choice', (['self.templates'], {}), '(self.templates)\n', (1789, 1805), False, 'import random\n'), ((2125, 2155), 'random.choice', 'random.choice', (['self.adjectives'], {}), '(self.adjectives)\n', (2138, 2155), False, 'import random\n'), ((2187, 2212), 'random.choice', 'random.choice', (['self.nouns'], {}), '(self.nouns)\n', (2200, 2212), False, 'import random\n'), ((2606, 2619), 'json.dump', 'json.dump', (['nf'], {}), '(nf)\n', (2615, 2619), False, 'import json\n'), ((3005, 3019), 'json.dump', 'json.dump', (['adf'], {}), '(adf)\n', (3014, 3019), False, 'import json\n')]
|
from math import exp
import cv2 as cv
import numpy as np
from concurrent.futures import ProcessPoolExecutor
from numba import jit
from numpy import float32
from tqdm import tqdm
from utils import (
get_region_indexes,
get_region_centers,
associate_index_to_centers,
get_window,
)
@jit
def P(v):
return v / 255
@jit
def deltaIx(img, channel, x, y):
res = 0
if x + 1 < img.shape[0] and y < img.shape[1]:
res = abs(img[x + 1][y][channel] - img[x][y][channel])
else:
res = 0
return res
@jit
def deltaIy(img, channel, x, y):
res = 0
if y - 1 > 0 and x < img.shape[0]:
res = abs(img[x][y - 1][channel] - img[x][y][channel])
else:
res = 0
return res
def getDetailsRegions(imgs):
region_indexes = get_region_indexes(imgs[0].shape[0], imgs[0].shape[1], 10)
M = []
for i in range(len(imgs)):
M.append([])
for j in tqdm(range(region_indexes.shape[0])):
M_B = 0
M_G = 0
M_R = 0
for x in range(region_indexes[j][0][0], region_indexes[j][0][1]):
for y in range(region_indexes[j][1][0], region_indexes[j][1][1]):
M_B += P(max(deltaIx(imgs[i], 0, x, y), deltaIy(imgs[i], 0, x, y)))
M_G += P(max(deltaIx(imgs[i], 1, x, y), deltaIy(imgs[i], 1, x, y)))
M_R += P(max(deltaIx(imgs[i], 2, x, y), deltaIy(imgs[i], 2, x, y)))
M[i].append([M_B, M_G, M_R])
return np.array(M), region_indexes
def joinBestRegions(imgs, M, region_indexes):
res = np.zeros(imgs[0].shape)
for channel_indx in range(3):
for r_indx in tqdm(range(M.shape[1])): # iterate over each region
max_r = {}
for i in range(len(imgs)):
max_r[np.sum(M[i][r_indx])] = i
index_image = max_r[max(max_r)]
for i in range(region_indexes[r_indx][0][0], region_indexes[r_indx][0][1]):
for j in range(
region_indexes[r_indx][1][0], region_indexes[r_indx][1][1]
):
res[i][j][channel_indx] = imgs[index_image][i][j][channel_indx]
return res
@jit
def U(x_c_reg, y_c_reg, x_c, y_c):
epsilon = 2
return abs(x_c_reg - x_c) <= epsilon and abs(y_c_reg - y_c) <= epsilon
@jit
def exp_g(x, y, x_c, y_c) -> float:
sigma_x = 100
sigma_y = 100
return exp(
-((((x - x_c) ** 2) / (2 * sigma_x)) + (((y - y_c) ** 2) / (2 * sigma_y)))
)
@jit
def gaussianBlendingFunction(x, y, x_c, y_c, region_indexes, center_indexes):
num = exp_g(x, y, x_c, y_c)
den = 0.0
for i in range(center_indexes.shape[0]):
den += exp_g(x, y, center_indexes[i][0], center_indexes[i][1])
den *= center_indexes.shape[0]
return num / den
def compute_channel(channel, region_indexes, center_indexes, map_px_center):
center_indexes = np.float32(center_indexes)
res = np.zeros(shape=channel.shape, dtype=float32)
for x in tqdm(range(res.shape[0])):
for y in range(res.shape[1]):
window = get_window(x, y, channel, 5) # WINDOW VERSION
for i in range(window[0][0], window[0][1]):
for j in range(window[1][0], window[1][1]):
# for i in range(res.shape[0]):
# for j in range(res.shape[1]):
add = 0
if U(
map_px_center[(i, j)][0],
map_px_center[(i, j)][1],
map_px_center[(x, y)][0],
map_px_center[(x, y)][1],
):
add = 1
add *= gaussianBlendingFunction(
map_px_center[(x, y)][0],
map_px_center[(x, y)][1],
map_px_center[(i, j)][0],
map_px_center[(i, j)][1],
region_indexes,
center_indexes,
)
add *= channel[x][y]
res[x][y] += add
return res
def blend(img, regions_indexes):
centers_indexes = get_region_centers(regions_indexes)
pixel_region_center = associate_index_to_centers(regions_indexes, centers_indexes)
b, g, r = cv.split(img)
with ProcessPoolExecutor() as excecutor:
proc1 = excecutor.submit(
compute_channel, b, regions_indexes, centers_indexes, pixel_region_center
)
proc2 = excecutor.submit(
compute_channel, g, regions_indexes, centers_indexes, pixel_region_center
)
proc3 = excecutor.submit(
compute_channel, r, regions_indexes, centers_indexes, pixel_region_center
)
b = proc1.result()
g = proc2.result()
r = proc3.result()
return cv.merge((b, g, r))
def compute(imgs):
for i in range(len(imgs)):
imgs[i] = np.float32(imgs[i])
M, regions_indexes = getDetailsRegions(imgs)
res = blend(joinBestRegions(imgs, M, regions_indexes), regions_indexes)
res = res / np.amax(res)
res = 255 * res
return res
|
[
"math.exp",
"utils.get_region_centers",
"utils.get_region_indexes",
"numpy.sum",
"utils.associate_index_to_centers",
"numpy.float32",
"concurrent.futures.ProcessPoolExecutor",
"numpy.zeros",
"utils.get_window",
"numpy.amax",
"cv2.split",
"numpy.array",
"cv2.merge"
] |
[((787, 845), 'utils.get_region_indexes', 'get_region_indexes', (['imgs[0].shape[0]', 'imgs[0].shape[1]', '(10)'], {}), '(imgs[0].shape[0], imgs[0].shape[1], 10)\n', (805, 845), False, 'from utils import get_region_indexes, get_region_centers, associate_index_to_centers, get_window\n'), ((1586, 1609), 'numpy.zeros', 'np.zeros', (['imgs[0].shape'], {}), '(imgs[0].shape)\n', (1594, 1609), True, 'import numpy as np\n'), ((2413, 2484), 'math.exp', 'exp', (['(-((x - x_c) ** 2 / (2 * sigma_x) + (y - y_c) ** 2 / (2 * sigma_y)))'], {}), '(-((x - x_c) ** 2 / (2 * sigma_x) + (y - y_c) ** 2 / (2 * sigma_y)))\n', (2416, 2484), False, 'from math import exp\n'), ((2913, 2939), 'numpy.float32', 'np.float32', (['center_indexes'], {}), '(center_indexes)\n', (2923, 2939), True, 'import numpy as np\n'), ((2950, 2994), 'numpy.zeros', 'np.zeros', ([], {'shape': 'channel.shape', 'dtype': 'float32'}), '(shape=channel.shape, dtype=float32)\n', (2958, 2994), True, 'import numpy as np\n'), ((4215, 4250), 'utils.get_region_centers', 'get_region_centers', (['regions_indexes'], {}), '(regions_indexes)\n', (4233, 4250), False, 'from utils import get_region_indexes, get_region_centers, associate_index_to_centers, get_window\n'), ((4277, 4337), 'utils.associate_index_to_centers', 'associate_index_to_centers', (['regions_indexes', 'centers_indexes'], {}), '(regions_indexes, centers_indexes)\n', (4303, 4337), False, 'from utils import get_region_indexes, get_region_centers, associate_index_to_centers, get_window\n'), ((4352, 4365), 'cv2.split', 'cv.split', (['img'], {}), '(img)\n', (4360, 4365), True, 'import cv2 as cv\n'), ((4884, 4903), 'cv2.merge', 'cv.merge', (['(b, g, r)'], {}), '((b, g, r))\n', (4892, 4903), True, 'import cv2 as cv\n'), ((1500, 1511), 'numpy.array', 'np.array', (['M'], {}), '(M)\n', (1508, 1511), True, 'import numpy as np\n'), ((4376, 4397), 'concurrent.futures.ProcessPoolExecutor', 'ProcessPoolExecutor', ([], {}), '()\n', (4395, 4397), False, 'from concurrent.futures import ProcessPoolExecutor\n'), ((4975, 4994), 'numpy.float32', 'np.float32', (['imgs[i]'], {}), '(imgs[i])\n', (4985, 4994), True, 'import numpy as np\n'), ((5139, 5151), 'numpy.amax', 'np.amax', (['res'], {}), '(res)\n', (5146, 5151), True, 'import numpy as np\n'), ((3094, 3122), 'utils.get_window', 'get_window', (['x', 'y', 'channel', '(5)'], {}), '(x, y, channel, 5)\n', (3104, 3122), False, 'from utils import get_region_indexes, get_region_centers, associate_index_to_centers, get_window\n'), ((1803, 1823), 'numpy.sum', 'np.sum', (['M[i][r_indx]'], {}), '(M[i][r_indx])\n', (1809, 1823), True, 'import numpy as np\n')]
|
import re
from typing import List, Union, Iterable
class NaturalSort:
@staticmethod
def atoi(text: str) -> int:
return int(text) if text.isdigit() else text
@staticmethod
def natural_keys(text: str) -> List[Union[str, int]]:
"""
alist.sort(key=natural_keys) sorts in human order
http://nedbatchelder.com/blog/200712/human_sorting.html
(See Toothy's implementation in the comments)
"""
return [NaturalSort.atoi(c) for c in re.split(r'(\d+)', text)]
@staticmethod
def sorted(data: Iterable):
return sorted(data, key=NaturalSort.natural_keys)
|
[
"re.split"
] |
[((498, 522), 're.split', 're.split', (['"""(\\\\d+)"""', 'text'], {}), "('(\\\\d+)', text)\n", (506, 522), False, 'import re\n')]
|
"""
2018, University of Freiburg.
<NAME> <<EMAIL>>
"""
import os
import argparse
import pickle
import numpy as np
import re
from sklearn.metrics import accuracy_score, precision_score, recall_score
from concept_neuron import split_train_valid_test, process_sentence_pos_tags
from concept_neuron import print_pos_tag_statistics, compute_LSTM_states
# hidden_states or cell_states of LSTMs
state_type = 'cell_states'
# List of concepts to analyse - Upenn POS tags
# http://www.nltk.org/api/nltk.tag.html
# To find the available POS tags:
# import nltk.help; nltk.help.upenn_tagset()
concepts = ['(', ')', ',', '.', 'CC', 'CD', 'DT', 'IN', 'JJ', 'MD',
'NN', 'NNP', 'PRP', 'RB', 'TO', 'VB']
concepts.extend(['SPACE', 'OTHER'])
def concept_neurons_accuracy(args):
"""
Computes the accuracy for various logistic regression classifiers
for different POS tags, as a multiclass classifier.
Args:
args (argparse): arguments.
Returns:
None.
"""
# Directory with LSTM model.
save_dir = args.save_dir
# Folder to save results.
if not os.path.isdir(args.results_dir):
os.makedirs(args.results_dir)
results_dir = args.results_dir
# Data to analyse.
input_file = args.data_file
# Get training data, tokenize and POS tag sentences.
# X holds the sentences (word1, word2, ...)
# Y holds the corresponding ((word1, tag1), (word2, tags), ...)
X, Y = process_sentence_pos_tags(input_file, args.group_tags)
# Set the concepts to the whole set if no grouping is required.
unique_tags, counts = np.unique([y[1] for sublist in Y for y in sublist],
return_counts=True)
if not args.group_tags:
global concepts
concepts = unique_tags
# Print some statistics about the initial distribution of POS tags.
print_pos_tag_statistics(unique_tags, counts)
# Computes the LSTM state for each byte in X.
X_t, X_t_pos_tags = compute_LSTM_states(save_dir, X, Y)
# Compute the overall metrics for the logistic regression classifiers.
print('\n-----> Test results')
classifiers_id = ['all', 'top1', 'top2', 'top3']
for classifier_id in classifiers_id:
print('\n- {}'.format(classifier_id))
concept_classifiers = []
predicted_probs = []
classes = []
for concept in concepts:
lr_file = os.path.join(
results_dir, 'log_reg_model_' + concept +
'_' + classifier_id + '.sav')
if not os.path.exists(lr_file):
continue
concept_classifiers.append(concept)
lr_model = pickle.load(open(lr_file, 'rb'))
classes.append(lr_model.classes_[0])
# Largest coefficients
lr_file_all = os.path.join(
results_dir, 'log_reg_model_' + concept + '_all.sav')
coef_sorted = np.argsort(-np.abs(np.squeeze(
pickle.load(open(lr_file_all, 'rb')).coef_)))
x = re.search(r'^top(?P<k>\d)$', classifier_id)
if x is None: # all weights
X_t_ = X_t
else: # top k weights
k = int(x.group('k'))
X_t_ = [x[coef_sorted[0:k]] for x in X_t]
trX, vaX, teX, trY, vaY, teY = split_train_valid_test(X_t_,
X_t_pos_tags)
predicted_probs.append(lr_model.predict_proba(teX)[:, 0])
# Find the class with largest predicted probability.
concept_classifiers = np.array(concept_classifiers)
predicted_probs = np.array(predicted_probs)
max_prob_ind = np.argmax(predicted_probs, axis=0)
pred_classes = concept_classifiers[max_prob_ind].tolist()
y_true, y_pred = teY, pred_classes
print('Test accuracy: {:.3f}'.format(accuracy_score(y_true, y_pred)))
print('Test precision: {:.3f}'.format(
precision_score(y_true, y_pred, average='weighted')))
print('Test recall: {:.3f}'.format(
recall_score(y_true, y_pred, average='weighted')))
if __name__ == '__main__':
"""
Parse CLI arguments.
"""
parser = argparse.ArgumentParser(
formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser.add_argument(
'--save_dir', type=str,
default='../byte_LSTM_trained_models/wikitext/save/95/',
help='directory containing LSTM-model')
parser.add_argument('--data_file', type=str, default=None,
help="""file to use as input to the classifier.
If no file is provided, the
nltk.corpus.treebank is used
""")
parser.add_argument('--results_dir', type=str, default='results',
help='directory with saved classifiers')
parser.add_argument('--group_tags', action='store_true',
help="""group all VB* tags into VB;
JJ* into JJ;
NN* into NN;
NNP* into NNP;
RB* into RB.
""")
args = parser.parse_args()
concept_neurons_accuracy(args)
|
[
"concept_neuron.print_pos_tag_statistics",
"argparse.ArgumentParser",
"os.makedirs",
"numpy.argmax",
"os.path.isdir",
"sklearn.metrics.accuracy_score",
"os.path.exists",
"concept_neuron.split_train_valid_test",
"sklearn.metrics.recall_score",
"numpy.array",
"re.search",
"concept_neuron.process_sentence_pos_tags",
"sklearn.metrics.precision_score",
"os.path.join",
"concept_neuron.compute_LSTM_states",
"numpy.unique"
] |
[((1446, 1500), 'concept_neuron.process_sentence_pos_tags', 'process_sentence_pos_tags', (['input_file', 'args.group_tags'], {}), '(input_file, args.group_tags)\n', (1471, 1500), False, 'from concept_neuron import split_train_valid_test, process_sentence_pos_tags\n'), ((1596, 1667), 'numpy.unique', 'np.unique', (['[y[1] for sublist in Y for y in sublist]'], {'return_counts': '(True)'}), '([y[1] for sublist in Y for y in sublist], return_counts=True)\n', (1605, 1667), True, 'import numpy as np\n'), ((1864, 1909), 'concept_neuron.print_pos_tag_statistics', 'print_pos_tag_statistics', (['unique_tags', 'counts'], {}), '(unique_tags, counts)\n', (1888, 1909), False, 'from concept_neuron import print_pos_tag_statistics, compute_LSTM_states\n'), ((1985, 2020), 'concept_neuron.compute_LSTM_states', 'compute_LSTM_states', (['save_dir', 'X', 'Y'], {}), '(save_dir, X, Y)\n', (2004, 2020), False, 'from concept_neuron import print_pos_tag_statistics, compute_LSTM_states\n'), ((4222, 4301), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'formatter_class': 'argparse.ArgumentDefaultsHelpFormatter'}), '(formatter_class=argparse.ArgumentDefaultsHelpFormatter)\n', (4245, 4301), False, 'import argparse\n'), ((1099, 1130), 'os.path.isdir', 'os.path.isdir', (['args.results_dir'], {}), '(args.results_dir)\n', (1112, 1130), False, 'import os\n'), ((1140, 1169), 'os.makedirs', 'os.makedirs', (['args.results_dir'], {}), '(args.results_dir)\n', (1151, 1169), False, 'import os\n'), ((3590, 3619), 'numpy.array', 'np.array', (['concept_classifiers'], {}), '(concept_classifiers)\n', (3598, 3619), True, 'import numpy as np\n'), ((3646, 3671), 'numpy.array', 'np.array', (['predicted_probs'], {}), '(predicted_probs)\n', (3654, 3671), True, 'import numpy as np\n'), ((3695, 3729), 'numpy.argmax', 'np.argmax', (['predicted_probs'], {'axis': '(0)'}), '(predicted_probs, axis=0)\n', (3704, 3729), True, 'import numpy as np\n'), ((2410, 2498), 'os.path.join', 'os.path.join', (['results_dir', "('log_reg_model_' + concept + '_' + classifier_id + '.sav')"], {}), "(results_dir, 'log_reg_model_' + concept + '_' + classifier_id +\n '.sav')\n", (2422, 2498), False, 'import os\n'), ((2812, 2878), 'os.path.join', 'os.path.join', (['results_dir', "('log_reg_model_' + concept + '_all.sav')"], {}), "(results_dir, 'log_reg_model_' + concept + '_all.sav')\n", (2824, 2878), False, 'import os\n'), ((3032, 3075), 're.search', 're.search', (['"""^top(?P<k>\\\\d)$"""', 'classifier_id'], {}), "('^top(?P<k>\\\\d)$', classifier_id)\n", (3041, 3075), False, 'import re\n'), ((3319, 3361), 'concept_neuron.split_train_valid_test', 'split_train_valid_test', (['X_t_', 'X_t_pos_tags'], {}), '(X_t_, X_t_pos_tags)\n', (3341, 3361), False, 'from concept_neuron import split_train_valid_test, process_sentence_pos_tags\n'), ((2547, 2570), 'os.path.exists', 'os.path.exists', (['lr_file'], {}), '(lr_file)\n', (2561, 2570), False, 'import os\n'), ((3886, 3916), 'sklearn.metrics.accuracy_score', 'accuracy_score', (['y_true', 'y_pred'], {}), '(y_true, y_pred)\n', (3900, 3916), False, 'from sklearn.metrics import accuracy_score, precision_score, recall_score\n'), ((3978, 4029), 'sklearn.metrics.precision_score', 'precision_score', (['y_true', 'y_pred'], {'average': '"""weighted"""'}), "(y_true, y_pred, average='weighted')\n", (3993, 4029), False, 'from sklearn.metrics import accuracy_score, precision_score, recall_score\n'), ((4088, 4136), 'sklearn.metrics.recall_score', 'recall_score', (['y_true', 'y_pred'], {'average': '"""weighted"""'}), "(y_true, y_pred, 
average='weighted')\n", (4100, 4136), False, 'from sklearn.metrics import accuracy_score, precision_score, recall_score\n')]
|
# The MIT License (MIT) # Copyright (c) 2014-2017 University of Bristol
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
# IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM,
# DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
# OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE
# OR OTHER DEALINGS IN THE SOFTWARE.
from hyperstream.stream import StreamInstance
from hyperstream.tool import Tool, check_input_stream_count
from hyperstream.utils import MIN_DATE, get_timedelta
from datetime import datetime
class Clock(Tool):
def __init__(self, first=MIN_DATE, stride=1.0):
"""
Simple clock ticker tool
:param first: Start of the clock
:param stride: Tick stride as timedelta
"""
super(Clock, self).__init__(first=first, stride=stride)
if not isinstance(first, datetime):
raise ValueError("Expected datetime.datetime, got {}".format(first.__type__.__name__))
self._stride = get_timedelta(stride)
def message(self, interval):
return '{} running from {} to {} with stride {}s'.format(
self.__class__.__name__, str(interval.start), str(interval.end), str(self.stride))
@check_input_stream_count(0)
def _execute(self, sources, alignment_stream, interval):
if interval.start < self.first:
interval.start = self.first
n_strides = int((interval.start - self.first).total_seconds() // self._stride.total_seconds())
t = self.first + n_strides * self._stride
while t <= interval.end:
if t > interval.start:
yield StreamInstance(t, t)
t += self._stride
|
[
"hyperstream.stream.StreamInstance",
"hyperstream.utils.get_timedelta",
"hyperstream.tool.check_input_stream_count"
] |
[((2009, 2036), 'hyperstream.tool.check_input_stream_count', 'check_input_stream_count', (['(0)'], {}), '(0)\n', (2033, 2036), False, 'from hyperstream.tool import Tool, check_input_stream_count\n'), ((1786, 1807), 'hyperstream.utils.get_timedelta', 'get_timedelta', (['stride'], {}), '(stride)\n', (1799, 1807), False, 'from hyperstream.utils import MIN_DATE, get_timedelta\n'), ((2421, 2441), 'hyperstream.stream.StreamInstance', 'StreamInstance', (['t', 't'], {}), '(t, t)\n', (2435, 2441), False, 'from hyperstream.stream import StreamInstance\n')]
|
# -*- coding:utf-8 -*-
# coding=<utf8>
from django.db import models
# Models for logging user actions on assets
class Logging(models.Model):
user = models.CharField(max_length=140)
request = models.TextField(blank = True, null = True)
goal = models.TextField(blank = True, null = True)
done = models.BooleanField(default=False)
datetime = models.DateTimeField()
def __unicode__(self):
return str(self.id)+';'.join((str(self.datetime),self.user,self.goal,str(self.done)))
|
[
"django.db.models.CharField",
"django.db.models.TextField",
"django.db.models.DateTimeField",
"django.db.models.BooleanField"
] |
[((169, 201), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(140)'}), '(max_length=140)\n', (185, 201), False, 'from django.db import models\n'), ((216, 255), 'django.db.models.TextField', 'models.TextField', ([], {'blank': '(True)', 'null': '(True)'}), '(blank=True, null=True)\n', (232, 255), False, 'from django.db import models\n'), ((271, 310), 'django.db.models.TextField', 'models.TextField', ([], {'blank': '(True)', 'null': '(True)'}), '(blank=True, null=True)\n', (287, 310), False, 'from django.db import models\n'), ((326, 360), 'django.db.models.BooleanField', 'models.BooleanField', ([], {'default': '(False)'}), '(default=False)\n', (345, 360), False, 'from django.db import models\n'), ((376, 398), 'django.db.models.DateTimeField', 'models.DateTimeField', ([], {}), '()\n', (396, 398), False, 'from django.db import models\n')]
|
from directory_constants.choices import COUNTRY_CHOICES
from django import forms
from django.conf import settings
from django.forms import Select
from django.template.loader import render_to_string
from django.utils import translation
from directory_components.forms import fields
from directory_components import helpers
__all__ = [
'CountryForm',
'DirectoryComponentsFormMixin',
'Form',
'get_country_form_initial_data',
'get_language_form_initial_data',
'LanguageForm',
]
BLANK_COUNTRY_CHOICE = [("", "Select a country")]
COUNTRIES = BLANK_COUNTRY_CHOICE + COUNTRY_CHOICES
class DirectoryComponentsFormMixin:
use_required_attribute = False
error_css_class = 'form-group-error'
def __str__(self):
return render_to_string('directory_components/form_widgets/form.html', {'form': self})
class Form(DirectoryComponentsFormMixin, forms.Form):
pass
class CountryForm(Form):
country = fields.ChoiceField(
label='Country',
widget=Select(attrs={'id': 'great-header-country-select'}),
choices=COUNTRIES
)
def get_country_form_initial_data(request):
return {
'country': helpers.get_user_country(request).upper() or None
}
class LanguageForm(forms.Form):
lang = fields.ChoiceField(
widget=Select(attrs={'id': 'great-header-language-select'}),
choices=[] # set by __init__
)
def __init__(self, language_choices=settings.LANGUAGES, *args, **kwargs):
super().__init__(*args, **kwargs)
self.fields['lang'].choices = language_choices
def is_language_available(self, language_code):
language_codes = [code for code, _ in self.fields['lang'].choices]
return language_code in language_codes
def get_language_form_initial_data():
return {
'lang': translation.get_language()
}
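# Example usage sketch (the view wiring below is an assumption, not part of this module):
# form = LanguageForm(language_choices=settings.LANGUAGES, data=request.GET)
# if form.is_valid() and form.is_language_available(form.cleaned_data['lang']):
#     translation.activate(form.cleaned_data['lang'])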
|
[
"directory_components.helpers.get_user_country",
"django.template.loader.render_to_string",
"django.utils.translation.get_language",
"django.forms.Select"
] |
[((758, 837), 'django.template.loader.render_to_string', 'render_to_string', (['"""directory_components/form_widgets/form.html"""', "{'form': self}"], {}), "('directory_components/form_widgets/form.html', {'form': self})\n", (774, 837), False, 'from django.template.loader import render_to_string\n'), ((1821, 1847), 'django.utils.translation.get_language', 'translation.get_language', ([], {}), '()\n', (1845, 1847), False, 'from django.utils import translation\n'), ((1004, 1055), 'django.forms.Select', 'Select', ([], {'attrs': "{'id': 'great-header-country-select'}"}), "(attrs={'id': 'great-header-country-select'})\n", (1010, 1055), False, 'from django.forms import Select\n'), ((1303, 1355), 'django.forms.Select', 'Select', ([], {'attrs': "{'id': 'great-header-language-select'}"}), "(attrs={'id': 'great-header-language-select'})\n", (1309, 1355), False, 'from django.forms import Select\n'), ((1167, 1200), 'directory_components.helpers.get_user_country', 'helpers.get_user_country', (['request'], {}), '(request)\n', (1191, 1200), False, 'from directory_components import helpers\n')]
|
import json
import numpy
from bm.controllers.prediction.ModelController import predict_values_from_model
from bm.db_helper.AttributesHelper import get_features, get_model_name, get_labels
class NpEncoder(json.JSONEncoder):
def default(self, obj):
if isinstance(obj, (numpy.int_, numpy.intc, numpy.intp, numpy.int8,
numpy.int16, numpy.int32, numpy.int64, numpy.uint8,
numpy.uint16, numpy.uint32, numpy.uint64)):
return int(obj)
elif isinstance(obj, (numpy.float_, numpy.float16, numpy.float32,
numpy.float64)):
return float(obj)
        elif isinstance(obj, (numpy.ndarray,)):
            return obj.tolist()
return json.JSONEncoder.default(self, obj)
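# Example: json.dumps({'n': numpy.int64(3), 'arr': numpy.arange(2)}, cls=NpEncoder)
# returns '{"n": 3, "arr": [0, 1]}'; plain json.dumps would raise TypeError on these numpy types.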
def predictvalues(content):
model_name = get_model_name()
features_list = get_features()
lables_list = get_labels()
testing_values = []
for i in features_list:
feature_value = str(content[i])
final_feature_value = feature_value # float(feature_value) if feature_value.isnumeric() else feature_value
testing_values.append(final_feature_value)
predicted_value = predict_values_from_model(model_name, testing_values)
# Create predicted values json object
predicted_values_json = {}
for j in range(len(predicted_value)):
for i in range(len(lables_list)):
bb = predicted_value[j][i]
predicted_values_json[lables_list[i]] = predicted_value[j][i]
# NpEncoder = NpEncoder(json.JSONEncoder)
json_data = json.dumps(predicted_values_json, cls=NpEncoder)
return json_data
def getplotiamge(content):
return 0
def getmodelfeatures():
features_list = get_features()
features_json = {}
j = 0
for i in features_list:
yy = str(i)
features_json[i] = i
j += 1
# NpEncoder = NpEncoder(json.JSONEncoder)
json_data = json.dumps(features_json, cls=NpEncoder)
return json_data
def getmodellabels():
labels_list = get_labels()
labelss_json = {}
j = 0
for i in labels_list:
yy = str(i)
labelss_json[i] = i
j += 1
# NpEncoder = NpEncoder(json.JSONEncoder)
json_data = json.dumps(labelss_json, cls=NpEncoder)
return json_data
def getmodelprofile(contents):
return 0
def nomodelfound():
no_model_found = {'no_model':'No Model found' }
json_data = json.dumps(no_model_found, cls=NpEncoder)
return json_data
|
[
"bm.db_helper.AttributesHelper.get_labels",
"bm.db_helper.AttributesHelper.get_model_name",
"json.dumps",
"bm.controllers.prediction.ModelController.predict_values_from_model",
"bm.db_helper.AttributesHelper.get_features",
"json.JSONEncoder.default"
] |
[((873, 889), 'bm.db_helper.AttributesHelper.get_model_name', 'get_model_name', ([], {}), '()\n', (887, 889), False, 'from bm.db_helper.AttributesHelper import get_features, get_model_name, get_labels\n'), ((910, 924), 'bm.db_helper.AttributesHelper.get_features', 'get_features', ([], {}), '()\n', (922, 924), False, 'from bm.db_helper.AttributesHelper import get_features, get_model_name, get_labels\n'), ((943, 955), 'bm.db_helper.AttributesHelper.get_labels', 'get_labels', ([], {}), '()\n', (953, 955), False, 'from bm.db_helper.AttributesHelper import get_features, get_model_name, get_labels\n'), ((1236, 1289), 'bm.controllers.prediction.ModelController.predict_values_from_model', 'predict_values_from_model', (['model_name', 'testing_values'], {}), '(model_name, testing_values)\n', (1261, 1289), False, 'from bm.controllers.prediction.ModelController import predict_values_from_model\n'), ((1796, 1810), 'bm.db_helper.AttributesHelper.get_features', 'get_features', ([], {}), '()\n', (1808, 1810), False, 'from bm.db_helper.AttributesHelper import get_features, get_model_name, get_labels\n'), ((1998, 2038), 'json.dumps', 'json.dumps', (['features_json'], {'cls': 'NpEncoder'}), '(features_json, cls=NpEncoder)\n', (2008, 2038), False, 'import json\n'), ((2103, 2115), 'bm.db_helper.AttributesHelper.get_labels', 'get_labels', ([], {}), '()\n', (2113, 2115), False, 'from bm.db_helper.AttributesHelper import get_features, get_model_name, get_labels\n'), ((2299, 2338), 'json.dumps', 'json.dumps', (['labelss_json'], {'cls': 'NpEncoder'}), '(labelss_json, cls=NpEncoder)\n', (2309, 2338), False, 'import json\n'), ((2496, 2537), 'json.dumps', 'json.dumps', (['no_model_found'], {'cls': 'NpEncoder'}), '(no_model_found, cls=NpEncoder)\n', (2506, 2537), False, 'import json\n'), ((790, 825), 'json.JSONEncoder.default', 'json.JSONEncoder.default', (['self', 'obj'], {}), '(self, obj)\n', (814, 825), False, 'import json\n'), ((1636, 1684), 'json.dumps', 'json.dumps', (['predicted_values_json'], {'cls': 'NpEncoder'}), '(predicted_values_json, cls=NpEncoder)\n', (1646, 1684), False, 'import json\n')]
|
import numpy as np
import random as rnd
import pdb
def dist(loc1,loc2):
return np.sqrt((loc1[0]-loc2[0])**2 + (loc2[1]-loc1[1])**2)
#### BUG WHEN LEN(x) != LEN(y)
class Generate_field():
def __init__(self,a,b,n,x,y,opt=''):
self.xlen=len(x)
self.ylen=len(y)
self.a = a*rnd.uniform(0.7, 1.3)
self.b = b*rnd.uniform(0.7, 1.3)
self.x = x
self.y = y
self.n = n
self.opt = opt
        if not isinstance(self.n, (list, tuple)):  # only an integer eddy count is supported
self.eddies = {'eddy_n%s' % ii:{'loc':[[rnd.randint(0,self.xlen-1),\
rnd.randint(0,self.ylen-1)]],'grow':True,\
'radius':[self.a,self.b],'angle':rnd.uniform(0, 2*np.pi),\
'amp':rnd.choice([-1,1])*rnd.uniform(0.7, 1.3)} for ii in range(self.n)}
else:
raise ValueError("No right input.")
def go_right(self,indexs,step):
return [0,step]
def go_upright(self,indexs,step):
return [step,step]
def go_up(self,indexs,step):
return [step,0]
def go_upleft(self,indexs,step):
return [step,-step]
def go_left(self,indexs,step):
return [0,-step]
def go_downleft(self,indexs,step):
return [-step,-step]
def go_down(self,indexs,step):
return [-step,0]
def go_downright(self,indexs,step):
return [-step,step]
def twoD_Gaussian(self, coords, sigma_x, sigma_y, theta, slopex=0, slopey=0, offset=0):
'''
*************** twoD_Gaussian *******************
Build a 2D gaussian.
Notes:
            Remember to do g.ravel().reshape(len(x),len(y)) for plotting purposes.
Args:
coords [x,y] (list|array): Coordinates in x and y.
            amplitude (float): Amplitude of the Gaussian.
            xo, yo (float): Center of the Gaussian.
sigma_x,sigma_y (float): Deviation.
theta (Float): Orientation.
offset (Float): Gaussian Offset.
Returns:
g.ravel() (list|array) - Gaussian surface in a list.
Usage:
Check scan_eddym function.
'''
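        # The surface built below is the standard rotated 2D Gaussian,
        #   g(x, y) = amplitude * exp(-(a*(x - xo)**2 + 2*b*(x - xo)*(y - yo) + c*(y - yo)**2)),
        # where a, b and c combine sigma_x, sigma_y and the rotation angle theta.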
x=coords[0]
y=coords[1]
amplitude = coords[2]
xo = float(coords[3])
yo = float(coords[4])
xo = float(xo)
yo = float(yo)
        if sigma_x != 0 and sigma_y != 0:  # both sigmas must be non-zero to avoid dividing by zero below
a = (np.cos(theta)**2)/(2*sigma_x**2) + (np.sin(theta)**2)/(2*sigma_y**2)
b = -(np.sin(2*theta))/(4*sigma_x**2) + (np.sin(2*theta))/(4*sigma_y**2)
c = (np.sin(theta)**2)/(2*sigma_x**2) + (np.cos(theta)**2)/(2*sigma_y**2)
g = amplitude*np.exp( - (a*((x-xo)**2) + 2*b*(x-xo)*(y-yo) + c*((y-yo)**2)))
else:
g = (x-xo)*0 + (y-yo)*0
return g.ravel()
def checkposition(self,away_val=5,loc=False):
if loc == True:
eddies_loc=[[rnd.randint(0,self.xlen-1),rnd.randint(0,self.ylen-1)] for key,item in self.eddies.items()]
else:
eddies_loc=[item['loc'][-1] for key,item in self.eddies.items()]
for key1,item1 in self.eddies.items():
xc1=item1['loc'][0][0]
yc1=item1['loc'][0][1]
distance=np.array([dist([self.x[xc1],self.y[yc1]],[self.x[ii],self.y[jj]]) for ii,jj in eddies_loc])
distance[distance==0]=away_val*self.a
checker = ((distance < away_val*self.a).any() or (distance < away_val*self.b).any() ) or loc==True
count = 0
            while checker and count < 10000:  # give up after 10000 relocation attempts
newx=rnd.randint(0,self.xlen-1)
newy=rnd.randint(0,self.ylen-1)
self.eddies[key1]['loc']=[[newx, newy]]
eddies_loc=[item['loc'][-1] for key,item in self.eddies.items()]
#pdb.set_trace()
xc1=newx
yc1=newy
distance=np.array([dist([self.x[xc1],self.y[yc1]],[self.x[ii],self.y[jj]]) for ii,jj in eddies_loc])
numzeros = [ii for ii in distance if ii == 0]
if len(numzeros) <= 1:
distance[distance==0]=np.inf
else:
distance[distance==0] = away_val*self.a
checker = ((distance < away_val*self.a).any() or (distance < away_val*self.b).any() )
count = count + 1
if loc == True:
return self.eddies
def make_random_walk(self,indexs, steps):
move_dict = {
1: self.go_up,
2: self.go_right,
3: self.go_left,
4: self.go_down,
5: self.go_downleft,
6: self.go_downright,
7: self.go_upleft,
8: self.go_upright,
}
#for _ in range(steps):
for ii in indexs:
move_in_a_direction = move_dict[rnd.randint(1, 8)]
movcood=move_in_a_direction(ii,steps)
return indexs[0]+movcood[0],indexs[1]+movcood[1]
def assemble_field(self, N,margin=50):
data=np.zeros((N,self.xlen+2*margin,self.ylen+2*margin))
for t in range(N):
#pdb.set_trace()
if self.opt == 'no_interaction' or self.opt == 'Nint':
self.eddies=self.checkposition(away_val=5,loc=True)
else:
pass
for keys, item in self.eddies.items():
gauss=self.twoD_Gaussian(self.pass_args(keys,margin),item['radius'][0], item['radius'][1], item['angle']).reshape(np.shape(data[0,:,:]))
data[t,:,:]=data[t,:,:]+gauss
return data
def reconstruct_field(self):
data=np.zeros((self.xlen,self.ylen))
for keys, item in self.eddies.items():
gauss=self.twoD_Gaussian(self.pass_args(keys),item['radius'][0], item['radius'][1], item['angle']).reshape(np.shape(data))
data=data+gauss
return data
def pass_args(self,key,margin=50):
self.x = np.linspace(min(self.x),max(self.x),self.xlen+2*margin)
self.y = np.linspace(min(self.y),max(self.y),self.ylen+2*margin)
X,Y=np.meshgrid(self.x,self.y)
if self.opt == 'interaction' or self.opt == 'int':
xloc=rnd.randint(0,self.xlen-1)+margin
yloc=rnd.randint(0,self.ylen-1)+margin
eddy_parms=(X,Y,self.eddies[key]['amp'],self.x[xloc],self.y[yloc])
else:
eddy_parms=(X,Y,self.eddies[key]['amp'],self.x[self.eddies[key]['loc'][0][0]+margin],self.y[self.eddies[key]['loc'][0][1]+margin])
return eddy_parms
|
[
"numpy.meshgrid",
"random.randint",
"random.uniform",
"numpy.zeros",
"random.choice",
"numpy.shape",
"numpy.sin",
"numpy.exp",
"numpy.cos",
"numpy.sqrt"
] |
[((88, 148), 'numpy.sqrt', 'np.sqrt', (['((loc1[0] - loc2[0]) ** 2 + (loc2[1] - loc1[1]) ** 2)'], {}), '((loc1[0] - loc2[0]) ** 2 + (loc2[1] - loc1[1]) ** 2)\n', (95, 148), True, 'import numpy as np\n'), ((5131, 5192), 'numpy.zeros', 'np.zeros', (['(N, self.xlen + 2 * margin, self.ylen + 2 * margin)'], {}), '((N, self.xlen + 2 * margin, self.ylen + 2 * margin))\n', (5139, 5192), True, 'import numpy as np\n'), ((5730, 5762), 'numpy.zeros', 'np.zeros', (['(self.xlen, self.ylen)'], {}), '((self.xlen, self.ylen))\n', (5738, 5762), True, 'import numpy as np\n'), ((6190, 6217), 'numpy.meshgrid', 'np.meshgrid', (['self.x', 'self.y'], {}), '(self.x, self.y)\n', (6201, 6217), True, 'import numpy as np\n'), ((308, 329), 'random.uniform', 'rnd.uniform', (['(0.7)', '(1.3)'], {}), '(0.7, 1.3)\n', (319, 329), True, 'import random as rnd\n'), ((349, 370), 'random.uniform', 'rnd.uniform', (['(0.7)', '(1.3)'], {}), '(0.7, 1.3)\n', (360, 370), True, 'import random as rnd\n'), ((2708, 2786), 'numpy.exp', 'np.exp', (['(-(a * (x - xo) ** 2 + 2 * b * (x - xo) * (y - yo) + c * (y - yo) ** 2))'], {}), '(-(a * (x - xo) ** 2 + 2 * b * (x - xo) * (y - yo) + c * (y - yo) ** 2))\n', (2714, 2786), True, 'import numpy as np\n'), ((3609, 3638), 'random.randint', 'rnd.randint', (['(0)', '(self.xlen - 1)'], {}), '(0, self.xlen - 1)\n', (3620, 3638), True, 'import random as rnd\n'), ((3657, 3686), 'random.randint', 'rnd.randint', (['(0)', '(self.ylen - 1)'], {}), '(0, self.ylen - 1)\n', (3668, 3686), True, 'import random as rnd\n'), ((4947, 4964), 'random.randint', 'rnd.randint', (['(1)', '(8)'], {}), '(1, 8)\n', (4958, 4964), True, 'import random as rnd\n'), ((5928, 5942), 'numpy.shape', 'np.shape', (['data'], {}), '(data)\n', (5936, 5942), True, 'import numpy as np\n'), ((6293, 6322), 'random.randint', 'rnd.randint', (['(0)', '(self.xlen - 1)'], {}), '(0, self.xlen - 1)\n', (6304, 6322), True, 'import random as rnd\n'), ((6344, 6373), 'random.randint', 'rnd.randint', (['(0)', '(self.ylen - 1)'], {}), '(0, self.ylen - 1)\n', (6355, 6373), True, 'import random as rnd\n'), ((715, 740), 'random.uniform', 'rnd.uniform', (['(0)', '(2 * np.pi)'], {}), '(0, 2 * np.pi)\n', (726, 740), True, 'import random as rnd\n'), ((2564, 2581), 'numpy.sin', 'np.sin', (['(2 * theta)'], {}), '(2 * theta)\n', (2570, 2581), True, 'import numpy as np\n'), ((2947, 2976), 'random.randint', 'rnd.randint', (['(0)', '(self.xlen - 1)'], {}), '(0, self.xlen - 1)\n', (2958, 2976), True, 'import random as rnd\n'), ((2974, 3003), 'random.randint', 'rnd.randint', (['(0)', '(self.ylen - 1)'], {}), '(0, self.ylen - 1)\n', (2985, 3003), True, 'import random as rnd\n'), ((5594, 5617), 'numpy.shape', 'np.shape', (['data[0, :, :]'], {}), '(data[0, :, :])\n', (5602, 5617), True, 'import numpy as np\n'), ((771, 790), 'random.choice', 'rnd.choice', (['[-1, 1]'], {}), '([-1, 1])\n', (781, 790), True, 'import random as rnd\n'), ((790, 811), 'random.uniform', 'rnd.uniform', (['(0.7)', '(1.3)'], {}), '(0.7, 1.3)\n', (801, 811), True, 'import random as rnd\n'), ((2442, 2455), 'numpy.cos', 'np.cos', (['theta'], {}), '(theta)\n', (2448, 2455), True, 'import numpy as np\n'), ((2478, 2491), 'numpy.sin', 'np.sin', (['theta'], {}), '(theta)\n', (2484, 2491), True, 'import numpy as np\n'), ((2529, 2546), 'numpy.sin', 'np.sin', (['(2 * theta)'], {}), '(2 * theta)\n', (2535, 2546), True, 'import numpy as np\n'), ((2613, 2626), 'numpy.sin', 'np.sin', (['theta'], {}), '(theta)\n', (2619, 2626), True, 'import numpy as np\n'), ((2649, 2662), 'numpy.cos', 'np.cos', (['theta'], {}), 
'(theta)\n', (2655, 2662), True, 'import numpy as np\n'), ((562, 591), 'random.randint', 'rnd.randint', (['(0)', '(self.xlen - 1)'], {}), '(0, self.xlen - 1)\n', (573, 591), True, 'import random as rnd\n'), ((615, 644), 'random.randint', 'rnd.randint', (['(0)', '(self.ylen - 1)'], {}), '(0, self.ylen - 1)\n', (626, 644), True, 'import random as rnd\n')]
|
"""Functions for getting data needed to fit the models."""
import bs4
from datetime import datetime
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import requests
from tqdm import tqdm
from typing import Union
from urllib.error import HTTPError
import urllib.request, json
import os
from datetime import timedelta, date
pd.options.mode.chained_assignment = None # default='warn'
JHU_FILTER_DEFAULTS = {'confirmed': 5, 'recovered': 1, 'deaths': 0}
COVIDTRACKER_FILTER_DEFAULTS = {'cum_cases': 5, 'cum_recover': 1, 'cum_deaths': 0}
US_STATE_ABBREV = {
'Alabama': 'US_AL',
'Alaska': 'US_AK',
'American Samoa': 'US_AS',
'Arizona': 'US_AZ',
'Arkansas': 'US_AR',
'California': 'US_CA',
'Colorado': 'US_CO',
'Connecticut': 'US_CT',
'Delaware': 'US_DE',
'District of Columbia': 'US_DC',
'Florida': 'US_FL',
'Georgia': 'US_GA',
'Guam': 'US_GU',
'Hawaii': 'US_HI',
'Idaho': 'US_ID',
'Illinois': 'US_IL',
'Indiana': 'US_IN',
'Iowa': 'US_IA',
'Kansas': 'US_KS',
'Kentucky': 'US_KY',
'Louisiana': 'US_LA',
'Maine': 'US_ME',
'Maryland': 'US_MD',
'Massachusetts': 'US_MA',
'Michigan': 'US_MI',
'Minnesota': 'US_MN',
'Mississippi': 'US_MS',
'Missouri': 'US_MO',
'Montana': 'US_MT',
'Nebraska': 'US_NE',
'Nevada': 'US_NV',
'New Hampshire': 'US_NH',
'New Jersey': 'US_NJ',
'New Mexico': 'US_NM',
'New York': 'US_NY',
'North Carolina': 'US_NC',
'North Dakota': 'US_ND',
'Northern Mariana Islands':'US_MP',
'Ohio': 'US_OH',
'Oklahoma': 'US_OK',
'Oregon': 'US_OR',
'Pennsylvania': 'US_PA',
'Puerto Rico': 'US_PR',
'Rhode Island': 'US_RI',
'South Carolina': 'US_SC',
'South Dakota': 'US_SD',
'Tennessee': 'US_TN',
'Texas': 'US_TX',
'Utah': 'US_UT',
'Vermont': 'US_VT',
'Virgin Islands': 'US_VI',
'Virginia': 'US_VA',
'Washington': 'US_WA',
'West Virginia': 'US_WV',
'Wisconsin': 'US_WI',
'Wyoming': 'US_WY'
}
def get_jhu(data_path: str, filter_: Union[dict, bool] = True) -> None:
"""Gets data from Johns Hopkins CSSEGIS (countries only).
https://coronavirus.jhu.edu/map.html
https://github.com/CSSEGISandData/COVID-19
Args:
data_path (str): Full path to data directory.
Returns:
None
"""
# Where JHU stores their data
url_template = ("https://raw.githubusercontent.com/CSSEGISandData/"
"COVID-19/master/csse_covid_19_data/"
"csse_covid_19_time_series/time_series_covid19_%s_%s.csv")
# Scrape the data
dfs = {}
for region in ['global', 'US']:
dfs[region] = {}
for kind in ['confirmed', 'deaths', 'recovered']:
url = url_template % (kind, region) # Create the full data URL
try:
df = pd.read_csv(url) # Download the data into a dataframe
except HTTPError:
print("Could not download data for %s, %s" % (kind, region))
else:
if region == 'global':
has_no_province = df['Province/State'].isnull()
# Whole countries only; use country name as index
df1 = df[has_no_province].set_index('Country/Region')
more_dfs = []
for country in ['China', 'Canada', 'Australia']:
if country == 'Canada' and kind in 'recovered':
continue
is_c = df['Country/Region'] == country
df2 = df[is_c].sum(axis=0, skipna=False).to_frame().T
df2['Country/Region'] = country
df2 = df2.set_index('Country/Region')
more_dfs.append(df2)
df = pd.concat([df1] + more_dfs)
elif region == 'US':
# Use state name as index
# for k, v in US_STATE_ABBREV.items(): # get US state abbrev
# if not US_STATE_ABBREV[k].startswith('US_'):
# US_STATE_ABBREV[k] = 'US_' + v # Add 'US_' to abbrev
df.replace(US_STATE_ABBREV, inplace=True)
df = df.set_index('Province_State')
df = df.groupby('Province_State').sum() # combine counties to create state level data
df = df[[x for x in df if any(year in x for year in ['20', '21'])]] # Use only data columns
# 20 or 21 signifies 2020 or 2021
dfs[region][kind] = df # Add to dictionary of dataframes
# Generate a list of countries that have "good" data,
# according to these criteria:
good_countries = get_countries(dfs['global'], filter_=filter_)
# For each "good" country,
# reformat and save that data in its own .csv file.
source = dfs['global']
for country in tqdm(good_countries, desc='Countries'): # For each country
if country in ['Diamond Princess', 'Grand Princess', 'MS Zaandam', 'Samoa',
'Vanuatu', 'Marshall Islands', 'US', 'Micronesia','Kiribati']:
print("Skipping {}".format(country))
continue
# If we have data in the downloaded JHU files for that country
if country in source['confirmed'].index:
df = pd.DataFrame(columns=['dates2', 'cum_cases', 'cum_deaths',
'cum_recover', 'new_cases',
'new_deaths', 'new_recover',
'new_uninfected'])
df['dates2'] = source['confirmed'].columns
df['dates2'] = df['dates2'].apply(fix_jhu_dates)
df['cum_cases'] = source['confirmed'].loc[country].values
df['cum_deaths'] = source['deaths'].loc[country].values
df['cum_recover'] = source['recovered'].loc[country].values
df[['new_cases', 'new_deaths', 'new_recover']] = \
df[['cum_cases', 'cum_deaths', 'cum_recover']].diff()
df['new_uninfected'] = df['new_recover'] + df['new_deaths']
try:
population = get_population_count(data_path, country)
df['population'] = population
except:
pass
# Fill NaN with 0 and convert to int
dfs[country] = df.set_index('dates2').fillna(0).astype(int)
dfs[country].to_csv(data_path / ('covidtimeseries_%s.csv' % country))
else:
print("No data for %s" % country)
source = dfs['US']
states = source['confirmed'].index.tolist()
us_recovery_data = covid_tracking_recovery(data_path)
for state in tqdm(states, desc='US States'): # For each country
if state in ['Diamond Princess', 'Grand Princess', 'MS Zaandam', 'US_AS']:
print("Skipping {}".format(state))
continue
# If we have data in the downloaded JHU files for that country
if state in source['confirmed'].index:
df = pd.DataFrame(columns=['dates2', 'cum_cases', 'cum_deaths',
'new_cases','new_deaths','new_uninfected'])
df['dates2'] = source['confirmed'].columns
df['dates2'] = df['dates2'].apply(fix_jhu_dates)
df['cum_cases'] = source['confirmed'].loc[state].values
df['cum_deaths'] = source['deaths'].loc[state].values
df[['new_cases', 'new_deaths']] = df[['cum_cases', 'cum_deaths']].diff()
# add recovery data
df.set_index('dates2', inplace=True)
df = df.merge(us_recovery_data[state], on='dates2', how='left')
df['tmp_new_recover'] = df['new_recover'].fillna(0).astype(int) # create temp new recover for
df['new_uninfected'] = df['tmp_new_recover'] + df['new_deaths'] # new uninfected calculation
df = df.fillna(-1).astype(int)
df = df.drop(['tmp_new_recover'], axis=1)
try:
population = get_population_count(data_path, state)
df['population'] = population
except:
pass
dfs[state] = df
dfs[state].to_csv(data_path /
('covidtimeseries_%s.csv' % state))
else:
print("No data for %s" % state)
def fix_jhu_dates(x):
y = datetime.strptime(x, '%m/%d/%y')
return datetime.strftime(y, '%m/%d/%y')
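# e.g. fix_jhu_dates('1/22/20') -> '01/22/20' (zero-padded, matching the date index written below)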
def fix_ct_dates(x):
return datetime.strptime(str(x), '%Y%m%d')
def get_countries(d: pd.DataFrame, filter_: Union[dict, bool] = True):
"""Get a list of countries from a global dataframe optionally passing
a quality check
Args:
d (pd.DataFrame): Data from JHU tracker (e.g. df['global]).
filter (bool, optional): Whether to filter by quality criteria.
"""
good = set(d['confirmed'].index)
if filter_ and not isinstance(filter_, dict):
filter_ = JHU_FILTER_DEFAULTS
if filter_:
for key, minimum in filter_.items():
enough = d[key].index[d[key].max(axis=1) >= minimum].tolist()
good = good.intersection(enough)
bad = set(d['confirmed'].index).difference(good)
# print("JHU data acceptable for %s" % ','.join(good))
# print("JHU data not acceptable for %s" % ','.join(bad))
return good
def get_population_count(data_path:str, roi):
""" Check if we have population count for roi and
add to timeseries df if we do.
Args:
data_path (str): Full path to data directory.
roi (str): Region.
Returns:
population (int): Population count for ROI (if exists).
"""
try: # open population file
df_pop = pd.read_csv(data_path / 'population_estimates.csv')
except:
print("Missing population_estimates.csv in data-path")
try:
population = df_pop.query('roi == "{}"'.format(roi))['population'].values
except:
print("{} population estimate not found in population_estimates.csv".format(args.roi))
return int(population)
def covid_tracking_recovery(data_path: str):
"""Gets archived US recovery data from The COVID Tracking Project.
https://covidtracking.com
Args:
data_path (str): Full path to data directory.
Returns:
ctp_dfs (dict): Dictionary containing US States (keys) and dataframes
containing dates, recovery data (values).
"""
archived_data = data_path / 'covid-tracking-project-recovery.csv'
df_raw = pd.read_csv(archived_data)
states = df_raw['state'].unique()
ctp_dfs = {}
for state in states: # For each country
source = df_raw[df_raw['state'] == state] # Only the given state
df = pd.DataFrame(columns=['dates2','cum_recover','new_recover'])
df['dates2'] = source['date'].apply(fix_ct_dates) # Convert date format
# first check if roi reports recovery data as recovered
if source['recovered'].isnull().all() == False:
df['cum_recover'] = source['recovered'].values
# check if roi reports recovery data as hospitalizedDischarged
elif source['hospitalizedDischarged'].isnull().all() == False:
df['cum_recover'] = source['hospitalizedDischarged'].values
else:
df['cum_recover'] = np.NaN
df.sort_values(by=['dates2'], inplace=True) # sort by datetime obj before converting to string
df['dates2'] = pd.to_datetime(df['dates2']).dt.strftime('%m/%d/%y') # convert dates to string
df = df.set_index('dates2') # Convert to int
df['new_recover'] = df['cum_recover'].diff()
ctp_dfs['US_'+state] = df
return ctp_dfs
def get_canada(data_path: str, filter_: Union[dict, bool] = True,
fixes: bool = False) -> None:
""" Gets data from Canada's Open Covid group for Canadian Provinces.
https://opencovid.ca/
"""
dfs = [] # we will append dfs for cases, deaths, recovered here
# URL for API call to get Province-level timeseries data starting on Jan 22 2020
url_template = 'https://api.opencovid.ca/timeseries?stat=%s&loc=prov&date=01-22-2020'
for kind in ['cases', 'mortality', 'recovered']:
url_path = url_template % kind # Create the full data URL
with urllib.request.urlopen(url_path) as url:
data = json.loads(url.read().decode())
source = pd.json_normalize(data[kind])
if kind == 'cases':
source.drop('cases', axis=1, inplace=True) # removing this column so
# we can index into date on all 3 dfs at same position
source.rename(columns={source.columns[1]: "date" }, inplace=True)
dfs.append(source)
cases = dfs[0]
deaths = dfs[1]
recovered = dfs[2]
# combine dfs
df_rawtemp = cases.merge(recovered, on=['date', 'province'], how='outer')
df_raw = df_rawtemp.merge(deaths, on=['date', 'province'], how='outer')
df_raw.fillna(0, inplace=True)
provinces = ['Alberta', 'BC', 'Manitoba', 'New Brunswick', 'NL',
'Nova Scotia', 'Nunavut', 'NWT', 'Ontario', 'PEI', 'Quebec',
'Saskatchewan', 'Yukon']
# Export timeseries data for each province
for province in tqdm(provinces, desc='Canadian Provinces'):
source = df_raw[df_raw['province'] == province] # Only the given province
df = pd.DataFrame(columns=['dates2','cum_cases', 'cum_deaths',
'cum_recover', 'new_cases',
'new_deaths', 'new_recover',
'new_uninfected'])
df['dates2'] = source['date'].apply(fix_canada_dates) # Convert date format
df['cum_cases'] = source['cumulative_cases'].values
df['cum_deaths'] = source['cumulative_deaths'].values
df['cum_recover'] = source['cumulative_recovered'].values
df[['new_cases', 'new_deaths', 'new_recover']] = \
df[['cum_cases', 'cum_deaths', 'cum_recover']].diff()
df['new_uninfected'] = df['new_recover'] + df['new_deaths']
try:
population = get_population_count(data_path, 'CA_' + province)
df['population'] = population
except:
pass
df.sort_values(by=['dates2'], inplace=True) # sort by datetime obj before converting to string
df['dates2'] = pd.to_datetime(df['dates2']).dt.strftime('%m/%d/%y') # convert dates to string
df = df.set_index('dates2').fillna(0).astype(int) # Fill NaN with 0 and convert to int
df.to_csv(data_path / ('covidtimeseries_CA_%s.csv' % province))
def fix_canada_dates(x):
return datetime.strptime(x, '%d-%m-%Y')
def get_brazil(data_path: str, filter_: Union[dict, bool] = True,
fixes: bool = False) -> None:
""" Get state-level data for Brazil.
https://github.com/wcota/covid19br (<NAME>)
"""
url = "https://raw.githubusercontent.com/wcota/covid19br/master/cases-brazil-states.csv"
try:
df_raw = pd.read_csv(url)
except HTTPError:
print("Could not download state-level data for Brazil")
state_code = {'AC':'Acre', 'AL':'Alagoas', 'AM':'Amazonas', 'AP':'Amapa',
'BA':'Bahia','CE':'Ceara', 'DF':'Distrito Federal',
'ES':'Espirito Santo', 'GO':'Goias', 'MA':'Maranhao',
'MG':'Minas Gerais', 'MS':'Mato Grosso do Sul', 'MT':'Mato Grosso',
'PA':'Para', 'PB':'Paraiba', 'PE':'Pernambuco', 'PI':'Piaui',
'PR':'Parana', 'RJ':'Rio de Janeiro', 'RN':'Rio Grande do Norte',
'RO':'Rondonia', 'RR':'Roraima', 'RS':'Rio Grande do Sul',
'SC':'Santa Catarina', 'SE':'Sergipe', 'SP':'Sao Paulo', 'TO':'Tocantins'}
for state in tqdm(state_code, desc='Brazilian States'):
source = df_raw[df_raw['state'] == state] # Only the given province
df = pd.DataFrame(columns=['dates2','cum_cases', 'cum_deaths',
'cum_recover', 'new_cases',
'new_deaths', 'new_recover',
'new_uninfected'])
df['dates2'] = source['date']
df['cum_cases'] = source['totalCases'].values
df['cum_deaths'] = source['deaths'].values
df['cum_recover'] = source['recovered'].values
df['new_cases'] = source['newCases'].values
df['new_deaths'] = source['newDeaths'].values
df['new_recover'] = df['cum_recover'].diff()
df['new_uninfected'] = df['new_recover'] + df['new_deaths']
try:
roi = 'BR_' + state_code[state]
population = get_population_count(data_path, roi)
df['population'] = population
except:
print("Could not add population data for {}".format(state))
pass
df.sort_values(by=['dates2'], inplace=True) # sort by datetime obj before converting to string
df['dates2'] = pd.to_datetime(df['dates2']).dt.strftime('%m/%d/%y') # convert dates to string
df = df.set_index('dates2').fillna(0).astype(int) # Fill NaN with 0 and convert to int
df.to_csv(data_path / ('covidtimeseries_BR_%s.csv' % state_code[state]))
def get_owid_tests(data_path: str, filter_: Union[dict, bool] = True,
fixes: bool = False) -> None:
""" Get testing data from Our World In Data
https://github.com/owid/covid-19-data
Add columns cum_tests and new_tests to csvs in data_path. """
url = 'https://raw.githubusercontent.com/owid/covid-19-data/master/public/data/testing/covid-testing-all-observations.csv'
src = pd.read_csv(url)
roi_codes = pd.read_csv(data_path / 'country_iso_codes.csv')
roi_codes_dict = pd.Series(roi_codes.Country.values,index=roi_codes['Alpha-3 code']).to_dict()
# trim down source dataframe
src_trim = pd.DataFrame(columns=['dates2','Alpha-3 code','cum_tests'])
src_trim['dates2'] = src['Date'].apply(fix_owid_dates).values # fix dates
src_trim['Alpha-3 code'] = src['ISO code'].values
src_trim['cum_tests'] = src['Cumulative total'].fillna(-1).astype(int).values
src_trim.set_index('dates2',inplace=True, drop=True)
src_rois = src_trim['Alpha-3 code'].unique()
unavailable_testing_data = [] # for appending rois that don't have testing data
for roi in roi_codes_dict:
if roi not in src_rois:
unavailable_testing_data.append(roi)
continue
if roi_codes_dict[roi] in ["US", "Marshall Islands", "Micronesia", "Samoa", "Vanuatu"]: # skipping because bad data
continue
try:
timeseries_path = data_path / ('covidtimeseries_%s.csv' % roi_codes_dict[roi])
df_timeseries = pd.read_csv(timeseries_path, index_col='dates2')
except FileNotFoundError as fnf_error:
print(fnf_error, 'Could not add OWID data.')
pass
for i in df_timeseries.columns: # Check if OWID testng data already included
if 'tests' in i:
df_timeseries.drop([i], axis=1, inplace=True) # drop so we can add new
src_roi = src_trim[src_trim['Alpha-3 code'] == roi] # filter rows that match roi
df_combined = df_timeseries.merge(src_roi[['cum_tests']], how='left', on='dates2')
df_combined['new_tests'] = df_combined['cum_tests'].diff()
df_combined.loc[df_combined['new_tests'] < 0, 'new_tests'] = -1 # Handle cases where
# cumulative counts decrease and new_tests becomes a large negative number
df_combined[['cum_tests', 'new_tests']] = df_combined[['cum_tests', 'new_tests']].fillna(-1).astype(int).values
df_combined = df_combined.loc[:, ~df_combined.columns.str.contains('^Unnamed')]
df_combined.to_csv(timeseries_path) # overwrite timeseries CSV
print("OWID global test results missing for: ")
for roi in roi_codes_dict:
if roi in unavailable_testing_data:
print(roi_codes_dict[roi], end=" ")
print("")
def get_owid_global_vaccines(data_path: str, filter_: Union[dict, bool] = True,
fixes: bool = False) -> None:
""" Get global vaccines data from Our World In Data
https://github.com/owid/covid-19-data
Add columns to global csvs in data_path. """
url = 'https://raw.githubusercontent.com/owid/covid-19-data/master/public/data/vaccinations/vaccinations.csv'
src = pd.read_csv(url)
src_trim = pd.DataFrame(columns=['dates2', 'Alpha-3 code', 'cum_vaccinations', 'daily_vaccinations',
'cum_people_vaccinated', 'cum_people_fully_vaccinated'])
src_trim['dates2'] = src['date'].apply(fix_owid_dates).values # fix dates
src_trim['Alpha-3 code'] = src['iso_code'].values
src_trim['cum_vaccinations'] = src['total_vaccinations'].values
src_trim['daily_vaccinations'] = src['daily_vaccinations'].values
src_trim['cum_people_vaccinated'] = src['people_vaccinated'].values
src_trim['cum_people_fully_vaccinated'] = src['people_fully_vaccinated'].values
roi_codes = pd.read_csv(data_path / 'country_iso_codes.csv')
roi_codes_dict = pd.Series(roi_codes.Country.values,index=roi_codes['Alpha-3 code']).to_dict()
# trim down source dataframe
src_trim.set_index('dates2',inplace=True, drop=True)
src_rois = src_trim['Alpha-3 code'].unique()
unavailable_testing_data = [] # for appending rois that don't have testing data
for roi in roi_codes_dict:
if roi not in src_rois:
unavailable_testing_data.append(roi)
continue
if roi_codes_dict[roi] in ["US", "Marshall Islands", "Micronesia", "Samoa", "Vanuatu"]: # skipping because no data
continue
try:
timeseries_path = data_path / ('covidtimeseries_%s.csv' % roi_codes_dict[roi])
df_timeseries = pd.read_csv(timeseries_path, index_col='dates2')
except FileNotFoundError as fnf_error:
print(fnf_error, 'Could not add OWID global vaccines data.')
pass
for i in df_timeseries.columns: # Check if OWID testing data already included
if 'vaccin' in i:
df_timeseries.drop([i], axis=1, inplace=True) # drop so we can add new
src_roi = src_trim[src_trim['Alpha-3 code'] == roi] # filter rows that match roi
df_combined = df_timeseries.merge(src_roi[['cum_vaccinations', 'daily_vaccinations', 'cum_people_vaccinated',
'cum_people_fully_vaccinated']], how='left', on='dates2')
cum_vacc_columns = ['vaccinations', 'people_vaccinated', 'people_fully_vaccinated']
df = dummy_cumulative_new_counts(roi_codes_dict[roi], df_combined, cum_vacc_columns)
df = df.loc[:, ~df.columns.str.contains('^Unnamed')]
df.to_csv(timeseries_path) # overwrite timeseries CSV
print("OWID global vaccine results missing for: ")
for roi in roi_codes_dict:
if roi in unavailable_testing_data:
print(roi_codes_dict[roi], end=" ")
print("")
def dummy_cumulative_new_counts(roi, df, columns: list):
""" There are cases where cum counts go missing and new counts get missed.
New counts spike when cumulative counts go to -1 for missing data and
the difference is taken between a new cumulative count and -1.
We don't want it to spike, and we don't want to miss new counts before the gap.
So create a dummy dataframe with forward filled cumulative counts and
perform new cases calculation, then merge those new cases back into dataframe.
Args:
roi (str): Region we are working with; used for print statements.
df (pd.DataFrame): DataFrame containing counts but not new counts.
        columns (list): List of columns (without cum_ prefix) to create new counts for.
Returns:
        df_fixed (pd.DataFrame): DataFrame containing cumulative and new counts. """
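    # e.g. a cum_* column [10, NaN, NaN, 25] -> ffill -> [10, 10, 10, 25] -> diff -> [NaN, 0, 0, 15];
    # days whose cumulative count was missing are then set to new = -1 below instead of a spurious spike.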
dfs = []
df_tmp = df.copy()
df_tmp.reset_index(inplace=True)
for col in columns:
cum_col = 'cum_' + col
dummy_cum_col = 'dummy_' + cum_col
new_col = 'new_' + col
try:
start = df_tmp[df_tmp[cum_col] > 0].index.values[0]
df_ffill = df_tmp.iloc[start:]
df_ffill.set_index('dates2', drop=True, inplace=True)
df_ffill[dummy_cum_col] = df_ffill[cum_col].ffill().astype(int).values
df_ffill[new_col] = df_ffill[dummy_cum_col].diff().astype('Int64')
# If cumulative counts are missing, set new counts to -1 so they don't become 0.
df_ffill.loc[df_ffill[cum_col].isnull(), new_col] = -1
except:
print(f'No {cum_col} data to add for {roi}.')
df_ffill[new_col] = -1
df_ffill = df_ffill[~df_ffill.index.duplicated()] # fix duplication issue
dfs.append(df_ffill[new_col])
df_new = pd.concat(dfs, axis=1)
df_new = df_new.fillna(-1).astype(int)
df_fixed = df.join(df_new)
df_fixed = df_fixed.fillna(-1).astype(int)
return df_fixed
def get_owid_us_vaccines(data_path: str, filter_: Union[dict, bool] = True,
fixes: bool = False) -> None:
""" Get US vaccines data from Our World In Data
https://github.com/owid/covid-19-data
Add columns to US csvs in data_path. """
url = 'https://raw.githubusercontent.com/owid/covid-19-data/master/public/data/vaccinations/us_state_vaccinations.csv'
src = pd.read_csv(url)
src_trim = pd.DataFrame(columns=['dates2', 'region', 'cum_vaccinations', 'daily_vaccinations',
'people_vaccinated', 'people_fully_vaccinated'])
src_trim['dates2'] = src['date'].apply(fix_owid_dates).values # fix dates
src_trim['region'] = src['location'].values
src_trim['cum_vaccinations'] = src['total_vaccinations'].values
src_trim['daily_vaccinations'] = src['daily_vaccinations'].values
src_trim['cum_people_vaccinated'] = src['people_vaccinated'].values
src_trim['cum_people_fully_vaccinated'] = src['people_fully_vaccinated'].values
src_trim.set_index('dates2', inplace=True, drop=True)
src_trim.replace("New York State", "New York", inplace=True) # fix NY name
src_rois = src_trim['region'].unique()
for roi in src_rois:
if roi in US_STATE_ABBREV:
try:
timeseries_path = data_path / ('covidtimeseries_%s.csv' % US_STATE_ABBREV[roi])
df_timeseries = pd.read_csv(timeseries_path, index_col='dates2')
except FileNotFoundError as fnf_error:
print(fnf_error, 'Could not add OWID vaccinations data.')
pass
for i in df_timeseries.columns: # Check if OWID vaccines data already included
if 'vaccin' in i:
df_timeseries.drop([i], axis=1, inplace=True) # drop so we can add new
src_roi = src_trim[src_trim['region'] == roi] # filter rows that match roi
df_combined = df_timeseries.merge(src_roi[['cum_vaccinations', 'daily_vaccinations', 'cum_people_vaccinated',
'cum_people_fully_vaccinated']], how='left', on='dates2')
cum_vacc_columns = ['vaccinations', 'people_vaccinated', 'people_fully_vaccinated']
df = dummy_cumulative_new_counts(US_STATE_ABBREV[roi], df_combined, cum_vacc_columns)
df = df.loc[:, ~df.columns.str.contains('^Unnamed')]
df.to_csv(timeseries_path) # overwrite timeseries CSV
def fix_owid_dates(x):
y = datetime.strptime(x, '%Y-%m-%d')
return datetime.strftime(y, '%m/%d/%y')
def get_jhu_us_states_tests(data_path: str, filter_: Union[dict, bool] = False) -> None:
""" Scrape JHU for US State level test results. Data is stored as a collection of
CSVs per date containing states and test results.
Args:
data_path (str): Full path to data directory.
Returns:
None
"""
url_template = "https://raw.githubusercontent.com/CSSEGISandData/COVID-19/master/csse_covid_19_data/csse_covid_19_daily_reports_us/%s.csv"
# generate a list of dates for scraping
start_dt = date(2020, 4, 12) # When JHU starts reporting
end_dt = date.today()
dates = []
delta = end_dt - start_dt
delta = delta.days
for dt in daterange(start_dt, end_dt):
dates.append(dt.strftime("%m-%d-%Y"))
# cumulative tests are named 'People_Tested' for first 200 ish days
# then cumulative tests are named 'Total_Test_Results' after 200 ish days
dfs = []
for i in tqdm(dates, desc=f'Scraping {delta} days of data across all states'):
url = url_template % i
try:
df = pd.read_csv(url)
df_trim = pd.DataFrame(columns=['Province_State', 'cum_tests', 'dates2'])
df_trim['Province_State'] = df['Province_State'].values
df_trim['dates2'] = fix_jhu_testing_dates(i)
# handle cases where column is people_tested and then switches to Total_Test_Results
if 'People_Tested' in df.columns:
df_trim['cum_tests'] = df['People_Tested'].fillna(-1).astype(int).values
dfs.append(df_trim)
if 'Total_Test_Results' in df.columns:
df_trim['cum_tests'] = df['Total_Test_Results'].fillna(-1).astype(int).values
dfs.append(df_trim)
except HTTPError:
print("Could not download tests data for %s" % i)
df_combined = pd.concat(dfs)
df_combined.sort_values(by='Province_State', inplace=True)
df_combined['Date'] = pd.to_datetime(df_combined['dates2'])
rois = df_combined['Province_State'].unique()
sorted_dfs = []
for roi in rois:
df_roi = df_combined[df_combined['Province_State'] == roi]
df_roi = df_roi.sort_values(by="Date")
df_roi['new_tests'] = df_roi['cum_tests'].diff().fillna(-1).astype(int)
sorted_dfs.append(df_roi)
df_tests = pd.concat(sorted_dfs)
df_tests.reset_index(inplace=True, drop=True)
df_tests.replace(US_STATE_ABBREV, inplace=True)
df_tests.rename(columns={'Province_State': 'roi'}, inplace=True)
# now open csvs in data_path that match rois and merge on csv to add cum_test and new_tests
rois = df_tests.roi.unique().tolist()
to_remove = ['Diamond Princess', 'Grand Princess', 'Recovered']
for i in to_remove:
if i in rois:
rois.remove(i)
for roi in rois:
csv_path = data_path / f'covidtimeseries_{roi}.csv'
try:
df_timeseries = pd.read_csv(csv_path)
except:
print(f"{csv_path} not found in data path.")
try:
for i in df_timeseries.columns: # Check if testng data already included
if 'tests' in i:
df_timeseries.drop([i], axis=1, inplace=True) # drop so we can add new
df_roi_tests = df_tests[df_tests['roi'] == roi] # filter down to roi
df_result = df_timeseries.merge(df_roi_tests, on='dates2', how='left')
df_result.fillna(-1, inplace=True)
df_result.loc[df_result['new_tests'] < 0, 'new_tests'] = -1 # Handle cases where
# cumulative counts decrease and new_tests becomes a large negative number
df_result['new_tests'] = df_result['new_tests'].astype(int)
df_result[['cum_tests', 'new_tests']] = df_result[['cum_tests', 'new_tests']].astype(int)
df_result_trim = df_result[['dates2', 'cum_cases', 'new_cases',
'cum_deaths', 'new_deaths', 'cum_recover',
'new_recover', 'new_uninfected', 'cum_tests',
'new_tests', 'population']].copy()
df_result_trim = df_result_trim.loc[:, ~df_result_trim.columns.str.contains('^Unnamed')]
df_result_trim.to_csv(csv_path) # overwrite timeseries CSV
except:
print(f'Could not get tests data for {roi}.')
def daterange(date1, date2):
for n in range(int ((date2 - date1).days)+1):
yield date1 + timedelta(n)
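# e.g. daterange(date(2020, 4, 12), date(2020, 4, 14)) yields April 12, 13 and 14 (both endpoints included).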
def fix_jhu_testing_dates(x):
y = datetime.strptime(x, '%m-%d-%Y')
return datetime.strftime(y, '%m/%d/%y')
def fix_negatives(data_path: str, plot: bool = False) -> None:
"""Fix negative values in daily data.
The purpose of this script is to fix spurious negative values in new daily
numbers. For example, the cumulative total of cases should not go from N
to a value less than N on a subsequent day. This script fixes this by
nulling such data and applying a monotonic spline interpolation in between
valid days of data. This only affects a small number of regions. It
overwrites the original .csv files produced by the functions above.
Args:
data_path (str): Full path to data directory.
plot (bool): Whether to plot the changes.
Returns:
None
"""
csvs = [x for x in data_path.iterdir() if 'covidtimeseries' in str(x)]
for csv in tqdm(csvs, desc="Regions"):
roi = str(csv).split('.')[0].split('_')[-1]
df = pd.read_csv(csv)
# Exclude final day because it is often a partial count.
df = df.iloc[:-1]
df = fix_neg(df, roi, plot=plot)
df.to_csv(data_path / (csv.name.split('.')[0]+'.csv'))
def fix_neg(df: pd.DataFrame, roi: str,
columns: list = ['cases', 'deaths', 'recover'],
plot: bool = False) -> pd.DataFrame:
"""Used by `fix_negatives` to fix negatives values for a single region.
This function uses monotonic spline interpolation to make sure that
cumulative counts are non-decreasing.
Args:
df (pd.DataFrame): DataFrame containing data for one region.
roi (str): One region, e.g 'US_MI' or 'Greece'.
columns (list, optional): Columns to make non-decreasing.
Defaults to ['cases', 'deaths', 'recover'].
Returns:
pd.DataFrame: [description]
"""
for c in columns:
cum = 'cum_%s' % c
new = 'new_%s' % c
before = df[cum].copy()
non_zeros = df[df[new] > 0].index
has_negs = before.diff().min() < 0
if len(non_zeros) and has_negs:
first_non_zero = non_zeros[0]
maxx = df.loc[first_non_zero, cum].max()
# Find the bad entries and null the corresponding
# cumulative column, which are:
# 1) Cumulative columns which are zero after previously
# being non-zero
bad = df.loc[first_non_zero:, cum] == 0
df.loc[bad[bad].index, cum] = None
# 2) New daily columns which are negative
bad = df.loc[first_non_zero:, new] < 0
df.loc[bad[bad].index, cum] = None
# Protect against 0 null final value which screws up interpolator
if np.isnan(df.loc[df.index[-1], cum]):
df.loc[df.index[-1], cum] = maxx
# Then run a loop which:
while True:
# Interpolates the cumulative column nulls to have
# monotonic growth
after = df[cum].interpolate('pchip')
diff = after.diff()
if diff.min() < 0:
# If there are still negative first-differences at this
# point, increase the corresponding cumulative values by 1.
neg_index = diff[diff < 0].index
df.loc[neg_index, cum] += 1
else:
break
# Then repeat
if plot:
plt.figure()
plt.plot(df.index, before, label='raw')
plt.plot(df.index, after, label='fixed')
r = np.corrcoef(before, after)[0, 1]
plt.title("%s %s Raw vs Fixed R=%.5g" % (roi, c, r))
plt.legend()
else:
after = before
# Make sure the first differences are now all non-negative
assert after.diff().min() >= 0
# Replace the values
df[new] = df[cum].diff().fillna(0).astype(int).values
return df
def negify_missing(data_path: str) -> None:
"""Fix negative values in daily data.
The purpose of this script is to fix spurious negative values in new daily
numbers. For example, the cumulative total of cases should not go from N
to a value less than N on a subsequent day. This script fixes this by
nulling such data and applying a monotonic spline interpolation in between
valid days of data. This only affects a small number of regions. It
overwrites the original .csv files produced by the functions above.
Args:
data_path (str): Full path to data directory.
plot (bool): Whether to plot the changes.
Returns:
None
"""
csvs = [x for x in data_path.iterdir() if 'covidtimeseries' in str(x)]
for csv in tqdm(csvs, desc="Regions"):
roi = str(csv).split('.')[0].split('_')[-1]
df = pd.read_csv(csv)
for kind in ['cases', 'deaths', 'recover']:
if df['cum_%s' % kind].sum() == 0:
print("Negifying 'new_%s' for %s" % (kind, roi))
df['new_%s' % kind] = -1
out = data_path / (csv.name.split('.')[0]+'.csv')
df.to_csv(out)
def remove_old_rois(data_path: str):
"""Delete time-series files for regions no longer tracked, such as:
Diamond Princess, MS Zaandam, Samoa, Vanuatu, Marshall Islands,
    US, US_AS (American Samoa)"""
csvs = [x for x in data_path.iterdir() if 'covidtimeseries' in str(x)]
rois_to_remove = ['Diamond Princess', 'Grand Princess', 'MS Zaandam', 'Samoa', 'Vanuatu',
'Marshall Islands', 'US', 'US_AS', 'Micronesia', 'Kiribati', 'Palau']
for csv in csvs:
roi = str(csv).split('.')[0].split('_', 1)[-1]
if roi in rois_to_remove:
try:
if os.path.exists(csv):
print("Removing {} from data_path".format(roi))
os.remove(csv)
except:
print("could not remove {}. Check that path is correct.".format(csv))
|
[
"matplotlib.pyplot.title",
"os.remove",
"pandas.read_csv",
"numpy.isnan",
"matplotlib.pyplot.figure",
"pandas.DataFrame",
"os.path.exists",
"datetime.timedelta",
"pandas.concat",
"datetime.datetime.strftime",
"tqdm.tqdm",
"numpy.corrcoef",
"matplotlib.pyplot.legend",
"datetime.date",
"datetime.date.today",
"datetime.datetime.strptime",
"pandas.to_datetime",
"pandas.Series",
"matplotlib.pyplot.plot",
"pandas.json_normalize"
] |
[((4997, 5035), 'tqdm.tqdm', 'tqdm', (['good_countries'], {'desc': '"""Countries"""'}), "(good_countries, desc='Countries')\n", (5001, 5035), False, 'from tqdm import tqdm\n'), ((6806, 6836), 'tqdm.tqdm', 'tqdm', (['states'], {'desc': '"""US States"""'}), "(states, desc='US States')\n", (6810, 6836), False, 'from tqdm import tqdm\n'), ((8490, 8522), 'datetime.datetime.strptime', 'datetime.strptime', (['x', '"""%m/%d/%y"""'], {}), "(x, '%m/%d/%y')\n", (8507, 8522), False, 'from datetime import datetime\n'), ((8534, 8566), 'datetime.datetime.strftime', 'datetime.strftime', (['y', '"""%m/%d/%y"""'], {}), "(y, '%m/%d/%y')\n", (8551, 8566), False, 'from datetime import datetime\n'), ((10644, 10670), 'pandas.read_csv', 'pd.read_csv', (['archived_data'], {}), '(archived_data)\n', (10655, 10670), True, 'import pandas as pd\n'), ((13381, 13423), 'tqdm.tqdm', 'tqdm', (['provinces'], {'desc': '"""Canadian Provinces"""'}), "(provinces, desc='Canadian Provinces')\n", (13385, 13423), False, 'from tqdm import tqdm\n'), ((14800, 14832), 'datetime.datetime.strptime', 'datetime.strptime', (['x', '"""%d-%m-%Y"""'], {}), "(x, '%d-%m-%Y')\n", (14817, 14832), False, 'from datetime import datetime\n'), ((15934, 15975), 'tqdm.tqdm', 'tqdm', (['state_code'], {'desc': '"""Brazilian States"""'}), "(state_code, desc='Brazilian States')\n", (15938, 15975), False, 'from tqdm import tqdm\n'), ((17807, 17823), 'pandas.read_csv', 'pd.read_csv', (['url'], {}), '(url)\n', (17818, 17823), True, 'import pandas as pd\n'), ((17840, 17888), 'pandas.read_csv', 'pd.read_csv', (["(data_path / 'country_iso_codes.csv')"], {}), "(data_path / 'country_iso_codes.csv')\n", (17851, 17888), True, 'import pandas as pd\n'), ((18036, 18097), 'pandas.DataFrame', 'pd.DataFrame', ([], {'columns': "['dates2', 'Alpha-3 code', 'cum_tests']"}), "(columns=['dates2', 'Alpha-3 code', 'cum_tests'])\n", (18048, 18097), True, 'import pandas as pd\n'), ((20591, 20607), 'pandas.read_csv', 'pd.read_csv', (['url'], {}), '(url)\n', (20602, 20607), True, 'import pandas as pd\n'), ((20623, 20777), 'pandas.DataFrame', 'pd.DataFrame', ([], {'columns': "['dates2', 'Alpha-3 code', 'cum_vaccinations', 'daily_vaccinations',\n 'cum_people_vaccinated', 'cum_people_fully_vaccinated']"}), "(columns=['dates2', 'Alpha-3 code', 'cum_vaccinations',\n 'daily_vaccinations', 'cum_people_vaccinated',\n 'cum_people_fully_vaccinated'])\n", (20635, 20777), True, 'import pandas as pd\n'), ((21250, 21298), 'pandas.read_csv', 'pd.read_csv', (["(data_path / 'country_iso_codes.csv')"], {}), "(data_path / 'country_iso_codes.csv')\n", (21261, 21298), True, 'import pandas as pd\n'), ((25121, 25143), 'pandas.concat', 'pd.concat', (['dfs'], {'axis': '(1)'}), '(dfs, axis=1)\n', (25130, 25143), True, 'import pandas as pd\n'), ((25696, 25712), 'pandas.read_csv', 'pd.read_csv', (['url'], {}), '(url)\n', (25707, 25712), True, 'import pandas as pd\n'), ((25728, 25864), 'pandas.DataFrame', 'pd.DataFrame', ([], {'columns': "['dates2', 'region', 'cum_vaccinations', 'daily_vaccinations',\n 'people_vaccinated', 'people_fully_vaccinated']"}), "(columns=['dates2', 'region', 'cum_vaccinations',\n 'daily_vaccinations', 'people_vaccinated', 'people_fully_vaccinated'])\n", (25740, 25864), True, 'import pandas as pd\n'), ((27801, 27833), 'datetime.datetime.strptime', 'datetime.strptime', (['x', '"""%Y-%m-%d"""'], {}), "(x, '%Y-%m-%d')\n", (27818, 27833), False, 'from datetime import datetime\n'), ((27845, 27877), 'datetime.datetime.strftime', 'datetime.strftime', (['y', '"""%m/%d/%y"""'], {}), "(y, 
'%m/%d/%y')\n", (27862, 27877), False, 'from datetime import datetime\n'), ((28435, 28452), 'datetime.date', 'date', (['(2020)', '(4)', '(12)'], {}), '(2020, 4, 12)\n', (28439, 28452), False, 'from datetime import timedelta, date\n'), ((28494, 28506), 'datetime.date.today', 'date.today', ([], {}), '()\n', (28504, 28506), False, 'from datetime import timedelta, date\n'), ((28840, 28908), 'tqdm.tqdm', 'tqdm', (['dates'], {'desc': 'f"""Scraping {delta} days of data across all states"""'}), "(dates, desc=f'Scraping {delta} days of data across all states')\n", (28844, 28908), False, 'from tqdm import tqdm\n'), ((29755, 29769), 'pandas.concat', 'pd.concat', (['dfs'], {}), '(dfs)\n', (29764, 29769), True, 'import pandas as pd\n'), ((29859, 29896), 'pandas.to_datetime', 'pd.to_datetime', (["df_combined['dates2']"], {}), "(df_combined['dates2'])\n", (29873, 29896), True, 'import pandas as pd\n'), ((30232, 30253), 'pandas.concat', 'pd.concat', (['sorted_dfs'], {}), '(sorted_dfs)\n', (30241, 30253), True, 'import pandas as pd\n'), ((32444, 32476), 'datetime.datetime.strptime', 'datetime.strptime', (['x', '"""%m-%d-%Y"""'], {}), "(x, '%m-%d-%Y')\n", (32461, 32476), False, 'from datetime import datetime\n'), ((32488, 32520), 'datetime.datetime.strftime', 'datetime.strftime', (['y', '"""%m/%d/%y"""'], {}), "(y, '%m/%d/%y')\n", (32505, 32520), False, 'from datetime import datetime\n'), ((33325, 33351), 'tqdm.tqdm', 'tqdm', (['csvs'], {'desc': '"""Regions"""'}), "(csvs, desc='Regions')\n", (33329, 33351), False, 'from tqdm import tqdm\n'), ((37222, 37248), 'tqdm.tqdm', 'tqdm', (['csvs'], {'desc': '"""Regions"""'}), "(csvs, desc='Regions')\n", (37226, 37248), False, 'from tqdm import tqdm\n'), ((9845, 9896), 'pandas.read_csv', 'pd.read_csv', (["(data_path / 'population_estimates.csv')"], {}), "(data_path / 'population_estimates.csv')\n", (9856, 9896), True, 'import pandas as pd\n'), ((10856, 10918), 'pandas.DataFrame', 'pd.DataFrame', ([], {'columns': "['dates2', 'cum_recover', 'new_recover']"}), "(columns=['dates2', 'cum_recover', 'new_recover'])\n", (10868, 10918), True, 'import pandas as pd\n'), ((13521, 13659), 'pandas.DataFrame', 'pd.DataFrame', ([], {'columns': "['dates2', 'cum_cases', 'cum_deaths', 'cum_recover', 'new_cases',\n 'new_deaths', 'new_recover', 'new_uninfected']"}), "(columns=['dates2', 'cum_cases', 'cum_deaths', 'cum_recover',\n 'new_cases', 'new_deaths', 'new_recover', 'new_uninfected'])\n", (13533, 13659), True, 'import pandas as pd\n'), ((15172, 15188), 'pandas.read_csv', 'pd.read_csv', (['url'], {}), '(url)\n', (15183, 15188), True, 'import pandas as pd\n'), ((16067, 16205), 'pandas.DataFrame', 'pd.DataFrame', ([], {'columns': "['dates2', 'cum_cases', 'cum_deaths', 'cum_recover', 'new_cases',\n 'new_deaths', 'new_recover', 'new_uninfected']"}), "(columns=['dates2', 'cum_cases', 'cum_deaths', 'cum_recover',\n 'new_cases', 'new_deaths', 'new_recover', 'new_uninfected'])\n", (16079, 16205), True, 'import pandas as pd\n'), ((33418, 33434), 'pandas.read_csv', 'pd.read_csv', (['csv'], {}), '(csv)\n', (33429, 33434), True, 'import pandas as pd\n'), ((37315, 37331), 'pandas.read_csv', 'pd.read_csv', (['csv'], {}), '(csv)\n', (37326, 37331), True, 'import pandas as pd\n'), ((5434, 5572), 'pandas.DataFrame', 'pd.DataFrame', ([], {'columns': "['dates2', 'cum_cases', 'cum_deaths', 'cum_recover', 'new_cases',\n 'new_deaths', 'new_recover', 'new_uninfected']"}), "(columns=['dates2', 'cum_cases', 'cum_deaths', 'cum_recover',\n 'new_cases', 'new_deaths', 'new_recover', 'new_uninfected'])\n", (5446, 
5572), True, 'import pandas as pd\n'), ((7144, 7252), 'pandas.DataFrame', 'pd.DataFrame', ([], {'columns': "['dates2', 'cum_cases', 'cum_deaths', 'new_cases', 'new_deaths',\n 'new_uninfected']"}), "(columns=['dates2', 'cum_cases', 'cum_deaths', 'new_cases',\n 'new_deaths', 'new_uninfected'])\n", (7156, 7252), True, 'import pandas as pd\n'), ((12529, 12558), 'pandas.json_normalize', 'pd.json_normalize', (['data[kind]'], {}), '(data[kind])\n', (12546, 12558), True, 'import pandas as pd\n'), ((17910, 17978), 'pandas.Series', 'pd.Series', (['roi_codes.Country.values'], {'index': "roi_codes['Alpha-3 code']"}), "(roi_codes.Country.values, index=roi_codes['Alpha-3 code'])\n", (17919, 17978), True, 'import pandas as pd\n'), ((18912, 18960), 'pandas.read_csv', 'pd.read_csv', (['timeseries_path'], {'index_col': '"""dates2"""'}), "(timeseries_path, index_col='dates2')\n", (18923, 18960), True, 'import pandas as pd\n'), ((21320, 21388), 'pandas.Series', 'pd.Series', (['roi_codes.Country.values'], {'index': "roi_codes['Alpha-3 code']"}), "(roi_codes.Country.values, index=roi_codes['Alpha-3 code'])\n", (21329, 21388), True, 'import pandas as pd\n'), ((22033, 22081), 'pandas.read_csv', 'pd.read_csv', (['timeseries_path'], {'index_col': '"""dates2"""'}), "(timeseries_path, index_col='dates2')\n", (22044, 22081), True, 'import pandas as pd\n'), ((28971, 28987), 'pandas.read_csv', 'pd.read_csv', (['url'], {}), '(url)\n', (28982, 28987), True, 'import pandas as pd\n'), ((29010, 29073), 'pandas.DataFrame', 'pd.DataFrame', ([], {'columns': "['Province_State', 'cum_tests', 'dates2']"}), "(columns=['Province_State', 'cum_tests', 'dates2'])\n", (29022, 29073), True, 'import pandas as pd\n'), ((30828, 30849), 'pandas.read_csv', 'pd.read_csv', (['csv_path'], {}), '(csv_path)\n', (30839, 30849), True, 'import pandas as pd\n'), ((35162, 35197), 'numpy.isnan', 'np.isnan', (['df.loc[df.index[-1], cum]'], {}), '(df.loc[df.index[-1], cum])\n', (35170, 35197), True, 'import numpy as np\n'), ((2895, 2911), 'pandas.read_csv', 'pd.read_csv', (['url'], {}), '(url)\n', (2906, 2911), True, 'import pandas as pd\n'), ((26704, 26752), 'pandas.read_csv', 'pd.read_csv', (['timeseries_path'], {'index_col': '"""dates2"""'}), "(timeseries_path, index_col='dates2')\n", (26715, 26752), True, 'import pandas as pd\n'), ((32392, 32404), 'datetime.timedelta', 'timedelta', (['n'], {}), '(n)\n', (32401, 32404), False, 'from datetime import timedelta, date\n'), ((35907, 35919), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (35917, 35919), True, 'import matplotlib.pyplot as plt\n'), ((35936, 35975), 'matplotlib.pyplot.plot', 'plt.plot', (['df.index', 'before'], {'label': '"""raw"""'}), "(df.index, before, label='raw')\n", (35944, 35975), True, 'import matplotlib.pyplot as plt\n'), ((35992, 36032), 'matplotlib.pyplot.plot', 'plt.plot', (['df.index', 'after'], {'label': '"""fixed"""'}), "(df.index, after, label='fixed')\n", (36000, 36032), True, 'import matplotlib.pyplot as plt\n'), ((36102, 36154), 'matplotlib.pyplot.title', 'plt.title', (["('%s %s Raw vs Fixed R=%.5g' % (roi, c, r))"], {}), "('%s %s Raw vs Fixed R=%.5g' % (roi, c, r))\n", (36111, 36154), True, 'import matplotlib.pyplot as plt\n'), ((36171, 36183), 'matplotlib.pyplot.legend', 'plt.legend', ([], {}), '()\n', (36181, 36183), True, 'import matplotlib.pyplot as plt\n'), ((38242, 38261), 'os.path.exists', 'os.path.exists', (['csv'], {}), '(csv)\n', (38256, 38261), False, 'import os\n'), ((3867, 3894), 'pandas.concat', 'pd.concat', (['([df1] + more_dfs)'], {}), '([df1] + 
more_dfs)\n', (3876, 3894), True, 'import pandas as pd\n'), ((11570, 11598), 'pandas.to_datetime', 'pd.to_datetime', (["df['dates2']"], {}), "(df['dates2'])\n", (11584, 11598), True, 'import pandas as pd\n'), ((14517, 14545), 'pandas.to_datetime', 'pd.to_datetime', (["df['dates2']"], {}), "(df['dates2'])\n", (14531, 14545), True, 'import pandas as pd\n'), ((17126, 17154), 'pandas.to_datetime', 'pd.to_datetime', (["df['dates2']"], {}), "(df['dates2'])\n", (17140, 17154), True, 'import pandas as pd\n'), ((36053, 36079), 'numpy.corrcoef', 'np.corrcoef', (['before', 'after'], {}), '(before, after)\n', (36064, 36079), True, 'import numpy as np\n'), ((38351, 38365), 'os.remove', 'os.remove', (['csv'], {}), '(csv)\n', (38360, 38365), False, 'import os\n')]
|
import torch.multiprocessing as multiprocessing
import threading
from torch.utils.data import _utils
import torch
import random
import sys
from torch._six import queue
import os
from torch.utils.data._utils import collate, signal_handling, MP_STATUS_CHECK_INTERVAL, \
ExceptionWrapper, IS_WINDOWS
if IS_WINDOWS:
import ctypes
from ctypes.wintypes import DWORD, BOOL, HANDLE
# On Windows, the parent ID of the worker process remains unchanged when the manager process
# is gone, and the only way to check it through OS is to let the worker have a process handle
# of the manager and ask if the process status has changed.
class ManagerWatchdog(object):
def __init__(self):
self.manager_pid = os.getppid()
self.kernel32 = ctypes.WinDLL('kernel32', use_last_error=True)
self.kernel32.OpenProcess.argtypes = (DWORD, BOOL, DWORD)
self.kernel32.OpenProcess.restype = HANDLE
self.kernel32.WaitForSingleObject.argtypes = (HANDLE, DWORD)
self.kernel32.WaitForSingleObject.restype = DWORD
# Value obtained from https://msdn.microsoft.com/en-us/library/ms684880.aspx
SYNCHRONIZE = 0x00100000
self.manager_handle = self.kernel32.OpenProcess(SYNCHRONIZE, 0, self.manager_pid)
if not self.manager_handle:
raise ctypes.WinError(ctypes.get_last_error())
self.manager_dead = False
def is_alive(self):
if not self.manager_dead:
# Value obtained from https://msdn.microsoft.com/en-us/library/windows/desktop/ms687032.aspx
self.manager_dead = self.kernel32.WaitForSingleObject(self.manager_handle, 0) == 0
return not self.manager_dead
else:
class ManagerWatchdog(object):
def __init__(self):
self.manager_pid = os.getppid()
self.manager_dead = False
def is_alive(self):
if not self.manager_dead:
self.manager_dead = os.getppid() != self.manager_pid
return not self.manager_dead
def _worker_loop(dataset, index_queue, data_queue, done_event, collate_fn, seed, init_fn, worker_id):
# See NOTE [ Data Loader Multiprocessing Shutdown Logic ] for details on the
# logic of this function.
try:
collate._use_shared_memory = True
        # Initialize C side signal handlers for SIGBUS and SIGSEGV. Python signal
# module's handlers are executed after Python returns from C low-level
# handlers, likely when the same fatal signal had already happened
# again.
# https://docs.python.org/3/library/signal.html#execution-of-python-signal-handlers
signal_handling._set_worker_signal_handlers()
torch.set_num_threads(1)
random.seed(seed)
torch.manual_seed(seed)
data_queue.cancel_join_thread()
if init_fn is not None:
init_fn(worker_id)
watchdog = ManagerWatchdog()
while watchdog.is_alive():
try:
r = index_queue.get(timeout=MP_STATUS_CHECK_INTERVAL)
except queue.Empty:
continue
if r is None:
# Received the final signal
assert done_event.is_set()
return
elif done_event.is_set():
# Done event is set. But I haven't received the final signal
                # (None) yet. I will keep continuing until I get it, and skip the
# processing steps.
continue
idx, batch_indices = r
try:
dataset.__before_hook__()
samples = collate_fn([dataset[i] for i in batch_indices])
dataset.__after_hook__()
except Exception:
# It is important that we don't store exc_info in a variable,
# see NOTE [ Python Traceback Reference Cycle Problem ]
data_queue.put((idx, ExceptionWrapper(sys.exc_info())))
else:
data_queue.put((idx, samples))
del samples
except KeyboardInterrupt:
# Main process will raise KeyboardInterrupt anyways.
pass
# Balanced batch sampler and online train loader
class HookDataloderIter(object):
def __init__(self, loader):
self.dataset = loader.dataset
self.collate_fn = loader.collate_fn
self.batch_sampler = loader.batch_sampler
self.num_workers = loader.num_workers
self.pin_memory = loader.pin_memory and torch.cuda.is_available()
self.timeout = loader.timeout
self.sample_iter = iter(self.batch_sampler)
base_seed = torch.LongTensor(1).random_().item()
if self.num_workers > 0:
self.worker_init_fn = loader.worker_init_fn
self.worker_queue_idx = 0
self.worker_result_queue = multiprocessing.Queue()
self.batches_outstanding = 0
self.worker_pids_set = False
self.shutdown = False
self.send_idx = 0
self.rcvd_idx = 0
self.reorder_dict = {}
self.done_event = multiprocessing.Event()
self.index_queues = []
self.workers = []
for i in range(self.num_workers):
index_queue = multiprocessing.Queue()
index_queue.cancel_join_thread()
w = multiprocessing.Process(
target=_worker_loop,
args=(self.dataset, index_queue,
self.worker_result_queue, self.done_event,
self.collate_fn, base_seed + i,
self.worker_init_fn, i))
w.daemon = True
# NB: Process.start() actually take some time as it needs to
# start a process and pass the arguments over via a pipe.
# Therefore, we only add a worker to self.workers list after
# it started, so that we do not call .join() if program dies
# before it starts, and __del__ tries to join but will get:
# AssertionError: can only join a started process.
w.start()
self.index_queues.append(index_queue)
self.workers.append(w)
if self.pin_memory:
self.data_queue = queue.Queue()
pin_memory_thread = threading.Thread(
target=_utils.pin_memory._pin_memory_loop,
args=(self.worker_result_queue, self.data_queue,
torch.cuda.current_device(), self.done_event))
pin_memory_thread.daemon = True
pin_memory_thread.start()
# Similar to workers (see comment above), we only register
# pin_memory_thread once it is started.
self.pin_memory_thread = pin_memory_thread
else:
self.data_queue = self.worker_result_queue
_utils.signal_handling._set_worker_pids(id(self), tuple(w.pid for w in self.workers))
_utils.signal_handling._set_SIGCHLD_handler()
self.worker_pids_set = True
# prime the prefetch loop
for _ in range(2 * self.num_workers):
self._put_indices()
def __len__(self):
return len(self.batch_sampler)
def _try_get_batch(self, timeout=_utils.MP_STATUS_CHECK_INTERVAL):
# Tries to fetch data from `data_queue` for a given timeout. This can
# also be used as inner loop of fetching without timeout, with the
# sender status as the loop condition.
#
# This raises a `RuntimeError` if any worker died expectedly. This error
# can come from either the SIGCHLD handler in `_utils/signal_handling.py`
# (only for non-Windows platforms), or the manual check below on errors
# and timeouts.
#
# Returns a 2-tuple:
# (bool: whether successfully get data, any: data if successful else None)
try:
data = self.data_queue.get(timeout=timeout)
return (True, data)
except Exception as e:
# At timeout and error, we manually check whether any worker has
# failed. Note that this is the only mechanism for Windows to detect
# worker failures.
if not all(w.is_alive() for w in self.workers):
pids_str = ', '.join(str(w.pid) for w in self.workers if not w.is_alive())
raise RuntimeError('DataLoader worker (pid(s) {}) exited unexpectedly'.format(pids_str))
if isinstance(e, queue.Empty):
return (False, None)
raise
def _get_batch(self):
# Fetches data from `self.data_queue`.
#
# We check workers' status every `MP_STATUS_CHECK_INTERVAL` seconds,
# which we achieve by running `self._try_get_batch(timeout=MP_STATUS_CHECK_INTERVAL)`
# in a loop. This is the only mechanism to detect worker failures for
# Windows. For other platforms, a SIGCHLD handler is also used for
# worker failure detection.
#
# If `pin_memory=True`, we also need check if `pin_memory_thread` had
# died at timeouts.
if self.timeout > 0:
success, data = self._try_get_batch(self.timeout)
if success:
return data
else:
raise RuntimeError('DataLoader timed out after {} seconds'.format(self.timeout))
elif self.pin_memory:
while self.pin_memory_thread.is_alive():
success, data = self._try_get_batch()
if success:
return data
else:
# while condition is false, i.e., pin_memory_thread died.
raise RuntimeError('Pin memory thread exited unexpectedly')
            # In this case, `self.data_queue` is a `queue.Queue`. But we don't
# need to call `.task_done()` because we don't use `.join()`.
else:
while True:
success, data = self._try_get_batch()
if success:
return data
def __next__(self):
if self.num_workers == 0: # same-process loading
indices = next(self.sample_iter) # may raise StopIteration
batch = self.collate_fn([self.dataset[i] for i in indices])
if self.pin_memory:
batch = _utils.pin_memory.pin_memory_batch(batch)
return batch
# check if the next sample has already been generated
if self.rcvd_idx in self.reorder_dict:
batch = self.reorder_dict.pop(self.rcvd_idx)
return self._process_next_batch(batch)
if self.batches_outstanding == 0:
self._shutdown_workers()
raise StopIteration
while True:
assert (not self.shutdown and self.batches_outstanding > 0)
idx, batch = self._get_batch()
self.batches_outstanding -= 1
if idx != self.rcvd_idx:
# store out-of-order samples
self.reorder_dict[idx] = batch
continue
return self._process_next_batch(batch)
next = __next__ # Python 2 compatibility
def __iter__(self):
return self
def _put_indices(self):
assert self.batches_outstanding < 2 * self.num_workers
indices = next(self.sample_iter, None)
if indices is None:
return
self.index_queues[self.worker_queue_idx].put((self.send_idx, indices))
self.worker_queue_idx = (self.worker_queue_idx + 1) % self.num_workers
self.batches_outstanding += 1
self.send_idx += 1
def _process_next_batch(self, batch):
self.rcvd_idx += 1
self._put_indices()
if isinstance(batch, _utils.ExceptionWrapper):
# make multiline KeyError msg readable by working around
# a python bug https://bugs.python.org/issue2651
if batch.exc_type == KeyError and "\n" in batch.exc_msg:
raise Exception("KeyError:" + batch.exc_msg)
else:
raise batch.exc_type(batch.exc_msg)
return batch
def __getstate__(self):
# TODO: add limited pickling support for sharing an iterator
# across multiple threads for HOGWILD.
# Probably the best way to do this is by moving the sample pushing
# to a separate thread and then just sharing the data queue
# but signalling the end is tricky without a non-blocking API
raise NotImplementedError("_DataLoaderIter cannot be pickled")
def _shutdown_workers(self):
# See NOTE [ Data Loader Multiprocessing Shutdown Logic ] for details on
# the logic of this function.
python_exit_status = _utils.python_exit_status
if python_exit_status is True or python_exit_status is None:
# See (2) of the note. If Python is shutting down, do no-op.
return
# Normal exit when last reference is gone / iterator is depleted.
# See (1) and the second half of the note.
if not self.shutdown:
self.shutdown = True
try:
self.done_event.set()
# Exit `pin_memory_thread` first because exiting workers may leave
# corrupted data in `worker_result_queue` which `pin_memory_thread`
# reads from.
if hasattr(self, 'pin_memory_thread'):
# Use hasattr in case error happens before we set the attribute.
# First time do `worker_result_queue.put` in this process.
# `cancel_join_thread` in case that `pin_memory_thread` exited.
self.worker_result_queue.cancel_join_thread()
self.worker_result_queue.put(None)
self.pin_memory_thread.join()
# Indicate that no more data will be put on this queue by the
# current process. This **must** be called after
# `pin_memory_thread` is joined because that thread shares the
# same pipe handles with this loader thread. If the handle is
# closed, Py3 will error in this case, but Py2 will just time
# out even if there is data in the queue.
self.worker_result_queue.close()
# Exit workers now.
for q in self.index_queues:
q.put(None)
# Indicate that no more data will be put on this queue by the
# current process.
q.close()
for w in self.workers:
w.join()
finally:
# Even though all this function does is putting into queues that
# we have called `cancel_join_thread` on, weird things can
# happen when a worker is killed by a signal, e.g., hanging in
# `Event.set()`. So we need to guard this with SIGCHLD handler,
# and remove pids from the C side data structure only at the
# end.
#
# FIXME: Unfortunately, for Windows, we are missing a worker
# error detection mechanism here in this function, as it
# doesn't provide a SIGCHLD handler.
if self.worker_pids_set:
_utils.signal_handling._remove_worker_pids(id(self))
self.worker_pids_set = False
def __del__(self):
if self.num_workers > 0:
self._shutdown_workers()
class HookDataLoader(torch.utils.data.DataLoader):
def __init__(self, dataset, batch_size=1, shuffle=False, sampler=None,
batch_sampler=None, num_workers=0, collate_fn=torch.utils.data.dataloader.default_collate,
pin_memory=False, drop_last=False, timeout=0,
worker_init_fn=None):
torch.utils.data.DataLoader.__init__(self, dataset=dataset, batch_size=batch_size, shuffle=shuffle,
sampler=sampler,
batch_sampler=batch_sampler, num_workers=num_workers,
collate_fn=collate_fn,
pin_memory=pin_memory, drop_last=drop_last, timeout=timeout,
worker_init_fn=worker_init_fn)
def __iter__(self):
return HookDataloderIter(self)
class HookDataset(object):
def __before_hook__(self):
pass
def __after_hook__(self):
pass
|
[
"ctypes.WinDLL",
"torch.utils.data._utils.signal_handling._set_SIGCHLD_handler",
"torch.LongTensor",
"os.getppid",
"torch.manual_seed",
"torch.utils.data._utils.signal_handling._set_worker_signal_handlers",
"torch._six.queue.Queue",
"torch.set_num_threads",
"random.seed",
"torch.cuda.is_available",
"torch.multiprocessing.Queue",
"torch.multiprocessing.Event",
"torch.utils.data.DataLoader.__init__",
"torch.multiprocessing.Process",
"torch.utils.data._utils.pin_memory.pin_memory_batch",
"ctypes.get_last_error",
"sys.exc_info",
"torch.cuda.current_device"
] |
[((2719, 2764), 'torch.utils.data._utils.signal_handling._set_worker_signal_handlers', 'signal_handling._set_worker_signal_handlers', ([], {}), '()\n', (2762, 2764), False, 'from torch.utils.data._utils import collate, signal_handling, MP_STATUS_CHECK_INTERVAL, ExceptionWrapper, IS_WINDOWS\n'), ((2774, 2798), 'torch.set_num_threads', 'torch.set_num_threads', (['(1)'], {}), '(1)\n', (2795, 2798), False, 'import torch\n'), ((2807, 2824), 'random.seed', 'random.seed', (['seed'], {}), '(seed)\n', (2818, 2824), False, 'import random\n'), ((2833, 2856), 'torch.manual_seed', 'torch.manual_seed', (['seed'], {}), '(seed)\n', (2850, 2856), False, 'import torch\n'), ((16174, 16477), 'torch.utils.data.DataLoader.__init__', 'torch.utils.data.DataLoader.__init__', (['self'], {'dataset': 'dataset', 'batch_size': 'batch_size', 'shuffle': 'shuffle', 'sampler': 'sampler', 'batch_sampler': 'batch_sampler', 'num_workers': 'num_workers', 'collate_fn': 'collate_fn', 'pin_memory': 'pin_memory', 'drop_last': 'drop_last', 'timeout': 'timeout', 'worker_init_fn': 'worker_init_fn'}), '(self, dataset=dataset, batch_size=\n batch_size, shuffle=shuffle, sampler=sampler, batch_sampler=\n batch_sampler, num_workers=num_workers, collate_fn=collate_fn,\n pin_memory=pin_memory, drop_last=drop_last, timeout=timeout,\n worker_init_fn=worker_init_fn)\n', (16210, 16477), False, 'import torch\n'), ((742, 754), 'os.getppid', 'os.getppid', ([], {}), '()\n', (752, 754), False, 'import os\n'), ((784, 830), 'ctypes.WinDLL', 'ctypes.WinDLL', (['"""kernel32"""'], {'use_last_error': '(True)'}), "('kernel32', use_last_error=True)\n", (797, 830), False, 'import ctypes\n'), ((1871, 1883), 'os.getppid', 'os.getppid', ([], {}), '()\n', (1881, 1883), False, 'import os\n'), ((4572, 4597), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (4595, 4597), False, 'import torch\n'), ((4914, 4937), 'torch.multiprocessing.Queue', 'multiprocessing.Queue', ([], {}), '()\n', (4935, 4937), True, 'import torch.multiprocessing as multiprocessing\n'), ((5179, 5202), 'torch.multiprocessing.Event', 'multiprocessing.Event', ([], {}), '()\n', (5200, 5202), True, 'import torch.multiprocessing as multiprocessing\n'), ((7162, 7207), 'torch.utils.data._utils.signal_handling._set_SIGCHLD_handler', '_utils.signal_handling._set_SIGCHLD_handler', ([], {}), '()\n', (7205, 7207), False, 'from torch.utils.data import _utils\n'), ((5345, 5368), 'torch.multiprocessing.Queue', 'multiprocessing.Queue', ([], {}), '()\n', (5366, 5368), True, 'import torch.multiprocessing as multiprocessing\n'), ((5438, 5623), 'torch.multiprocessing.Process', 'multiprocessing.Process', ([], {'target': '_worker_loop', 'args': '(self.dataset, index_queue, self.worker_result_queue, self.done_event, self\n .collate_fn, base_seed + i, self.worker_init_fn, i)'}), '(target=_worker_loop, args=(self.dataset,\n index_queue, self.worker_result_queue, self.done_event, self.collate_fn,\n base_seed + i, self.worker_init_fn, i))\n', (5461, 5623), True, 'import torch.multiprocessing as multiprocessing\n'), ((6421, 6434), 'torch._six.queue.Queue', 'queue.Queue', ([], {}), '()\n', (6432, 6434), False, 'from torch._six import queue\n'), ((10556, 10597), 'torch.utils.data._utils.pin_memory.pin_memory_batch', '_utils.pin_memory.pin_memory_batch', (['batch'], {}), '(batch)\n', (10590, 10597), False, 'from torch.utils.data import _utils\n'), ((1391, 1414), 'ctypes.get_last_error', 'ctypes.get_last_error', ([], {}), '()\n', (1412, 1414), False, 'import ctypes\n'), ((2025, 2037), 'os.getppid', 
'os.getppid', ([], {}), '()\n', (2035, 2037), False, 'import os\n'), ((4710, 4729), 'torch.LongTensor', 'torch.LongTensor', (['(1)'], {}), '(1)\n', (4726, 4729), False, 'import torch\n'), ((6647, 6674), 'torch.cuda.current_device', 'torch.cuda.current_device', ([], {}), '()\n', (6672, 6674), False, 'import torch\n'), ((4015, 4029), 'sys.exc_info', 'sys.exc_info', ([], {}), '()\n', (4027, 4029), False, 'import sys\n')]
|
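For context, a minimal usage sketch of the hook mechanism defined above: ToyDataset and its hook bodies are hypothetical, it assumes HookDataset and HookDataLoader from the module above are importable, and it assumes the old PyTorch version the module targets (it relies on torch._six and the pre-1.2 _utils internals). The hooks only fire in the worker loop, i.e. with num_workers > 0.
# Sketch only, not part of the original file.
import torch
from torch.utils.data import Dataset
class ToyDataset(Dataset, HookDataset):
    def __init__(self, n=8):
        self.data = torch.arange(n, dtype=torch.float32)
    def __len__(self):
        return len(self.data)
    def __getitem__(self, idx):
        return self.data[idx]
    def __before_hook__(self):
        pass  # e.g. refresh shared state before a batch is collated
    def __after_hook__(self):
        pass  # e.g. release it once the batch has been collated
loader = HookDataLoader(ToyDataset(), batch_size=4, num_workers=2)
for batch in loader:
    print(batch.shape)  # torch.Size([4])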
from ctypes import *
import ctypes  # explicit module import for the ctypes.byref(...) calls below
from athena import ndarray
from athena.stream import *
import numpy as np
from enum import Enum
import os
def _load_nccl_lib():
"""Load libary in build/lib."""
curr_path = os.path.dirname(os.path.abspath(os.path.expanduser(__file__)))
lib_path = os.path.join(curr_path, '../../../build/lib/')
path_to_so_file = os.path.join(lib_path, "lib_mpi_nccl_runtime_api.so")
lib = CDLL(path_to_so_file, RTLD_GLOBAL)
return lib
lib_mpi_nccl = _load_nccl_lib()
# lib_mpi_nccl = CDLL("./lib_mpi_nccl_runtime_api.so", RTLD_GLOBAL)
class ncclDataType_t(Enum):
ncclInt8 = 0
ncclChar = 0
ncclUint8 = 1
ncclInt32 = 2
ncclInt = 2
ncclUint32 = 3
ncclInt64 = 4
ncclUint64 = 5
ncclFloat16 = 6
ncclHalf = 6
ncclFloat32 = 7
ncclFloat = 7
ncclFloat64 = 8
ncclDouble = 8
ncclNumTypes = 9
class ncclRedOp_t(Enum):
ncclSum = 0
ncclProd = 1
ncclMax = 2
ncclMin = 3
ncclNumOps = 4
class ncclUniqueId(Structure):
_fields_=[("internal", (c_int8 * 128))]
class MPI_NCCL_Communicator():
def __init__(self, stream = None):
'''
mpicomm: the MPI communicator, to use in MPI_Bcast, MPI_Reduce, MPI_Scatter, etc
ncclcomm: the NCCL communicator, to use in ncclAllReduce ...
        nRanks: the total number of MPI processes
        myRank: the rank of this process among all MPI processes
        localRank: the rank among the MPI processes on this node, used to select the GPU
ncclId: ncclGetUniqueId should be called once when creating a communicator
and the Id should be distributed to all ranks in the communicator before calling ncclCommInitRank.
stream: the stream for NCCL communication
'''
self.mpicomm = c_int64(0)
self.ncclcomm = c_int64(0)
self.nRanks = c_int32(0)
self.myRank = c_int32(0)
self.localRank = c_int32(-1)
self.ncclId = ncclUniqueId()
self.device_id = c_int(0)
self.MPI_Init()
self.MPIGetComm()
self.MPI_Comm_rank()
self.MPI_Comm_size()
self.getLocalRank()
self.device_id.value = self.localRank.value
        if stream is None:
self.stream = create_stream_handle(ndarray.gpu(self.device_id.value))
else:
self.stream = stream
def MPI_Init(self):
lib_mpi_nccl.MPIInit()
def MPI_Finalize(self):
lib_mpi_nccl.MPIFinalize()
def MPIGetComm(self):
lib_mpi_nccl.MPIGetComm(ctypes.byref(self.mpicomm))
def MPI_Comm_rank(self):
lib_mpi_nccl.getMPICommRank(ctypes.byref(self.mpicomm), ctypes.byref(self.myRank))
def MPI_Comm_size(self):
lib_mpi_nccl.getMPICommSize(ctypes.byref(self.mpicomm), ctypes.byref(self.nRanks))
def getLocalRank(self):
lib_mpi_nccl.getLocalRank(ctypes.byref(self.mpicomm), self.nRanks, self.myRank, ctypes.byref(self.localRank))
def ncclGetUniqueId(self):
lib_mpi_nccl.getNcclUniqueId(ctypes.byref(self.ncclId), self.mpicomm, self.localRank)
def dlarrayNcclAllReduce(self, dlarray, datatype, reduceop, executor_stream = None):
lib_mpi_nccl.dlarrayAllReduce(dlarray.handle, c_int(datatype.value), c_int(reduceop.value), self.ncclcomm, executor_stream.handle if executor_stream else self.stream.handle)
def dlarrayBroadcast(self, dlarray, datatype, root, executor_stream = None):
lib_mpi_nccl.dlarrayBroadcast(dlarray.handle, c_int(datatype.value), c_int(root), self.ncclcomm, executor_stream.handle if executor_stream else self.stream.handle)
def dlarrayAllGather(self, input_arr, output_arr, datatype, executor_stream = None):
lib_mpi_nccl.dlarrayAllGather(input_arr.handle, output_arr.handle, c_int(datatype.value), self.ncclcomm, executor_stream.handle if executor_stream else self.stream.handle)
def dlarraySend(self, arr, datatype, target, executor_stream = None):
lib_mpi_nccl.dlarraySend(arr.handle, c_int(datatype.value), c_int(target), self.ncclcomm, executor_stream.handle if executor_stream else self.stream.handle)
def dlarrayRecv(self, arr, datatype, src, executor_stream = None):
lib_mpi_nccl.dlarrayRecv(arr.handle, c_int(datatype.value), c_int(src), self.ncclcomm, executor_stream.handle if executor_stream else self.stream.handle)
def ncclCommInitRank(self):
'''
        To use a partial AllReduce, change the communicator setup here:
        self.nRanks is the number of processes taking part in ncclAllReduce and
        self.myRank is the rank among them; its value must be in [0, self.nRanks - 1].
'''
lib_mpi_nccl.initNcclCommRank(ctypes.byref(self.ncclcomm), self.nRanks, ctypes.byref(self.ncclId), self.myRank, self.localRank)
def ncclCommDestroy(self):
lib_mpi_nccl.commDestroyNccl(ctypes.byref(self.ncclcomm))
def ncclSetDevice(self, device_id):
self.device_id.value = device_id
lib_mpi_nccl.setDevice(self.device_id.value)
def ncclInit(self):
self.ncclSetDevice(self.device_id.value)
self.ncclGetUniqueId()
self.ncclCommInitRank()
def ncclFinish(self):
self.MPI_Finalize()
def mpi_nccl_communicator():
    '''Create and return an MPI_NCCL_Communicator instance.'''
return MPI_NCCL_Communicator()
# NCCL_DEBUG=INFO mpirun --allow-run-as-root -np 4 python mpi_nccl_comm.py
if __name__ == "__main__":
t = mpi_nccl_communicator()
t.ncclInit()
arr = np.ones(16)*t.localRank.value
print("before: = ", arr)
arr = ndarray.array(arr, ctx = ndarray.gpu(t.device_id.value))
output_arr = np.zeros(16 * t.nRanks.value)
output_arr = ndarray.array(output_arr, ctx = ndarray.gpu(t.device_id.value))
t.dlarrayNcclAllReduce(arr, ncclDataType_t.ncclFloat32, ncclRedOp_t.ncclSum)
# t.dlarrayBroadcast(arr, ncclDataType_t.ncclFloat32, 0)
# t.dlarrayAllGather(arr, output_arr, ncclDataType_t.ncclFloat32)
print("after: = ", arr.asnumpy())
t.ncclFinish()
|
[
"os.path.expanduser",
"athena.ndarray.gpu",
"numpy.zeros",
"numpy.ones",
"os.path.join"
] |
[((280, 326), 'os.path.join', 'os.path.join', (['curr_path', '"""../../../build/lib/"""'], {}), "(curr_path, '../../../build/lib/')\n", (292, 326), False, 'import os\n'), ((349, 402), 'os.path.join', 'os.path.join', (['lib_path', '"""lib_mpi_nccl_runtime_api.so"""'], {}), "(lib_path, 'lib_mpi_nccl_runtime_api.so')\n", (361, 402), False, 'import os\n'), ((5693, 5722), 'numpy.zeros', 'np.zeros', (['(16 * t.nRanks.value)'], {}), '(16 * t.nRanks.value)\n', (5701, 5722), True, 'import numpy as np\n'), ((5550, 5561), 'numpy.ones', 'np.ones', (['(16)'], {}), '(16)\n', (5557, 5561), True, 'import numpy as np\n'), ((234, 262), 'os.path.expanduser', 'os.path.expanduser', (['__file__'], {}), '(__file__)\n', (252, 262), False, 'import os\n'), ((5644, 5674), 'athena.ndarray.gpu', 'ndarray.gpu', (['t.device_id.value'], {}), '(t.device_id.value)\n', (5655, 5674), False, 'from athena import ndarray\n'), ((5773, 5803), 'athena.ndarray.gpu', 'ndarray.gpu', (['t.device_id.value'], {}), '(t.device_id.value)\n', (5784, 5803), False, 'from athena import ndarray\n'), ((2369, 2402), 'athena.ndarray.gpu', 'ndarray.gpu', (['self.device_id.value'], {}), '(self.device_id.value)\n', (2380, 2402), False, 'from athena import ndarray\n')]
|
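Besides the all-reduce demonstrated in __main__ above, the wrapper exposes point-to-point transfers; a hedged sketch of pairing dlarraySend with dlarrayRecv between ranks 0 and 1 follows. The buffer size and the two-process launch are illustrative assumptions, and it assumes mpi_nccl_communicator and ncclDataType_t from the module above are in scope.
# Sketch only: launch with e.g. mpirun --allow-run-as-root -np 2 python this_script.py
import numpy as np
from athena import ndarray
comm = mpi_nccl_communicator()
comm.ncclInit()
buf = ndarray.array(np.full(8, comm.myRank.value, dtype=np.float32),
               ctx=ndarray.gpu(comm.device_id.value))
if comm.myRank.value == 0:
    comm.dlarraySend(buf, ncclDataType_t.ncclFloat32, 1)
elif comm.myRank.value == 1:
    comm.dlarrayRecv(buf, ncclDataType_t.ncclFloat32, 0)
print("rank", comm.myRank.value, "buffer:", buf.asnumpy())
comm.ncclFinish()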
# Generated by Django 2.1.7 on 2019-03-27 13:01
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='Player',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('session_id', models.CharField(db_index=True, max_length=20, unique=True)),
('email', models.EmailField(blank=True, max_length=100)),
],
),
migrations.CreateModel(
name='SolvedHiddenPuzzle',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('puzzle', models.CharField(choices=[('rot13', 'rot13'), ('sky', 'sky'), ('image', 'image'), ('terminal', 'terminal'), ('redirect', 'redirect'), ('login', 'login'), ('pages', 'pages'), ('audio_spectrum', 'audio_spectrum'), ('keypad', 'keypad'), ('vigenere', 'vigenere'), ('stego_mix', 'stego_mix'), ('reverse', 'reverse'), ('finish', 'finish')], max_length=40)),
('timestamp', models.DateTimeField()),
('player', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='game.Player')),
],
options={
'abstract': False,
},
),
migrations.CreateModel(
name='SolvedPuzzle',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('puzzle', models.CharField(choices=[('rot13', 'rot13'), ('sky', 'sky'), ('image', 'image'), ('terminal', 'terminal'), ('redirect', 'redirect'), ('login', 'login'), ('pages', 'pages'), ('audio_spectrum', 'audio_spectrum'), ('keypad', 'keypad'), ('vigenere', 'vigenere'), ('stego_mix', 'stego_mix'), ('reverse', 'reverse'), ('finish', 'finish')], max_length=40)),
('timestamp', models.DateTimeField()),
('player', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='game.Player')),
],
options={
'abstract': False,
},
),
migrations.AlterIndexTogether(
name='solvedpuzzle',
index_together={('player', 'puzzle')},
),
migrations.AlterIndexTogether(
name='solvedhiddenpuzzle',
index_together={('player', 'puzzle')},
),
]
|
[
"django.db.models.CharField",
"django.db.models.ForeignKey",
"django.db.migrations.AlterIndexTogether",
"django.db.models.EmailField",
"django.db.models.AutoField",
"django.db.models.DateTimeField"
] |
[((2320, 2414), 'django.db.migrations.AlterIndexTogether', 'migrations.AlterIndexTogether', ([], {'name': '"""solvedpuzzle"""', 'index_together': "{('player', 'puzzle')}"}), "(name='solvedpuzzle', index_together={(\n 'player', 'puzzle')})\n", (2349, 2414), False, 'from django.db import migrations, models\n'), ((2454, 2554), 'django.db.migrations.AlterIndexTogether', 'migrations.AlterIndexTogether', ([], {'name': '"""solvedhiddenpuzzle"""', 'index_together': "{('player', 'puzzle')}"}), "(name='solvedhiddenpuzzle', index_together={(\n 'player', 'puzzle')})\n", (2483, 2554), False, 'from django.db import migrations, models\n'), ((335, 428), 'django.db.models.AutoField', 'models.AutoField', ([], {'auto_created': '(True)', 'primary_key': '(True)', 'serialize': '(False)', 'verbose_name': '"""ID"""'}), "(auto_created=True, primary_key=True, serialize=False,\n verbose_name='ID')\n", (351, 428), False, 'from django.db import migrations, models\n'), ((458, 517), 'django.db.models.CharField', 'models.CharField', ([], {'db_index': '(True)', 'max_length': '(20)', 'unique': '(True)'}), '(db_index=True, max_length=20, unique=True)\n', (474, 517), False, 'from django.db import migrations, models\n'), ((546, 591), 'django.db.models.EmailField', 'models.EmailField', ([], {'blank': '(True)', 'max_length': '(100)'}), '(blank=True, max_length=100)\n', (563, 591), False, 'from django.db import migrations, models\n'), ((735, 828), 'django.db.models.AutoField', 'models.AutoField', ([], {'auto_created': '(True)', 'primary_key': '(True)', 'serialize': '(False)', 'verbose_name': '"""ID"""'}), "(auto_created=True, primary_key=True, serialize=False,\n verbose_name='ID')\n", (751, 828), False, 'from django.db import migrations, models\n'), ((854, 1220), 'django.db.models.CharField', 'models.CharField', ([], {'choices': "[('rot13', 'rot13'), ('sky', 'sky'), ('image', 'image'), ('terminal',\n 'terminal'), ('redirect', 'redirect'), ('login', 'login'), ('pages',\n 'pages'), ('audio_spectrum', 'audio_spectrum'), ('keypad', 'keypad'), (\n 'vigenere', 'vigenere'), ('stego_mix', 'stego_mix'), ('reverse',\n 'reverse'), ('finish', 'finish')]", 'max_length': '(40)'}), "(choices=[('rot13', 'rot13'), ('sky', 'sky'), ('image',\n 'image'), ('terminal', 'terminal'), ('redirect', 'redirect'), ('login',\n 'login'), ('pages', 'pages'), ('audio_spectrum', 'audio_spectrum'), (\n 'keypad', 'keypad'), ('vigenere', 'vigenere'), ('stego_mix',\n 'stego_mix'), ('reverse', 'reverse'), ('finish', 'finish')], max_length=40)\n", (870, 1220), False, 'from django.db import migrations, models\n'), ((1236, 1258), 'django.db.models.DateTimeField', 'models.DateTimeField', ([], {}), '()\n', (1256, 1258), False, 'from django.db import migrations, models\n'), ((1288, 1373), 'django.db.models.ForeignKey', 'models.ForeignKey', ([], {'on_delete': 'django.db.models.deletion.CASCADE', 'to': '"""game.Player"""'}), "(on_delete=django.db.models.deletion.CASCADE, to='game.Player'\n )\n", (1305, 1373), False, 'from django.db import migrations, models\n'), ((1578, 1671), 'django.db.models.AutoField', 'models.AutoField', ([], {'auto_created': '(True)', 'primary_key': '(True)', 'serialize': '(False)', 'verbose_name': '"""ID"""'}), "(auto_created=True, primary_key=True, serialize=False,\n verbose_name='ID')\n", (1594, 1671), False, 'from django.db import migrations, models\n'), ((1697, 2063), 'django.db.models.CharField', 'models.CharField', ([], {'choices': "[('rot13', 'rot13'), ('sky', 'sky'), ('image', 'image'), ('terminal',\n 'terminal'), ('redirect', 'redirect'), 
('login', 'login'), ('pages',\n 'pages'), ('audio_spectrum', 'audio_spectrum'), ('keypad', 'keypad'), (\n 'vigenere', 'vigenere'), ('stego_mix', 'stego_mix'), ('reverse',\n 'reverse'), ('finish', 'finish')]", 'max_length': '(40)'}), "(choices=[('rot13', 'rot13'), ('sky', 'sky'), ('image',\n 'image'), ('terminal', 'terminal'), ('redirect', 'redirect'), ('login',\n 'login'), ('pages', 'pages'), ('audio_spectrum', 'audio_spectrum'), (\n 'keypad', 'keypad'), ('vigenere', 'vigenere'), ('stego_mix',\n 'stego_mix'), ('reverse', 'reverse'), ('finish', 'finish')], max_length=40)\n", (1713, 2063), False, 'from django.db import migrations, models\n'), ((2079, 2101), 'django.db.models.DateTimeField', 'models.DateTimeField', ([], {}), '()\n', (2099, 2101), False, 'from django.db import migrations, models\n'), ((2131, 2216), 'django.db.models.ForeignKey', 'models.ForeignKey', ([], {'on_delete': 'django.db.models.deletion.CASCADE', 'to': '"""game.Player"""'}), "(on_delete=django.db.models.deletion.CASCADE, to='game.Player'\n )\n", (2148, 2216), False, 'from django.db import migrations, models\n')]
|
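For reference, a models file along the following lines would produce the auto-generated migration above. The field options are read straight off the migration; the shared abstract base class is an assumption suggested by the 'abstract': False options and the duplicated fields, not the project's actual source.
# Hypothetical game/models.py consistent with the migration above (a sketch).
from django.db import models
PUZZLE_CHOICES = [
    ('rot13', 'rot13'), ('sky', 'sky'), ('image', 'image'), ('terminal', 'terminal'),
    ('redirect', 'redirect'), ('login', 'login'), ('pages', 'pages'),
    ('audio_spectrum', 'audio_spectrum'), ('keypad', 'keypad'), ('vigenere', 'vigenere'),
    ('stego_mix', 'stego_mix'), ('reverse', 'reverse'), ('finish', 'finish'),
]
class Player(models.Model):
    session_id = models.CharField(db_index=True, max_length=20, unique=True)
    email = models.EmailField(blank=True, max_length=100)
class SolvedPuzzleBase(models.Model):
    puzzle = models.CharField(choices=PUZZLE_CHOICES, max_length=40)
    timestamp = models.DateTimeField()
    player = models.ForeignKey(Player, on_delete=models.CASCADE)
    class Meta:
        abstract = True
        index_together = [('player', 'puzzle')]
class SolvedPuzzle(SolvedPuzzleBase):
    pass
class SolvedHiddenPuzzle(SolvedPuzzleBase):
    pass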
"""
ETL step wrapper for creating an S3 node for input from local files
"""
from dataduct.steps import ExtractLocalStep
import logging
logger = logging.getLogger(__name__)
class CustomExtractLocalStep(ExtractLocalStep):
"""CustomExtractLocal Step class that helps get data from a local file
"""
def __init__(self, **kwargs):
"""Constructor for the CustomExtractLocal class
Args:
**kwargs(optional): Keyword arguments directly passed to base class
"""
logger.info('Using the Custom Extract Local Step')
super(CustomExtractLocalStep, self).__init__(**kwargs)
|
[
"logging.getLogger"
] |
[((144, 171), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (161, 171), False, 'import logging\n')]
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""Useful tools."""
import os
import glob
import pickle
from astropy.io import fits
__all__ = [
'read_from_pickle',
'save_to_pickle',
'save_to_fits',
'linux_or_mac',
'clean_after_ellipse',
]
def read_from_pickle(name):
"""Read the data from Pickle file."""
return pickle.load(open(name, "rb"))
def save_to_pickle(obj, name):
"""Save an object to a cPickle/Pickle format binary file."""
output = open(name, 'wb')
pickle.dump(obj, output, protocol=2)
output.close()
return
def save_to_fits(data, fits_file, wcs=None, header=None, overwrite=True):
"""Save a NDarray to FITS file.
Parameters
----------
data : ndarray
Data to be saved in FITS file.
fits_file : str
Name of the FITS file.
wcs : astropy.wcs object, optional
World coordinate system information. Default: None
header : str, optional
Header information. Default: None
overwrite : bool, optional
Overwrite existing file or not. Default: True
"""
if wcs is not None:
wcs_header = wcs.to_header()
data_hdu = fits.PrimaryHDU(data, header=wcs_header)
else:
data_hdu = fits.PrimaryHDU(data)
if header is not None:
if 'SIMPLE' in header and 'BITPIX' in header:
data_hdu.header = header
else:
data_hdu.header.extend(header)
if os.path.islink(fits_file):
os.unlink(fits_file)
data_hdu.writeto(fits_file, overwrite=overwrite)
return
def linux_or_mac():
"""Check the current platform.
    Returns
    -------
platform : str
"linux" or "macosx".
"""
from sys import platform
if platform == "linux" or platform == "linux2":
return "linux"
elif platform == "darwin":
return "macosx"
else:
raise TypeError("# Sorry, only support Linux and MacOSX for now!")
def clean_after_ellipse(folder, prefix, remove_bin=False):
"""Clean all the unecessary files after ellipse run.
Parameters
----------
folder : str
Directory that keeps all the output files.
prefix : str
Prefix of the file.
remove_bin : bool, optional
Remove the output binary table or not. Default: False
"""
_ = [os.remove(par) for par in glob.glob("{}/{}*.par".format(folder, prefix))]
_ = [os.remove(pkl) for pkl in glob.glob("{}/{}*.pkl".format(folder, prefix))]
_ = [os.remove(img) for img in glob.glob("{}/{}*.fits".format(folder, prefix))]
_ = [os.remove(tab) for tab in glob.glob("{}/{}*.tab".format(folder, prefix))]
if remove_bin:
_ = [os.remove(bin) for bin in glob.glob("{}/{}*.bin".format(folder, prefix))]
|
[
"pickle.dump",
"os.remove",
"os.unlink",
"astropy.io.fits.PrimaryHDU",
"os.path.islink"
] |
[((505, 541), 'pickle.dump', 'pickle.dump', (['obj', 'output'], {'protocol': '(2)'}), '(obj, output, protocol=2)\n', (516, 541), False, 'import pickle\n'), ((1441, 1466), 'os.path.islink', 'os.path.islink', (['fits_file'], {}), '(fits_file)\n', (1455, 1466), False, 'import os\n'), ((1166, 1206), 'astropy.io.fits.PrimaryHDU', 'fits.PrimaryHDU', (['data'], {'header': 'wcs_header'}), '(data, header=wcs_header)\n', (1181, 1206), False, 'from astropy.io import fits\n'), ((1236, 1257), 'astropy.io.fits.PrimaryHDU', 'fits.PrimaryHDU', (['data'], {}), '(data)\n', (1251, 1257), False, 'from astropy.io import fits\n'), ((1476, 1496), 'os.unlink', 'os.unlink', (['fits_file'], {}), '(fits_file)\n', (1485, 1496), False, 'import os\n'), ((2350, 2364), 'os.remove', 'os.remove', (['par'], {}), '(par)\n', (2359, 2364), False, 'import os\n'), ((2433, 2447), 'os.remove', 'os.remove', (['pkl'], {}), '(pkl)\n', (2442, 2447), False, 'import os\n'), ((2516, 2530), 'os.remove', 'os.remove', (['img'], {}), '(img)\n', (2525, 2530), False, 'import os\n'), ((2600, 2614), 'os.remove', 'os.remove', (['tab'], {}), '(tab)\n', (2609, 2614), False, 'import os\n'), ((2706, 2720), 'os.remove', 'os.remove', (['bin'], {}), '(bin)\n', (2715, 2720), False, 'import os\n')]
|
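A short usage sketch of the helpers above; the file names, the random image and the trivial WCS are illustrative only, and it assumes the functions above are importable.
# Sketch: write an image with WCS information using save_to_fits, then pickle it.
import numpy as np
from astropy.io import fits
from astropy.wcs import WCS
img = np.random.rand(64, 64)
wcs = WCS(naxis=2)  # trivial 2-D WCS, enough to exercise the wcs branch
save_to_fits(img, 'example.fits', wcs=wcs)
data = fits.getdata('example.fits')  # round-trip check
save_to_pickle({'image': data}, 'example.pkl')
restored = read_from_pickle('example.pkl')
print(restored['image'].shape, linux_or_mac())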
news = "Online disinformation, or fake news intended to deceive, has emerged as a major societal problem. Currently, fake news articles are written by humans, but recently-introduced AI technology based on Neural Networks might enable adversaries to generate fake news. Our goal is to reliably detect this “neural fake news” so that its harm can be minimized."
from selenium import webdriver
from seleniumrequests import Chrome
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC
from selenium.webdriver.common.by import By
import time
import json
import argparse
import req
#initialization
human_data = []
machine_data = []
driver = webdriver.Firefox()
#command-line argument parsing
parser = argparse.ArgumentParser()
parser.add_argument('--model', type=str)
parser.add_argument('--file_name', type=str)
parser.add_argument('--save_human_file', type=str)
parser.add_argument('--save_machine_file', type=str)
args = parser.parse_args()
model = args.model
file_name = args.file_name
save_human_file = args.save_human_file
save_machine_file = args.save_machine_file
store_human_data = []
store_machine_data = []
#check_now = human_data
#driver.find_element_by_class_name("ant-input.sc-htpNat.sc-ksYbfQ.iuRnVj").clear()
#driver.find_element_by_class_name("ant-input.sc-htpNat.sc-ksYbfQ.iuRnVj").send_keys("Online disinformation, or fake news intended to deceive, has emerged as a major societal problem. Currently, fake news articles are written by humans, but recently-introduced AI technology based on Neural Networks might enable adversaries to generate fake news. Our goal is to reliably detect this “neural fake news” so that its harm can be minimized.")
#ans = driver.find_element_by_css_selector("button.ant-btn.sc-bwzfXH.sc-jDwBTQ.kNoRcT.ant-btn-default").submit()
#element = WebDriverWait(driver, 10).until(EC.presence_of_element_located((By.CLASS_NAME, "sc-kvZOFW.bpFYHv")))
def detectGrover(news, driver, store_human_data, store_machine_data):
#for news in check_now:
driver.find_element_by_css_selector("textarea.ant-input.sc-dxgOiQ.sc-kTUwUJ.gEHnFy").clear()
driver.find_element_by_css_selector("textarea.ant-input.sc-dxgOiQ.sc-kTUwUJ.gEHnFy").send_keys(news.get('article'))
ans = driver.find_element_by_css_selector("button.ant-btn.sc-bdVaJa.sc-jbKcbu.iUrOzv").submit()
#ant-btn sc-bdVaJa sc-jbKcbu iUrOzv
try:
element = WebDriverWait(driver, 30).until(EC.presence_of_element_located((By.CSS_SELECTOR, "div.sc-dfVpRl.eIhhqn")))
if element:
print(element.text.split())
if (news['label'] not in element.text.split()) and ((news['label'] + ".") not in element.text.split()[-1]):
print(news['article'], element.text.split(), news['label'])
else:
if news['label'] == 'human':
store_human_data.append(news)
else:
store_machine_data.append(news)
except:
ans = driver.find_element_by_css_selector("button.ant-btn.sc-bdVaJa.sc-jbKcbu.iUrOzv").submit()
try:
element = WebDriverWait(driver, 30).until(
EC.presence_of_element_located((By.CSS_SELECTOR, "div.sc-dfVpRl.eIhhqn")))
if element:
if (news['label'] not in element.text.split()) and (
(news['label'] + ".") not in element.text.split()[-1]):
print(news['article'], element.text.split(), news['label'])
else:
if news['label'] == 'human':
store_human_data.append(news)
else:
store_machine_data.append(news)
except:
print("Unresponsive!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!")
def detectGPT2(news, driver, store_human_data, store_machine_data):
if 'article' in news.keys():
#print(news.keys())
driver.find_element_by_id("textbox").clear()
driver.find_element_by_id("textbox").send_keys(news['article'])
temp = driver.find_element_by_id("real-percentage")
time.sleep(5)
temp = driver.find_element_by_id("real-percentage").text.split('%')
if float(temp[0]) > 50:
label = 'human'
else:
label = 'machine'
#if label not in news['label']:
# print(news['article'], label, news['label'])
#else:
if label == 'human':
store_human_data.append(news)
else:
store_machine_data.append(news)
#driver.close()
def detectFakeBox(news, store_human_data, store_machine_data):
maxtry = 10
res = 0
label = ""
try:
while maxtry > 0:
res = req.sendRequest(news.get('article'))
maxtry = maxtry - 1
except:
print("Internet Error!Sleep 3 sec!", res, maxtry)
time.sleep(3)
if res:
if res["content_decision"] == 'impartial' or ((res['content_decision'] == 'bias') and (res['content_score'] < 0.5)):
label = 'human'
else:
label = 'machine'
if label == news['label']:
if label == 'human':
store_human_data.append(news)
else:
store_machine_data.append(news)
#model load
if model == 'groverAI':
driver.get("https://grover.allenai.org/detect")
#detectGrover(human_data, driver)
elif model == 'gpt2':
driver.get("https://huggingface.co/openai-detector")
#detectGPT2(human_data, driver)
elif model == 'fakebox':
req.init()
else:
print("Not supported as yet! TODO:CTRL, FakeBox")
#temporary
i = 0
count = 0
#input read
human_file = open(save_human_file, "a+")
machine_file = open(save_machine_file, "a+")
with open(file_name) as json_file:
while True:
line = json_file.readline()
if len(line)!=0 and (model == 'groverAI'):
#print(line)
detectGrover(json.loads(line), driver, store_human_data, store_machine_data)
count +=1
elif len(line)!=0 and (model == 'gpt2'):
len_human = len(store_human_data)
len_machine = len(store_machine_data)
detectGPT2(json.loads(line), driver, store_human_data, store_machine_data)
if len_human < len(store_human_data):
human_file.write(str(json.dumps(store_human_data[-1]))+'\n')
elif len_machine < len(store_machine_data):
machine_file.write(str(json.dumps(store_machine_data[-1]))+'\n')
elif len(line)!=0 and (model == 'fakebox'):
len_human = len(store_human_data)
len_machine = len(store_machine_data)
detectFakeBox(json.loads(line), store_human_data, store_machine_data)
if len_human < len(store_human_data):
human_file.write(str(json.dumps(store_human_data[-1]))+'\n')
elif len_machine < len(store_machine_data):
machine_file.write(str(json.dumps(store_machine_data[-1]))+'\n')
else:
break
json_file.close()
driver.close()
human_file.close()
machine_file.close()
'''
with open(save_human_file, "w") as json_file:
for each in store_human_data:
json_file.write(str(json.dumps(each))+'\n')
with open(save_machine_file, "w") as json_file:
for each in store_machine_data:
json_file.write(str(json.dumps(each))+'\n')
json_file.close()
'''
|
[
"selenium.webdriver.support.expected_conditions.presence_of_element_located",
"argparse.ArgumentParser",
"json.loads",
"selenium.webdriver.Firefox",
"req.init",
"time.sleep",
"json.dumps",
"selenium.webdriver.support.ui.WebDriverWait"
] |
[((704, 723), 'selenium.webdriver.Firefox', 'webdriver.Firefox', ([], {}), '()\n', (721, 723), False, 'from selenium import webdriver\n'), ((765, 790), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (788, 790), False, 'import argparse\n'), ((4134, 4147), 'time.sleep', 'time.sleep', (['(5)'], {}), '(5)\n', (4144, 4147), False, 'import time\n'), ((2471, 2544), 'selenium.webdriver.support.expected_conditions.presence_of_element_located', 'EC.presence_of_element_located', (["(By.CSS_SELECTOR, 'div.sc-dfVpRl.eIhhqn')"], {}), "((By.CSS_SELECTOR, 'div.sc-dfVpRl.eIhhqn'))\n", (2501, 2544), True, 'from selenium.webdriver.support import expected_conditions as EC\n'), ((4897, 4910), 'time.sleep', 'time.sleep', (['(3)'], {}), '(3)\n', (4907, 4910), False, 'import time\n'), ((5574, 5584), 'req.init', 'req.init', ([], {}), '()\n', (5582, 5584), False, 'import req\n'), ((2439, 2464), 'selenium.webdriver.support.ui.WebDriverWait', 'WebDriverWait', (['driver', '(30)'], {}), '(driver, 30)\n', (2452, 2464), False, 'from selenium.webdriver.support.ui import WebDriverWait\n'), ((5960, 5976), 'json.loads', 'json.loads', (['line'], {}), '(line)\n', (5970, 5976), False, 'import json\n'), ((3189, 3262), 'selenium.webdriver.support.expected_conditions.presence_of_element_located', 'EC.presence_of_element_located', (["(By.CSS_SELECTOR, 'div.sc-dfVpRl.eIhhqn')"], {}), "((By.CSS_SELECTOR, 'div.sc-dfVpRl.eIhhqn'))\n", (3219, 3262), True, 'from selenium.webdriver.support import expected_conditions as EC\n'), ((6214, 6230), 'json.loads', 'json.loads', (['line'], {}), '(line)\n', (6224, 6230), False, 'import json\n'), ((3140, 3165), 'selenium.webdriver.support.ui.WebDriverWait', 'WebDriverWait', (['driver', '(30)'], {}), '(driver, 30)\n', (3153, 3165), False, 'from selenium.webdriver.support.ui import WebDriverWait\n'), ((6716, 6732), 'json.loads', 'json.loads', (['line'], {}), '(line)\n', (6726, 6732), False, 'import json\n'), ((6365, 6397), 'json.dumps', 'json.dumps', (['store_human_data[-1]'], {}), '(store_human_data[-1])\n', (6375, 6397), False, 'import json\n'), ((6500, 6534), 'json.dumps', 'json.dumps', (['store_machine_data[-1]'], {}), '(store_machine_data[-1])\n', (6510, 6534), False, 'import json\n'), ((6859, 6891), 'json.dumps', 'json.dumps', (['store_human_data[-1]'], {}), '(store_human_data[-1])\n', (6869, 6891), False, 'import json\n'), ((6994, 7028), 'json.dumps', 'json.dumps', (['store_machine_data[-1]'], {}), '(store_machine_data[-1])\n', (7004, 7028), False, 'import json\n')]
|
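The detection loop above reads a JSON-lines file in which each record carries an article field and a label of either human or machine; a hedged sketch of producing such a file and a matching invocation follows (the script name, file names and article text are hypothetical).
# Sketch: build a two-line JSONL input in the format consumed by the loop above.
import json
rows = [
    {"article": "Some human-written news text ...", "label": "human"},
    {"article": "Some model-generated news text ...", "label": "machine"},
]
with open("sample_input.jsonl", "w") as f:
    for row in rows:
        f.write(json.dumps(row) + "\n")
# Hypothetical invocation (model can be groverAI, gpt2 or fakebox):
#   python detector.py --model gpt2 --file_name sample_input.jsonl \
#       --save_human_file human_out.jsonl --save_machine_file machine_out.jsonl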
from __future__ import unicode_literals
from django.conf import settings
from django.conf.urls import include, url
from django.conf.urls.static import static
from django.contrib import admin
from django.views.generic import TemplateView
from rest_framework.routers import SimpleRouter
from django.views import defaults as default_views
from scholars.courses.viewsets import CourseViewSet, SlideViewSet, CategoryViewSet, LanguageViewSet, TimezoneViewSet, \
SlideReviewViewSet
from scholars.users.viewsets import UserViewSet
router = SimpleRouter()
router.register(r'users', UserViewSet)
router.register(r'timezones', TimezoneViewSet, base_name='timezones')
router.register(r'languages', LanguageViewSet, base_name='languages')
router.register(r'categories', CategoryViewSet)
router.register(r'courses', CourseViewSet)
router.register(r'slides', SlideViewSet)
router.register(r'reviews', SlideReviewViewSet)
urlpatterns = [
url(r'^admin/', include(admin.site.urls)),
url(r'^api/', include('scholars.authentication.urls')),
url(r'^api/auth/', include('rest_framework_social_oauth2.urls')),
url(r'^api/', include(router.urls)),
# the 'api-root' from django rest-frameworks default router
# http://www.django-rest-framework.org/api-guide/routers/#defaultrouter
# url(r'^$', RedirectView.as_view(url=reverse_lazy('api-root'), permanent=False)),
url(r'^$', TemplateView.as_view(template_name='index.html')),
]
urlpatterns += static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)
if settings.DEBUG:
# This allows the error pages to be debugged during development, just visit
# these url in browser to see how these error pages look like.
urlpatterns += [
url(r'^400/$', default_views.bad_request, kwargs={'exception': Exception('Bad Request!')}),
url(r'^403/$', default_views.permission_denied, kwargs={'exception': Exception('Permission Denied')}),
url(r'^404/$', default_views.page_not_found, kwargs={'exception': Exception('Page not Found')}),
url(r'^500/$', default_views.server_error),
]
if 'debug_toolbar' in settings.INSTALLED_APPS:
import debug_toolbar
urlpatterns = [
url(r'^__debug__/', include(debug_toolbar.urls)),
] + urlpatterns
|
[
"django.conf.urls.include",
"rest_framework.routers.SimpleRouter",
"django.views.generic.TemplateView.as_view",
"django.conf.urls.url",
"django.conf.urls.static.static"
] |
[((540, 554), 'rest_framework.routers.SimpleRouter', 'SimpleRouter', ([], {}), '()\n', (552, 554), False, 'from rest_framework.routers import SimpleRouter\n'), ((1463, 1524), 'django.conf.urls.static.static', 'static', (['settings.MEDIA_URL'], {'document_root': 'settings.MEDIA_ROOT'}), '(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)\n', (1469, 1524), False, 'from django.conf.urls.static import static\n'), ((951, 975), 'django.conf.urls.include', 'include', (['admin.site.urls'], {}), '(admin.site.urls)\n', (958, 975), False, 'from django.conf.urls import include, url\n'), ((996, 1035), 'django.conf.urls.include', 'include', (['"""scholars.authentication.urls"""'], {}), "('scholars.authentication.urls')\n", (1003, 1035), False, 'from django.conf.urls import include, url\n'), ((1061, 1105), 'django.conf.urls.include', 'include', (['"""rest_framework_social_oauth2.urls"""'], {}), "('rest_framework_social_oauth2.urls')\n", (1068, 1105), False, 'from django.conf.urls import include, url\n'), ((1127, 1147), 'django.conf.urls.include', 'include', (['router.urls'], {}), '(router.urls)\n', (1134, 1147), False, 'from django.conf.urls import include, url\n'), ((1393, 1441), 'django.views.generic.TemplateView.as_view', 'TemplateView.as_view', ([], {'template_name': '"""index.html"""'}), "(template_name='index.html')\n", (1413, 1441), False, 'from django.views.generic import TemplateView\n'), ((2038, 2079), 'django.conf.urls.url', 'url', (['"""^500/$"""', 'default_views.server_error'], {}), "('^500/$', default_views.server_error)\n", (2041, 2079), False, 'from django.conf.urls import include, url\n'), ((2224, 2251), 'django.conf.urls.include', 'include', (['debug_toolbar.urls'], {}), '(debug_toolbar.urls)\n', (2231, 2251), False, 'from django.conf.urls import include, url\n')]
|
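For orientation, SimpleRouter turns each registration above into a list route and a detail route under the /api/ prefix; the listing below is inferred from the router defaults rather than taken from the project, and the snippet assumes it is appended to the module above.
# Sketch: inspect the URL patterns the router generates, e.g.
#   /api/users/  /api/users/<pk>/,  /api/courses/  /api/courses/<pk>/,  etc.
for pattern in router.urls:
    print(pattern)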
import numpy as np
import warnings
from scipy import stats
from six import string_types
import matplotlib.pyplot as plt
from scipy.integrate import trapz
from explore.utils import Proportions
try:
import statsmodels.nonparametric.api as smnp
_has_statsmodels = True
except ImportError:
_has_statsmodels = False
def _univariate_kde(data, shade=False, vertical=False, kernel='gau',
bw="scott", gridsize=100, cut=3,
clip=None, legend=True, ax=None, cumulative=False,
**kwargs):
"""
    Computes the KDE of univariate data.
    Parameters
    ----------
    data : array-like
        Observed univariate data.
    shade : bool, optional
If True, shade in the area under the KDE curve (or draw with filled
contours when data is bivariate).
vertical : bool, optional
If True, density is on x-axis.
kernel : {'gau' | 'cos' | 'biw' | 'epa' | 'tri' | 'triw' }, optional
Code for shape of kernel to fit with. Bivariate KDE can only use
gaussian kernel.
bw : {'scott' | 'silverman' | scalar | pair of scalars }, optional
Name of reference method to determine kernel size, scalar factor,
or scalar for each dimension of the bivariate plot. Note that the
        underlying computational libraries have different interpretations
for this parameter: ``statsmodels`` uses it directly, but ``scipy``
treats it as a scaling factor for the standard deviation of the
data.
gridsize : int, optional
Number of discrete points in the evaluation grid.
cut : scalar, optional
Draw the estimate to cut * bw from the extreme data points.
clip : pair of scalars, or pair of pair of scalars, optional
Lower and upper bounds for datapoints used to fit KDE. Can provide
a pair of (low, high) bounds for bivariate plots.
legend : bool, optional
If True, add a legend or label the axes when possible.
cumulative : bool, optional
If True, draw the cumulative distribution estimated by the kde.
ax : matplotlib axes, optional
Axes to plot on, otherwise uses current axes.
kwargs : key, value pairings
Other keyword arguments are passed to ``plt.plot()`` or
``plt.contour{f}`` depending on whether a univariate or bivariate
plot is being drawn.
Output
------
x: array-like, (n_grid_points, )
The grid of values where the kde is evaluated.
y: array-like, (n_grid_points, )
The values of the KDE.
"""
# Sort out the clipping
if clip is None:
clip = (-np.inf, np.inf)
# Calculate the KDE
if np.nan_to_num(data.var()) == 0:
# Don't try to compute KDE on singular data
msg = "Data must have variance to compute a kernel density estimate."
warnings.warn(msg, UserWarning)
x, y = np.array([]), np.array([])
elif _has_statsmodels:
# Prefer using statsmodels for kernel flexibility
x, y = _statsmodels_univariate_kde(data, kernel, bw,
gridsize, cut, clip,
cumulative=cumulative)
else:
# Fall back to scipy if missing statsmodels
if kernel != "gau":
kernel = "gau"
msg = "Kernel other than `gau` requires statsmodels."
warnings.warn(msg, UserWarning)
if cumulative:
raise ImportError("Cumulative distributions are currently "
"only implemented in statsmodels. "
"Please install statsmodels.")
x, y = _scipy_univariate_kde(data, bw, gridsize, cut, clip)
# Make sure the density is nonnegative
y = np.amax(np.c_[np.zeros_like(y), y], axis=1)
return x, y
def _statsmodels_univariate_kde(data, kernel, bw, gridsize, cut, clip,
cumulative=False):
"""Compute a univariate kernel density estimate using statsmodels."""
fft = kernel == "gau"
kde = smnp.KDEUnivariate(data)
kde.fit(kernel, bw, fft, gridsize=gridsize, cut=cut, clip=clip)
if cumulative:
grid, y = kde.support, kde.cdf
else:
grid, y = kde.support, kde.density
return grid, y
def _scipy_univariate_kde(data, bw, gridsize, cut, clip):
"""Compute a univariate kernel density estimate using scipy."""
try:
kde = stats.gaussian_kde(data, bw_method=bw)
except TypeError:
kde = stats.gaussian_kde(data)
if bw != "scott": # scipy default
msg = ("Ignoring bandwidth choice, "
"please upgrade scipy to use a different bandwidth.")
warnings.warn(msg, UserWarning)
if isinstance(bw, string_types):
bw = "scotts" if bw == "scott" else bw
bw = getattr(kde, "%s_factor" % bw)() * np.std(data)
grid = _kde_support(data, bw, gridsize, cut, clip)
y = kde(grid)
return grid, y
def _kde_support(data, bw, gridsize='default', cut=3, clip=None):
"""Establish support for a kernel density estimate."""
support_min = max(data.min() - bw * cut, clip[0])
support_max = min(data.max() + bw * cut, clip[1])
return np.linspace(support_min, support_max, gridsize)
def get_class_kdes(values, classes, ensure_norm=True, **kde_kws):
"""
KDEs for values with associated classes. Computes the KDE of each class
then weights each KDE by the number of points in each class. Also
compute the overall KDE.
Output
------
cl_kdes, overall_kde
cl_kdes: dict
KDE for each class. Keys are class labels.
overall_kde: dict
Overall KDE (i.e. ignoring class labels)
"""
# TODO: do we really need ensure_norm
overall_grid, overall_y = _univariate_kde(values, **kde_kws)
if ensure_norm:
overall_y = norm_kde(grid=overall_grid, y=overall_y)
overall_kde = {'grid': overall_grid, 'y': overall_y}
cl_props = Proportions(classes)
cl_kdes = {}
for cl in np.unique(classes):
cl_mask = classes == cl
cl_values = values[cl_mask]
cl_grid, cl_y = _univariate_kde(cl_values, **kde_kws)
if ensure_norm:
cl_y = norm_kde(grid=cl_grid, y=cl_y)
# weight area under KDE by number of samples
cl_y *= cl_props[cl]
cl_kdes[cl] = {'grid': cl_grid,
'y': cl_y}
return cl_kdes, overall_kde
def norm_kde(grid, y):
tot = trapz(y=y, x=grid)
return y / tot
def _univariate_kdeplot(x, y, shade=True, vertical=False,
legend=True, ax=None, **kwargs):
"""Plot a univariate kernel density estimate on one of the axes."""
if ax is None:
ax = plt.gca()
# Make sure the density is nonnegative
y = np.amax(np.c_[np.zeros_like(y), y], axis=1)
# Flip the data if the plot should be on the y axis
if vertical:
x, y = y, x
# Check if a label was specified in the call
label = kwargs.pop("label", None)
# Otherwise check if the data object has a name
if label is None and hasattr(x, "name"):
label = x.name
# Decide if we're going to add a legend
legend = label is not None and legend
label = "_nolegend_" if label is None else label
# Use the active color cycle to find the plot color
facecolor = kwargs.pop("facecolor", None)
line, = ax.plot(x, y, **kwargs)
color = line.get_color()
line.remove()
kwargs.pop("color", None)
facecolor = color if facecolor is None else facecolor
# Draw the KDE plot and, optionally, shade
ax.plot(x, y, color=color, label=label, **kwargs)
shade_kws = dict(
facecolor=facecolor,
alpha=kwargs.get("alpha", 0.25),
clip_on=kwargs.get("clip_on", True),
zorder=kwargs.get("zorder", 1),
)
if shade:
if vertical:
ax.fill_betweenx(y, 0, x, **shade_kws)
else:
ax.fill_between(x, 0, y, **shade_kws)
# Set the density axis minimum to 0
if vertical:
ax.set_xlim(0, auto=None)
else:
ax.set_ylim(0, auto=None)
# Draw the legend here
handles, labels = ax.get_legend_handles_labels()
if legend and handles:
ax.legend(loc="best")
return ax
def _univariate_conditional_kdeplot(values, classes,
cl_labels=None,
cl_palette=None,
include_overall=True,
shade=True,
vertical=False,
legend=True,
ax=None,
kde_kws={},
kde_plt_kws={}):
cl_kdes, overall_kde = get_class_kdes(values, classes, **kde_kws)
# in case 'overall' is one of the classes
if 'overall' in np.unique(classes):
overall_name = ''.join(np.unique(classes))
else:
overall_name = 'overall'
cl_kdes[overall_name] = overall_kde
# plot the KDE for each class
for cl in cl_kdes.keys():
_kwargs = kde_plt_kws.copy()
_kwargs['shade'] = shade
x = cl_kdes[cl]['grid']
y = cl_kdes[cl]['y']
if cl_palette is not None and cl in cl_palette:
_kwargs['color'] = cl_palette[cl]
if cl_labels is not None and cl in cl_labels:
_kwargs['label'] = cl_labels[cl]
else:
_kwargs['label'] = cl
if cl == overall_name:
if not include_overall:
continue
_kwargs['ls'] = '--'
# _kwargs['alpha'] = .2
_kwargs['zorder'] = 1
_kwargs['label'] = None # 'overall'
_kwargs['color'] = 'gray'
_kwargs['shade'] = False
_univariate_kdeplot(x=x, y=y,
vertical=vertical,
legend=legend, ax=ax, **_kwargs)
|
[
"numpy.zeros_like",
"matplotlib.pyplot.gca",
"numpy.std",
"scipy.stats.gaussian_kde",
"explore.utils.Proportions",
"numpy.array",
"numpy.linspace",
"scipy.integrate.trapz",
"warnings.warn",
"statsmodels.nonparametric.api.KDEUnivariate",
"numpy.unique"
] |
[((4001, 4025), 'statsmodels.nonparametric.api.KDEUnivariate', 'smnp.KDEUnivariate', (['data'], {}), '(data)\n', (4019, 4025), True, 'import statsmodels.nonparametric.api as smnp\n'), ((5167, 5214), 'numpy.linspace', 'np.linspace', (['support_min', 'support_max', 'gridsize'], {}), '(support_min, support_max, gridsize)\n', (5178, 5214), True, 'import numpy as np\n'), ((5927, 5947), 'explore.utils.Proportions', 'Proportions', (['classes'], {}), '(classes)\n', (5938, 5947), False, 'from explore.utils import Proportions\n'), ((5979, 5997), 'numpy.unique', 'np.unique', (['classes'], {}), '(classes)\n', (5988, 5997), True, 'import numpy as np\n'), ((6430, 6448), 'scipy.integrate.trapz', 'trapz', ([], {'y': 'y', 'x': 'grid'}), '(y=y, x=grid)\n', (6435, 6448), False, 'from scipy.integrate import trapz\n'), ((2786, 2817), 'warnings.warn', 'warnings.warn', (['msg', 'UserWarning'], {}), '(msg, UserWarning)\n', (2799, 2817), False, 'import warnings\n'), ((4375, 4413), 'scipy.stats.gaussian_kde', 'stats.gaussian_kde', (['data'], {'bw_method': 'bw'}), '(data, bw_method=bw)\n', (4393, 4413), False, 'from scipy import stats\n'), ((6690, 6699), 'matplotlib.pyplot.gca', 'plt.gca', ([], {}), '()\n', (6697, 6699), True, 'import matplotlib.pyplot as plt\n'), ((8892, 8910), 'numpy.unique', 'np.unique', (['classes'], {}), '(classes)\n', (8901, 8910), True, 'import numpy as np\n'), ((2833, 2845), 'numpy.array', 'np.array', (['[]'], {}), '([])\n', (2841, 2845), True, 'import numpy as np\n'), ((2847, 2859), 'numpy.array', 'np.array', (['[]'], {}), '([])\n', (2855, 2859), True, 'import numpy as np\n'), ((4450, 4474), 'scipy.stats.gaussian_kde', 'stats.gaussian_kde', (['data'], {}), '(data)\n', (4468, 4474), False, 'from scipy import stats\n'), ((4816, 4828), 'numpy.std', 'np.std', (['data'], {}), '(data)\n', (4822, 4828), True, 'import numpy as np\n'), ((8943, 8961), 'numpy.unique', 'np.unique', (['classes'], {}), '(classes)\n', (8952, 8961), True, 'import numpy as np\n'), ((3332, 3363), 'warnings.warn', 'warnings.warn', (['msg', 'UserWarning'], {}), '(msg, UserWarning)\n', (3345, 3363), False, 'import warnings\n'), ((3720, 3736), 'numpy.zeros_like', 'np.zeros_like', (['y'], {}), '(y)\n', (3733, 3736), True, 'import numpy as np\n'), ((4652, 4683), 'warnings.warn', 'warnings.warn', (['msg', 'UserWarning'], {}), '(msg, UserWarning)\n', (4665, 4683), False, 'import warnings\n'), ((6766, 6782), 'numpy.zeros_like', 'np.zeros_like', (['y'], {}), '(y)\n', (6779, 6782), True, 'import numpy as np\n')]
|
from collections import Counter
INPUT_FILE = "../../input/14.txt"
Ruleset = dict[str, str]
def parse_input() -> tuple[str, Ruleset]:
"""
Parses the input and returns the polymer template and the pair insertion rules
"""
with open(INPUT_FILE) as f:
template, _, *rules = f.read().splitlines()
ruleset = dict(rule.split(" -> ") for rule in rules)
return (template, ruleset)
def step(ruleset: Ruleset, pair_counter: Counter[str]) -> Counter[str]:
"""
Applies a single step to the given pair_counter
"""
new_pair_counter: Counter[str] = Counter()
for pair, count in pair_counter.items():
inserted = ruleset[pair]
first, second = pair
new_pair_counter[first + inserted] += count
new_pair_counter[inserted + second] += count
return new_pair_counter
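# Illustrative trace (added, not part of the original solution): with template
# "NNCB" and the rule "NN -> C", the pair counter starts as
#     Counter({"NN": 1, "NC": 1, "CB": 1})
# and the matched pair "NN" contributes the two new pairs "NC" and "CN" after one
# step. The real puzzle input provides a rule for every pair that can occur, so
# every pair in the counter is expanded on each step.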
def calculate_answer(template: str, pair_counter: Counter[str]) -> int:
"""
Calculates how many times each letter occurs by adding the counts of pairs
where the given letter comes first and 1 for the last letter of the original
template (which does not change), then subtracts the lowest count from the
highest count and returns the answer
"""
letter_counter = Counter(template[-1])
for pair, count in pair_counter.items():
first_letter, _ = pair
letter_counter[first_letter] += count
return max(letter_counter.values()) - min(letter_counter.values())
def solve(template: str, ruleset: Ruleset) -> tuple[int, int]:
"""
Calculates the required answers given the original template and the pair
insertion rules
"""
pairs = ("".join(pair) for pair in zip(template, template[1:]))
pair_counter = Counter(pairs)
for _ in range(10):
pair_counter = step(ruleset, pair_counter)
part1 = calculate_answer(template, pair_counter)
for _ in range(30):
pair_counter = step(ruleset, pair_counter)
part2 = calculate_answer(template, pair_counter)
return (part1, part2)
if __name__ == "__main__":
template, ruleset = parse_input()
part1, part2 = solve(template, ruleset)
print(part1)
print(part2)
|
[
"collections.Counter"
] |
[((587, 596), 'collections.Counter', 'Counter', ([], {}), '()\n', (594, 596), False, 'from collections import Counter\n'), ((1230, 1251), 'collections.Counter', 'Counter', (['template[-1]'], {}), '(template[-1])\n', (1237, 1251), False, 'from collections import Counter\n'), ((1712, 1726), 'collections.Counter', 'Counter', (['pairs'], {}), '(pairs)\n', (1719, 1726), False, 'from collections import Counter\n')]
|
from typing import Optional, Dict
import jwt
import sentry_sdk
from fastapi import HTTPException
from starlette import status
from starlette.requests import Request
from auth.models import Role
from auth.models import User
from config import cfg
def get_user(request: Request) -> User:
"""
    Protect a route from anonymous access, requiring and returning the current
authenticated user.
:param request: web request
:return: current user, otherwise raise an HTTPException (status=401)
"""
return _check_and_extract_user(request)
def get_admin(request: Request) -> User:
"""
    Allow access only to an 'admin' account, returning the current
authenticated admin account data.
:param request: web request
:return: current admin user, otherwise raise an HTTPException (status=401)
"""
user = _check_and_extract_user(request)
if user.role != Role.ADMIN:
raise HTTPException(status_code=status.HTTP_401_UNAUTHORIZED)
return user
def get_optional_user(request: Request) -> Optional[User]:
"""
Return authenticated user or None if session is anonymous.
:param request: web request
:return: current user or None for anonymous sessions
"""
try:
return _check_and_extract_user(request)
except HTTPException:
if request.headers.get("Authorization"):
raise
def extract_user_from_token(access_token: str, verify_exp: bool = True) -> User:
"""
Extract User object from jwt token, with optional expiration check.
:param access_token: encoded access token string
:param verify_exp: whether to perform verification or not
:return: User object stored inside the jwt
"""
return User(**jwt.decode(
access_token,
key=cfg.jwt_secret,
algorithms=[cfg.jwt_algorithm],
options={"verify_exp": verify_exp})["user"])
def decode_jwt_refresh_token(
encoded_refresh_token: str,
verify_exp: bool = True) -> Dict:
"""
Decode an encoded refresh token, with optional expiration check.
:param encoded_refresh_token: encoded refresh token string
:param verify_exp: whether to perform verification or not
:return: decoded jwt refresh token as dictionary
"""
return jwt.decode(
encoded_refresh_token,
key=cfg.jwt_secret,
algorithms=[cfg.jwt_algorithm],
options={"verify_exp": verify_exp})
def _check_and_extract_user(request: Request) -> User:
authorization_header = request.headers.get("Authorization")
if not authorization_header:
raise HTTPException(status_code=status.HTTP_401_UNAUTHORIZED)
try:
access_token = authorization_header.replace("Bearer ", "")
        user = extract_user_from_token(access_token)
if cfg.sentry_dsn:
sentry_sdk.set_user({
"id": user.id,
"username": user.username,
"email": user.email,
"ip_address": request.client.host
})
return user
except jwt.exceptions.ExpiredSignatureError:
raise HTTPException(status_code=status.HTTP_401_UNAUTHORIZED)
|
[
"sentry_sdk.set_user",
"fastapi.HTTPException",
"jwt.decode"
] |
[((2263, 2389), 'jwt.decode', 'jwt.decode', (['encoded_refresh_token'], {'key': 'cfg.jwt_secret', 'algorithms': '[cfg.jwt_algorithm]', 'options': "{'verify_exp': verify_exp}"}), "(encoded_refresh_token, key=cfg.jwt_secret, algorithms=[cfg.\n jwt_algorithm], options={'verify_exp': verify_exp})\n", (2273, 2389), False, 'import jwt\n'), ((917, 972), 'fastapi.HTTPException', 'HTTPException', ([], {'status_code': 'status.HTTP_401_UNAUTHORIZED'}), '(status_code=status.HTTP_401_UNAUTHORIZED)\n', (930, 972), False, 'from fastapi import HTTPException\n'), ((2586, 2641), 'fastapi.HTTPException', 'HTTPException', ([], {'status_code': 'status.HTTP_401_UNAUTHORIZED'}), '(status_code=status.HTTP_401_UNAUTHORIZED)\n', (2599, 2641), False, 'from fastapi import HTTPException\n'), ((2812, 2935), 'sentry_sdk.set_user', 'sentry_sdk.set_user', (["{'id': user.id, 'username': user.username, 'email': user.email,\n 'ip_address': request.client.host}"], {}), "({'id': user.id, 'username': user.username, 'email':\n user.email, 'ip_address': request.client.host})\n", (2831, 2935), False, 'import sentry_sdk\n'), ((3093, 3148), 'fastapi.HTTPException', 'HTTPException', ([], {'status_code': 'status.HTTP_401_UNAUTHORIZED'}), '(status_code=status.HTTP_401_UNAUTHORIZED)\n', (3106, 3148), False, 'from fastapi import HTTPException\n'), ((1722, 1838), 'jwt.decode', 'jwt.decode', (['access_token'], {'key': 'cfg.jwt_secret', 'algorithms': '[cfg.jwt_algorithm]', 'options': "{'verify_exp': verify_exp}"}), "(access_token, key=cfg.jwt_secret, algorithms=[cfg.jwt_algorithm],\n options={'verify_exp': verify_exp})\n", (1732, 1838), False, 'import jwt\n')]
|
import setuptools
with open("README.md", "r") as fh:
long_description = fh.read()
setuptools.setup(
name="diffusionfit",
version="0.7.0",
python_requires=">=3.9",
install_requires=[
"numpy",
"scipy",
"scikit-image",
"matplotlib",
"seaborn",
"pandas",
"numba",
"streamlit",
"plotly",
],
author="<NAME>",
author_email="<EMAIL>",
description="Python package for extract estimates of dye/peptide diffusion coefficients and loss rates from a time-sequence of fluorescence images.",
long_description=long_description,
long_description_content_type="text/markdown",
url="https://github.com/NTBEL/diffusion-fit",
packages=setuptools.find_packages(),
classifiers=[
"Programming Language :: Python :: 3",
"Operating System :: OS Independent",
],
)
|
[
"setuptools.find_packages"
] |
[((740, 766), 'setuptools.find_packages', 'setuptools.find_packages', ([], {}), '()\n', (764, 766), False, 'import setuptools\n')]
|
########################################
# Automatically generated, do not edit.
########################################
from pyvisdk.thirdparty import Enum
EntityReferenceEntityType = Enum(
'cluster',
'datacenter',
'datastore',
'host',
'nasMount',
'resourcePool',
'scsiAdapter',
'scsiPath',
'scsiTarget',
'scsiVolume',
'storagePod',
'vm',
'vmFile',
)
|
[
"pyvisdk.thirdparty.Enum"
] |
[((190, 361), 'pyvisdk.thirdparty.Enum', 'Enum', (['"""cluster"""', '"""datacenter"""', '"""datastore"""', '"""host"""', '"""nasMount"""', '"""resourcePool"""', '"""scsiAdapter"""', '"""scsiPath"""', '"""scsiTarget"""', '"""scsiVolume"""', '"""storagePod"""', '"""vm"""', '"""vmFile"""'], {}), "('cluster', 'datacenter', 'datastore', 'host', 'nasMount',\n 'resourcePool', 'scsiAdapter', 'scsiPath', 'scsiTarget', 'scsiVolume',\n 'storagePod', 'vm', 'vmFile')\n", (194, 361), False, 'from pyvisdk.thirdparty import Enum\n')]
|
from exit import Exit
class Room: #Room object
    def __init__(self, id, name, description): #reminder: self is the object built from the Room blueprint
        self.id = id
        self.name = name
        self.description = description
        self.pickables = {} #the pickable items are currently an empty dictionary that will be filled later; they are not among the parameters of __init__
        self.inspectables = []
        self.exits = {}
        self.characters = []
    def addPickable(self, itemId, nbOfThisItem): #method: add an item to a room
        numberOfThisItem = self.pickables.get(itemId, 0) #get returns the value associated with the key itemId,
        # or 0 if there is none
        numberOfThisItem += nbOfThisItem #add x more of this item to the current count
        self.pickables[itemId] = numberOfThisItem #numberOfThisItem is stored in the pickables of my room (e.g. 'cuisine') under the key itemId
    def addInspectable(self, inspectableItem):
        self.inspectables.append(inspectableItem)
    def addCharacter(self, charactersName):
        self.characters.append(charactersName)
    def addExit(self, destination, exitName): #method: add an exit (one side only)
        newExit = Exit(self, destination) #self here lives in Room, so it refers to a single Room (e.g. 'cuisine')
        #Create a new Exit object (like `new Exit` in JS), passing the parameters expected by the constructor
        #(__init__) //// = the new exit is stored in newExit
        self.exits[exitName] = newExit #in the exits{} dict of my object ('cuisine'), store newExit (under the key
        # exitName)
    def addDoubleExit(self, destination, exitName1, exitName2): #method: add the same exit on both sides
        self.addExit(destination, exitName1) #run addExit() on 'cuisine'
        destination.addExit(self, exitName2) #destination is also a Room; add an exit back to the current
        # room 'cuisine' from its destination
@staticmethod
def addDoubleExitBuilder(source, destination, exitName1, exitName2):
def hiddenDoubleExit(game, source=source, destination=destination, exitName1=exitName1, exitName2=exitName2):
source.addDoubleExit(destination, exitName1, exitName2)
return hiddenDoubleExit
def __repr__(self):
return "Room("+self.name+")"
|
[
"exit.Exit"
] |
[((1300, 1323), 'exit.Exit', 'Exit', (['self', 'destination'], {}), '(self, destination)\n', (1304, 1323), False, 'from exit import Exit\n')]
|
from unittest import TestCase
from uttut.elements import Datum, Entity, Intent
from ..partition_by_entities import partition_by_entities
class PartitionByEntitiesTestCase(TestCase):
def setUp(self):
self.utterance = '我想訂明天從紐約飛到新加坡的機票'
self.entities = [
Entity(label=0, value='明天', start=3, end=5, replacements=['下禮拜二']),
Entity(label=1, value='紐約', start=6, end=8),
Entity(label=2, value='新加坡', start=10, end=13, replacements=['斯堪地那維亞', 'KIX']),
]
self.intents = [
Intent(label=0),
]
self.datum = Datum(
utterance=self.utterance,
intents=self.intents,
entities=self.entities,
)
self.datum_wo_entity = Datum(
utterance='薄餡亂入',
intents=[Intent(label=0)],
)
def test_partition_by_entities(self):
actual_parts, entity_names = partition_by_entities(self.datum, False)
expected_parts = [
['我想訂'],
['下禮拜二'],
['從'],
['紐約'],
['飛到'],
['斯堪地那維亞', 'KIX'],
['的機票'],
]
for exp_part, act_part in zip(expected_parts, actual_parts):
self.assertEqual(set(exp_part), set(act_part))
self.assertEqual(
entity_names,
[None, 0, None, 1, None, 2, None],
)
def test_partition_by_entities_include_orig(self):
actual_parts, entity_names = partition_by_entities(self.datum, True)
expected_parts = [
['我想訂'],
['明天', '下禮拜二'],
['從'],
['紐約'],
['飛到'],
['新加坡', '斯堪地那維亞', 'KIX'],
['的機票'],
]
for exp_part, act_part in zip(expected_parts, actual_parts):
self.assertEqual(set(exp_part), set(act_part))
self.assertEqual(
entity_names,
[None, 0, None, 1, None, 2, None],
)
def test_datum_wo_entity(self):
# do not include origin
output = partition_by_entities(self.datum_wo_entity, True)
self.assertEqual(([['薄餡亂入']], [None]), output)
# include origin
output = partition_by_entities(self.datum_wo_entity, False)
self.assertEqual(([['薄餡亂入']], [None]), output)
|
[
"uttut.elements.Datum",
"uttut.elements.Intent",
"uttut.elements.Entity"
] |
[((601, 678), 'uttut.elements.Datum', 'Datum', ([], {'utterance': 'self.utterance', 'intents': 'self.intents', 'entities': 'self.entities'}), '(utterance=self.utterance, intents=self.intents, entities=self.entities)\n', (606, 678), False, 'from uttut.elements import Datum, Entity, Intent\n'), ((289, 355), 'uttut.elements.Entity', 'Entity', ([], {'label': '(0)', 'value': '"""明天"""', 'start': '(3)', 'end': '(5)', 'replacements': "['下禮拜二']"}), "(label=0, value='明天', start=3, end=5, replacements=['下禮拜二'])\n", (295, 355), False, 'from uttut.elements import Datum, Entity, Intent\n'), ((369, 412), 'uttut.elements.Entity', 'Entity', ([], {'label': '(1)', 'value': '"""紐約"""', 'start': '(6)', 'end': '(8)'}), "(label=1, value='紐約', start=6, end=8)\n", (375, 412), False, 'from uttut.elements import Datum, Entity, Intent\n'), ((426, 504), 'uttut.elements.Entity', 'Entity', ([], {'label': '(2)', 'value': '"""新加坡"""', 'start': '(10)', 'end': '(13)', 'replacements': "['斯堪地那維亞', 'KIX']"}), "(label=2, value='新加坡', start=10, end=13, replacements=['斯堪地那維亞', 'KIX'])\n", (432, 504), False, 'from uttut.elements import Datum, Entity, Intent\n'), ((553, 568), 'uttut.elements.Intent', 'Intent', ([], {'label': '(0)'}), '(label=0)\n', (559, 568), False, 'from uttut.elements import Datum, Entity, Intent\n'), ((816, 831), 'uttut.elements.Intent', 'Intent', ([], {'label': '(0)'}), '(label=0)\n', (822, 831), False, 'from uttut.elements import Datum, Entity, Intent\n')]
|
from abc import ABCMeta, abstractmethod
import numpy as np
class ProposalDistribution(metaclass=ABCMeta):
@abstractmethod
def __init__(self):
...
@abstractmethod
def sample(self, x: np.ndarray) -> np.ndarray:
...
@abstractmethod
def pdf(self, x: np.ndarray, cond: np.ndarray) -> np.ndarray:
...
class Normal(ProposalDistribution):
__slots__ = ['mean', 'std']
def __init__(self, mean: float, spread: float):
super().__init__()
self.mean = mean
self.std = spread
assert self.std > 0, "Wrong specification of distribution!"
def sample(self, x):
return x + np.random.normal(self.mean, self.std, x.shape)
def pdf(self, x, cond):
return 1 / (np.sqrt(2 * np.pi) * self.std) * np.exp(-(x - self.mean - cond) ** 2 / (2 * self.std ** 2))
class Uniform(ProposalDistribution):
__slots__ = ['spread']
def __init__(self, spread: float):
super().__init__()
self.spread = spread
assert self.spread > 0, "Wrong specification of distribution!"
def sample(self, x):
return x + np.random.uniform(low=-self.spread / 2, high=self.spread / 2, size=x.shape)
def pdf(self, x, cond):
return np.array(1 / self.spread)
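# Illustrative usage (added; an assumption, not part of the original module): these
# proposals are shaped for a random-walk Metropolis step, e.g.
#     proposal = Normal(mean=0.0, spread=0.5)
#     candidate = proposal.sample(np.array([0.0]))        # perturb the current state
#     q = proposal.pdf(candidate, cond=np.array([0.0]))   # proposal density q(candidate | state)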
|
[
"numpy.random.uniform",
"numpy.array",
"numpy.exp",
"numpy.random.normal",
"numpy.sqrt"
] |
[((1248, 1273), 'numpy.array', 'np.array', (['(1 / self.spread)'], {}), '(1 / self.spread)\n', (1256, 1273), True, 'import numpy as np\n'), ((662, 708), 'numpy.random.normal', 'np.random.normal', (['self.mean', 'self.std', 'x.shape'], {}), '(self.mean, self.std, x.shape)\n', (678, 708), True, 'import numpy as np\n'), ((791, 849), 'numpy.exp', 'np.exp', (['(-(x - self.mean - cond) ** 2 / (2 * self.std ** 2))'], {}), '(-(x - self.mean - cond) ** 2 / (2 * self.std ** 2))\n', (797, 849), True, 'import numpy as np\n'), ((1128, 1203), 'numpy.random.uniform', 'np.random.uniform', ([], {'low': '(-self.spread / 2)', 'high': '(self.spread / 2)', 'size': 'x.shape'}), '(low=-self.spread / 2, high=self.spread / 2, size=x.shape)\n', (1145, 1203), True, 'import numpy as np\n'), ((758, 776), 'numpy.sqrt', 'np.sqrt', (['(2 * np.pi)'], {}), '(2 * np.pi)\n', (765, 776), True, 'import numpy as np\n')]
|
from __future__ import print_function
import numpy as np
try:
from builtins import range, zip
except:
pass
def fermi_dirac(e_fermi, delta, energy):
"""
Return fermi-dirac distribution weight.
"""
x = (energy - e_fermi)/delta
if x < -200:
f = 1.
elif x > 200:
f = 0.
else:
f = 1./(np.exp(x) + 1)
return f
def num_electron_diff(e_fermi, delta, e_skn, w_k, nb_k, num_elec):
ne = 0
for e_kn in e_skn:
for e_n, w, nb in zip(e_kn, w_k, nb_k):
f = [fermi_dirac(e_fermi, delta, e) for e in e_n[:nb]]
ne += np.sum(f)*w
return ne - num_elec
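# Illustrative usage (added; an assumption, not part of the original module):
# num_electron_diff is shaped as the objective of a root finder, so the Fermi
# level can be bracketed and solved for with, e.g., scipy:
#     from scipy.optimize import brentq
#     e_fermi = brentq(num_electron_diff, e_min, e_max,
#                      args=(delta, e_skn, w_k, nb_k, num_elec))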
|
[
"numpy.sum",
"numpy.exp",
"builtins.zip"
] |
[((500, 520), 'builtins.zip', 'zip', (['e_kn', 'w_k', 'nb_k'], {}), '(e_kn, w_k, nb_k)\n', (503, 520), False, 'from builtins import range, zip\n'), ((607, 616), 'numpy.sum', 'np.sum', (['f'], {}), '(f)\n', (613, 616), True, 'import numpy as np\n'), ((343, 352), 'numpy.exp', 'np.exp', (['x'], {}), '(x)\n', (349, 352), True, 'import numpy as np\n')]
|
from mock import Mock
from nameko.legacy import responses
def test_iter_rpcresponses():
response_list = [
Mock(payload={'id': 1, 'failure': False, 'ending': False}),
Mock(payload={'id': 2, 'failure': False, 'ending': False}),
Mock(payload={'id': 3, 'failure': False, 'ending': True}),
]
iter_ = responses.iter_rpcresponses(response_list)
ret = responses.last(iter_)
# should be the message preceeding the `ending`
assert ret.payload['id'] == 2
def test_iter_rpcresponses_ending_only():
response_list = [
Mock(payload={'id': 3, 'failure': False, 'ending': True}),
]
iter_ = responses.iter_rpcresponses(response_list)
# should not include the ending message
assert list(iter_) == []
|
[
"nameko.legacy.responses.iter_rpcresponses",
"nameko.legacy.responses.last",
"mock.Mock"
] |
[((335, 377), 'nameko.legacy.responses.iter_rpcresponses', 'responses.iter_rpcresponses', (['response_list'], {}), '(response_list)\n', (362, 377), False, 'from nameko.legacy import responses\n'), ((388, 409), 'nameko.legacy.responses.last', 'responses.last', (['iter_'], {}), '(iter_)\n', (402, 409), False, 'from nameko.legacy import responses\n'), ((649, 691), 'nameko.legacy.responses.iter_rpcresponses', 'responses.iter_rpcresponses', (['response_list'], {}), '(response_list)\n', (676, 691), False, 'from nameko.legacy import responses\n'), ((121, 179), 'mock.Mock', 'Mock', ([], {'payload': "{'id': 1, 'failure': False, 'ending': False}"}), "(payload={'id': 1, 'failure': False, 'ending': False})\n", (125, 179), False, 'from mock import Mock\n'), ((189, 247), 'mock.Mock', 'Mock', ([], {'payload': "{'id': 2, 'failure': False, 'ending': False}"}), "(payload={'id': 2, 'failure': False, 'ending': False})\n", (193, 247), False, 'from mock import Mock\n'), ((257, 314), 'mock.Mock', 'Mock', ([], {'payload': "{'id': 3, 'failure': False, 'ending': True}"}), "(payload={'id': 3, 'failure': False, 'ending': True})\n", (261, 314), False, 'from mock import Mock\n'), ((571, 628), 'mock.Mock', 'Mock', ([], {'payload': "{'id': 3, 'failure': False, 'ending': True}"}), "(payload={'id': 3, 'failure': False, 'ending': True})\n", (575, 628), False, 'from mock import Mock\n')]
|
import discord
from ..admin.managecommands import perms
import json
from discord.utils import get
from pymongo import MongoClient, collation
from discord.ext import commands, tasks
import time
import os
import pymongo as pm
import asyncio
import random
import datetime
import copy
class Kick(commands.Cog):
def __init__(self, bot):
self.bot = bot
# Kick
@commands.command(pass_context=True)
@commands.has_permissions(kick_members=True)
@commands.bot_has_permissions(kick_members=True)
@commands.check(perms)
async def kick(self, ctx, member:discord.Member, *reason):
# Sets default reason if not specified
if not reason:
reason = "Reason was not specified"
# Bans member if the author has a higher role than the subject.
if member is None:
await ctx.reply("Please mention someone to kick")
else:
if ctx.author.top_role.position > member.top_role.position:
reason = ' '.join(map(str, reason))
await ctx.reply(f'{member} was kicked with reason "{reason}"')
await ctx.guild.kick(member, reason=reason)
else:
await ctx.reply("The person you are trying to kick is more powerful than you")
def setup(bot):
bot.add_cog(Kick(bot))
|
[
"discord.ext.commands.has_permissions",
"discord.ext.commands.check",
"discord.ext.commands.bot_has_permissions",
"discord.ext.commands.command"
] |
[((378, 413), 'discord.ext.commands.command', 'commands.command', ([], {'pass_context': '(True)'}), '(pass_context=True)\n', (394, 413), False, 'from discord.ext import commands, tasks\n'), ((419, 462), 'discord.ext.commands.has_permissions', 'commands.has_permissions', ([], {'kick_members': '(True)'}), '(kick_members=True)\n', (443, 462), False, 'from discord.ext import commands, tasks\n'), ((468, 515), 'discord.ext.commands.bot_has_permissions', 'commands.bot_has_permissions', ([], {'kick_members': '(True)'}), '(kick_members=True)\n', (496, 515), False, 'from discord.ext import commands, tasks\n'), ((521, 542), 'discord.ext.commands.check', 'commands.check', (['perms'], {}), '(perms)\n', (535, 542), False, 'from discord.ext import commands, tasks\n')]
|
# Imports
import os
import random
from collections import Counter, defaultdict
from itertools import combinations  # needed below for proper-noun pair generation
from nltk.tag import StanfordNERTagger
from nltk.tokenize import word_tokenize
from nltk import pos_tag
from nltk.chunk import conlltags2tree
from nltk.tree import Tree
import pandas as pd
from htrc_features import FeatureReader
import geocoder
import folium
from pprint import pprint
from tqdm import tqdm
# Set environment variable
# Geonames requires a username to access the API but we do not want to expose personal info in code
#
# Run this locally by adding USERNAME to environment variables, e.g. to .env, as follows:
# > export USERNAME=<insert username here>
USERNAME = os.getenv('USERNAME')
# Setup Stanford NER Tagger
# Ignore deprecation warning for now; we'll deal with it when the time comes!
st = StanfordNERTagger('/usr/local/share/stanford-ner/classifiers/english.all.3class.distsim.crf.ser.gz',
'/usr/local/share/stanford-ner/stanford-ner.jar',
encoding='utf-8')
# Functions for putting together with inside-outside-beginning (IOB) logic
# Cf. https://stackoverflow.com/a/30666949
#
# For more information on IOB tagging, see https://en.wikipedia.org/wiki/Inside–outside–beginning_(tagging)
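# NOTE (added): the two helpers below are not in the original snippet; they are a
# hedged reconstruction of the IOB helpers described in the Stack Overflow answer
# cited above (the names stanfordNE2BIO/stanfordNE2tree are assumed), included so
# that the later call to stanfordNE2tree(tagged_tokens) is defined.
def stanfordNE2BIO(tagged_sent):
    """Re-tag Stanford NER output with BIO (inside-outside-beginning) prefixes."""
    bio_tagged_sent = []
    prev_tag = "O"
    for token, tag in tagged_sent:
        if tag == "O":  # outside any named entity
            bio_tagged_sent.append((token, tag))
        elif prev_tag == tag:  # continuation of the current entity
            bio_tagged_sent.append((token, "I-" + tag))
        else:  # start of a new entity (after "O" or a different entity type)
            bio_tagged_sent.append((token, "B-" + tag))
        prev_tag = tag
    return bio_tagged_sent
def stanfordNE2tree(ne_tagged_sent):
    """Build an nltk chunk Tree (with NE subtrees) from Stanford NER output."""
    bio_tagged_sent = stanfordNE2BIO(ne_tagged_sent)
    sent_tokens, sent_ne_tags = zip(*bio_tagged_sent)
    sent_pos_tags = [pos for _, pos in pos_tag(list(sent_tokens))]
    sent_conlltags = list(zip(sent_tokens, sent_pos_tags, sent_ne_tags))
    return conlltags2tree(sent_conlltags)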
# Sample HathiTrust ID
# This is the HTID for...
# "Ancient Corinth: A guide to the excavations," <NAME>, <NAME>, and <NAME>
htid = "wu.89079728994"
# Get HTEF data for this ID; specifically tokenlist
fr = FeatureReader(ids=[htid])
for vol in fr:
tokens = vol.tokenlist()
# Create pandas dataframe with relevant data
temp = tokens.index.values.tolist()
counts = pd.DataFrame.from_records(temp, columns=['page', 'section', 'token', 'pos'])
counts['count'] = tokens['count'].tolist()
counts[:10]
# Reconstruct text using tokens and counts
text_data = list(zip(counts['token'].tolist(), counts['count'].tolist()))
# Loop through and multiply words by counts
text_list = []
for w, c in text_data:
for i in range(0, c):
text_list.append(w)
random.shuffle(text_list) # Necessary?
text_reconstruction = " ".join(text_list)
#page_words_extended = page_words+page_ner
tokens = word_tokenize(text_reconstruction)
tagged_tokens = st.tag(tokens)
tagged_tokens = [item for item in tagged_tokens if item[0] != '']
ne_tree = stanfordNE2tree(tagged_tokens)
ne_in_sent = []
for subtree in ne_tree:
if type(subtree) == Tree: # If subtree is a noun chunk, i.e. NE != "O"
ne_label = subtree.label()
ne_string = " ".join([token for token, pos in subtree.leaves()])
ne_in_sent.append((ne_string, ne_label))
locations = [tag[0].title() for tag in ne_in_sent if tag[1] == 'LOCATION']
print(locations)
most_common_locations = Counter(locations).most_common(10)
pprint(most_common_locations)
# Organize some data for map info
places_list = [name for name, _ in most_common_locations][:3] # Limit to top three
most_common_locations = dict(most_common_locations) # Turn mcl into dictionary
# Retrieve json from geonames API (for fun this time using geocoder)
geocoder_results = []
for place in places_list:
results = geocoder.geonames(place, maxRows=5, key=USERNAME)
jsons = []
for result in results:
jsons.append(result.json)
geocoder_results.append(jsons)
# Create a list of 'country' from the geonames json results
countries = []
for results in geocoder_results:
for item in results:
if 'country' in item.keys():
countries.append(item['country'])
# Determine which country appears most often
top_country = Counter(countries).most_common(1)[0][0]
print(top_country)
# Iterate over geocoder_results and keep the first lat/long that matches the top country
coordinates = []
for i, results in enumerate(geocoder_results):
for item in results:
if item['country'] == top_country:
coordinates.append((float(item['lat']), float(item['lng'])))
break # Only get the first item for now
print(places_list)
print(coordinates)
# Set up Folium and populate with weighted coordinates
basemap = folium.Map(location=[37.97945, 23.71622], zoom_start=8, tiles='cartodbpositron', width=960, height=512)
for i, c in enumerate(coordinates):
folium.CircleMarker([c[0], c[1]], radius=most_common_locations[places_list[i]]*.25, color='#3186cc',
fill=True, fill_opacity=0.5, fill_color='#3186cc',
popup='{} ({}, {}) appears {} times in book.'.format(places_list[i], c[0], c[1], most_common_locations[places_list[i]])).add_to(basemap)
print('Map of relevant locations in Broneer et al.\'s "Ancient Corinth: A guide to the excavations," weighted by frequency.')
basemap
page = 87
test = counts[counts['page'] == page]['token'].tolist()
print(test)
print(len(test))
from nltk.corpus import stopwords
stops = set(stopwords.words('english'))
pns_list = []
for i in range(1, max(counts['page'])+1):
tokens = counts[counts['page'] == i]['token'].tolist()
tokens = [token for token in tokens if token.lower() not in stops and len(token) > 2]
pns = [token for token in tokens if token[0].isupper()]
combs = [f'{x} {y}' for x, y in combinations(pns, 2)]
pns_list.extend(combs)
|
[
"htrc_features.FeatureReader",
"random.shuffle",
"nltk.tag.StanfordNERTagger",
"geocoder.geonames",
"pprint.pprint",
"pandas.DataFrame.from_records",
"folium.Map",
"nltk.corpus.stopwords.words",
"collections.Counter",
"os.getenv",
"nltk.tokenize.word_tokenize"
] |
[((673, 694), 'os.getenv', 'os.getenv', (['"""USERNAME"""'], {}), "('USERNAME')\n", (682, 694), False, 'import os\n'), ((806, 984), 'nltk.tag.StanfordNERTagger', 'StanfordNERTagger', (['"""/usr/local/share/stanford-ner/classifiers/english.all.3class.distsim.crf.ser.gz"""', '"""/usr/local/share/stanford-ner/stanford-ner.jar"""'], {'encoding': '"""utf-8"""'}), "(\n '/usr/local/share/stanford-ner/classifiers/english.all.3class.distsim.crf.ser.gz'\n , '/usr/local/share/stanford-ner/stanford-ner.jar', encoding='utf-8')\n", (823, 984), False, 'from nltk.tag import StanfordNERTagger\n'), ((1457, 1482), 'htrc_features.FeatureReader', 'FeatureReader', ([], {'ids': '[htid]'}), '(ids=[htid])\n', (1470, 1482), False, 'from htrc_features import FeatureReader\n'), ((1617, 1693), 'pandas.DataFrame.from_records', 'pd.DataFrame.from_records', (['temp'], {'columns': "['page', 'section', 'token', 'pos']"}), "(temp, columns=['page', 'section', 'token', 'pos'])\n", (1642, 1693), True, 'import pandas as pd\n'), ((2002, 2027), 'random.shuffle', 'random.shuffle', (['text_list'], {}), '(text_list)\n', (2016, 2027), False, 'import random\n'), ((2135, 2169), 'nltk.tokenize.word_tokenize', 'word_tokenize', (['text_reconstruction'], {}), '(text_reconstruction)\n', (2148, 2169), False, 'from nltk.tokenize import word_tokenize\n'), ((2736, 2765), 'pprint.pprint', 'pprint', (['most_common_locations'], {}), '(most_common_locations)\n', (2742, 2765), False, 'from pprint import pprint\n'), ((4039, 4147), 'folium.Map', 'folium.Map', ([], {'location': '[37.97945, 23.71622]', 'zoom_start': '(8)', 'tiles': '"""cartodbpositron"""', 'width': '(960)', 'height': '(512)'}), "(location=[37.97945, 23.71622], zoom_start=8, tiles=\n 'cartodbpositron', width=960, height=512)\n", (4049, 4147), False, 'import folium\n'), ((3093, 3142), 'geocoder.geonames', 'geocoder.geonames', (['place'], {'maxRows': '(5)', 'key': 'USERNAME'}), '(place, maxRows=5, key=USERNAME)\n', (3110, 3142), False, 'import geocoder\n'), ((4788, 4814), 'nltk.corpus.stopwords.words', 'stopwords.words', (['"""english"""'], {}), "('english')\n", (4803, 4814), False, 'from nltk.corpus import stopwords\n'), ((2701, 2719), 'collections.Counter', 'Counter', (['locations'], {}), '(locations)\n', (2708, 2719), False, 'from collections import Counter, defaultdict\n'), ((3536, 3554), 'collections.Counter', 'Counter', (['countries'], {}), '(countries)\n', (3543, 3554), False, 'from collections import Counter, defaultdict\n')]
|
import cv2
import virtcam.debug as debug
from virtcam.base import Frame, FrameSource, Image, Mask, StreamConfig, immutable
class Webcam(FrameSource):
def __init__(self):
super().__init__()
self.current_id = -1
self.camera = cv2.VideoCapture("/dev/video0", cv2.CAP_V4L2)
c1, c2, c3, c4 = "M", "J", "P", "G"
codec = cv2.VideoWriter_fourcc(c1, c2, c3, c4)
self.camera.set(cv2.CAP_PROP_FOURCC, codec)
camConfig = StreamConfig(
int(self.camera.get(cv2.CAP_PROP_FRAME_WIDTH)),
int(self.camera.get(cv2.CAP_PROP_FRAME_HEIGHT)),
int(self.camera.get(cv2.CAP_PROP_FPS)),
)
self._init_config(camConfig)
self.frame = Frame(self.config, Image(self.config.width, self.config.height), self.fullmask)
debug.config("Webcam:init:config", camConfig)
def grab(self) -> bool:
return True
def next(self, frame_id: int) -> Frame:
if not self.frame or self.current_id != frame_id:
grabbed = False
while not grabbed:
grabbed, image = self.camera.read()
self.frame = Frame(self.config, immutable(image), self.fullmask)
self.current_id = frame_id
# debug.frame(f"Webcam:next[{frame_id}]", self.frame)
return self.frame
|
[
"cv2.VideoWriter_fourcc",
"cv2.VideoCapture",
"virtcam.debug.config",
"virtcam.base.immutable",
"virtcam.base.Image"
] |
[((255, 300), 'cv2.VideoCapture', 'cv2.VideoCapture', (['"""/dev/video0"""', 'cv2.CAP_V4L2'], {}), "('/dev/video0', cv2.CAP_V4L2)\n", (271, 300), False, 'import cv2\n'), ((362, 400), 'cv2.VideoWriter_fourcc', 'cv2.VideoWriter_fourcc', (['c1', 'c2', 'c3', 'c4'], {}), '(c1, c2, c3, c4)\n', (384, 400), False, 'import cv2\n'), ((818, 863), 'virtcam.debug.config', 'debug.config', (['"""Webcam:init:config"""', 'camConfig'], {}), "('Webcam:init:config', camConfig)\n", (830, 863), True, 'import virtcam.debug as debug\n'), ((748, 792), 'virtcam.base.Image', 'Image', (['self.config.width', 'self.config.height'], {}), '(self.config.width, self.config.height)\n', (753, 792), False, 'from virtcam.base import Frame, FrameSource, Image, Mask, StreamConfig, immutable\n'), ((1172, 1188), 'virtcam.base.immutable', 'immutable', (['image'], {}), '(image)\n', (1181, 1188), False, 'from virtcam.base import Frame, FrameSource, Image, Mask, StreamConfig, immutable\n')]
|
import numpy as np
import math
def softmax(src):
# Get size of input vector
rows, cols = src.shape
# Checking
if rows > 1:
raise Exception("Input rows > 1")
# Find softmax
expVec = np.exp(src)
return expVec / np.sum(expVec)
def softmax_derivative(src):
# Get size of input vector
rows, cols = src.shape
# Checking
if rows > 1:
raise Exception("Input rows > 1")
# Find softmax derivative
tmpVec = softmax(src)
retMat = np.zeros((cols, cols))
for i in range(cols):
for j in range(cols):
retMat[i, j] = tmpVec[0, i] * (float((i == j)) - tmpVec[0, j])
return retMat
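# The loop above fills the softmax Jacobian J[i, j] = s_i * (delta_ij - s_j).
# Quick sanity check (added, illustrative): every row sums to ~0 because the
# softmax outputs sum to 1:
#     J = softmax_derivative(np.array([[1.0, 2.0, 3.0]]))
#     assert np.allclose(J.sum(axis=1), 0.0)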
def relu(src):
# Get size of input vector
rows, cols = src.shape
# Checking
if rows > 1:
raise Exception("Input rows > 1")
# Find relu
retVec = np.zeros((1, cols))
for i in range(cols):
retVec[0, i] = max(src[0, i], 0.0)
return retVec
def relu_derivative(src):
# Get size of input vector
rows, cols = src.shape
# Checking
if rows > 1:
raise Exception("Input rows > 1")
# Find relu derivative
retMat = np.zeros((cols, cols))
for i in range(cols):
if src[0, i] < 0.0:
retMat[i, i] = 0
else:
retMat[i, i] = 1
return retMat
|
[
"numpy.sum",
"numpy.zeros",
"numpy.exp"
] |
[((216, 227), 'numpy.exp', 'np.exp', (['src'], {}), '(src)\n', (222, 227), True, 'import numpy as np\n'), ((496, 518), 'numpy.zeros', 'np.zeros', (['(cols, cols)'], {}), '((cols, cols))\n', (504, 518), True, 'import numpy as np\n'), ((848, 867), 'numpy.zeros', 'np.zeros', (['(1, cols)'], {}), '((1, cols))\n', (856, 867), True, 'import numpy as np\n'), ((1157, 1179), 'numpy.zeros', 'np.zeros', (['(cols, cols)'], {}), '((cols, cols))\n', (1165, 1179), True, 'import numpy as np\n'), ((248, 262), 'numpy.sum', 'np.sum', (['expVec'], {}), '(expVec)\n', (254, 262), True, 'import numpy as np\n')]
|
import numpy as np
from scipy import sparse
import scipy.sparse.linalg as spla
import pylab as plt
from scipy.linalg import block_diag
#
#
nSub = 2
def load_matrix_basic(pathToFile,makeSparse,makeSymmetric, offset):
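    # Expected file layout (comment added, inferred from the parsing below): the
    # first data line holds the matrix dimensions (n, m); every remaining line is
    # a COO triplet "row col value", with `offset` subtracted from the (typically
    # 1-based) indices.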
f0 = open(pathToFile).readlines()
firstLine = f0.pop(0) #removes the first line
tmp = np.zeros((len(f0),3), dtype = float)
for i in range(len(f0)):
line = f0[i]
k = line.split()
tmp[i,0] = float(k[0])
tmp[i,1] = float(k[1])
tmp[i,2] = float(k[2])
if (tmp.shape[0]==1):
tmp = []
else:
n = np.int32(tmp[0,0])
m = np.int32(tmp[0,1])
I = tmp[1::,0]-offset;
J = tmp[1::,1]-offset;
V = tmp[1::,2]
#
# print str0,i,j
        if (makeSymmetric):
            logInd = J != I;
            # mirror the strictly off-diagonal entries; build all three arrays from
            # the original I/J so I[logInd] is not applied to the already-extended I
            I, J, V = (np.concatenate((I, J[logInd])),
                       np.concatenate((J, I[logInd])),
                       np.concatenate((V, V[logInd])))
if (makeSparse):
tmp = sparse.csc_matrix((V,(I,J)),shape=(n,m)).tocoo()
else:
if (m==1):
tmp = V
else:
tmp = sparse.csc_matrix((V,(I,J)),shape=(n,m)).toarray()
return tmp
def load_matrix(path,str0,i,j,makeSparse,makeSymmetric,offset):
pathToFile = path+'/'+str(i)+'/'+str0+str(j)+'.txt' #
tmp = load_matrix_basic(pathToFile,makeSparse,makeSymmetric,offset)
return tmp
path0 = "../data"
if 1:
K = []
K_reg = []
Fc = []
R = []
Rf = []
Bc = []
Bf = []
BcT_dense = []
Gc = []
# Gf = []
Gf_p = []
Gc = []
Fc_p = []
rhs = []
xx = []
Kplus_f_test = []
KplusBcT_p = []
Bc_nonzRow = []
KplusBcT = []
BcKplus_tmp = []
# BcK_dense = []
K_UT = []
# x_out = []
# x_out_p = []
# Lumped = []
# Lumped = []
for i in range(nSub):
K.append(load_matrix(path0,"dump_K_","",str(i),False,True,1))
K_UT.append(load_matrix(path0,"dump_K_","",str(i),False,False,1))
K_reg.append(load_matrix(path0,"dump_K_reg_","",str(i),False,True,1))
Fc.append(load_matrix(path0,"dump_Fc_","",str(i),False,False,1))
R.append(load_matrix(path0,"dump_R_","",str(i),False,False,1))
Rf.append(load_matrix(path0,"dump_Rf_","",str(i),False,False,1))
Bc.append(load_matrix(path0,"dump_Bc_","",str(i),False,False,1))
Bf.append(load_matrix(path0,"dump_Bf_","",str(i),False,False,1))
Gf_p.append(np.dot(Bf[i],Rf[i]))
# Lumped.append(load_matrix(path0,"dump_Lumped_","",str(i),False,False,1))
BcT_dense.append(load_matrix(path0,"dump_BcT_dense_","",str(i),False,False,1))
Gc.append(load_matrix(path0,"dump_Gc_","",str(i),False,False,1))
# Gf.append(load_matrix(path0,"dump_Gf_","",str(i),False,False,1))
indBc = np.abs(Bc[i]).sum(axis=1)>0
Bc_nonzRow.append( Bc[i][indBc,:])
# Fc.append( np.dot(Bc_nonzRow[i], np.linalg.solve(K_reg[i],Bc_nonzRow[i].T)))
# Lumped.append( np.dot(Bc_nonzRow[i], np.dot(K[i],Bc_nonzRow[i].T)))
rhs.append(load_matrix(path0,"dump_rhs_","",str(i),False,False,1))
# xx.append(load_matrix(path0,"dump_xxTest_","",str(i),False,False,1))
# Kplus_f_test.append(load_matrix(path0,"dump_Kplus_f_test_","",str(i),False,False,1))
# KplusBcT_p = BcKplus_List[i]
# BcK_dense.append(load_matrix(path0,"dump_BcK_dense_","",str(i),False,False,1))
# BcK_dense.append(np.dot(K[i],Bc_nonzRow[i].T).T)
Gc.append(np.dot(Bc[i], R[i]))
KplusBcT.append(load_matrix(path0,"dump_KplusBcT_","",str(i),False,False,1))
KplusBcT_p.append(np.linalg.solve(K_reg[i],Bc_nonzRow[i].T))
# BcKplus_tmp.append(np.linalg.solve(K_reg[i],Bc[i].T).T)
# x_out.append(load_matrix(path0,"dump_x_out_","",str(i),False,False,1))
Fc_p.append(np.dot(Bc_nonzRow[i],KplusBcT_p[i]))
# iK_K = np.linalg.solve(K_reg[i],K[i])
# K_iK_K = np.dot(K[i],iK_K)
# del_ = np.linalg.norm(K_iK_K - K[i] ) / np.linalg.norm(K[i])
# print(del_)
#
tmp_g = np.dot(Bc[i],np.linalg.solve(K_reg[i], rhs[i]))
tmp_e = -np.dot(R[i].T,rhs[i])
if (i == 0):
g_p = tmp_g
e_p = tmp_e;
else:
g_p += tmp_g;
e_p = np.concatenate((e_p,tmp_e))
print(' ...%d '%(i))
# gc_p = np.concatenate((g_p,e_p))
# gc_p = np.concatenate((gc_p,np.zeros(6)))
Gc_clust = load_matrix(path0,"dump_Gc_clust_","",str(0),False,False,1)
Ac_clust = load_matrix(path0,"dump_Ac_clust_","",str(0),False,True,1)
Fc_clust = load_matrix(path0,"dump_Fc_clust_","",str(0),False,True,1)
ker_GcTGc = load_matrix(path0,"dump_kerGc_","",str(0),False,False,1)
# gc = load_matrix(path0,"dump_gc_","",str(0),False,False,1)
# lam_alpha = load_matrix(path0,"dump_lam_alpha_","",str(0),False,False,1)
# lam_alpha_p = np.linalg.solve(Ac_clust, gc)
# nLam = Bc[0].shape[0]
# lam_p = lam_alpha_p[0:nLam]
## alpha_p = lam_alpha[nLam:]
# for i in range(nSub):
# print (" ! %d " % (i))
# x10 = np.linalg.solve(K_reg[i],rhs[i])
# x11 = np.linalg.solve(K_reg[i],np.dot(Bc[i].T,lam_p))
#
# print alpha_p[(6*i):(6*(i+1))]
# x2 = np.dot(R[i],alpha_p[(6*i):(6*(i+1))])
#
# x_out_p.append(x10 - x11 + x2)
# print( "||x_out - x_out_p || = %e " % np.linalg.norm(x_out[i] - x_out_p[i]))
Ac_clust_python = np.hstack((Fc_clust,Gc_clust))
Z = np.zeros((Gc_clust.shape[1],Ac_clust_python.shape[1]))
print ( Z.shape)
Ac_clust_python = np.vstack((Ac_clust_python,Z))
Gf_clust = load_matrix(path0,"dump_Gf_clust_","",str(0),False,False,1)
# test = load_matrix(path0,"dump_testXYZ_","",str(0),False,False,1)
# KpOnes= load_matrix(path0,"dump_KplusONES_","",str(0),False,False,1)
#K_regD = K_reg[0]
#frhs = rhs[0]
#xxD = xx[0]
#RD = R[0]
#for i in range(1,nSub):
# K_regD = block_diag(K_regD,K_reg[i]);
# RD = block_diag(RD,R[i]);
# frhs = np.concatenate((frhs,rhs[i]))
# xxD = np.concatenate((xxD,xx[i]))
#
for i in range(nSub - 1):
if (i == 0):
Bc_g = np.hstack((Bc[0],Bc[1]))
else:
Bc_g = np.hstack((Bc_g,Bc[i+1]))
for i in range(nSub - 1):
if (i == 0):
Bf_g = np.hstack((Bf[0],Bf[1]))
else:
Bf_g = np.hstack((Bf_g,Bf[i+1]))
for i in range(nSub - 1):
if (i == 0):
Gf_g = Gf_p[0]+ Gf_p[1]
else:
Gf_g += Gf_p[i+1]
weigth = np.loadtxt(path0+'/dump_weigth.txt')
#Fc__ = np.dot(Bc_g,np.linalg.solve(K_regD,Bc_g.T))
#
#
#gc__ = np.dot(Bc_g,np.linalg.solve(K_regD,frhs))
#ec__ = - np.dot(RD.T,frhs)
#
#gc__ = np.concatenate((gc__,ec__))
#H = ker_GcTGc
#AA0 = np.hstack((Fc__,Gc_clust))
#AB1 =
#
#
#ZZ1 = np.zeros((Gc_clust.shape[0], H.shape[1]))
#AA1 = np.vstack((ZZ1,H))
#AA01 = np.hstack((AA0,AA1))
#A0 = np.hstack((K_regD,Bc_g.T))
#
#nB = Bc_g.shape[0]
#Bc_Z = np.hstack((Bc_g,np.zeros((nB,nB))))
#
#crhs = np.zeros(nB);
#
#A = np.vstack((A0,Bc_Z))
#
#b = np.concatenate((frhs,crhs))
#
#x = np.linalg.solve(A,b)
#
#xxD = np.concatenate((xxD,crhs))
#Bc_g = np.hstack((Bc_g,Bc[2]))
#Bc_g = np.hstack((Bc_g,Bc[2]))
#BcT_dense = load_matrix(path0,"dump_BcT_dense_","",str(0),True,True,1)
#Fc_clust = load_matrix(path0,"dump_Fc_clust_","",str(0),True,True,1)
#Ac_clust = load_matrix(path0,"dump_Ac_clust_","",str(0),True,True,1)
#GcTGc = load_matrix(path0,"dump_GcTGc_clust_","",str(0),False,True,1)
#GfTGf = load_matrix(path0,"dump_GfTGf_","",str(0),False,False,1)
#iGfTGf = load_matrix(path0,"dump_iGfTGf_","",str(0),False,False,1)
#ker_Ac = load_matrix(path0,"dump_ker_Ac_","",str(0),False,False,1)
##KpBcT0 = load_matrix(path0,"dump_KplusBcT_","",str(0),False,False,1)
##KpBcT1 = load_matrix(path0,"dump_KplusBcT_","",str(1),False,False,1)
#
#
#dFc_eig = load_matrix(path0,"dump_Fc_clust_","",str(444),False,False,1)
##dFc_svd = load_matrix(path0,"dump_Fc_clust_","",str(555),False,False,1)
#dAc_eig = load_matrix(path0,"dump_Ac_clust_","",str(444),False,False,1)
##dAc_svd = load_matrix(path0,"dump_Ac_clust_","",str(555),False,False,1)
#
#
#GfTGf_ = np.zeros((GfTGf.shape[0],GfTGf.shape[0]))
#
#
#
#
#
#
#for d in range(nSub):
# GfTGf_ += np.dot(Gf[d].T,Gf[d])
#
#
#
#
#if False:
# plt.subplot(1,3,1)
# if GcTGc.shape[0] < 100:
# markersize_ = 3
# else:
# markersize_ = 0.7
# plt.spy(GcTGc, markersize=markersize_)
# plt.xlabel("nnz = %d" % (GcTGc.nonzero()[0].shape[0]))
# plt.subplot(1,3,2)
# if Fc_clust.shape[0] < 100:
# markersize = 3
# else:
# markersize = 0.7
# plt.spy(Fc_clust, markersize=markersize_)
# plt.xlabel("nnz = %d" % (Fc_clust.nonzero()[0].shape[0]))
# plt.subplot(1,3,3)
# if Ac_clust.shape[0] < 100:
# markersize_ = 3
# else:
# markersize_ = 0.7
# plt.spy(Ac_clust, markersize=markersize_)
# plt.xlabel("nnz = %d" % (Ac_clust.nonzero()[0].shape[0]))
# plt.show()
#
##Bc_from_Rt = []
##for i in range(1,14):
## Bc_from_Rt.append( load_matrix(path0,"dump_Bc_from_Rt_","",str(i),False,False,1) )
##
#
## Gc_ = load_matrix(path0,"dump_Gc_i_","",str(0),False,False,1)
#
#
#
#
##BcT_dense = load_matrix(path0,"dump_BcT_dense_","",str(0),True,True,1)
#
#
#K_test= []
#Kplus_K_test = []
#K_Kplus_K_test = []
#K_reg_test = []
#K_reg_SF = []
#x_test = []
#
#
#for i in range(4):
#
# K_test.append(load_matrix(path0,"dump_K_dense_","",str(i),False,True,1))
# K_reg_test.append(load_matrix(path0,"dump_K_reg_","",str(i),False,True,1))
# K_reg_SF.append(load_matrix(path0,"dump_K_reg_SF_","",str(i),False,True,1))
# Kplus_K_test.append(load_matrix(path0,"dump_Kplus_K_","",str(i),False,False,1))
# K_Kplus_K_test.append(load_matrix(path0,"dump_K_Kplus_K_","",str(i),False,False,1))
#
# #KKpK = np.dot(K_test[i], np.linalg.solve(K_reg_test[i],K_test[i]))
# KKpK = np.dot(K[i], np.linalg.solve(K_reg[i],K[i]))
# print "norm = %3.8e \n" % np.linalg.norm(KKpK - K[i])
#
#
#
#
#
#
#
#
#
#
#
#
#
#
#
#
#
#
#
#
#
#
#
#
#
#
#
#
#
#
#
#
#
#
##plt.spy(Fc_clust,markersize = .8);plt.show()
#
##Gc_ = load_matrix(path0,"dump_Gc_i_","",str(0),True,True,1)
#
#
#
##r = sparse.csgraph.reverse_cuthill_mckee(Ac_clust.tocsr(), symmetric_mode=True)
##Ac_clust = Ac_clust.toarray()
###
##P,L,U= scipy.linalg.lu(Ac_clust)
##nnz0 = L.nonzero()[0].shape[0] + U.nonzero()[0].shape[0]
##
##
###
###
##AcR = Ac_clust[np.ix_(r,r)]
##PR,LR,UR = scipy.linalg.lu(AcR)
##nnzR = LR.nonzero()[0].shape[0] + UR.nonzero()[0].shape[0]
###
###
##plt.subplot(2,2,1)
##plt.spy(L,markersize=0.1);
##plt.subplot(2,2,2)
##plt.spy(U,markersize=0.1);
##plt.subplot(2,2,3)
##plt.spy(LR,markersize=0.1);
##plt.subplot(2,2,4)
##plt.spy(UR,markersize=0.1);
#
##print ("nnz = %d, nnz(reordered) = %d ") % (nnz0, nnzR)
#
#
##plt.show()
#
##ker_Ac = load_matrix(path0,"dump_ker_Ac_","",str(0),False,True,1)
##ker_GcTGc = load_matrix(path0,"dump_ker_GcTGc_","",str(0),False,True,1)
##R0 = load_matrix(path0,"dump_R_","",str(0),False,True,1)
#
##Gc_H = np.dot(GcTGc.toarray(),ker_GcTGc)
#
##r = sparse.csgraph.reverse_cuthill_mckee(Ac_clust.tocsr(), symmetric_mode=True)
##Ac = Ac_clust.toarray()[np.ix_(r,r)]
##plt.subplot(1,2,1)
##plt.spy(Ac_clust ,markersize = 2.0)
##plt.subplot(1,2,2)
##plt.spy(Ac,markersize = 0.125)
#
#
#
##Fc_python_List = []
#
##if 0:
## Fc_clust_python = np.zeros((Bct_list[i].shape[0], Bct_list[i].shape[0]))
## for i in range(nSub):
## Bc = Bct_list[i].toarray()
## indBc = np.abs(Bc).sum(axis=1)>0
## Bc_red = Bc[indBc,:]
## BcKplus = BcKplus_List[i]
##
## Bf = Bf_List[i].toarray()
## indBf = np.abs(Bf).sum(axis=1)>0
## Bf_red = Bf[indBf,:]
##
## Rc = RList[i].toarray()
##
##
##
## if (i == 0):
## Gf_clust_python = np.dot(Bf,Rc)
## Gc_clust_python = np.dot(Bc,Rc)
## else:
## Gf_clust_python = np.hstack((Gf_clust_python,np.dot(Bf,Rc)))
## Gc_clust_python = np.hstack((Gc_clust_python,np.dot(Bc,Rc)))
## indBcKplus = np.abs(BcKplus).sum(axis=1)>0
## BcKplus = BcKplus[indBcKplus,:]
## BcKplus_python = np.linalg.solve(K_reg_List[i],Bc_red.T)
## BcKplus_ = np.linalg.solve(K_reg_List[i],Bc.T)
## Fc_i = np.dot(Bc_red,BcKplus_python)
## Fc_clust_python += np.dot(Bc,BcKplus_)
## Fc_python_List.append(Fc_i)
##
## for ii in range(nSub):
##
## ttt = Gc_List[ii][np.abs(Gc_List[ii]).sum(axis=1)>0,:] - GcList[ii]
## print np.linalg.norm(ttt)
##
##
## for ii in range(nSub):
## ddd0 = np.linalg.norm(Fc_python_List[ii] - Fc_List[ii])
## ddd1 = np.linalg.norm(Fc_python_List[ii])
## print "|Fc_python - Fc_myAp|/|Fc_python|",ddd0 / ddd1
##
##
## Fc_clust = load_matrix(path0,"dump_Fc_clust_","",0,False,True,1)
## Gc_clust = load_matrix(path0,"dump_Gc_clust_","",0,False,False,1)
## Gf_clust = load_matrix(path0,"dump_Gf_clust_","",0,False,False,1)
## Ac_clust = load_matrix(path0,"dump_Ac_clust_","",0,False,True,1)
## Ac_clust_python = np.hstack((Fc_clust_python,Gc_clust_python))
##
## Z = np.zeros((Gc_clust_python.shape[1],Ac_clust.shape[1]))
## print ( Z.shape)
## Ac_clust_python = np.vstack((Ac_clust_python,Z))
##
##
## ddd0 = np.linalg.norm(Fc_clust - Fc_clust_python)
## ddd1 = np.linalg.norm(Fc_clust)
## print "|Fc_clust_python - Fc_clust_myAp|/|Fc_clust_python|",ddd0 / ddd1
##
## ddd0 = np.linalg.norm(Gc_clust - Gc_clust_python)
## ddd1 = np.linalg.norm(Gc_clust)
## print "|Gc_clust_python - Gc_clust_myAp|/|Gc_clust_python|",ddd0 / ddd1
##
## ddd0 = np.linalg.norm(Gf_clust - Gf_clust_python)
## ddd1 = np.linalg.norm(Gf_clust)
## print "|Gf_clust_python - Gf_clust_myAp|/|Gf_clust_python|",ddd0 / ddd1
##
## ddd0 = np.linalg.norm(Ac_clust - Ac_clust_python)
## ddd1 = np.linalg.norm(Ac_clust)
## print "|Ac_clust_python - Ac_clust_myAp|/|Ac_clust_python|",ddd0 / ddd1
##
##K = []
#
#
#
##plt.subplot(1,2,1)
##plt.spy(Gf_clust_python,markersize=1)
##plt.subplot(1,2,2)
##plt.spy(Gf_clust,markersize=1)
##plt.show()
|
[
"numpy.abs",
"numpy.concatenate",
"numpy.zeros",
"numpy.hstack",
"scipy.sparse.csc_matrix",
"numpy.loadtxt",
"numpy.int32",
"numpy.dot",
"numpy.linalg.solve",
"numpy.vstack"
] |
[((6585, 6623), 'numpy.loadtxt', 'np.loadtxt', (["(path0 + '/dump_weigth.txt')"], {}), "(path0 + '/dump_weigth.txt')\n", (6595, 6623), True, 'import numpy as np\n'), ((5559, 5590), 'numpy.hstack', 'np.hstack', (['(Fc_clust, Gc_clust)'], {}), '((Fc_clust, Gc_clust))\n', (5568, 5590), True, 'import numpy as np\n'), ((5599, 5654), 'numpy.zeros', 'np.zeros', (['(Gc_clust.shape[1], Ac_clust_python.shape[1])'], {}), '((Gc_clust.shape[1], Ac_clust_python.shape[1]))\n', (5607, 5654), True, 'import numpy as np\n'), ((5697, 5728), 'numpy.vstack', 'np.vstack', (['(Ac_clust_python, Z)'], {}), '((Ac_clust_python, Z))\n', (5706, 5728), True, 'import numpy as np\n'), ((588, 607), 'numpy.int32', 'np.int32', (['tmp[0, 0]'], {}), '(tmp[0, 0])\n', (596, 607), True, 'import numpy as np\n'), ((622, 641), 'numpy.int32', 'np.int32', (['tmp[0, 1]'], {}), '(tmp[0, 1])\n', (630, 641), True, 'import numpy as np\n'), ((6252, 6277), 'numpy.hstack', 'np.hstack', (['(Bc[0], Bc[1])'], {}), '((Bc[0], Bc[1]))\n', (6261, 6277), True, 'import numpy as np\n'), ((6302, 6330), 'numpy.hstack', 'np.hstack', (['(Bc_g, Bc[i + 1])'], {}), '((Bc_g, Bc[i + 1]))\n', (6311, 6330), True, 'import numpy as np\n'), ((6387, 6412), 'numpy.hstack', 'np.hstack', (['(Bf[0], Bf[1])'], {}), '((Bf[0], Bf[1]))\n', (6396, 6412), True, 'import numpy as np\n'), ((6437, 6465), 'numpy.hstack', 'np.hstack', (['(Bf_g, Bf[i + 1])'], {}), '((Bf_g, Bf[i + 1]))\n', (6446, 6465), True, 'import numpy as np\n'), ((830, 860), 'numpy.concatenate', 'np.concatenate', (['(I, J[logInd])'], {}), '((I, J[logInd]))\n', (844, 860), True, 'import numpy as np\n'), ((876, 906), 'numpy.concatenate', 'np.concatenate', (['(J, I[logInd])'], {}), '((J, I[logInd]))\n', (890, 906), True, 'import numpy as np\n'), ((922, 952), 'numpy.concatenate', 'np.concatenate', (['(V, V[logInd])'], {}), '((V, V[logInd]))\n', (936, 952), True, 'import numpy as np\n'), ((2546, 2566), 'numpy.dot', 'np.dot', (['Bf[i]', 'Rf[i]'], {}), '(Bf[i], Rf[i])\n', (2552, 2566), True, 'import numpy as np\n'), ((3596, 3615), 'numpy.dot', 'np.dot', (['Bc[i]', 'R[i]'], {}), '(Bc[i], R[i])\n', (3602, 3615), True, 'import numpy as np\n'), ((3729, 3771), 'numpy.linalg.solve', 'np.linalg.solve', (['K_reg[i]', 'Bc_nonzRow[i].T'], {}), '(K_reg[i], Bc_nonzRow[i].T)\n', (3744, 3771), True, 'import numpy as np\n'), ((3947, 3983), 'numpy.dot', 'np.dot', (['Bc_nonzRow[i]', 'KplusBcT_p[i]'], {}), '(Bc_nonzRow[i], KplusBcT_p[i])\n', (3953, 3983), True, 'import numpy as np\n'), ((4192, 4225), 'numpy.linalg.solve', 'np.linalg.solve', (['K_reg[i]', 'rhs[i]'], {}), '(K_reg[i], rhs[i])\n', (4207, 4225), True, 'import numpy as np\n'), ((4244, 4266), 'numpy.dot', 'np.dot', (['R[i].T', 'rhs[i]'], {}), '(R[i].T, rhs[i])\n', (4250, 4266), True, 'import numpy as np\n'), ((4405, 4433), 'numpy.concatenate', 'np.concatenate', (['(e_p, tmp_e)'], {}), '((e_p, tmp_e))\n', (4419, 4433), True, 'import numpy as np\n'), ((1008, 1052), 'scipy.sparse.csc_matrix', 'sparse.csc_matrix', (['(V, (I, J))'], {'shape': '(n, m)'}), '((V, (I, J)), shape=(n, m))\n', (1025, 1052), False, 'from scipy import sparse\n'), ((2900, 2913), 'numpy.abs', 'np.abs', (['Bc[i]'], {}), '(Bc[i])\n', (2906, 2913), True, 'import numpy as np\n'), ((1174, 1218), 'scipy.sparse.csc_matrix', 'sparse.csc_matrix', (['(V, (I, J))'], {'shape': '(n, m)'}), '((V, (I, J)), shape=(n, m))\n', (1191, 1218), False, 'from scipy import sparse\n')]
|
from galaxy_analysis.plot.plot_styles import *
import matplotlib.pyplot as plt
import os, sys
import numpy as np
from mpl_toolkits.axes_grid1 import make_axes_locatable
def bins_from_centers(x):
xnew = np.zeros(len(x) + 1)
dx = np.zeros(len(x) + 1)
dx[1:-1] = x[1:] - x[:-1]
dx[0] = dx[1]
dx[-1] = dx[-2]
xnew[:-1] = x - 0.5*dx[:-1]
xnew[-1] = x[-1] + 0.5*dx[-1]
return xnew
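# Worked example (added, illustrative): centers [0, 1, 2] become the bin edges
# [-0.5, 0.5, 1.5, 2.5]; the spacing at each end reuses the nearest interior spacing.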
def plot_2d_histogram(datafile = 'all_runs_d_12.20.dat'):
ylabel = r'log(H$^{-}$ Photodetachment Scale Factor)'
xlabel = "log(LW Scale Factor)"
data = np.genfromtxt(datafile) # names = True)
k27 = data[:,0]
LW = data[:,1]
k27_centers = np.linspace(np.log10(np.min(k27)), np.log10(np.max(k27)),
int(np.sqrt(np.size(k27) )))
k27_vals = bins_from_centers(k27_centers)
LW_centers = np.linspace(np.log10(np.min(LW)), np.log10(np.max(LW)),
int(np.sqrt(np.size(LW))))
LW_vals = bins_from_centers(LW_centers)
k27_mesh, LW_mesh = np.meshgrid(LW_vals, k27_vals)
k27_center_mesh, LW_center_mesh = np.meshgrid(LW_centers, k27_centers)
#f_H2[data['k27'] == 1.58489319] = 100.0 # flag to figure out orientation
f_H2 = data[:,2]
z_mesh = f_H2.reshape( int(np.sqrt(np.size(k27))), int(np.sqrt(np.size(LW))))
#z_mesh = z[:-1,:-1]
fig, ax = plt.subplots()
fig.set_size_inches(8,8)
img1 = ax.pcolormesh(10.0**(LW_mesh),
10.0**(k27_mesh),
np.log10(z_mesh.T), cmap = 'magma',
vmin = -9,
vmax = -2.8)
ax.semilogx()
ax.semilogy()
ax.set_xlabel(xlabel)
ax.set_ylabel(ylabel)
divider = make_axes_locatable(ax)
cax1 = divider.append_axes('right', size = '5%', pad = 0.05)
fig.colorbar(img1, cax=cax1, label = r'log(f$_{\rm H_2}$)')
ax.contour( 10.**(LW_center_mesh), 10.0**(k27_center_mesh), np.log10(z_mesh.T),
levels = [-8,-7,-6,-5,-4,-3], colors = 'black',
linewidths = 3, linestyles = '-.')
ax.scatter( [1,1,100,100], [1,100,1,100], s = 250, marker = "*", color = "white")
plt.minorticks_on()
plt.tight_layout(h_pad = 0, w_pad = 0.05)
fig.savefig("fH2.png")
plt.close()
f_H2 = data[:,3]
z_mesh= f_H2.reshape( int(np.sqrt(np.size(k27))), int(np.sqrt(np.size(LW))))
#z_mesh = z[:-1,:-1]
fig, ax = plt.subplots()
fig.set_size_inches(8,8)
img1 = ax.pcolormesh(10.0**(LW_mesh),
10.0**(k27_mesh),
np.log10(z_mesh.T), cmap = 'RdYlBu_r',
vmin = np.min(np.log10(z_mesh)),
vmax = np.max(np.log10(z_mesh)))
ax.semilogx()
ax.semilogy()
ax.set_xlabel(xlabel)
ax.set_ylabel(ylabel)
divider = make_axes_locatable(ax)
cax1 = divider.append_axes('right', size = '5%', pad = 0.05)
fig.colorbar(img1, cax=cax1, label = r'log(Temperature [K])')
plt.minorticks_on()
plt.tight_layout(h_pad = 0, w_pad = 0.05)
fig.savefig("T.png")
plt.close()
return
if __name__ == "__main__":
plot_2d_histogram( datafile = str(sys.argv[1]))
|
[
"mpl_toolkits.axes_grid1.make_axes_locatable",
"matplotlib.pyplot.tight_layout",
"numpy.size",
"numpy.meshgrid",
"matplotlib.pyplot.close",
"numpy.genfromtxt",
"numpy.min",
"numpy.max",
"matplotlib.pyplot.minorticks_on",
"numpy.log10",
"matplotlib.pyplot.subplots"
] |
[((580, 603), 'numpy.genfromtxt', 'np.genfromtxt', (['datafile'], {}), '(datafile)\n', (593, 603), True, 'import numpy as np\n'), ((1047, 1077), 'numpy.meshgrid', 'np.meshgrid', (['LW_vals', 'k27_vals'], {}), '(LW_vals, k27_vals)\n', (1058, 1077), True, 'import numpy as np\n'), ((1116, 1152), 'numpy.meshgrid', 'np.meshgrid', (['LW_centers', 'k27_centers'], {}), '(LW_centers, k27_centers)\n', (1127, 1152), True, 'import numpy as np\n'), ((1375, 1389), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {}), '()\n', (1387, 1389), True, 'import matplotlib.pyplot as plt\n'), ((1743, 1766), 'mpl_toolkits.axes_grid1.make_axes_locatable', 'make_axes_locatable', (['ax'], {}), '(ax)\n', (1762, 1766), False, 'from mpl_toolkits.axes_grid1 import make_axes_locatable\n'), ((2187, 2206), 'matplotlib.pyplot.minorticks_on', 'plt.minorticks_on', ([], {}), '()\n', (2204, 2206), True, 'import matplotlib.pyplot as plt\n'), ((2212, 2249), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {'h_pad': '(0)', 'w_pad': '(0.05)'}), '(h_pad=0, w_pad=0.05)\n', (2228, 2249), True, 'import matplotlib.pyplot as plt\n'), ((2285, 2296), 'matplotlib.pyplot.close', 'plt.close', ([], {}), '()\n', (2294, 2296), True, 'import matplotlib.pyplot as plt\n'), ((2440, 2454), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {}), '()\n', (2452, 2454), True, 'import matplotlib.pyplot as plt\n'), ((2853, 2876), 'mpl_toolkits.axes_grid1.make_axes_locatable', 'make_axes_locatable', (['ax'], {}), '(ax)\n', (2872, 2876), False, 'from mpl_toolkits.axes_grid1 import make_axes_locatable\n'), ((3013, 3032), 'matplotlib.pyplot.minorticks_on', 'plt.minorticks_on', ([], {}), '()\n', (3030, 3032), True, 'import matplotlib.pyplot as plt\n'), ((3038, 3075), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {'h_pad': '(0)', 'w_pad': '(0.05)'}), '(h_pad=0, w_pad=0.05)\n', (3054, 3075), True, 'import matplotlib.pyplot as plt\n'), ((3109, 3120), 'matplotlib.pyplot.close', 'plt.close', ([], {}), '()\n', (3118, 3120), True, 'import matplotlib.pyplot as plt\n'), ((1530, 1548), 'numpy.log10', 'np.log10', (['z_mesh.T'], {}), '(z_mesh.T)\n', (1538, 1548), True, 'import numpy as np\n'), ((1961, 1979), 'numpy.log10', 'np.log10', (['z_mesh.T'], {}), '(z_mesh.T)\n', (1969, 1979), True, 'import numpy as np\n'), ((2595, 2613), 'numpy.log10', 'np.log10', (['z_mesh.T'], {}), '(z_mesh.T)\n', (2603, 2613), True, 'import numpy as np\n'), ((711, 722), 'numpy.min', 'np.min', (['k27'], {}), '(k27)\n', (717, 722), True, 'import numpy as np\n'), ((734, 745), 'numpy.max', 'np.max', (['k27'], {}), '(k27)\n', (740, 745), True, 'import numpy as np\n'), ((889, 899), 'numpy.min', 'np.min', (['LW'], {}), '(LW)\n', (895, 899), True, 'import numpy as np\n'), ((911, 921), 'numpy.max', 'np.max', (['LW'], {}), '(LW)\n', (917, 921), True, 'import numpy as np\n'), ((787, 799), 'numpy.size', 'np.size', (['k27'], {}), '(k27)\n', (794, 799), True, 'import numpy as np\n'), ((963, 974), 'numpy.size', 'np.size', (['LW'], {}), '(LW)\n', (970, 974), True, 'import numpy as np\n'), ((1292, 1304), 'numpy.size', 'np.size', (['k27'], {}), '(k27)\n', (1299, 1304), True, 'import numpy as np\n'), ((1320, 1331), 'numpy.size', 'np.size', (['LW'], {}), '(LW)\n', (1327, 1331), True, 'import numpy as np\n'), ((2357, 2369), 'numpy.size', 'np.size', (['k27'], {}), '(k27)\n', (2364, 2369), True, 'import numpy as np\n'), ((2385, 2396), 'numpy.size', 'np.size', (['LW'], {}), '(LW)\n', (2392, 2396), True, 'import numpy as np\n'), ((2673, 2689), 'numpy.log10', 'np.log10', (['z_mesh'], {}), '(z_mesh)\n', (2681, 2689), True, 'import numpy as np\n'), ((2731, 2747), 'numpy.log10', 'np.log10', (['z_mesh'], {}), '(z_mesh)\n', (2739, 2747), True, 'import numpy as np\n')]
|
# Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
# SPDX-License-Identifier: Apache-2.0.
import argparse
import boto3
# Parse required options
parser = argparse.ArgumentParser(description='Creates all required AWS CodeBuild projects for a repo')
parser.add_argument('project', type=str, help='The name of the repo to create the projects for')
parser.add_argument('--github-account', type=str, dest='github_account', default='awslabs', help='The GitHub account that owns the repo')
parser.add_argument('--profile', type=str, default='default', help='The profile in ~/.aws/credentials to use when creating the jobs')
args = parser.parse_args()
# The template for the arguments to be passed to create_project
CREATE_PARAM_TEMPLATE = {
'name': '{project}-{build}',
'source': {
'type': 'GITHUB',
'location': 'https://github.com/{account}/{project}.git',
'gitCloneDepth': 1,
'buildspec': 'codebuild/{build}.yml',
'auth': {
'type': 'OAUTH',
},
'reportBuildStatus': True,
},
'artifacts': {
'type': 'NO_ARTIFACTS',
},
'environment': None,
'serviceRole': 'arn:aws:iam::123124136734:role/CodeBuildServiceRole',
'badgeEnabled': False,
}
# The common environment objects to feed to CodeBuild
ENVIRONMENTS = {
'linux': {
'type': 'LINUX_CONTAINER',
'image': 'aws/codebuild/ubuntu-base:14.04',
'computeType': 'BUILD_GENERAL1_SMALL',
'environmentVariables': [],
'privilegedMode': False,
},
'windows-2017': {
'type': 'WINDOWS_CONTAINER',
'image': '123124136734.dkr.ecr.us-east-1.amazonaws.com/codebulid-windows-vs-2017:latest',
'computeType': 'BUILD_GENERAL1_MEDIUM',
'environmentVariables': [],
'privilegedMode': False,
},
'windows-2015': {
'type': 'WINDOWS_CONTAINER',
'image': '123124136734.dkr.ecr.us-east-1.amazonaws.com/codebulid-windows-vs-2015:latest',
'computeType': 'BUILD_GENERAL1_MEDIUM',
'environmentVariables': [],
'privilegedMode': False,
},
}
# The list of all of our build configs paired with their environments
BUILD_CONFIGS = [
{
'build': 'linux-clang3-x64',
'env': 'linux'
},
{
'build': 'linux-clang6-x64',
'env': 'linux',
'privileged': True
},
{
'build': 'linux-gcc-4x-x64',
'env': 'linux'
},
{
'build': 'linux-gcc-4x-x86',
'env': 'linux'
},
{
'build': 'linux-gcc-5x-x64',
'env': 'linux'
},
{
'build': 'linux-gcc-6x-x64',
'env': 'linux'
},
{
'build': 'linux-gcc-7x-x64',
'env': 'linux'
},
{
'build': 'windows-msvc-2017',
'env': 'windows-2017'
},
{
'build': 'windows-msvc-2015',
'env': 'windows-2015'
},
{
'build': 'windows-msvc-2015-x86',
'env': 'windows-2015'
},
]
# Fully populate the BUILDS list with all final build objects
BUILDS = {}
for config in BUILD_CONFIGS:
build_name = config['build']
build = dict(CREATE_PARAM_TEMPLATE)
env = dict(ENVIRONMENTS[config['env']])
if 'privileged' in config:
env['privilegedMode'] = config['privileged']
build['environment'] = env
sub_params = {
'project': args.project,
'build': build_name,
'account': args.github_account,
}
# Replace all templates with the values above
def do_replace(obj):
if isinstance(obj, dict):
for key, value in obj.items():
obj[key] = do_replace(value)
return obj
elif isinstance(obj, str):
return obj.format(**sub_params)
else:
return obj
do_replace(build)
BUILDS['{}-{}'.format(args.project, build_name)] = build
# Connect to codebuild
session = boto3.Session(profile_name=args.profile, region_name='us-east-1')
codebuild = session.client('codebuild')
# Find out which projects already exist and should be updated, and which must be created
all_project_names = list(BUILDS.keys())
existing_projects = codebuild.batch_get_projects(names=all_project_names)
new_projects = existing_projects['projectsNotFound']
existing_projects = [project['name'] for project in existing_projects['projects']]
# Actually create the projects
for build_name, build in BUILDS.items():
if build_name in new_projects:
print('{}: Creating'.format(build_name))
codebuild.create_project(**build)
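        # Newly created projects also get a webhook so GitHub pushes trigger builds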
codebuild.create_webhook(projectName=build_name)
elif build_name in existing_projects:
print('{}: Updating'.format(build_name))
codebuild.update_project(**build)
else:
assert False
|
[
"argparse.ArgumentParser",
"boto3.Session"
] |
[((173, 271), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""Creates all required AWS CodeBuild projects for a repo"""'}), "(description=\n 'Creates all required AWS CodeBuild projects for a repo')\n", (196, 271), False, 'import argparse\n'), ((3914, 3979), 'boto3.Session', 'boto3.Session', ([], {'profile_name': 'args.profile', 'region_name': '"""us-east-1"""'}), "(profile_name=args.profile, region_name='us-east-1')\n", (3927, 3979), False, 'import boto3\n')]
|
# Imports ---------------------------------------------------------------------
# Python
import argparse
import joblib
import yaml
import os.path as osp
from collections import defaultdict
import os
# PyTorch
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.autograd import Variable
from torch import autograd
from torch.optim import Adam
# NumPy
import numpy as np
from numpy import array
from numpy.random import choice, randint
# Model Building
from gen_models.attentive_vae import AttentiveVAE
import rlkit.torch.pytorch_util as ptu
# Data
from observations import multi_mnist
from torch.utils.data import DataLoader, TensorDataset
# Logging
from rlkit.core import logger
from rlkit.launchers.launcher_util import setup_logger, set_seed
from rlkit.core.vistools import generate_gif, save_pytorch_tensor_as_img
import sys
def experiment(exp_specs):
ptu.set_gpu_mode(exp_specs['use_gpu'])
# Set up logging ----------------------------------------------------------
exp_id = exp_specs['exp_id']
exp_prefix = exp_specs['exp_name']
seed = exp_specs['seed']
set_seed(seed)
setup_logger(exp_prefix=exp_prefix, exp_id=exp_id, variant=exp_specs)
# Prep the data -----------------------------------------------------------
path = 'junk_vis/debug_att_vae_shallower_48_64_dim_0p1_kl_stronger_seg_conv'
(X_train, Y_train), (X_test, Y_test) = multi_mnist(path, max_digits=2, canvas_size=48, seed=42, use_max=False)
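    # Encode the digit count per canvas (0, 1 or 2) as a two-element indicator vector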
convert_dict = {0: [0.,0.], 1: [1.,0.], 2: [1.,1.]}
Num_train = np.array([convert_dict[a.shape[0]] for a in Y_train])
Num_test = np.array([convert_dict[a.shape[0]] for a in Y_test])
X_train = X_train[:,None,...]
X_test = X_test[:,None,...]
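    # Scale pixel values from [0, 255] to [0, 1]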
X_train, X_test = torch.FloatTensor(X_train)/255.0, torch.FloatTensor(X_test)/255.0
mask_train, mask_test = torch.FloatTensor(Num_train), torch.FloatTensor(Num_test)
train_ds = TensorDataset(X_train, Num_train)
val_ds = TensorDataset(X_test, Num_test)
# Model Definition --------------------------------------------------------
model = AttentiveVAE(
[1, 48, 48],
exp_specs['vae_specs']['z_dim'],
exp_specs['vae_specs']['x_encoder_specs'],
exp_specs['vae_specs']['z_seg_conv_specs'],
exp_specs['vae_specs']['z_seg_fc_specs'],
exp_specs['vae_specs']['z_obj_conv_specs'],
exp_specs['vae_specs']['z_obj_fc_specs'],
exp_specs['vae_specs']['z_seg_recon_fc_specs'],
exp_specs['vae_specs']['z_seg_recon_upconv_specs'],
exp_specs['vae_specs']['z_obj_recon_fc_specs'],
exp_specs['vae_specs']['z_obj_recon_upconv_specs'],
exp_specs['vae_specs']['recon_upconv_part_specs']
)
if ptu.gpu_enabled():
model.cuda()
# Optimizer ---------------------------------------------------------------
model_optim = Adam(model.parameters(), lr=float(exp_specs['model_lr']), weight_decay=float(exp_specs['model_wd']))
# -------------------------------------------------------------------------
global_iter = 0
for epoch in range(exp_specs['epochs']):
train_loader = DataLoader(train_ds, batch_size=exp_specs['batch_size'], shuffle=True, num_workers=4, pin_memory=False, drop_last=True)
for iter_num, img_batch in enumerate(train_loader):
img_batch, num_batch = img_batch[0], img_batch[1]
if ptu.gpu_enabled(): img_batch = img_batch.cuda()
what_means, what_log_covs, where_means, where_log_covs, masks, recon_mean, recon_log_cov = model(img_batch, num_batch)
elbo, KL = model.compute_ELBO(
what_means + where_means,
what_log_covs + where_log_covs,
recon_mean,
recon_log_cov,
img_batch,
average_over_batch=True
)
loss = -1. * elbo
loss = loss + 1. * sum([m.mean() for m in masks])
loss.backward()
model_optim.step()
if global_iter % exp_specs['freq_val'] == 0:
with torch.no_grad():
print('\nValidating Iter %d...' % global_iter)
model.eval()
idxs = np.random.choice(int(X_test.size(0)), size=exp_specs['batch_size'], replace=False)
img_batch, num_batch = X_test[idxs], Num_test[idxs]
if ptu.gpu_enabled(): img_batch = img_batch.cuda()
what_means, what_log_covs, where_means, where_log_covs, masks, recon_mean, recon_log_cov = model(img_batch, num_batch)
elbo, KL = model.compute_ELBO(
what_means + where_means,
what_log_covs + where_log_covs,
recon_mean,
recon_log_cov,
img_batch,
average_over_batch=True
)
mse = ((recon_mean - img_batch)**2).mean()
print('ELBO:\t%.4f' % elbo)
print('MSE:\t%.4f' % mse)
print('KL:\t%.4f' % KL)
for i in range(1):
save_pytorch_tensor_as_img(img_batch[i].data.cpu(), os.path.join(path, '%d_%d_img.png'%(global_iter, i)))
save_pytorch_tensor_as_img(recon_mean[i].data.cpu(), os.path.join(path, '%d_%d_recon.png'%(global_iter, i)))
save_pytorch_tensor_as_img(masks[0][i].data.cpu(), os.path.join(path, '%d_%d_mask_0.png'%(global_iter, i)))
# save_pytorch_tensor_as_img(masks[1][i].data.cpu(), os.path.join(path, '%d_%d_mask_1.png'%(global_iter, i)))
model.train()
global_iter += 1
if __name__ == '__main__':
# Arguments
parser = argparse.ArgumentParser()
parser.add_argument('-e', '--experiment', help='experiment specification file')
args = parser.parse_args()
with open(args.experiment, 'r') as spec_file:
spec_string = spec_file.read()
exp_specs = yaml.load(spec_string)
experiment(exp_specs)
|
[
"yaml.load",
"argparse.ArgumentParser",
"torch.utils.data.DataLoader",
"rlkit.torch.pytorch_util.gpu_enabled",
"observations.multi_mnist",
"rlkit.launchers.launcher_util.set_seed",
"torch.FloatTensor",
"numpy.array",
"torch.utils.data.TensorDataset",
"rlkit.torch.pytorch_util.set_gpu_mode",
"gen_models.attentive_vae.AttentiveVAE",
"torch.no_grad",
"os.path.join",
"rlkit.launchers.launcher_util.setup_logger"
] |
[((906, 944), 'rlkit.torch.pytorch_util.set_gpu_mode', 'ptu.set_gpu_mode', (["exp_specs['use_gpu']"], {}), "(exp_specs['use_gpu'])\n", (922, 944), True, 'import rlkit.torch.pytorch_util as ptu\n'), ((1130, 1144), 'rlkit.launchers.launcher_util.set_seed', 'set_seed', (['seed'], {}), '(seed)\n', (1138, 1144), False, 'from rlkit.launchers.launcher_util import setup_logger, set_seed\n'), ((1149, 1218), 'rlkit.launchers.launcher_util.setup_logger', 'setup_logger', ([], {'exp_prefix': 'exp_prefix', 'exp_id': 'exp_id', 'variant': 'exp_specs'}), '(exp_prefix=exp_prefix, exp_id=exp_id, variant=exp_specs)\n', (1161, 1218), False, 'from rlkit.launchers.launcher_util import setup_logger, set_seed\n'), ((1424, 1495), 'observations.multi_mnist', 'multi_mnist', (['path'], {'max_digits': '(2)', 'canvas_size': '(48)', 'seed': '(42)', 'use_max': '(False)'}), '(path, max_digits=2, canvas_size=48, seed=42, use_max=False)\n', (1435, 1495), False, 'from observations import multi_mnist\n'), ((1568, 1621), 'numpy.array', 'np.array', (['[convert_dict[a.shape[0]] for a in Y_train]'], {}), '([convert_dict[a.shape[0]] for a in Y_train])\n', (1576, 1621), True, 'import numpy as np\n'), ((1637, 1689), 'numpy.array', 'np.array', (['[convert_dict[a.shape[0]] for a in Y_test]'], {}), '([convert_dict[a.shape[0]] for a in Y_test])\n', (1645, 1689), True, 'import numpy as np\n'), ((1945, 1978), 'torch.utils.data.TensorDataset', 'TensorDataset', (['X_train', 'Num_train'], {}), '(X_train, Num_train)\n', (1958, 1978), False, 'from torch.utils.data import DataLoader, TensorDataset\n'), ((1992, 2023), 'torch.utils.data.TensorDataset', 'TensorDataset', (['X_test', 'Num_test'], {}), '(X_test, Num_test)\n', (2005, 2023), False, 'from torch.utils.data import DataLoader, TensorDataset\n'), ((2117, 2679), 'gen_models.attentive_vae.AttentiveVAE', 'AttentiveVAE', (['[1, 48, 48]', "exp_specs['vae_specs']['z_dim']", "exp_specs['vae_specs']['x_encoder_specs']", "exp_specs['vae_specs']['z_seg_conv_specs']", "exp_specs['vae_specs']['z_seg_fc_specs']", "exp_specs['vae_specs']['z_obj_conv_specs']", "exp_specs['vae_specs']['z_obj_fc_specs']", "exp_specs['vae_specs']['z_seg_recon_fc_specs']", "exp_specs['vae_specs']['z_seg_recon_upconv_specs']", "exp_specs['vae_specs']['z_obj_recon_fc_specs']", "exp_specs['vae_specs']['z_obj_recon_upconv_specs']", "exp_specs['vae_specs']['recon_upconv_part_specs']"], {}), "([1, 48, 48], exp_specs['vae_specs']['z_dim'], exp_specs[\n 'vae_specs']['x_encoder_specs'], exp_specs['vae_specs'][\n 'z_seg_conv_specs'], exp_specs['vae_specs']['z_seg_fc_specs'],\n exp_specs['vae_specs']['z_obj_conv_specs'], exp_specs['vae_specs'][\n 'z_obj_fc_specs'], exp_specs['vae_specs']['z_seg_recon_fc_specs'],\n exp_specs['vae_specs']['z_seg_recon_upconv_specs'], exp_specs[\n 'vae_specs']['z_obj_recon_fc_specs'], exp_specs['vae_specs'][\n 'z_obj_recon_upconv_specs'], exp_specs['vae_specs'][\n 'recon_upconv_part_specs'])\n", (2129, 2679), False, 'from gen_models.attentive_vae import AttentiveVAE\n'), ((2751, 2768), 'rlkit.torch.pytorch_util.gpu_enabled', 'ptu.gpu_enabled', ([], {}), '()\n', (2766, 2768), True, 'import rlkit.torch.pytorch_util as ptu\n'), ((5883, 5908), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (5906, 5908), False, 'import argparse\n'), ((1872, 1900), 'torch.FloatTensor', 'torch.FloatTensor', (['Num_train'], {}), '(Num_train)\n', (1889, 1900), False, 'import torch\n'), ((1902, 1929), 'torch.FloatTensor', 'torch.FloatTensor', (['Num_test'], {}), '(Num_test)\n', (1919, 1929), False, 'import 
torch\n'), ((3160, 3283), 'torch.utils.data.DataLoader', 'DataLoader', (['train_ds'], {'batch_size': "exp_specs['batch_size']", 'shuffle': '(True)', 'num_workers': '(4)', 'pin_memory': '(False)', 'drop_last': '(True)'}), "(train_ds, batch_size=exp_specs['batch_size'], shuffle=True,\n num_workers=4, pin_memory=False, drop_last=True)\n", (3170, 3283), False, 'from torch.utils.data import DataLoader, TensorDataset\n'), ((6133, 6155), 'yaml.load', 'yaml.load', (['spec_string'], {}), '(spec_string)\n', (6142, 6155), False, 'import yaml\n'), ((1778, 1804), 'torch.FloatTensor', 'torch.FloatTensor', (['X_train'], {}), '(X_train)\n', (1795, 1804), False, 'import torch\n'), ((1812, 1837), 'torch.FloatTensor', 'torch.FloatTensor', (['X_test'], {}), '(X_test)\n', (1829, 1837), False, 'import torch\n'), ((3417, 3434), 'rlkit.torch.pytorch_util.gpu_enabled', 'ptu.gpu_enabled', ([], {}), '()\n', (3432, 3434), True, 'import rlkit.torch.pytorch_util as ptu\n'), ((4100, 4115), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (4113, 4115), False, 'import torch\n'), ((4423, 4440), 'rlkit.torch.pytorch_util.gpu_enabled', 'ptu.gpu_enabled', ([], {}), '()\n', (4438, 4440), True, 'import rlkit.torch.pytorch_util as ptu\n'), ((5295, 5349), 'os.path.join', 'os.path.join', (['path', "('%d_%d_img.png' % (global_iter, i))"], {}), "(path, '%d_%d_img.png' % (global_iter, i))\n", (5307, 5349), False, 'import os\n'), ((5426, 5482), 'os.path.join', 'os.path.join', (['path', "('%d_%d_recon.png' % (global_iter, i))"], {}), "(path, '%d_%d_recon.png' % (global_iter, i))\n", (5438, 5482), False, 'import os\n'), ((5557, 5614), 'os.path.join', 'os.path.join', (['path', "('%d_%d_mask_0.png' % (global_iter, i))"], {}), "(path, '%d_%d_mask_0.png' % (global_iter, i))\n", (5569, 5614), False, 'import os\n')]
|
import sys, os, traceback
from os import path
import utils as utils
from IBMi import IBMi
CONFIG_PATH="./config.json"
def get_configuration(name):
for cfg in utils.read_file_json(CONFIG_PATH):
if cfg["name"] == name: return cfg
return None
def append_configuration(new_data):
if not path.exists(CONFIG_PATH):
utils.log("{} not found. Creating new config file...".format(CONFIG_PATH))
utils.write_file_json(CONFIG_PATH, [])
elif get_configuration(new_data["name"]):
utils.log("ERROR: Configuration already exists by this name.")
exit(1)
utils.write_file_json(CONFIG_PATH, utils.read_file_json(CONFIG_PATH) + [new_data])
return new_data
def new_configuration():
utils.log("Creating new configuration...")
return {
"name": utils.required_input(" Enter name for this configuration: "),
"host": utils.required_input(" Enter IBMi host: "),
"library": utils.required_input(" Enter library to export: "),
"output": utils.required_input(" Enter output directory path: "),
"formatting": utils.bool_input(" Inject additional formatting into source?", is_req=True),
}
def get_credentials(config_name, host):
utils.log("Fetching credentials for configuration '{}'...".format(config_name))
return { 'user': utils.required_input(" Enter user for host '{}': ".format(host)), 'pw': utils.required_pass(" Enter password: ") }
def new_library(args):
export_library(args + [append_configuration(new_configuration())["name"]])
def export_library(args):
config = get_configuration(args[1])
lib = config["library"]
if "--creds" in args:
creds_idx = args.index("--creds")
if creds_idx+2 > len(args):
print("Not enough arguments for credentials flag. --creds <user> <password>")
exit(1)
creds = {'user': args[creds_idx+1], 'pw': args[creds_idx+2]}
else:
print("Credentials not provided. --creds <user> <password>\nPrompting for credentials...")
creds = get_credentials(config["name"], config["host"])
ibmi = IBMi(out_path=config["output"])
ibmi.connect(config["host"])
try:
ibmi.login(creds)
lib_data = ibmi.get_library_data(lib)
ibmi.write_file(lib_data, '{}/lib_data'.format(lib), ext='json')
ibmi.generate_repo(lib_data)
except Exception as e:
utils.log("Exception occurred. Please yell at the programmer ; {}".format(e))
traceback.print_exc()
def print_help(args):
print("\n".join([
"IBMi-lib-repo HELP:",
" [-e <library name>] [--creds <user> <password>] --> Re-export an existing library",
" [-h] --> Display help information",
" [-n] [--creds <user> <password>] --> Setup a new library"
]))
def get_commands():
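    # Each tuple is (flag, expected arg count, handler); process_args currently uses only the flag and handler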
return [("-e", 1, export_library), ("-h", 0, print_help), ("-n", 0, new_library)]
def process_args(args):
if len(args) == 0:
print("Not enough arguments passed.")
return False
for cmd in get_commands():
if args[0] == cmd[0]:
cmd[2](args)
return True
print("Invalid argument: '{}' Not found.".format(args[0]))
return False
def main():
utils.log("Program started.")
if not process_args(sys.argv[1:]): exit(1)
if __name__ == "__main__": main()
|
[
"IBMi.IBMi",
"utils.required_input",
"traceback.print_exc",
"utils.read_file_json",
"os.path.exists",
"utils.bool_input",
"utils.write_file_json",
"utils.required_pass",
"utils.log"
] |
[((164, 197), 'utils.read_file_json', 'utils.read_file_json', (['CONFIG_PATH'], {}), '(CONFIG_PATH)\n', (184, 197), True, 'import utils as utils\n'), ((732, 774), 'utils.log', 'utils.log', (['"""Creating new configuration..."""'], {}), "('Creating new configuration...')\n", (741, 774), True, 'import utils as utils\n'), ((2100, 2131), 'IBMi.IBMi', 'IBMi', ([], {'out_path': "config['output']"}), "(out_path=config['output'])\n", (2104, 2131), False, 'from IBMi import IBMi\n'), ((3281, 3310), 'utils.log', 'utils.log', (['"""Program started."""'], {}), "('Program started.')\n", (3290, 3310), True, 'import utils as utils\n'), ((306, 330), 'os.path.exists', 'path.exists', (['CONFIG_PATH'], {}), '(CONFIG_PATH)\n', (317, 330), False, 'from os import path\n'), ((423, 461), 'utils.write_file_json', 'utils.write_file_json', (['CONFIG_PATH', '[]'], {}), '(CONFIG_PATH, [])\n', (444, 461), True, 'import utils as utils\n'), ((802, 863), 'utils.required_input', 'utils.required_input', (['""" Enter name for this configuration: """'], {}), "(' Enter name for this configuration: ')\n", (822, 863), True, 'import utils as utils\n'), ((879, 922), 'utils.required_input', 'utils.required_input', (['""" Enter IBMi host: """'], {}), "(' Enter IBMi host: ')\n", (899, 922), True, 'import utils as utils\n'), ((941, 992), 'utils.required_input', 'utils.required_input', (['""" Enter library to export: """'], {}), "(' Enter library to export: ')\n", (961, 992), True, 'import utils as utils\n'), ((1010, 1065), 'utils.required_input', 'utils.required_input', (['""" Enter output directory path: """'], {}), "(' Enter output directory path: ')\n", (1030, 1065), True, 'import utils as utils\n'), ((1087, 1163), 'utils.bool_input', 'utils.bool_input', (['""" Inject additional formatting into source?"""'], {'is_req': '(True)'}), "(' Inject additional formatting into source?', is_req=True)\n", (1103, 1163), True, 'import utils as utils\n'), ((1390, 1431), 'utils.required_pass', 'utils.required_pass', (['""" Enter password: """'], {}), "(' Enter password: ')\n", (1409, 1431), True, 'import utils as utils\n'), ((516, 578), 'utils.log', 'utils.log', (['"""ERROR: Configuration already exists by this name."""'], {}), "('ERROR: Configuration already exists by this name.')\n", (525, 578), True, 'import utils as utils\n'), ((634, 667), 'utils.read_file_json', 'utils.read_file_json', (['CONFIG_PATH'], {}), '(CONFIG_PATH)\n', (654, 667), True, 'import utils as utils\n'), ((2477, 2498), 'traceback.print_exc', 'traceback.print_exc', ([], {}), '()\n', (2496, 2498), False, 'import sys, os, traceback\n')]
|
"""
This is extracted from the unmaintained https://github.com/jazzband/django-floppyforms to provide a datalist widget.
It has not been cleaned up yet.
"""
from django import forms
from django.template import Context, loader
from django.utils import formats
from django.utils.encoding import force_text
class DictContext(dict):
pass
REQUIRED_CONTEXT_ATTRIBTUES = (
"_form_config",
"_form_render",
)
def flatten_context(context):
if isinstance(context, Context):
flat = {}
for d in context.dicts:
flat.update(d)
return flat
else:
return context
def flatten_contexts(*contexts):
"""Takes a list of context instances and returns a new dict that
combines all of them."""
new_context = DictContext()
for context in contexts:
if context is not None:
new_context.update(flatten_context(context))
for attr in REQUIRED_CONTEXT_ATTRIBTUES:
if hasattr(context, attr):
setattr(new_context, attr, getattr(context, attr))
return new_context
class Widget(forms.Widget):
is_required = False
def render(self, name, value, attrs=None, renderer=None):
"""
Returns this Widget rendered as HTML, as a Unicode string.
The 'value' given is not guaranteed to be valid input, so subclass
implementations should program defensively.
"""
raise NotImplementedError("subclasses of Widget must provide a render() method")
def build_attrs(self, extra_attrs=None, **kwargs):
"""
Backported from Django 1.10
Helper function for building an attribute dictionary.
"""
attrs = dict(self.attrs, **kwargs)
if extra_attrs:
attrs.update(extra_attrs)
return attrs
# Backported from Django 1.7
@property
def is_hidden(self):
return self.input_type == "hidden" if hasattr(self, "input_type") else False
# Backported from Django 1.9
if not hasattr(forms.Widget, "format_value"):
def format_value(self, value):
return self._format_value(value)
class Input(Widget):
template_name = "widgets/input.html"
input_type = None
datalist = None
def __init__(self, *args, **kwargs):
datalist = kwargs.pop("datalist", None)
if datalist is not None:
self.datalist = datalist
template_name = kwargs.pop("template_name", None)
if template_name is not None:
self.template_name = template_name
super(Input, self).__init__(*args, **kwargs)
# This attribute is used to inject a surrounding context in the
# floppyforms templatetags, when rendered inside a complete form.
self.context_instance = None
def get_context_data(self):
return {}
def format_value(self, value):
if self.is_localized:
value = formats.localize_input(value)
return force_text(value)
def get_context(self, name, value, attrs=None):
context = {
"widget": self,
"type": self.input_type,
"name": name,
"hidden": self.is_hidden,
"required": self.is_required,
"True": True,
}
# True is injected in the context to allow stricter comparisons
# for widget attrs. See #25.
if self.is_hidden:
context["hidden"] = True
if value is None:
value = ""
if value != "":
# Only add the value if it is non-empty
context["value"] = self.format_value(value)
context.update(self.get_context_data())
context["attrs"] = self.build_attrs(attrs)
for key, attr in context["attrs"].items():
if attr == 1:
# 1 == True so 'key="1"' will show up only as 'key'
# Casting to a string so that it doesn't equal to True
# See #25.
if not isinstance(attr, bool):
context["attrs"][key] = str(attr)
if self.datalist is not None:
context["datalist"] = self.datalist
return context
def render(self, name, value, attrs=None, **kwargs):
template_name = kwargs.pop("template_name", None)
if template_name is None:
template_name = self.template_name
context = self.get_context(name, value, attrs=attrs or {})
context = flatten_contexts(self.context_instance, context)
return loader.render_to_string(template_name, context)
class TextInput(Input):
template_name = "widgets/text.html"
input_type = "text"
def __init__(self, *args, **kwargs):
if kwargs.get("attrs", None) is not None:
self.input_type = kwargs["attrs"].pop("type", self.input_type)
super(TextInput, self).__init__(*args, **kwargs)
|
[
"django.utils.formats.localize_input",
"django.utils.encoding.force_text",
"django.template.loader.render_to_string"
] |
[((2971, 2988), 'django.utils.encoding.force_text', 'force_text', (['value'], {}), '(value)\n', (2981, 2988), False, 'from django.utils.encoding import force_text\n'), ((4527, 4574), 'django.template.loader.render_to_string', 'loader.render_to_string', (['template_name', 'context'], {}), '(template_name, context)\n', (4550, 4574), False, 'from django.template import Context, loader\n'), ((2926, 2955), 'django.utils.formats.localize_input', 'formats.localize_input', (['value'], {}), '(value)\n', (2948, 2955), False, 'from django.utils import formats\n')]
|
# ---------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# ---------------------------------------------------------
from marshmallow import fields
from azure.ai.ml._schema.core.schema_meta import PatchedSchemaMeta
class AmlComputeNodeInfoSchema(metaclass=PatchedSchemaMeta):
node_id = fields.Str()
private_ip_address = fields.Str()
public_ip_address = fields.Str()
port = fields.Str()
node_state = fields.Str()
current_job_name = fields.Str()
|
[
"marshmallow.fields.Str"
] |
[((355, 367), 'marshmallow.fields.Str', 'fields.Str', ([], {}), '()\n', (365, 367), False, 'from marshmallow import fields\n'), ((393, 405), 'marshmallow.fields.Str', 'fields.Str', ([], {}), '()\n', (403, 405), False, 'from marshmallow import fields\n'), ((430, 442), 'marshmallow.fields.Str', 'fields.Str', ([], {}), '()\n', (440, 442), False, 'from marshmallow import fields\n'), ((454, 466), 'marshmallow.fields.Str', 'fields.Str', ([], {}), '()\n', (464, 466), False, 'from marshmallow import fields\n'), ((484, 496), 'marshmallow.fields.Str', 'fields.Str', ([], {}), '()\n', (494, 496), False, 'from marshmallow import fields\n'), ((520, 532), 'marshmallow.fields.Str', 'fields.Str', ([], {}), '()\n', (530, 532), False, 'from marshmallow import fields\n')]
|
from floodsystem.geo import build_station_list
from floodsystem.geo import rivers_by_station_number
def test_rivers_by_station_number():
"""Tests to check that the outputs from funtion rivers_by_station_number are as expected"""
stations = build_station_list()
test = rivers_by_station_number(stations, 9)
for station in test:
assert type(station) is tuple
assert type(station[1]) is int
i=0
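    # The returned list must be sorted in descending order of station count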
for i in range(0,len(test)-1):
assert test[i][1] >= test[i+1][1]
|
[
"floodsystem.geo.rivers_by_station_number",
"floodsystem.geo.build_station_list"
] |
[((249, 269), 'floodsystem.geo.build_station_list', 'build_station_list', ([], {}), '()\n', (267, 269), False, 'from floodsystem.geo import build_station_list\n'), ((281, 318), 'floodsystem.geo.rivers_by_station_number', 'rivers_by_station_number', (['stations', '(9)'], {}), '(stations, 9)\n', (305, 318), False, 'from floodsystem.geo import rivers_by_station_number\n')]
|
from src.Deque.deque_scratch import Deque
def is_palindrome(string_to_check):
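    # Push every character onto a deque, then compare characters popped from the front and the tail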
string_to_check=string_to_check.strip()
if not string_to_check:
raise Exception("The string is empty")
deq=Deque()
for el in string_to_check:
deq.addTail(el)
front=deq.removeFront()
end=deq.removeTail()
while front == end and front is not None and end is not None:
if deq.size() == 1:
return True
front=deq.removeFront()
end=deq.removeTail()
if deq.size() == 0:
return True
else:
return False
|
[
"src.Deque.deque_scratch.Deque"
] |
[((211, 218), 'src.Deque.deque_scratch.Deque', 'Deque', ([], {}), '()\n', (216, 218), False, 'from src.Deque.deque_scratch import Deque\n')]
|
import os
import pickle
import numpy as np
import PIL.Image
import dnnlib
import dnnlib.tflib as tflib
import config
from training import misc
synthesis_kwargs = dict(minibatch_size=8)
_Gs_cache = dict()
def load_Gs(url):
if url not in _Gs_cache:
with dnnlib.util.open_url(url, cache_dir=config.cache_dir) as f:
_G, _D, Gs = pickle.load(f)
_Gs_cache[url] = Gs
return _Gs_cache[url]
def draw_figure(png, Gs, seeds):
avg_dlantents_b = Gs.get_var('dlatent_avg_b')
avg_dlantents_c = Gs.get_var('dlatent_avg_c')
for seed in seeds:
rnd = np.random.RandomState(seed)
b1 = rnd.randn(Gs.input_shapes[0][1])
b1 = b1[np.newaxis]
b1 = Gs.components.mapping_b.run(b1, None)
b1_v = b1[0, 0, :]
        # Truncation: pull b1 toward the average b dlatent (psi = 0.9)
b1[:, :] = (b1_v - avg_dlantents_b) * 0.9 + avg_dlantents_b
# change C
for i in range(20):
c = rnd.randn(Gs.input_shapes[1][1])
c = c[np.newaxis]
c = Gs.components.mapping_c.run(c, None) # [seed, layer, component]
c_v = c[0, 0, :]
c[:, :] = (c_v - avg_dlantents_c) * 0.7 + avg_dlantents_c
current_png = png + '/seedc_%d_%d' % (seed, i) + '.png'
gen = Gs.components.synthesis.run(b1, c, randomize_noise=False, **synthesis_kwargs)[-1]
misc.save_image_grid(gen, current_png, drange=[-1, 1], grid_size=(1, 1))
b1_v = b1[0, 0, :]
c = rnd.randn(Gs.input_shapes[1][1])
c = c[np.newaxis]
c = Gs.components.mapping_c.run(c, None) # [seed, layer, component]
c[:, :] = avg_dlantents_c
for j in range(80):
random_b2 = rnd.randn(Gs.input_shapes[0][1])
random_b2 = random_b2[np.newaxis]
random_b2 = Gs.components.mapping_b.run(random_b2, None)
b2_v = (random_b2[0, 0, :] - avg_dlantents_b) * 0.5 + avg_dlantents_b
print(b2_v.shape)
            # Gram-Schmidt process: make b2 orthogonal to b1
a1 = np.sum(b1_v * b2_v, dtype=np.float32)
a2 = np.sum(b1_v * b1_v, dtype=np.float32)
print(a1)
print(a2)
b2_v = b2_v - a1 / a2 * b1_v
print(b1_v.shape)
print(b2_v.shape)
print(np.sum(b1_v * b2_v))
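            # Interpolate from b1 along the orthogonalized direction b2 and render each step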
for i in range(10):
tmp = np.empty_like(b1)
tmp[:, :] = b1_v + 0.1 * i * b2_v
current_png = png + '/seedb%d_%d_%d' % (seed, j, i) + '.png'
gen = Gs.components.synthesis.run(tmp, c, randomize_noise=False, **synthesis_kwargs)[-1]
misc.save_image_grid(gen, current_png, drange=[-1, 1], grid_size=(1, 1))
#---------------------------------------------------------------------------
# Main program.
def main():
tflib.init_tf()
os.makedirs(config.result_dir, exist_ok=True)
network_pkl = 'network-snapshot-010000.pkl'
G, D, Gs = misc.load_pkl(network_pkl)
draw_figure(config.result_dir, Gs, seeds = [2, 7, 8, 11, 23])
#----------------------------------------------------------------------------
if __name__ == "__main__":
main()
#----------------------------------------------------------------------------
|
[
"numpy.sum",
"os.makedirs",
"training.misc.load_pkl",
"numpy.empty_like",
"numpy.random.RandomState",
"dnnlib.util.open_url",
"pickle.load",
"training.misc.save_image_grid",
"dnnlib.tflib.init_tf"
] |
[((2768, 2783), 'dnnlib.tflib.init_tf', 'tflib.init_tf', ([], {}), '()\n', (2781, 2783), True, 'import dnnlib.tflib as tflib\n'), ((2788, 2833), 'os.makedirs', 'os.makedirs', (['config.result_dir'], {'exist_ok': '(True)'}), '(config.result_dir, exist_ok=True)\n', (2799, 2833), False, 'import os\n'), ((2897, 2923), 'training.misc.load_pkl', 'misc.load_pkl', (['network_pkl'], {}), '(network_pkl)\n', (2910, 2923), False, 'from training import misc\n'), ((596, 623), 'numpy.random.RandomState', 'np.random.RandomState', (['seed'], {}), '(seed)\n', (617, 623), True, 'import numpy as np\n'), ((268, 321), 'dnnlib.util.open_url', 'dnnlib.util.open_url', (['url'], {'cache_dir': 'config.cache_dir'}), '(url, cache_dir=config.cache_dir)\n', (288, 321), False, 'import dnnlib\n'), ((353, 367), 'pickle.load', 'pickle.load', (['f'], {}), '(f)\n', (364, 367), False, 'import pickle\n'), ((1340, 1412), 'training.misc.save_image_grid', 'misc.save_image_grid', (['gen', 'current_png'], {'drange': '[-1, 1]', 'grid_size': '(1, 1)'}), '(gen, current_png, drange=[-1, 1], grid_size=(1, 1))\n', (1360, 1412), False, 'from training import misc\n'), ((1987, 2024), 'numpy.sum', 'np.sum', (['(b1_v * b2_v)'], {'dtype': 'np.float32'}), '(b1_v * b2_v, dtype=np.float32)\n', (1993, 2024), True, 'import numpy as np\n'), ((2042, 2079), 'numpy.sum', 'np.sum', (['(b1_v * b1_v)'], {'dtype': 'np.float32'}), '(b1_v * b1_v, dtype=np.float32)\n', (2048, 2079), True, 'import numpy as np\n'), ((2243, 2262), 'numpy.sum', 'np.sum', (['(b1_v * b2_v)'], {}), '(b1_v * b2_v)\n', (2249, 2262), True, 'import numpy as np\n'), ((2318, 2335), 'numpy.empty_like', 'np.empty_like', (['b1'], {}), '(b1)\n', (2331, 2335), True, 'import numpy as np\n'), ((2584, 2656), 'training.misc.save_image_grid', 'misc.save_image_grid', (['gen', 'current_png'], {'drange': '[-1, 1]', 'grid_size': '(1, 1)'}), '(gen, current_png, drange=[-1, 1], grid_size=(1, 1))\n', (2604, 2656), False, 'from training import misc\n')]
|
# -*- coding: utf-8 -*-
import time
import numpy as np
from classes import Debug, KalmanFilter
import smbus
bus = smbus.SMBus(2)  # bus = smbus.SMBus(0) for Revision 1
address = 0x68 # via i2cdetect
power_mgmt_1 = 0x6b
ACCEL_CONFIG = 0x1C # Reg 28
ACCEL_CONFIG2 = 0x1D # Reg 29
class Imu(Debug, KalmanFilter):
def __init__(self, sim_mode=False):
self.debug = Debug('imu')
self.sim_mode = sim_mode
self.kf = self.filter_config()
self.raw = self.read_raw()
self.offset = self.offset_calc()
#self.port = port
self.imu_config()
def filter_config(self):
        # parameters for the Kalman filter
dt = 1.0 / 50.0
# state transition model, A
F = np.array([[1, dt, 0], [0, 1, dt], [0, 0, 1]])
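        # State is [position, velocity, acceleration]; H below observes only the acceleration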
        H = np.array([0, 0, 1]).reshape(1, 3)  # transposed  # observation model C
q = 0.05
Q = np.array([[q, q, 0], [q, q, 0], [0, 0, 0]]) # process noise
R = np.array([0.8]).reshape(1, 1) # observation noise
return KalmanFilter(F=F, H=H, Q=Q, R=R)
def imu_config(self):
        # Activate the module so it can be addressed
bus.write_byte_data(address, power_mgmt_1, 0) # full power mode
        # bus.write_byte_data(address, power_mgmt_2, 0b00001111)  # disable=1, disables accel_z and gyro_x through gyro_z
        # set Accelerometer Full Scale Select (here to +-2g)
bus.write_byte_data(address, ACCEL_CONFIG, 0b00100000)
        # set the low-pass filter
bus.write_byte_data(address, ACCEL_CONFIG2,
                            0b00000100)  # corresponds to value 4, i.e. 19.8 ms (~50 Hz)
#print("IMU config ready..")
def read_word(self, reg):
h = bus.read_byte_data(address, reg)
l = bus.read_byte_data(address, reg + 1)
# h = bus.read_byte_data(self.address, reg)
# l = bus.read_byte_data(self.address, reg + 1)
value = (h << 8) + l
return value
def read_word_2c(self, reg):
val = self.read_word(reg)
if (val >= 0x8000):
return -((65535 - val) + 1)
else:
return val
def read_raw(self):
if self.sim_mode == True:
return 100, 200, 20
else:
beschleunigung_xout = self.read_word_2c(0x3b)
beschleunigung_yout = self.read_word_2c(0x3d)
gyroskop_zout = self.read_word_2c(0x47)
beschleunigung_xout_skaliert = beschleunigung_xout / 16384.0 # value from sensor documentation
beschleunigung_yout_skaliert = beschleunigung_yout / 16384.0
gyroskop_zout_skaliert = gyroskop_zout / 131
return beschleunigung_xout_skaliert, beschleunigung_yout_skaliert, gyroskop_zout_skaliert
def offset_calc(self):
init_data = []
print("offset calc start...")
for count in range(0, 200):
init_data.append(self.read_raw())
offset = np.array(init_data)
print("finished calc..")
#print("offset:",offset)
return np.median(offset, axis=0)
def kalman_filter(self, z):
        # this is my C matrix for the output, so the middle state entry should be the velocity
np.dot(self.kf.H, self.kf.predict())
self.kf.update(z)
#print("kalmanfilter: ", self.kf.x[0], self.kf.x[1], self.kf.x[2])
return self.kf.x[1]
def process(self):
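        # Subtract the static offset from the raw reading and pass it through the Kalman filter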
return self.kalman_filter(self.read_raw() - self.offset)
'''
def test_imu(save=False, draw=False):
print("stat testing...")
imu = Imu(sim_mode=False)
t_ref = int(round(time.time() * 1000))
if imu.sim_mode:
for i in range(0, 1000):
try:
imu.debug.V_X, imu.debug.V_Y, imu.debug.w_Z = imu.run()
imu.debug.excecute(t_ref)
time.sleep(0.1)
except KeyboardInterrupt:
break
else:
while KeyboardInterrupt is not True:
try:
imu.debug.V_X, imu.debug.V_Y, imu.debug.w_Z = imu.run()
imu.debug.excecute(t_ref)
except KeyboardInterrupt:
break
if save:
imu.debug.save()
if draw:
imu.debug.draw()
return
# if __name__== "__main":
test_imu(save=True)
'''
|
[
"classes.Debug",
"numpy.median",
"classes.KalmanFilter",
"numpy.array",
"smbus.SMBus"
] |
[((115, 129), 'smbus.SMBus', 'smbus.SMBus', (['(2)'], {}), '(2)\n', (126, 129), False, 'import smbus\n'), ((377, 389), 'classes.Debug', 'Debug', (['"""imu"""'], {}), "('imu')\n", (382, 389), False, 'from classes import Debug, KalmanFilter\n'), ((730, 775), 'numpy.array', 'np.array', (['[[1, dt, 0], [0, 1, dt], [0, 0, 1]]'], {}), '([[1, dt, 0], [0, 1, dt], [0, 0, 1]])\n', (738, 775), True, 'import numpy as np\n'), ((889, 932), 'numpy.array', 'np.array', (['[[q, q, 0], [q, q, 0], [0, 0, 0]]'], {}), '([[q, q, 0], [q, q, 0], [0, 0, 0]])\n', (897, 932), True, 'import numpy as np\n'), ((1028, 1060), 'classes.KalmanFilter', 'KalmanFilter', ([], {'F': 'F', 'H': 'H', 'Q': 'Q', 'R': 'R'}), '(F=F, H=H, Q=Q, R=R)\n', (1040, 1060), False, 'from classes import Debug, KalmanFilter\n'), ((2931, 2950), 'numpy.array', 'np.array', (['init_data'], {}), '(init_data)\n', (2939, 2950), True, 'import numpy as np\n'), ((3032, 3057), 'numpy.median', 'np.median', (['offset'], {'axis': '(0)'}), '(offset, axis=0)\n', (3041, 3057), True, 'import numpy as np\n'), ((788, 807), 'numpy.array', 'np.array', (['[0, 0, 1]'], {}), '([0, 0, 1])\n', (796, 807), True, 'import numpy as np\n'), ((962, 977), 'numpy.array', 'np.array', (['[0.8]'], {}), '([0.8])\n', (970, 977), True, 'import numpy as np\n')]
|
from argparse import ArgumentDefaultsHelpFormatter, ArgumentParser, Namespace
from dotenv import find_dotenv, load_dotenv
from werkzeug import run_simple
def parse_args() -> Namespace:
parser_kw = {'formatter_class': ArgumentDefaultsHelpFormatter}
parser = ArgumentParser(description='Chitty auxiliary web service')
subparsers = parser.add_subparsers(help='Available commands')
run_parser = subparsers.add_parser('run', help='Launch the server', **parser_kw)
run_parser.add_argument(
'-H', '--host', default='127.0.0.1', help='IP address to bind to'
)
run_parser.add_argument(
'-p', '--port', type=int, default=5001, help='port number to bind to'
)
return parser.parse_args()
def main():
load_dotenv(find_dotenv())
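    # The app module is imported only after the .env file has been loaded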
from .app import make_app
application = make_app()
opts = parse_args()
run_simple(opts.host, opts.port, application, use_reloader=True, use_debugger=False)
|
[
"werkzeug.run_simple",
"argparse.ArgumentParser",
"dotenv.find_dotenv"
] |
[((268, 326), 'argparse.ArgumentParser', 'ArgumentParser', ([], {'description': '"""Chitty auxiliary web service"""'}), "(description='Chitty auxiliary web service')\n", (282, 326), False, 'from argparse import ArgumentDefaultsHelpFormatter, ArgumentParser, Namespace\n'), ((863, 951), 'werkzeug.run_simple', 'run_simple', (['opts.host', 'opts.port', 'application'], {'use_reloader': '(True)', 'use_debugger': '(False)'}), '(opts.host, opts.port, application, use_reloader=True,\n use_debugger=False)\n', (873, 951), False, 'from werkzeug import run_simple\n'), ((761, 774), 'dotenv.find_dotenv', 'find_dotenv', ([], {}), '()\n', (772, 774), False, 'from dotenv import find_dotenv, load_dotenv\n')]
|
import json
from django.urls import reverse
from rest_framework.views import status
from rest_framework.test import APITestCase, APIClient
class CommentsTestCase(APITestCase):
def setUp(self):
self.client = APIClient()
self.signup_url = reverse('authentication:auth-register')
self.create_article_url = reverse('articles:articles-listcreate')
self.user_two_details = {
"user": {
"username": "andela",
"email": "<EMAIL>",
"password": "<PASSWORD>"
}}
self.create_article_data = {
"title": "Programming Languages",
"body": "There are variety of programming languagr",
"description": "Programming",
"tagList": ["Programming", "language", "python"]
}
self.highlighted_text = {
"comment": {
"body": "Good work here!!",
"start_highlight_position": 2,
"end_highlight_position": 15
}}
self.selection_start_index_larger_than_end_index = {
"comment": {
"body": "Good work here!!",
"start_highlight_position": 28,
"end_highlight_position": 15
}}
self.invalid_index_datatype = {
"comment": {
"body": "Good work here!!",
"start_highlight_position": "one",
"end_highlight_position": 15
}}
self.missing_field = {
"comment": {
"body": "Good work here!!",
"end_highlight_position": 15
}}
self.update_comment = {
"comment": {
"body": "Nice Idea"
}}
def register_user(self, user_details):
"""Sign up a new user to get a token"""
register = self.client.post(self.signup_url,
user_details,
format='json')
token = register.data["token"]
return token
def create_article(self, token):
"""Create an article."""
response = self.client.post(
self.create_article_url,
self.create_article_data,
format='json',
HTTP_AUTHORIZATION='token {}'.format(token))
slug = response.data['slug']
return slug
def test_comment_highlighted_text(self):
"""Test comment highlighted text."""
token = self.register_user(self.user_two_details)
slug = self.create_article(token)
response = self.client.post(
reverse('articles:high_light', kwargs={'slug': slug}),
self.highlighted_text,
HTTP_AUTHORIZATION='token {}'.format(token),
format='json')
self.assertIn('selected_text', response.data)
self.assertEqual(response.status_code, status.HTTP_201_CREATED)
def test_rejects_start_index_larger_than_end_index(self):
"""Test rejects start index larger than end index."""
token = self.register_user(self.user_two_details)
slug = self.create_article(token)
response = self.client.post(
reverse('articles:high_light', kwargs={'slug': slug}),
self.selection_start_index_larger_than_end_index,
HTTP_AUTHORIZATION='token {}'.format(token),
format='json')
self.assertEqual(response.data['error'],
'The start_index_position should not '
'be greater or equal end_index_position')
self.assertEqual(response.status_code, status.HTTP_200_OK)
def test_rejects_invalid_types_for_highlight_index(self):
"""Test rejects index data type that are not integers."""
token = self.register_user(self.user_two_details)
slug = self.create_article(token)
response = self.client.post(
reverse('articles:high_light', kwargs={'slug': slug}),
self.invalid_index_datatype,
HTTP_AUTHORIZATION='token {}'.format(token),
format='json')
self.assertEqual(response.data['error'],
'Start of highlight and end of highlight'
' indices should be both integers')
self.assertEqual(response.status_code,
status.HTTP_422_UNPROCESSABLE_ENTITY)
def test_rejects_missing_required_field(self):
"""Test for missing field."""
token = self.register_user(self.user_two_details)
slug = self.create_article(token)
response = self.client.post(
reverse('articles:high_light', kwargs={'slug': slug}),
self.missing_field,
HTTP_AUTHORIZATION='token {}'.format(token),
format='json')
self.assertEqual(response.data['error'],
'start_highlight_position is required')
self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)
def test_get_all_comments(self):
"""Test get all comments."""
token = self.register_user(self.user_two_details)
# create an article
response = self.client.post(
self.create_article_url,
self.create_article_data,
format='json',
HTTP_AUTHORIZATION='token {}'.format(token))
slug = response.data['slug']
        # highlight some text and comment on it
self.client.post(
reverse('articles:high_light', kwargs={'slug': slug}),
self.highlighted_text,
HTTP_AUTHORIZATION='token {}'.format(token),
format='json')
# get all comments
response = self.client.get(
reverse('articles:high_light', kwargs={'slug': slug}),
format='json')
response_data = json.loads(json.dumps(response.data))
self.assertIn('selected_text', response_data[0])
self.assertEqual(response.status_code, status.HTTP_200_OK)
def test_get_single_comments(self):
"""Test get single comments."""
token = self.register_user(self.user_two_details)
# create an article
response = self.client.post(
self.create_article_url,
self.create_article_data,
format='json',
HTTP_AUTHORIZATION='token {}'.format(token))
slug = response.data['slug']
        # highlight some text and comment on it
response = self.client.post(
reverse('articles:high_light', kwargs={'slug': slug}),
self.highlighted_text,
HTTP_AUTHORIZATION='token {}'.format(token),
format='json')
# get single comment
article_id = response.data['id']
response = self.client.get(
'/api/articles/{}/highlight/{}'.format(slug, article_id),
format='json')
response_data = json.loads(json.dumps(response.data))
self.assertIn('selected_text', response_data)
self.assertEqual(response.status_code, status.HTTP_200_OK)
def test_delete_single_comments(self):
"""Test delete single comments."""
token = self.register_user(self.user_two_details)
# create an article
response = self.client.post(
self.create_article_url,
self.create_article_data,
format='json',
HTTP_AUTHORIZATION='token {}'.format(token))
slug = response.data['slug']
        # highlight some text and comment on it
response = self.client.post(
reverse('articles:high_light', kwargs={'slug': slug}),
self.highlighted_text,
HTTP_AUTHORIZATION='token {}'.format(token),
format='json')
# delete single comment
article_id = response.data['id']
response = self.client.delete(
'/api/articles/{}/highlight/{}'.format(slug, article_id),
HTTP_AUTHORIZATION='token {}'.format(token),
format='json')
response_data = json.loads(json.dumps(response.data))
self.assertEqual(response.data['message'],
'Comment on highlighted text deleted successfully')
self.assertEqual(response.status_code, status.HTTP_200_OK)
def test_update_single_comments(self):
"""Test update single comment."""
token = self.register_user(self.user_two_details)
# create an article
response = self.client.post(
self.create_article_url,
self.create_article_data,
format='json',
HTTP_AUTHORIZATION='token {}'.format(token))
slug = response.data['slug']
        # highlight some text and comment on it
response = self.client.post(
reverse('articles:high_light', kwargs={'slug': slug}),
self.highlighted_text,
HTTP_AUTHORIZATION='token {}'.format(token),
format='json')
article_id = response.data['id']
# update the comment
response = self.client.put(
'/api/articles/{}/highlight/{}'.format(slug, article_id),
self.update_comment,
HTTP_AUTHORIZATION='token {}'.format(token),
format='json')
response_data = json.loads(json.dumps(response.data))
self.assertIn('selected_text', response_data)
self.assertEqual(response.status_code, status.HTTP_200_OK)
def test_update_unexisting_comments(self):
"""Test update unexisting comment."""
token = self.register_user(self.user_two_details)
# create an article
response = self.client.post(
self.create_article_url,
self.create_article_data,
format='json',
HTTP_AUTHORIZATION='token {}'.format(token))
slug = response.data['slug']
        # try to update a comment that does not exist
response = self.client.put(
'/api/articles/{}/highlight/{}'.format(slug, 2),
self.update_comment,
HTTP_AUTHORIZATION='token {}'.format(token),
format='json')
response_data = json.loads(json.dumps(response.data))
self.assertEqual(response.data['error'], 'The comment does not exist')
self.assertEqual(response.status_code, status.HTTP_404_NOT_FOUND)
def test_get_delete_unexisting_comments(self):
"""Delete unexisting comment"""
token = self.register_user(self.user_two_details)
# create an article
response = self.client.post(
self.create_article_url,
self.create_article_data,
format='json',
HTTP_AUTHORIZATION='token {}'.format(token))
slug = response.data['slug']
        # try to delete a comment that does not exist
response = self.client.delete(
'/api/articles/{}/highlight/{}'.format(slug, 2),
self.update_comment,
HTTP_AUTHORIZATION='token {}'.format(token),
format='json')
response_data = json.loads(json.dumps(response.data))
self.assertEqual(response.data["error"], "The comment does not exist")
self.assertEqual(response.status_code, status.HTTP_404_NOT_FOUND)
|
[
"django.urls.reverse",
"rest_framework.test.APIClient",
"json.dumps"
] |
[((221, 232), 'rest_framework.test.APIClient', 'APIClient', ([], {}), '()\n', (230, 232), False, 'from rest_framework.test import APITestCase, APIClient\n'), ((259, 298), 'django.urls.reverse', 'reverse', (['"""authentication:auth-register"""'], {}), "('authentication:auth-register')\n", (266, 298), False, 'from django.urls import reverse\n'), ((333, 372), 'django.urls.reverse', 'reverse', (['"""articles:articles-listcreate"""'], {}), "('articles:articles-listcreate')\n", (340, 372), False, 'from django.urls import reverse\n'), ((2665, 2718), 'django.urls.reverse', 'reverse', (['"""articles:high_light"""'], {'kwargs': "{'slug': slug}"}), "('articles:high_light', kwargs={'slug': slug})\n", (2672, 2718), False, 'from django.urls import reverse\n'), ((3239, 3292), 'django.urls.reverse', 'reverse', (['"""articles:high_light"""'], {'kwargs': "{'slug': slug}"}), "('articles:high_light', kwargs={'slug': slug})\n", (3246, 3292), False, 'from django.urls import reverse\n'), ((3965, 4018), 'django.urls.reverse', 'reverse', (['"""articles:high_light"""'], {'kwargs': "{'slug': slug}"}), "('articles:high_light', kwargs={'slug': slug})\n", (3972, 4018), False, 'from django.urls import reverse\n'), ((4671, 4724), 'django.urls.reverse', 'reverse', (['"""articles:high_light"""'], {'kwargs': "{'slug': slug}"}), "('articles:high_light', kwargs={'slug': slug})\n", (4678, 4724), False, 'from django.urls import reverse\n'), ((5517, 5570), 'django.urls.reverse', 'reverse', (['"""articles:high_light"""'], {'kwargs': "{'slug': slug}"}), "('articles:high_light', kwargs={'slug': slug})\n", (5524, 5570), False, 'from django.urls import reverse\n'), ((5775, 5828), 'django.urls.reverse', 'reverse', (['"""articles:high_light"""'], {'kwargs': "{'slug': slug}"}), "('articles:high_light', kwargs={'slug': slug})\n", (5782, 5828), False, 'from django.urls import reverse\n'), ((5892, 5917), 'json.dumps', 'json.dumps', (['response.data'], {}), '(response.data)\n', (5902, 5917), False, 'import json\n'), ((6548, 6601), 'django.urls.reverse', 'reverse', (['"""articles:high_light"""'], {'kwargs': "{'slug': slug}"}), "('articles:high_light', kwargs={'slug': slug})\n", (6555, 6601), False, 'from django.urls import reverse\n'), ((6979, 7004), 'json.dumps', 'json.dumps', (['response.data'], {}), '(response.data)\n', (6989, 7004), False, 'import json\n'), ((7639, 7692), 'django.urls.reverse', 'reverse', (['"""articles:high_light"""'], {'kwargs': "{'slug': slug}"}), "('articles:high_light', kwargs={'slug': slug})\n", (7646, 7692), False, 'from django.urls import reverse\n'), ((8116, 8141), 'json.dumps', 'json.dumps', (['response.data'], {}), '(response.data)\n', (8126, 8141), False, 'import json\n'), ((8843, 8896), 'django.urls.reverse', 'reverse', (['"""articles:high_light"""'], {'kwargs': "{'slug': slug}"}), "('articles:high_light', kwargs={'slug': slug})\n", (8850, 8896), False, 'from django.urls import reverse\n'), ((9346, 9371), 'json.dumps', 'json.dumps', (['response.data'], {}), '(response.data)\n', (9356, 9371), False, 'import json\n'), ((10187, 10212), 'json.dumps', 'json.dumps', (['response.data'], {}), '(response.data)\n', (10197, 10212), False, 'import json\n'), ((11065, 11090), 'json.dumps', 'json.dumps', (['response.data'], {}), '(response.data)\n', (11075, 11090), False, 'import json\n')]
|
from pyscf import lib, scf
#from pyphf import guess, suscf
from automr import autocas, guess
lib.num_threads(8)
xyz = '''C -2.94294278 0.39039038 0.00000000
C -1.54778278 0.39039038 0.00000000
C -0.85024478 1.59814138 0.00000000
C -1.54789878 2.80665038 -0.00119900
C -2.94272378 2.80657238 -0.00167800
C -3.64032478 1.59836638 -0.00068200
H -3.49270178 -0.56192662 0.00045000
H -0.99827478 -0.56212262 0.00131500
H 0.24943522 1.59822138 0.00063400
H -0.99769878 3.75879338 -0.00125800
H -3.49284578 3.75885338 -0.00263100
H -4.73992878 1.59854938 -0.00086200
'''
bas = 'def2-svp'
mf = guess.mix(xyz, bas, conv='tight')
mf2 = autocas.cas(mf)
|
[
"automr.guess.mix",
"automr.autocas.cas",
"pyscf.lib.num_threads"
] |
[((94, 112), 'pyscf.lib.num_threads', 'lib.num_threads', (['(8)'], {}), '(8)\n', (109, 112), False, 'from pyscf import lib, scf\n'), ((857, 890), 'automr.guess.mix', 'guess.mix', (['xyz', 'bas'], {'conv': '"""tight"""'}), "(xyz, bas, conv='tight')\n", (866, 890), False, 'from automr import autocas, guess\n'), ((898, 913), 'automr.autocas.cas', 'autocas.cas', (['mf'], {}), '(mf)\n', (909, 913), False, 'from automr import autocas, guess\n')]
|
#!/bin/python3
from gym import spaces
from gym.envs.registration import register
from frobs_rl.envs import robot_BasicEnv
import rospy
#- Uncomment the library modules as needed
# from frobs_rl.common import ros_gazebo
# from frobs_rl.common import ros_controllers
# from frobs_rl.common import ros_node
# from frobs_rl.common import ros_launch
# from frobs_rl.common import ros_params
# from frobs_rl.common import ros_urdf
# from frobs_rl.common import ros_spawn
"""
Although it is best to register only the task environment, one can also register the
robot environment.
"""
# register(
# id='CustomRobotEnv-v0',
# entry_point='frobs_rl.templates.CustomRobotEnv:CustomRobotEnv',
# max_episode_steps=10000,
# )
class CustomRobotEnv(robot_BasicEnv.RobotBasicEnv):
"""
Custom Robot Env, use this for all task envs using the custom robot.
"""
def __init__(self):
"""
Describe the robot used in the env.
"""
rospy.loginfo("Starting Custom Robot Env")
"""
If launching Gazebo with the env then set the corresponding environment variables.
"""
launch_gazebo=False
gazebo_init_paused=True
gazebo_use_gui=True
gazebo_recording=False
gazebo_freq=100
gazebo_max_freq=None
gazebo_timestep=None
"""
If launching Gazebo with a custom world then set the corresponding environment variables.
"""
world_path=None
world_pkg=None
world_filename=None
"""
If spawning the robot using the given spawner then set the corresponding environment variables.
"""
spawn_robot=False
model_name_in_gazebo="robot"
namespace="/robot"
pkg_name=None
urdf_file=None
urdf_folder="/urdf"
controller_file=None
controller_list=None
urdf_xacro_args=None
rob_state_publisher_max_freq= None
model_pos_x=0.0; model_pos_y=0.0; model_pos_z=0.0
model_ori_x=0.0; model_ori_y=0.0; model_ori_z=0.0; model_ori_w=0.0
"""
Set if the controllers in "controller_list" will be reset at the beginning of each episode, default is False.
"""
reset_controllers=False
"""
Set the reset mode of gazebo at the beginning of each episode: 1 is "reset_world", 2 is "reset_simulation". Default is 1.
"""
reset_mode=1
"""
Set the step mode of Gazebo. 1 is "using ROS services", 2 is "using step function of Gazebo". Default is 1.
If using the step mode 2 then set the number of steps of Gazebo to take in each episode. Default is 1.
"""
step_mode=1
num_gazebo_steps=1
"""
Init the parent class with the corresponding variables.
"""
super(CustomRobotEnv, self).__init__( launch_gazebo=launch_gazebo, gazebo_init_paused=gazebo_init_paused,
gazebo_use_gui=gazebo_use_gui, gazebo_recording=gazebo_recording, gazebo_freq=gazebo_freq, world_path=world_path,
world_pkg=world_pkg, world_filename=world_filename, gazebo_max_freq=gazebo_max_freq, gazebo_timestep=gazebo_timestep,
spawn_robot=spawn_robot, model_name_in_gazebo=model_name_in_gazebo, namespace=namespace, pkg_name=pkg_name,
urdf_file=urdf_file, urdf_folder=urdf_folder, controller_file=controller_file, controller_list=controller_list,
urdf_xacro_args=urdf_xacro_args, rob_state_publisher_max_freq= rob_state_publisher_max_freq,
model_pos_x=model_pos_x, model_pos_y=model_pos_y, model_pos_z=model_pos_z,
model_ori_x=model_ori_x, model_ori_y=model_ori_y, model_ori_z=model_ori_z, model_ori_w=model_ori_w,
reset_controllers=reset_controllers, reset_mode=reset_mode, step_mode=step_mode, num_gazebo_steps=num_gazebo_steps)
"""
Define publisher or subscribers as needed.
"""
# self.pub1 = rospy.Publisher('/robot/controller_manager/command', JointState, queue_size=1)
# self.sub1 = rospy.Subscriber('/robot/joint_states', JointState, self.callback1)
"""
If using the __check_subs_and_pubs_connection method, then un-comment the lines below.
"""
# ros_gazebo.gazebo_unpause_physics()
# self._check_subs_and_pubs_connection()
# ros_gazebo.gazebo_pause_physics()
"""
Finished __init__ method
"""
rospy.loginfo("Finished Init of Custom Robot env")
#------------------------------------------#
# Custom methods for the CustomRobotEnv #
def _check_subs_and_pubs_connection(self):
"""
Function to check if the Gazebo and ROS connections are ready
"""
return True
#-------------------------------------------------------#
# Custom available methods for the CustomRobotEnv #
# Although it is best to implement these methods in #
# the Task Env, one can use them here if needed. #
def _send_action(self, action):
"""
Function to send an action to the robot
"""
raise NotImplementedError()
def _get_observation(self):
"""
        Function to get the observation from the environment.
"""
raise NotImplementedError()
def _get_reward(self):
"""
        Function to get the reward from the environment.
"""
raise NotImplementedError()
def _check_if_done(self):
"""
Function to check if the episode is done.
If the episode has a success condition then set done as:
self.info['is_success'] = 1.0
"""
raise NotImplementedError()
def _set_episode_init_params(self):
"""
        Function to set some parameters, like the position of the robot, at the beginning of each episode.
"""
raise NotImplementedError()
|
[
"rospy.loginfo"
] |
[((986, 1028), 'rospy.loginfo', 'rospy.loginfo', (['"""Starting Custom Robot Env"""'], {}), "('Starting Custom Robot Env')\n", (999, 1028), False, 'import rospy\n'), ((4582, 4632), 'rospy.loginfo', 'rospy.loginfo', (['"""Finished Init of Custom Robot env"""'], {}), "('Finished Init of Custom Robot env')\n", (4595, 4632), False, 'import rospy\n')]
|
from os import path
import os
import matplotlib.pyplot as plt
import numpy as np
import autofit as af
"""
The `analysis.py` module contains the dataset and log likelihood function which given a model instance (set up by
the non-linear search) fits the dataset and returns the log likelihood of that model.
"""
class Analysis(af.Analysis):
def __init__(self, data: np.ndarray, noise_map:np.ndarray):
"""
In this example the `Analysis` object only contains the data and noise-map. It can be easily extended,
for more complex data-sets and model fitting problems.
Parameters
----------
data
A 1D numpy array containing the data (e.g. a noisy 1D Gaussian) fitted in the workspace examples.
noise_map
A 1D numpy array containing the noise values of the data, used for computing the goodness of fit
metric.
"""
super().__init__()
self.data = data
self.noise_map = noise_map
def log_likelihood_function(self, instance: af.ModelInstance) -> float:
"""
Determine the log likelihood of a fit of multiple profiles to the dataset.
Parameters
----------
instance : af.Collection
The model instances of the profiles.
Returns
-------
The log likelihood value indicating how well this model fit the dataset.
"""
xvalues = np.arange(self.data.shape[0])
try:
model_data_1d = sum(
profile.model_data_1d_via_xvalues_from(xvalues=xvalues) for profile in instance
)
except TypeError:
model_data_1d = instance.model_data_1d_via_xvalues_from(xvalues=xvalues)
residual_map = self.data - model_data_1d
chi_squared_map = (residual_map / self.noise_map) ** 2.0
log_likelihood = -0.5 * sum(chi_squared_map)
return log_likelihood
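    # Note: the value above is the Gaussian chi-squared log likelihood with the
    # constant noise-normalization term omitted. A toy check with hypothetical
    # values (illustration only, not part of the original module):
    #
    #   data      = np.array([1.0, 2.0, 3.0])
    #   model     = np.array([1.1, 1.9, 3.2])
    #   noise_map = np.array([0.1, 0.1, 0.1])
    #   -0.5 * np.sum(((data - model) / noise_map) ** 2.0)   # -> -3.0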
def visualize(self, paths: af.DirectoryPaths, instance: af.ModelInstance, during_analysis : bool):
"""
During a model-fit, the `visualize` method is called throughout the non-linear search and is used to output
        images indicating the quality of the fit so far.
        The `instance` passed into the visualize method is the maximum log likelihood solution obtained by the model-fit
so far and it can be used to provide on-the-fly images showing how the model-fit is going.
For your model-fitting problem this function will be overwritten with plotting functions specific to your
problem.
Parameters
----------
paths
The PyAutoFit paths object which manages all paths, e.g. where the non-linear search outputs are stored,
visualization, and the pickled objects used by the aggregator output by this function.
instance
An instance of the model that is being fitted to the data by this analysis (whose parameters have been set
via a non-linear search).
during_analysis
If True the visualization is being performed midway through the non-linear search before it is finished,
which may change which images are output.
"""
xvalues = np.arange(self.data.shape[0])
try:
model_data_1d = sum(
profile.model_data_1d_via_xvalues_from(xvalues=xvalues) for profile in instance
)
except TypeError:
model_data_1d = instance.model_data_1d_via_xvalues_from(xvalues=xvalues)
plt.errorbar(
x=xvalues,
y=self.data,
yerr=self.noise_map,
color="k",
ecolor="k",
elinewidth=1,
capsize=2,
)
plt.plot(range(self.data.shape[0]), model_data_1d, color="r")
plt.title("Dynesty model fit to 1D Gaussian + Exponential dataset.")
plt.xlabel("x values of profile")
plt.ylabel("Profile normalization")
os.makedirs(paths.image_path, exist_ok=True)
plt.savefig(path.join(paths.image_path, "model_fit.png"))
plt.clf()
|
[
"matplotlib.pyplot.title",
"os.makedirs",
"matplotlib.pyplot.clf",
"numpy.arange",
"matplotlib.pyplot.ylabel",
"matplotlib.pyplot.xlabel",
"os.path.join",
"matplotlib.pyplot.errorbar"
] |
[((1484, 1513), 'numpy.arange', 'np.arange', (['self.data.shape[0]'], {}), '(self.data.shape[0])\n', (1493, 1513), True, 'import numpy as np\n'), ((3328, 3357), 'numpy.arange', 'np.arange', (['self.data.shape[0]'], {}), '(self.data.shape[0])\n', (3337, 3357), True, 'import numpy as np\n'), ((3644, 3754), 'matplotlib.pyplot.errorbar', 'plt.errorbar', ([], {'x': 'xvalues', 'y': 'self.data', 'yerr': 'self.noise_map', 'color': '"""k"""', 'ecolor': '"""k"""', 'elinewidth': '(1)', 'capsize': '(2)'}), "(x=xvalues, y=self.data, yerr=self.noise_map, color='k', ecolor\n ='k', elinewidth=1, capsize=2)\n", (3656, 3754), True, 'import matplotlib.pyplot as plt\n'), ((3933, 4001), 'matplotlib.pyplot.title', 'plt.title', (['"""Dynesty model fit to 1D Gaussian + Exponential dataset."""'], {}), "('Dynesty model fit to 1D Gaussian + Exponential dataset.')\n", (3942, 4001), True, 'import matplotlib.pyplot as plt\n'), ((4011, 4044), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""x values of profile"""'], {}), "('x values of profile')\n", (4021, 4044), True, 'import matplotlib.pyplot as plt\n'), ((4054, 4089), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Profile normalization"""'], {}), "('Profile normalization')\n", (4064, 4089), True, 'import matplotlib.pyplot as plt\n'), ((4101, 4145), 'os.makedirs', 'os.makedirs', (['paths.image_path'], {'exist_ok': '(True)'}), '(paths.image_path, exist_ok=True)\n', (4112, 4145), False, 'import os\n'), ((4222, 4231), 'matplotlib.pyplot.clf', 'plt.clf', ([], {}), '()\n', (4229, 4231), True, 'import matplotlib.pyplot as plt\n'), ((4167, 4211), 'os.path.join', 'path.join', (['paths.image_path', '"""model_fit.png"""'], {}), "(paths.image_path, 'model_fit.png')\n", (4176, 4211), False, 'from os import path\n')]
|
import logging
import uuid
import luigi
from luigi.task import flatten_output
from luigi.parameter import ParameterVisibility
logger = logging.getLogger('luigi-interface')
class IlluminaFastqHeader:
@classmethod
def parse(cls, s):
pieces = s.split(':')
if len(pieces) == 5:
device, flowcell_lane, tile, x, y = pieces
return cls(device, flowcell_lane=flowcell_lane, tile=tile, x=x, y=y)
elif len(pieces) == 7:
return cls(*pieces)
else:
raise TypeError('Unsupported Illumina FASTQ header format {}.'.format(s))
def __init__(self, device, run=None, flowcell=None, flowcell_lane=None, tile=None, x=None, y=None):
self.device = device
self.run = run
self.flowcell = flowcell
self.flowcell_lane = flowcell_lane
self.tile = tile
self.x = x
self.y = y
@property
def batch_factor(self):
if self.flowcell is None:
return self.device, self.flowcell_lane
return self.device, self.flowcell, self.flowcell_lane
def parse_illumina_fastq_header(s):
return IlluminaFastqHeader(*s.split(':'))
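# Usage sketch (the header string below is hypothetical):
#
#   header = IlluminaFastqHeader.parse('A00228:279:HFWFVDMXX:1:1101:8986:1000')
#   header.batch_factor   # -> ('A00228', 'HFWFVDMXX', '1')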
def max_retry(count):
"""
    Set the maximum number of times a task can be retried before being disabled
as per Luigi retry policy.
"""
def wrapper(cls):
cls.retry_count = count
return cls
return wrapper
no_retry = max_retry(0)
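# Usage sketch (MyTask is illustrative): cap Luigi's retry policy for one task class.
#
#   @max_retry(3)
#   class MyTask(luigi.Task):
#       ...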
class TaskWithPriorityMixin:
"""Mixin that adds a --priority flag to a given task."""
priority = luigi.IntParameter(default=0, positional=False, significant=False)
class RerunnableTaskMixin:
"""
Mixin for a task that can be rerun regardless of its completion status.
"""
rerun = luigi.BoolParameter(default=False, positional=False, significant=False)
    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
self._has_rerun = False
def run(self):
try:
return super().run()
finally:
self._has_rerun = True
def complete(self):
return (not self.rerun or self._has_rerun) and super().complete()
class CheckAfterCompleteMixin:
"""Ensures that a task is completed after a successful run()."""
def run(self):
ret = super().run()
if not self.complete():
raise RuntimeError('{} is not completed after successful run().'.format(repr(self)))
return ret
def remove_task_output(task):
logger.info('Cleaning up %s...', repr(task))
for out in flatten_output(task):
if hasattr(out, 'remove') and out.exists():
try:
out.remove()
logger.info('Removed %s.', repr(out))
except:
logger.exception('Failed to remove %s.', repr(out))
|
[
"luigi.task.flatten_output",
"luigi.BoolParameter",
"luigi.IntParameter",
"logging.getLogger"
] |
[((137, 173), 'logging.getLogger', 'logging.getLogger', (['"""luigi-interface"""'], {}), "('luigi-interface')\n", (154, 173), False, 'import logging\n'), ((1541, 1607), 'luigi.IntParameter', 'luigi.IntParameter', ([], {'default': '(0)', 'positional': '(False)', 'significant': '(False)'}), '(default=0, positional=False, significant=False)\n', (1559, 1607), False, 'import luigi\n'), ((1740, 1811), 'luigi.BoolParameter', 'luigi.BoolParameter', ([], {'default': '(False)', 'positional': '(False)', 'significant': '(False)'}), '(default=False, positional=False, significant=False)\n', (1759, 1811), False, 'import luigi\n'), ((2536, 2556), 'luigi.task.flatten_output', 'flatten_output', (['task'], {}), '(task)\n', (2550, 2556), False, 'from luigi.task import flatten_output\n')]
|
import numpy as np
import pandas as pd
from veneer.pest_runtime import *
from veneer.manage import start,kill_all_now
import pyapprox as pya
from functools import partial
from pyapprox.adaptive_sparse_grid import max_level_admissibility_function
from pyapprox.adaptive_polynomial_chaos import variance_pce_refinement_indicator
from pyapprox.univariate_quadrature import clenshaw_curtis_rule_growth
from pyapprox.variable_transformations import AffineRandomVariableTransformation
from funcs.read_data import variables_prep, file_settings
from funcs.modeling_funcs import vs_settings, \
modeling_settings, paralell_vs, obtain_initials, change_param_values
# Create the copy of models and veneer list
project_name = 'MW_BASE_RC10.rsproj'
veneer_name = 'vcmd45\\FlowMatters.Source.VeneerCmd.exe'
first_port=15000; num_copies = 8
_, things_to_record, _, _, _ = modeling_settings()
processes, ports = paralell_vs(first_port, num_copies, project_name, veneer_name)
vs_list = vs_settings(ports, things_to_record)
# obtain the initial values of parameters
initial_values = obtain_initials(vs_list[0])
def run_source_lsq(vars, vs_list=vs_list):
"""
    Function used to run the Source model and return the RMSE between simulated and observed annual loads.
The function is called by AdaptiveLejaPCE.
"""
from funcs.modeling_funcs import modeling_settings, generate_observation_ensemble
import spotpy as sp
print('Read Parameters')
parameters = pd.read_csv('../data/Parameters-PCE.csv', index_col='Index')
# Define objective functions
# Use annual or monthly loads
def timeseries_sum(df, temp_scale = 'annual'):
"""
        Obtain the sum of a timeseries at a given temporal scale.
        temp_scale: str, 'annual' (default) or 'monthly'
"""
assert temp_scale in ['monthly', 'annual'], 'The temporal scale given is not supported.'
if temp_scale == 'monthly':
sum_126001A = df.resample('M').sum()
else:
month_126001A = df.resample('M').sum()
sum_126001A = pd.DataFrame(index = np.arange(df.index[0].year, df.index[-1].year),
columns=df.columns)
for i in range(sum_126001A.shape[0]):
sum_126001A.iloc[i, :] = month_126001A.iloc[i*12: (i+1)*12, :].sum()
return sum_126001A
# End timeseries_sum()
# import observation if the output.txt requires the use of obs.
date_range = pd.to_datetime(['2009/07/01', '2018/06/30'])
observed_din = pd.read_csv(f'{file_settings()[1]}126001A.csv', index_col='Date')
observed_din.index = pd.to_datetime(observed_din.index)
observed_din = observed_din.loc[date_range[0]:date_range[1], :].filter(items=[observed_din.columns[0]]).apply(lambda x: 1000 * x)
# loop over the vars and try to use parallel
parameter_df = pd.DataFrame(index=np.arange(vars.shape[1]), columns=parameters.Name_short)
for i in range(vars.shape[1]):
parameter_df.iloc[i] = vars[:, i]
# set the time period of the results
retrieve_time = [pd.Timestamp('2009-07-01'), pd.Timestamp('2018-06-30')]
# define the modeling period and the recording variables
_, _, criteria, start_date, end_date = modeling_settings()
din = generate_observation_ensemble(vs_list,
criteria, start_date, end_date, parameter_df, retrieve_time)
# obtain the sum at a given temporal scale
# din_pbias = sp.objectivefunctions.pbias(observed_din[observed_din.columns[0]], din[column_names[0]])
din_126001A = timeseries_sum(din, temp_scale = 'annual')
obs_din = timeseries_sum(observed_din, temp_scale = 'annual')
din_126001A = pd.DataFrame(din_126001A,dtype='float').values
obs_din = pd.DataFrame(obs_din,dtype='float').values
# breakpoint()
resid = din_126001A - obs_din
rmse = (np.mean(resid ** 2, axis=0)) ** 0.5
if rmse[0] == 0: rmse[0] = 1e-8
rmse = rmse.reshape(rmse.shape[0], 1)
    print(f'Finished {rmse.shape[0]} runs')
return rmse
# END run_source_lsq()
# read parameter distributions
datapath = file_settings()[1]
para_info = pd.read_csv(datapath + 'Parameters-PCE.csv')
# define the variables for PCE
param_file = file_settings()[-1]
ind_vars, variable = variables_prep(param_file, product_uniform='uniform', dummy=False)
var_trans = AffineRandomVariableTransformation(variable, enforce_bounds=True)
# Create PyApprox model
n_candidate_samples = 10000
candidate_samples = -np.cos(np.pi*pya.sobol_sequence(var_trans.num_vars(),
n_candidate_samples))
pce = pya.AdaptiveLejaPCE(var_trans.num_vars(), candidate_samples=candidate_samples)
# Define criteria
max_level = 6
err_tol = 1e-8
max_num_samples = 100
max_level_1d = [max_level]*(pce.num_vars)
admissibility_function = partial(
max_level_admissibility_function, max_level, max_level_1d,
max_num_samples, err_tol)
refinement_indicator = variance_pce_refinement_indicator
pce.set_function(run_source_lsq, var_trans)
pce.set_refinement_functions(
refinement_indicator,
admissibility_function,
clenshaw_curtis_rule_growth
)
# Generate emulator
pce.build()
# store PCE
import pickle
pickle.dump(pce, open(f'{file_settings()[0]}/pce-rmse.pkl', "wb"))
# set the parameter values to initial values
for vs in vs_list:
vs = change_param_values(vs, initial_values, fromList=True)
kill_all_now(processes)
|
[
"pandas.DataFrame",
"functools.partial",
"pandas.Timestamp",
"funcs.modeling_funcs.modeling_settings",
"pyapprox.variable_transformations.AffineRandomVariableTransformation",
"pandas.read_csv",
"funcs.read_data.variables_prep",
"funcs.read_data.file_settings",
"funcs.modeling_funcs.generate_observation_ensemble",
"veneer.manage.kill_all_now",
"pandas.to_datetime",
"numpy.mean",
"numpy.arange",
"funcs.modeling_funcs.vs_settings",
"funcs.modeling_funcs.change_param_values",
"funcs.modeling_funcs.obtain_initials",
"funcs.modeling_funcs.paralell_vs"
] |
[((872, 891), 'funcs.modeling_funcs.modeling_settings', 'modeling_settings', ([], {}), '()\n', (889, 891), False, 'from funcs.modeling_funcs import modeling_settings, generate_observation_ensemble\n'), ((911, 973), 'funcs.modeling_funcs.paralell_vs', 'paralell_vs', (['first_port', 'num_copies', 'project_name', 'veneer_name'], {}), '(first_port, num_copies, project_name, veneer_name)\n', (922, 973), False, 'from funcs.modeling_funcs import vs_settings, modeling_settings, paralell_vs, obtain_initials, change_param_values\n'), ((985, 1021), 'funcs.modeling_funcs.vs_settings', 'vs_settings', (['ports', 'things_to_record'], {}), '(ports, things_to_record)\n', (996, 1021), False, 'from funcs.modeling_funcs import vs_settings, modeling_settings, paralell_vs, obtain_initials, change_param_values\n'), ((1082, 1109), 'funcs.modeling_funcs.obtain_initials', 'obtain_initials', (['vs_list[0]'], {}), '(vs_list[0])\n', (1097, 1109), False, 'from funcs.modeling_funcs import vs_settings, modeling_settings, paralell_vs, obtain_initials, change_param_values\n'), ((4073, 4117), 'pandas.read_csv', 'pd.read_csv', (["(datapath + 'Parameters-PCE.csv')"], {}), "(datapath + 'Parameters-PCE.csv')\n", (4084, 4117), True, 'import pandas as pd\n'), ((4204, 4270), 'funcs.read_data.variables_prep', 'variables_prep', (['param_file'], {'product_uniform': '"""uniform"""', 'dummy': '(False)'}), "(param_file, product_uniform='uniform', dummy=False)\n", (4218, 4270), False, 'from funcs.read_data import variables_prep, file_settings\n'), ((4283, 4348), 'pyapprox.variable_transformations.AffineRandomVariableTransformation', 'AffineRandomVariableTransformation', (['variable'], {'enforce_bounds': '(True)'}), '(variable, enforce_bounds=True)\n', (4317, 4348), False, 'from pyapprox.variable_transformations import AffineRandomVariableTransformation\n'), ((4746, 4842), 'functools.partial', 'partial', (['max_level_admissibility_function', 'max_level', 'max_level_1d', 'max_num_samples', 'err_tol'], {}), '(max_level_admissibility_function, max_level, max_level_1d,\n max_num_samples, err_tol)\n', (4753, 4842), False, 'from functools import partial\n'), ((5327, 5350), 'veneer.manage.kill_all_now', 'kill_all_now', (['processes'], {}), '(processes)\n', (5339, 5350), False, 'from veneer.manage import start, kill_all_now\n'), ((1431, 1491), 'pandas.read_csv', 'pd.read_csv', (['"""../data/Parameters-PCE.csv"""'], {'index_col': '"""Index"""'}), "('../data/Parameters-PCE.csv', index_col='Index')\n", (1442, 1491), True, 'import pandas as pd\n'), ((2415, 2459), 'pandas.to_datetime', 'pd.to_datetime', (["['2009/07/01', '2018/06/30']"], {}), "(['2009/07/01', '2018/06/30'])\n", (2429, 2459), True, 'import pandas as pd\n'), ((2570, 2604), 'pandas.to_datetime', 'pd.to_datetime', (['observed_din.index'], {}), '(observed_din.index)\n', (2584, 2604), True, 'import pandas as pd\n'), ((3194, 3213), 'funcs.modeling_funcs.modeling_settings', 'modeling_settings', ([], {}), '()\n', (3211, 3213), False, 'from funcs.modeling_funcs import modeling_settings, generate_observation_ensemble\n'), ((3224, 3327), 'funcs.modeling_funcs.generate_observation_ensemble', 'generate_observation_ensemble', (['vs_list', 'criteria', 'start_date', 'end_date', 'parameter_df', 'retrieve_time'], {}), '(vs_list, criteria, start_date, end_date,\n parameter_df, retrieve_time)\n', (3253, 3327), False, 'from funcs.modeling_funcs import modeling_settings, generate_observation_ensemble\n'), ((4042, 4057), 'funcs.read_data.file_settings', 'file_settings', ([], {}), '()\n', (4055, 4057), False, 
'from funcs.read_data import variables_prep, file_settings\n'), ((4163, 4178), 'funcs.read_data.file_settings', 'file_settings', ([], {}), '()\n', (4176, 4178), False, 'from funcs.read_data import variables_prep, file_settings\n'), ((5271, 5325), 'funcs.modeling_funcs.change_param_values', 'change_param_values', (['vs', 'initial_values'], {'fromList': '(True)'}), '(vs, initial_values, fromList=True)\n', (5290, 5325), False, 'from funcs.modeling_funcs import vs_settings, modeling_settings, paralell_vs, obtain_initials, change_param_values\n'), ((3033, 3059), 'pandas.Timestamp', 'pd.Timestamp', (['"""2009-07-01"""'], {}), "('2009-07-01')\n", (3045, 3059), True, 'import pandas as pd\n'), ((3061, 3087), 'pandas.Timestamp', 'pd.Timestamp', (['"""2018-06-30"""'], {}), "('2018-06-30')\n", (3073, 3087), True, 'import pandas as pd\n'), ((3633, 3673), 'pandas.DataFrame', 'pd.DataFrame', (['din_126001A'], {'dtype': '"""float"""'}), "(din_126001A, dtype='float')\n", (3645, 3673), True, 'import pandas as pd\n'), ((3694, 3730), 'pandas.DataFrame', 'pd.DataFrame', (['obs_din'], {'dtype': '"""float"""'}), "(obs_din, dtype='float')\n", (3706, 3730), True, 'import pandas as pd\n'), ((3803, 3830), 'numpy.mean', 'np.mean', (['(resid ** 2)'], {'axis': '(0)'}), '(resid ** 2, axis=0)\n', (3810, 3830), True, 'import numpy as np\n'), ((2836, 2860), 'numpy.arange', 'np.arange', (['vars.shape[1]'], {}), '(vars.shape[1])\n', (2845, 2860), True, 'import numpy as np\n'), ((2054, 2100), 'numpy.arange', 'np.arange', (['df.index[0].year', 'df.index[-1].year'], {}), '(df.index[0].year, df.index[-1].year)\n', (2063, 2100), True, 'import numpy as np\n'), ((2494, 2509), 'funcs.read_data.file_settings', 'file_settings', ([], {}), '()\n', (2507, 2509), False, 'from funcs.read_data import variables_prep, file_settings\n'), ((5155, 5170), 'funcs.read_data.file_settings', 'file_settings', ([], {}), '()\n', (5168, 5170), False, 'from funcs.read_data import variables_prep, file_settings\n')]
|
from datetime import datetime
from flask_sqlalchemy import SQLAlchemy as _SQLAlchemy, BaseQuery
from sqlalchemy import inspect, Column, Integer, SmallInteger, orm
from contextlib import contextmanager
from app.libs.error_code import NotFound
class SQLAlchemy(_SQLAlchemy):
@contextmanager
def auto_commit(self):
try:
yield
self.session.commit()
except Exception as e:
            self.session.rollback()
raise e
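    # Usage sketch for auto_commit (obj is illustrative): the write is committed
    # on success and rolled back if an exception is raised.
    #
    #   with db.auto_commit():
    #       db.session.add(obj)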
class Query(BaseQuery):
def filter_query(self, **kwargs):
if 'status' not in kwargs.keys():
kwargs['status'] = 1
return super(Query, self).filter_by(**kwargs)
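    # Usage sketch (User is an illustrative model): because Query is passed as
    # query_class below, User.query.filter_query(id=1) behaves like
    # User.query.filter_by(id=1, status=1) unless a status is given explicitly.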
db = SQLAlchemy(query_class = Query)
class Base(db.Model):
__abstract__ = True
def set_attrs(self, attrs_dict):
for key, value in attrs_dict.items():
if hasattr(self, key) and key != 'id':
setattr(self, key, value)
def keys(self):
return self.fields
class MixinJSONSerializer:
@orm.reconstructor
def init_on_load(self):
self._fields = []
# self._include = []
self._exclude = []
self._set_fields()
self.__prune_fields()
def _set_fields(self):
pass
def __prune_fields(self):
columns = inspect(self.__class__).columns
if not self._fields:
all_columns = set(columns.keys())
self._fields = list(all_columns - set(self._exclude))
def hide(self, *args):
for key in args:
self._fields.remove(key)
return self
def keys(self):
return self._fields
def __getitem__(self, key):
return getattr(self, key)
|
[
"sqlalchemy.inspect"
] |
[((1283, 1306), 'sqlalchemy.inspect', 'inspect', (['self.__class__'], {}), '(self.__class__)\n', (1290, 1306), False, 'from sqlalchemy import inspect, Column, Integer, SmallInteger, orm\n')]
|
import os
import discord
import Cogs #type: ignore
import glo #type: ignore
from discord.ext import commands
class PhaseBot(commands.Bot):
""" The bot """
async def on_ready(self):
print("Discodo!") # Great, it's working
await bot.change_presence(activity = discord.Activity(name = f"my startup...", type = discord.ActivityType.watching)) # Simplistic help
ud = glo.JSONREAD("userdata.json")
del ud["default"]
for k in ud:
k = int(k)
u = bot.get_user(k)
if u is None:
name = "Member left"
else:
name = u.name
glo.SETNAME(k, name)
await bot.change_presence(activity = discord.Activity(name = f"le noir | v{glo.VERSION}", type = discord.ActivityType.watching)) # Simplistic help
async def on_message(self, message):
if message.channel.id == 796374619900084255:
os.system("git pull")
os.system("pm2 restart Phase")
if message.author.bot: return # We don't like bots
return await bot.process_commands(message)
bot = PhaseBot(command_prefix = glo.PREFIX, intents = discord.Intents.all()) # Writing the embed
bot.remove_command('help') # Removing default help (I don't like it)
bot.add_cog(Cogs.Admin(bot)) # Many cog
bot.add_cog(Cogs.Bank(bot))
bot.add_cog(Cogs.Counting(bot))
bot.add_cog(Cogs.General(bot))
bot.add_cog(Cogs.Listeners(bot))
bot.add_cog(Cogs.Starboard(bot))
bot.add_cog(Cogs.Tasks(bot))
bot.run(glo.GLOBAL_READ("token"))
|
[
"discord.Activity",
"Cogs.Listeners",
"Cogs.Counting",
"Cogs.Admin",
"Cogs.Tasks",
"glo.GLOBAL_READ",
"os.system",
"Cogs.General",
"Cogs.Bank",
"glo.SETNAME",
"discord.Intents.all",
"glo.JSONREAD",
"Cogs.Starboard"
] |
[((1293, 1308), 'Cogs.Admin', 'Cogs.Admin', (['bot'], {}), '(bot)\n', (1303, 1308), False, 'import Cogs\n'), ((1333, 1347), 'Cogs.Bank', 'Cogs.Bank', (['bot'], {}), '(bot)\n', (1342, 1347), False, 'import Cogs\n'), ((1361, 1379), 'Cogs.Counting', 'Cogs.Counting', (['bot'], {}), '(bot)\n', (1374, 1379), False, 'import Cogs\n'), ((1393, 1410), 'Cogs.General', 'Cogs.General', (['bot'], {}), '(bot)\n', (1405, 1410), False, 'import Cogs\n'), ((1424, 1443), 'Cogs.Listeners', 'Cogs.Listeners', (['bot'], {}), '(bot)\n', (1438, 1443), False, 'import Cogs\n'), ((1457, 1476), 'Cogs.Starboard', 'Cogs.Starboard', (['bot'], {}), '(bot)\n', (1471, 1476), False, 'import Cogs\n'), ((1490, 1505), 'Cogs.Tasks', 'Cogs.Tasks', (['bot'], {}), '(bot)\n', (1500, 1505), False, 'import Cogs\n'), ((1515, 1539), 'glo.GLOBAL_READ', 'glo.GLOBAL_READ', (['"""token"""'], {}), "('token')\n", (1530, 1539), False, 'import glo\n'), ((398, 427), 'glo.JSONREAD', 'glo.JSONREAD', (['"""userdata.json"""'], {}), "('userdata.json')\n", (410, 427), False, 'import glo\n'), ((1169, 1190), 'discord.Intents.all', 'discord.Intents.all', ([], {}), '()\n', (1188, 1190), False, 'import discord\n'), ((655, 675), 'glo.SETNAME', 'glo.SETNAME', (['k', 'name'], {}), '(k, name)\n', (666, 675), False, 'import glo\n'), ((938, 959), 'os.system', 'os.system', (['"""git pull"""'], {}), "('git pull')\n", (947, 959), False, 'import os\n'), ((972, 1002), 'os.system', 'os.system', (['"""pm2 restart Phase"""'], {}), "('pm2 restart Phase')\n", (981, 1002), False, 'import os\n'), ((286, 361), 'discord.Activity', 'discord.Activity', ([], {'name': 'f"""my startup..."""', 'type': 'discord.ActivityType.watching'}), "(name=f'my startup...', type=discord.ActivityType.watching)\n", (302, 361), False, 'import discord\n'), ((721, 812), 'discord.Activity', 'discord.Activity', ([], {'name': 'f"""le noir | v{glo.VERSION}"""', 'type': 'discord.ActivityType.watching'}), "(name=f'le noir | v{glo.VERSION}', type=discord.\n ActivityType.watching)\n", (737, 812), False, 'import discord\n')]
|
import random
import time
print("this game is blackjack. If you are wondering which focus day this is connected to, it isn't connected to any of them. ")
print()
print("I started making it, and I forgot it had to be related to a focus day, but it was too late to switch, so here it is ")
print()
print("how to play: your goal is to get your card total closest to 21, and to beat the dealer. If you get over 21, you lose. stand to give the turn to the dealer, and hit to draw a new card")
#defines lists and values
cardlist=[]
dealerlist=[]
cardlistStr=[]
dealerlistStr=[]
c1=0
c2=0
c3=0
c4=0
a=0
b=0
da=0
db=0
winx=0
losex=0
#defines the list for where I will take card names
carddeck=['A','2','3','4','5','6','7','8','9','10','J','Q','K','A','2','3','4','5','6','7','8','9','10','J','Q','K','A','2','3','4','5','6','7','8','9','10','J','Q','K','A','2','3','4','5','6','7','8','9','10','J','Q','K']
#Assigns values to the card names
cardvalue={
'A': 11,
'2': 2,
'3': 3,
'4': 4,
'5': 5,
'6': 6,
'7': 7,
'8': 8,
'9': 9,
'10': 10,
'J': 10,
'Q': 10,
'K': 10
}
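#example (illustrative): a hand of ['A','K'] is worth cardvalue['A']+cardvalue['K'] = 11+10 = 21, which is blackjack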
#this function crashes python
def crash():
try:
crash()
except:
crash()
#blackjack function
def blackjack():
#define lose, tie, win functions that happen when you lose, win or tie
def lose():
global cardlist
global dealerlist
global cardlistStr
global dealerlistStr
global losex
losex=losex+1
        print('you have won '+str(winx) + " times and lost "+str(losex)+" times")
print()
print('you lost :(')
print("The dealer's cards are ")
print(dealerlistStr)
print()
print("your cards are ")
print(cardlistStr)
cardlist=[]
dealerlist=[]
cardlistStr=[]
dealerlistStr=[]
again=input('try again? ')
if again==('yes'):
blackjack()
if again==('no'):
crash()
else:
again=input('yes or no')
if again==('yes'):
blackjack()
if again==('no'):
crash()
def win():
global cardlist
global dealerlist
global cardlistStr
global dealerlistStr
global winx
winx=winx+1
print('you have won '+str(winx) + " times and lost "+str(losex)+" times")
print()
print('you won :)')
print("The dealer's cards are ")
print(dealerlistStr)
print()
print("your cards are ")
print(cardlistStr)
cardlist=[]
dealerlist=[]
cardlistStr=[]
dealerlistStr=[]
again2=input('play again? ')
if again2==('yes'):
blackjack()
if again2==('no'):
crash()
        if again2 != ('yes') and again2 != ('no'):
again2=input('yes or no')
if again2==('yes'):
blackjack()
if again2==('no'):
crash()
def tie():
global cardlist
global dealerlist
global cardlistStr
global dealerlistStr
print("The dealer's cards are ")
print(dealerlistStr)
print()
print("your cards are ")
print(cardlistStr)
cardlist=[]
dealerlist=[]
cardlistStr=[]
dealerlistStr=[]
again2=input('you tied, play again? ')
if again2==('yes'):
blackjack()
if again2==('no'):
print('ok')
crash()
        if again2 != ('yes') and again2 != ('no'):
again2=input('yes or no')
if again2==('yes'):
blackjack()
if again2==('no'):
print('ok')
crash()
#globals the lists
global cardlist
global dealerlist
global cardlistStr
global dealerlistStr
#defines lists and some random ints
cardlist=[]
dealerlist=[]
cardlistStr=[]
dealerlistStr=[]
c1=(random.randint(0,51))
c2=(random.randint(0,51))
c3=(random.randint(0,51))
c4=(random.randint(0,51))
#this prints what your cards are at the start of the game
print('Your cards are '+str(carddeck[c1])+' and '+str(carddeck[c2]))
print("The dealer's open card is "+str(carddeck[c3]))
#after the dealer finishes their turn, this code checks who wins, loses, or ties
def standcheck():
if sum(dealerlist)<=(21):
if sum(dealerlist)>sum(cardlist):
lose()
if sum(cardlist)>sum(dealerlist):
win()
if sum(dealerlist)==(21):
if sum(dealerlist)==sum(cardlist):
tie()
else:
lose()
if sum(dealerlist)>(21):
for x in range(len(dealerlist)):
if dealerlist[x]==(11):
dealerlist[x]=(1)
if sum(dealerlist)>(21):
win()
#This determines what move the dealer does when it is their turn
def stand():
if sum(dealerlist)>(17):
standcheck()
if sum(dealerlist)==sum(cardlist):
standcheck()
if sum(dealerlist)>sum(cardlist):
lose()
else:
dc1=(random.randint(0,51))
dealerlist.append(cardvalue[carddeck[dc1]])
dealerlistStr.append(carddeck[dc1])
while sum(dealerlist)<=(16):
dc2=(random.randint(0,51))
dealerlist.append(cardvalue[carddeck[dc2]])
dealerlistStr.append(carddeck[dc2])
standcheck()
if sum(dealerlist)>(17):
standcheck()
    #Adds all the beginning variables to their respective lists
cardlist.append(cardvalue[carddeck[c1]])
cardlist.append(cardvalue[carddeck[c2]])
dealerlist.append(cardvalue[carddeck[c3]])
dealerlist.append(cardvalue[carddeck[c4]])
cardlistStr.append(carddeck[c1])
cardlistStr.append(carddeck[c2])
dealerlistStr.append(carddeck[c3])
dealerlistStr.append(carddeck[c4])
    #asks the player to hit or stand
choice1=input('Hit or stand? ')
while choice1!=('hit') and choice1!=('stand'):
choice1=input('Pick either hit or stand ')
    if sum(cardlist)==(21):
win()
if choice1==('hit'):
c5=random.randint(0,51)
cardlist.append(cardvalue[carddeck[c5]])
cardlistStr.append(carddeck[c5])
print('Your cards are '+str(carddeck[c1])+', '+str(carddeck[c2])+', and '+ str(carddeck[c5]))
if sum(cardlist)>(21):
for x in range(len(cardlist)):
if cardlist[x]==(11):
cardlist[x]=(1)
if sum(cardlist)>(21):
lose()
if sum(cardlist)==21:
print('BLACKJACK')
win()
if sum(cardlist)<(21):
choice1=input('Hit or stand? ')
while choice1!=('hit') and choice1!=('stand'):
choice1=input('Pick either hit or stand ')
if choice1==('stand'):
stand()
if choice1==('hit'):
c6=random.randint(0,51)
cardlist.append(cardvalue[carddeck[c6]])
cardlistStr.append(carddeck[c6])
print('Your cards are '+str(carddeck[c1])+', '+str(carddeck[c2])+', '+ str(carddeck[c5])+', and '+(carddeck[c6]))
if sum(cardlist)>21:
for x in range(len(cardlist)):
if cardlist[x]==(11):
cardlist[x]=(1)
if sum(cardlist)>(21):
lose()
if sum(cardlist)==21:
print('BLACKJACK')
win()
else:
choice1=input('Hit or stand? ')
while choice1!=('hit') and choice1!=('stand'):
choice1=input('Pick either hit or stand ')
if choice1==('stand'):
stand()
if choice1==('hit'):
c7=(random.randint(0,51))
cardlist.append(cardvalue[carddeck[c7]])
cardlistStr.append(carddeck[c7])
print('Your cards are '+str(carddeck[c1])+', '+str(carddeck[c2])+', '+ str(carddeck[c5])+', '+(carddeck[c6])+', and '+(carddeck[c7]))
if sum(cardlist)>21:
for x in range(len(cardlist)):
if cardlist[x]==(11):
cardlist[x]=(1)
if sum(cardlist)>(21):
lose()
if sum(cardlist)==21:
print('BLACKJACK')
win()
else:
choice1=input('Hit or stand? ')
while choice1!=('hit') and choice1!=('stand'):
choice1=input('Pick either hit or stand ')
if choice1==('stand'):
stand()
if choice1==('hit'):
c8=(random.randint(0,51))
cardlist.append(cardvalue[carddeck[c8]])
cardlistStr.append(carddeck[c8])
print('Your cards are '+str(carddeck[c1])+', '+str(carddeck[c2])+', '+ str(carddeck[c5])+', '+(carddeck[c6])+', '+(carddeck[c7])+', and '+(carddeck[c8]))
if sum(cardlist)>21:
for x in range(len(cardlist)):
if cardlist[x]==(11):
cardlist[x]=(1)
if sum(cardlist)>(21):
lose()
if sum(cardlist)==21:
print('BLACKJACK')
win()
else:
choice1=input('Hit or stand? ')
while choice1!=('hit') and choice1!=('stand'):
choice1=input('Pick either hit or stand ')
if choice1==('stand'):
stand()
if choice1==('hit'):
c9=(random.randint(0,51))
cardlist.append(cardvalue[carddeck[c9]])
cardlistStr.append(carddeck[c9])
print('Your cards are '+str(carddeck[c1])+', '+str(carddeck[c2])+', '+ str(carddeck[c5])+', '+(carddeck[c6])+', '+(carddeck[c7])+', '+(carddeck[c8])+', and '+(carddeck[c9]))
if sum(cardlist)>21:
for x in range(len(cardlist)):
if cardlist[x]==(11):
cardlist[x]=(1)
if sum(cardlist)>(21):
lose()
if sum(cardlist)==21:
print('BLACKJACK')
win()
else:
choice1=input('Hit or stand? ')
while choice1!=('hit') and choice1!=('stand'):
choice1=input('Pick either hit or stand ')
if choice1==('stand'):
stand()
if choice1==('hit'):
c10=(random.randint(0,51))
cardlist.append(cardvalue[carddeck[c10]])
cardlistStr.append(carddeck[c10])
                                                print('Your cards are '+str(carddeck[c1])+', '+str(carddeck[c2])+', '+ str(carddeck[c5])+', '+(carddeck[c6])+', '+(carddeck[c7])+', '+(carddeck[c8])+', '+(carddeck[c9])+', '+(carddeck[c10]))
if sum(cardlist)>21:
for x in range(len(cardlist)):
if cardlist[x]==(11):
cardlist[x]=(1)
if sum(cardlist)>(21):
lose()
if sum(cardlist)==21:
print('BLACKJACK')
win()
else:
choice1=input('Hit or stand? ')
while choice1!=('hit') and choice1!=('stand'):
choice1=input('Pick either hit or stand ')
if choice1==('stand'):
stand()
if choice1==('hit'):
c11=(random.randint(0,51))
cardlist.append(cardvalue[carddeck[c11]])
cardlistStr.append(carddeck[c11])
print('Your cards are '+str(carddeck[c1])+', '+str(carddeck[c2])+', '+ str(carddeck[c5])+', '+(carddeck[c6])+', '+(carddeck[c7])+', '+(carddeck[c8])+', '+(carddeck[c9])+', '+(carddeck[c10])+" and "+(carddeck[c11]))
if sum(cardlist)>21:
for x in range(len(cardlist)):
if cardlist[x]==(11):
cardlist[x]=(1)
if sum(cardlist)>(21):
lose()
if sum(cardlist)==21:
print('BLACKJACK')
win()
if choice1==('stand'):
stand()
#a
blackjack()
|
[
"random.randint"
] |
[((3701, 3722), 'random.randint', 'random.randint', (['(0)', '(51)'], {}), '(0, 51)\n', (3715, 3722), False, 'import random\n'), ((3729, 3750), 'random.randint', 'random.randint', (['(0)', '(51)'], {}), '(0, 51)\n', (3743, 3750), False, 'import random\n'), ((3757, 3778), 'random.randint', 'random.randint', (['(0)', '(51)'], {}), '(0, 51)\n', (3771, 3778), False, 'import random\n'), ((3785, 3806), 'random.randint', 'random.randint', (['(0)', '(51)'], {}), '(0, 51)\n', (3799, 3806), False, 'import random\n'), ((5773, 5794), 'random.randint', 'random.randint', (['(0)', '(51)'], {}), '(0, 51)\n', (5787, 5794), False, 'import random\n'), ((4800, 4821), 'random.randint', 'random.randint', (['(0)', '(51)'], {}), '(0, 51)\n', (4814, 4821), False, 'import random\n'), ((4962, 4983), 'random.randint', 'random.randint', (['(0)', '(51)'], {}), '(0, 51)\n', (4976, 4983), False, 'import random\n'), ((6500, 6521), 'random.randint', 'random.randint', (['(0)', '(51)'], {}), '(0, 51)\n', (6514, 6521), False, 'import random\n'), ((7319, 7340), 'random.randint', 'random.randint', (['(0)', '(51)'], {}), '(0, 51)\n', (7333, 7340), False, 'import random\n'), ((8294, 8315), 'random.randint', 'random.randint', (['(0)', '(51)'], {}), '(0, 51)\n', (8308, 8315), False, 'import random\n'), ((9473, 9494), 'random.randint', 'random.randint', (['(0)', '(51)'], {}), '(0, 51)\n', (9487, 9494), False, 'import random\n'), ((10829, 10850), 'random.randint', 'random.randint', (['(0)', '(51)'], {}), '(0, 51)\n', (10843, 10850), False, 'import random\n'), ((12365, 12386), 'random.randint', 'random.randint', (['(0)', '(51)'], {}), '(0, 51)\n', (12379, 12386), False, 'import random\n')]
|
import pandas
import os
PATH_TO_DATASETS = './mlpractice/datasets/'
class DataSet(object):
def __init__(self, dir_name, extensions=['.csv'], path_to_datasets=PATH_TO_DATASETS):
data_dir = os.path.join(path_to_datasets, dir_name)
for file_name in os.listdir(data_dir):
name, ext = os.path.splitext(file_name)
if ext in extensions:
data = pandas.read_csv(filepath_or_buffer=os.path.join(data_dir, file_name))
setattr(self, name, data)
def load_iris():
return DataSet(dir_name='iris/')
def load_movieLens():
return DataSet(dir_name='ml-latest-small/')
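# Usage sketch: every CSV in the dataset folder becomes an attribute named after
# the file, so the available attributes depend on the folder contents
# (ratings below is illustrative):
#
#   movies = load_movieLens()
#   movies.ratings   # a pandas DataFrame, if ratings.csv is present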
|
[
"os.path.splitext",
"os.path.join",
"os.listdir"
] |
[((206, 246), 'os.path.join', 'os.path.join', (['path_to_datasets', 'dir_name'], {}), '(path_to_datasets, dir_name)\n', (218, 246), False, 'import os\n'), ((272, 292), 'os.listdir', 'os.listdir', (['data_dir'], {}), '(data_dir)\n', (282, 292), False, 'import os\n'), ((318, 345), 'os.path.splitext', 'os.path.splitext', (['file_name'], {}), '(file_name)\n', (334, 345), False, 'import os\n'), ((438, 471), 'os.path.join', 'os.path.join', (['data_dir', 'file_name'], {}), '(data_dir, file_name)\n', (450, 471), False, 'import os\n')]
|
# -*- coding: utf-8 -*-
# Form implementation generated from reading ui file 'resamplingDialogUi.ui'
#
# Created by: PyQt5 UI code generator 5.9.2
#
# WARNING! All changes made in this file will be lost!
from PyQt5 import QtCore, QtGui, QtWidgets
class Ui_resamplingDialog(object):
def setupUi(self, resamplingDialog):
resamplingDialog.setObjectName("resamplingDialog")
resamplingDialog.resize(406, 540)
self.gridLayout = QtWidgets.QGridLayout(resamplingDialog)
self.gridLayout.setObjectName("gridLayout")
self.scrollArea = QtWidgets.QScrollArea(resamplingDialog)
self.scrollArea.setMinimumSize(QtCore.QSize(0, 0))
self.scrollArea.setVerticalScrollBarPolicy(QtCore.Qt.ScrollBarAsNeeded)
self.scrollArea.setHorizontalScrollBarPolicy(QtCore.Qt.ScrollBarAsNeeded)
self.scrollArea.setSizeAdjustPolicy(QtWidgets.QAbstractScrollArea.AdjustToContents)
self.scrollArea.setWidgetResizable(True)
self.scrollArea.setObjectName("scrollArea")
self.scrollAreaWidgetContents = QtWidgets.QWidget()
self.scrollAreaWidgetContents.setGeometry(QtCore.QRect(0, 0, 386, 489))
self.scrollAreaWidgetContents.setMinimumSize(QtCore.QSize(0, 0))
self.scrollAreaWidgetContents.setObjectName("scrollAreaWidgetContents")
self.gridLayout_2 = QtWidgets.QGridLayout(self.scrollAreaWidgetContents)
self.gridLayout_2.setObjectName("gridLayout_2")
self.groupBoxResample = QtWidgets.QGroupBox(self.scrollAreaWidgetContents)
self.groupBoxResample.setObjectName("groupBoxResample")
self.formLayout = QtWidgets.QFormLayout(self.groupBoxResample)
self.formLayout.setObjectName("formLayout")
self.labelCurrentRateHeading = QtWidgets.QLabel(self.groupBoxResample)
self.labelCurrentRateHeading.setObjectName("labelCurrentRateHeading")
self.formLayout.setWidget(0, QtWidgets.QFormLayout.LabelRole, self.labelCurrentRateHeading)
self.labelCurrentRateValue = QtWidgets.QLabel(self.groupBoxResample)
self.labelCurrentRateValue.setText("")
self.labelCurrentRateValue.setObjectName("labelCurrentRateValue")
self.formLayout.setWidget(0, QtWidgets.QFormLayout.FieldRole, self.labelCurrentRateValue)
self.labelNewRateHeading = QtWidgets.QLabel(self.groupBoxResample)
self.labelNewRateHeading.setObjectName("labelNewRateHeading")
self.formLayout.setWidget(1, QtWidgets.QFormLayout.LabelRole, self.labelNewRateHeading)
self.doubleSpinBoxNewRate = QtWidgets.QDoubleSpinBox(self.groupBoxResample)
self.doubleSpinBoxNewRate.setMaximum(10000.0)
self.doubleSpinBoxNewRate.setProperty("value", 100.0)
self.doubleSpinBoxNewRate.setObjectName("doubleSpinBoxNewRate")
self.formLayout.setWidget(1, QtWidgets.QFormLayout.FieldRole, self.doubleSpinBoxNewRate)
self.gridLayout_2.addWidget(self.groupBoxResample, 0, 0, 1, 1)
self.groupBoxBatching = QtWidgets.QGroupBox(self.scrollAreaWidgetContents)
self.groupBoxBatching.setObjectName("groupBoxBatching")
self.gridLayoutBatching = QtWidgets.QGridLayout(self.groupBoxBatching)
self.gridLayoutBatching.setObjectName("gridLayoutBatching")
self.batchingWidgetPlaceholder = QtWidgets.QWidget(self.groupBoxBatching)
self.batchingWidgetPlaceholder.setMinimumSize(QtCore.QSize(300, 300))
self.batchingWidgetPlaceholder.setObjectName("batchingWidgetPlaceholder")
self.gridLayoutBatching.addWidget(self.batchingWidgetPlaceholder, 0, 0, 1, 1)
self.gridLayout_2.addWidget(self.groupBoxBatching, 1, 0, 1, 1)
spacerItem = QtWidgets.QSpacerItem(20, 40, QtWidgets.QSizePolicy.Minimum, QtWidgets.QSizePolicy.Expanding)
self.gridLayout_2.addItem(spacerItem, 2, 0, 1, 1)
self.scrollArea.setWidget(self.scrollAreaWidgetContents)
self.gridLayout.addWidget(self.scrollArea, 0, 0, 1, 1)
self.horizontalLayout = QtWidgets.QHBoxLayout()
self.horizontalLayout.setObjectName("horizontalLayout")
spacerItem1 = QtWidgets.QSpacerItem(40, 20, QtWidgets.QSizePolicy.Expanding, QtWidgets.QSizePolicy.Minimum)
self.horizontalLayout.addItem(spacerItem1)
self.pushButtonCancel = QtWidgets.QPushButton(resamplingDialog)
self.pushButtonCancel.setObjectName("pushButtonCancel")
self.horizontalLayout.addWidget(self.pushButtonCancel)
self.pushButtonBatch = QtWidgets.QPushButton(resamplingDialog)
self.pushButtonBatch.setObjectName("pushButtonBatch")
self.horizontalLayout.addWidget(self.pushButtonBatch)
self.pushButtonApply = QtWidgets.QPushButton(resamplingDialog)
self.pushButtonApply.setObjectName("pushButtonApply")
self.horizontalLayout.addWidget(self.pushButtonApply)
self.gridLayout.addLayout(self.horizontalLayout, 1, 0, 1, 1)
self.retranslateUi(resamplingDialog)
self.pushButtonCancel.clicked.connect(resamplingDialog.reject)
self.pushButtonApply.clicked.connect(resamplingDialog.accept)
self.pushButtonBatch.clicked.connect(resamplingDialog.acceptBatch)
QtCore.QMetaObject.connectSlotsByName(resamplingDialog)
def retranslateUi(self, resamplingDialog):
_translate = QtCore.QCoreApplication.translate
resamplingDialog.setWindowTitle(_translate("resamplingDialog", "Meggie - Resampling"))
self.groupBoxResample.setTitle(_translate("resamplingDialog", "Resampling options:"))
self.labelCurrentRateHeading.setText(_translate("resamplingDialog", "Current rate:"))
self.labelNewRateHeading.setText(_translate("resamplingDialog", "Resample to:"))
self.groupBoxBatching.setTitle(_translate("resamplingDialog", "Batching"))
self.pushButtonCancel.setText(_translate("resamplingDialog", "Cancel"))
self.pushButtonBatch.setText(_translate("resamplingDialog", "Batch"))
self.pushButtonApply.setText(_translate("resamplingDialog", "Apply"))
|
[
"PyQt5.QtWidgets.QLabel",
"PyQt5.QtWidgets.QWidget",
"PyQt5.QtCore.QRect",
"PyQt5.QtWidgets.QGridLayout",
"PyQt5.QtWidgets.QHBoxLayout",
"PyQt5.QtWidgets.QPushButton",
"PyQt5.QtCore.QSize",
"PyQt5.QtWidgets.QGroupBox",
"PyQt5.QtWidgets.QSpacerItem",
"PyQt5.QtWidgets.QScrollArea",
"PyQt5.QtWidgets.QDoubleSpinBox",
"PyQt5.QtCore.QMetaObject.connectSlotsByName",
"PyQt5.QtWidgets.QFormLayout"
] |
[((453, 492), 'PyQt5.QtWidgets.QGridLayout', 'QtWidgets.QGridLayout', (['resamplingDialog'], {}), '(resamplingDialog)\n', (474, 492), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((571, 610), 'PyQt5.QtWidgets.QScrollArea', 'QtWidgets.QScrollArea', (['resamplingDialog'], {}), '(resamplingDialog)\n', (592, 610), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((1065, 1084), 'PyQt5.QtWidgets.QWidget', 'QtWidgets.QWidget', ([], {}), '()\n', (1082, 1084), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((1346, 1398), 'PyQt5.QtWidgets.QGridLayout', 'QtWidgets.QGridLayout', (['self.scrollAreaWidgetContents'], {}), '(self.scrollAreaWidgetContents)\n', (1367, 1398), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((1487, 1537), 'PyQt5.QtWidgets.QGroupBox', 'QtWidgets.QGroupBox', (['self.scrollAreaWidgetContents'], {}), '(self.scrollAreaWidgetContents)\n', (1506, 1537), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((1628, 1672), 'PyQt5.QtWidgets.QFormLayout', 'QtWidgets.QFormLayout', (['self.groupBoxResample'], {}), '(self.groupBoxResample)\n', (1649, 1672), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((1764, 1803), 'PyQt5.QtWidgets.QLabel', 'QtWidgets.QLabel', (['self.groupBoxResample'], {}), '(self.groupBoxResample)\n', (1780, 1803), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((2019, 2058), 'PyQt5.QtWidgets.QLabel', 'QtWidgets.QLabel', (['self.groupBoxResample'], {}), '(self.groupBoxResample)\n', (2035, 2058), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((2313, 2352), 'PyQt5.QtWidgets.QLabel', 'QtWidgets.QLabel', (['self.groupBoxResample'], {}), '(self.groupBoxResample)\n', (2329, 2352), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((2555, 2602), 'PyQt5.QtWidgets.QDoubleSpinBox', 'QtWidgets.QDoubleSpinBox', (['self.groupBoxResample'], {}), '(self.groupBoxResample)\n', (2579, 2602), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((2991, 3041), 'PyQt5.QtWidgets.QGroupBox', 'QtWidgets.QGroupBox', (['self.scrollAreaWidgetContents'], {}), '(self.scrollAreaWidgetContents)\n', (3010, 3041), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((3140, 3184), 'PyQt5.QtWidgets.QGridLayout', 'QtWidgets.QGridLayout', (['self.groupBoxBatching'], {}), '(self.groupBoxBatching)\n', (3161, 3184), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((3294, 3334), 'PyQt5.QtWidgets.QWidget', 'QtWidgets.QWidget', (['self.groupBoxBatching'], {}), '(self.groupBoxBatching)\n', (3311, 3334), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((3673, 3771), 'PyQt5.QtWidgets.QSpacerItem', 'QtWidgets.QSpacerItem', (['(20)', '(40)', 'QtWidgets.QSizePolicy.Minimum', 'QtWidgets.QSizePolicy.Expanding'], {}), '(20, 40, QtWidgets.QSizePolicy.Minimum, QtWidgets.\n QSizePolicy.Expanding)\n', (3694, 3771), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((3985, 4008), 'PyQt5.QtWidgets.QHBoxLayout', 'QtWidgets.QHBoxLayout', ([], {}), '()\n', (4006, 4008), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((4095, 4193), 'PyQt5.QtWidgets.QSpacerItem', 'QtWidgets.QSpacerItem', (['(40)', '(20)', 'QtWidgets.QSizePolicy.Expanding', 'QtWidgets.QSizePolicy.Minimum'], {}), '(40, 20, QtWidgets.QSizePolicy.Expanding, QtWidgets.\n QSizePolicy.Minimum)\n', (4116, 4193), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((4272, 4311), 'PyQt5.QtWidgets.QPushButton', 'QtWidgets.QPushButton', (['resamplingDialog'], {}), '(resamplingDialog)\n', (4293, 4311), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), 
((4470, 4509), 'PyQt5.QtWidgets.QPushButton', 'QtWidgets.QPushButton', (['resamplingDialog'], {}), '(resamplingDialog)\n', (4491, 4509), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((4665, 4704), 'PyQt5.QtWidgets.QPushButton', 'QtWidgets.QPushButton', (['resamplingDialog'], {}), '(resamplingDialog)\n', (4686, 4704), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((5168, 5223), 'PyQt5.QtCore.QMetaObject.connectSlotsByName', 'QtCore.QMetaObject.connectSlotsByName', (['resamplingDialog'], {}), '(resamplingDialog)\n', (5205, 5223), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((650, 668), 'PyQt5.QtCore.QSize', 'QtCore.QSize', (['(0)', '(0)'], {}), '(0, 0)\n', (662, 668), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((1135, 1163), 'PyQt5.QtCore.QRect', 'QtCore.QRect', (['(0)', '(0)', '(386)', '(489)'], {}), '(0, 0, 386, 489)\n', (1147, 1163), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((1218, 1236), 'PyQt5.QtCore.QSize', 'QtCore.QSize', (['(0)', '(0)'], {}), '(0, 0)\n', (1230, 1236), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((3389, 3411), 'PyQt5.QtCore.QSize', 'QtCore.QSize', (['(300)', '(300)'], {}), '(300, 300)\n', (3401, 3411), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n')]
|
from zah.router.app import Router
# from zah.store import Store
from zah.urls import render, render_page
from zah.core.servers import BaseServer, DevelopmentServer
from zah.shortcuts import get_default_server
app = BaseServer()
# app = get_default_server()
# app.use_component(Router)
# app.use_component(Store)
# def view1(request, **kwargs):
# return render(request, 'home.html')
# @app.as_route('/test2', 'test2')
# def view2(request, **kwargs):
# return render(request, 'home.html')
# app.add_route('/test', view1, 'test1')
# app.add_route('/test3', render_page('home.html'))
|
[
"zah.core.servers.BaseServer"
] |
[((216, 228), 'zah.core.servers.BaseServer', 'BaseServer', ([], {}), '()\n', (226, 228), False, 'from zah.core.servers import BaseServer, DevelopmentServer\n')]
|
import argparse
import os.path as osp
import mmcv
def parse_args():
parser = argparse.ArgumentParser(
description='Process a checkpoint to be published')
parser.add_argument('img_dir', help='img config directory')
parser.add_argument('gt_dir', help='gt config directory')
parser.add_argument('out_dir', help='output config directory')
args = parser.parse_args()
return args
# def main():
# args = parse_args()
# img_suffix = '_leftImg8bit.png'
# seg_map_suffix = '_gtFine_color.png'
# mmcv.mkdir_or_exist(args.out_dir)
# for img_file in mmcv.scandir(args.img_dir, suffix=img_suffix):
# seg_file = img_file.replace(img_suffix, seg_map_suffix)
# img = mmcv.imread(osp.join(args.img_dir, img_file))
# seg = mmcv.imread(osp.join(args.gt_dir, seg_file))
# binded = img * 0.5 + seg * 0.5
# mmcv.imwrite(binded, osp.join(args.out_dir, img_file))
def main():
args = parse_args()
img_suffix = '.jpg'
seg_map_suffix = '.png'
mmcv.mkdir_or_exist(args.out_dir)
for img_file in mmcv.scandir(
args.img_dir, suffix=img_suffix, recursive=True):
seg_file = img_file.replace(img_suffix, seg_map_suffix)
if not osp.exists(osp.join(args.gt_dir, seg_file)):
continue
img = mmcv.imread(osp.join(args.img_dir, img_file))
seg = mmcv.imread(osp.join(args.gt_dir, seg_file))
binded = img * 0.5 + seg * 0.5
mmcv.imwrite(binded, osp.join(args.out_dir, img_file))
if __name__ == '__main__':
main()
|
[
"mmcv.mkdir_or_exist",
"os.path.join",
"argparse.ArgumentParser",
"mmcv.scandir"
] |
[((84, 159), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""Process a checkpoint to be published"""'}), "(description='Process a checkpoint to be published')\n", (107, 159), False, 'import argparse\n'), ((1030, 1063), 'mmcv.mkdir_or_exist', 'mmcv.mkdir_or_exist', (['args.out_dir'], {}), '(args.out_dir)\n', (1049, 1063), False, 'import mmcv\n'), ((1084, 1145), 'mmcv.scandir', 'mmcv.scandir', (['args.img_dir'], {'suffix': 'img_suffix', 'recursive': '(True)'}), '(args.img_dir, suffix=img_suffix, recursive=True)\n', (1096, 1145), False, 'import mmcv\n'), ((1331, 1363), 'os.path.join', 'osp.join', (['args.img_dir', 'img_file'], {}), '(args.img_dir, img_file)\n', (1339, 1363), True, 'import os.path as osp\n'), ((1391, 1422), 'os.path.join', 'osp.join', (['args.gt_dir', 'seg_file'], {}), '(args.gt_dir, seg_file)\n', (1399, 1422), True, 'import os.path as osp\n'), ((1492, 1524), 'os.path.join', 'osp.join', (['args.out_dir', 'img_file'], {}), '(args.out_dir, img_file)\n', (1500, 1524), True, 'import os.path as osp\n'), ((1250, 1281), 'os.path.join', 'osp.join', (['args.gt_dir', 'seg_file'], {}), '(args.gt_dir, seg_file)\n', (1258, 1281), True, 'import os.path as osp\n')]
|
import torch.nn as nn
import logging
logger = logging.getLogger(__name__)
def conv3x3(in_channels, out_channels,activation=nn.ReLU(inplace=True)):
return nn.Sequential(
nn.Conv2d(in_channels, out_channels, 3, padding=1), nn.MaxPool2d(2),
nn.BatchNorm2d(out_channels, momentum=1., affine=True,
track_running_stats=False # When this is true is called the "transductive setting"
), activation
)
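# Shape note: the padded 3x3 conv keeps H and W, and MaxPool2d(2) then halves
# them. A minimal check (hypothetical sizes, assumes `import torch`):
#
#   block = conv3x3(3, 32)
#   out = block(torch.randn(2, 3, 84, 84))   # out.shape -> torch.Size([2, 32, 42, 42])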
class FullyConnectedLayer(nn.Module):
def __init__(self, num_layer=2):
super(FullyConnectedLayer, self).__init__()
'''
self.classifier = nn.Sequential(
nn.Flatten(),
nn.Linear(in_shape, out_features))
self.hidden_size = self.hidden_size
'''
self.fc_net = nn.Sequential(nn.Linear(1,64),nn.ReLU(inplace=True),nn.LayerNorm(normalized_shape=64))
for j in range(num_layer-1):
self.fc_net = nn.Sequential(self.fc_net, nn.Linear(64,64),nn.ReLU(inplace=True),nn.LayerNorm(normalized_shape=64)
)
def forward(self, inputs, params=None):
#features = inputs.view((inputs.size(0), -1))
logits = self.fc_net(inputs)
return logits
def trainable_parameters(self):
"""
Returns an iterator over the trainable parameters of the model.
"""
for param in self.parameters():
if param.requires_grad:
yield param
class TaskFullyConnectedLayer(nn.Module):
def __init__(self,num_layer=1, task_conv=0):
super(TaskFullyConnectedLayer, self).__init__()
'''
self.classifier = nn.Sequential(
nn.Flatten(),
nn.Linear(in_shape, out_features))
self.hidden_size = self.hidden_size
'''
if num_layer>1:
self.classifier = nn.Linear(64,1)
self.classifier = nn.Sequential(nn.Linear(64,1))
def forward(self, inputs, params=None):
#features = inputs.view((inputs.size(0), -1))
logits = self.classifier(inputs)
return logits
def trainable_parameters(self):
"""
Returns an iterator over the trainable parameters of the model.
"""
for param in self.parameters():
if param.requires_grad:
yield param
class TaskLinearLayer(nn.Module):
def __init__(self, in_shape, out_features,hidden_size=32,task_conv=0,dfc=True):
super(TaskLinearLayer, self).__init__()
self.in_shape = in_shape
self.out_features = out_features
if task_conv ==0 and not dfc:
self.classifier = nn.Sequential(
nn.Flatten(),
nn.Linear(in_shape, out_features))
elif dfc:
self.classifier = nn.Sequential(conv3x3(hidden_size, hidden_size,activation=nn.Softplus()), nn.Flatten(), nn.Linear(128, out_features))
else:
self.classifier = conv3x3(hidden_size, hidden_size)
for j in range(task_conv-1):
self.classifier = nn.Sequential(self.classifier, conv3x3(hidden_size, hidden_size,activation=nn.Softplus()))
self.classifier = nn.Sequential(self.classifier,
nn.Flatten(), nn.Linear(in_shape, out_features))
def forward(self, inputs, params=None):
#features = inputs.view((inputs.size(0), -1))
logits = self.classifier(inputs)
return logits
def trainable_parameters(self):
"""
Returns an iterator over the trainable parameters of the model.
"""
for param in self.parameters():
if param.requires_grad:
yield param
class ConvolutionalNeuralNetwork(nn.Module):
def __init__(self, in_channels, out_features, hidden_size=32,device=None,task_conv=0):
super(ConvolutionalNeuralNetwork, self).__init__()
self.in_channels = in_channels
self.out_features = out_features
self.hidden_size = hidden_size
assert task_conv >= 0, "Wrong call for task nets!"
self.features = conv3x3(in_channels, hidden_size)
for i in range(3-task_conv):
self.features = nn.Sequential(self.features, conv3x3(hidden_size, hidden_size))
def forward(self, inputs, params=None):
features = self.features(inputs)
return features
def trainable_parameters(self):
"""
Returns an iterator over the trainable parameters of the model.
"""
for param in self.parameters():
if param.requires_grad:
yield param
|
[
"torch.nn.ReLU",
"torch.nn.Conv2d",
"torch.nn.Softplus",
"torch.nn.LayerNorm",
"torch.nn.BatchNorm2d",
"torch.nn.Linear",
"torch.nn.MaxPool2d",
"logging.getLogger",
"torch.nn.Flatten"
] |
[((48, 75), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (65, 75), False, 'import logging\n'), ((127, 148), 'torch.nn.ReLU', 'nn.ReLU', ([], {'inplace': '(True)'}), '(inplace=True)\n', (134, 148), True, 'import torch.nn as nn\n'), ((189, 239), 'torch.nn.Conv2d', 'nn.Conv2d', (['in_channels', 'out_channels', '(3)'], {'padding': '(1)'}), '(in_channels, out_channels, 3, padding=1)\n', (198, 239), True, 'import torch.nn as nn\n'), ((241, 256), 'torch.nn.MaxPool2d', 'nn.MaxPool2d', (['(2)'], {}), '(2)\n', (253, 256), True, 'import torch.nn as nn\n'), ((270, 357), 'torch.nn.BatchNorm2d', 'nn.BatchNorm2d', (['out_channels'], {'momentum': '(1.0)', 'affine': '(True)', 'track_running_stats': '(False)'}), '(out_channels, momentum=1.0, affine=True, track_running_stats\n =False)\n', (284, 357), True, 'import torch.nn as nn\n'), ((899, 915), 'torch.nn.Linear', 'nn.Linear', (['(1)', '(64)'], {}), '(1, 64)\n', (908, 915), True, 'import torch.nn as nn\n'), ((915, 936), 'torch.nn.ReLU', 'nn.ReLU', ([], {'inplace': '(True)'}), '(inplace=True)\n', (922, 936), True, 'import torch.nn as nn\n'), ((937, 970), 'torch.nn.LayerNorm', 'nn.LayerNorm', ([], {'normalized_shape': '(64)'}), '(normalized_shape=64)\n', (949, 970), True, 'import torch.nn as nn\n'), ((2076, 2092), 'torch.nn.Linear', 'nn.Linear', (['(64)', '(1)'], {}), '(64, 1)\n', (2085, 2092), True, 'import torch.nn as nn\n'), ((2133, 2149), 'torch.nn.Linear', 'nn.Linear', (['(64)', '(1)'], {}), '(64, 1)\n', (2142, 2149), True, 'import torch.nn as nn\n'), ((1062, 1079), 'torch.nn.Linear', 'nn.Linear', (['(64)', '(64)'], {}), '(64, 64)\n', (1071, 1079), True, 'import torch.nn as nn\n'), ((1079, 1100), 'torch.nn.ReLU', 'nn.ReLU', ([], {'inplace': '(True)'}), '(inplace=True)\n', (1086, 1100), True, 'import torch.nn as nn\n'), ((1101, 1134), 'torch.nn.LayerNorm', 'nn.LayerNorm', ([], {'normalized_shape': '(64)'}), '(normalized_shape=64)\n', (1113, 1134), True, 'import torch.nn as nn\n'), ((2893, 2905), 'torch.nn.Flatten', 'nn.Flatten', ([], {}), '()\n', (2903, 2905), True, 'import torch.nn as nn\n'), ((2923, 2956), 'torch.nn.Linear', 'nn.Linear', (['in_shape', 'out_features'], {}), '(in_shape, out_features)\n', (2932, 2956), True, 'import torch.nn as nn\n'), ((3081, 3093), 'torch.nn.Flatten', 'nn.Flatten', ([], {}), '()\n', (3091, 3093), True, 'import torch.nn as nn\n'), ((3095, 3123), 'torch.nn.Linear', 'nn.Linear', (['(128)', 'out_features'], {}), '(128, out_features)\n', (3104, 3123), True, 'import torch.nn as nn\n'), ((3474, 3486), 'torch.nn.Flatten', 'nn.Flatten', ([], {}), '()\n', (3484, 3486), True, 'import torch.nn as nn\n'), ((3488, 3521), 'torch.nn.Linear', 'nn.Linear', (['in_shape', 'out_features'], {}), '(in_shape, out_features)\n', (3497, 3521), True, 'import torch.nn as nn\n'), ((3065, 3078), 'torch.nn.Softplus', 'nn.Softplus', ([], {}), '()\n', (3076, 3078), True, 'import torch.nn as nn\n'), ((3353, 3366), 'torch.nn.Softplus', 'nn.Softplus', ([], {}), '()\n', (3364, 3366), True, 'import torch.nn as nn\n')]
|
# Source: https://colab.research.google.com/github/patrickvonplaten/notebooks/blob/master/Fine_Tune_XLSR_Wav2Vec2_on_Turkish_ASR_with_%F0%9F%A4%97_Transformers.ipynb#scrollTo=lbQf5GuZyQ4_
import collections
from dataclasses import dataclass
from typing import Any, Dict, List, Optional, Union
import numpy as np
import torch
import transformers
from audioengine.metrics.wer import Jiwer
from datasets import load_metric
from torch import nn
from torch.cuda.amp import autocast
from tqdm import tqdm
from transformers import (
Trainer,
Wav2Vec2Processor,
)
from transformers.trainer_pt_utils import LengthGroupedSampler, DistributedLengthGroupedSampler
@dataclass
class DataCollatorCTCWithPadding:
"""
Data collator that will dynamically pad the inputs received.
Args:
processor (:class:`~transformers.Wav2Vec2Processor`)
            The processor used for processing the data.
padding (:obj:`bool`, :obj:`str` or :class:`~transformers.tokenization_utils_base.PaddingStrategy`, `optional`, defaults to :obj:`True`):
Select a strategy to pad the returned sequences (according to the model's padding side and padding index)
among:
* :obj:`True` or :obj:`'longest'`: Pad to the longest sequence in the batch (or no padding if only a single
              sequence is provided).
* :obj:`'max_length'`: Pad to a maximum length specified with the argument :obj:`max_length` or to the
maximum acceptable input length for the model if that argument is not provided.
            * :obj:`False` or :obj:`'do_not_pad'`: No padding (i.e., can output a batch with sequences of
different lengths).
max_length (:obj:`int`, `optional`):
Maximum length of the ``input_values`` of the returned list and optionally padding length (see above).
max_length_labels (:obj:`int`, `optional`):
Maximum length of the ``labels`` returned list and optionally padding length (see above).
pad_to_multiple_of (:obj:`int`, `optional`):
If set will pad the sequence to a multiple of the provided value.
This is especially useful to enable the use of Tensor Cores on NVIDIA hardware with compute capability >=
7.5 (Volta).
"""
processor: Wav2Vec2Processor
padding: Union[bool, str] = True
max_length: Optional[int] = None
max_length_labels: Optional[int] = None
pad_to_multiple_of: Optional[int] = None
pad_to_multiple_of_labels: Optional[int] = None
def __call__(self, features: List[Dict[str, Union[List[int], torch.Tensor]]]) -> Dict[str, torch.Tensor]:
        # split inputs and labels since they have to be of different lengths and need
# different padding methods
input_features = [{"input_values": feature["input_values"]} for feature in features]
label_features = [{"input_ids": feature["labels"]} for feature in features]
batch = self.processor.pad(
input_features,
padding=self.padding,
max_length=self.max_length,
pad_to_multiple_of=self.pad_to_multiple_of,
return_tensors="pt",
)
with self.processor.as_target_processor():
labels_batch = self.processor.pad(
label_features,
padding=self.padding,
max_length=self.max_length_labels,
pad_to_multiple_of=self.pad_to_multiple_of_labels,
return_tensors="pt",
)
# replace padding with -100 to ignore loss correctly
labels = labels_batch["input_ids"].masked_fill(labels_batch.attention_mask.ne(1), -100)
batch["labels"] = labels
return batch
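# Illustrative usage sketch for the collator above (the checkpoint name and the
# toy feature dict are assumptions, not part of the original notebook):
#   processor = Wav2Vec2Processor.from_pretrained("facebook/wav2vec2-base-960h")
#   data_collator = DataCollatorCTCWithPadding(processor=processor, padding=True)
#   batch = data_collator([{"input_values": [0.1, -0.2, 0.3], "labels": [5, 6, 7]}])
#   # -> padded "input_values" tensors plus "labels" padded with -100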
class CTCTrainer(Trainer):
def training_step(self, model: nn.Module, inputs: Dict[str, Union[torch.Tensor, Any]]) -> torch.Tensor:
"""
Perform a training step on a batch of inputs.
Subclass and override to inject custom behavior.
Args:
model (:obj:`nn.Module`):
The model to train.
inputs (:obj:`Dict[str, Union[torch.Tensor, Any]]`):
The inputs and targets of the model.
The dictionary will be unpacked before being fed to the model. Most models expect the targets under the
argument :obj:`labels`. Check your model's documentation for all accepted arguments.
Return:
:obj:`torch.Tensor`: The tensor with training loss on this batch.
"""
model.train()
inputs = self._prepare_inputs(inputs)
if self.use_amp:
with autocast():
loss = self.compute_loss(model, inputs)
else:
loss = self.compute_loss(model, inputs)
if self.args.n_gpu > 1:
if model.module.config.ctc_loss_reduction == "mean":
loss = loss.mean()
elif model.module.config.ctc_loss_reduction == "sum":
loss = loss.sum() / (inputs["labels"] >= 0).sum()
else:
                raise ValueError(f"{model.module.config.ctc_loss_reduction} is not valid. Choose one of ['mean', 'sum']")
if self.args.gradient_accumulation_steps > 1:
loss = loss / self.args.gradient_accumulation_steps
if self.use_amp:
self.scaler.scale(loss).backward()
# elif self.use_apex:
# with amp.scale_loss(loss, self.optimizer) as scaled_loss:
# scaled_loss.backward()
elif self.deepspeed:
self.deepspeed.backward(loss)
else:
loss.backward()
return loss.detach()
# add less aggressive smoothing to progress bar for better estimate
class CustomProgressBarCallback(transformers.trainer_callback.ProgressCallback):
def on_train_begin(self, args, state, control, **kwargs):
if state.is_local_process_zero:
self.training_bar = tqdm(total=state.max_steps, smoothing=0.1)
self.current_step = 0
# solution from https://discuss.huggingface.co/t/spanish-asr-fine-tuning-wav2vec2/4586/6
class GroupedLengthsTrainer(CTCTrainer):
# length_field_name should possibly be part of TrainingArguments instead
def __init__(self, train_seq_lengths: List[int], *args, **kwargs):
super().__init__(*args, **kwargs)
self.train_seq_lengths = train_seq_lengths
def _get_train_sampler(self) -> Optional[torch.utils.data.sampler.Sampler]:
if isinstance(self.train_dataset, torch.utils.data.IterableDataset) or not isinstance(
self.train_dataset, collections.abc.Sized
):
return None
# Build the sampler.
if self.args.group_by_length:
# lengths = self.train_dataset[self.length_field_name] if self.length_field_name is not None else None
model_input_name = self.tokenizer.model_input_names[0] if self.tokenizer is not None else None
if self.args.world_size <= 1:
return LengthGroupedSampler(
self.train_dataset, self.args.train_batch_size, lengths=self.train_seq_lengths,
model_input_name=model_input_name
)
else:
return DistributedLengthGroupedSampler(
self.train_dataset,
self.args.train_batch_size,
num_replicas=self.args.world_size,
rank=self.args.process_index,
lengths=self.train_seq_lengths,
model_input_name=model_input_name,
)
else:
return super()._get_train_sampler()
wer_metric = load_metric("wer")
def compute_metrics(processor):
def __call__(pred):
pred_logits = pred.predictions
pred_ids = np.argmax(pred_logits, axis=-1)
pred.label_ids[pred.label_ids == -100] = processor.tokenizer.pad_token_id
pred_str = processor.batch_decode(pred_ids)
# we do not want to group tokens when computing the metrics
label_str = processor.batch_decode(pred.label_ids, group_tokens=False)
wer = wer_metric.compute(predictions=pred_str, references=label_str)
return {"wer": wer}
return __call__
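# Sketch of how the pieces above are typically wired together (illustrative only;
# model, training_args, processor, the datasets and train_lengths are assumed to
# be defined elsewhere):
#   trainer = GroupedLengthsTrainer(
#       train_seq_lengths=train_lengths,
#       model=model,
#       args=training_args,
#       data_collator=DataCollatorCTCWithPadding(processor=processor, padding=True),
#       compute_metrics=compute_metrics(processor),
#       train_dataset=train_dataset,
#       eval_dataset=eval_dataset,
#       tokenizer=processor.feature_extractor,
#   )
#   trainer.train()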
|
[
"torch.cuda.amp.autocast",
"tqdm.tqdm",
"transformers.trainer_pt_utils.DistributedLengthGroupedSampler",
"numpy.argmax",
"datasets.load_metric",
"transformers.trainer_pt_utils.LengthGroupedSampler"
] |
[((7687, 7705), 'datasets.load_metric', 'load_metric', (['"""wer"""'], {}), "('wer')\n", (7698, 7705), False, 'from datasets import load_metric\n'), ((7822, 7853), 'numpy.argmax', 'np.argmax', (['pred_logits'], {'axis': '(-1)'}), '(pred_logits, axis=-1)\n', (7831, 7853), True, 'import numpy as np\n'), ((5953, 5995), 'tqdm.tqdm', 'tqdm', ([], {'total': 'state.max_steps', 'smoothing': '(0.1)'}), '(total=state.max_steps, smoothing=0.1)\n', (5957, 5995), False, 'from tqdm import tqdm\n'), ((4658, 4668), 'torch.cuda.amp.autocast', 'autocast', ([], {}), '()\n', (4666, 4668), False, 'from torch.cuda.amp import autocast\n'), ((7023, 7162), 'transformers.trainer_pt_utils.LengthGroupedSampler', 'LengthGroupedSampler', (['self.train_dataset', 'self.args.train_batch_size'], {'lengths': 'self.train_seq_lengths', 'model_input_name': 'model_input_name'}), '(self.train_dataset, self.args.train_batch_size,\n lengths=self.train_seq_lengths, model_input_name=model_input_name)\n', (7043, 7162), False, 'from transformers.trainer_pt_utils import LengthGroupedSampler, DistributedLengthGroupedSampler\n'), ((7258, 7484), 'transformers.trainer_pt_utils.DistributedLengthGroupedSampler', 'DistributedLengthGroupedSampler', (['self.train_dataset', 'self.args.train_batch_size'], {'num_replicas': 'self.args.world_size', 'rank': 'self.args.process_index', 'lengths': 'self.train_seq_lengths', 'model_input_name': 'model_input_name'}), '(self.train_dataset, self.args.\n train_batch_size, num_replicas=self.args.world_size, rank=self.args.\n process_index, lengths=self.train_seq_lengths, model_input_name=\n model_input_name)\n', (7289, 7484), False, 'from transformers.trainer_pt_utils import LengthGroupedSampler, DistributedLengthGroupedSampler\n')]
|
import numpy, sys, math, batman
import matplotlib.pyplot as plt
from scipy import interpolate
file = numpy.load('GJ436b_Trans_SED.npz')
SEDarray = file['SEDarray']
print(SEDarray.shape)
plt.imshow(SEDarray)
plt.show()
stellarwave, stellarspec = numpy.loadtxt('ODFNEW_GJ436.spec', unpack=True, skiprows=800)
stellarwave /= 10000. # to um
relevant = numpy.where((stellarwave>1.5) & (stellarwave<5.5))
stellarwave = stellarwave[relevant]
stellarspec = stellarspec[relevant]
StellarInterp = interpolate.interp1d(stellarwave, stellarspec, kind='cubic')
planetwave, planetspec = numpy.loadtxt('../Transmission_Spec/GJ436b_trans_PyNRC_GRISMR.txt', unpack=True)
PlanetInterp = interpolate.interp1d(planetwave, planetspec, kind='cubic')
time = numpy.linspace(0.0,0.1,5000)
f = open('../BATMAN_Generation/Used/BatmanParams_PyNRC_GRISMR.txt', 'r')
params = batman.TransitParams()
params.t0 = float(f.readline().split('=')[1]) # hardcoded readlines b/c the file I'm using has a fixed format
params.per = float(f.readline().split('=')[1])
params.inc = float(f.readline().split('=')[1])
params.rp = float(f.readline().split('=')[1])
params.a = float(f.readline().split('=')[1])
params.w = float(f.readline().split('=')[1])
params.ecc = float(f.readline().split('=')[1])
params.fp = float(f.readline().split('=')[1])
params.t_secondary = float(f.readline().split('=')[1])
limbdark = f.readline().split('=')[1] # ugh
u1 = float(limbdark.split(',')[0][2:])
u2 = float(limbdark.split(',')[1][1:-2])
params.u = [u1, u2]
params.limb_dark = "quadratic"
transitmodel = batman.TransitModel(params, time) # creates a transit model object using the time array; we can change the depth now by changing what's in params
SEDarray = numpy.zeros(time.shape[0]) # initialize so that we can vstack onto this
wave = numpy.linspace(1.75,5.25,3500)
for waveval in wave:
params.rp = math.sqrt(PlanetInterp(waveval)) # sqrt b/c trans. spec is in depth, but batman wants rp/rs
fluxtransit = transitmodel.light_curve(params)
actualflux = fluxtransit * StellarInterp(waveval)
SEDarray = numpy.vstack((SEDarray, actualflux))
SEDarray = numpy.delete(SEDarray, 0, 0) # trim that initial row with all zeroes
numpy.savez('GJ436b_Trans_SED', SEDarray=SEDarray, time=time, wave=wave)
plt.imshow(SEDarray)
plt.show()
|
[
"numpy.load",
"matplotlib.pyplot.show",
"matplotlib.pyplot.imshow",
"numpy.zeros",
"numpy.where",
"numpy.loadtxt",
"numpy.linspace",
"scipy.interpolate.interp1d",
"numpy.savez",
"batman.TransitModel",
"numpy.delete",
"numpy.vstack"
] |
[((102, 136), 'numpy.load', 'numpy.load', (['"""GJ436b_Trans_SED.npz"""'], {}), "('GJ436b_Trans_SED.npz')\n", (112, 136), False, 'import numpy, sys, math, batman\n'), ((187, 207), 'matplotlib.pyplot.imshow', 'plt.imshow', (['SEDarray'], {}), '(SEDarray)\n', (197, 207), True, 'import matplotlib.pyplot as plt\n'), ((208, 218), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (216, 218), True, 'import matplotlib.pyplot as plt\n'), ((247, 308), 'numpy.loadtxt', 'numpy.loadtxt', (['"""ODFNEW_GJ436.spec"""'], {'unpack': '(True)', 'skiprows': '(800)'}), "('ODFNEW_GJ436.spec', unpack=True, skiprows=800)\n", (260, 308), False, 'import numpy, sys, math, batman\n'), ((350, 404), 'numpy.where', 'numpy.where', (['((stellarwave > 1.5) & (stellarwave < 5.5))'], {}), '((stellarwave > 1.5) & (stellarwave < 5.5))\n', (361, 404), False, 'import numpy, sys, math, batman\n'), ((489, 549), 'scipy.interpolate.interp1d', 'interpolate.interp1d', (['stellarwave', 'stellarspec'], {'kind': '"""cubic"""'}), "(stellarwave, stellarspec, kind='cubic')\n", (509, 549), False, 'from scipy import interpolate\n'), ((576, 661), 'numpy.loadtxt', 'numpy.loadtxt', (['"""../Transmission_Spec/GJ436b_trans_PyNRC_GRISMR.txt"""'], {'unpack': '(True)'}), "('../Transmission_Spec/GJ436b_trans_PyNRC_GRISMR.txt', unpack=True\n )\n", (589, 661), False, 'import numpy, sys, math, batman\n'), ((672, 730), 'scipy.interpolate.interp1d', 'interpolate.interp1d', (['planetwave', 'planetspec'], {'kind': '"""cubic"""'}), "(planetwave, planetspec, kind='cubic')\n", (692, 730), False, 'from scipy import interpolate\n'), ((739, 769), 'numpy.linspace', 'numpy.linspace', (['(0.0)', '(0.1)', '(5000)'], {}), '(0.0, 0.1, 5000)\n', (753, 769), False, 'import numpy, sys, math, batman\n'), ((1553, 1586), 'batman.TransitModel', 'batman.TransitModel', (['params', 'time'], {}), '(params, time)\n', (1572, 1586), False, 'import numpy, sys, math, batman\n'), ((1711, 1737), 'numpy.zeros', 'numpy.zeros', (['time.shape[0]'], {}), '(time.shape[0])\n', (1722, 1737), False, 'import numpy, sys, math, batman\n'), ((1791, 1823), 'numpy.linspace', 'numpy.linspace', (['(1.75)', '(5.25)', '(3500)'], {}), '(1.75, 5.25, 3500)\n', (1805, 1823), False, 'import numpy, sys, math, batman\n'), ((2108, 2136), 'numpy.delete', 'numpy.delete', (['SEDarray', '(0)', '(0)'], {}), '(SEDarray, 0, 0)\n', (2120, 2136), False, 'import numpy, sys, math, batman\n'), ((2178, 2250), 'numpy.savez', 'numpy.savez', (['"""GJ436b_Trans_SED"""'], {'SEDarray': 'SEDarray', 'time': 'time', 'wave': 'wave'}), "('GJ436b_Trans_SED', SEDarray=SEDarray, time=time, wave=wave)\n", (2189, 2250), False, 'import numpy, sys, math, batman\n'), ((2252, 2272), 'matplotlib.pyplot.imshow', 'plt.imshow', (['SEDarray'], {}), '(SEDarray)\n', (2262, 2272), True, 'import matplotlib.pyplot as plt\n'), ((2273, 2283), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (2281, 2283), True, 'import matplotlib.pyplot as plt\n'), ((2059, 2095), 'numpy.vstack', 'numpy.vstack', (['(SEDarray, actualflux)'], {}), '((SEDarray, actualflux))\n', (2071, 2095), False, 'import numpy, sys, math, batman\n')]
|
from Cython.Build import cythonize
from setuptools.extension import Extension
from setuptools import setup, find_packages
import os
dir_path = os.path.dirname(os.path.realpath(__file__))
include_dirs = [dir_path + "/src", dir_path]
macros = [("CYTHON_TRACE", "1")]
extensions = [Extension("quicksect", ["src/quicksect.pyx"],
define_macros=macros,
include_dirs=include_dirs)]
setup(version='0.2.2',
name='quicksect',
description="fast, simple interval intersection",
ext_modules = cythonize(extensions, language_level=3),
long_description=open('README.rst').read(),
author="<NAME>,<NAME>",
author_email="<EMAIL>, <EMAIL>",
packages=find_packages(),
setup_requires=['cython'],
install_requires=['cython'],
test_suite='nose.collector',
license = 'The MIT License',
tests_require='nose',
package_data={'': ['*.pyx', '*.pxd']},
include_dirs=["."],
)
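# Typical local build commands for this setup script (illustrative, not part of
# the original file):
#   python setup.py build_ext --inplace   # compile the Cython extension in place
#   pip install .                         # build and install the package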
|
[
"Cython.Build.cythonize",
"os.path.realpath",
"setuptools.extension.Extension",
"setuptools.find_packages"
] |
[((159, 185), 'os.path.realpath', 'os.path.realpath', (['__file__'], {}), '(__file__)\n', (175, 185), False, 'import os\n'), ((279, 377), 'setuptools.extension.Extension', 'Extension', (['"""quicksect"""', "['src/quicksect.pyx']"], {'define_macros': 'macros', 'include_dirs': 'include_dirs'}), "('quicksect', ['src/quicksect.pyx'], define_macros=macros,\n include_dirs=include_dirs)\n", (288, 377), False, 'from setuptools.extension import Extension\n'), ((544, 583), 'Cython.Build.cythonize', 'cythonize', (['extensions'], {'language_level': '(3)'}), '(extensions, language_level=3)\n', (553, 583), False, 'from Cython.Build import cythonize\n'), ((719, 734), 'setuptools.find_packages', 'find_packages', ([], {}), '()\n', (732, 734), False, 'from setuptools import setup, find_packages\n')]
|
"""A simple server with a REST API for the Notes App frontend."""
import tornado.escape
import tornado.ioloop
import tornado.web
from tornado_cors import CorsMixin
import logging
import json
import os
import signal
import sys
PORT = 3456
DB_PATH = "db.json"
TEST_DB_PATH = "test/test_db.json"
db = {
'version': {
'version': '0.0.1',
'api_version': '0.1',
'is_test_db': True
},
'notes': [
{
'title': 'some note title',
'text': 'some note text'
},
{
'title': 'other note title',
'text': 'other note text'
}
]
}
def tokenize(s):
"""Split string into tokens."""
return [p.lower() for p in s.split(" ") if p]
class NoteAlreadyExists(Exception):
"""Raised if trying to add a new note with title that is already taken."""
def __init__(self, title):
"""Show exception with the note title."""
super(NoteAlreadyExists, self).__init__(title)
class NoSuchNoteExists(Exception):
"""Raised if trying to delete a note that doesn't exist."""
def __init__(self, title):
"""Show exception with the note title."""
super(NoSuchNoteExists, self).__init__(title)
def add_note(note):
"""Add note to notes."""
if find_note(note["title"]):
raise NoteAlreadyExists(note["title"])
db['notes'].append(note)
def delete_note(title):
"""Delete note from notes."""
found = find_note(title)
if not found:
raise NoSuchNoteExists(title)
del db['notes'][found[0]]
def update_note(title, note):
"""Update an existing note with a given title, possibly retitling it."""
found = find_note(title)
if not found:
raise NoSuchNoteExists(title)
note["timestamp"]["created"] = found[1]["timestamp"]["created"]
db['notes'][found[0]] = note
def find_note(title):
"""Return (index, note) of note that has title or False if no such note."""
for i, note in enumerate(db['notes']):
if note["title"] == title:
return i, note
return False
def search_notes(query):
"""Search notes by query."""
def match_token(note, tokens):
"""Test if note contains any of the tokens.
A very simple implementation still. Return False if any of the tokens
is missing, True if any match.
"""
tokens_found = []
for token in tokens:
s = note["title"] + " " + note["text"]
if token not in s.lower():
return False
tokens_found.append(token)
return len(tokens_found) == len(tokens)
notes = []
query_tokens = tokenize(query)
for note in db['notes']:
if match_token(note, query_tokens):
notes.append(note)
return notes
class CorsBaseHandler(CorsMixin, tornado.web.RequestHandler):
"""Set up CORS and allow separate origin for the client."""
CORS_ORIGIN = 'http://localhost:8080'
CORS_METHODS = 'GET, PUT, DELETE'
CORS_HEADERS = (
'Access-Control-Allow-Headers, '
'Origin, '
'Accept, '
'X-Requested-With, '
'Content-Type, '
'Access-Control-Request-Method, '
'Access-Control-Request-Headers'
)
class VersionRootHandler(CorsBaseHandler):
"""Handle /version ."""
def get(self):
"""Handle get and return verision and api_version."""
response = {
'version': '0.0.1',
'api_version': '0.1',
'is_test_db': True
}
self.write(response)
class NotesRootHandler(CorsBaseHandler):
"""Handle /notes ."""
def get(self):
"""Handle get and return all notes from database."""
response = {
'notes': db['notes']
}
self.write(response)
def put(self, *args, **kwargs):
"""Handle put and create / update give note."""
note = json.loads(self.request.body.decode('utf-8'))
title_update = note["title"]
if isinstance(title_update, dict):
find_title = title_update["old"]
new_title = title_update["new"]
else:
find_title = title_update
new_title = title_update
_note = {
'title': new_title,
'text': note["text"],
'timestamp': note["timestamp"]
}
found = find_note(find_title)
if not found:
add_note(_note)
self.clear()
self.set_status(200)
self.finish("Note '{}' added.".format(find_title))
else:
update_note(find_title, _note)
self.clear()
self.set_status(204)
self.finish("Note '{}' updated.".format(new_title))
class NoteHandler(CorsBaseHandler):
"""Handle /note/(.*) .
/note/:title
GET
DELETE
"""
def get(self, title):
"""Handle get and return note with given title from database."""
found = find_note(title)
if not found:
self.clear()
self.set_status(404)
self.finish("Note '{}'' not found!".format(title))
return
response = found[1]
self.write(response)
def delete(self, title):
"""Handle delete and delete note with given title from database."""
try:
delete_note(title)
except NoSuchNoteExists:
self.clear()
self.set_status(404)
self.finish("Note '{}' does not even exist.".format(title))
class NotesTitlesHandler(CorsBaseHandler):
"""Handle /notes/titles ."""
def get(self):
"""Handle get and return all note titles from database."""
response = {
'note_titles': [note["title"] for note in db['notes']]
}
self.write(response)
class NotesSearchHandler(CorsBaseHandler):
"""Handle /search?q=(.*) ."""
def get(self):
"""Handle get and return all notes matching search query."""
response = {
'notes': []
}
if self.get_argument('q') == "":
response = {
'notes': db['notes']
}
else:
response = {
'notes': search_notes(self.get_argument('q'))
}
self.write(response)
class TestBeginHandler(CorsBaseHandler):
"""Handle /test/begin ."""
def get(self):
"""Setup test to have expected state."""
read_db()
class TestEndHandler(CorsBaseHandler):
"""Handle /test/begin ."""
def get(self):
"""Setup test to have end with expected state afterwards."""
read_db()
def is_using_test_db():
"""Check if started with use test db flag."""
return "--use-test-db" in sys.argv
routes = [
(r"/version", VersionRootHandler),
(r"/notes", NotesRootHandler),
(r"/notes/titles", NotesTitlesHandler),
(r"/note/(.*)", NoteHandler),
(r"/search", NotesSearchHandler),
]
test_routes = [
(r"/test/begin", TestBeginHandler),
(r"/test/end", TestEndHandler)
]
if is_using_test_db():
routes.extend(test_routes)
application = tornado.web.Application(routes)
def read_db():
"""'Read in' database for use."""
global db
db_path = DB_PATH
if is_using_test_db():
db_path = TEST_DB_PATH
logging.info("server path:", os.path.abspath(__file__))
logging.info("server: db_path:", db_path)
with open(db_path) as f:
db = json.load(f)
is_closing = False
def signal_handler(signum, frame):
"""Signal handler for closing tornado."""
global is_closing
logging.info('exiting...')
is_closing = True
def try_exit():
"""Try closing tornado."""
global is_closing
if is_closing:
# clean up here
tornado.ioloop.IOLoop.instance().stop()
logging.info('exit success')
def start():
"""Start tornado."""
logging.info("Starting server...")
read_db()
signal.signal(signal.SIGINT, signal_handler)
application.listen(PORT)
tornado.ioloop.PeriodicCallback(try_exit, 500).start()
tornado.ioloop.IOLoop.instance().start()
logging.info("Server stopped.")
if __name__ == "__main__":
start()
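# Example requests against a running instance (illustrative only, assuming the
# defaults above, i.e. http://localhost:3456):
#   curl http://localhost:3456/version
#   curl http://localhost:3456/notes
#   curl http://localhost:3456/notes/titles
#   curl "http://localhost:3456/search?q=note"
#   curl -X DELETE "http://localhost:3456/note/some%20note%20title"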
|
[
"logging.info",
"signal.signal",
"json.load",
"os.path.abspath"
] |
[((7399, 7440), 'logging.info', 'logging.info', (['"""server: db_path:"""', 'db_path'], {}), "('server: db_path:', db_path)\n", (7411, 7440), False, 'import logging\n'), ((7627, 7653), 'logging.info', 'logging.info', (['"""exiting..."""'], {}), "('exiting...')\n", (7639, 7653), False, 'import logging\n'), ((7919, 7953), 'logging.info', 'logging.info', (['"""Starting server..."""'], {}), "('Starting server...')\n", (7931, 7953), False, 'import logging\n'), ((7972, 8016), 'signal.signal', 'signal.signal', (['signal.SIGINT', 'signal_handler'], {}), '(signal.SIGINT, signal_handler)\n', (7985, 8016), False, 'import signal\n'), ((8154, 8185), 'logging.info', 'logging.info', (['"""Server stopped."""'], {}), "('Server stopped.')\n", (8166, 8185), False, 'import logging\n'), ((7368, 7393), 'os.path.abspath', 'os.path.abspath', (['__file__'], {}), '(__file__)\n', (7383, 7393), False, 'import os\n'), ((7484, 7496), 'json.load', 'json.load', (['f'], {}), '(f)\n', (7493, 7496), False, 'import json\n'), ((7846, 7874), 'logging.info', 'logging.info', (['"""exit success"""'], {}), "('exit success')\n", (7858, 7874), False, 'import logging\n')]
|
from sofi.ui import Inserted
def test_basic():
assert(str(Inserted()) == "<ins></ins>")
def test_text():
assert(str(Inserted("text")) == "<ins>text</ins>")
def test_custom_class_ident_style_and_attrs():
assert(str(Inserted("text", cl='abclass', ident='123', style="font-size:0.9em;", attrs={"data-test": 'abc'}))
== "<ins id=\"123\" class=\"abclass\" style=\"font-size:0.9em;\" data-test=\"abc\">text</ins>")
|
[
"sofi.ui.Inserted"
] |
[((63, 73), 'sofi.ui.Inserted', 'Inserted', ([], {}), '()\n', (71, 73), False, 'from sofi.ui import Inserted\n'), ((126, 142), 'sofi.ui.Inserted', 'Inserted', (['"""text"""'], {}), "('text')\n", (134, 142), False, 'from sofi.ui import Inserted\n'), ((229, 331), 'sofi.ui.Inserted', 'Inserted', (['"""text"""'], {'cl': '"""abclass"""', 'ident': '"""123"""', 'style': '"""font-size:0.9em;"""', 'attrs': "{'data-test': 'abc'}"}), "('text', cl='abclass', ident='123', style='font-size:0.9em;', attrs\n ={'data-test': 'abc'})\n", (237, 331), False, 'from sofi.ui import Inserted\n')]
|
import math
import sklearn.cluster as clstr
import cv2
import numpy as np
from PIL import Image, ImageOps, ImageDraw
import os, glob
import matplotlib.pyplot as pyplt
import scipy.cluster.vq as vq
import argparse
import glob
# We can specify these if need be.
brodatz = "D:\\ImageProcessing\\project\\OriginalBrodatz\\"
concatOut = "D:\\ImageProcessing\\project\\concat.png"
# This is the function that checks boundaries when performing spatial convolution.
def getRanges_for_window_with_adjust(row, col, height, width, W):
mRange = []
nRange = []
mRange.append(0)
mRange.append(W-1)
nRange.append(0)
nRange.append(W-1)
initm = int(round(row - math.floor(W / 2)))
initn = int(round(col - math.floor(W / 2)))
if (initm < 0):
mRange[1] += initm
initm = 0
if (initn < 0):
nRange[1] += initn
initn = 0
if(initm + mRange[1] > (height - 1)):
diff = ((initm + mRange[1]) - (height - 1))
mRange[1] -= diff
if(initn + nRange[1] > (width-1)):
diff = ((initn + nRange[1]) - (width - 1))
nRange[1] -= diff
windowHeight = mRange[1] - mRange[0]
windowWidth = nRange[1] - nRange[0]
return int(round(windowHeight)), int(round(windowWidth)), int(round(initm)), int(round(initn))
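# Worked example (added for illustration): for a 7x7 window centred on the corner
# pixel (0, 0) of a 100x100 image,
#   getRanges_for_window_with_adjust(0, 0, 100, 100, 7)
# clips the out-of-bounds part of the window and returns (3, 3, 0, 0), i.e. a
# reduced window extent of 3 in each direction with its origin moved to (0, 0).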
# Used to normalize data before clustering occurs.
# Whiten sets the variance to be 1 (unit variance),
# spatial weighting also takes place here.
# The mean can be subtracted if specified by the implementation.
def normalizeData(featureVectors, setMeanToZero, spatialWeight=1):
means = []
for col in range(0, len(featureVectors[0])):
colMean = 0
for row in range(0, len(featureVectors)):
colMean += featureVectors[row][col]
colMean /= len(featureVectors)
means.append(colMean)
for col in range(2, len(featureVectors[0])):
for row in range(0, len(featureVectors)):
featureVectors[row][col] -= means[col]
copy = vq.whiten(featureVectors)
if (setMeanToZero):
for row in range(0, len(featureVectors)):
for col in range(0, len(featureVectors[0])):
copy[row][col] -= means[col]
for row in range(0, len(featureVectors)):
copy[row][0] *= spatialWeight
copy[row][1] *= spatialWeight
return copy
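# Note on vq.whiten (added for illustration): it divides each feature column by
# its standard deviation, e.g. vq.whiten(np.array([[1., 2.], [3., 6.]])) gives
# [[1., 1.], [3., 3.]], so all features contribute on a comparable scale.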
# Create the feature vectors and add in row and column data
def constructFeatureVectors(featureImages, img):
featureVectors = []
height, width = img.shape
for row in range(height):
for col in range(width):
featureVector = []
featureVector.append(row)
featureVector.append(col)
for featureImage in featureImages:
featureVector.append(featureImage[row][col])
featureVectors.append(featureVector)
return featureVectors
# An extra function if we are looking to save our feature vectors for later
def printFeatureVectors(outDir, featureVectors):
f = open(outDir, 'w')
for vector in featureVectors:
for item in vector:
f.write(str(item) + " ")
f.write("\n")
f.close()
# If we want to read in some feature vectors instead of creating them.
def readInFeatureVectorsFromFile(dir):
list = [line.rstrip('\n') for line in open(dir)]
list = [i.split() for i in list]
newList = []
for row in list:
newRow = []
for item in row:
floatitem = float(item)
newRow.append(floatitem)
newList.append(newRow)
return newList
# Print the intermediate results before clustering occurs
def printFeatureImages(featureImages, naming, printlocation):
i =0
for image in featureImages:
# Normalize to intensity values
imageToPrint = cv2.normalize(image, alpha=0, beta=255, norm_type=cv2.NORM_MINMAX, dtype=cv2.CV_32F)
cv2.imwrite(printlocation + "\\" + naming + str(i) + ".png", imageToPrint)
i+=1
# Print the final result, the user can also choose to make the output grey
def printClassifiedImage(labels, k, img, outdir, greyOutput):
if(greyOutput):
labels = labels.reshape(img.shape)
for row in range(0, len(labels)):
for col in range(0, len(labels[0])):
outputIntensity = (255/k)*labels[row][col]
labels[row][col] = outputIntensity
cv2.imwrite(outdir, labels.reshape(img.shape))
else:
pyplt.imsave(outdir, labels.reshape(img.shape))
# Call the k means algorithm for classification
def clusterFeatureVectors(featureVectors, k):
kmeans = clstr.KMeans(n_clusters=k)
kmeans.fit(featureVectors)
labels = kmeans.labels_
return labels
# To clean up old filter and feature images if the user chose to print them.
def deleteExistingSubResults(outputPath):
for filename in os.listdir(outputPath):
if (filename.startswith("filter") or filename.startswith("feature")):
            os.remove(os.path.join(outputPath, filename))
# Checks user input (i.e. cannot have a negative mask size value)
def check_positive_int(n):
int_n = int(n)
if int_n < 0:
raise argparse.ArgumentTypeError("%s is negative" % n)
return int_n
# Checks user input (i.e. cannot have a negative weighting value)
def check_positive_float(n):
float_n = float(n)
if float_n < 0:
raise argparse.ArgumentTypeError("%s is negative " % n)
return float_n
#--------------------------------------------------------------------------
# All of the functions below were left here to demonstrate how I went about
# cropping the input images. I left them here, in the case that Brodatz
# textures were downloaded and cropped as new input images.
#--------------------------------------------------------------------------
def cropTexture(x_offset, Y_offset, width, height, inDir, outDir):
box = (x_offset, Y_offset, width, height)
image = Image.open(inDir)
crop = image.crop(box)
crop.save(outDir, "PNG")
def deleteCroppedImages():
for filename in glob.glob(brodatz + "*crop*"):
os.remove(filename)
def concatentationOfBrodatzTexturesIntoRows(pathsToImages, outdir, axisType):
images = []
for thisImage in pathsToImages:
images.append(cv2.imread(thisImage, cv2.CV_LOAD_IMAGE_GRAYSCALE))
cv2.imwrite(outdir, np.concatenate(images, axis=axisType))
outimg = cv2.imread(outdir, cv2.CV_LOAD_IMAGE_GRAYSCALE)
return outimg
def createGrid(listOfBrodatzInts, outName, howManyPerRow):
listOfRowOutputs = []
for i in range(len(listOfBrodatzInts)):
brodatzCropInput = brodatz + "D" + str(listOfBrodatzInts[i]) + ".png"
brodatzCropOutput = brodatz + "cropD" + str(listOfBrodatzInts[i]) + ".png"
# 128x128 crops, in order to generate a 512x512 image
cropTexture(256, 256, 384, 384, brodatzCropInput, brodatzCropOutput)
listOfRowOutputs.append(brodatzCropOutput)
subOuts = [listOfRowOutputs[x:x + howManyPerRow] for x in xrange(0,len(listOfRowOutputs), howManyPerRow)]
dests = []
for i in range(len(subOuts)):
dest = brodatz + "cropRow" + str(i) + ".png"
dests.append(dest)
concatentationOfBrodatzTexturesIntoRows(subOuts[i], brodatz + "cropRow" + str(i) + ".png", 1)
concatentationOfBrodatzTexturesIntoRows(dests, brodatz + outName, 0)
# Destroy all sub crops (we can make this optional if we want!)
deleteCroppedImages()
def createGridWithCircle(listOfBrodatzInts, circleInt, outName):
listOfRowOutputs = []
for i in range(len(listOfBrodatzInts)):
brodatzCropInput = brodatz + "D" + str(listOfBrodatzInts[i]) + ".png"
brodatzCropOutput = brodatz + "cropD" + str(listOfBrodatzInts[i]) + ".png"
# 128x128 crops, in order to generate a 256x256 image
cropTexture(256, 256, 384, 384, brodatzCropInput, brodatzCropOutput)
listOfRowOutputs.append(brodatzCropOutput)
subOuts = [listOfRowOutputs[x:x + 2] for x in xrange(0, len(listOfRowOutputs), 2)]
dests = []
for i in range(len(subOuts)):
dest = brodatz + "cropRow" + str(i) + ".png"
dests.append(dest)
concatentationOfBrodatzTexturesIntoRows(subOuts[i], brodatz + "cropRow" + str(i) + ".png", 1)
concatentationOfBrodatzTexturesIntoRows(dests, brodatz + "Nat5crop.png", 0)
size = (128, 128)
mask = Image.new('L', size, color=255)
draw = ImageDraw.Draw(mask)
draw.ellipse((0, 0) + size, fill=0)
im = Image.open(brodatz + "D" + str(circleInt) + ".png")
output = ImageOps.fit(im, mask.size, centering=(0.5, 0.5))
output.paste(0, mask=mask)
output.save(brodatz + 'circlecrop.png', transparency=0)
img = Image.open(brodatz + 'circlecrop.png').convert("RGBA")
img_w, img_h = img.size
background = Image.open(brodatz + "Nat5crop.png")
bg_w, bg_h = background.size
offset = ((bg_w - img_w) / 2, (bg_h - img_h) / 2)
background.paste(output, offset, img)
background.save(brodatz + outName, format="png")
deleteCroppedImages()
def createTexturePair(pair, outName):
pathsToTemp = [brodatz + "D" + str(pair[0]) + ".png", brodatz + "D" + str(pair[1]) + ".png"]
cropTexture(256, 256, 384, 384, pathsToTemp[0], brodatz + "outcrop1.png")
cropTexture(256, 256, 384, 384, pathsToTemp[1], brodatz + "outcrop2.png")
cropsToConcat = [brodatz + "outcrop1.png", brodatz + "outcrop2.png"]
concatentationOfBrodatzTexturesIntoRows(cropsToConcat, outName, 1)
deleteCroppedImages()
#--------------------------------------------------------------------------
# Create test images
#--------------------------------------------------------------------------
# Note that I did not write this to have an exhaustive approach in mind,
# where I pair all of the textures to every other texture. If I did so,
# I would have made it a little more efficient, instead I just decided to
# use the images that were in the papers already.
# # We can use any of the 112 images from the Brodatz album here
# nat16 = [29,12,17,55,32,5,84,68,77,24,9,4,3,33,51,54]
# howManyPerRow = 4
# outName = "Nat16.png"
# createGrid(nat16, outName, howManyPerRow)
#
# grid4 = [3,68,17,77]
# howManyPerRow = 2
# outName = "grid4.png"
# createGrid(grid4, outName, howManyPerRow)
# #the last int is the circle in the middle of the image!
# nat5 = [77,55,84,17]
# circleInt = 24
# outName = 'Nat5.png'
# createGridWithCircle(nat5, circleInt, outName)
#
# texturePairs = [[17,77],[3,68],[3,17],[55,68]]
# count = 0
# for pair in texturePairs:
# outName = brodatz + "pair" + str(count) + ".png"
# createTexturePair(pair, outName)
# count += 1
|
[
"PIL.Image.new",
"os.remove",
"numpy.concatenate",
"PIL.ImageOps.fit",
"sklearn.cluster.KMeans",
"math.floor",
"PIL.Image.open",
"cv2.imread",
"cv2.normalize",
"glob.glob",
"PIL.ImageDraw.Draw",
"os.listdir",
"scipy.cluster.vq.whiten",
"argparse.ArgumentTypeError"
] |
[((1992, 2017), 'scipy.cluster.vq.whiten', 'vq.whiten', (['featureVectors'], {}), '(featureVectors)\n', (2001, 2017), True, 'import scipy.cluster.vq as vq\n'), ((4589, 4615), 'sklearn.cluster.KMeans', 'clstr.KMeans', ([], {'n_clusters': 'k'}), '(n_clusters=k)\n', (4601, 4615), True, 'import sklearn.cluster as clstr\n'), ((4834, 4856), 'os.listdir', 'os.listdir', (['outputPath'], {}), '(outputPath)\n', (4844, 4856), False, 'import os, glob\n'), ((5891, 5908), 'PIL.Image.open', 'Image.open', (['inDir'], {}), '(inDir)\n', (5901, 5908), False, 'from PIL import Image, ImageOps, ImageDraw\n'), ((6013, 6042), 'glob.glob', 'glob.glob', (["(brodatz + '*crop*')"], {}), "(brodatz + '*crop*')\n", (6022, 6042), False, 'import glob\n'), ((6354, 6401), 'cv2.imread', 'cv2.imread', (['outdir', 'cv2.CV_LOAD_IMAGE_GRAYSCALE'], {}), '(outdir, cv2.CV_LOAD_IMAGE_GRAYSCALE)\n', (6364, 6401), False, 'import cv2\n'), ((8332, 8363), 'PIL.Image.new', 'Image.new', (['"""L"""', 'size'], {'color': '(255)'}), "('L', size, color=255)\n", (8341, 8363), False, 'from PIL import Image, ImageOps, ImageDraw\n'), ((8375, 8395), 'PIL.ImageDraw.Draw', 'ImageDraw.Draw', (['mask'], {}), '(mask)\n', (8389, 8395), False, 'from PIL import Image, ImageOps, ImageDraw\n'), ((8510, 8559), 'PIL.ImageOps.fit', 'ImageOps.fit', (['im', 'mask.size'], {'centering': '(0.5, 0.5)'}), '(im, mask.size, centering=(0.5, 0.5))\n', (8522, 8559), False, 'from PIL import Image, ImageOps, ImageDraw\n'), ((8762, 8798), 'PIL.Image.open', 'Image.open', (["(brodatz + 'Nat5crop.png')"], {}), "(brodatz + 'Nat5crop.png')\n", (8772, 8798), False, 'from PIL import Image, ImageOps, ImageDraw\n'), ((3775, 3864), 'cv2.normalize', 'cv2.normalize', (['image'], {'alpha': '(0)', 'beta': '(255)', 'norm_type': 'cv2.NORM_MINMAX', 'dtype': 'cv2.CV_32F'}), '(image, alpha=0, beta=255, norm_type=cv2.NORM_MINMAX, dtype=\n cv2.CV_32F)\n', (3788, 3864), False, 'import cv2\n'), ((5114, 5162), 'argparse.ArgumentTypeError', 'argparse.ArgumentTypeError', (["('%s is negative' % n)"], {}), "('%s is negative' % n)\n", (5140, 5162), False, 'import argparse\n'), ((5334, 5383), 'argparse.ArgumentTypeError', 'argparse.ArgumentTypeError', (["('%s is negative ' % n)"], {}), "('%s is negative ' % n)\n", (5360, 5383), False, 'import argparse\n'), ((6052, 6071), 'os.remove', 'os.remove', (['filename'], {}), '(filename)\n', (6061, 6071), False, 'import os, glob\n'), ((6301, 6338), 'numpy.concatenate', 'np.concatenate', (['images'], {'axis': 'axisType'}), '(images, axis=axisType)\n', (6315, 6338), True, 'import numpy as np\n'), ((4948, 4967), 'os.remove', 'os.remove', (['filename'], {}), '(filename)\n', (4957, 4967), False, 'import os, glob\n'), ((6225, 6275), 'cv2.imread', 'cv2.imread', (['thisImage', 'cv2.CV_LOAD_IMAGE_GRAYSCALE'], {}), '(thisImage, cv2.CV_LOAD_IMAGE_GRAYSCALE)\n', (6235, 6275), False, 'import cv2\n'), ((8662, 8700), 'PIL.Image.open', 'Image.open', (["(brodatz + 'circlecrop.png')"], {}), "(brodatz + 'circlecrop.png')\n", (8672, 8700), False, 'from PIL import Image, ImageOps, ImageDraw\n'), ((679, 696), 'math.floor', 'math.floor', (['(W / 2)'], {}), '(W / 2)\n', (689, 696), False, 'import math\n'), ((727, 744), 'math.floor', 'math.floor', (['(W / 2)'], {}), '(W / 2)\n', (737, 744), False, 'import math\n')]
|
# Copyright 2018 @<NAME>. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
import os
import sys
sys.path.insert(0, os.getcwd())
import time
import random
import shutil
import dill
import numpy as np
import tensorflow as tf
from tensorflow.contrib.rnn import LSTMCell, MultiRNNCell, DropoutWrapper
from helpers import Indexer, batch, checkpoint_model
from itertools import chain, product
from collections import defaultdict
from kmedoids import kMedoids
from scipy.spatial.distance import pdist, squareform
from sklearn.metrics import accuracy_score
from pairwise_classifier import *
class MixtureReader:
def __init__(self, data_dir, data_type, context):
assert data_type in ['nyt', 'wiki']
self.data_dir = data_dir
self.data_type = data_type
self.context = context # int: 0 or context-length.
def get_mixture(self, filename):
if self.data_type == 'nyt':
return self.__get_nyt_mixture(filename)
else: # == wiki
return self.__get_wiki_mixture(filename)
def __get_nyt_mixture(self, filename):
da, db, doc_mix = dill.load(open(self.data_dir+filename, 'rb'))
doc_lbs = []
for sentcode in doc_mix:
if sentcode in da:
doc_lbs.append(0)
else:
doc_lbs.append(1)
if self.context:
CTX_LEN = self.context
doc_mix_flat = list(chain.from_iterable(doc_mix))
doc_mix_len = len(doc_mix_flat)
ctx = np.array([doc_mix_flat[:CTX_LEN]]) if doc_mix_len>=CTX_LEN else np.array([doc_mix_flat+[0]*(CTX_LEN-doc_mix_len)])
return doc_mix, doc_lbs, ctx
return doc_mix, doc_lbs
def __get_wiki_mixture(self, filename):
doc_mix, doc_lbs = dill.load(open(self.data_dir+filename, 'rb'))
if self.context:
CTX_LEN = self.context
doc_mix_flat = list(chain.from_iterable(doc_mix))
doc_mix_len = len(doc_mix_flat)
ctx = np.array([doc_mix_flat[:CTX_LEN]]) if doc_mix_len>=CTX_LEN else np.array([doc_mix_flat+[0]*(CTX_LEN-doc_mix_len)])
return doc_mix, doc_lbs, ctx
return doc_mix, doc_lbs
class PscKMedoids:
def __init__(self, psc_clf, data_type):
self.psc_clf = psc_clf
self.mix_reader = MixtureReader(self.psc_clf.config['data_dir'],
data_type='nyt' if 'nyt' in self.psc_clf.config['data_dir'] else 'wiki',
context=self.psc_clf.config['context_length'] if self.psc_clf.config['context'] else 0)
self.out_file_path = psc_clf.config['out_file_path']
def __to_sentence(self, indices):
words = []
for index in indices:
word = self.psc_clf.indexer.get_object(index)
if word is None:
words.append('UNK')
else:
words.append(word)
return ' '.join(words)
def __to_labels(self, C, doc_len): # C: {cls:[datum_id, ...], ...}
lbs = [0]*doc_len
for idx in C[1]:
lbs[idx] = 1
return lbs
def __flip_clust(self, clust):
return np.array([0 if i==1 else 1 for i in clust])
def __clust_accuracy(self, true, pred):
return max(accuracy_score(true, pred),
accuracy_score(true, self.__flip_clust(pred)))
def __dist(self, x1, x2):
x1, x1_len = batch([x1])
x2, x2_len = batch([x2])
fd = {self.psc_clf.input_x1:x1, self.psc_clf.input_x1_length:x1_len,
self.psc_clf.input_x2:x2, self.psc_clf.input_x2_length:x2_len,
self.psc_clf.keep_prob:1.0}
if self.psc_clf.config['context']:
fd[self.psc_clf.input_ctx] = self.ctx
conf = self.psc_clf.sess.run(self.psc_clf.scores, feed_dict=fd)
return 1-conf[0]
def evaluate_single(self, doc_mix, doc_lbs, ctx=None, method='average', return_pred=True):
if ctx is not None:
self.ctx = ctx
doc_mix_sq, _ = batch(doc_mix)
doc_mix_sq = doc_mix_sq.T
_, doc_mix_clust = kMedoids(squareform(pdist(doc_mix_sq,metric=self.__dist)), 2)
doc_prd = self.__to_labels(doc_mix_clust, len(doc_mix))
acc = self.__clust_accuracy(doc_lbs, doc_prd)
if return_pred:
return acc, doc_prd
return acc
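    # Descriptive note (added): pdist() above computes all pairwise sentence
    # distances with the learned metric (1 - pairwise classifier score),
    # squareform() expands the condensed matrix, and kMedoids with k=2 splits
    # the mixed document back into two clusters of sentences.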
def evaluate_rand(self, k=100, verbose=True):
accs = []
filenames = np.random.choice(self.psc_clf.FILENAMES, size=k, replace=False)
if self.out_file_path is not None: # clear out file for new writing.
out_file = open(self.out_file_path, 'w')
for filename in filenames:
            if self.mix_reader.context:
                doc_mix, doc_lbs, ctx = self.mix_reader.get_mixture(filename)
                result = self.evaluate_single(doc_mix, doc_lbs, ctx,
                                              return_pred=self.out_file_path is not None)
            else:
                doc_mix, doc_lbs = self.mix_reader.get_mixture(filename)
                result = self.evaluate_single(doc_mix, doc_lbs,
                                              return_pred=self.out_file_path is not None)
            if self.out_file_path is None:
acc = result
else:
acc, prd = result
out_file.write('FILE ID: ' + str(filename) + '\n')
for prd_lb, true_lb, indices in zip(prd, doc_lbs, doc_mix):
out_file.write('TRUE = '+str(true_lb)+' | '+'PRED = '+str(prd_lb)+' | '+self.__to_sentence(indices)+'\n')
out_file.write('\n\n')
accs.append(acc)
if verbose:
print('File {}: acc = {}'.format(filename, acc))
        if self.out_file_path is not None:
            out_file.close()
avg_acc = np.mean(accs)
print('\nAverage accuracy = {}'.format(avg_acc))
return avg_acc
def evaluate_given(self, filenames, verbose=True):
accs = []
if self.out_file_path is not None: # clear out file for new writing.
out_file = open(self.out_file_path, 'w')
for filename in filenames:
if self.mix_reader.context:
doc_mix, doc_lbs, ctx = self.mix_reader.get_mixture(filename)
                result = self.evaluate_single(doc_mix, doc_lbs, ctx,
                                              return_pred=self.out_file_path is not None)
else:
doc_mix, doc_lbs = self.mix_reader.get_mixture(filename)
                result = self.evaluate_single(doc_mix, doc_lbs,
                                              return_pred=self.out_file_path is not None)
if self.out_file_path is None:
acc = result
else:
acc, prd = result
out_file.write('FILE ID: ' + str(filename) + '\n')
for prd_lb, true_lb, indices in zip(prd, doc_lbs, doc_mix):
out_file.write('TRUE = '+str(true_lb)+' | '+'PRED = '+str(prd_lb)+' | '+self.__to_sentence(indices)+'\n')
out_file.write('\n\n')
accs.append(acc)
if verbose:
print('File {}: acc = {}'.format(filename, acc))
        if self.out_file_path is not None:
            out_file.close()
avg_acc = np.mean(accs)
print('\nAverage accuracy = {}'.format(avg_acc))
return avg_acc
if __name__ == "__main__":
import argparse
parser = argparse.ArgumentParser()
parser.add_argument('--batch_size', type=int)
parser.add_argument('--vocab_size', type=int)
parser.add_argument('--emb_size', type=int)
parser.add_argument('--n_layer', type=int)
parser.add_argument('--hid_size', type=int)
parser.add_argument('--keep_prob', type=float)
parser.add_argument('--learning_rate', type=float)
parser.add_argument('--n_epoch', type=int)
parser.add_argument('--train_size', type=int)
parser.add_argument('--verbose', type=int)
parser.add_argument('--save_freq', type=int)
parser.add_argument('--data_dir', type=str)
parser.add_argument('--info_path', type=str)
parser.add_argument('--init_with_glove', type=bool)
parser.add_argument('--save_dir', type=str)
parser.add_argument('--save_name', type=str)
parser.add_argument('--restore_dir', type=str)
parser.add_argument('--restore_name', type=str)
parser.add_argument('--load_from_saved', type=bool)
parser.add_argument('--track_dir', type=str)
parser.add_argument('--new_track', type=bool)
parser.add_argument('--session_id', type=str)
parser.add_argument('--mutual_attention', type=bool)
parser.add_argument('--context', type=bool)
parser.add_argument('--context_length', type=int)
parser.add_argument('--out_file_path', type=str)
args = parser.parse_args()
config = {'batch_size': args.batch_size, 'vocab_size': args.vocab_size, 'emb_size': args.emb_size,
'n_layer': args.n_layer, 'hid_size': args.hid_size,
'keep_prob': args.keep_prob, 'learning_rate': args.learning_rate,
'n_epoch': args.n_epoch, 'train_size': args.train_size, 'verbose': args.verbose,
'save_freq': args.save_freq,
'data_dir': args.data_dir, 'info_path': args.info_path,
'init_with_glove': args.init_with_glove,
'save_dir': args.save_dir, 'save_name': args.save_name,
'restore_dir': args.restore_dir, 'restore_name': args.restore_name,
'load_from_saved': args.load_from_saved,
'track_dir': args.track_dir, 'new_track': args.new_track, 'session_id': args.session_id,
'mutual_attention': args.mutual_attention,
'context': args.context, 'context_length': args.context_length,
'out_file_path': args.out_file_path}
psc_clf = PairwiseSentenceClassifier(config)
kmed = PscKMedoids(psc_clf, data_type='nyt')
print('\n')
sample_files = os.listdir('nyt_sample/')
kmed.evaluate_given(sample_files)
|
[
"helpers.batch",
"argparse.ArgumentParser",
"os.getcwd",
"sklearn.metrics.accuracy_score",
"numpy.mean",
"numpy.array",
"scipy.spatial.distance.pdist",
"numpy.random.choice",
"itertools.chain.from_iterable",
"os.listdir"
] |
[((715, 726), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (724, 726), False, 'import os\n'), ((7932, 7957), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (7955, 7957), False, 'import argparse\n'), ((10449, 10474), 'os.listdir', 'os.listdir', (['"""nyt_sample/"""'], {}), "('nyt_sample/')\n", (10459, 10474), False, 'import os\n'), ((3887, 3934), 'numpy.array', 'np.array', (['[(0 if i == 1 else 1) for i in clust]'], {}), '([(0 if i == 1 else 1) for i in clust])\n', (3895, 3934), True, 'import numpy as np\n'), ((4162, 4173), 'helpers.batch', 'batch', (['[x1]'], {}), '([x1])\n', (4167, 4173), False, 'from helpers import Indexer, batch, checkpoint_model\n'), ((4195, 4206), 'helpers.batch', 'batch', (['[x2]'], {}), '([x2])\n', (4200, 4206), False, 'from helpers import Indexer, batch, checkpoint_model\n'), ((4783, 4797), 'helpers.batch', 'batch', (['doc_mix'], {}), '(doc_mix)\n', (4788, 4797), False, 'from helpers import Indexer, batch, checkpoint_model\n'), ((5217, 5280), 'numpy.random.choice', 'np.random.choice', (['self.psc_clf.FILENAMES'], {'size': 'k', 'replace': '(False)'}), '(self.psc_clf.FILENAMES, size=k, replace=False)\n', (5233, 5280), True, 'import numpy as np\n'), ((6436, 6449), 'numpy.mean', 'np.mean', (['accs'], {}), '(accs)\n', (6443, 6449), True, 'import numpy as np\n'), ((7759, 7772), 'numpy.mean', 'np.mean', (['accs'], {}), '(accs)\n', (7766, 7772), True, 'import numpy as np\n'), ((3995, 4021), 'sklearn.metrics.accuracy_score', 'accuracy_score', (['true', 'pred'], {}), '(true, pred)\n', (4009, 4021), False, 'from sklearn.metrics import accuracy_score\n'), ((2073, 2101), 'itertools.chain.from_iterable', 'chain.from_iterable', (['doc_mix'], {}), '(doc_mix)\n', (2092, 2101), False, 'from itertools import chain, product\n'), ((2165, 2199), 'numpy.array', 'np.array', (['[doc_mix_flat[:CTX_LEN]]'], {}), '([doc_mix_flat[:CTX_LEN]])\n', (2173, 2199), True, 'import numpy as np\n'), ((2229, 2285), 'numpy.array', 'np.array', (['[doc_mix_flat + [0] * (CTX_LEN - doc_mix_len)]'], {}), '([doc_mix_flat + [0] * (CTX_LEN - doc_mix_len)])\n', (2237, 2285), True, 'import numpy as np\n'), ((2588, 2616), 'itertools.chain.from_iterable', 'chain.from_iterable', (['doc_mix'], {}), '(doc_mix)\n', (2607, 2616), False, 'from itertools import chain, product\n'), ((2680, 2714), 'numpy.array', 'np.array', (['[doc_mix_flat[:CTX_LEN]]'], {}), '([doc_mix_flat[:CTX_LEN]])\n', (2688, 2714), True, 'import numpy as np\n'), ((2744, 2800), 'numpy.array', 'np.array', (['[doc_mix_flat + [0] * (CTX_LEN - doc_mix_len)]'], {}), '([doc_mix_flat + [0] * (CTX_LEN - doc_mix_len)])\n', (2752, 2800), True, 'import numpy as np\n'), ((4879, 4916), 'scipy.spatial.distance.pdist', 'pdist', (['doc_mix_sq'], {'metric': 'self.__dist'}), '(doc_mix_sq, metric=self.__dist)\n', (4884, 4916), False, 'from scipy.spatial.distance import pdist, squareform\n')]
|
"""
IsOpenStackCompute
==================
The ``IsOpenStackCompute`` component uses ``PsAuxcww`` parser to determine
OpenStack Compute node. It checks if the 'nova-compute' process exists; if not, it raises
``SkipComponent`` so that the dependent component will not fire. Can be added as
a dependency of a parser so that the parser only fires if the
``IsOpenStackCompute`` dependency is met.
"""
from insights.core.plugins import component
from insights.parsers.ps import PsAuxcww
from insights.core.dr import SkipComponent
@component(PsAuxcww)
class IsOpenStackCompute(object):
"""The ``IsOpenStackCompute`` component uses ``PsAuxcww`` parser to determine
    OpenStack Compute node. It checks if the ``nova-compute`` process exists; if not, it
raises ``SkipComponent``.
Raises:
SkipComponent: When ``nova-compute`` process does not exist.
"""
def __init__(self, ps):
if 'nova-compute' not in ps.running:
raise SkipComponent('Not OpenStack Compute node')
|
[
"insights.core.dr.SkipComponent",
"insights.core.plugins.component"
] |
[((521, 540), 'insights.core.plugins.component', 'component', (['PsAuxcww'], {}), '(PsAuxcww)\n', (530, 540), False, 'from insights.core.plugins import component\n'), ((948, 991), 'insights.core.dr.SkipComponent', 'SkipComponent', (['"""Not OpenStack Compute node"""'], {}), "('Not OpenStack Compute node')\n", (961, 991), False, 'from insights.core.dr import SkipComponent\n')]
|
from . import app, api
from . import TestController, TestParameterController
from threading import Lock
TEST_PARAMETER_LOCK = Lock()
api.add_resource(TestController, '/api/tests')
api.add_resource(TestParameterController, '/api/parameters',
                 resource_class_kwargs={'lock_obj': TEST_PARAMETER_LOCK})
|
[
"threading.Lock"
] |
[((128, 134), 'threading.Lock', 'Lock', ([], {}), '()\n', (132, 134), False, 'from threading import Lock\n')]
|
from django.core.exceptions import ObjectDoesNotExist, ValidationError
from django.db import models
import cyder
from cyder.cydns.domain.models import Domain, _check_TLD_condition
from cyder.cydns.mixins import ObjectUrlMixin
from cyder.cydns.validation import validate_label, validate_name
from cyder.settings import CYDNS_BASE_URL
class CydnsRecord(models.Model, ObjectUrlMixin):
"""
This class provides common functionality that many DNS record
classes share. This includes a foreign key to the ``domain`` table
and a ``label`` CharField. This class also inherits from the
``ObjectUrlMixin`` class to provide the ``get_absolute_url``,
``get_edit_url``, and ``get_delete_url`` functions.
This class does validation on the ``label`` field. Call
``clean_all`` to trigger the validation functions. Failure to
validate will raise a ``ValidationError``.
If you plan on using the ``unique_together`` constraint on a Model
that inherits from ``CydnsRecord``, you must include ``domain`` and
``label`` explicitly if you need them to. ``CydnsRecord`` will not
enforce uniqueness for you.
All common records have a ``fqdn`` field. This field is updated
every time the object is saved::
fqdn = name + domain.name
or if name == ''
fqdn = domain.name
This field makes searching for records much easier. Instead of
looking at ``obj.label`` together with ``obj.domain.name``, you can
just search the ``obj.fqdn`` field.
As of commit 7b2fd19f, the build scripts do not care about ``fqdn``.
This could change.
"the total number of octets that represent a name (i.e., the sum of
all label octets and label lengths) is limited to 255" - RFC 4471
"""
domain = models.ForeignKey(Domain, null=False)
label = models.CharField(max_length=100, blank=True, null=True,
validators=[validate_label])
fqdn = models.CharField(max_length=255, blank=True, null=True,
validators=[validate_name])
# fqdn = label + domain.name <--- see set_fqdn
class Meta:
abstract = True
def clean(self):
self.set_fqdn()
self.check_TLD_condition()
def save(self, *args, **kwargs):
        if 'no_build' in kwargs:
no_build = kwargs.pop('no_build') # Removes key.
else:
no_build = False # We are rebuilding
super(CydnsRecord, self).save(*args, **kwargs)
if no_build:
pass
else:
# Mark the domain as dirty so it can be rebuilt.
self.domain.dirty = True
self.domain.save()
def set_fqdn(self):
try:
if self.label == '':
self.fqdn = self.domain.name
else:
self.fqdn = "{0}.{1}".format(self.label, self.domain.name)
except ObjectDoesNotExist:
return
def check_for_cname(self):
""""If a CNAME RR is preent at a node, no other data should be
present; this ensures that the data for a canonical name and its
aliases cannot be different."
-- `RFC 1034 <http://tools.ietf.org/html/rfc1034>`_
Call this function in models that can't overlap with an existing
CNAME.
"""
CNAME = cyder.cydns.cname.models.CNAME
if CNAME.objects.filter(fqdn=self.fqdn).exists():
raise ValidationError("A CNAME with this name already exists.")
def check_for_delegation(self):
"""If an object's domain is delegated it should not be able to
be changed. Delegated domains cannot have objects created in
them.
"""
if not self.domain.delegated:
return
if not self.pk: # We don't exist yet.
raise ValidationError("No objects can be created in the {0}"
"domain. It is delegated."
.format(self.domain.name))
def check_TLD_condition(self):
_check_TLD_condition(self)
|
[
"django.db.models.ForeignKey",
"django.db.models.CharField",
"django.core.exceptions.ValidationError",
"cyder.cydns.domain.models._check_TLD_condition"
] |
[((1778, 1815), 'django.db.models.ForeignKey', 'models.ForeignKey', (['Domain'], {'null': '(False)'}), '(Domain, null=False)\n', (1795, 1815), False, 'from django.db import models\n'), ((1828, 1917), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(100)', 'blank': '(True)', 'null': '(True)', 'validators': '[validate_label]'}), '(max_length=100, blank=True, null=True, validators=[\n validate_label])\n', (1844, 1917), False, 'from django.db import models\n'), ((1953, 2041), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(255)', 'blank': '(True)', 'null': '(True)', 'validators': '[validate_name]'}), '(max_length=255, blank=True, null=True, validators=[\n validate_name])\n', (1969, 2041), False, 'from django.db import models\n'), ((4044, 4070), 'cyder.cydns.domain.models._check_TLD_condition', '_check_TLD_condition', (['self'], {}), '(self)\n', (4064, 4070), False, 'from cyder.cydns.domain.models import Domain, _check_TLD_condition\n'), ((3439, 3496), 'django.core.exceptions.ValidationError', 'ValidationError', (['"""A CNAME with this name already exists."""'], {}), "('A CNAME with this name already exists.')\n", (3454, 3496), False, 'from django.core.exceptions import ObjectDoesNotExist, ValidationError\n')]
|
import os
from shutil import move as moveFile
os.chdir(os.getcwd())
print("".center(50, "="))
print("Update STEFFLIX-Daten".center(50))
print("".center(50, "="))
homeDir = os.getcwd()
allowedFileTypes = ["jpg", "jpeg", "mp4", "mp3", "png"]
diallowedItems = ["System Volume Information", "$RECYCLE.BIN", ".vscode", "sflix_sys"]
def recursiveCrawler(path, project="", serie="", staffel="", folge="", filelist={}, depth=0):
if depth == 0:
pass
elif depth == 1:
project = path.split("\\")[-1]
filelist.setdefault(project, {})
elif depth == 2:
serie = path.split("\\")[-1]
filelist[project].setdefault(serie, {})
elif depth == 3:
staffel = path.split("\\")[-1]
filelist[project][serie].setdefault(staffel, {})
elif depth == 4:
folge = path.split("\\")[-1]
filelist[project][serie][staffel].setdefault(folge, {})
# print(f"{project} {serie} {staffel}")
folderContent = os.listdir(path)
for item in folderContent:
if not item in diallowedItems:
if os.path.isfile(os.path.join(path, item)):
extension = item.split(".")[-1]
if extension in allowedFileTypes:
if depth == 1:
relPath = os.path.join(path, item)[len(homeDir):]
filelist[project].setdefault(os.path.join(".", relPath))
elif depth == 2:
relPath = os.path.join(path, item)[len(homeDir):]
filelist[project][serie].setdefault(os.path.join(".", relPath))
elif depth == 3:
relPath = os.path.join(path, item)[len(homeDir):]
filelist[project][serie][staffel].setdefault(os.path.join(".", relPath), None)
elif depth > 3:
relPath = os.path.join(path, item)[len(homeDir):]
filelist[project][serie][staffel][folge].setdefault(os.path.join(".", relPath), None)
elif os.path.isdir(os.path.join(path, item)):
filelist = recursiveCrawler(os.path.join(path, item), project, serie, staffel, folge, filelist, depth+1)
return filelist
print("Durchsuche Ordner...".ljust(40), end="")
try:
filelist = recursiveCrawler(homeDir)
print("OK")
except:
print("Fehler")
# fileWriter = open(os.path.join(homeDir, "output.txt"), "w", encoding="utf-8")
# fileWriter.write(str(filelist).replace("\\\\", "/").replace("None", "null"))
# fileWriter.close()
try:
print("Erstelle Backup...".ljust(40), end="")
if os.path.exists(os.path.join(homeDir, "sflix_sys", "data.js.bak")):
os.remove(os.path.join(homeDir, "sflix_sys", "data.js.bak"))
moveFile(os.path.join(homeDir, "sflix_sys", "data.js"), os.path.join(homeDir, "sflix_sys", "data.js.bak"))
print("OK")
except:
print("Fehler")
try:
print("Speichere neue Version...".ljust(40), end="")
fileWriter = open(os.path.join(homeDir, "sflix_sys", "data.js"), "w", encoding="utf-8")
fileWriter.write("var data = " + str(filelist).replace("\\\\", "/").replace("None", "null") + ";")
fileWriter.close()
print("OK")
except:
print("Fehler")
print("".center(50, "="))
print("Update abgeschlossen".center(50))
print("".center(50, "="))
print()
input("Enter zum Beenden")
|
[
"os.getcwd",
"os.path.join",
"os.listdir"
] |
[((175, 186), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (184, 186), False, 'import os\n'), ((56, 67), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (65, 67), False, 'import os\n'), ((979, 995), 'os.listdir', 'os.listdir', (['path'], {}), '(path)\n', (989, 995), False, 'import os\n'), ((2641, 2690), 'os.path.join', 'os.path.join', (['homeDir', '"""sflix_sys"""', '"""data.js.bak"""'], {}), "(homeDir, 'sflix_sys', 'data.js.bak')\n", (2653, 2690), False, 'import os\n'), ((2776, 2821), 'os.path.join', 'os.path.join', (['homeDir', '"""sflix_sys"""', '"""data.js"""'], {}), "(homeDir, 'sflix_sys', 'data.js')\n", (2788, 2821), False, 'import os\n'), ((2823, 2872), 'os.path.join', 'os.path.join', (['homeDir', '"""sflix_sys"""', '"""data.js.bak"""'], {}), "(homeDir, 'sflix_sys', 'data.js.bak')\n", (2835, 2872), False, 'import os\n'), ((3003, 3048), 'os.path.join', 'os.path.join', (['homeDir', '"""sflix_sys"""', '"""data.js"""'], {}), "(homeDir, 'sflix_sys', 'data.js')\n", (3015, 3048), False, 'import os\n'), ((2711, 2760), 'os.path.join', 'os.path.join', (['homeDir', '"""sflix_sys"""', '"""data.js.bak"""'], {}), "(homeDir, 'sflix_sys', 'data.js.bak')\n", (2723, 2760), False, 'import os\n'), ((1096, 1120), 'os.path.join', 'os.path.join', (['path', 'item'], {}), '(path, item)\n', (1108, 1120), False, 'import os\n'), ((2075, 2099), 'os.path.join', 'os.path.join', (['path', 'item'], {}), '(path, item)\n', (2087, 2099), False, 'import os\n'), ((2146, 2170), 'os.path.join', 'os.path.join', (['path', 'item'], {}), '(path, item)\n', (2158, 2170), False, 'import os\n'), ((1290, 1314), 'os.path.join', 'os.path.join', (['path', 'item'], {}), '(path, item)\n', (1302, 1314), False, 'import os\n'), ((1383, 1409), 'os.path.join', 'os.path.join', (['"""."""', 'relPath'], {}), "('.', relPath)\n", (1395, 1409), False, 'import os\n'), ((1482, 1506), 'os.path.join', 'os.path.join', (['path', 'item'], {}), '(path, item)\n', (1494, 1506), False, 'import os\n'), ((1582, 1608), 'os.path.join', 'os.path.join', (['"""."""', 'relPath'], {}), "('.', relPath)\n", (1594, 1608), False, 'import os\n'), ((1681, 1705), 'os.path.join', 'os.path.join', (['path', 'item'], {}), '(path, item)\n', (1693, 1705), False, 'import os\n'), ((1790, 1816), 'os.path.join', 'os.path.join', (['"""."""', 'relPath'], {}), "('.', relPath)\n", (1802, 1816), False, 'import os\n'), ((1894, 1918), 'os.path.join', 'os.path.join', (['path', 'item'], {}), '(path, item)\n', (1906, 1918), False, 'import os\n'), ((2010, 2036), 'os.path.join', 'os.path.join', (['"""."""', 'relPath'], {}), "('.', relPath)\n", (2022, 2036), False, 'import os\n')]
|
from gym.envs.registration import register
from .wrappers import *
from .logger import *
from .envs import *
register(
id='BanditsX2-v0',
kwargs = {'num_bandits' : 2},
entry_point='torch_rl.envs:BanditEnv',
)
register(
id='BanditsX4-v0',
kwargs = {'num_bandits' : 4},
entry_point='torch_rl.envs:BanditEnv',
)
register(
id='BanditsX8-v0',
kwargs = {'num_bandits' : 8},
entry_point='torch_rl.envs:BanditEnv',
)
try:
from .roboschool_envs import *
register(
id='TRLRoboschoolReacher-v1',
kwargs = {},
entry_point='torch_rl.envs:RoboschoolReacher',
max_episode_steps=150,
reward_threshold=18.0,
tags={ "pg_complexity": 1*1000000 },
)
except ImportError as e:
print('Roboschool environments excluded, import error')
try:
from .opensim_envs import *
register(
id='OsimArm2D-v1',
kwargs={'visualize': False},
entry_point='osim.env:Arm2DEnv'
)
register(
id='OsimArm3D-v1',
kwargs={'visualize': False},
entry_point='osim.env:Arm3DEnv'
)
register(
id='OsimRun3D-v1',
kwargs={'visualize': False},
entry_point='osim.env:Run3DEnv'
)
except ImportError as e:
print('Opensim environments excluded, import error ', e)
|
[
"gym.envs.registration.register"
] |
[((110, 208), 'gym.envs.registration.register', 'register', ([], {'id': '"""BanditsX2-v0"""', 'kwargs': "{'num_bandits': 2}", 'entry_point': '"""torch_rl.envs:BanditEnv"""'}), "(id='BanditsX2-v0', kwargs={'num_bandits': 2}, entry_point=\n 'torch_rl.envs:BanditEnv')\n", (118, 208), False, 'from gym.envs.registration import register\n'), ((224, 322), 'gym.envs.registration.register', 'register', ([], {'id': '"""BanditsX4-v0"""', 'kwargs': "{'num_bandits': 4}", 'entry_point': '"""torch_rl.envs:BanditEnv"""'}), "(id='BanditsX4-v0', kwargs={'num_bandits': 4}, entry_point=\n 'torch_rl.envs:BanditEnv')\n", (232, 322), False, 'from gym.envs.registration import register\n'), ((337, 435), 'gym.envs.registration.register', 'register', ([], {'id': '"""BanditsX8-v0"""', 'kwargs': "{'num_bandits': 8}", 'entry_point': '"""torch_rl.envs:BanditEnv"""'}), "(id='BanditsX8-v0', kwargs={'num_bandits': 8}, entry_point=\n 'torch_rl.envs:BanditEnv')\n", (345, 435), False, 'from gym.envs.registration import register\n'), ((498, 686), 'gym.envs.registration.register', 'register', ([], {'id': '"""TRLRoboschoolReacher-v1"""', 'kwargs': '{}', 'entry_point': '"""torch_rl.envs:RoboschoolReacher"""', 'max_episode_steps': '(150)', 'reward_threshold': '(18.0)', 'tags': "{'pg_complexity': 1 * 1000000}"}), "(id='TRLRoboschoolReacher-v1', kwargs={}, entry_point=\n 'torch_rl.envs:RoboschoolReacher', max_episode_steps=150,\n reward_threshold=18.0, tags={'pg_complexity': 1 * 1000000})\n", (506, 686), False, 'from gym.envs.registration import register\n'), ((866, 960), 'gym.envs.registration.register', 'register', ([], {'id': '"""OsimArm2D-v1"""', 'kwargs': "{'visualize': False}", 'entry_point': '"""osim.env:Arm2DEnv"""'}), "(id='OsimArm2D-v1', kwargs={'visualize': False}, entry_point=\n 'osim.env:Arm2DEnv')\n", (874, 960), False, 'from gym.envs.registration import register\n'), ((996, 1090), 'gym.envs.registration.register', 'register', ([], {'id': '"""OsimArm3D-v1"""', 'kwargs': "{'visualize': False}", 'entry_point': '"""osim.env:Arm3DEnv"""'}), "(id='OsimArm3D-v1', kwargs={'visualize': False}, entry_point=\n 'osim.env:Arm3DEnv')\n", (1004, 1090), False, 'from gym.envs.registration import register\n'), ((1125, 1219), 'gym.envs.registration.register', 'register', ([], {'id': '"""OsimRun3D-v1"""', 'kwargs': "{'visualize': False}", 'entry_point': '"""osim.env:Run3DEnv"""'}), "(id='OsimRun3D-v1', kwargs={'visualize': False}, entry_point=\n 'osim.env:Run3DEnv')\n", (1133, 1219), False, 'from gym.envs.registration import register\n')]
|
from django.shortcuts import render
from products.models import Product
from django.views.generic.list import ListView
from django.db.models import Q
class SearchProductView(ListView):
queryset = Product.objects.all()
template_name = "search/searched.html"
def get_context_data(self, *args, **kwargs):
context = super(SearchProductView, self).get_context_data(*args, **kwargs)
print(context)
context['query'] = self.request.GET.get('q')
return context
def get_queryset(self, *args, **kwargs):
request = self.request
dict = request.GET
query = dict.get('q', None)
if query is not None:
return Product.objects.search(query)
return Product.objects.featured()
|
[
"products.models.Product.objects.featured",
"products.models.Product.objects.all",
"products.models.Product.objects.search"
] |
[((202, 223), 'products.models.Product.objects.all', 'Product.objects.all', ([], {}), '()\n', (221, 223), False, 'from products.models import Product\n'), ((734, 760), 'products.models.Product.objects.featured', 'Product.objects.featured', ([], {}), '()\n', (758, 760), False, 'from products.models import Product\n'), ((689, 718), 'products.models.Product.objects.search', 'Product.objects.search', (['query'], {}), '(query)\n', (711, 718), False, 'from products.models import Product\n')]
|
"""this module is designed to test the version of the APIs required
to see if they are up to date so the program can be run"""
import logging
from news_api import check_news_version
from weather_api import check_weather_version
from covid_api import check_covid_version
logging.basicConfig(filename='pysys.log',level=logging.INFO, format='%(asctime)s %(levelname)-8s%(message)s', datefmt='%Y-%m-%d %H:%M:%S')
def test_api() -> bool:
"""this function checks to see if each API can be properly set up
and if there is an error, it is logged and the user
is told to abort the program"""
weather = False
news = False
covid = False
if check_weather_version():
logging.info("Weather API version is up to date (check_weather_version())")
weather = True
else:
logging.info("Weather API version is not up to date (check_weather_version()) - ACTION REQUIRED")
if check_news_version():
logging.info("News API version is up to date (check_news_version())")
news = True
else:
logging.info("News API version is not up to date (check_news_version()) - ACTION REQUIRED")
if check_covid_version():
logging.info("Covid-19 API version is up to date (check_covid_version())")
covid = True
else:
logging.info("Covid-19 API version is not up to date (check_covid_version()) - ACTION REQUIRED")
return bool(weather and news and covid)
if __name__ == '__main__':
logging.info("Test API Module Tested")
print(test_api())#tests the function
|
[
"logging.basicConfig",
"weather_api.check_weather_version",
"logging.info",
"news_api.check_news_version",
"covid_api.check_covid_version"
] |
[((280, 424), 'logging.basicConfig', 'logging.basicConfig', ([], {'filename': '"""pysys.log"""', 'level': 'logging.INFO', 'format': '"""%(asctime)s %(levelname)-8s%(message)s"""', 'datefmt': '"""%Y-%m-%d %H:%M:%S"""'}), "(filename='pysys.log', level=logging.INFO, format=\n '%(asctime)s %(levelname)-8s%(message)s', datefmt='%Y-%m-%d %H:%M:%S')\n", (299, 424), False, 'import logging\n'), ((677, 700), 'weather_api.check_weather_version', 'check_weather_version', ([], {}), '()\n', (698, 700), False, 'from weather_api import check_weather_version\n'), ((937, 957), 'news_api.check_news_version', 'check_news_version', ([], {}), '()\n', (955, 957), False, 'from news_api import check_news_version\n'), ((1179, 1200), 'covid_api.check_covid_version', 'check_covid_version', ([], {}), '()\n', (1198, 1200), False, 'from covid_api import check_covid_version\n'), ((1505, 1543), 'logging.info', 'logging.info', (['"""Test API Module Tested"""'], {}), "('Test API Module Tested')\n", (1517, 1543), False, 'import logging\n'), ((711, 786), 'logging.info', 'logging.info', (['"""Weather API version is up to date (check_weather_version())"""'], {}), "('Weather API version is up to date (check_weather_version())')\n", (723, 786), False, 'import logging\n'), ((831, 938), 'logging.info', 'logging.info', (['"""Weather API version is not up to date (check_weather_version()) - ACTION REQUIRED"""'], {}), "(\n 'Weather API version is not up to date (check_weather_version()) - ACTION REQUIRED'\n )\n", (843, 938), False, 'import logging\n'), ((968, 1037), 'logging.info', 'logging.info', (['"""News API version is up to date (check_news_version())"""'], {}), "('News API version is up to date (check_news_version())')\n", (980, 1037), False, 'import logging\n'), ((1079, 1180), 'logging.info', 'logging.info', (['"""News API version is not up to date (check_news_version()) - ACTION REQUIRED"""'], {}), "(\n 'News API version is not up to date (check_news_version()) - ACTION REQUIRED'\n )\n", (1091, 1180), False, 'import logging\n'), ((1211, 1285), 'logging.info', 'logging.info', (['"""Covid-19 API version is up to date (check_covid_version())"""'], {}), "('Covid-19 API version is up to date (check_covid_version())')\n", (1223, 1285), False, 'import logging\n'), ((1328, 1434), 'logging.info', 'logging.info', (['"""Covid-19 API version is not up to date (check_covid_version()) - ACTION REQUIRED"""'], {}), "(\n 'Covid-19 API version is not up to date (check_covid_version()) - ACTION REQUIRED'\n )\n", (1340, 1434), False, 'import logging\n')]
|
from datetime import datetime
import pandas as pd
from typing import Any, Dict, List, Tuple
class CredData():
"""
Parses information from Sourcecred
- Works with TimelineCred data format (sourcecred <= v0.7x)
"""
def __init__(self, cred_data, accounts_data):
self.cred_json_data = cred_data
self.weighted_graph = cred_data[1]['weightedGraph'][1]
self.cred_data = cred_data[1]['credData']
self.accounts_data = accounts_data
self.cache = {
'df': None,
'df_rank': None,
'df_grain': None,
'df_accounts': None,
'df_cred_ot': None,
'df_cred_eflow': None,
'df_cred_nflow': None,
}
def get_weighted_graph(self, data) -> Dict[str, Any]:
"""
Weighted graph from CredResult JSON data
"""
return self.weighted_graph
def get_cred_data(self) -> Dict[str, Any]:
"""
Raw CredResult JSON data
"""
return self.cred_data
def get_node(self, i: int) -> Dict[str, Any]:
"""
Returns specifc node's information
"""
node = dict()
address = self.weighted_graph['graphJSON'][1]['sortedNodeAddresses'][i]
node['address.source'] = f'{address[0]}/{address[1]}'
node['address.nodeType'] = address[2]
node['address.id'] = address[3]
node['totalCred'] = self.cred_data['nodeSummaries'][i]['cred']
node['credOverTime'] = self.cred_data['nodeOverTime'][i]['cred'] if self.cred_data['nodeOverTime'][i] else []
node['description'] = self.weighted_graph['graphJSON'][1]['nodes'][i]['description']
node['timestamp'] = self.weighted_graph['graphJSON'][1]['nodes'][i]['timestampMs']
node['user'] = self.weighted_graph['graphJSON'][1]['nodes'][i]['description'] if node['address.nodeType'] == 'IDENTITY' else None
return node
@property
def total_nodes(self) -> int:
"""
Total amount of nodes (users, posts, etc) in the graph
"""
return len(self.cred_data['nodeSummaries'])
@property
def nodes(self) -> List[Any]:
"""
Returns all nodes in the graph
"""
return [self.get_node(i) for i in range(self.total_nodes)]
@property
def intervals(self, to_datetime=False) -> List[Any]:
"""
Returns timestamp intervals where cred was computed
"""
return self.cred_data['intervals']
def get_dt_intervals(self) -> List[Any]:
"""
Return intervals in datetime format
"""
return [datetime.fromtimestamp(interval[('endTimeMs')] / 1000) for interval in self.intervals]
@property
def distributed_cred(self) -> float:
"""
Returns total distributed cred
"""
if self.cache['df'] is None:
self.to_df()
return self.cache['df'].totalCred.sum()
@property
def distributed_grain(self) -> float:
"""
Returns total distributed grain
"""
if self.cache['df_grain'] is None:
self.get_grain_distribution()
return self.cache['df_grain'].amount.sum()
@property
def accounts(self) -> pd.DataFrame:
"""
Returns user accounts info from 'output/accounts.json' file
"""
if self.cache['df_accounts'] is None:
self.cache['df_accounts'] = pd.json_normalize(self.accounts_data['accounts'])
self.cache['df_accounts']['account.balance'] = self.cache['df_accounts']['account.balance'].map(float) / 1e18
self.cache['df_accounts']['account.paid'] = self.cache['df_accounts']['account.paid'].map(float) / 1e18
return self.cache['df_accounts']
def get_user_nodes(self) -> pd.DataFrame:
"""
Returns user nodes in the graph
"""
if self.cache['df'] is None:
self.to_df()
return self.cache['df'][self.cache['df']['address.nodeType'] == 'IDENTITY']
def get_user_ranking(self) -> pd.DataFrame:
"""
        Returns the user ranking by total amount of cred gained so far
"""
if self.cache['df_rank'] is None:
# self.cache['df_rank'] = self.get_user_nodes().sort_values('totalCred', ascending=False).reset_index(drop=True)
# distributed_cred = self.cache['df_rank'].totalCred.sum()
# self.cache['df_rank']['credShare'] = (self.cache['df_rank'].totalCred / distributed_cred) * 100
df_rank_p = self.get_user_nodes()[['address.id', 'totalCred', 'credOverTime']]
distributed_cred = df_rank_p.totalCred.sum()
df_rank_p['credShare'] = (df_rank_p.totalCred / distributed_cred) * 100
df_rank_p.set_index('address.id', inplace=True)
df_acc_p = self.accounts[['account.identity.id',
'account.identity.name',
'account.identity.subtype',
'account.active',
'account.balance',
'account.paid'
]]
self.cache['df_rank'] = df_acc_p.join(df_rank_p,
on='account.identity.id',
how='inner'
).sort_values('totalCred', ascending=False).reset_index(drop=True)
self.cache['df_rank'].columns = ['id', 'user', 'type', 'active', 'grainBalance', 'grainPaid', 'totalCred', 'credOverTime', 'credShare']
return self.cache['df_rank']
def get_grain_distribution(self) -> pd.DataFrame:
"""
Returns the history of grain distribution
"""
if self.cache['df_grain'] is None:
grain_history = [acc for acc in self.accounts_data['accounts'] if 'allocationHistory' in acc['account']]
if len(grain_history) > 0:
grain_distribution = [{'credTimestampMs': record['credTimestampMs'], 'amount': int(record['grainReceipt']['amount']) / 1e18} \
for acc in grain_history for record in acc['account']['allocationHistory']]
self.cache['df_grain'] = pd.json_normalize(grain_distribution)
self.cache['df_grain']['credTimestampMs'] = pd.to_datetime(self.cache['df_grain']['credTimestampMs'], unit='ms')
else:
# zeros
self.cache['df_grain'] = pd.DataFrame([self.get_dt_intervals(), [0.] * len(self.intervals)]).T
self.cache['df_grain'].columns = ['credTimestampMs', 'amount']
return self.cache['df_grain']
def get_cred_over_time(self) -> pd.DataFrame:
"""
Returns distributed cred summary over all intervals
"""
if self.cache['df_cred_ot'] is None:
if self.cache['df'] is None:
self.to_df()
self.cache['df_cred_ot'] = pd.DataFrame([self.get_dt_intervals(),
pd.DataFrame(self.cache['df'].credOverTime.to_list()).sum()
]).T
self.cache['df_cred_ot'].columns = ['credTimestampMs', 'amount']
self.cache['df_cred_ot'].set_index('credTimestampMs', drop=True, inplace=True)
return self.cache['df_cred_ot']
def to_df(self) -> pd.DataFrame:
"""
        Returns all nodes data as a DataFrame
"""
if self.cache['df'] is None:
self.cache['df'] = pd.json_normalize(self.nodes)
self.cache['df'].timestamp = pd.to_datetime(self.cache['df'].timestamp, unit='ms')
# distributedCred = self.df.totalCred.sum()
# self.df['credShare'] = self.df.totalCred / distributedCred
return self.cache['df']
def get_cred_flow_from_graph(self) -> Tuple[pd.DataFrame, pd.DataFrame]:
"""
Gets cred flow through nodes & edges in the cred graph.
"""
if self.cache['df_cred_eflow'] is None:
def set_plugin(label):
for prefix, plugin in plugin_prefixes.items():
if label.startswith(prefix):
return plugin
return 'Not Found'
# PREPROCESSING
plugin_meta = dict()
edges = []
nodes = []
# edges_weights = dict()
# nodes_weights = dict()
for plugin in self.cred_json_data[1]['plugins'][1]:
plugin_meta[plugin['name']] = {
'nodePrefix': plugin['nodePrefix'],
'edgePrefix': plugin['edgePrefix'],
'edgeTypes': [{'prefix': et['prefix'], 'weight': et['defaultWeight']} for et in plugin['edgeTypes']],
'nodeTypes': [{'prefix': nt['prefix'], 'weight': nt['defaultWeight']} for nt in plugin['nodeTypes']],
}
edges.extend([et['prefix'] for et in plugin_meta[plugin['name']]['edgeTypes']])
# for et in plugin_meta[plugin['name']]['edgeTypes']:
# edges_weights[et['prefix']] = et['weight']
nodes.extend([nt['prefix'] for nt in plugin_meta[plugin['name']]['nodeTypes']])
# for nt in plugin_meta[plugin['name']]['nodeTypes']:
# nodes_weights[nt['prefix']] = nt['weight']
plugin_prefixes = {plugin_meta[p_name]['nodePrefix'].replace('\x00', ''): p_name for p_name in plugin_meta}
plugin_prefixes.update({plugin_meta[p_name]['edgePrefix'].replace('\x00', ''): p_name for p_name in plugin_meta})
# EDGES
df_ew = pd.DataFrame([self.weighted_graph['weightsJSON'][1]['edgeWeights'].keys(),
[v['backwards'] for v in self.weighted_graph['weightsJSON'][1]['edgeWeights'].values()],
[v['forwards'] for v in self.weighted_graph['weightsJSON'][1]['edgeWeights'].values()]
]).T
df_ew.columns = ['edge', 'backward', 'forward']
cred_edges = dict()
for e in edges:
cred_edges[e.replace('\x00', '')] = [
df_ew[df_ew.edge.str.startswith(e)].backward.sum(),
df_ew[df_ew.edge.str.startswith(e)].forward.sum()
]
self.cache['df_cred_eflow'] = pd.json_normalize(cred_edges).T
self.cache['df_cred_eflow']['backward'] = self.cache['df_cred_eflow'].iloc[:,0].apply(lambda x: x[0])
self.cache['df_cred_eflow']['forward'] = self.cache['df_cred_eflow'].iloc[:,0].apply(lambda x: x[1])
self.cache['df_cred_eflow']['plugin'] = self.cache['df_cred_eflow'].index.map(set_plugin)
self.cache['df_cred_eflow'].drop(columns=[0], inplace=True)
# NODES
df_nw = pd.DataFrame([self.weighted_graph['weightsJSON'][1]['nodeWeights'].keys(),
self.weighted_graph['weightsJSON'][1]['nodeWeights'].values()
]).T
df_nw.columns = ['node', 'weight']
cred_nodes = dict()
for n in nodes:
cred_nodes[n.replace('\x00', '')] = df_nw[df_nw.node.str.startswith(n)].weight.sum()
self.cache['df_cred_nflow'] = pd.json_normalize(cred_nodes).T
self.cache['df_cred_nflow'].columns = ['weight']
self.cache['df_cred_nflow']['plugin'] = self.cache['df_cred_nflow'].index.map(set_plugin)
return (self.cache['df_cred_nflow'], self.cache['df_cred_eflow'])
def __repr__(self) -> str:
return "<{} - ({} nodes & {} distributed CRED)>".format(self.__class__.__name__, self.total_nodes, self.distributed_cred)
|
[
"pandas.json_normalize",
"pandas.to_datetime",
"datetime.datetime.fromtimestamp"
] |
[((2679, 2731), 'datetime.datetime.fromtimestamp', 'datetime.fromtimestamp', (["(interval['endTimeMs'] / 1000)"], {}), "(interval['endTimeMs'] / 1000)\n", (2701, 2731), False, 'from datetime import datetime\n'), ((3497, 3546), 'pandas.json_normalize', 'pd.json_normalize', (["self.accounts_data['accounts']"], {}), "(self.accounts_data['accounts'])\n", (3514, 3546), True, 'import pandas as pd\n'), ((7735, 7764), 'pandas.json_normalize', 'pd.json_normalize', (['self.nodes'], {}), '(self.nodes)\n', (7752, 7764), True, 'import pandas as pd\n'), ((7806, 7859), 'pandas.to_datetime', 'pd.to_datetime', (["self.cache['df'].timestamp"], {'unit': '"""ms"""'}), "(self.cache['df'].timestamp, unit='ms')\n", (7820, 7859), True, 'import pandas as pd\n'), ((6413, 6450), 'pandas.json_normalize', 'pd.json_normalize', (['grain_distribution'], {}), '(grain_distribution)\n', (6430, 6450), True, 'import pandas as pd\n'), ((6511, 6579), 'pandas.to_datetime', 'pd.to_datetime', (["self.cache['df_grain']['credTimestampMs']"], {'unit': '"""ms"""'}), "(self.cache['df_grain']['credTimestampMs'], unit='ms')\n", (6525, 6579), True, 'import pandas as pd\n'), ((10641, 10670), 'pandas.json_normalize', 'pd.json_normalize', (['cred_edges'], {}), '(cred_edges)\n', (10658, 10670), True, 'import pandas as pd\n'), ((11601, 11630), 'pandas.json_normalize', 'pd.json_normalize', (['cred_nodes'], {}), '(cred_nodes)\n', (11618, 11630), True, 'import pandas as pd\n')]
|
from django.contrib import admin
from translations.admin import TranslatableAdmin, TranslationInline
from .models import Timezone, Continent, Country, City
class TimezoneAdmin(TranslatableAdmin):
inlines = [TranslationInline]
class ContinentAdmin(TranslatableAdmin):
inlines = [TranslationInline]
class CountryAdmin(TranslatableAdmin):
inlines = [TranslationInline]
class CityAdmin(TranslatableAdmin):
inlines = [TranslationInline]
admin.site.register(Timezone, TimezoneAdmin)
admin.site.register(Continent, ContinentAdmin)
admin.site.register(Country, CountryAdmin)
admin.site.register(City, CityAdmin)
|
[
"django.contrib.admin.site.register"
] |
[((459, 503), 'django.contrib.admin.site.register', 'admin.site.register', (['Timezone', 'TimezoneAdmin'], {}), '(Timezone, TimezoneAdmin)\n', (478, 503), False, 'from django.contrib import admin\n'), ((504, 550), 'django.contrib.admin.site.register', 'admin.site.register', (['Continent', 'ContinentAdmin'], {}), '(Continent, ContinentAdmin)\n', (523, 550), False, 'from django.contrib import admin\n'), ((551, 593), 'django.contrib.admin.site.register', 'admin.site.register', (['Country', 'CountryAdmin'], {}), '(Country, CountryAdmin)\n', (570, 593), False, 'from django.contrib import admin\n'), ((594, 630), 'django.contrib.admin.site.register', 'admin.site.register', (['City', 'CityAdmin'], {}), '(City, CityAdmin)\n', (613, 630), False, 'from django.contrib import admin\n')]
|
import pandas as pd
import numpy as np
from sklearn.metrics import accuracy_score
import torch
from torch.utils.data import DataLoader
import torch.optim as optim
from model import CassavaModel
from loss import DenseCrossEntropy
import dataset
from config import *
def train_one_fold(fold, model, optimizer):
df = pd.read_csv('./input/train_ohe.csv')
train_df = df[df.kfold != fold].reset_index(drop=True)
valid_df = df[df.kfold == fold].reset_index(drop=True)
train_dataset = dataset.CassavaDataset(train_df, device=DEVICE)
valid_dataset = dataset.CassavaDataset(valid_df, device=DEVICE)
train_dataloader = DataLoader(train_dataset, batch_size=BATCH_SIZE, num_workers=4, shuffle=True)
vaid_dataloader = DataLoader(valid_dataset, batch_size=BATCH_SIZE, num_workers=4, shuffle=True)
device = torch.device(DEVICE)
criterion = DenseCrossEntropy()
train_fold_results = []
for epoch in range(EPOCHS):
model.train()
t_loss = 0
for step, batch in enumerate(train_dataloader):
img = batch[0]
label = batch[1]
img = img.to(DEVICE, dtype=torch.float)
label = label.to(DEVICE, dtype=torch.float)
outputs = model(img)
# print(f'outputs \n {outputs}')
loss = criterion(outputs, label.squeeze(-1))
loss.backward()
t_loss += loss.item()
optimizer.step()
optimizer.zero_grad()
model.eval()
val_loss = 0
val_preds = None
val_labels = None
for step, batch in enumerate(vaid_dataloader):
img = batch[0]
label = batch[1]
if val_labels is None:
val_labels = label.clone().squeeze(-1)
else:
val_labels = torch.cat((val_labels, label.squeeze(-1)), dim=0)
img = img.to(DEVICE, dtype=torch.float)
label = label.to(DEVICE, dtype=torch.float)
with torch.no_grad():
outputs = model(img)
loss = criterion(outputs, label.squeeze(-1))
val_loss += loss.item()
			preds = torch.softmax(outputs, dim=1).data.cpu()
if val_preds is None:
val_preds = preds
else:
val_preds = torch.cat((val_preds, preds), dim=0)
val_preds = torch.argmax(val_preds, dim=1)
print(f'EPOCH : {epoch}, train_loss: {t_loss}, valid_loss: {val_loss}')
train_fold_results.append({
'fold': fold,
'epoch': epoch,
'train_loss': t_loss / len(train_dataloader),
'valid_loss': val_loss / len(vaid_dataloader)
})
return val_preds, train_fold_results
def k_fold_train(folds):
model = CassavaModel()
model.to(DEVICE)
plist = [{'params':model.parameters(), 'lr':5e-5}]
optimizer = optim.Adam(plist)
df = pd.read_csv('./input/train_ohe.csv')
oof_preds = np.zeros((df.shape[0]))
train_results = []
for i in range(folds):
valid_idx = df[df.kfold == i].index
val_preds, train_fold_results = train_one_fold(i, model, optimizer)
oof_preds[valid_idx] = val_preds.numpy()
train_results += train_fold_results
torch.save({
'fold': i,
			'lr': optimizer.state_dict()['param_groups'][0]['lr'],
'model_state_dict': model.state_dict(),
'optimizer_state_dict': optimizer.state_dict()
		}, f'./model/baseline/val_loss {train_results[-1]["valid_loss"]}.pth')
if __name__ == '__main__':
k_fold_train(5)
|
[
"loss.DenseCrossEntropy",
"torch.utils.data.DataLoader",
"dataset.CassavaDataset",
"pandas.read_csv",
"torch.argmax",
"numpy.zeros",
"torch.cat",
"torch.softmax",
"torch.optim.Adam",
"torch.device",
"torch.no_grad",
"model.CassavaModel"
] |
[((319, 355), 'pandas.read_csv', 'pd.read_csv', (['"""./input/train_ohe.csv"""'], {}), "('./input/train_ohe.csv')\n", (330, 355), True, 'import pandas as pd\n'), ((487, 534), 'dataset.CassavaDataset', 'dataset.CassavaDataset', (['train_df'], {'device': 'DEVICE'}), '(train_df, device=DEVICE)\n', (509, 534), False, 'import dataset\n'), ((552, 599), 'dataset.CassavaDataset', 'dataset.CassavaDataset', (['valid_df'], {'device': 'DEVICE'}), '(valid_df, device=DEVICE)\n', (574, 599), False, 'import dataset\n'), ((621, 698), 'torch.utils.data.DataLoader', 'DataLoader', (['train_dataset'], {'batch_size': 'BATCH_SIZE', 'num_workers': '(4)', 'shuffle': '(True)'}), '(train_dataset, batch_size=BATCH_SIZE, num_workers=4, shuffle=True)\n', (631, 698), False, 'from torch.utils.data import DataLoader\n'), ((718, 795), 'torch.utils.data.DataLoader', 'DataLoader', (['valid_dataset'], {'batch_size': 'BATCH_SIZE', 'num_workers': '(4)', 'shuffle': '(True)'}), '(valid_dataset, batch_size=BATCH_SIZE, num_workers=4, shuffle=True)\n', (728, 795), False, 'from torch.utils.data import DataLoader\n'), ((807, 827), 'torch.device', 'torch.device', (['DEVICE'], {}), '(DEVICE)\n', (819, 827), False, 'import torch\n'), ((845, 864), 'loss.DenseCrossEntropy', 'DenseCrossEntropy', ([], {}), '()\n', (862, 864), False, 'from loss import DenseCrossEntropy\n'), ((2392, 2406), 'model.CassavaModel', 'CassavaModel', ([], {}), '()\n', (2404, 2406), False, 'from model import CassavaModel\n'), ((2491, 2508), 'torch.optim.Adam', 'optim.Adam', (['plist'], {}), '(plist)\n', (2501, 2508), True, 'import torch.optim as optim\n'), ((2516, 2552), 'pandas.read_csv', 'pd.read_csv', (['"""./input/train_ohe.csv"""'], {}), "('./input/train_ohe.csv')\n", (2527, 2552), True, 'import pandas as pd\n'), ((2566, 2587), 'numpy.zeros', 'np.zeros', (['df.shape[0]'], {}), '(df.shape[0])\n', (2574, 2587), True, 'import numpy as np\n'), ((2038, 2068), 'torch.argmax', 'torch.argmax', (['val_preds'], {'dim': '(1)'}), '(val_preds, dim=1)\n', (2050, 2068), False, 'import torch\n'), ((1735, 1750), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (1748, 1750), False, 'import torch\n'), ((1987, 2023), 'torch.cat', 'torch.cat', (['(val_preds, preds)'], {'dim': '(0)'}), '((val_preds, preds), dim=0)\n', (1996, 2023), False, 'import torch\n'), ((1868, 1897), 'torch.softmax', 'torch.softmax', (['outputs'], {'dim': '(1)'}), '(outputs, dim=1)\n', (1881, 1897), False, 'import torch\n')]
|
# Copyright 2020 Google LLC
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for FqeOperator."""
from fqe.fqe_ops import fqe_operator
from fqe import wavefunction
def test_operator():
"""Testing abstract FqeOperator class using a dummy class"""
# pylint: disable=useless-super-delegation
class TestFQEOperator(fqe_operator.FqeOperator):
"""
This class is just to make sure the abstract FqeOperator class is tested.
"""
def contract(
self,
brastate: "wavefunction.Wavefunction",
ketstate: "wavefunction.Wavefunction",
) -> complex:
return super().contract(brastate, ketstate)
def representation(self) -> str:
return super().representation()
def rank(self) -> int:
return super().rank()
test = TestFQEOperator()
wfn = wavefunction.Wavefunction([[1, 1, 1]])
assert round(abs(0.0 + 0.0j - test.contract(wfn, wfn)), 7) == 0
assert "fqe-operator" == test.representation()
assert 0 == test.rank()
|
[
"fqe.wavefunction.Wavefunction"
] |
[((1409, 1447), 'fqe.wavefunction.Wavefunction', 'wavefunction.Wavefunction', (['[[1, 1, 1]]'], {}), '([[1, 1, 1]])\n', (1434, 1447), False, 'from fqe import wavefunction\n')]
|
# Generated by Django 3.0.8 on 2020-07-31 12:04
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('Africa', '0005_auto_20200731_1203'),
]
operations = [
migrations.AddField(
model_name='dish',
name='images01',
field=models.ImageField(default=1, upload_to='main_product/'),
preserve_default=False,
),
]
|
[
"django.db.models.ImageField"
] |
[((333, 388), 'django.db.models.ImageField', 'models.ImageField', ([], {'default': '(1)', 'upload_to': '"""main_product/"""'}), "(default=1, upload_to='main_product/')\n", (350, 388), False, 'from django.db import migrations, models\n')]
|
"""handles request for user gchart widget."""
from apps.managers.challenge_mgr import challenge_mgr
from apps.widgets.status.models import DailyStatus
def supply(request, page_name):
"""supply view_objects for user status."""
_ = page_name
_ = request
#todays_users = Profile.objects.filter(last_visit_date=datetime.datetime.today())
rounds_info = challenge_mgr.get_all_round_info()
start = rounds_info["competition_start"]
daily_status = DailyStatus.objects.filter(short_date__gte=start).order_by('short_date')
prior_day_users = 0
for status in daily_status:
status.display_date = "%d/%d" % (status.short_date.month, status.short_date.day)
status.new_users = status.setup_users - prior_day_users
prior_day_users = status.setup_users
return {
"daily_status": daily_status,
}
|
[
"apps.widgets.status.models.DailyStatus.objects.filter",
"apps.managers.challenge_mgr.challenge_mgr.get_all_round_info"
] |
[((372, 406), 'apps.managers.challenge_mgr.challenge_mgr.get_all_round_info', 'challenge_mgr.get_all_round_info', ([], {}), '()\n', (404, 406), False, 'from apps.managers.challenge_mgr import challenge_mgr\n'), ((471, 520), 'apps.widgets.status.models.DailyStatus.objects.filter', 'DailyStatus.objects.filter', ([], {'short_date__gte': 'start'}), '(short_date__gte=start)\n', (497, 520), False, 'from apps.widgets.status.models import DailyStatus\n')]
|
import cv2
from src.utils.heatmap import getHeatmaps
from src.visualization.visualize import visualizeAllHeatmap, visualizeBackgroundHeatmap
keypoints = [ [[100, 100, 2], [105,105, 2]] ]
image = cv2.imread('images/person.jpg')
hmaps = getHeatmaps(image, keypoints, 7)
visualizeAllHeatmap(image, hmaps)
visualizeBackgroundHeatmap(image, hmaps)
cv2.waitKey(0)
|
[
"src.visualization.visualize.visualizeAllHeatmap",
"src.utils.heatmap.getHeatmaps",
"cv2.waitKey",
"cv2.imread",
"src.visualization.visualize.visualizeBackgroundHeatmap"
] |
[((197, 228), 'cv2.imread', 'cv2.imread', (['"""images/person.jpg"""'], {}), "('images/person.jpg')\n", (207, 228), False, 'import cv2\n'), ((238, 270), 'src.utils.heatmap.getHeatmaps', 'getHeatmaps', (['image', 'keypoints', '(7)'], {}), '(image, keypoints, 7)\n', (249, 270), False, 'from src.utils.heatmap import getHeatmaps\n'), ((272, 305), 'src.visualization.visualize.visualizeAllHeatmap', 'visualizeAllHeatmap', (['image', 'hmaps'], {}), '(image, hmaps)\n', (291, 305), False, 'from src.visualization.visualize import visualizeAllHeatmap, visualizeBackgroundHeatmap\n'), ((306, 346), 'src.visualization.visualize.visualizeBackgroundHeatmap', 'visualizeBackgroundHeatmap', (['image', 'hmaps'], {}), '(image, hmaps)\n', (332, 346), False, 'from src.visualization.visualize import visualizeAllHeatmap, visualizeBackgroundHeatmap\n'), ((347, 361), 'cv2.waitKey', 'cv2.waitKey', (['(0)'], {}), '(0)\n', (358, 361), False, 'import cv2\n')]
|
'''
I actually didn't write this. Credit to https://github.com/lipsumar/meme-caption
'''
from PIL import Image
from PIL import ImageFont
from PIL import ImageDraw
import sys
img = Image.open(sys.argv[1])
draw = ImageDraw.Draw(img)
def drawText(msg, pos):
fontSize = img.width//10
lines = []
font = ImageFont.truetype("impact.ttf", fontSize)
w, h = draw.textsize(msg, font)
imgWidthWithPadding = img.width * 0.99
# 1. how many lines for the msg to fit ?
lineCount = 1
if(w > imgWidthWithPadding):
lineCount = int(round((w / imgWidthWithPadding) + 1))
if lineCount > 2:
while 1:
fontSize -= 2
font = ImageFont.truetype("impact.ttf", fontSize)
w, h = draw.textsize(msg, font)
lineCount = int(round((w / imgWidthWithPadding) + 1))
print("try again with fontSize={} => {}".format(fontSize, lineCount))
if lineCount < 3 or fontSize < 10:
break
print("img.width: {}, text width: {}".format(img.width, w))
print("Text length: {}".format(len(msg)))
print("Lines: {}".format(lineCount))
# 2. divide text in X lines
lastCut = 0
isLast = False
for i in range(0,lineCount):
if lastCut == 0:
cut = (len(msg) / lineCount) * i
else:
cut = lastCut
if i < lineCount-1:
nextCut = (len(msg) / lineCount) * (i+1)
else:
nextCut = len(msg)
isLast = True
print("cut: {} -> {}".format(cut, nextCut))
# make sure we don't cut words in half
if nextCut == len(msg) or msg[int(nextCut)] == " ":
print("may cut")
else:
print("may not cut")
while msg[int(nextCut)] != " ":
nextCut += 1
print("new cut: {}".format(nextCut))
line = msg[int(cut):int(nextCut)].strip()
# is line still fitting ?
w, h = draw.textsize(line, font)
if not isLast and w > imgWidthWithPadding:
print("overshot")
nextCut -= 1
while msg[nextCut] != " ":
nextCut -= 1
print("new cut: {}".format(nextCut))
lastCut = nextCut
lines.append(msg[int(cut):int(nextCut)].strip())
print(lines)
# 3. print each line centered
lastY = -h
if pos == "bottom":
lastY = img.height - h * (lineCount+1) - 10
for i in range(0,lineCount):
w, h = draw.textsize(lines[i], font)
textX = img.width/2 - w/2
#if pos == "top":
# textY = h * i
#else:
# textY = img.height - h * i
textY = lastY + h
offset = fontSize//28
draw.text((textX-offset, textY-offset),lines[i],(0,0,0),font=font)
draw.text((textX+offset, textY-offset),lines[i],(0,0,0),font=font)
draw.text((textX+offset, textY+offset),lines[i],(0,0,0),font=font)
draw.text((textX-offset, textY+offset),lines[i],(0,0,0),font=font)
draw.text((textX, textY),lines[i],(255,255,255),font=font)
lastY = textY
return
drawText(sys.argv[2].upper(), "top")
drawText(sys.argv[3].upper(), "bottom")
img.save(sys.argv[4])
|
[
"PIL.ImageDraw.Draw",
"PIL.ImageFont.truetype",
"PIL.Image.open"
] |
[((183, 206), 'PIL.Image.open', 'Image.open', (['sys.argv[1]'], {}), '(sys.argv[1])\n', (193, 206), False, 'from PIL import Image\n'), ((214, 233), 'PIL.ImageDraw.Draw', 'ImageDraw.Draw', (['img'], {}), '(img)\n', (228, 233), False, 'from PIL import ImageDraw\n'), ((318, 360), 'PIL.ImageFont.truetype', 'ImageFont.truetype', (['"""impact.ttf"""', 'fontSize'], {}), "('impact.ttf', fontSize)\n", (336, 360), False, 'from PIL import ImageFont\n'), ((685, 727), 'PIL.ImageFont.truetype', 'ImageFont.truetype', (['"""impact.ttf"""', 'fontSize'], {}), "('impact.ttf', fontSize)\n", (703, 727), False, 'from PIL import ImageFont\n')]
|
import numpy as np
from ipec.ip.core import parse_subnet_str
from ipec.ip.core import IPStructure
from ipec.ip.core import Interface
from ipec.ip.encoder import Encoder
from ipec.ip.decoder import Decoder
from ipec.ip.core import max_decimal_value_of_binary
# convolutional layer fields
CONV_FIELDS = {
'filter_size': 5,
'num_of_feature_maps': 7,
'stride_size': 4,
'mean': 9,
'std_dev': 9
}
# convolutional layer subnet
CONV_SUBNET = '0.0.0.0.0/6'
# pooling layer fields
POOLING_FIELDS = {
'kernel_size': 5,
'stride_size': 4,
'type': 1
}
# pooling layer subnet
POOLING_SUBNET = '4.32.0.0.0/30'
# fully-connected layer fields
FULLYCONNECTED_FIELDS = {
'num_of_neurons': 11,
'mean': 9,
'std_dev': 9
}
# fully-connected layer subnet
FULLYCONNECTED_SUBNET = '4.0.0.0.0/11'
# disabled layer fields
DISABLED_FIELDS = {
'disabled': 10,
}
# disabled layer subnet
DISABLED_SUBNET = '4.32.0.4.0/30'
def initialise_cnn_layers_3_bytes():
"""
initialise cnn layers with 3 bytes IP
:return:
"""
# convolutional layer fields
conv_fields = {
'filter_size': 3, #8
'num_of_feature_maps': 7, #128
'stride_size': 2, #4
'mean': 4, #(0~15-7)/8
'std_dev': 4 # 0~16/16
#total bits: 20
}
# convolutional layer subnet
conv_subnet = '0.0.0/4'
# pooling layer fields
pooling_fields = {
'kernel_size': 2,
'stride_size': 2,
'type': 1,
'placeholder': 14
# total bits: 19
}
# pooling layer subnet
pooling_subnet = '16.0.0/5'
# fully-connected layer fields
fullyconnected_fields = {
'num_of_neurons': 11,
'mean': 4,
'std_dev': 4
# total bits: 19
}
# fully-connected layer subnet
fullyconnected_subnet = '24.0.0/5'
# disabled layer fields
disabled_fields = {
'disabled': 19,
}
# disabled layer subnet
disabled_subnet = '32.0.0/5'
return {
'conv': ConvLayer(conv_subnet,conv_fields),
'pooling': PoolingLayer(pooling_subnet, pooling_fields),
'full': FullyConnectedLayer(fullyconnected_subnet, fullyconnected_fields),
'disabled': DisabledLayer(disabled_subnet, disabled_fields)
}
def initialise_cnn_layers_with_xavier_weights():
"""
initialise cnn layers with xavier weight initialisation
:return:
"""
# convolutional layer fields
conv_fields = {
'filter_size': 3, #8
'num_of_feature_maps': 7, #128
'stride_size': 2, #4
#total bits: 12
}
# convolutional layer subnet
conv_subnet = '0.0/4'
# pooling layer fields
pooling_fields = {
'kernel_size': 2,
'stride_size': 2,
'type': 1,
'placeholder': 6
# total bits: 11
}
# pooling layer subnet
pooling_subnet = '16.0/5'
# fully-connected layer fields
fullyconnected_fields = {
'num_of_neurons': 11,
# total bits: 11
}
# fully-connected layer subnet
fullyconnected_subnet = '24.0/5'
# disabled layer fields
disabled_fields = {
'disabled': 11,
}
# disabled layer subnet
disabled_subnet = '32.0/5'
return {
'conv': ConvLayer(conv_subnet,conv_fields),
'pooling': PoolingLayer(pooling_subnet, pooling_fields),
'full': FullyConnectedLayer(fullyconnected_subnet, fullyconnected_fields),
'disabled': DisabledLayer(disabled_subnet, disabled_fields)
}
class BaseCNNLayer:
"""
BaseCNNLayer class
"""
def __init__(self, str_subnet, fields):
"""
constructor
:param str_subnet: subnet string, e.g. 127.0.0.1/24
:type str_subnet: string
:param fields: a dict of (field_name, num_of_bits) pair
:type fields: dict
"""
self.str_subnet = str_subnet
self.fields = fields
self.subnet = parse_subnet_str(str_subnet)
self.ip_structure = IPStructure(fields)
self.encoder = Encoder(self.ip_structure, self.subnet)
self.decoder = Decoder()
def encode_2_interface(self, field_values):
"""
encode filed values to an IP interface
:param field_values: field values
:type field_values: a dict of (field_name, field_value) pairs
:return: the layer interface
:rtype: Interface
"""
interface = self.encoder.encode_2_interface(field_values)
return interface
def decode_2_field_values(self, interface):
"""
decode an IP interface to field values
:param interface: an IP interface
:type interface: Interface
:return: a dict of (field_name, field_value) pairs
:rtype: dict
"""
field_values = self.decoder.decode_2_field_values(interface)
return field_values
def generate_random_interface(self):
"""
generate an IP interface with random settings
:rtype: Interface
:return: an IP interface
"""
field_values = {}
for field_name in self.fields:
num_of_bits = self.fields[field_name]
max_value = max_decimal_value_of_binary(num_of_bits)
rand_value = np.random.randint(0, max_value+1)
field_values[field_name] = rand_value
return self.encode_2_interface(field_values)
def check_interface_in_type(self, interface):
"""
check whether the interface belongs to this type
:param interface: an IP interface
:type interface: Interface
:return: boolean
:rtype: bool
"""
return self.subnet.check_ip_in_subnet(interface.ip)
class ConvLayer(BaseCNNLayer):
"""
ConvLayer class
"""
def __init__(self, str_subnet=None, fields=None):
"""
constructor
:param str_subnet: subnet string, e.g. 127.0.0.1/24
:type str_subnet: string
:param fields: a dict of (field_name, num_of_bits) pair
:type fields: dict
"""
if str_subnet is None:
str_subnet = CONV_SUBNET
if fields is None:
fields = CONV_FIELDS
super(ConvLayer, self).__init__(str_subnet, fields)
class PoolingLayer(BaseCNNLayer):
"""
PoolingLayer class
"""
def __init__(self, str_subnet=None, fields=None):
"""
constructor
:param str_subnet: subnet string, e.g. 127.0.0.1/24
:type str_subnet: string
:param fields: a dict of (field_name, num_of_bits) pair
:type fields: dict
"""
if str_subnet is None:
str_subnet = POOLING_SUBNET
if fields is None:
fields = POOLING_FIELDS
super(PoolingLayer, self).__init__(str_subnet, fields)
class FullyConnectedLayer(BaseCNNLayer):
"""
FullyConnectedLayer class
"""
def __init__(self, str_subnet=None, fields=None):
"""
constructor
:param str_subnet: subnet string, e.g. 127.0.0.1/24
:type str_subnet: string
:param fields: a dict of (field_name, num_of_bits) pair
:type fields: dict
"""
if str_subnet is None:
str_subnet = FULLYCONNECTED_SUBNET
if fields is None:
fields = FULLYCONNECTED_FIELDS
super(FullyConnectedLayer, self).__init__(str_subnet, fields)
class DisabledLayer(BaseCNNLayer):
"""
DisabledLayer class
"""
def __init__(self, str_subnet=None, fields=None):
"""
constructor
:param str_subnet: subnet string, e.g. 127.0.0.1/24
:type str_subnet: string
:param fields: a dict of (field_name, num_of_bits) pair
:type fields: dict
"""
if str_subnet is None:
str_subnet = DISABLED_SUBNET
if fields is None:
fields = DISABLED_FIELDS
super(DisabledLayer, self).__init__(str_subnet, fields)
|
[
"ipec.ip.core.IPStructure",
"ipec.ip.core.max_decimal_value_of_binary",
"ipec.ip.core.parse_subnet_str",
"numpy.random.randint",
"ipec.ip.encoder.Encoder",
"ipec.ip.decoder.Decoder"
] |
[((3939, 3967), 'ipec.ip.core.parse_subnet_str', 'parse_subnet_str', (['str_subnet'], {}), '(str_subnet)\n', (3955, 3967), False, 'from ipec.ip.core import parse_subnet_str\n'), ((3996, 4015), 'ipec.ip.core.IPStructure', 'IPStructure', (['fields'], {}), '(fields)\n', (4007, 4015), False, 'from ipec.ip.core import IPStructure\n'), ((4039, 4078), 'ipec.ip.encoder.Encoder', 'Encoder', (['self.ip_structure', 'self.subnet'], {}), '(self.ip_structure, self.subnet)\n', (4046, 4078), False, 'from ipec.ip.encoder import Encoder\n'), ((4102, 4111), 'ipec.ip.decoder.Decoder', 'Decoder', ([], {}), '()\n', (4109, 4111), False, 'from ipec.ip.decoder import Decoder\n'), ((5193, 5233), 'ipec.ip.core.max_decimal_value_of_binary', 'max_decimal_value_of_binary', (['num_of_bits'], {}), '(num_of_bits)\n', (5220, 5233), False, 'from ipec.ip.core import max_decimal_value_of_binary\n'), ((5259, 5294), 'numpy.random.randint', 'np.random.randint', (['(0)', '(max_value + 1)'], {}), '(0, max_value + 1)\n', (5276, 5294), True, 'import numpy as np\n')]
|
"""
This file holds common functions across all database processing such as
calculating statistics.
"""
import numpy as np
from src import em_constants as emc
def is_outlier(wav, lower, upper):
"""
Checks if an audio sample is an outlier. Bounds are inclusive.
:param wav: The audio time series data points
:param lower: The lower bound
:param upper: The upper bound
:return: Boolean
"""
return False if lower <= len(wav) <= upper else True
def get_label(filename, delimiter, index, db_emo_map):
"""
Gets the k-hot encoded label from a sample's filename.
:param filename: The sample's filename
:param delimiter: The delimiter used in the filename
:param index: Where in the filename the label/emotion is located
:param db_emo_map: The database-specific emotion mapping
:return: The label k-hot encoded to this program's standard emotion map or
False if the label doesn't map to the standard emotions
"""
label = filename.split(delimiter)[index]
standard_emotion = db_emo_map[label]
emotion_id = [emc.EMOTION_MAP[standard_emotion]]
return k_hot_encode_label(emotion_id)
def repr_label(label):
"""
Represents a label in a filename-friendly format. Mostly used in the
"read_to_melspecgram()" function to write out labels in the filename.
Sample input:
[1. 0. 0. 0. 0. 0. 0.]
Sample output:
"1_0_0_0_0_0_0"
:param label: Numpy array representing the k-hot encoded label
:return: String representation of the label
"""
return "_".join(str(emo) for emo in label)
def k_hot_encode_label(label):
"""
K-hot encodes a label. Takes a list of emotion IDs and returns a list
encoding the most voted for emotion.
Sample input:
[0, 1, 2, 0, 6, 2]
Sample output:
[1, 0, 1, 0, 0, 0, 0]
:param label: List of labels to encode
:return: List of k-hot encoded labels or False if the label is unused
"""
# If there's only one label/vote, then use the quicker method of encoding
if len(label) == 1:
return _one_hot_encode_label(label)
# Convert the emotion numbers into an array where the index is the emotion
# and the value is the number of votes for that emotion
unique, counts = np.unique(label, return_counts=True)
k_hot_label = np.zeros(emc.NUM_EMOTIONS)
for emo_index, emo_count in zip(unique, counts):
k_hot_label[emo_index] = emo_count
# Only count the emotions with the highest amount of votes
k_hot_label = k_hot_label / np.max(k_hot_label)
k_hot_label = np.floor(k_hot_label).astype(int)
# If they're all zero, then this sample doesn't fit with the set of labels
# that we're considering so drop it
if not np.any(k_hot_label):
print("No usable label.")
return False
return k_hot_label
def _one_hot_encode_label(label):
"""
One hot encodes a label. Private function to quickly one-hot encode a label.
Sample input:
[4]
Sample output:
[0, 0, 0, 0, 1, 0, 0]
:param label: A list with one label (length is one)
:return: One-hot encoding of the label
"""
one_hot_label = np.zeros(emc.NUM_EMOTIONS, dtype=int)
one_hot_label[label[0]] = 1
return one_hot_label
def inverse_k_hot_encode_label(k_hot_label):
"""
Inverses a k-hot encoded label back into emotion ids.
Sample input:
[1, 0, 0, 0, 1, 0, 0]
Sample output:
[0, 4]
:param k_hot_label: A list of the k-hot encoded label
:return: A list of the emotion ids in the label
"""
return np.where(k_hot_label == 1)[0]
|
[
"numpy.floor",
"numpy.zeros",
"numpy.any",
"numpy.max",
"numpy.where",
"numpy.unique"
] |
[((2301, 2337), 'numpy.unique', 'np.unique', (['label'], {'return_counts': '(True)'}), '(label, return_counts=True)\n', (2310, 2337), True, 'import numpy as np\n'), ((2356, 2382), 'numpy.zeros', 'np.zeros', (['emc.NUM_EMOTIONS'], {}), '(emc.NUM_EMOTIONS)\n', (2364, 2382), True, 'import numpy as np\n'), ((3212, 3249), 'numpy.zeros', 'np.zeros', (['emc.NUM_EMOTIONS'], {'dtype': 'int'}), '(emc.NUM_EMOTIONS, dtype=int)\n', (3220, 3249), True, 'import numpy as np\n'), ((2575, 2594), 'numpy.max', 'np.max', (['k_hot_label'], {}), '(k_hot_label)\n', (2581, 2594), True, 'import numpy as np\n'), ((2778, 2797), 'numpy.any', 'np.any', (['k_hot_label'], {}), '(k_hot_label)\n', (2784, 2797), True, 'import numpy as np\n'), ((3634, 3660), 'numpy.where', 'np.where', (['(k_hot_label == 1)'], {}), '(k_hot_label == 1)\n', (3642, 3660), True, 'import numpy as np\n'), ((2613, 2634), 'numpy.floor', 'np.floor', (['k_hot_label'], {}), '(k_hot_label)\n', (2621, 2634), True, 'import numpy as np\n')]
|