| code (string, 22 – 1.05M chars) | apis (list, 1 – 3.31k items) | extract_api (string, 75 – 3.25M chars) |
|---|---|---|
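Each row below pairs one source file (`code`) with the fully qualified APIs it calls (`apis`) and per-call extraction records (`extract_api`). An `extract_api` cell is a Python-literal list of tuples that appear to carry the call's character span, the qualified API name, the callee expression, the literalized arguments, the call text, and the originating import line. A minimal sketch of decoding one such cell, assuming it is available as the string `cell` (the sample record is copied from the first row below):

```python
import ast

# Hypothetical: `cell` holds a single extract_api value, copied here from the first row below.
cell = r"[((299, 347), 'requests.get', 'rqc.get', (['INPUT_DATA_SERVER_LOCATION'], {'stream': '(True)'}), '(INPUT_DATA_SERVER_LOCATION, stream=True)\n', (306, 347), True, 'import requests as rqc\n')]"

# Parse the Python-literal list and pull out the span and qualified name of each recorded call.
for record in ast.literal_eval(cell):
    (start, end), qualified_name = record[0], record[1]
    print(qualified_name, start, end)  # -> requests.get 299 347
```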
# -*- coding: utf-8 -*-
import logging
import requests as rqc
from flask_restful import Resource  # the flask.ext.* namespace was removed; import flask_restful directly
from flask import Response, request
from flask import stream_with_context
INPUT_DATA_SERVER_LOCATION = 'http://dataserver:3000/'
class UploadResource(Resource):
def get(self):
req = rqc.get(INPUT_DATA_SERVER_LOCATION, stream=True)
return Response(
stream_with_context(req.iter_content()),
content_type=req.headers['content-type']
)
def post(self):
"""
Stream input to data file server.
:return:
"""
logging.debug('UPLOAD POST')
req = rqc.post(INPUT_DATA_SERVER_LOCATION + 'data',
data=request.stream.read(),  # forward the raw body bytes; json= cannot serialize bytes
stream=True)
return Response(
stream_with_context(req.iter_content()),
content_type=req.headers['content-type']
)
|
[
"logging.debug",
"requests.get",
"flask.request.stream.read"
] |
[((299, 347), 'requests.get', 'rqc.get', (['INPUT_DATA_SERVER_LOCATION'], {'stream': '(True)'}), '(INPUT_DATA_SERVER_LOCATION, stream=True)\n', (306, 347), True, 'import requests as rqc\n'), ((601, 629), 'logging.debug', 'logging.debug', (['"""UPLOAD POST"""'], {}), "('UPLOAD POST')\n", (614, 629), False, 'import logging\n'), ((719, 740), 'flask.request.stream.read', 'request.stream.read', ([], {}), '()\n', (738, 740), False, 'from flask import Response, request\n')]
|
import torch, add_path
import numpy as np
from baselines.all_baselines import Poly2SLS, Vanilla2SLS, DirectNN, \
GMM, DeepIV, AGMM
import os
import tensorflow
from MMR_IVs.util import ROOT_PATH, load_data
import random
random.seed(527)
def eval_model(model, test):
g_pred_test = model.predict(test.x)
mse = float(((g_pred_test - test.g) ** 2).mean())
return mse
def save_model(model, save_path, test):
g_pred = model.predict(test.x)
np.savez(save_path, x=test.w, y=test.y, g_true=test.g, g_hat=g_pred)
def run_experiment(scenario_name,mid,repid, num_reps=10, seed=527,training=False):
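# mid selects a single baseline from the methods list below; repid selects the single repetition to run when training=True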
# set random seed
torch.manual_seed(seed)
np.random.seed(seed)
tensorflow.set_random_seed(seed)
train, dev, test = load_data(ROOT_PATH + "/data/mendelian/" + scenario_name+'.npz')
# result folder
folder = ROOT_PATH + "/results/mendelian/"+scenario_name+"/"
os.makedirs(folder, exist_ok=True)
means = []
times = []
for rep in range(num_reps):
# Not all methods are applicable in all scenarios
methods = []
# baseline methods
methods += [("DirectNN", DirectNN())]
methods += [("Vanilla2SLS", Vanilla2SLS())]
methods += [("Poly2SLS", Poly2SLS())]
methods += [("GMM", GMM(g_model="2-layer", n_steps=20))]
methods += [("AGMM", AGMM())]
methods += [("DeepIV", DeepIV())]
if training:
if rep < repid:
continue
elif rep >repid:
break
else:
pass
for method_name, method in methods[mid:mid+1]:
print("Running " + method_name +" " + str(rep))
file_name = "%s_%d.npz" % (method_name, rep)
save_path = os.path.join(folder, file_name)
model, time = method.fit(train.x, train.y, train.z, None)
np.save(folder+"%s_%d_time.npy" % (method_name, rep),time)
save_model(model, save_path, test)
test_mse = eval_model(model, test)
model_type_name = type(model).__name__
print("Test MSE of %s: %f" % (model_type_name, test_mse))
else:
means2 = []
times2 = []
for method_name, method in methods:
# print("Running " + method_name +" " + str(rep))
file_name = "%s_%d.npz" % (method_name, rep)
save_path = os.path.join(folder, file_name)
if os.path.exists(save_path):
res = np.load(save_path)
mse = float(((res['g_hat'] - res['g_true']) ** 2).mean())
# print('mse: {}'.format(mse))
means2 += [mse]
else:
print(save_path, 'does not exist')
time_path = folder+"%s_%d_time.npy" % (method_name, rep)
if os.path.exists(time_path):
res = np.load(time_path)
times2 += [res]
else:
print(time_path, 'does not exist')
if len(means2) == len(methods):
means += [means2]
if len(times2) == len(methods):
times += [times2]
#print('means',np.mean(np.array(means),axis=0))
#print('std',np.std(np.array(means),axis=0))
return means,times
if __name__ == "__main__":
scenarios = ["mendelian_{}_{}_{}".format(s, i, j) for s in [8,16,32] for i,j in [[1,1]]]
scenarios += ["mendelian_{}_{}_{}".format(16, i, j) for i, j in [[1, 0.5],[1, 2]]]
scenarios += ["mendelian_{}_{}_{}".format(16, i, j)for i, j in [[0.5, 1],[2, 1]]]
for sce in scenarios:
for mid in range(6):
for repid in range(10):
run_experiment(sce, mid, repid, training=True)
rows = []
for i in range(len(scenarios)):
s = scenarios[i]
means,times = run_experiment(s,0,0,training=False)
mean = np.mean(means,axis=0)
std = np.std(means,axis=0)
rows += [["({},{:.4f}) +- ({:.3f},{:.3f})".format(s,mean[j],std[j],std[j]) for j in range(len(mean))]]
print('time: ',np.mean(times,axis=0),np.std(times,axis=0))
# methods = np.array(["DirectNN","Vanilla2SLS","Poly2SLS","GMM","AGMM","DeepIV"])[:,None]
rows = np.array(rows)
#rows = np.vstack((methods,rows))
print('addplot+[mark=*,error bars/.cd, y dir=both,y explicit] coordinates'.join(['{'+'\n'.join(e)+'};\n' for e in rows.T]))
print('Tabulate Table:')
# print(tabulate(np.vstack((np.append([""],scenarios),rows)), headers='firstrow',tablefmt='latex'))
|
[
"numpy.load",
"numpy.random.seed",
"numpy.mean",
"MMR_IVs.util.load_data",
"os.path.join",
"numpy.std",
"os.path.exists",
"baselines.all_baselines.DeepIV",
"tensorflow.set_random_seed",
"random.seed",
"baselines.all_baselines.DirectNN",
"numpy.save",
"torch.manual_seed",
"baselines.all_baselines.Poly2SLS",
"baselines.all_baselines.GMM",
"baselines.all_baselines.Vanilla2SLS",
"numpy.savez",
"baselines.all_baselines.AGMM",
"os.makedirs",
"numpy.array"
] |
[((223, 239), 'random.seed', 'random.seed', (['(527)'], {}), '(527)\n', (234, 239), False, 'import random\n'), ((460, 528), 'numpy.savez', 'np.savez', (['save_path'], {'x': 'test.w', 'y': 'test.y', 'g_true': 'test.g', 'g_hat': 'g_pred'}), '(save_path, x=test.w, y=test.y, g_true=test.g, g_hat=g_pred)\n', (468, 528), True, 'import numpy as np\n'), ((640, 663), 'torch.manual_seed', 'torch.manual_seed', (['seed'], {}), '(seed)\n', (657, 663), False, 'import torch, add_path\n'), ((668, 688), 'numpy.random.seed', 'np.random.seed', (['seed'], {}), '(seed)\n', (682, 688), True, 'import numpy as np\n'), ((693, 725), 'tensorflow.set_random_seed', 'tensorflow.set_random_seed', (['seed'], {}), '(seed)\n', (719, 725), False, 'import tensorflow\n'), ((750, 816), 'MMR_IVs.util.load_data', 'load_data', (["(ROOT_PATH + '/data/mendelian/' + scenario_name + '.npz')"], {}), "(ROOT_PATH + '/data/mendelian/' + scenario_name + '.npz')\n", (759, 816), False, 'from MMR_IVs.util import ROOT_PATH, load_data\n'), ((905, 939), 'os.makedirs', 'os.makedirs', (['folder'], {'exist_ok': '(True)'}), '(folder, exist_ok=True)\n', (916, 939), False, 'import os\n'), ((4325, 4339), 'numpy.array', 'np.array', (['rows'], {}), '(rows)\n', (4333, 4339), True, 'import numpy as np\n'), ((3984, 4006), 'numpy.mean', 'np.mean', (['means'], {'axis': '(0)'}), '(means, axis=0)\n', (3991, 4006), True, 'import numpy as np\n'), ((4020, 4041), 'numpy.std', 'np.std', (['means'], {'axis': '(0)'}), '(means, axis=0)\n', (4026, 4041), True, 'import numpy as np\n'), ((4175, 4197), 'numpy.mean', 'np.mean', (['times'], {'axis': '(0)'}), '(times, axis=0)\n', (4182, 4197), True, 'import numpy as np\n'), ((4197, 4218), 'numpy.std', 'np.std', (['times'], {'axis': '(0)'}), '(times, axis=0)\n', (4203, 4218), True, 'import numpy as np\n'), ((1142, 1152), 'baselines.all_baselines.DirectNN', 'DirectNN', ([], {}), '()\n', (1150, 1152), False, 'from baselines.all_baselines import Poly2SLS, Vanilla2SLS, DirectNN, GMM, DeepIV, AGMM\n'), ((1191, 1204), 'baselines.all_baselines.Vanilla2SLS', 'Vanilla2SLS', ([], {}), '()\n', (1202, 1204), False, 'from baselines.all_baselines import Poly2SLS, Vanilla2SLS, DirectNN, GMM, DeepIV, AGMM\n'), ((1240, 1250), 'baselines.all_baselines.Poly2SLS', 'Poly2SLS', ([], {}), '()\n', (1248, 1250), False, 'from baselines.all_baselines import Poly2SLS, Vanilla2SLS, DirectNN, GMM, DeepIV, AGMM\n'), ((1281, 1315), 'baselines.all_baselines.GMM', 'GMM', ([], {'g_model': '"""2-layer"""', 'n_steps': '(20)'}), "(g_model='2-layer', n_steps=20)\n", (1284, 1315), False, 'from baselines.all_baselines import Poly2SLS, Vanilla2SLS, DirectNN, GMM, DeepIV, AGMM\n'), ((1347, 1353), 'baselines.all_baselines.AGMM', 'AGMM', ([], {}), '()\n', (1351, 1353), False, 'from baselines.all_baselines import Poly2SLS, Vanilla2SLS, DirectNN, GMM, DeepIV, AGMM\n'), ((1387, 1395), 'baselines.all_baselines.DeepIV', 'DeepIV', ([], {}), '()\n', (1393, 1395), False, 'from baselines.all_baselines import Poly2SLS, Vanilla2SLS, DirectNN, GMM, DeepIV, AGMM\n'), ((1775, 1806), 'os.path.join', 'os.path.join', (['folder', 'file_name'], {}), '(folder, file_name)\n', (1787, 1806), False, 'import os\n'), ((1914, 1975), 'numpy.save', 'np.save', (["(folder + '%s_%d_time.npy' % (method_name, rep))", 'time'], {}), "(folder + '%s_%d_time.npy' % (method_name, rep), time)\n", (1921, 1975), True, 'import numpy as np\n'), ((2469, 2500), 'os.path.join', 'os.path.join', (['folder', 'file_name'], {}), '(folder, file_name)\n', (2481, 2500), False, 'import os\n'), ((2520, 2545), 'os.path.exists', 
'os.path.exists', (['save_path'], {}), '(save_path)\n', (2534, 2545), False, 'import os\n'), ((2922, 2947), 'os.path.exists', 'os.path.exists', (['time_path'], {}), '(time_path)\n', (2936, 2947), False, 'import os\n'), ((2573, 2591), 'numpy.load', 'np.load', (['save_path'], {}), '(save_path)\n', (2580, 2591), True, 'import numpy as np\n'), ((2975, 2993), 'numpy.load', 'np.load', (['time_path'], {}), '(time_path)\n', (2982, 2993), True, 'import numpy as np\n')]
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
import discord
from classes.Plugin import Plugin
NAME = "Status"
DESCRIPTION = "Change the bot's status and the game it plays on Discord"
USAGE = {}
class StatusPlugin(Plugin):
def __init__(self, cdb):
super().__init__(cdb)
self.status_dict = {"online": discord.Status.online,
"offline": discord.Status.offline,
"idle": discord.Status.idle,
"dnd": discord.Status.do_not_disturb,
"do_not_disturb": discord.Status.do_not_disturb,
"invisible": discord.Status.invisible}
self.status = None
self.game = None
cdb.reserve_keywords(["status", "game"], "Status")
cdb.add_plugin_description(DESCRIPTION, NAME)
cdb.add_plugin_usage(USAGE, NAME)
async def on_message(self, message, cmd):
if not cmd.triggered \
or cmd.action not in ["status", "game"]:
return
if not self.cdb.isop_user(message.author):
await message.channel.send("You don't have the right to do that.")
self.cdb.log_warn("Changing bot status requested by NON-OP %s, FAILED" % (str(cmd.author)), message)
else:
if cmd.action == "status":
if len(cmd.args) == 0:
await message.channel.send("Try with an argument for this command next time.")
await message.channel.send("Valid arguments: online, offline, idle, dnd, invisible.")
elif cmd.args[0].lower() in self.status_dict:
self.cdb.log_info("Change bot's status to %s requested by %s" % (cmd.args[0].lower(), str(cmd.author)), message)
self.status = self.status_dict[cmd.args[0].lower()]
else:
await message.channel.send("It's not a valid argument.")
await message.channel.send("Valid arguments: online, offline, idle, dnd, invisible.")
elif cmd.action == "game":
if len(cmd.args) == 0:
self.game = None
self.cdb.log_info("Erasing bot's game requested by %s" % (str(cmd.author)), message)
else:
self.game = discord.Game(name=message.content[6:])
self.cdb.log_info("Change bot's game requested by %s" % (str(cmd.author)), message)
await self.cdb.change_presence(game=self.game, status=self.status)
|
[
"discord.Game"
] |
[((2320, 2358), 'discord.Game', 'discord.Game', ([], {'name': 'message.content[6:]'}), '(name=message.content[6:])\n', (2332, 2358), False, 'import discord\n')]
|
import sys
import os
import shutil
if len(sys.argv) < 2:
print("Failed to obtain GView.hpp location")
exit(1)
header_location = sys.argv[1]
if not os.path.exists(header_location):
print("Path {} does not exist!".format(header_location))
exit(1)
default_version_to_update = 1 # major=0, minor=1, patch=2
if len(sys.argv) > 2:
version_to_update = sys.argv[2]
defined_versions = {
"major": 0,
"minor": 1,
"patch": 2
}
default_version_to_update = defined_versions[version_to_update]
reset_lower_versions = True
header_output_location = header_location+'.new'
found_version = False
with open(header_location, 'r') as f:
with open(header_output_location, 'w') as g:
for line in f:
if line.startswith('#define GVIEW_VERSION '):
version = line.split('#define GVIEW_VERSION ')[
1].strip(' \r\n\t\"')
version_array = version.split('.')
value = int(version_array[default_version_to_update])+1
version_array[default_version_to_update] = value
for i in range(default_version_to_update+1, 3):
version_array[i] = 0
version = "{}.{}.{}".format(
version_array[0], version_array[1], version_array[2])
line = '#define GVIEW_VERSION "{}"\n'.format(version)
found_version = True
os.putenv('GVIEW_VERSION', version)
g.write(line)
if not found_version:
print("Failed to find GVIEW_VERSION")
exit(1)
shutil.move(header_output_location, header_location)
exit(0)
|
[
"os.putenv",
"os.path.exists",
"shutil.move"
] |
[((1593, 1645), 'shutil.move', 'shutil.move', (['header_output_location', 'header_location'], {}), '(header_output_location, header_location)\n', (1604, 1645), False, 'import shutil\n'), ((157, 188), 'os.path.exists', 'os.path.exists', (['header_location'], {}), '(header_location)\n', (171, 188), False, 'import os\n'), ((1453, 1488), 'os.putenv', 'os.putenv', (['"""GVIEW_VERSION"""', 'version'], {}), "('GVIEW_VERSION', version)\n", (1462, 1488), False, 'import os\n')]
|
try:
from pyspark import SparkContext, SparkConf
from pyspark.sql import SparkSession
import pyspark.sql.functions as f
# from operator import add
except Exception as e:
print(e)
## http://www.hongyusu.com/imt/technology/spark-via-python-basic-setup-count-lines-and-word-counts.html
def push_mongo():
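# Read CSV data from /volume/data and append it to MongoDB through the Spark connector.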
spark = SparkSession \
.builder \
.appName("Push to MongoDB") \
.master("spark://master:7077") \
.config("spark.mongodb.input.uri", "mongodb://root:password@mongo/test.coll?authSource=admin") \
.config("spark.mongodb.output.uri", "mongodb://root:password@mongo/test.coll?authSource=admin") \
.config('spark.jars.packages', 'org.mongodb.spark:mongo-spark-connector_2.11:2.4.0')\
.getOrCreate()
sc = spark.sparkContext
sc.setLogLevel('WARN')
# Reading Data from volume
acc_mongo=spark.read.csv("/volume/data")
#Show Mongo data
acc_mongo.show()
# Store data in MongoDB
acc_mongo.write.format("com.mongodb.spark.sql.DefaultSource").mode("append").save()
# End the Spark Context
spark.stop()
if __name__ == "__main__":
push_mongo()
|
[
"pyspark.sql.SparkSession.builder.appName"
] |
[((333, 380), 'pyspark.sql.SparkSession.builder.appName', 'SparkSession.builder.appName', (['"""Push to MongoDB"""'], {}), "('Push to MongoDB')\n", (361, 380), False, 'from pyspark.sql import SparkSession\n')]
|
from typing import List
from tudelft_utilities_logging.Reporter import Reporter
from geniusweb.deadline.Deadline import Deadline
from geniusweb.protocol.session.SessionProtocol import SessionProtocol
from geniusweb.protocol.session.SessionSettings import SessionSettings
from geniusweb.protocol.session.TeamInfo import TeamInfo
from geniusweb.protocol.tournament.Team import Team
from geniusweb.references.PartyWithProfile import PartyWithProfile
from geniusweb.voting.VotingEvaluator import VotingEvaluator
class MOPACSettings (SessionSettings):
'''
Settings for MOPAC negotiation. In MOPAC, each party may get a "power"
parameter containing a natural number ≥ 1.
'''
def __init__(self, participants:List[TeamInfo] ,
deadline:Deadline ,
votingevaluator:VotingEvaluator):
'''
@param participants the list of {@link PartyWithProfile} in clockwise
order. There must be at least 2 to run the MOPAC
protocol. This is not tested in the constructor
because this can be initialized with less, for use in
TournamentSettings.
@param deadline the {@link Deadline} for the negotiation
@param votingeval the {@link VotingEvaluator} to use.
'''
self._participants = participants
self._deadline = deadline
if participants is None or deadline is None or votingevaluator is None:
raise ValueError(
"participants, deadline and votingevaluator must not be None")
self._votingevaluator = votingevaluator
self._checkTeams()
def getMaxRunTime(self)->float:
return self._deadline.getDuration() / 1000.
def getProtocol(self, logger:Reporter) -> SessionProtocol :
from geniusweb.protocol.session.mopac.MOPACState import MOPACState
from geniusweb.protocol.session.mopac.MOPAC import MOPAC
return MOPAC(MOPACState(None, [], None, self, {}), logger)
def getTeams(self ) -> List[TeamInfo] :
return list(self._participants)
def getParticipants(self ) -> List[TeamInfo] :
'''
bit hacky, same as getTeams, for deserialization...
'''
return list(self._participants)
def getDeadline(self)-> Deadline :
'''
@return the deadline for this negotiation
'''
return self._deadline
def getAllParties(self)->List[PartyWithProfile] :
return [ particip.getParties()[0] for particip in self._participants]
def getVotingEvaluator(self)->VotingEvaluator :
'''
@return a class that allows us to evaluate the voting results in
different ways, selectable by the user.
'''
return self._votingevaluator
def With(self, team:TeamInfo ) -> "MOPACSettings" :
if team.getSize() != 1:
raise ValueError(
"Added party must have one party but got " + str(team))
newparts:List[TeamInfo] = list(self._participants)
newparts.append(team)
return MOPACSettings(newparts, self._deadline, self._votingevaluator)
def __repr__(self)->str:
return "MOPACSettings[" + str(self._participants) + "," +\
str(self._deadline) + "," + \
type(self._votingevaluator).__name__ + "]";
def getTeamSize(self)->int:
return 1;
def __hash__(self):
return hash((tuple(self._participants), self._deadline, self._votingevaluator))
def __eq__(self, other):
return isinstance(other, self.__class__)\
and self._participants == other._participants \
and self._deadline == other._deadline \
and self._votingevaluator == other._votingevaluator
def _checkTeams(self):
'''
@throws IllegalArgumentException if teams have improper power settings.
'''
for team in self._participants:
if team.getSize() != 1:
raise ValueError("All teams must be size 1 but found " + str(team))
party = team.getParties()[0]
if 'power' in party.getParty().getParameters().getParameters():
power = party.getParty().getParameters().get("power")
if not isinstance(power, int):
raise ValueError(
"parameter 'power' for party" + str(party)
+ " must be integer but found " + str(power))
if power < 1:
raise ValueError(
"parameter 'power' for party" + str(party)
+ " must be >=1 but found " + str(power))
|
[
"geniusweb.protocol.session.mopac.MOPACState.MOPACState"
] |
[((1800, 1836), 'geniusweb.protocol.session.mopac.MOPACState.MOPACState', 'MOPACState', (['None', '[]', 'None', 'self', '{}'], {}), '(None, [], None, self, {})\n', (1810, 1836), False, 'from geniusweb.protocol.session.mopac.MOPACState import MOPACState\n')]
|
import streamlit as st
import plotly_express as px
import pandas as pd
# configuration
st.set_option('deprecation.showfileUploaderEncoding', False)
# title of the app
st.title("Data Visualization App")
# Add a sidebar
st.sidebar.subheader("Visualization Settings")
# Setup file upload
uploaded_file = st.sidebar.file_uploader(
label="Upload your CSV or Excel file. (200MB max)",
type=['csv', 'xlsx'])
global df
if uploaded_file is not None:
print(uploaded_file)
print("hello")
try:
df = pd.read_csv(uploaded_file)
except Exception as e:
print(e)
df = pd.read_excel(uploaded_file)
global numeric_columns
global non_numeric_columns
try:
st.write(df)
numeric_columns = list(df.select_dtypes(['float', 'int']).columns)
non_numeric_columns = list(df.select_dtypes(['object']).columns)
non_numeric_columns.append(None)
print(non_numeric_columns)
except Exception as e:
print(e)
st.write("Please upload file to the application.")
# add a select widget to the side bar
chart_select = st.sidebar.selectbox(
label="Select the chart type",
options=['Scatterplots', 'Lineplots', 'Histogram', 'Boxplot']
)
if chart_select == 'Scatterplots':
st.sidebar.subheader("Scatterplot Settings")
try:
x_values = st.sidebar.selectbox('X axis', options=numeric_columns)
y_values = st.sidebar.selectbox('Y axis', options=numeric_columns)
color_value = st.sidebar.selectbox("Color", options=non_numeric_columns)
plot = px.scatter(data_frame=df, x=x_values, y=y_values, color=color_value)
# display the chart
st.plotly_chart(plot)
except Exception as e:
print(e)
if chart_select == 'Lineplots':
st.sidebar.subheader("Line Plot Settings")
try:
x_values = st.sidebar.selectbox('X axis', options=numeric_columns)
y_values = st.sidebar.selectbox('Y axis', options=numeric_columns)
color_value = st.sidebar.selectbox("Color", options=non_numeric_columns)
plot = px.line(data_frame=df, x=x_values, y=y_values, color=color_value)
st.plotly_chart(plot)
except Exception as e:
print(e)
if chart_select == 'Histogram':
st.sidebar.subheader("Histogram Settings")
try:
x = st.sidebar.selectbox('Feature', options=numeric_columns)
bin_size = st.sidebar.slider("Number of Bins", min_value=10,
max_value=100, value=40)
color_value = st.sidebar.selectbox("Color", options=non_numeric_columns)
plot = px.histogram(x=x, data_frame=df, color=color_value)
st.plotly_chart(plot)
except Exception as e:
print(e)
if chart_select == 'Boxplot':
st.sidebar.subheader("Boxplot Settings")
try:
y = st.sidebar.selectbox("Y axis", options=numeric_columns)
x = st.sidebar.selectbox("X axis", options=non_numeric_columns)
color_value = st.sidebar.selectbox("Color", options=non_numeric_columns)
plot = px.box(data_frame=df, y=y, x=x, color=color_value)
st.plotly_chart(plot)
except Exception as e:
print(e)
|
[
"streamlit.sidebar.slider",
"plotly_express.histogram",
"streamlit.sidebar.subheader",
"streamlit.set_option",
"streamlit.plotly_chart",
"pandas.read_csv",
"streamlit.title",
"streamlit.write",
"pandas.read_excel",
"streamlit.sidebar.selectbox",
"plotly_express.scatter",
"plotly_express.box",
"plotly_express.line",
"streamlit.sidebar.file_uploader"
] |
[((88, 148), 'streamlit.set_option', 'st.set_option', (['"""deprecation.showfileUploaderEncoding"""', '(False)'], {}), "('deprecation.showfileUploaderEncoding', False)\n", (101, 148), True, 'import streamlit as st\n'), ((169, 203), 'streamlit.title', 'st.title', (['"""Data Visualization App"""'], {}), "('Data Visualization App')\n", (177, 203), True, 'import streamlit as st\n'), ((221, 267), 'streamlit.sidebar.subheader', 'st.sidebar.subheader', (['"""Visualization Settings"""'], {}), "('Visualization Settings')\n", (241, 267), True, 'import streamlit as st\n'), ((305, 407), 'streamlit.sidebar.file_uploader', 'st.sidebar.file_uploader', ([], {'label': '"""Upload your CSV or Excel file. (200MB max)"""', 'type': "['csv', 'xlsx']"}), "(label='Upload your CSV or Excel file. (200MB max)',\n type=['csv', 'xlsx'])\n", (329, 407), True, 'import streamlit as st\n'), ((1101, 1219), 'streamlit.sidebar.selectbox', 'st.sidebar.selectbox', ([], {'label': '"""Select the chart type"""', 'options': "['Scatterplots', 'Lineplots', 'Histogram', 'Boxplot']"}), "(label='Select the chart type', options=['Scatterplots',\n 'Lineplots', 'Histogram', 'Boxplot'])\n", (1121, 1219), True, 'import streamlit as st\n'), ((735, 747), 'streamlit.write', 'st.write', (['df'], {}), '(df)\n', (743, 747), True, 'import streamlit as st\n'), ((1266, 1310), 'streamlit.sidebar.subheader', 'st.sidebar.subheader', (['"""Scatterplot Settings"""'], {}), "('Scatterplot Settings')\n", (1286, 1310), True, 'import streamlit as st\n'), ((1774, 1816), 'streamlit.sidebar.subheader', 'st.sidebar.subheader', (['"""Line Plot Settings"""'], {}), "('Line Plot Settings')\n", (1794, 1816), True, 'import streamlit as st\n'), ((2249, 2291), 'streamlit.sidebar.subheader', 'st.sidebar.subheader', (['"""Histogram Settings"""'], {}), "('Histogram Settings')\n", (2269, 2291), True, 'import streamlit as st\n'), ((2758, 2798), 'streamlit.sidebar.subheader', 'st.sidebar.subheader', (['"""Boxplot Settings"""'], {}), "('Boxplot Settings')\n", (2778, 2798), True, 'import streamlit as st\n'), ((562, 588), 'pandas.read_csv', 'pd.read_csv', (['uploaded_file'], {}), '(uploaded_file)\n', (573, 588), True, 'import pandas as pd\n'), ((996, 1046), 'streamlit.write', 'st.write', (['"""Please upload file to the application."""'], {}), "('Please upload file to the application.')\n", (1004, 1046), True, 'import streamlit as st\n'), ((1339, 1394), 'streamlit.sidebar.selectbox', 'st.sidebar.selectbox', (['"""X axis"""'], {'options': 'numeric_columns'}), "('X axis', options=numeric_columns)\n", (1359, 1394), True, 'import streamlit as st\n'), ((1414, 1469), 'streamlit.sidebar.selectbox', 'st.sidebar.selectbox', (['"""Y axis"""'], {'options': 'numeric_columns'}), "('Y axis', options=numeric_columns)\n", (1434, 1469), True, 'import streamlit as st\n'), ((1492, 1550), 'streamlit.sidebar.selectbox', 'st.sidebar.selectbox', (['"""Color"""'], {'options': 'non_numeric_columns'}), "('Color', options=non_numeric_columns)\n", (1512, 1550), True, 'import streamlit as st\n'), ((1566, 1634), 'plotly_express.scatter', 'px.scatter', ([], {'data_frame': 'df', 'x': 'x_values', 'y': 'y_values', 'color': 'color_value'}), '(data_frame=df, x=x_values, y=y_values, color=color_value)\n', (1576, 1634), True, 'import plotly_express as px\n'), ((1671, 1692), 'streamlit.plotly_chart', 'st.plotly_chart', (['plot'], {}), '(plot)\n', (1686, 1692), True, 'import streamlit as st\n'), ((1845, 1900), 'streamlit.sidebar.selectbox', 'st.sidebar.selectbox', (['"""X axis"""'], {'options': 'numeric_columns'}), "('X 
axis', options=numeric_columns)\n", (1865, 1900), True, 'import streamlit as st\n'), ((1920, 1975), 'streamlit.sidebar.selectbox', 'st.sidebar.selectbox', (['"""Y axis"""'], {'options': 'numeric_columns'}), "('Y axis', options=numeric_columns)\n", (1940, 1975), True, 'import streamlit as st\n'), ((1998, 2056), 'streamlit.sidebar.selectbox', 'st.sidebar.selectbox', (['"""Color"""'], {'options': 'non_numeric_columns'}), "('Color', options=non_numeric_columns)\n", (2018, 2056), True, 'import streamlit as st\n'), ((2072, 2137), 'plotly_express.line', 'px.line', ([], {'data_frame': 'df', 'x': 'x_values', 'y': 'y_values', 'color': 'color_value'}), '(data_frame=df, x=x_values, y=y_values, color=color_value)\n', (2079, 2137), True, 'import plotly_express as px\n'), ((2146, 2167), 'streamlit.plotly_chart', 'st.plotly_chart', (['plot'], {}), '(plot)\n', (2161, 2167), True, 'import streamlit as st\n'), ((2313, 2369), 'streamlit.sidebar.selectbox', 'st.sidebar.selectbox', (['"""Feature"""'], {'options': 'numeric_columns'}), "('Feature', options=numeric_columns)\n", (2333, 2369), True, 'import streamlit as st\n'), ((2389, 2463), 'streamlit.sidebar.slider', 'st.sidebar.slider', (['"""Number of Bins"""'], {'min_value': '(10)', 'max_value': '(100)', 'value': '(40)'}), "('Number of Bins', min_value=10, max_value=100, value=40)\n", (2406, 2463), True, 'import streamlit as st\n'), ((2523, 2581), 'streamlit.sidebar.selectbox', 'st.sidebar.selectbox', (['"""Color"""'], {'options': 'non_numeric_columns'}), "('Color', options=non_numeric_columns)\n", (2543, 2581), True, 'import streamlit as st\n'), ((2597, 2648), 'plotly_express.histogram', 'px.histogram', ([], {'x': 'x', 'data_frame': 'df', 'color': 'color_value'}), '(x=x, data_frame=df, color=color_value)\n', (2609, 2648), True, 'import plotly_express as px\n'), ((2657, 2678), 'streamlit.plotly_chart', 'st.plotly_chart', (['plot'], {}), '(plot)\n', (2672, 2678), True, 'import streamlit as st\n'), ((2820, 2875), 'streamlit.sidebar.selectbox', 'st.sidebar.selectbox', (['"""Y axis"""'], {'options': 'numeric_columns'}), "('Y axis', options=numeric_columns)\n", (2840, 2875), True, 'import streamlit as st\n'), ((2888, 2947), 'streamlit.sidebar.selectbox', 'st.sidebar.selectbox', (['"""X axis"""'], {'options': 'non_numeric_columns'}), "('X axis', options=non_numeric_columns)\n", (2908, 2947), True, 'import streamlit as st\n'), ((2970, 3028), 'streamlit.sidebar.selectbox', 'st.sidebar.selectbox', (['"""Color"""'], {'options': 'non_numeric_columns'}), "('Color', options=non_numeric_columns)\n", (2990, 3028), True, 'import streamlit as st\n'), ((3044, 3094), 'plotly_express.box', 'px.box', ([], {'data_frame': 'df', 'y': 'y', 'x': 'x', 'color': 'color_value'}), '(data_frame=df, y=y, x=x, color=color_value)\n', (3050, 3094), True, 'import plotly_express as px\n'), ((3103, 3124), 'streamlit.plotly_chart', 'st.plotly_chart', (['plot'], {}), '(plot)\n', (3118, 3124), True, 'import streamlit as st\n'), ((646, 674), 'pandas.read_excel', 'pd.read_excel', (['uploaded_file'], {}), '(uploaded_file)\n', (659, 674), True, 'import pandas as pd\n')]
|
# coding: utf-8
import os
import shutil
import json
import click
from werkzeug.utils import cached_property
class Context(object):
def __init__(self):
self.config_filename = os.environ.get('RIGIDSEARCH_CONFIG')
@cached_property
def app(self):
from rigidsearch.app import create_app
return create_app(self.config_filename)
pass_ctx = click.make_pass_decorator(Context, ensure=True)
@click.group()
@click.option('--config', type=click.Path(),
help='Path to the config file.')
@pass_ctx
def cli(ctx, config):
if config is not None:
ctx.config_filename = os.path.abspath(config)
@cli.command('index-folder')
@click.argument('config', type=click.File('rb'))
@click.option('--index-path', type=click.Path(),
help='Where to write the index to other than config default.')
@click.option('--save-zip', type=click.File('wb'),
help='Optional a zip file the index should be stored at '
'instead of modifying the index in-place.')
@pass_ctx
def index_folder_cmd(ctx, config, index_path, save_zip):
"""Indexes a path."""
from rigidsearch.search import index_tree, get_index_path
index_path = get_index_path(index_path=index_path, app=ctx.app)
try:
shutil.rmtree(index_path)
except (OSError, IOError):
pass
for event in index_tree(json.load(config), index_zip=save_zip,
index_path=index_path):
click.echo(event)
@cli.command('search')
@click.argument('query')
@click.option('--section', default='generic')
@click.option('--index-path', help='Path to the search index.')
@pass_ctx
def search_cmd(ctx, query, section, index_path):
"""Triggers a search from the command line."""
from rigidsearch.search import get_index, get_index_path
index_path = get_index_path(app=ctx.app)
index = get_index(index_path)
results = index.search(query, section=section)
for result in results['items']:
click.echo('%s (%s)' % (
result['path'],
result['title']
))
@cli.command('devserver')
@click.option('--bind', '-b', default='127.0.0.1:5001')
@pass_ctx
def devserver_cmd(ctx, bind):
"""Runs a local development server."""
parts = bind.split(':', 1)
if len(parts) == 2:
addr, port = parts
elif len(parts) == 1:
addr, port = bind, '5001'
if addr == '':
addr = '127.0.0.1'
ctx.app.run(addr, int(port), debug=True)
@cli.command('run')
@click.option('--bind', '-b', default='127.0.0.1:5001')
@click.option('--workers', '-w', default=1)
@click.option('--timeout', '-t', default=30)
@click.option('--loglevel', default='info')
@click.option('--accesslog', default='-')
@click.option('--errorlog', default='-')
@pass_ctx
def run_cmd(ctx, **options):
"""Runs the http web server."""
from rigidsearch.app import make_production_server
make_production_server(app=ctx.app, options=options).run()
def main():
cli(auto_envvar_prefix='RIGIDSEARCH')
|
[
"rigidsearch.app.make_production_server",
"os.path.abspath",
"json.load",
"click.argument",
"click.option",
"rigidsearch.search.get_index",
"click.echo",
"rigidsearch.app.create_app",
"rigidsearch.search.get_index_path",
"os.environ.get",
"click.make_pass_decorator",
"click.File",
"click.Path",
"shutil.rmtree",
"click.group"
] |
[((376, 423), 'click.make_pass_decorator', 'click.make_pass_decorator', (['Context'], {'ensure': '(True)'}), '(Context, ensure=True)\n', (401, 423), False, 'import click\n'), ((427, 440), 'click.group', 'click.group', ([], {}), '()\n', (438, 440), False, 'import click\n'), ((1514, 1537), 'click.argument', 'click.argument', (['"""query"""'], {}), "('query')\n", (1528, 1537), False, 'import click\n'), ((1539, 1583), 'click.option', 'click.option', (['"""--section"""'], {'default': '"""generic"""'}), "('--section', default='generic')\n", (1551, 1583), False, 'import click\n'), ((1585, 1647), 'click.option', 'click.option', (['"""--index-path"""'], {'help': '"""Path to the search index."""'}), "('--index-path', help='Path to the search index.')\n", (1597, 1647), False, 'import click\n'), ((2115, 2169), 'click.option', 'click.option', (['"""--bind"""', '"""-b"""'], {'default': '"""127.0.0.1:5001"""'}), "('--bind', '-b', default='127.0.0.1:5001')\n", (2127, 2169), False, 'import click\n'), ((2509, 2563), 'click.option', 'click.option', (['"""--bind"""', '"""-b"""'], {'default': '"""127.0.0.1:5001"""'}), "('--bind', '-b', default='127.0.0.1:5001')\n", (2521, 2563), False, 'import click\n'), ((2565, 2607), 'click.option', 'click.option', (['"""--workers"""', '"""-w"""'], {'default': '(1)'}), "('--workers', '-w', default=1)\n", (2577, 2607), False, 'import click\n'), ((2609, 2652), 'click.option', 'click.option', (['"""--timeout"""', '"""-t"""'], {'default': '(30)'}), "('--timeout', '-t', default=30)\n", (2621, 2652), False, 'import click\n'), ((2654, 2696), 'click.option', 'click.option', (['"""--loglevel"""'], {'default': '"""info"""'}), "('--loglevel', default='info')\n", (2666, 2696), False, 'import click\n'), ((2698, 2738), 'click.option', 'click.option', (['"""--accesslog"""'], {'default': '"""-"""'}), "('--accesslog', default='-')\n", (2710, 2738), False, 'import click\n'), ((2740, 2779), 'click.option', 'click.option', (['"""--errorlog"""'], {'default': '"""-"""'}), "('--errorlog', default='-')\n", (2752, 2779), False, 'import click\n'), ((1205, 1255), 'rigidsearch.search.get_index_path', 'get_index_path', ([], {'index_path': 'index_path', 'app': 'ctx.app'}), '(index_path=index_path, app=ctx.app)\n', (1219, 1255), False, 'from rigidsearch.search import get_index, get_index_path\n'), ((1837, 1864), 'rigidsearch.search.get_index_path', 'get_index_path', ([], {'app': 'ctx.app'}), '(app=ctx.app)\n', (1851, 1864), False, 'from rigidsearch.search import get_index, get_index_path\n'), ((1877, 1898), 'rigidsearch.search.get_index', 'get_index', (['index_path'], {}), '(index_path)\n', (1886, 1898), False, 'from rigidsearch.search import get_index, get_index_path\n'), ((190, 226), 'os.environ.get', 'os.environ.get', (['"""RIGIDSEARCH_CONFIG"""'], {}), "('RIGIDSEARCH_CONFIG')\n", (204, 226), False, 'import os\n'), ((330, 362), 'rigidsearch.app.create_app', 'create_app', (['self.config_filename'], {}), '(self.config_filename)\n', (340, 362), False, 'from rigidsearch.app import create_app\n'), ((622, 645), 'os.path.abspath', 'os.path.abspath', (['config'], {}), '(config)\n', (637, 645), False, 'import os\n'), ((472, 484), 'click.Path', 'click.Path', ([], {}), '()\n', (482, 484), False, 'import click\n'), ((1273, 1298), 'shutil.rmtree', 'shutil.rmtree', (['index_path'], {}), '(index_path)\n', (1286, 1298), False, 'import shutil\n'), ((1371, 1388), 'json.load', 'json.load', (['config'], {}), '(config)\n', (1380, 1388), False, 'import json\n'), ((1470, 1487), 'click.echo', 'click.echo', (['event'], {}), 
'(event)\n', (1480, 1487), False, 'import click\n'), ((708, 724), 'click.File', 'click.File', (['"""rb"""'], {}), "('rb')\n", (718, 724), False, 'import click\n'), ((761, 773), 'click.Path', 'click.Path', ([], {}), '()\n', (771, 773), False, 'import click\n'), ((885, 901), 'click.File', 'click.File', (['"""wb"""'], {}), "('wb')\n", (895, 901), False, 'import click\n'), ((1994, 2051), 'click.echo', 'click.echo', (["('%s (%s)' % (result['path'], result['title']))"], {}), "('%s (%s)' % (result['path'], result['title']))\n", (2004, 2051), False, 'import click\n'), ((2914, 2966), 'rigidsearch.app.make_production_server', 'make_production_server', ([], {'app': 'ctx.app', 'options': 'options'}), '(app=ctx.app, options=options)\n', (2936, 2966), False, 'from rigidsearch.app import make_production_server\n')]
|
# Created by kamimura on 2018/07/21.
# Copyright © 2018 kamimura. All rights reserved.
import sys
import datetime
from antlr4 import *
from SIONLexer import SIONLexer
from SIONParser import SIONParser
from SIONVisitor import SIONVisitor
def load(file, encoding: str='utf-8', errors: str='strict') -> object:
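# Parse SION text read from a file object using the ANTLR-generated lexer/parser, then build Python objects with SIONVisitor.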
data = file.read()
if isinstance(data, (bytes, bytearray)):
data = data.decode(encoding, errors)
stream = InputStream(data)
lexer = SIONLexer(stream)
tokens = CommonTokenStream(lexer)
parser = SIONParser(tokens)
tree = parser.si_self()
visitor = SIONVisitor()
return visitor.visit(tree)
def loads(s):
if isinstance(s, (bytes, bytearray)):
s = s.decode()
stream = InputStream(s)
lexer = SIONLexer(stream)
tokens = CommonTokenStream(lexer)
parser = SIONParser(tokens)
tree = parser.si_self()
visitor = SIONVisitor()
return visitor.visit(tree)
def str_esc(s):
for o, n in [('\\', '\\\\'), ('"', '\\"'), ('\n', '\\n'), ('\r', '\\r')]:  # escape backslashes first so escapes added by later replacements are not doubled
s = s.replace(o, n)
return s
def dump(obj, file):
if obj is None:
print('nil', file=file, end='')
elif isinstance(obj, bool):
if obj:
print('true', file=file, end='')
else:
print('false', file=file, end='')
elif isinstance(obj, (int, float)):
print(obj, file=file, end='')
elif isinstance(obj, str):
print(f'"{str_esc(obj)}"', file=file, end='')
elif isinstance(obj, (bytes, bytearray)):
print(f'.Data("{str(obj)[2:-1]}")', file=file, end='')
elif isinstance(obj, datetime.datetime):
print(f'.Date({obj.timestamp()})', file=file, end='')
elif isinstance(obj, (list, tuple)):
print(f'[', file=file, end='')
if len(obj) > 0:
for o in obj[:-1]:
dump(o, file)
print(',', file=file, end='')
dump(obj[-1], file)
print(']', file=file, end='')
elif isinstance(obj, dict):
print('[', file=file, end='')
ks = list(obj.keys())
if len(ks) == 0:
print(':', file=file, end='')
elif len(ks) == 1:
dump(ks[0], file)
print(':', file=file, end='')
dump(obj[ks[0]], file)
else:
for k in ks[:-1]:
dump(k, file)
print(':', file=file, end='')
dump(obj[k], file)
print(',', file=file, end='')
dump(ks[-1], file)
print(':', file=file, end='')
dump(obj[ks[-1]], file)
print(']', file=file, end='')
else:
raise TypeError(
f"Object of type '{obj.__class__.__name__}' is not SION serializable")
def dumps(obj: object):
if obj is None:
return 'nil'
if isinstance(obj, bool):
if obj:
return 'true'
return 'false'
if isinstance(obj, (int, float)):
return str(obj)
if isinstance(obj, str):
return f'"{str_esc(obj)}"'
if isinstance(obj, (bytes, bytearray)):
return f'.Data("{str(obj)[2:-1]}")'
if isinstance(obj, datetime.datetime):
return f'.Date({obj.timestamp()})'
if isinstance(obj, (list, tuple)):
res = '['
if len(obj) > 0:
for o in obj[:-1]:
res += dumps(o) + ','
res += dumps(obj[-1])
res += ']'
return res
if isinstance(obj, dict):
res = '['
ks = list(obj.keys())
if len(ks) == 0:
res += ':'
elif len(ks) == 1:
res += dumps(ks[0]) + ':' + dumps(obj[ks[0]])
else:
for k in ks[:-1]:
res += dumps(k) + ':' + dumps(obj[k]) + ','
res += dumps(ks[-1]) + ':' + dumps(obj[ks[-1]])
res += ']'
return res
raise TypeError(
f"Object of type '{obj.__class__.__name__}' is not SION serializable")
if __name__ == '__main__':
import pprint
if len(sys.argv) > 1:
filename = sys.argv[1]
else:
filename = '../test/t.sion'
with open(filename) as f:
obj = load(f)
pprint.pprint(obj)
with open('../test/output.sion', 'w') as f:
dump(obj, f)
s = '''
[
"array" : [
nil,
true,
1, // Int in decimal
1.0, // Double in decimal
"one",
[1],
["one" : 1.0]
],
"bool" : true,
"data" : .Data("R0lGODlhAQABAIAAAAAAAP///yH5BAEAAAAALAAAAAABAAEAAAIBRAA7"),
"date" : .Date(0x0p+0),
"dictionary" : [
"array" : [],
"bool" : false,
"double" : 0x0p+0,
"int" : 0,
"nil" : nil,
"object" : [:],
"string" : ""
],
"double" : 0x1.518f5c28f5c29p+5, // Double in hexadecimal
"int" : -0x2a, // Int in hexadecimal
"nil" : nil,
"string" : "漢字、カタカナ、ひらがなの入ったstring😇",
"url" : "https://github.com/dankogai/",
nil : "Unlike JSON and Property Lists,",
true : "Yes, SION",
1 : "does accept",
1.0 : "non-String keys.",
[] : "like",
[:] : "Map of ECMAScript."
]
'''
obj = loads(s)
pprint.pprint(obj)
s = dumps(obj)
print(s)
|
[
"SIONParser.SIONParser",
"SIONLexer.SIONLexer",
"pprint.pprint",
"SIONVisitor.SIONVisitor"
] |
[((466, 483), 'SIONLexer.SIONLexer', 'SIONLexer', (['stream'], {}), '(stream)\n', (475, 483), False, 'from SIONLexer import SIONLexer\n'), ((535, 553), 'SIONParser.SIONParser', 'SIONParser', (['tokens'], {}), '(tokens)\n', (545, 553), False, 'from SIONParser import SIONParser\n'), ((596, 609), 'SIONVisitor.SIONVisitor', 'SIONVisitor', ([], {}), '()\n', (607, 609), False, 'from SIONVisitor import SIONVisitor\n'), ((762, 779), 'SIONLexer.SIONLexer', 'SIONLexer', (['stream'], {}), '(stream)\n', (771, 779), False, 'from SIONLexer import SIONLexer\n'), ((831, 849), 'SIONParser.SIONParser', 'SIONParser', (['tokens'], {}), '(tokens)\n', (841, 849), False, 'from SIONParser import SIONParser\n'), ((892, 905), 'SIONVisitor.SIONVisitor', 'SIONVisitor', ([], {}), '()\n', (903, 905), False, 'from SIONVisitor import SIONVisitor\n'), ((4143, 4161), 'pprint.pprint', 'pprint.pprint', (['obj'], {}), '(obj)\n', (4156, 4161), False, 'import pprint\n'), ((5145, 5163), 'pprint.pprint', 'pprint.pprint', (['obj'], {}), '(obj)\n', (5158, 5163), False, 'import pprint\n')]
|
"""Implements widgets to visualize and modify basic fields. (french language)
ASSOCIATION should be updated with custom widgets, since common.abstractDetails will use it.
"""
import datetime
import re
from collections import defaultdict
from typing import List, Any
from PyQt5.QtCore import pyqtSignal, Qt, QPoint
from PyQt5.QtGui import QColor, QPen, QBrush, QIcon
from PyQt5.QtWidgets import (QFrame, QHBoxLayout, QPushButton, QLineEdit, QLabel, QComboBox, QSpinBox, QDoubleSpinBox,
QCheckBox, QCompleter, QGridLayout, QVBoxLayout, QPlainTextEdit, QStyledItemDelegate, QToolTip)
from . import list_views, clear_layout, Icons
from ..Core import formats
class NouveauTelephone(list_views.abstractNewButton):
LABEL = "Ajouter un numéro"
@staticmethod
def IS_TELEPHONE(s: str):
r = re.compile(r'[0-9]{9,10}')
m = r.search(s.replace(' ', ''))
return (m is not None)
def _clear(self):
clear_layout(self.layout())
def enter_edit(self):
self._clear()
line_layout = self.layout()
self.entree = QLineEdit()
self.entree.setObjectName("nouveau-numero-tel")
self.entree.setAlignment(Qt.AlignCenter)
self.entree.setPlaceholderText("Ajouter...")
add = QPushButton()
add.setIcon(QIcon(Icons.Valid))
add.clicked.connect(self.on_add)
self.entree.editingFinished.connect(self.on_add)
line_layout.addWidget(self.entree)
line_layout.addWidget(add)
line_layout.setStretch(0, 3)
line_layout.setStretch(1, 1)
def on_add(self):
num = self.entree.text()
if self.IS_TELEPHONE(num):
self.entree.setPlaceholderText("Ajouter...")
self.data_changed.emit(num)
self._clear()
self.set_button()
else:
self.entree.selectAll()
QToolTip.showText(self.entree.mapToGlobal(
QPoint(0, 10)), "Numéro invalide")
class Tels(list_views.abstractMutableList):
LIST_PLACEHOLDER = "Aucun numéro."
LIST_HEADER = None
BOUTON = NouveauTelephone
def __init__(self, collection: list, is_editable):
collection = self.from_list(collection)
super().__init__(collection, is_editable)
def on_add(self, item):
"""Convert the added item to a pseudo-acces"""
super(Tels, self).on_add(list_views.PseudoAccesCategorie(item))
def set_data(self, collection):
collection = self.from_list(collection)
super(Tels, self).set_data(collection)
def get_data(self):
col = super(Tels, self).get_data()
return [tel.Id for tel in col]
class Duree(QLabel):
"""Display the number of days between two date widgets.
These widgets have to implement a get_data method, which returns a datetime.date"""
def __init__(self, begining, end):
super().__init__()
self.begining = begining
self.end = end
self.begining.data_changed.connect(self.set_data)
self.end.data_changed.connect(self.set_data)
self.set_data()
def set_data(self, *args):
"""set_data can also be called manually to force a refresh"""
db = self.begining.get_data() or formats.DATE_DEFAULT
df = self.end.get_data() or formats.DATE_DEFAULT
jours = max((df - db).days + 1, 0)
self.setText(str(jours) + (jours >= 2 and " jours" or " jour"))
# -------------- Enumerations visualisation --------------
class abstractEnum(QLabel):
VALUE_TO_LABEL = {}
"""Dict. giving label from raw value"""
def set_data(self, value):
self.value = value
self.setText(self.VALUE_TO_LABEL.get(self.value, ""))
def get_data(self):
return self.value
class abstractEnumEditable(QComboBox):
data_changed = pyqtSignal(object)
VALEURS_LABELS = []
"""List of tuples (value, label) or None to add a separator"""
def __init__(self, parent=None):
super().__init__(parent)
self.set_choix(self.VALEURS_LABELS)
self.currentIndexChanged.connect(
lambda i: self.data_changed.emit(self.currentData()))
def set_choix(self, choix):
self.places = {}
for t in choix:
if t:
self.places[t[0]] = self.count()
self.addItem(t[1], userData=t[0])
else:
self.insertSeparator(self.count())
def set_data(self, value):
if value is None:
self.setCurrentIndex(-1)
else:
self.setCurrentIndex(self.places[value])
self.data_changed.emit(self.get_data())
def get_data(self):
return self.currentData()
# -------------------- Commons types --------------------
class DepartementFixe(abstractEnum):
VALUE_TO_LABEL = formats.DEPARTEMENTS
class DepartementEditable(abstractEnumEditable):
VALEURS_LABELS = sorted((i, i + " " + v)
for i, v in formats.DEPARTEMENTS.items())
class SexeFixe(abstractEnum):
VALUE_TO_LABEL = formats.SEXES
class SexeEditable(abstractEnumEditable):
VALEURS_LABELS = sorted((k, v) for k, v in formats.SEXES.items())
class ModePaiementFixe(abstractEnum):
VALUE_TO_LABEL = formats.MODE_PAIEMENT
class ModePaiementEditable(abstractEnumEditable):
VALEURS_LABELS = sorted([(k, v) for k, v in formats.MODE_PAIEMENT.items()])
# ------------- Simple string-like field -------------
class abstractSimpleField(QLabel):
FONCTION_AFF = None
TOOLTIP = None
data_changed = pyqtSignal() # dummy signal
def __init__(self, *args, **kwargs):
super(abstractSimpleField, self).__init__(*args, **kwargs)
if self.TOOLTIP:
self.setToolTip(self.TOOLTIP)
def set_data(self, value):
self.value = value
label = self.FONCTION_AFF(value)
self.setText(label)
def get_data(self):
return self.value
class BoolFixe(abstractSimpleField):
FONCTION_AFF = staticmethod(formats.abstractRender.boolen)
class EurosFixe(abstractSimpleField):
FONCTION_AFF = staticmethod(formats.abstractRender.euros)
class PourcentFixe(abstractSimpleField):
FONCTION_AFF = staticmethod(formats.abstractRender.pourcent)
class DefaultFixe(abstractSimpleField):
FONCTION_AFF = staticmethod(formats.abstractRender.default)
class DateFixe(abstractSimpleField):
FONCTION_AFF = staticmethod(formats.abstractRender.date)
class DateHeureFixe(abstractSimpleField):
FONCTION_AFF = staticmethod(formats.abstractRender.dateheure)
# --------------- Numeric fields ---------------
class abstractEntierEditable(QSpinBox):
UNITE = ""
MAX = None
MIN = 0
DEFAULT = 0
data_changed = pyqtSignal(int)
def __init__(self, parent=None):
super().__init__(parent)
self.setMaximum(self.MAX)
self.setMinimum(self.MIN)
self.setSuffix(self.UNITE)
self.valueChanged.connect(self.data_changed.emit)
self.setSpecialValueText(" ")
def set_data(self, somme):
somme = somme if somme is not None else (self.MIN - 1)
self.setValue(somme)
def get_data(self):
return self.value()
class EntierEditable(abstractEntierEditable):
MAX = 10000
class PourcentEditable(abstractEntierEditable):
UNITE = "%"
MAX = 100
DEFAULT = 0
class EurosEditable(QDoubleSpinBox):
data_changed = pyqtSignal(float)
def __init__(self, parent=None):
super().__init__(parent)
self.setMaximum(100000)
self.setMinimum(-1)
self.setSpecialValueText(" ")
self.setSuffix("€")
self.valueChanged.connect(self.data_changed.emit)
def set_data(self, somme):
somme = somme if somme is not None else -1
self.setValue(somme)
def get_data(self):
v = self.value()
return v if v != -1 else None
class BoolEditable(QFrame):
data_changed = pyqtSignal(bool)
def __init__(self, parent=None):
super().__init__(parent)
cb = QCheckBox()
l = QLabel()
self.setAutoFillBackground(True)  # to avoid a transparent background
layout = QHBoxLayout(self)
layout.addWidget(cb)
layout.addWidget(l)
def callback(b):
l.setText(b and "Oui" or "Non")
self.data_changed.emit(b)
cb.clicked.connect(callback)
self.cb = cb
self.l = l
def set_data(self, b):
b = b or False
self.cb.setChecked(b)
self.l.setText(b and "Oui" or "Non")
def get_data(self):
return self.cb.isChecked()
class DefaultEditable(QLineEdit):
data_changed = pyqtSignal(str)
MAX_LENGTH = None
def __init__(self, parent=None, completion=[]):
super().__init__(parent)
self.textChanged.connect(self.data_changed.emit)
if completion:
c = QCompleter(completion)
c.setCaseSensitivity(Qt.CaseInsensitive)
self.setCompleter(c)
if self.MAX_LENGTH:
self.setMaxLength(self.MAX_LENGTH)
def set_data(self, value):
self.setText(str(value or ""))
def get_data(self):
return self.text()
def LimitedDefaultEditable(max_length):
return type("LDefaultEditable", (DefaultEditable,), {"MAX_LENGTH": max_length})
class OptionnalTextEditable(QFrame):
"""QCheckbox + QLineEdit"""
data_changed = pyqtSignal(object)
def __init__(self, parent=None):
super(OptionnalTextEditable, self).__init__(parent=parent)
self.active = QCheckBox()
self.text = QLineEdit()
self.active.clicked.connect(self.on_click)
self.text.textChanged.connect(self.on_text_changed)
layout = QHBoxLayout(self)
layout.addWidget(self.active)
layout.addWidget(self.text)
def on_click(self):
self.text.setEnabled(self.active.isChecked())
self.data_changed.emit(self.get_data())
def on_text_changed(self, text):
is_active = bool(text.strip())
self.active.setChecked(is_active)
self.text.setEnabled(is_active)
self.data_changed.emit(self.get_data())
def get_data(self):
text = self.text.text().strip()
active = self.active.isChecked() and bool(text)
return text if active else None
def set_data(self, text: str):
text = text or ""
is_active = bool(text.strip())
self.active.setChecked(is_active)
self.text.setEnabled(is_active)
self.text.setText(text)
self.data_changed.emit(self.get_data())
class DateEditable(QFrame):
data_changed = pyqtSignal(object)
def __init__(self, parent=None):
super().__init__(parent)
layout = QGridLayout(self)
layout.setContentsMargins(0, 0, 0, 0)
j = QSpinBox()
j.setMinimum(0)
j.setMaximum(31)
j.setToolTip("Jour")
m = QSpinBox()
m.setMinimum(0)
m.setMaximum(12)
m.setToolTip("Mois")
a = QSpinBox()
a.setMinimum(0)
a.setMaximum(2500)
a.setToolTip("Année")
j.setAlignment(Qt.AlignCenter)
m.setAlignment(Qt.AlignCenter)
a.setAlignment(Qt.AlignCenter)
j.setSpecialValueText("-")
m.setSpecialValueText("-")
a.setSpecialValueText("-")
layout.addWidget(j, 0, 0)
layout.addWidget(m, 0, 1)
layout.addWidget(a, 0, 2, 1, 2)
j.valueChanged.connect(
lambda v: self.data_changed.emit(self.get_data()))
m.valueChanged.connect(
lambda v: self.data_changed.emit(self.get_data()))
a.valueChanged.connect(
lambda v: self.data_changed.emit(self.get_data()))
a.editingFinished.connect(self.on_editing)
self.ws = (a, m, j)
def _change_year_text_color(self, is_ok):
color = "black" if is_ok else "red"
self.ws[0].setStyleSheet(f"color : {color}")
def on_editing(self):
current_year = self.ws[0].value()
if not current_year:
return
self._change_year_text_color(not current_year < 100)
self.ws[0].setValue(current_year)
def get_data(self):
d = [self.ws[0].value(), self.ws[1].value(), self.ws[2].value()]
try:
return datetime.date(*d)
except ValueError:
return
def set_data(self, d):
if d is None:
self.ws[0].clear()
self.ws[1].clear()
self.ws[2].clear()
else:
self.ws[0].setValue(d.year)
self.ws[1].setValue(d.month)
self.ws[2].setValue(d.day)
self.on_editing()
class MontantEditable(QFrame):
def __init__(self, parent=None):
super().__init__(parent)
self.setAutoFillBackground(True)
self.val = QDoubleSpinBox()
self.val.setMaximum(100000)
self.par_jour = QCheckBox("Par jour")
layout = QVBoxLayout(self)
layout.addWidget(self.val)
layout.addWidget(self.par_jour)
def set_data(self, value):
self.val.setValue(value[0])
self.par_jour.setChecked(value[1])
def get_data(self):
return [self.val.value(), self.par_jour.isChecked()]
class DateRange(QFrame):
data_changed = pyqtSignal(object, object)
def __init__(self):
super().__init__()
self.debut = DateEditable()
self.fin = DateEditable()
self.debut.data_changed.connect(self.on_change)
self.fin.data_changed.connect(self.on_change)
layout = QHBoxLayout(self)
layout.setContentsMargins(0, 0, 0, 0)
layout.addWidget(QLabel("Du "))
layout.addWidget(self.debut)
layout.addWidget(QLabel(" au "))
layout.addWidget(self.fin)
def on_change(self):
self.data_changed.emit(*self.get_data())
def get_data(self):
return self.debut.get_data(), self.fin.get_data()
def set_data(self, v):
v = v or [None, None]
self.debut.set_data(v[0])
self.fin.set_data(v[1])
class Texte(QPlainTextEdit):
data_changed = pyqtSignal(str)
def __init__(self, text, is_editable, placeholder="Informations complémentaires"):
super().__init__(text)
self.setSizeAdjustPolicy(QPlainTextEdit.AdjustToContents)
self.setMinimumHeight(50)
self.setMinimumWidth(150)
self.setPlaceholderText(placeholder)
self.setReadOnly(not is_editable)
self.textChanged.connect(
lambda: self.data_changed.emit(self.toPlainText()))
def get_data(self):
return self.toPlainText()
def set_data(self, text):
self.setPlainText(text)
class OptionsButton(QPushButton):
"""Button opening a window that gives access to advanced options.
CLASS_PANEL_OPTIONS is responsible for doing the actual modification."""
TITLE = "Advanced options"
CLASS_PANEL_OPTIONS:Any = None
options_changed = pyqtSignal()
def __init__(self, acces, is_editable):
super(OptionsButton, self).__init__(self.TITLE)
self.clicked.connect(self.show_options)
self.acces = acces
self.is_editable = is_editable
def show_options(self):
f = self.CLASS_PANEL_OPTIONS(self.acces, self.is_editable)
if f.exec_():
self.options_changed.emit()
def set_data(self, *args):
pass
###---------------------------- Wrappers---------------------------- ###
def _get_widget(classe, value):
w = classe()
w.set_data(value)
return w
def Default(value, is_editable):
return _get_widget(is_editable and DefaultEditable or DefaultFixe, value)
def Booleen(value, is_editable):
return _get_widget(is_editable and BoolEditable or BoolFixe, value)
def Entier(entier, is_editable):
return _get_widget(is_editable and EntierEditable or DefaultFixe, entier)
def Euros(value, is_editable):
return _get_widget(is_editable and EurosEditable or EurosFixe, value)
def Pourcent(value, is_editable):
return _get_widget(is_editable and PourcentEditable or PourcentFixe, value)
def Date(value, is_editable):
return _get_widget(is_editable and DateEditable or DateFixe, value)
def Departement(value, is_editable):
return _get_widget(is_editable and DepartementEditable or DepartementFixe, value)
def Sexe(value, is_editable):
return _get_widget(is_editable and SexeEditable or SexeFixe, value)
def Adresse(value, is_editable):
return Texte(value, is_editable, placeholder="Adresse...")
def ModePaiement(value, is_editable):
return _get_widget(is_editable and ModePaiementEditable or ModePaiementFixe, value)
def DateHeure(value, is_editable):
if is_editable:
raise NotImplementedError("No editable datetime widget !")
w = DateHeureFixe()
w.set_data(value)
return w
def OptionnalText(value, is_editable):
return _get_widget(is_editable and OptionnalTextEditable or DefaultFixe, value)
"""Correspondance field -> widget (callable)"""
TYPES_WIDGETS = defaultdict(
lambda: Default,
date_naissance=Date,
departement_naissance=Departement,
sexe=Sexe,
tels=Tels,
adresse=Adresse,
date=Date,
date_debut=Date,
date_fin=Date,
date_arrivee=Date,
date_depart=Date,
date_emission=Date,
date_reception=Date,
nb_places=Entier,
nb_places_reservees=Entier,
age_min=Entier,
age_max=Entier,
acquite=Booleen,
is_acompte=Booleen,
is_remboursement=Booleen,
reduc_speciale=Euros,
acompte_recu=Euros,
valeur=Euros,
total=Euros,
prix=Euros,
date_heure_modif=DateHeure,
date_reglement=Date,
date_encaissement=Date,
info=Texte,
message=Texte,
mode_paiement=ModePaiement,
)
ASSOCIATION = {}
def add_widgets_type(type_widgets, abstract_ASSOCIATION):
TYPES_WIDGETS.update(type_widgets)
for k, v in abstract_ASSOCIATION.items():
t = TYPES_WIDGETS[k]
ASSOCIATION[k] = (v[0], v[1], v[2], t, v[3])
add_widgets_type({}, formats.ASSOCIATION)
## ------------------Custom delegate ------------------ ##
class delegateAttributs(QStyledItemDelegate):
CORRES = {"montant": MontantEditable, "mode_paiement": ModePaiementEditable,
"valeur": EurosEditable,
"description": DefaultEditable, "quantite": EntierEditable,
"obligatoire": BoolEditable}
"""Correspondance between fields and widget classes"""
size_hint_: tuple
def __init__(self, parent):
QStyledItemDelegate.__init__(self, parent)
self.size_hint_ = None
self.row_done_ = None
@staticmethod
def paint_filling_rect(option, painter, proportion):
rect = option.rect
painter.save()
proportion = min(proportion, 100)
rs, vs, bs = (30,64,55) # start
re, ve, be = (153,242,200) # end
t = proportion / 100
color = QColor( rs + t*(re - rs), vs + t*(ve - vs), bs + t*(be - bs))
painter.setPen(QPen(color, 0.5, Qt.SolidLine,
Qt.RoundCap, Qt.RoundJoin))
painter.setBackgroundMode(Qt.OpaqueMode)
painter.setBackground(QBrush(color))
painter.setBrush(QBrush(color))
rect.setWidth(rect.width() * proportion / 100)
painter.drawRoundedRect(rect, 5, 5)
painter.restore()
@staticmethod
def _get_field(index):
return index.model().header[index.column()]
def sizeHint(self, option, index):
if self.size_hint_ and self.size_hint_[0] == index:
return self.size_hint_[1]
return super().sizeHint(option, index)
def setEditorData(self, editor, index):
value = index.data(role=Qt.EditRole)
editor.set_data(value)
self.sizeHintChanged.emit(index)
def createEditor(self, parent, option, index):
field = self._get_field(index)
other = index.data(role=Qt.UserRole)
classe = self.CORRES[field]
w = classe(parent, other) if other else classe(parent)
self.size_hint_ = (index, w.sizeHint())
self.row_done_ = index.row()
return w
def destroyEditor(self, editor, index):
self.size_hint_ = None
super().destroyEditor(editor, index)
def setModelData(self, editor, model, index):
value = editor.get_data()
model.set_data(index, value)
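# Hedged usage sketch (not part of the original module): shows how the wrapper
# functions above pick an editable or read-only widget for a field value. It
# assumes a QApplication can be created and that EurosEditable/EurosFixe accept
# the value through set_data; the amount 12.5 is purely illustrative.
if __name__ == "__main__":
    import sys
    from PyQt5.QtWidgets import QApplication

    demo_app = QApplication(sys.argv)
    editable = Euros(12.5, True)   # is_editable=True -> EurosEditable, pre-filled
    fixed = Euros(12.5, False)     # is_editable=False -> EurosFixe, display only
    editable.show()
    fixed.show()
    sys.exit(demo_app.exec_())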
|
[
"PyQt5.QtCore.pyqtSignal",
"PyQt5.QtWidgets.QStyledItemDelegate.__init__",
"PyQt5.QtGui.QColor",
"PyQt5.QtWidgets.QGridLayout",
"PyQt5.QtWidgets.QPushButton",
"collections.defaultdict",
"PyQt5.QtWidgets.QVBoxLayout",
"PyQt5.QtGui.QBrush",
"PyQt5.QtWidgets.QCompleter",
"PyQt5.QtWidgets.QSpinBox",
"PyQt5.QtWidgets.QLabel",
"PyQt5.QtWidgets.QCheckBox",
"PyQt5.QtWidgets.QHBoxLayout",
"datetime.date",
"PyQt5.QtWidgets.QDoubleSpinBox",
"PyQt5.QtGui.QPen",
"PyQt5.QtCore.QPoint",
"re.compile",
"PyQt5.QtGui.QIcon",
"PyQt5.QtWidgets.QLineEdit"
] |
[((16988, 17618), 'collections.defaultdict', 'defaultdict', (['(lambda : Default)'], {'date_naissance': 'Date', 'departement_naissance': 'Departement', 'sexe': 'Sexe', 'tels': 'Tels', 'adresse': 'Adresse', 'date': 'Date', 'date_debut': 'Date', 'date_fin': 'Date', 'date_arrivee': 'Date', 'date_depart': 'Date', 'date_emission': 'Date', 'date_reception': 'Date', 'nb_places': 'Entier', 'nb_places_reservees': 'Entier', 'age_min': 'Entier', 'age_max': 'Entier', 'acquite': 'Booleen', 'is_acompte': 'Booleen', 'is_remboursement': 'Booleen', 'reduc_speciale': 'Euros', 'acompte_recu': 'Euros', 'valeur': 'Euros', 'total': 'Euros', 'prix': 'Euros', 'date_heure_modif': 'DateHeure', 'date_reglement': 'Date', 'date_encaissement': 'Date', 'info': 'Texte', 'message': 'Texte', 'mode_paiement': 'ModePaiement'}), '(lambda : Default, date_naissance=Date, departement_naissance=\n Departement, sexe=Sexe, tels=Tels, adresse=Adresse, date=Date,\n date_debut=Date, date_fin=Date, date_arrivee=Date, date_depart=Date,\n date_emission=Date, date_reception=Date, nb_places=Entier,\n nb_places_reservees=Entier, age_min=Entier, age_max=Entier, acquite=\n Booleen, is_acompte=Booleen, is_remboursement=Booleen, reduc_speciale=\n Euros, acompte_recu=Euros, valeur=Euros, total=Euros, prix=Euros,\n date_heure_modif=DateHeure, date_reglement=Date, date_encaissement=Date,\n info=Texte, message=Texte, mode_paiement=ModePaiement)\n', (16999, 17618), False, 'from collections import defaultdict\n'), ((3797, 3815), 'PyQt5.QtCore.pyqtSignal', 'pyqtSignal', (['object'], {}), '(object)\n', (3807, 3815), False, 'from PyQt5.QtCore import pyqtSignal, Qt, QPoint\n'), ((5528, 5540), 'PyQt5.QtCore.pyqtSignal', 'pyqtSignal', ([], {}), '()\n', (5538, 5540), False, 'from PyQt5.QtCore import pyqtSignal, Qt, QPoint\n'), ((6708, 6723), 'PyQt5.QtCore.pyqtSignal', 'pyqtSignal', (['int'], {}), '(int)\n', (6718, 6723), False, 'from PyQt5.QtCore import pyqtSignal, Qt, QPoint\n'), ((7389, 7406), 'PyQt5.QtCore.pyqtSignal', 'pyqtSignal', (['float'], {}), '(float)\n', (7399, 7406), False, 'from PyQt5.QtCore import pyqtSignal, Qt, QPoint\n'), ((7911, 7927), 'PyQt5.QtCore.pyqtSignal', 'pyqtSignal', (['bool'], {}), '(bool)\n', (7921, 7927), False, 'from PyQt5.QtCore import pyqtSignal, Qt, QPoint\n'), ((8636, 8651), 'PyQt5.QtCore.pyqtSignal', 'pyqtSignal', (['str'], {}), '(str)\n', (8646, 8651), False, 'from PyQt5.QtCore import pyqtSignal, Qt, QPoint\n'), ((9380, 9398), 'PyQt5.QtCore.pyqtSignal', 'pyqtSignal', (['object'], {}), '(object)\n', (9390, 9398), False, 'from PyQt5.QtCore import pyqtSignal, Qt, QPoint\n'), ((10599, 10617), 'PyQt5.QtCore.pyqtSignal', 'pyqtSignal', (['object'], {}), '(object)\n', (10609, 10617), False, 'from PyQt5.QtCore import pyqtSignal, Qt, QPoint\n'), ((13248, 13274), 'PyQt5.QtCore.pyqtSignal', 'pyqtSignal', (['object', 'object'], {}), '(object, object)\n', (13258, 13274), False, 'from PyQt5.QtCore import pyqtSignal, Qt, QPoint\n'), ((14074, 14089), 'PyQt5.QtCore.pyqtSignal', 'pyqtSignal', (['str'], {}), '(str)\n', (14084, 14089), False, 'from PyQt5.QtCore import pyqtSignal, Qt, QPoint\n'), ((14908, 14920), 'PyQt5.QtCore.pyqtSignal', 'pyqtSignal', ([], {}), '()\n', (14918, 14920), False, 'from PyQt5.QtCore import pyqtSignal, Qt, QPoint\n'), ((834, 859), 're.compile', 're.compile', (['"""[0-9]{9,10}"""'], {}), "('[0-9]{9,10}')\n", (844, 859), False, 'import re\n'), ((1099, 1110), 'PyQt5.QtWidgets.QLineEdit', 'QLineEdit', ([], {}), '()\n', (1108, 1110), False, 'from PyQt5.QtWidgets import QFrame, QHBoxLayout, QPushButton, QLineEdit, 
QLabel, QComboBox, QSpinBox, QDoubleSpinBox, QCheckBox, QCompleter, QGridLayout, QVBoxLayout, QPlainTextEdit, QStyledItemDelegate, QToolTip\n'), ((1283, 1296), 'PyQt5.QtWidgets.QPushButton', 'QPushButton', ([], {}), '()\n', (1294, 1296), False, 'from PyQt5.QtWidgets import QFrame, QHBoxLayout, QPushButton, QLineEdit, QLabel, QComboBox, QSpinBox, QDoubleSpinBox, QCheckBox, QCompleter, QGridLayout, QVBoxLayout, QPlainTextEdit, QStyledItemDelegate, QToolTip\n'), ((8012, 8023), 'PyQt5.QtWidgets.QCheckBox', 'QCheckBox', ([], {}), '()\n', (8021, 8023), False, 'from PyQt5.QtWidgets import QFrame, QHBoxLayout, QPushButton, QLineEdit, QLabel, QComboBox, QSpinBox, QDoubleSpinBox, QCheckBox, QCompleter, QGridLayout, QVBoxLayout, QPlainTextEdit, QStyledItemDelegate, QToolTip\n'), ((8036, 8044), 'PyQt5.QtWidgets.QLabel', 'QLabel', ([], {}), '()\n', (8042, 8044), False, 'from PyQt5.QtWidgets import QFrame, QHBoxLayout, QPushButton, QLineEdit, QLabel, QComboBox, QSpinBox, QDoubleSpinBox, QCheckBox, QCompleter, QGridLayout, QVBoxLayout, QPlainTextEdit, QStyledItemDelegate, QToolTip\n'), ((8134, 8151), 'PyQt5.QtWidgets.QHBoxLayout', 'QHBoxLayout', (['self'], {}), '(self)\n', (8145, 8151), False, 'from PyQt5.QtWidgets import QFrame, QHBoxLayout, QPushButton, QLineEdit, QLabel, QComboBox, QSpinBox, QDoubleSpinBox, QCheckBox, QCompleter, QGridLayout, QVBoxLayout, QPlainTextEdit, QStyledItemDelegate, QToolTip\n'), ((9526, 9537), 'PyQt5.QtWidgets.QCheckBox', 'QCheckBox', ([], {}), '()\n', (9535, 9537), False, 'from PyQt5.QtWidgets import QFrame, QHBoxLayout, QPushButton, QLineEdit, QLabel, QComboBox, QSpinBox, QDoubleSpinBox, QCheckBox, QCompleter, QGridLayout, QVBoxLayout, QPlainTextEdit, QStyledItemDelegate, QToolTip\n'), ((9558, 9569), 'PyQt5.QtWidgets.QLineEdit', 'QLineEdit', ([], {}), '()\n', (9567, 9569), False, 'from PyQt5.QtWidgets import QFrame, QHBoxLayout, QPushButton, QLineEdit, QLabel, QComboBox, QSpinBox, QDoubleSpinBox, QCheckBox, QCompleter, QGridLayout, QVBoxLayout, QPlainTextEdit, QStyledItemDelegate, QToolTip\n'), ((9700, 9717), 'PyQt5.QtWidgets.QHBoxLayout', 'QHBoxLayout', (['self'], {}), '(self)\n', (9711, 9717), False, 'from PyQt5.QtWidgets import QFrame, QHBoxLayout, QPushButton, QLineEdit, QLabel, QComboBox, QSpinBox, QDoubleSpinBox, QCheckBox, QCompleter, QGridLayout, QVBoxLayout, QPlainTextEdit, QStyledItemDelegate, QToolTip\n'), ((10706, 10723), 'PyQt5.QtWidgets.QGridLayout', 'QGridLayout', (['self'], {}), '(self)\n', (10717, 10723), False, 'from PyQt5.QtWidgets import QFrame, QHBoxLayout, QPushButton, QLineEdit, QLabel, QComboBox, QSpinBox, QDoubleSpinBox, QCheckBox, QCompleter, QGridLayout, QVBoxLayout, QPlainTextEdit, QStyledItemDelegate, QToolTip\n'), ((10782, 10792), 'PyQt5.QtWidgets.QSpinBox', 'QSpinBox', ([], {}), '()\n', (10790, 10792), False, 'from PyQt5.QtWidgets import QFrame, QHBoxLayout, QPushButton, QLineEdit, QLabel, QComboBox, QSpinBox, QDoubleSpinBox, QCheckBox, QCompleter, QGridLayout, QVBoxLayout, QPlainTextEdit, QStyledItemDelegate, QToolTip\n'), ((10883, 10893), 'PyQt5.QtWidgets.QSpinBox', 'QSpinBox', ([], {}), '()\n', (10891, 10893), False, 'from PyQt5.QtWidgets import QFrame, QHBoxLayout, QPushButton, QLineEdit, QLabel, QComboBox, QSpinBox, QDoubleSpinBox, QCheckBox, QCompleter, QGridLayout, QVBoxLayout, QPlainTextEdit, QStyledItemDelegate, QToolTip\n'), ((10984, 10994), 'PyQt5.QtWidgets.QSpinBox', 'QSpinBox', ([], {}), '()\n', (10992, 10994), False, 'from PyQt5.QtWidgets import QFrame, QHBoxLayout, QPushButton, QLineEdit, QLabel, QComboBox, QSpinBox, 
QDoubleSpinBox, QCheckBox, QCompleter, QGridLayout, QVBoxLayout, QPlainTextEdit, QStyledItemDelegate, QToolTip\n'), ((12796, 12812), 'PyQt5.QtWidgets.QDoubleSpinBox', 'QDoubleSpinBox', ([], {}), '()\n', (12810, 12812), False, 'from PyQt5.QtWidgets import QFrame, QHBoxLayout, QPushButton, QLineEdit, QLabel, QComboBox, QSpinBox, QDoubleSpinBox, QCheckBox, QCompleter, QGridLayout, QVBoxLayout, QPlainTextEdit, QStyledItemDelegate, QToolTip\n'), ((12873, 12894), 'PyQt5.QtWidgets.QCheckBox', 'QCheckBox', (['"""Par jour"""'], {}), "('Par jour')\n", (12882, 12894), False, 'from PyQt5.QtWidgets import QFrame, QHBoxLayout, QPushButton, QLineEdit, QLabel, QComboBox, QSpinBox, QDoubleSpinBox, QCheckBox, QCompleter, QGridLayout, QVBoxLayout, QPlainTextEdit, QStyledItemDelegate, QToolTip\n'), ((12912, 12929), 'PyQt5.QtWidgets.QVBoxLayout', 'QVBoxLayout', (['self'], {}), '(self)\n', (12923, 12929), False, 'from PyQt5.QtWidgets import QFrame, QHBoxLayout, QPushButton, QLineEdit, QLabel, QComboBox, QSpinBox, QDoubleSpinBox, QCheckBox, QCompleter, QGridLayout, QVBoxLayout, QPlainTextEdit, QStyledItemDelegate, QToolTip\n'), ((13525, 13542), 'PyQt5.QtWidgets.QHBoxLayout', 'QHBoxLayout', (['self'], {}), '(self)\n', (13536, 13542), False, 'from PyQt5.QtWidgets import QFrame, QHBoxLayout, QPushButton, QLineEdit, QLabel, QComboBox, QSpinBox, QDoubleSpinBox, QCheckBox, QCompleter, QGridLayout, QVBoxLayout, QPlainTextEdit, QStyledItemDelegate, QToolTip\n'), ((18468, 18510), 'PyQt5.QtWidgets.QStyledItemDelegate.__init__', 'QStyledItemDelegate.__init__', (['self', 'parent'], {}), '(self, parent)\n', (18496, 18510), False, 'from PyQt5.QtWidgets import QFrame, QHBoxLayout, QPushButton, QLineEdit, QLabel, QComboBox, QSpinBox, QDoubleSpinBox, QCheckBox, QCompleter, QGridLayout, QVBoxLayout, QPlainTextEdit, QStyledItemDelegate, QToolTip\n'), ((18867, 18933), 'PyQt5.QtGui.QColor', 'QColor', (['(rs + t * (re - rs))', '(vs + t * (ve - vs))', '(bs + t * (be - bs))'], {}), '(rs + t * (re - rs), vs + t * (ve - vs), bs + t * (be - bs))\n', (18873, 18933), False, 'from PyQt5.QtGui import QColor, QPen, QBrush, QIcon\n'), ((1317, 1335), 'PyQt5.QtGui.QIcon', 'QIcon', (['Icons.Valid'], {}), '(Icons.Valid)\n', (1322, 1335), False, 'from PyQt5.QtGui import QColor, QPen, QBrush, QIcon\n'), ((8856, 8878), 'PyQt5.QtWidgets.QCompleter', 'QCompleter', (['completion'], {}), '(completion)\n', (8866, 8878), False, 'from PyQt5.QtWidgets import QFrame, QHBoxLayout, QPushButton, QLineEdit, QLabel, QComboBox, QSpinBox, QDoubleSpinBox, QCheckBox, QCompleter, QGridLayout, QVBoxLayout, QPlainTextEdit, QStyledItemDelegate, QToolTip\n'), ((12265, 12282), 'datetime.date', 'datetime.date', (['*d'], {}), '(*d)\n', (12278, 12282), False, 'import datetime\n'), ((13614, 13627), 'PyQt5.QtWidgets.QLabel', 'QLabel', (['"""Du """'], {}), "('Du ')\n", (13620, 13627), False, 'from PyQt5.QtWidgets import QFrame, QHBoxLayout, QPushButton, QLineEdit, QLabel, QComboBox, QSpinBox, QDoubleSpinBox, QCheckBox, QCompleter, QGridLayout, QVBoxLayout, QPlainTextEdit, QStyledItemDelegate, QToolTip\n'), ((13691, 13705), 'PyQt5.QtWidgets.QLabel', 'QLabel', (['""" au """'], {}), "(' au ')\n", (13697, 13705), False, 'from PyQt5.QtWidgets import QFrame, QHBoxLayout, QPushButton, QLineEdit, QLabel, QComboBox, QSpinBox, QDoubleSpinBox, QCheckBox, QCompleter, QGridLayout, QVBoxLayout, QPlainTextEdit, QStyledItemDelegate, QToolTip\n'), ((18952, 19009), 'PyQt5.QtGui.QPen', 'QPen', (['color', '(0.5)', 'Qt.SolidLine', 'Qt.RoundCap', 'Qt.RoundJoin'], {}), '(color, 0.5, Qt.SolidLine, 
Qt.RoundCap, Qt.RoundJoin)\n', (18956, 19009), False, 'from PyQt5.QtGui import QColor, QPen, QBrush, QIcon\n'), ((19118, 19131), 'PyQt5.QtGui.QBrush', 'QBrush', (['color'], {}), '(color)\n', (19124, 19131), False, 'from PyQt5.QtGui import QColor, QPen, QBrush, QIcon\n'), ((19158, 19171), 'PyQt5.QtGui.QBrush', 'QBrush', (['color'], {}), '(color)\n', (19164, 19171), False, 'from PyQt5.QtGui import QColor, QPen, QBrush, QIcon\n'), ((1952, 1965), 'PyQt5.QtCore.QPoint', 'QPoint', (['(0)', '(10)'], {}), '(0, 10)\n', (1958, 1965), False, 'from PyQt5.QtCore import pyqtSignal, Qt, QPoint\n')]
|
import json
from sklearn.preprocessing import LabelEncoder
from src.comment_analysis.url_utils import get_text_by_url
from src.csv.csv_utils import get_link_line_type, get_keywords
from src.keys import line, serialize_outpath
from nltk.stem.porter import *
from spacy.lang.en.stop_words import STOP_WORDS
from src.comment_analysis.java_re import *
def get_line(code, comment_line, comment_type):
code = code.splitlines()
try:
if comment_type == line:
if not re.match(r"^[\s]*//.*", code[
comment_line - 1]): # if the line doesn't start as a comment, the comment refers to this line
if not re.match(r"^[\s]*}?[\s]*(else|try|finally)?[\s]*{?[\s]*//.*[\s]*$", code[
                    comment_line - 1]):  # if the line isn't just brackets and some keywords, the focus line is the comment_line
return code[comment_line - 1]
i = 0
while re.match(r"^[\s]*//.*", code[comment_line + i]) or re.match(r"^[\s]*$", code[comment_line + i]) or re.match(r"[\s]*[^}{](try|else|finally)[\s]*{?", code[comment_line + i]): # while the line starts as a comment, ignore it. I do this because they use multiple line comment to simulate a block
i += 1
if re.match(r"^[\s]*}.*", code[comment_line + i]) or re.match(r"[\s]*(try|else|finally)[\s]*{?", code[
                comment_line + i]):  # if this matches, the block is empty so I take the first non-comment, non-empty line before the comment.
i = -2
while re.match(r"^[\s]*//.*", code[comment_line + i]) or re.match(r"^[\s]*$",
code[comment_line + i]) or re.match(
r"^[\s]*/\*.*", code[comment_line + i]) or re.match(r"^\*", code[comment_line + i]) or re.match(
r"^[\s]*\*/.*", code[comment_line + i]): # while the line is a comment or is blank, ignore it
i -= 1
return code[comment_line + i] # comment refers to that
# r"^[\s]*}?[\s]*(else|try|finally)?[\s]*{?[\s]*.*$"
else: # block or javadoc
# if the line doesn't start as a comment, the comment refers to this line
if not re.match(r"^[\s]*/\*.*", code[comment_line - 1]):
return code[comment_line - 1]
if comment_line >= len(code) - 1:
return code[comment_line - 2]
i = 0
if not re.match(r"^[\s]*.*\*/", code[comment_line - 1]):
while not re.match(r"^[\s]*\*/", code[comment_line + i]):
i += 1
i += 1
# while the line starts as a comment or is blank, or is an annotation, ignore it
while re.match(r"^[\s]*$", code[comment_line + i]) or re.match(r"^[\s]*@[^\s]*[\s]*$", code[comment_line + i]) or re.match(r"^[\s]*//.*", code[comment_line + i]) or re.match(r"[\s]*[^}{](try|else|finally)[\s]*{?", code[comment_line + i]):
i += 1
            # if this matches, probably the comment refers to the line before
if re.match(r"^[\s]*}[\s]*.*", code[comment_line + i]) or re.match(r"[\s]*(try|else|finally)[\s]*{?", code[comment_line + i]):
i = -2
# while the line is a comment or is blank, ignore it
while re.match(r"^[\s]*//.*", code[comment_line + i]) or re.match(r"^[\s]*$", code[comment_line + i]) or re.match(r"^[\s]*/\*.*", code[comment_line + i]) or re.match(r"^\*", code[comment_line + i]) or re.match(r"^[\s]*\*/.*", code[comment_line + i]):
i -= 1
return code[comment_line + i]
except IndexError:
return ""
def get_positions(lines=None, set='train'):
comment_type = 0
text_link = 1
comment_line = 2
positions = []
data = get_link_line_type(set=set)
if lines is None:
lines = get_lines(set=set)
i = 0
for row in data:
#print(row[comment_line], row[comment_type], row[text_link] + "#L" + str(row[comment_line]))
focus_line = lines[i]
#print(focus_line)
p = get_position(focus_line)
positions.append(p)
#print(p)
i += 1
return positions
def get_positions_encoded(lines=None, set='train'):
if lines is None:
positions = get_positions(set=set)
else:
positions = get_positions(lines, set=set)
le = LabelEncoder()
return le.fit_transform(positions)
def get_lines(serialized=True, serialize=False, set='train'):
if serialized:
x = open(serialize_outpath + 'serialized_' + set +'.json', 'r').read()
return json.loads(x)
comment_type = 0
text_link = 1
comment_line = 2
data = get_link_line_type(set=set)
lines = []
for row in data:
code = get_text_by_url(row[text_link])
focus_line = get_line(code, row[comment_line], row[comment_type])
lines.append(focus_line)
if serialize:
x = open(serialize_outpath + 'serialized_' + set +'.json', 'w')
x.write(json.dumps(lines))
return lines
def get_code_words(stemming=True, rem_keyws=True, lines=None, set='train'):
if lines is None:
lines = get_lines(set=set, serialized=False, serialize=True)
words = []
for line in lines:
words.append(word_extractor(line, stemming, rem_keyws))
return words
def word_extractor(string, stemming=True, rem_keyws=True):
string = remove_line_comment(string)
string = remove_block_comment(string)
splitted = code_split(string)
words = []
for part in splitted:
camel_case_parts = camel_case_split(part)
for camel in camel_case_parts:
words.append(camel.lower())
if stemming and rem_keyws:
return stem(remove_keywords(words))
elif stemming:
return stem(words)
else:
return remove_keywords(words)
def remove_keywords(words):
keywords = get_keywords()
non_keywords = []
for word in words:
if word not in keywords:
non_keywords.append(word)
return non_keywords
def stem(words):
stemmer = PorterStemmer()
stemmed = []
for token in words:
stemmed.append(stemmer.stem(token))
return stemmed
def camel_case_split(string):
if not string:
return string
words = [[string[0]]]
for c in string[1:]:
if words[-1][-1].islower() and c.isupper():
words.append(list(c))
else:
words[-1].append(c)
return [''.join(word) for word in words]
def remove_line_comment(string):
in_string = False
escape = False
comment = False
i = 0
for char in string:
if char == '"':
if in_string is True:
if escape is False:
in_string = False
else:
escape = False
else:
in_string = True
elif char == '\\':
if in_string is True:
escape = True
elif char == '/':
if comment is False:
comment = True
else:
return string[:i]
elif comment is True:
i += 1
comment = False
elif escape is True:
escape = False
if comment is False:
i += 1
return string
def remove_block_comment(string):
in_string = False
escape = False
block = False
maybe_block = False
found = False
init_index = 0
end_index = 0
i = 0
for char in string:
if char == '*':
if not in_string:
if maybe_block is False:
if block is True:
maybe_block = True
else:
block = True
if char == '"':
if in_string is True:
if escape is False:
in_string = False
else:
escape = False
else:
in_string = True
elif char == '\\':
if in_string is True:
escape = True
elif char == '/':
if not in_string:
if maybe_block is True:
if block is True:
found = True
end_index = i
break
else:
maybe_block = True
init_index = i
i += 1
if found is True:
return string[:init_index] + string[end_index + 1:]
return string
def code_split(string):
words = re.split(r'\\n|\?|&|\\|;|,|\*|\(|\)|\{|\s|\.|/|_|:|=|<|>|\||!|"|\+|-|\[|\]|\'|\}|\^|#|%', string)
words = list(filter(lambda a: a != "", words))
return words
def remove_stopwords(tokens):
stop_words = STOP_WORDS
relevant_words = []
for token in tokens:
if token not in stop_words:
relevant_words.append(token)
return relevant_words
def tokenizer(string, rem_stop=False, stemming=False, rem_kws=False):
tokens = code_split(string)
new_tokens = []
for token in tokens:
for t in camel_case_split(token):
new_tokens.append(t.lower())
if rem_stop:
new_tokens = remove_stopwords(new_tokens)
if rem_kws:
new_tokens = remove_keywords(new_tokens)
if stemming:
new_tokens = stem(new_tokens)
return new_tokens
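# Hedged worked example (not part of the original module): what the splitting
# helpers produce for a small, made-up Java statement (default flags, so no
# stemming, stop-word removal or keyword removal).
def _demo_splitting():
    assert code_split('int userCount = getUserCount();') == ['int', 'userCount', 'getUserCount']
    assert camel_case_split('userCount') == ['user', 'Count']
    assert tokenizer('int userCount = getUserCount();') == ['int', 'user', 'count', 'get', 'user', 'count']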
if __name__ == '__main__':
# code = open('../testers/test.txt', 'r').read()
# code_parser(code, 151, javadoc)
print(get_lines(serialized=False, serialize=True))
print('first')
print(get_lines(serialized=True, serialize=False))
# get_positions()
# line_type_identifier("ciao")
# code_parser3()
# print(word_extractor("ciao mamma /*css rff*/"))
# print(tokenizer("t<EMAIL>@<EMAIL> @param"))
# print(camel_case_split("tuaMadre@QuellaTroia @param"))
# print(code_split("tuaMadre@QuellaTroia @param"))
|
[
"src.csv.csv_utils.get_keywords",
"json.loads",
"src.comment_analysis.url_utils.get_text_by_url",
"sklearn.preprocessing.LabelEncoder",
"json.dumps",
"src.csv.csv_utils.get_link_line_type"
] |
[((3876, 3903), 'src.csv.csv_utils.get_link_line_type', 'get_link_line_type', ([], {'set': 'set'}), '(set=set)\n', (3894, 3903), False, 'from src.csv.csv_utils import get_link_line_type, get_keywords\n'), ((4457, 4471), 'sklearn.preprocessing.LabelEncoder', 'LabelEncoder', ([], {}), '()\n', (4469, 4471), False, 'from sklearn.preprocessing import LabelEncoder\n'), ((4774, 4801), 'src.csv.csv_utils.get_link_line_type', 'get_link_line_type', ([], {'set': 'set'}), '(set=set)\n', (4792, 4801), False, 'from src.csv.csv_utils import get_link_line_type, get_keywords\n'), ((5984, 5998), 'src.csv.csv_utils.get_keywords', 'get_keywords', ([], {}), '()\n', (5996, 5998), False, 'from src.csv.csv_utils import get_link_line_type, get_keywords\n'), ((4688, 4701), 'json.loads', 'json.loads', (['x'], {}), '(x)\n', (4698, 4701), False, 'import json\n'), ((4853, 4884), 'src.comment_analysis.url_utils.get_text_by_url', 'get_text_by_url', (['row[text_link]'], {}), '(row[text_link])\n', (4868, 4884), False, 'from src.comment_analysis.url_utils import get_text_by_url\n'), ((5098, 5115), 'json.dumps', 'json.dumps', (['lines'], {}), '(lines)\n', (5108, 5115), False, 'import json\n')]
|
from unittest import TestCase
from h.models import User
from . import AppTestCase
class UserTest(AppTestCase):
def test_password_encrypt(self):
"""make sure user passwords are stored encrypted
"""
u1 = User(username=u'test', password=u'<PASSWORD>', email=u'<EMAIL>')
assert u1.password != '<PASSWORD>'
self.db.add(u1)
self.db.flush()
assert u1.password != '<PASSWORD>'
|
[
"h.models.User"
] |
[((233, 297), 'h.models.User', 'User', ([], {'username': 'u"""test"""', 'password': 'u"""<PASSWORD>"""', 'email': 'u"""<EMAIL>"""'}), "(username=u'test', password=u'<PASSWORD>', email=u'<EMAIL>')\n", (237, 297), False, 'from h.models import User\n')]
|
"""
Class for parsing the main Ben Yehuda site page
"""
from urllib import request
from urllib import parse as urlparse
from bs4 import BeautifulSoup
from .helpers import NamedLink, clean_text
class MainPage(object):
"""
Parses and gets information from the main index page. Mostly used to get
links for all of the artist pages
"""
def __init__(self, url="http://benyehuda.org"):
self.main_url = url
self.soup = BeautifulSoup(request.urlopen(url))
@staticmethod
def artist_a_filter(tag):
"""
Finds all the links in the index page that points to an artist's page
"""
if tag.name != "a":
return False
href = tag.get("href").lower()
# Artist links are supposed to be internal
if href.startswith("http"):
return False
        # Skip javascript: pseudo-links and other non-page hrefs
if href.startswith("javascript"):
return False
# Artist pages are one branch below the main page and their links
# usually end with / - Need to verify
if href.count("/") == 1 and href[-1] == "/":
return True
return False
def get_artist_links(self):
"""
:return: A set of unique artist page urls and names
:rtype: set[NamedLink]
"""
anchors = self.soup.find_all(self.artist_a_filter)
links = set()
for anchor in anchors:
url = urlparse.urljoin(self.main_url, anchor.get("href").lower())
links.add(NamedLink(url, clean_text(anchor)))
return links
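# Hedged usage sketch (not part of the original module): requires network access
# to benyehuda.org; prints every artist page link found on the index page.
if __name__ == "__main__":
    page = MainPage()
    for named_link in page.get_artist_links():
        print(named_link)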
|
[
"urllib.request.urlopen"
] |
[((465, 485), 'urllib.request.urlopen', 'request.urlopen', (['url'], {}), '(url)\n', (480, 485), False, 'from urllib import request\n')]
|
import pandas as pd
def load_from_excel(file_path, sheet_name=None):
"""
Loads the data from an Excel file into a list of dictionaries, where each dictionary represents a row in the Excel
file and the keys of each dictionary represent each column header in the Excel file. The method creates this list
of dictionaries via a Pandas dataframe.
:param file_path: The full file path (appended with .xlsx) of the Excel file to be loaded.
:type file_path: str
:param sheet_name: Name of a particular sheet in the file to load (optional, defaults to the first sheet in the
Excel file).
:type sheet_name: str
:return: List of dictionaries, each dictionary representing a row in the Excel file.
:rtype: list of dict
"""
xl = pd.ExcelFile(file_path)
sheet_name = sheet_name if sheet_name else xl.sheet_names[0]
return xl.parse(sheet_name, index_col=None).to_dict('records')
def export_to_excel(data, file_path, sheet_name="Sheet1", field_order=None, sorting_fields=None):
"""
Writes data from a list of dictionaries to an Excel file, where each dictionary represents a row in the Excel file
and the keys of each dictionary represent each column header in the Excel file.
:param data: List of dictionaries, each dictionary representing a row in the Excel file.
:type data: list of dict
:param file_path: The full file path (appended with .xlsx) of the Excel file to be written to. This will overwrite
data if both file_path and sheet_name already exist.
:type file_path: str
:param sheet_name: Name of a particular sheet in the file to write to (optional, defaults to "Sheet1").
:type sheet_name: str
:param field_order: List of keys from data ordered to match the intended Excel column ordering (left to right). Must
include all keys/columns. Any keys omitted from the list will not be written as columns. (optional)
:type field_order: list of str
:param sorting_fields: List of keys from data to be used as sorting columns (small to large) in Excel. Can be any
length from 1 column to every column. The order of the list will dictate the sorting order.
:type sorting_fields: list of str
:return: None
"""
writer = pd.ExcelWriter(file_path, engine='openpyxl')
df = pd.DataFrame(data)
if field_order:
df = df[field_order]
if sorting_fields:
df = df.sort_values(sorting_fields)
df.to_excel(writer, sheet_name=sheet_name, index=False)
writer.save()
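# Hedged usage sketch (not part of the original module): round-trips a tiny
# table through Excel. The file name "example.xlsx" is illustrative and the
# openpyxl engine used above must be installed.
if __name__ == "__main__":
    rows = [{"name": "b", "value": 2}, {"name": "a", "value": 1}]
    export_to_excel(rows, "example.xlsx", sheet_name="Demo",
                    field_order=["name", "value"], sorting_fields=["value"])
    print(load_from_excel("example.xlsx", sheet_name="Demo"))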
|
[
"pandas.DataFrame",
"pandas.ExcelFile",
"pandas.ExcelWriter"
] |
[((771, 794), 'pandas.ExcelFile', 'pd.ExcelFile', (['file_path'], {}), '(file_path)\n', (783, 794), True, 'import pandas as pd\n'), ((2249, 2293), 'pandas.ExcelWriter', 'pd.ExcelWriter', (['file_path'], {'engine': '"""openpyxl"""'}), "(file_path, engine='openpyxl')\n", (2263, 2293), True, 'import pandas as pd\n'), ((2303, 2321), 'pandas.DataFrame', 'pd.DataFrame', (['data'], {}), '(data)\n', (2315, 2321), True, 'import pandas as pd\n')]
|
"""WIP URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/1.10/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: url(r'^$', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: url(r'^$', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.conf.urls import url, include
2. Add a URL to urlpatterns: url(r'^blog/', include('blog.urls'))
"""
from django.conf.urls import url
from django.contrib import admin
from complaint.views import show_complaints
from complaint.views import reject, signup
from django.contrib.auth import views as auth_views
from complaint.views import reject
from complaint.views import index
from complaint.views import resolved
from complaint.views import detail
urlpatterns = [
url(r'^admin/', admin.site.urls),
url(r'^show/$',show_complaints),
url(r'^reject/complaint/(\d{1,2})/$',reject),
url(r'^register/$',signup),
url(r'^$', auth_views.login, name='login'),
url(r'^logout/$', auth_views.logout, name='logout'),
url(r'^complaint/$',index),
url(r'^resolved/complaint/(\d{1,2})/$',resolved),
url(r'^complaint/(\d{1,2})/',detail),
]
|
[
"django.conf.urls.url"
] |
[((1004, 1035), 'django.conf.urls.url', 'url', (['"""^admin/"""', 'admin.site.urls'], {}), "('^admin/', admin.site.urls)\n", (1007, 1035), False, 'from django.conf.urls import url\n'), ((1042, 1073), 'django.conf.urls.url', 'url', (['"""^show/$"""', 'show_complaints'], {}), "('^show/$', show_complaints)\n", (1045, 1073), False, 'from django.conf.urls import url\n'), ((1079, 1124), 'django.conf.urls.url', 'url', (['"""^reject/complaint/(\\\\d{1,2})/$"""', 'reject'], {}), "('^reject/complaint/(\\\\d{1,2})/$', reject)\n", (1082, 1124), False, 'from django.conf.urls import url\n'), ((1129, 1155), 'django.conf.urls.url', 'url', (['"""^register/$"""', 'signup'], {}), "('^register/$', signup)\n", (1132, 1155), False, 'from django.conf.urls import url\n'), ((1161, 1202), 'django.conf.urls.url', 'url', (['"""^$"""', 'auth_views.login'], {'name': '"""login"""'}), "('^$', auth_views.login, name='login')\n", (1164, 1202), False, 'from django.conf.urls import url\n'), ((1209, 1259), 'django.conf.urls.url', 'url', (['"""^logout/$"""', 'auth_views.logout'], {'name': '"""logout"""'}), "('^logout/$', auth_views.logout, name='logout')\n", (1212, 1259), False, 'from django.conf.urls import url\n'), ((1266, 1292), 'django.conf.urls.url', 'url', (['"""^complaint/$"""', 'index'], {}), "('^complaint/$', index)\n", (1269, 1292), False, 'from django.conf.urls import url\n'), ((1298, 1347), 'django.conf.urls.url', 'url', (['"""^resolved/complaint/(\\\\d{1,2})/$"""', 'resolved'], {}), "('^resolved/complaint/(\\\\d{1,2})/$', resolved)\n", (1301, 1347), False, 'from django.conf.urls import url\n'), ((1352, 1389), 'django.conf.urls.url', 'url', (['"""^complaint/(\\\\d{1,2})/"""', 'detail'], {}), "('^complaint/(\\\\d{1,2})/', detail)\n", (1355, 1389), False, 'from django.conf.urls import url\n')]
|
from devserver.modules import DevServerModule
from devserver.utils.time import ms_from_timedelta
from devserver.settings import DEVSERVER_AUTO_PROFILE
from datetime import datetime
import functools
import gc
class ProfileSummaryModule(DevServerModule):
"""
Outputs a summary of cache events once a response is ready.
"""
logger_name = 'profile'
def process_init(self, request):
self.start = datetime.now()
def process_complete(self, request):
duration = datetime.now() - self.start
self.logger.info('Total time to render was %.2fs', ms_from_timedelta(duration) / 1000)
class LeftOversModule(DevServerModule):
"""
Outputs a summary of events the garbage collector couldn't handle.
"""
    # TODO: Not even sure this is correct, but it is the general idea
logger_name = 'profile'
def process_init(self, request):
gc.enable()
gc.set_debug(gc.DEBUG_SAVEALL)
def process_complete(self, request):
gc.collect()
self.logger.info('%s objects left in garbage', len(gc.garbage))
from django.template.defaultfilters import filesizeformat
try:
from guppy import hpy
except ImportError:
import warnings
class MemoryUseModule(DevServerModule):
def __new__(cls, *args, **kwargs):
warnings.warn('MemoryUseModule requires guppy to be installed.')
return super(MemoryUseModule, cls).__new__(cls)
else:
class MemoryUseModule(DevServerModule):
"""
        Outputs a summary of memory usage over the course of a request.
"""
logger_name = 'profile'
def __init__(self, request):
super(MemoryUseModule, self).__init__(request)
self.hpy = hpy()
self.oldh = self.hpy.heap()
self.logger.info('heap size is %s', filesizeformat(self.oldh.size))
def process_complete(self, request):
newh = self.hpy.heap()
alloch = newh - self.oldh
dealloch = self.oldh - newh
self.oldh = newh
self.logger.info('%s allocated, %s deallocated, heap size is %s', *map(filesizeformat, [alloch.size, dealloch.size, newh.size]))
try:
from line_profiler import LineProfiler
except ImportError:
import warnings
class LineProfilerModule(DevServerModule):
def __new__(cls, *args, **kwargs):
warnings.warn('LineProfilerModule requires line_profiler to be installed.')
return super(LineProfilerModule, cls).__new__(cls)
class devserver_profile(object):
def __init__(self, follow=[]):
pass
def __call__(self, func):
return func
else:
class LineProfilerModule(DevServerModule):
"""
Outputs a Line by Line profile of any @devserver_profile'd functions that were run
"""
logger_name = 'profile'
def process_view(self, request, view_func, view_args, view_kwargs):
request.devserver_profiler = LineProfiler()
request.devserver_profiler_run = False
if (DEVSERVER_AUTO_PROFILE):
_unwrap_closure_and_profile(request.devserver_profiler, view_func)
request.devserver_profiler.enable_by_count()
def process_complete(self, request):
if hasattr(request, 'devserver_profiler_run') and (DEVSERVER_AUTO_PROFILE or request.devserver_profiler_run):
from cStringIO import StringIO
out = StringIO()
if (DEVSERVER_AUTO_PROFILE):
request.devserver_profiler.disable_by_count()
request.devserver_profiler.print_stats(stream=out)
self.logger.info(out.getvalue())
def _unwrap_closure_and_profile(profiler, func):
if not hasattr(func, 'func_code'):
return
profiler.add_function(func)
if func.func_closure:
for cell in func.func_closure:
if hasattr(cell.cell_contents, 'func_code'):
_unwrap_closure_and_profile(profiler, cell.cell_contents)
class devserver_profile(object):
def __init__(self, follow=[]):
self.follow = follow
def __call__(self, func):
def profiled_func(*args, **kwargs):
request = args[0]
if hasattr(request, 'request'):
# We're decorating a Django class-based-view and the first argument is actually self:
request = args[1]
try:
request.devserver_profiler.add_function(func)
request.devserver_profiler_run = True
for f in self.follow:
request.devserver_profiler.add_function(f)
request.devserver_profiler.enable_by_count()
return func(*args, **kwargs)
finally:
request.devserver_profiler.disable_by_count()
return functools.wraps(func)(profiled_func)
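# Hedged usage sketch (not part of devserver): illustrates how a Django view
# would opt into line profiling with the devserver_profile decorator defined
# above. The view and helper below are illustrative placeholders, not real
# project code; with DEVSERVER_AUTO_PROFILE enabled the decorator is not needed.
def _expensive_helper(n):
    return sum(i * i for i in range(n))

@devserver_profile(follow=[_expensive_helper])
def demo_view(request):
    # LineProfilerModule.process_view attaches request.devserver_profiler, which
    # the decorator uses to record per-line timings for this view and for every
    # function listed in ``follow``.
    return _expensive_helper(10000)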
|
[
"guppy.hpy",
"gc.enable",
"line_profiler.LineProfiler",
"cStringIO.StringIO",
"gc.collect",
"gc.set_debug",
"devserver.utils.time.ms_from_timedelta",
"functools.wraps",
"django.template.defaultfilters.filesizeformat",
"warnings.warn",
"datetime.datetime.now"
] |
[((425, 439), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (437, 439), False, 'from datetime import datetime\n'), ((899, 910), 'gc.enable', 'gc.enable', ([], {}), '()\n', (908, 910), False, 'import gc\n'), ((919, 949), 'gc.set_debug', 'gc.set_debug', (['gc.DEBUG_SAVEALL'], {}), '(gc.DEBUG_SAVEALL)\n', (931, 949), False, 'import gc\n'), ((1000, 1012), 'gc.collect', 'gc.collect', ([], {}), '()\n', (1010, 1012), False, 'import gc\n'), ((501, 515), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (513, 515), False, 'from datetime import datetime\n'), ((1737, 1742), 'guppy.hpy', 'hpy', ([], {}), '()\n', (1740, 1742), False, 'from guppy import hpy\n'), ((3015, 3029), 'line_profiler.LineProfiler', 'LineProfiler', ([], {}), '()\n', (3027, 3029), False, 'from line_profiler import LineProfiler\n'), ((589, 616), 'devserver.utils.time.ms_from_timedelta', 'ms_from_timedelta', (['duration'], {}), '(duration)\n', (606, 616), False, 'from devserver.utils.time import ms_from_timedelta\n'), ((1316, 1380), 'warnings.warn', 'warnings.warn', (['"""MemoryUseModule requires guppy to be installed."""'], {}), "('MemoryUseModule requires guppy to be installed.')\n", (1329, 1380), False, 'import warnings\n'), ((1831, 1861), 'django.template.defaultfilters.filesizeformat', 'filesizeformat', (['self.oldh.size'], {}), '(self.oldh.size)\n', (1845, 1861), False, 'from django.template.defaultfilters import filesizeformat\n'), ((2385, 2460), 'warnings.warn', 'warnings.warn', (['"""LineProfilerModule requires line_profiler to be installed."""'], {}), "('LineProfilerModule requires line_profiler to be installed.')\n", (2398, 2460), False, 'import warnings\n'), ((3503, 3513), 'cStringIO.StringIO', 'StringIO', ([], {}), '()\n', (3511, 3513), False, 'from cStringIO import StringIO\n'), ((5004, 5025), 'functools.wraps', 'functools.wraps', (['func'], {}), '(func)\n', (5019, 5025), False, 'import functools\n')]
|
# -*- coding: utf-8 -*-
import hashlib
import json
import pycurl
from ..base.multi_account import MultiAccount
class LinkifierCom(MultiAccount):
__name__ = "LinkifierCom"
__type__ = "account"
__version__ = "0.01"
__status__ = "testing"
__pyload_version__ = "0.5"
__description__ = """Linkifier.com account plugin"""
__license__ = "GPLv3"
__authors__ = [("GammaC0de", "nitzo2001[AT]yahoo[DOT]com")]
__config__ = [
("mh_mode", "all;listed;unlisted", "Filter hosters to use", "all"),
("mh_list", "str", "Hoster list (comma separated)", ""),
("mh_interval", "int", "Reload interval in hours", 12),
]
API_KEY = "<KEY>"
API_URL = "https://api.linkifier.com/downloadapi.svc/"
def api_response(self, method, user, password, **kwargs):
post = {
"login": user,
"md5Pass": hashlib.md5(password.encode()).hexdigest(),
"apiKey": self.API_KEY,
}
post.update(kwargs)
self.req.http.c.setopt(
pycurl.HTTPHEADER, ["Content-Type: application/json; charset=utf-8"]
)
res = json.loads(self.load(self.API_URL + method, post=json.dumps(post)))
self.req.http.c.setopt(
pycurl.HTTPHEADER, ["Content-Type: text/html; charset=utf-8"]
)
return res
def grab_hosters(self, user, password, data):
json_data = self.api_response("hosters", user, password)
if json_data["hasErrors"]:
self.log_warning(json_data["ErrorMSG"] or "Unknown error")
return []
return [
x["hostername"]
for x in json_data["hosters"]
if x["hostername"] and x["isActive"]
]
def grab_info(self, user, password, data):
json_data = self.api_response("user", user, password)
trafficleft = json_data["extraTraffic"]
validuntil = float(json_data["expirydate"]) // 1000
return {
"validuntil": validuntil,
"trafficleft": -1
if trafficleft.lower() == "unlimited"
else int(trafficleft),
"premium": True,
}
def signin(self, user, password, data):
json_data = self.api_response("user", user, password)
if json_data.get("hasErrors", True) or not json_data.get("isActive", True):
self.log_warning(json_data["ErrorMSG"] or "Unknown error")
self.fail_login()
|
[
"json.dumps"
] |
[((1184, 1200), 'json.dumps', 'json.dumps', (['post'], {}), '(post)\n', (1194, 1200), False, 'import json\n')]
|
# This code is written for a dynamic step size: the step gets smaller as c0 approaches 200.
#Author: <NAME>, Senior Research Fellow, University of Delhi
#Date: 5-07-2021
from math import *
import numpy as np
c0=50.0
for x in np.arange(c0,580,10):
t=10*(abs(200.1-c0)/200.1)*abs(np.log(0.3/abs(c0-200.1)))
y=1.0/(c0-200.0**2)**2
print(str(c0)+" "+str(y))
c0+=t
if c0> 198 and c0<202:
c0+=1
|
[
"numpy.arange"
] |
[((239, 261), 'numpy.arange', 'np.arange', (['c0', '(580)', '(10)'], {}), '(c0, 580, 10)\n', (248, 261), True, 'import numpy as np\n')]
|
import theano
import argparse
_floatX = theano.config.floatX
def str2bool(v):
return v.lower() in ('yes', 'true', 't', '1', 'y')
def get_args():
parser = argparse.ArgumentParser()
parser.register('type', 'bool', str2bool)
# Basics
parser.add_argument('--debug',
type='bool',
default=False,
help='whether it is debug mode')
parser.add_argument('--test_only',
type='bool',
default=False,
help='test_only: no need to run training process')
parser.add_argument('--random_seed',
type=int,
default=1013,
help='Random seed')
# Data file
parser.add_argument('--train_file',
type=str,
default=None,
help='Training file')
parser.add_argument('--dev_file',
type=str,
default=None,
help='Development file')
parser.add_argument('--pre_trained',
type=str,
default=None,
help='Pre-trained model.')
parser.add_argument('--model_file',
type=str,
default='model.pkl.gz',
help='Model file to save')
parser.add_argument('--log_file',
type=str,
default=None,
help='Log file')
parser.add_argument('--embedding_file',
type=str,
default=None,
help='Word embedding file')
parser.add_argument('--max_dev',
type=int,
default=None,
help='Maximum number of dev examples to evaluate on')
parser.add_argument('--relabeling',
type='bool',
default=True,
help='Whether to relabel the entities when loading the data')
# Model details
parser.add_argument('--embedding_size',
type=int,
default=None,
help='Default embedding size if embedding_file is not given')
parser.add_argument('--hidden_size',
type=int,
default=128,
help='Hidden size of RNN units')
parser.add_argument('--bidir',
type='bool',
default=True,
help='bidir: whether to use a bidirectional RNN')
parser.add_argument('--num_layers',
type=int,
default=1,
help='Number of RNN layers')
parser.add_argument('--rnn_type',
type=str,
default='gru',
help='RNN type: lstm or gru (default)')
parser.add_argument('--att_func',
type=str,
default='bilinear',
help='Attention function: bilinear (default) or mlp or avg or last or dot')
# Optimization details
parser.add_argument('--batch_size',
type=int,
default=32,
help='Batch size')
parser.add_argument('--num_epoches',
type=int,
default=100,
help='Number of epoches')
parser.add_argument('--eval_iter',
type=int,
default=100,
help='Evaluation on dev set after K updates')
parser.add_argument('--dropout_rate',
type=float,
default=0.2,
help='Dropout rate')
parser.add_argument('--optimizer',
type=str,
default='sgd',
help='Optimizer: sgd (default) or adam or rmsprop')
parser.add_argument('--learning_rate', '-lr',
type=float,
default=0.1,
help='Learning rate for SGD')
parser.add_argument('--grad_clipping',
type=float,
default=10.0,
help='Gradient clipping')
return parser.parse_args()
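# Hedged usage sketch (not part of the original module): parse the command-line
# flags and echo a few of the resulting hyper-parameters.
if __name__ == '__main__':
    args = get_args()
    print('hidden_size=%d optimizer=%s lr=%s' % (args.hidden_size, args.optimizer, args.learning_rate))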
|
[
"argparse.ArgumentParser"
] |
[((169, 194), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (192, 194), False, 'import argparse\n')]
|
# -*- coding: utf-8 -*-
# Generated by Django 1.9.1 on 2016-01-30 00:57
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('measure_mate', '0009_auto_20160124_1245'),
]
operations = [
migrations.AddField(
model_name='measurement',
name='target_rating',
field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, related_name='target_measurements', to='measure_mate.Rating'),
),
]
|
[
"django.db.models.ForeignKey"
] |
[((448, 605), 'django.db.models.ForeignKey', 'models.ForeignKey', ([], {'blank': '(True)', 'null': '(True)', 'on_delete': 'django.db.models.deletion.CASCADE', 'related_name': '"""target_measurements"""', 'to': '"""measure_mate.Rating"""'}), "(blank=True, null=True, on_delete=django.db.models.\n deletion.CASCADE, related_name='target_measurements', to=\n 'measure_mate.Rating')\n", (465, 605), False, 'from django.db import migrations, models\n')]
|
'''
Created on Aug 26, 2014
@author: preethi
'''
import os
import sys
import shutil
sys.path.insert(0,os.path.abspath(os.path.dirname(__file__) + '/' + '../..')) #trick to make it run from CLI
import unittest
import sqlalchemy
from sqlalchemy.orm import sessionmaker
import pydot
from jnpr.openclos.model import Pod, Device, InterfaceDefinition, InterfaceLogical, Interface, Base
from jnpr.openclos.writer import WriterBase, ConfigWriter, CablingPlanWriter
from jnpr.openclos.util import configLocation
from jnpr.openclos.dao import Dao
from test_model import createPod, createPodDevice
from flexmock import flexmock
class TestWriterBase(unittest.TestCase):
def setUp(self):
self.conf = {}
self.conf['outputDir'] = os.path.join(os.path.dirname(os.path.abspath(__file__)), 'out')
self.conf['dbUrl'] = 'sqlite:///'
self.conf['DOT'] = {'ranksep' : '5 equally', 'colors': ['red', 'green', 'blue']}
self.conf['deviceFamily'] = {
"QFX5100-24Q": {
"ports": 'et-0/0/[0-23]'
},
"QFX5100-48S": {
"uplinkPorts": 'et-0/0/[48-53]',
"downlinkPorts": 'xe-0/0/[0-47]'
}
}
self.dao = Dao(self.conf)
''' Deletes 'out' folder under test dir'''
shutil.rmtree(self.conf['outputDir'], ignore_errors=True)
def tearDown(self):
''' Deletes 'out' folder under test dir'''
shutil.rmtree(self.conf['outputDir'], ignore_errors=True)
class TestConfigWriter(TestWriterBase):
def testWrite(self):
pod = createPod('pod1', self.dao.Session())
device = Device('test_device', "",'admin', 'admin', 'spine', "", "", pod)
configWriter = ConfigWriter(self.conf, pod, self.dao)
configWriter.write(device, "dummy config")
self.assertTrue(os.path.exists(configWriter.outputDir + '/test_device.conf'))
class TestCablingPlanWriter(TestWriterBase):
def testInitWithTemplate(self):
from jinja2 import TemplateNotFound
pod = createPod('pod1', self.dao.Session())
cablingPlanWriter = CablingPlanWriter(self.conf, pod, self.dao)
self.assertIsNotNone(cablingPlanWriter.template)
with self.assertRaises(TemplateNotFound) as e:
cablingPlanWriter.templateEnv.get_template('unknown-template')
self.assertTrue('unknown-template' in e.exception.message)
def testCreateDeviceInGraph(self):
testDeviceTopology = pydot.Dot(graph_type='graph', )
pod = createPod('pod1', self.dao.Session())
cablingPlanWriter = CablingPlanWriter(self.conf, pod, self.dao)
device = createPodDevice(self.dao.Session(), 'Preethi', pod)
device.id = 'preethi-1'
cablingPlanWriter.createDeviceInGraph(device.name, device, testDeviceTopology)
path = cablingPlanWriter.outputDir + '/testDevicelabel.dot'
testDeviceTopology.write_raw(path)
data = open(path, 'r').read()
#check the generated label for device
self.assertTrue('"preethi-1" [shape=record, label=Preethi];' in data)
def testcreateLinksInGraph(self):
testLinksInTopology = pydot.Dot(graph_type='graph')
pod = createPod('pod1', self.dao.Session())
cablingPlanWriter = CablingPlanWriter(self.conf, pod, self.dao)
deviceOne = Device('spine01',"", 'admin', 'admin', 'spine', "", "", pod)
deviceOne.id = 'spine01'
IF1 = InterfaceDefinition('IF1', deviceOne, 'downlink')
IF1.id = 'IF1'
deviceTwo = Device('leaf01',"", 'admin', 'admin', 'leaf', "", "", pod)
deviceTwo.id = 'leaf01'
IF21 = InterfaceDefinition('IF1', deviceTwo, 'uplink')
IF21.id = 'IF21'
IF1.peer = IF21
IF21.peer = IF1
linkLabel = {deviceOne.id + ':' + IF1.id : deviceTwo.id + ':' + IF21.id}
cablingPlanWriter.createLinksInGraph(linkLabel, testLinksInTopology, 'red')
path = cablingPlanWriter.outputDir + '/testLinklabel.dot'
testLinksInTopology.write_raw(path)
data = open(path, 'r').read()
#check generated label for links
self.assertTrue('spine01:IF1 -- leaf01:IF21 [color=red];' in data)
def testcreateDOTFile(self):
# create pod
# create device
#create interface
session = self.dao.Session()
pod = createPod('pod1', session)
cablingPlanWriter = CablingPlanWriter(self.conf, pod, self.dao)
deviceOne = Device('spine01',"", 'admin', 'admin', 'spine', "", "", pod)
session.add(deviceOne)
IF1 = InterfaceDefinition('IF1', deviceOne, 'downlink')
session.add(IF1)
IF2 = InterfaceDefinition('IF2', deviceOne, 'downlink')
session.add(IF2)
deviceTwo = Device('leaf01',"", 'admin', 'admin', 'leaf', "", "", pod)
session.add(deviceTwo)
IF21 = InterfaceDefinition('IF1', deviceTwo, 'uplink')
session.add(IF21)
IF22 = InterfaceDefinition('IF2', deviceTwo, 'uplink')
session.add(IF22)
IF23 = InterfaceDefinition('IF3', deviceTwo, 'downlink')
session.add(IF23)
IF24 = InterfaceDefinition('IF3', deviceTwo, 'downlink')
session.add(IF24)
deviceThree = Device('Access01', "",'admin', 'admin', 'leaf', "", "", pod)
session.add(deviceThree)
IF31 = InterfaceDefinition('IF1', deviceThree, 'uplink')
session.add(IF31)
IF32 = InterfaceDefinition('IF2', deviceThree, 'uplink')
session.add(IF32)
IF1.peer = IF21
IF2.peer = IF22
IF21.peer = IF1
IF22.peer = IF2
IF23.peer = IF31
IF31.peer = IF23
IF24.peer = IF32
IF32.peer = IF24
session.commit()
devices = session.query(Device).all()
#check the DOT file is generated
cablingPlanWriter.writeDOT()
data = open(cablingPlanWriter.outputDir + '/cablingPlan.dot', 'r').read()
#check generated label for links
self.assertTrue('splines=polyline;' in data)
|
[
"os.path.abspath",
"jnpr.openclos.model.InterfaceDefinition",
"jnpr.openclos.writer.CablingPlanWriter",
"os.path.dirname",
"os.path.exists",
"pydot.Dot",
"jnpr.openclos.dao.Dao",
"test_model.createPod",
"jnpr.openclos.writer.ConfigWriter",
"shutil.rmtree",
"jnpr.openclos.model.Device"
] |
[((1228, 1242), 'jnpr.openclos.dao.Dao', 'Dao', (['self.conf'], {}), '(self.conf)\n', (1231, 1242), False, 'from jnpr.openclos.dao import Dao\n'), ((1302, 1359), 'shutil.rmtree', 'shutil.rmtree', (["self.conf['outputDir']"], {'ignore_errors': '(True)'}), "(self.conf['outputDir'], ignore_errors=True)\n", (1315, 1359), False, 'import shutil\n'), ((1444, 1501), 'shutil.rmtree', 'shutil.rmtree', (["self.conf['outputDir']"], {'ignore_errors': '(True)'}), "(self.conf['outputDir'], ignore_errors=True)\n", (1457, 1501), False, 'import shutil\n'), ((1646, 1711), 'jnpr.openclos.model.Device', 'Device', (['"""test_device"""', '""""""', '"""admin"""', '"""admin"""', '"""spine"""', '""""""', '""""""', 'pod'], {}), "('test_device', '', 'admin', 'admin', 'spine', '', '', pod)\n", (1652, 1711), False, 'from jnpr.openclos.model import Pod, Device, InterfaceDefinition, InterfaceLogical, Interface, Base\n'), ((1735, 1773), 'jnpr.openclos.writer.ConfigWriter', 'ConfigWriter', (['self.conf', 'pod', 'self.dao'], {}), '(self.conf, pod, self.dao)\n', (1747, 1773), False, 'from jnpr.openclos.writer import WriterBase, ConfigWriter, CablingPlanWriter\n'), ((2122, 2165), 'jnpr.openclos.writer.CablingPlanWriter', 'CablingPlanWriter', (['self.conf', 'pod', 'self.dao'], {}), '(self.conf, pod, self.dao)\n', (2139, 2165), False, 'from jnpr.openclos.writer import WriterBase, ConfigWriter, CablingPlanWriter\n'), ((2497, 2526), 'pydot.Dot', 'pydot.Dot', ([], {'graph_type': '"""graph"""'}), "(graph_type='graph')\n", (2506, 2526), False, 'import pydot\n'), ((2609, 2652), 'jnpr.openclos.writer.CablingPlanWriter', 'CablingPlanWriter', (['self.conf', 'pod', 'self.dao'], {}), '(self.conf, pod, self.dao)\n', (2626, 2652), False, 'from jnpr.openclos.writer import WriterBase, ConfigWriter, CablingPlanWriter\n'), ((3183, 3212), 'pydot.Dot', 'pydot.Dot', ([], {'graph_type': '"""graph"""'}), "(graph_type='graph')\n", (3192, 3212), False, 'import pydot\n'), ((3293, 3336), 'jnpr.openclos.writer.CablingPlanWriter', 'CablingPlanWriter', (['self.conf', 'pod', 'self.dao'], {}), '(self.conf, pod, self.dao)\n', (3310, 3336), False, 'from jnpr.openclos.writer import WriterBase, ConfigWriter, CablingPlanWriter\n'), ((3357, 3418), 'jnpr.openclos.model.Device', 'Device', (['"""spine01"""', '""""""', '"""admin"""', '"""admin"""', '"""spine"""', '""""""', '""""""', 'pod'], {}), "('spine01', '', 'admin', 'admin', 'spine', '', '', pod)\n", (3363, 3418), False, 'from jnpr.openclos.model import Pod, Device, InterfaceDefinition, InterfaceLogical, Interface, Base\n'), ((3466, 3515), 'jnpr.openclos.model.InterfaceDefinition', 'InterfaceDefinition', (['"""IF1"""', 'deviceOne', '"""downlink"""'], {}), "('IF1', deviceOne, 'downlink')\n", (3485, 3515), False, 'from jnpr.openclos.model import Pod, Device, InterfaceDefinition, InterfaceLogical, Interface, Base\n'), ((3568, 3627), 'jnpr.openclos.model.Device', 'Device', (['"""leaf01"""', '""""""', '"""admin"""', '"""admin"""', '"""leaf"""', '""""""', '""""""', 'pod'], {}), "('leaf01', '', 'admin', 'admin', 'leaf', '', '', pod)\n", (3574, 3627), False, 'from jnpr.openclos.model import Pod, Device, InterfaceDefinition, InterfaceLogical, Interface, Base\n'), ((3675, 3722), 'jnpr.openclos.model.InterfaceDefinition', 'InterfaceDefinition', (['"""IF1"""', 'deviceTwo', '"""uplink"""'], {}), "('IF1', deviceTwo, 'uplink')\n", (3694, 3722), False, 'from jnpr.openclos.model import Pod, Device, InterfaceDefinition, InterfaceLogical, Interface, Base\n'), ((4399, 4425), 'test_model.createPod', 'createPod', (['"""pod1"""', 
'session'], {}), "('pod1', session)\n", (4408, 4425), False, 'from test_model import createPod, createPodDevice\n'), ((4454, 4497), 'jnpr.openclos.writer.CablingPlanWriter', 'CablingPlanWriter', (['self.conf', 'pod', 'self.dao'], {}), '(self.conf, pod, self.dao)\n', (4471, 4497), False, 'from jnpr.openclos.writer import WriterBase, ConfigWriter, CablingPlanWriter\n'), ((4518, 4579), 'jnpr.openclos.model.Device', 'Device', (['"""spine01"""', '""""""', '"""admin"""', '"""admin"""', '"""spine"""', '""""""', '""""""', 'pod'], {}), "('spine01', '', 'admin', 'admin', 'spine', '', '', pod)\n", (4524, 4579), False, 'from jnpr.openclos.model import Pod, Device, InterfaceDefinition, InterfaceLogical, Interface, Base\n'), ((4625, 4674), 'jnpr.openclos.model.InterfaceDefinition', 'InterfaceDefinition', (['"""IF1"""', 'deviceOne', '"""downlink"""'], {}), "('IF1', deviceOne, 'downlink')\n", (4644, 4674), False, 'from jnpr.openclos.model import Pod, Device, InterfaceDefinition, InterfaceLogical, Interface, Base\n'), ((4714, 4763), 'jnpr.openclos.model.InterfaceDefinition', 'InterfaceDefinition', (['"""IF2"""', 'deviceOne', '"""downlink"""'], {}), "('IF2', deviceOne, 'downlink')\n", (4733, 4763), False, 'from jnpr.openclos.model import Pod, Device, InterfaceDefinition, InterfaceLogical, Interface, Base\n'), ((4818, 4877), 'jnpr.openclos.model.Device', 'Device', (['"""leaf01"""', '""""""', '"""admin"""', '"""admin"""', '"""leaf"""', '""""""', '""""""', 'pod'], {}), "('leaf01', '', 'admin', 'admin', 'leaf', '', '', pod)\n", (4824, 4877), False, 'from jnpr.openclos.model import Pod, Device, InterfaceDefinition, InterfaceLogical, Interface, Base\n'), ((4924, 4971), 'jnpr.openclos.model.InterfaceDefinition', 'InterfaceDefinition', (['"""IF1"""', 'deviceTwo', '"""uplink"""'], {}), "('IF1', deviceTwo, 'uplink')\n", (4943, 4971), False, 'from jnpr.openclos.model import Pod, Device, InterfaceDefinition, InterfaceLogical, Interface, Base\n'), ((5013, 5060), 'jnpr.openclos.model.InterfaceDefinition', 'InterfaceDefinition', (['"""IF2"""', 'deviceTwo', '"""uplink"""'], {}), "('IF2', deviceTwo, 'uplink')\n", (5032, 5060), False, 'from jnpr.openclos.model import Pod, Device, InterfaceDefinition, InterfaceLogical, Interface, Base\n'), ((5102, 5151), 'jnpr.openclos.model.InterfaceDefinition', 'InterfaceDefinition', (['"""IF3"""', 'deviceTwo', '"""downlink"""'], {}), "('IF3', deviceTwo, 'downlink')\n", (5121, 5151), False, 'from jnpr.openclos.model import Pod, Device, InterfaceDefinition, InterfaceLogical, Interface, Base\n'), ((5193, 5242), 'jnpr.openclos.model.InterfaceDefinition', 'InterfaceDefinition', (['"""IF3"""', 'deviceTwo', '"""downlink"""'], {}), "('IF3', deviceTwo, 'downlink')\n", (5212, 5242), False, 'from jnpr.openclos.model import Pod, Device, InterfaceDefinition, InterfaceLogical, Interface, Base\n'), ((5300, 5361), 'jnpr.openclos.model.Device', 'Device', (['"""Access01"""', '""""""', '"""admin"""', '"""admin"""', '"""leaf"""', '""""""', '""""""', 'pod'], {}), "('Access01', '', 'admin', 'admin', 'leaf', '', '', pod)\n", (5306, 5361), False, 'from jnpr.openclos.model import Pod, Device, InterfaceDefinition, InterfaceLogical, Interface, Base\n'), ((5410, 5459), 'jnpr.openclos.model.InterfaceDefinition', 'InterfaceDefinition', (['"""IF1"""', 'deviceThree', '"""uplink"""'], {}), "('IF1', deviceThree, 'uplink')\n", (5429, 5459), False, 'from jnpr.openclos.model import Pod, Device, InterfaceDefinition, InterfaceLogical, Interface, Base\n'), ((5501, 5550), 'jnpr.openclos.model.InterfaceDefinition', 
'InterfaceDefinition', (['"""IF2"""', 'deviceThree', '"""uplink"""'], {}), "('IF2', deviceThree, 'uplink')\n", (5520, 5550), False, 'from jnpr.openclos.model import Pod, Device, InterfaceDefinition, InterfaceLogical, Interface, Base\n'), ((1849, 1909), 'os.path.exists', 'os.path.exists', (["(configWriter.outputDir + '/test_device.conf')"], {}), "(configWriter.outputDir + '/test_device.conf')\n", (1863, 1909), False, 'import os\n'), ((119, 144), 'os.path.dirname', 'os.path.dirname', (['__file__'], {}), '(__file__)\n', (134, 144), False, 'import os\n'), ((768, 793), 'os.path.abspath', 'os.path.abspath', (['__file__'], {}), '(__file__)\n', (783, 793), False, 'import os\n')]
|
# -*- coding: utf-8 -*-
from flask import Flask, request, make_response
import requests
import json
from core import Translation, RequestJson, PBMT
from bean import log
app = Flask(__name__)
def buildResponse(code, msg):
json_data = dict()
json_data['code'] = code
json_data['message'] = msg
response = make_response(json.dumps(json_data, sort_keys=True))
response.headers['Content-type'] = 'application/json; charset=utf-8'
return response
'''
=================Translation=====================
method: POST
headers: Authorization: [your api key]
type: json
{
"text":[text],
"taget":[target language]
}
return: json
{
"code":[status code],
"message":[translation text]
}
'''
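# A minimal client-side sketch (an assumption, not part of the original service), showing
# how the translate endpoint above might be called with the `requests` library imported
# at the top of this file. The host/port mirror the app.run() call at the bottom; the
# api_key, text and target values are placeholders whose exact format depends on
# core.Translation / core.RequestJson.
def _example_translate_call(api_key, text, target, host='http://localhost:81'):
    resp = requests.post(host + '/languages/api/translate',
                         headers={'Authorization': api_key},
                         json={'text': text, 'target': target})
    return resp.json()  # {'code': <status>, 'message': <translation or error message>}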
@app.route('/languages/api/translate', methods=['GET', 'POST'])
def translate():
ip = request.remote_addr
if request.method != 'POST':
return buildResponse(403, "Method Not Allowed. ")
else:
try:
token = request.headers['Authorization']
except Exception:
return buildResponse(403, "API key not valid. Please pass a valid API key. ")
tobj = Translation(token)
jsondict = request.get_json()
try:
rjson = RequestJson(**jsondict)
except Exception:
log.writelogs(token, ip, '[Failed] Required field error. ')
return buildResponse(400, "Required field error. ")
rlist = tobj.translate(text=rjson.text, target=rjson.target)
if rlist[0] == 200:
log.writelogs(token, ip, '[Succeed]')
else:
log.writelogs(token, ip, '[Failed] '+rlist[1])
return buildResponse(code=rlist[0], msg=rlist[1])
'''
=================Logs=====================
method: GET
headers: Authorization: [your api key]
type: NULL
return: json
{
"code":[status code],
"message":[calling log]
}
'''
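# A minimal client-side sketch (an assumption, not part of the original service), showing
# how the log endpoint might be queried. The returned 'message' is a list of 4-item
# records whose exact contents depend on bean.log.getlogs.
def _example_logs_call(api_key, host='http://localhost:81'):
    resp = requests.get(host + '/languages/api/logs',
                        headers={'Authorization': api_key})
    return resp.json()  # {'code': <status>, 'message': [...log records...]}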
@app.route('/languages/api/logs', methods=['GET', 'POST'])
def getlog():
if request.method != 'GET':
return buildResponse(403, "Method Not Allowed. ")
else:
try:
token = request.headers['Authorization']
logs = log.getlogs(token)
if logs:
logs = [(str(lo[0]), lo[1], lo[2], lo[3]) for lo in logs]
return buildResponse(200, logs)
elif logs == []:
return buildResponse(200, [])
elif logs is None:
return buildResponse(403, "API key not valid. Please pass a valid API key. ")
except Exception:
return buildResponse(500, "Query log exception. ")
@app.route('/languages/support', methods=['GET', 'POST'])
def support_languages():
if request.method != 'GET':
return buildResponse(403, "Method Not Allowed. ")
else:
return buildResponse(200, PBMT)
if __name__ == '__main__':
app.run('0.0.0.0', 81, debug=True)
|
[
"flask.Flask",
"json.dumps",
"core.Translation",
"bean.log.writelogs",
"flask.request.get_json",
"bean.log.getlogs",
"core.RequestJson"
] |
[((177, 192), 'flask.Flask', 'Flask', (['__name__'], {}), '(__name__)\n', (182, 192), False, 'from flask import Flask, request, make_response\n'), ((337, 374), 'json.dumps', 'json.dumps', (['json_data'], {'sort_keys': '(True)'}), '(json_data, sort_keys=True)\n', (347, 374), False, 'import json\n'), ((1287, 1305), 'core.Translation', 'Translation', (['token'], {}), '(token)\n', (1298, 1305), False, 'from core import Translation, RequestJson, PBMT\n'), ((1325, 1343), 'flask.request.get_json', 'request.get_json', ([], {}), '()\n', (1341, 1343), False, 'from flask import Flask, request, make_response\n'), ((1377, 1400), 'core.RequestJson', 'RequestJson', ([], {}), '(**jsondict)\n', (1388, 1400), False, 'from core import Translation, RequestJson, PBMT\n'), ((1672, 1709), 'bean.log.writelogs', 'log.writelogs', (['token', 'ip', '"""[Succeed]"""'], {}), "(token, ip, '[Succeed]')\n", (1685, 1709), False, 'from bean import log\n'), ((1736, 1784), 'bean.log.writelogs', 'log.writelogs', (['token', 'ip', "('[Failed] ' + rlist[1])"], {}), "(token, ip, '[Failed] ' + rlist[1])\n", (1749, 1784), False, 'from bean import log\n'), ((2386, 2404), 'bean.log.getlogs', 'log.getlogs', (['token'], {}), '(token)\n', (2397, 2404), False, 'from bean import log\n'), ((1439, 1498), 'bean.log.writelogs', 'log.writelogs', (['token', 'ip', '"""[Failed] Required field error. """'], {}), "(token, ip, '[Failed] Required field error. ')\n", (1452, 1498), False, 'from bean import log\n')]
|
# Copyright (c) 2003-2019 by <NAME>
#
# TreeCorr is free software: redistribution and use in source and binary forms,
# with or without modification, are permitted provided that the following
# conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this
# list of conditions, and the disclaimer given in the accompanying LICENSE
# file.
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions, and the disclaimer given in the documentation
# and/or other materials provided with the distribution.
from __future__ import print_function
import numpy as np
import os
import time
import coord
import warnings
import treecorr
from test_helper import get_from_wiki, CaptureLog, assert_raises, profile, timer
@timer
def test_dessv():
try:
import fitsio
except ImportError:
print('Skipping dessv test, since fitsio is not installed')
return
#treecorr.set_omp_threads(1);
get_from_wiki('des_sv.fits')
file_name = os.path.join('data','des_sv.fits')
cat = treecorr.Catalog(file_name, ra_col='ra', dec_col='dec', ra_units='deg', dec_units='deg')
# Use an odd number to make sure we force some of the shuffle bits in InitializeCenters
# to happen.
npatch = 43
field = cat.getNField(max_top=5)
t0 = time.time()
patches, cen = field.run_kmeans(npatch)
t1 = time.time()
print('patches = ',np.unique(patches))
assert len(patches) == cat.ntot
assert min(patches) == 0
assert max(patches) == npatch-1
# Check the returned center to a direct calculation.
xyz = np.array([cat.x, cat.y, cat.z]).T
direct_cen = np.array([xyz[patches==i].mean(axis=0) for i in range(npatch)])
direct_cen /= np.sqrt(np.sum(direct_cen**2,axis=1)[:,np.newaxis])
np.testing.assert_allclose(cen, direct_cen, atol=1.e-3)
# KMeans minimizes the total inertia.
# Check this value and the rms size, which should also be quite small.
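    # (Descriptive note: "inertia" here is the usual k-means within-patch sum of squares,
    #  I_i = sum over points j in patch i of |x_j - c_i|^2, computed below from the
    #  unit-sphere positions xyz and the patch centers cen; "size" is the rms of those
    #  point-to-center distances.)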
inertia = np.array([np.sum((xyz[patches==i] - cen[i])**2) for i in range(npatch)])
sizes = np.array([np.mean((xyz[patches==i] - cen[i])**2) for i in range(npatch)])**0.5
sizes *= 180. / np.pi * 60. # convert to arcmin
counts = np.array([np.sum(patches==i) for i in range(npatch)])
print('With standard algorithm:')
print('time = ',t1-t0)
print('total inertia = ',np.sum(inertia))
print('mean inertia = ',np.mean(inertia))
print('rms inertia = ',np.std(inertia))
print('mean size = ',np.mean(sizes))
print('rms size = ',np.std(sizes))
assert np.sum(inertia) < 200. # This is specific to this particular field and npatch.
assert np.std(inertia) < 0.3 * np.mean(inertia) # rms is usually < 0.2 * mean
assert np.std(sizes) < 0.1 * np.mean(sizes) # sizes have even less spread usually.
# Should all have similar number of points. Nothing is required here though.
print('mean counts = ',np.mean(counts))
print('min counts = ',np.min(counts))
print('max counts = ',np.max(counts))
# Check the alternate algorithm. rms inertia should be lower.
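    # (Assumption based on the TreeCorr documentation: alt=True switches to an alternate
    #  assignment algorithm that tries to equalize the per-patch inertia, minimizing its
    #  spread rather than its total, which is why the rms is expected to drop while the
    #  total inertia stays about the same.)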
t0 = time.time()
patches, cen = field.run_kmeans(npatch, alt=True)
t1 = time.time()
assert len(patches) == cat.ntot
assert min(patches) == 0
assert max(patches) == npatch-1
inertia = np.array([np.sum((xyz[patches==i] - cen[i])**2) for i in range(npatch)])
sizes = np.array([np.mean((xyz[patches==i] - cen[i])**2) for i in range(npatch)])**0.5
sizes *= 180. / np.pi * 60. # convert to arcmin
counts = np.array([np.sum(patches==i) for i in range(npatch)])
print('With alternate algorithm:')
print('time = ',t1-t0)
print('total inertia = ',np.sum(inertia))
print('mean inertia = ',np.mean(inertia))
print('rms inertia = ',np.std(inertia))
print('mean size = ',np.mean(sizes))
print('rms size = ',np.std(sizes))
assert np.sum(inertia) < 200. # Total shouldn't increase much. (And often decreases.)
assert np.std(inertia) < 0.15 * np.mean(inertia) # rms should be even smaller here.
assert np.std(sizes) < 0.1 * np.mean(sizes) # This is only a little bit smaller.
# This doesn't keep the counts as equal as the standard algorithm.
print('mean counts = ',np.mean(counts))
print('min counts = ',np.min(counts))
print('max counts = ',np.max(counts))
# Finally, use a field with lots of top level cells to check the other branch in
# InitializeCenters.
field = cat.getNField(min_top=10)
t0 = time.time()
patches, cen = field.run_kmeans(npatch)
t1 = time.time()
assert len(patches) == cat.ntot
assert min(patches) == 0
assert max(patches) == npatch-1
inertia = np.array([np.sum((xyz[patches==i] - cen[i])**2) for i in range(npatch)])
sizes = np.array([np.mean((xyz[patches==i] - cen[i])**2) for i in range(npatch)])**0.5
sizes *= 180. / np.pi * 60. # convert to arcmin
counts = np.array([np.sum(patches==i) for i in range(npatch)])
# This doesn't give as good an initialization, so these are a bit worse usually.
print('With min_top=10:')
print('time = ',t1-t0)
print('total inertia = ',np.sum(inertia))
print('mean inertia = ',np.mean(inertia))
print('rms inertia = ',np.std(inertia))
print('mean size = ',np.mean(sizes))
print('rms size = ',np.std(sizes))
assert np.sum(inertia) < 210.
assert np.std(inertia) < 0.4 * np.mean(inertia) # I've seen over 0.3 x mean here.
assert np.std(sizes) < 0.15 * np.mean(sizes)
print('mean counts = ',np.mean(counts))
print('min counts = ',np.min(counts))
print('max counts = ',np.max(counts))
@timer
def test_radec():
# Very similar to the above, but with a random set of points, so it will run even
# if the user doesn't have fitsio installed.
# In addition, we add weights to make sure that works.
ngal = 100000
s = 10.
rng = np.random.RandomState(8675309)
x = rng.normal(0,s, (ngal,) )
y = rng.normal(0,s, (ngal,) ) + 100 # Put everything at large y, so smallish angle on sky
z = rng.normal(0,s, (ngal,) )
w = rng.random_sample(ngal)
ra, dec = coord.CelestialCoord.xyz_to_radec(x,y,z)
print('minra = ',np.min(ra) * coord.radians / coord.degrees)
print('maxra = ',np.max(ra) * coord.radians / coord.degrees)
print('mindec = ',np.min(dec) * coord.radians / coord.degrees)
print('maxdec = ',np.max(dec) * coord.radians / coord.degrees)
cat = treecorr.Catalog(ra=ra, dec=dec, ra_units='rad', dec_units='rad', w=w)
npatch = 111
field = cat.getNField()
t0 = time.time()
p, cen = field.run_kmeans(npatch)
t1 = time.time()
print('patches = ',np.unique(p))
assert len(p) == cat.ntot
assert min(p) == 0
assert max(p) == npatch-1
# Check the returned center to a direct calculation.
xyz = np.array([cat.x, cat.y, cat.z]).T
direct_cen = np.array([np.average(xyz[p==i], axis=0, weights=w[p==i]) for i in range(npatch)])
direct_cen /= np.sqrt(np.sum(direct_cen**2,axis=1)[:,np.newaxis])
np.testing.assert_allclose(cen, direct_cen, atol=2.e-3)
inertia = np.array([np.sum(w[p==i][:,None] * (xyz[p==i] - cen[i])**2) for i in range(npatch)])
counts = np.array([np.sum(w[p==i]) for i in range(npatch)])
print('With standard algorithm:')
print('time = ',t1-t0)
print('inertia = ',inertia)
print('counts = ',counts)
print('total inertia = ',np.sum(inertia))
print('mean inertia = ',np.mean(inertia))
print('rms inertia = ',np.std(inertia))
assert np.sum(inertia) < 200. # This is specific to this particular field and npatch.
    assert np.std(inertia) < 0.3 * np.mean(inertia)  # rms is usually small compared to the mean
# With weights, these aren't actually all that similar. The range is more than a
# factor of 10. I think because it varies whether high weight points happen to be near the
# edges or middles of patches, so the total weight varies when you target having the
# inertias be relatively similar.
print('mean counts = ',np.mean(counts))
print('min counts = ',np.min(counts))
print('max counts = ',np.max(counts))
# Check the alternate algorithm. rms inertia should be lower.
t0 = time.time()
p, cen = field.run_kmeans(npatch, alt=True)
t1 = time.time()
assert len(p) == cat.ntot
assert min(p) == 0
assert max(p) == npatch-1
inertia = np.array([np.sum(w[p==i][:,None] * (xyz[p==i] - cen[i])**2) for i in range(npatch)])
counts = np.array([np.sum(w[p==i]) for i in range(npatch)])
print('With alternate algorithm:')
print('time = ',t1-t0)
print('total inertia = ',np.sum(inertia))
print('mean inertia = ',np.mean(inertia))
print('rms inertia = ',np.std(inertia))
assert np.sum(inertia) < 200. # Total shouldn't increase much. (And often decreases.)
assert np.std(inertia) < 0.15 * np.mean(inertia) # rms should be even smaller here.
print('mean counts = ',np.mean(counts))
print('min counts = ',np.min(counts))
print('max counts = ',np.max(counts))
# Finally, use a field with lots of top level cells to check the other branch in
# InitializeCenters.
field = cat.getNField(min_top=10)
t0 = time.time()
p, cen = field.run_kmeans(npatch)
t1 = time.time()
assert len(p) == cat.ntot
assert min(p) == 0
assert max(p) == npatch-1
inertia = np.array([np.sum(w[p==i][:,None] * (xyz[p==i] - cen[i])**2) for i in range(npatch)])
counts = np.array([np.sum(w[p==i]) for i in range(npatch)])
# This doesn't give as good an initialization, so these are a bit worse usually.
print('With min_top=10:')
print('time = ',t1-t0)
print('total inertia = ',np.sum(inertia))
print('mean inertia = ',np.mean(inertia))
print('rms inertia = ',np.std(inertia))
assert np.sum(inertia) < 210.
assert np.std(inertia) < 0.4 * np.mean(inertia) # I've seen over 0.3 x mean here.
print('mean counts = ',np.mean(counts))
print('min counts = ',np.min(counts))
print('max counts = ',np.max(counts))
@timer
def test_3d():
# Like the above, but using x,y,z positions.
ngal = 100000
s = 1.
rng = np.random.RandomState(8675309)
x = rng.normal(0,s, (ngal,) )
y = rng.normal(0,s, (ngal,) )
z = rng.normal(0,s, (ngal,) )
w = rng.random_sample(ngal) + 1
cat = treecorr.Catalog(x=x, y=y, z=z, w=w)
npatch = 111
field = cat.getNField()
t0 = time.time()
p, cen = field.run_kmeans(npatch)
t1 = time.time()
print('patches = ',np.unique(p))
assert len(p) == cat.ntot
assert min(p) == 0
assert max(p) == npatch-1
xyz = np.array([x, y, z]).T
inertia = np.array([np.sum(w[p==i][:,None] * (xyz[p==i] - cen[i])**2) for i in range(npatch)])
counts = np.array([np.sum(w[p==i]) for i in range(npatch)])
print('With standard algorithm:')
print('time = ',t1-t0)
print('inertia = ',inertia)
print('counts = ',counts)
print('total inertia = ',np.sum(inertia))
print('mean inertia = ',np.mean(inertia))
print('rms inertia = ',np.std(inertia))
assert np.sum(inertia) < 33000.
    assert np.std(inertia) < 0.3 * np.mean(inertia)  # rms is usually small compared to the mean
print('mean counts = ',np.mean(counts))
print('min counts = ',np.min(counts))
print('max counts = ',np.max(counts))
# Should be the same thing with ra, dec, ra
ra, dec = coord.CelestialCoord.xyz_to_radec(x,y,z)
r = (x**2 + y**2 + z**2)**0.5
cat2 = treecorr.Catalog(ra=ra, dec=dec, ra_units='rad', dec_units='rad', r=r, w=w)
field = cat2.getNField()
t0 = time.time()
p2, cen = field.run_kmeans(npatch)
t1 = time.time()
inertia = np.array([np.sum(w[p2==i][:,None] * (xyz[p2==i] - cen[i])**2) for i in range(npatch)])
counts = np.array([np.sum(w[p2==i]) for i in range(npatch)])
print('time = ',t1-t0)
print('total inertia = ',np.sum(inertia))
print('mean inertia = ',np.mean(inertia))
print('rms inertia = ',np.std(inertia))
assert np.sum(inertia) < 33000.
    assert np.std(inertia) < 0.3 * np.mean(inertia)  # rms is usually small compared to the mean
print('mean counts = ',np.mean(counts))
print('min counts = ',np.min(counts))
print('max counts = ',np.max(counts))
# Check the alternate algorithm. rms inertia should be lower.
t0 = time.time()
p, cen = field.run_kmeans(npatch, alt=True)
t1 = time.time()
assert len(p) == cat.ntot
assert min(p) == 0
assert max(p) == npatch-1
inertia = np.array([np.sum(w[p==i][:,None] * (xyz[p==i] - cen[i])**2) for i in range(npatch)])
counts = np.array([np.sum(w[p==i]) for i in range(npatch)])
print('With alternate algorithm:')
print('time = ',t1-t0)
print('total inertia = ',np.sum(inertia))
print('mean inertia = ',np.mean(inertia))
print('rms inertia = ',np.std(inertia))
assert np.sum(inertia) < 33000.
assert np.std(inertia) < 0.1 * np.mean(inertia) # rms should be even smaller here.
print('mean counts = ',np.mean(counts))
print('min counts = ',np.min(counts))
print('max counts = ',np.max(counts))
# Finally, use a field with lots of top level cells to check the other branch in
# InitializeCenters.
field = cat.getNField(min_top=10)
t0 = time.time()
p, cen = field.run_kmeans(npatch)
t1 = time.time()
assert len(p) == cat.ntot
assert min(p) == 0
assert max(p) == npatch-1
inertia = np.array([np.sum(w[p==i][:,None] * (xyz[p==i] - cen[i])**2) for i in range(npatch)])
counts = np.array([np.sum(w[p==i]) for i in range(npatch)])
# This doesn't give as good an initialization, so these are a bit worse usually.
print('With min_top=10:')
print('time = ',t1-t0)
print('total inertia = ',np.sum(inertia))
print('mean inertia = ',np.mean(inertia))
print('rms inertia = ',np.std(inertia))
assert np.sum(inertia) < 33000.
assert np.std(inertia) < 0.4 * np.mean(inertia) # I've seen over 0.3 x mean here.
print('mean counts = ',np.mean(counts))
print('min counts = ',np.min(counts))
print('max counts = ',np.max(counts))
@timer
def test_2d():
# Like the above, but using x,y positions.
# An additional check here is that this works with other fields besides NField, even though
    # in practice NField will almost always be the kind of Field used.
ngal = 100000
s = 1.
rng = np.random.RandomState(8675309)
x = rng.normal(0,s, (ngal,) )
y = rng.normal(0,s, (ngal,) )
w = rng.random_sample(ngal) + 1
g1 = rng.normal(0,s, (ngal,) )
g2 = rng.normal(0,s, (ngal,) )
k = rng.normal(0,s, (ngal,) )
cat = treecorr.Catalog(x=x, y=y, w=w, g1=g1, g2=g2, k=k)
npatch = 111
field = cat.getGField()
t0 = time.time()
p, cen = field.run_kmeans(npatch)
t1 = time.time()
print('patches = ',np.unique(p))
assert len(p) == cat.ntot
assert min(p) == 0
assert max(p) == npatch-1
xy = np.array([x, y]).T
inertia = np.array([np.sum(w[p==i][:,None] * (xy[p==i] - cen[i])**2) for i in range(npatch)])
counts = np.array([np.sum(w[p==i]) for i in range(npatch)])
print('With standard algorithm:')
print('time = ',t1-t0)
print('inertia = ',inertia)
print('counts = ',counts)
print('total inertia = ',np.sum(inertia))
print('mean inertia = ',np.mean(inertia))
print('rms inertia = ',np.std(inertia))
assert np.sum(inertia) < 5300.
    assert np.std(inertia) < 0.3 * np.mean(inertia)  # rms is usually small compared to the mean
print('mean counts = ',np.mean(counts))
print('min counts = ',np.min(counts))
print('max counts = ',np.max(counts))
# Check the alternate algorithm. rms inertia should be lower.
t0 = time.time()
p, cen = field.run_kmeans(npatch, alt=True)
t1 = time.time()
assert len(p) == cat.ntot
assert min(p) == 0
assert max(p) == npatch-1
inertia = np.array([np.sum(w[p==i][:,None] * (xy[p==i] - cen[i])**2) for i in range(npatch)])
counts = np.array([np.sum(w[p==i]) for i in range(npatch)])
print('With alternate algorithm:')
print('time = ',t1-t0)
print('total inertia = ',np.sum(inertia))
print('mean inertia = ',np.mean(inertia))
print('rms inertia = ',np.std(inertia))
assert np.sum(inertia) < 5300.
assert np.std(inertia) < 0.1 * np.mean(inertia) # rms should be even smaller here.
print('mean counts = ',np.mean(counts))
print('min counts = ',np.min(counts))
print('max counts = ',np.max(counts))
# Finally, use a field with lots of top level cells to check the other branch in
# InitializeCenters.
field = cat.getKField(min_top=10)
t0 = time.time()
p, cen = field.run_kmeans(npatch)
t1 = time.time()
assert len(p) == cat.ntot
assert min(p) == 0
assert max(p) == npatch-1
inertia = np.array([np.sum(w[p==i][:,None] * (xy[p==i] - cen[i])**2) for i in range(npatch)])
counts = np.array([np.sum(w[p==i]) for i in range(npatch)])
# This doesn't give as good an initialization, so these are a bit worse usually.
print('With min_top=10:')
print('time = ',t1-t0)
print('total inertia = ',np.sum(inertia))
print('mean inertia = ',np.mean(inertia))
print('rms inertia = ',np.std(inertia))
assert np.sum(inertia) < 5300.
assert np.std(inertia) < 0.4 * np.mean(inertia) # I've seen over 0.3 x mean here.
print('mean counts = ',np.mean(counts))
print('min counts = ',np.min(counts))
print('max counts = ',np.max(counts))
@timer
def test_init_random():
# Test the init=random option
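    # (Background, stated as an assumption from the Field.run_kmeans docs: the supported
    #  init choices are 'tree' (the default, seeded from cells of the field's tree),
    #  'random' (random points from the catalog), and 'kmeans++'; this test exercises 'random'.)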
ngal = 100000
s = 1.
rng = np.random.RandomState(8675309)
x = rng.normal(0,s, (ngal,) )
y = rng.normal(0,s, (ngal,) )
z = rng.normal(0,s, (ngal,) )
cat = treecorr.Catalog(x=x, y=y, z=z)
xyz = np.array([x, y, z]).T
# Skip the refine_centers step.
print('3d with init=random')
npatch = 10
field = cat.getNField()
cen1 = field.kmeans_initialize_centers(npatch, 'random')
assert cen1.shape == (npatch, 3)
p1 = field.kmeans_assign_patches(cen1)
print('patches = ',np.unique(p1))
assert len(p1) == cat.ntot
assert min(p1) == 0
assert max(p1) == npatch-1
inertia1 = np.array([np.sum((xyz[p1==i] - cen1[i])**2) for i in range(npatch)])
counts1 = np.array([np.sum(p1==i) for i in range(npatch)])
print('counts = ',counts1)
print('rms counts = ',np.std(counts1))
print('total inertia = ',np.sum(inertia1))
# Now run the normal way
# Use higher max_iter, since random isn't a great initialization.
p2, cen2 = field.run_kmeans(npatch, init='random', max_iter=1000)
inertia2 = np.array([np.sum((xyz[p2==i] - cen2[i])**2) for i in range(npatch)])
counts2 = np.array([np.sum(p2==i) for i in range(npatch)])
print('rms counts => ',np.std(counts2))
print('total inertia => ',np.sum(inertia2))
assert np.sum(inertia2) < np.sum(inertia1)
# Use a field with lots of top level cells
print('3d with init=random, min_top=10')
field = cat.getNField(min_top=10)
cen1 = field.kmeans_initialize_centers(npatch, 'random')
assert cen1.shape == (npatch, 3)
p1 = field.kmeans_assign_patches(cen1)
print('patches = ',np.unique(p1))
assert len(p1) == cat.ntot
assert min(p1) == 0
assert max(p1) == npatch-1
inertia1 = np.array([np.sum((xyz[p1==i] - cen1[i])**2) for i in range(npatch)])
counts1 = np.array([np.sum(p1==i) for i in range(npatch)])
print('counts = ',counts1)
print('rms counts = ',np.std(counts1))
print('total inertia = ',np.sum(inertia1))
# Now run the normal way
p2, cen2 = field.run_kmeans(npatch, init='random', max_iter=1000)
inertia2 = np.array([np.sum((xyz[p2==i] - cen2[i])**2) for i in range(npatch)])
counts2 = np.array([np.sum(p2==i) for i in range(npatch)])
print('rms counts => ',np.std(counts2))
print('total inertia => ',np.sum(inertia2))
assert np.sum(inertia2) < np.sum(inertia1)
# Repeat in 2d
print('2d with init=random')
cat = treecorr.Catalog(x=x, y=y)
xy = np.array([x, y]).T
field = cat.getNField()
cen1 = field.kmeans_initialize_centers(npatch, 'random')
assert cen1.shape == (npatch, 2)
p1 = field.kmeans_assign_patches(cen1)
print('patches = ',np.unique(p1))
assert len(p1) == cat.ntot
assert min(p1) == 0
assert max(p1) == npatch-1
inertia1 = np.array([np.sum((xy[p1==i] - cen1[i])**2) for i in range(npatch)])
counts1 = np.array([np.sum(p1==i) for i in range(npatch)])
print('counts = ',counts1)
print('rms counts = ',np.std(counts1))
print('total inertia = ',np.sum(inertia1))
# Now run the normal way
p2, cen2 = field.run_kmeans(npatch, init='random', max_iter=1000)
inertia2 = np.array([np.sum((xy[p2==i] - cen2[i])**2) for i in range(npatch)])
counts2 = np.array([np.sum(p2==i) for i in range(npatch)])
print('rms counts => ',np.std(counts2))
print('total inertia => ',np.sum(inertia2))
assert np.sum(inertia2) < np.sum(inertia1)
# Repeat in spherical
print('spher with init=random')
ra, dec = coord.CelestialCoord.xyz_to_radec(x,y,z)
cat = treecorr.Catalog(ra=ra, dec=dec, ra_units='rad', dec_units='rad')
xyz = np.array([cat.x, cat.y, cat.z]).T
field = cat.getNField()
cen1 = field.kmeans_initialize_centers(npatch, 'random')
assert cen1.shape == (npatch, 3)
p1 = field.kmeans_assign_patches(cen1)
print('patches = ',np.unique(p1))
assert len(p1) == cat.ntot
assert min(p1) == 0
assert max(p1) == npatch-1
inertia1 = np.array([np.sum((xyz[p1==i] - cen1[i])**2) for i in range(npatch)])
counts1 = np.array([np.sum(p1==i) for i in range(npatch)])
print('counts = ',counts1)
print('rms counts = ',np.std(counts1))
print('total inertia = ',np.sum(inertia1))
# Now run the normal way
p2, cen2 = field.run_kmeans(npatch, init='random', max_iter=1000)
inertia2 = np.array([np.sum((xyz[p2==i] - cen2[i])**2) for i in range(npatch)])
counts2 = np.array([np.sum(p2==i) for i in range(npatch)])
print('rms counts => ',np.std(counts2))
print('total inertia => ',np.sum(inertia2))
assert np.sum(inertia2) < np.sum(inertia1)
with assert_raises(ValueError):
field.run_kmeans(npatch, init='invalid')
with assert_raises(ValueError):
field.kmeans_initialize_centers(npatch, init='invalid')
with assert_raises(ValueError):
field.kmeans_initialize_centers(npatch=ngal*2, init='random')
with assert_raises(ValueError):
field.kmeans_initialize_centers(npatch=ngal+1, init='random')
with assert_raises(ValueError):
field.kmeans_initialize_centers(npatch=0, init='random')
with assert_raises(ValueError):
field.kmeans_initialize_centers(npatch=-100, init='random')
# Should be valid to give npatch = 1, although not particularly useful.
cen_1 = field.kmeans_initialize_centers(npatch=1, init='random')
p_1 = field.kmeans_assign_patches(cen_1)
np.testing.assert_equal(p_1, np.zeros(ngal))
# If same number of patches as galaxies, each galaxy gets a patch.
# (This is stupid of course, but check that it doesn't fail.)
# Do this with fewer points though, since it's not particularly fast with N=10^5.
n = 100
cat = treecorr.Catalog(ra=ra[:n], dec=dec[:n], ra_units='rad', dec_units='rad')
field = cat.getNField()
cen_n = field.kmeans_initialize_centers(npatch=n, init='random')
p_n = field.kmeans_assign_patches(cen_n)
np.testing.assert_equal(sorted(p_n), list(range(n)))
@timer
def test_init_kmpp():
    # Test the init=kmeans++ option
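    # (General background on kmeans++ seeding, not a claim about this specific implementation:
    #  the first center is chosen at random, and each subsequent center is drawn with
    #  probability proportional to the squared distance from the nearest center chosen so
    #  far, which tends to spread the initial centers out and speed up convergence.)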
ngal = 100000
s = 1.
rng = np.random.RandomState(8675309)
x = rng.normal(0,s, (ngal,) )
y = rng.normal(0,s, (ngal,) )
z = rng.normal(0,s, (ngal,) )
cat = treecorr.Catalog(x=x, y=y, z=z)
xyz = np.array([x, y, z]).T
# Skip the refine_centers step.
print('3d with init=kmeans++')
npatch = 10
field = cat.getNField()
cen1 = field.kmeans_initialize_centers(npatch, 'kmeans++')
assert cen1.shape == (npatch, 3)
p1 = field.kmeans_assign_patches(cen1)
print('patches = ',np.unique(p1))
assert len(p1) == cat.ntot
assert min(p1) == 0
assert max(p1) == npatch-1
inertia1 = np.array([np.sum((xyz[p1==i] - cen1[i])**2) for i in range(npatch)])
counts1 = np.array([np.sum(p1==i) for i in range(npatch)])
print('counts = ',counts1)
print('rms counts = ',np.std(counts1))
print('total inertia = ',np.sum(inertia1))
# Now run the normal way
# Use higher max_iter, since random isn't a great initialization.
p2, cen2 = field.run_kmeans(npatch, init='kmeans++', max_iter=1000)
inertia2 = np.array([np.sum((xyz[p2==i] - cen2[i])**2) for i in range(npatch)])
counts2 = np.array([np.sum(p2==i) for i in range(npatch)])
print('rms counts => ',np.std(counts2))
print('total inertia => ',np.sum(inertia2))
assert np.sum(inertia2) < np.sum(inertia1)
# Use a field with lots of top level cells
print('3d with init=kmeans++, min_top=10')
field = cat.getNField(min_top=10)
cen1 = field.kmeans_initialize_centers(npatch, 'kmeans++')
assert cen1.shape == (npatch, 3)
p1 = field.kmeans_assign_patches(cen1)
print('patches = ',np.unique(p1))
assert len(p1) == cat.ntot
assert min(p1) == 0
assert max(p1) == npatch-1
inertia1 = np.array([np.sum((xyz[p1==i] - cen1[i])**2) for i in range(npatch)])
counts1 = np.array([np.sum(p1==i) for i in range(npatch)])
print('counts = ',counts1)
print('rms counts = ',np.std(counts1))
print('total inertia = ',np.sum(inertia1))
# Now run the normal way
p2, cen2 = field.run_kmeans(npatch, init='kmeans++', max_iter=1000)
inertia2 = np.array([np.sum((xyz[p2==i] - cen2[i])**2) for i in range(npatch)])
counts2 = np.array([np.sum(p2==i) for i in range(npatch)])
print('rms counts => ',np.std(counts2))
print('total inertia => ',np.sum(inertia2))
assert np.sum(inertia2) < np.sum(inertia1)
# Repeat in 2d
print('2d with init=kmeans++')
cat = treecorr.Catalog(x=x, y=y)
xy = np.array([x, y]).T
field = cat.getNField()
cen1 = field.kmeans_initialize_centers(npatch, 'kmeans++')
assert cen1.shape == (npatch, 2)
p1 = field.kmeans_assign_patches(cen1)
print('patches = ',np.unique(p1))
assert len(p1) == cat.ntot
assert min(p1) == 0
assert max(p1) == npatch-1
inertia1 = np.array([np.sum((xy[p1==i] - cen1[i])**2) for i in range(npatch)])
counts1 = np.array([np.sum(p1==i) for i in range(npatch)])
print('counts = ',counts1)
print('rms counts = ',np.std(counts1))
print('total inertia = ',np.sum(inertia1))
# Now run the normal way
p2, cen2 = field.run_kmeans(npatch, init='kmeans++', max_iter=1000)
inertia2 = np.array([np.sum((xy[p2==i] - cen2[i])**2) for i in range(npatch)])
counts2 = np.array([np.sum(p2==i) for i in range(npatch)])
print('rms counts => ',np.std(counts2))
print('total inertia => ',np.sum(inertia2))
assert np.sum(inertia2) < np.sum(inertia1)
# Repeat in spherical
print('spher with init=kmeans++')
ra, dec = coord.CelestialCoord.xyz_to_radec(x,y,z)
cat = treecorr.Catalog(ra=ra, dec=dec, ra_units='rad', dec_units='rad')
xyz = np.array([cat.x, cat.y, cat.z]).T
field = cat.getNField()
cen1 = field.kmeans_initialize_centers(npatch, 'kmeans++')
assert cen1.shape == (npatch, 3)
p1 = field.kmeans_assign_patches(cen1)
print('patches = ',np.unique(p1))
assert len(p1) == cat.ntot
assert min(p1) == 0
assert max(p1) == npatch-1
inertia1 = np.array([np.sum((xyz[p1==i] - cen1[i])**2) for i in range(npatch)])
counts1 = np.array([np.sum(p1==i) for i in range(npatch)])
print('counts = ',counts1)
print('rms counts = ',np.std(counts1))
print('total inertia = ',np.sum(inertia1))
# Now run the normal way
p2, cen2 = field.run_kmeans(npatch, init='kmeans++', max_iter=1000)
inertia2 = np.array([np.sum((xyz[p2==i] - cen2[i])**2) for i in range(npatch)])
counts2 = np.array([np.sum(p2==i) for i in range(npatch)])
print('rms counts => ',np.std(counts2))
print('total inertia => ',np.sum(inertia2))
assert np.sum(inertia2) < np.sum(inertia1)
with assert_raises(ValueError):
field.kmeans_initialize_centers(npatch=ngal*2, init='kmeans++')
with assert_raises(ValueError):
field.kmeans_initialize_centers(npatch=ngal+1, init='kmeans++')
with assert_raises(ValueError):
field.kmeans_initialize_centers(npatch=0, init='kmeans++')
with assert_raises(ValueError):
field.kmeans_initialize_centers(npatch=-100, init='kmeans++')
# Should be valid to give npatch = 1, although not particularly useful.
cen_1 = field.kmeans_initialize_centers(npatch=1, init='kmeans++')
p_1 = field.kmeans_assign_patches(cen_1)
np.testing.assert_equal(p_1, np.zeros(ngal))
# If same number of patches as galaxies, each galaxy gets a patch.
# (This is stupid of course, but check that it doesn't fail.)
# Do this with fewer points though, since it's not particularly fast with N=10^5.
n = 100
cat = treecorr.Catalog(ra=ra[:n], dec=dec[:n], ra_units='rad', dec_units='rad')
field = cat.getNField()
cen_n = field.kmeans_initialize_centers(npatch=n, init='kmeans++')
p_n = field.kmeans_assign_patches(cen_n)
np.testing.assert_equal(sorted(p_n), list(range(n)))
@timer
def test_zero_weight():
# Based on test_ra_dec, but where many galaxies have w=0.
# There used to be a bug where w=0 objects were not assigned to any patch.
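    # (The regression check is the final assertion below: the set of patches occupied by
    #  w>0 points must match the set occupied by w==0 points, i.e. zero-weight objects
    #  still get assigned to patches.)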
ngal = 10000
s = 10.
rng = np.random.RandomState(8675309)
x = rng.normal(0,s, (ngal,) )
y = rng.normal(0,s, (ngal,) ) + 100 # Put everything at large y, so smallish angle on sky
z = rng.normal(0,s, (ngal,) )
w = np.zeros(ngal)
w[np.random.choice(range(ngal), ngal//10, replace=False)] = 1.0
ra, dec = coord.CelestialCoord.xyz_to_radec(x,y,z)
print('minra = ',np.min(ra) * coord.radians / coord.degrees)
print('maxra = ',np.max(ra) * coord.radians / coord.degrees)
print('mindec = ',np.min(dec) * coord.radians / coord.degrees)
print('maxdec = ',np.max(dec) * coord.radians / coord.degrees)
cat = treecorr.Catalog(ra=ra, dec=dec, ra_units='rad', dec_units='rad', w=w,
keep_zero_weight=True)
treecorr.set_omp_threads(1)
npatch = 16
field = cat.getNField()
t0 = time.time()
p, c = field.run_kmeans(npatch)
t1 = time.time()
print('patches = ',np.unique(p))
assert len(p) == cat.ntot
assert min(p) == 0
assert max(p) == npatch-1
print('w>0 patches = ',np.unique(p[w>0]))
print('w==0 patches = ',np.unique(p[w==0]))
assert set(p[w>0]) == set(p[w==0])
@timer
def test_catalog_sphere():
# This follows the same path as test_radec, but using the Catalog API to run kmeans.
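    # (Descriptive note: passing npatch= to treecorr.Catalog runs kmeans internally, and the
    #  resulting labels and centers are read back below via cat.patch and cat.patch_centers.)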
ngal = 100000
s = 10.
rng = np.random.RandomState(8675309)
x = rng.normal(0,s, (ngal,) )
y = rng.normal(0,s, (ngal,) ) + 100 # Put everything at large y, so smallish angle on sky
z = rng.normal(0,s, (ngal,) )
w = rng.random_sample(ngal)
ra, dec, r = coord.CelestialCoord.xyz_to_radec(x,y,z, return_r=True)
print('minra = ',np.min(ra) * coord.radians / coord.degrees)
print('maxra = ',np.max(ra) * coord.radians / coord.degrees)
print('mindec = ',np.min(dec) * coord.radians / coord.degrees)
print('maxdec = ',np.max(dec) * coord.radians / coord.degrees)
npatch = 111
cat = treecorr.Catalog(ra=ra, dec=dec, ra_units='rad', dec_units='rad', w=w, npatch=npatch)
t0 = time.time()
p = cat.patch
cen = cat.patch_centers
t1 = time.time()
print('patches = ',np.unique(p))
assert len(p) == cat.ntot
assert min(p) == 0
assert max(p) == npatch-1
# Check the returned center to a direct calculation.
xyz = np.array([cat.x, cat.y, cat.z]).T
direct_cen = np.array([np.average(xyz[p==i], axis=0, weights=w[p==i]) for i in range(npatch)])
direct_cen /= np.sqrt(np.sum(direct_cen**2,axis=1)[:,np.newaxis])
np.testing.assert_allclose(cen, direct_cen, atol=2.e-3)
inertia = np.array([np.sum(w[p==i][:,None] * (xyz[p==i] - cen[i])**2) for i in range(npatch)])
counts = np.array([np.sum(w[p==i]) for i in range(npatch)])
print('With standard algorithm:')
print('time = ',t1-t0)
print('inertia = ',inertia)
print('counts = ',counts)
print('total inertia = ',np.sum(inertia))
print('mean inertia = ',np.mean(inertia))
print('rms inertia = ',np.std(inertia))
assert np.sum(inertia) < 200. # This is specific to this particular field and npatch.
    assert np.std(inertia) < 0.3 * np.mean(inertia)  # rms is usually small compared to the mean
# With weights, these aren't actually all that similar. The range is more than a
# factor of 10. I think because it varies whether high weight points happen to be near the
# edges or middles of patches, so the total weight varies when you target having the
# inertias be relatively similar.
print('mean counts = ',np.mean(counts))
print('min counts = ',np.min(counts))
print('max counts = ',np.max(counts))
# Check the alternate algorithm. rms inertia should be lower.
cat2 = treecorr.Catalog(ra=ra, dec=dec, ra_units='rad', dec_units='rad', w=w,
npatch=npatch, kmeans_alt=True)
t0 = time.time()
p = cat2.patch
cen = cat2.patch_centers
t1 = time.time()
assert len(p) == cat2.ntot
assert min(p) == 0
assert max(p) == npatch-1
inertia = np.array([np.sum(w[p==i][:,None] * (xyz[p==i] - cen[i])**2) for i in range(npatch)])
counts = np.array([np.sum(w[p==i]) for i in range(npatch)])
print('With alternate algorithm:')
print('time = ',t1-t0)
print('total inertia = ',np.sum(inertia))
print('mean inertia = ',np.mean(inertia))
print('rms inertia = ',np.std(inertia))
assert np.sum(inertia) < 200. # Total shouldn't increase much. (And often decreases.)
assert np.std(inertia) < 0.15 * np.mean(inertia) # rms should be even smaller here.
print('mean counts = ',np.mean(counts))
print('min counts = ',np.min(counts))
print('max counts = ',np.max(counts))
# Check using patch_centers from (ra,dec) -> (ra,dec,r)
cat3 = treecorr.Catalog(ra=ra, dec=dec, r=r, ra_units='rad', dec_units='rad', w=w,
patch_centers=cat2.patch_centers)
np.testing.assert_array_equal(cat2.patch, cat3.patch)
np.testing.assert_array_equal(cat2.patch_centers, cat3.patch_centers)
@timer
def test_catalog_3d():
# With ra, dec, r, the Catalog API should only do patches using RA, Dec.
ngal = 100000
s = 10.
rng = np.random.RandomState(8675309)
x = rng.normal(0,s, (ngal,) )
y = rng.normal(0,s, (ngal,) ) + 100 # Put everything at large y, so smallish angle on sky
z = rng.normal(0,s, (ngal,) )
w = rng.random_sample(ngal)
ra, dec, r = coord.CelestialCoord.xyz_to_radec(x,y,z, return_r=True)
print('minra = ',np.min(ra) * coord.radians / coord.degrees)
print('maxra = ',np.max(ra) * coord.radians / coord.degrees)
print('mindec = ',np.min(dec) * coord.radians / coord.degrees)
print('maxdec = ',np.max(dec) * coord.radians / coord.degrees)
npatch = 111
cat = treecorr.Catalog(ra=ra, dec=dec, r=r, ra_units='rad', dec_units='rad', w=w,
npatch=npatch)
t0 = time.time()
p = cat.patch
cen = cat.patch_centers
t1 = time.time()
print('patches = ',np.unique(p))
assert len(p) == cat.ntot
assert min(p) == 0
assert max(p) == npatch-1
# Check the returned center to a direct calculation.
xyz = np.array([cat.x/cat.r, cat.y/cat.r, cat.z/cat.r]).T
print('cen = ',cen)
print('xyz = ',xyz)
direct_cen = np.array([np.average(xyz[p==i], axis=0, weights=w[p==i]) for i in range(npatch)])
direct_cen /= np.sqrt(np.sum(direct_cen**2,axis=1)[:,np.newaxis])
np.testing.assert_allclose(cen, direct_cen, atol=2.e-3)
inertia = np.array([np.sum(w[p==i][:,None] * (xyz[p==i] - cen[i])**2) for i in range(npatch)])
counts = np.array([np.sum(w[p==i]) for i in range(npatch)])
print('With standard algorithm:')
print('time = ',t1-t0)
print('inertia = ',inertia)
print('counts = ',counts)
print('total inertia = ',np.sum(inertia))
print('mean inertia = ',np.mean(inertia))
print('rms inertia = ',np.std(inertia))
assert np.sum(inertia) < 200. # This is specific to this particular field and npatch.
    assert np.std(inertia) < 0.3 * np.mean(inertia)  # rms is usually small compared to the mean
# With weights, these aren't actually all that similar. The range is more than a
# factor of 10. I think because it varies whether high weight points happen to be near the
# edges or middles of patches, so the total weight varies when you target having the
# inertias be relatively similar.
print('mean counts = ',np.mean(counts))
print('min counts = ',np.min(counts))
print('max counts = ',np.max(counts))
# Check the alternate algorithm. rms inertia should be lower.
cat2 = treecorr.Catalog(ra=ra, dec=dec, r=r, ra_units='rad', dec_units='rad', w=w,
npatch=npatch, kmeans_alt=True)
t0 = time.time()
p = cat2.patch
cen = cat2.patch_centers
t1 = time.time()
assert len(p) == cat2.ntot
assert min(p) == 0
assert max(p) == npatch-1
inertia = np.array([np.sum(w[p==i][:,None] * (xyz[p==i] - cen[i])**2) for i in range(npatch)])
counts = np.array([np.sum(w[p==i]) for i in range(npatch)])
print('With alternate algorithm:')
print('time = ',t1-t0)
print('total inertia = ',np.sum(inertia))
print('mean inertia = ',np.mean(inertia))
print('rms inertia = ',np.std(inertia))
assert np.sum(inertia) < 200. # Total shouldn't increase much. (And often decreases.)
assert np.std(inertia) < 0.15 * np.mean(inertia) # rms should be even smaller here.
print('mean counts = ',np.mean(counts))
print('min counts = ',np.min(counts))
print('max counts = ',np.max(counts))
# Check using patch_centers from (ra,dec,r) -> (ra,dec)
cat3 = treecorr.Catalog(ra=ra, dec=dec, ra_units='rad', dec_units='rad', w=w,
patch_centers=cat2.patch_centers)
np.testing.assert_array_equal(cat2.patch, cat3.patch)
np.testing.assert_array_equal(cat2.patch_centers, cat3.patch_centers)
if __name__ == '__main__':
test_dessv()
test_radec()
test_3d()
test_2d()
test_init_random()
test_init_kmpp()
test_zero_weight()
test_catalog_sphere()
test_catalog_3d()
|
[
"numpy.sum",
"test_helper.assert_raises",
"treecorr.Catalog",
"numpy.mean",
"test_helper.get_from_wiki",
"os.path.join",
"numpy.unique",
"numpy.std",
"numpy.random.RandomState",
"numpy.max",
"numpy.testing.assert_allclose",
"numpy.average",
"numpy.testing.assert_array_equal",
"treecorr.set_omp_threads",
"numpy.min",
"coord.CelestialCoord.xyz_to_radec",
"numpy.zeros",
"time.time",
"numpy.array"
] |
[((1012, 1040), 'test_helper.get_from_wiki', 'get_from_wiki', (['"""des_sv.fits"""'], {}), "('des_sv.fits')\n", (1025, 1040), False, 'from test_helper import get_from_wiki, CaptureLog, assert_raises, profile, timer\n'), ((1057, 1092), 'os.path.join', 'os.path.join', (['"""data"""', '"""des_sv.fits"""'], {}), "('data', 'des_sv.fits')\n", (1069, 1092), False, 'import os\n'), ((1102, 1194), 'treecorr.Catalog', 'treecorr.Catalog', (['file_name'], {'ra_col': '"""ra"""', 'dec_col': '"""dec"""', 'ra_units': '"""deg"""', 'dec_units': '"""deg"""'}), "(file_name, ra_col='ra', dec_col='dec', ra_units='deg',\n dec_units='deg')\n", (1118, 1194), False, 'import treecorr\n'), ((1363, 1374), 'time.time', 'time.time', ([], {}), '()\n', (1372, 1374), False, 'import time\n'), ((1428, 1439), 'time.time', 'time.time', ([], {}), '()\n', (1437, 1439), False, 'import time\n'), ((1841, 1896), 'numpy.testing.assert_allclose', 'np.testing.assert_allclose', (['cen', 'direct_cen'], {'atol': '(0.001)'}), '(cen, direct_cen, atol=0.001)\n', (1867, 1896), True, 'import numpy as np\n'), ((3145, 3156), 'time.time', 'time.time', ([], {}), '()\n', (3154, 3156), False, 'import time\n'), ((3220, 3231), 'time.time', 'time.time', ([], {}), '()\n', (3229, 3231), False, 'import time\n'), ((4539, 4550), 'time.time', 'time.time', ([], {}), '()\n', (4548, 4550), False, 'import time\n'), ((4604, 4615), 'time.time', 'time.time', ([], {}), '()\n', (4613, 4615), False, 'import time\n'), ((5936, 5966), 'numpy.random.RandomState', 'np.random.RandomState', (['(8675309)'], {}), '(8675309)\n', (5957, 5966), True, 'import numpy as np\n'), ((6176, 6218), 'coord.CelestialCoord.xyz_to_radec', 'coord.CelestialCoord.xyz_to_radec', (['x', 'y', 'z'], {}), '(x, y, z)\n', (6209, 6218), False, 'import coord\n'), ((6491, 6561), 'treecorr.Catalog', 'treecorr.Catalog', ([], {'ra': 'ra', 'dec': 'dec', 'ra_units': '"""rad"""', 'dec_units': '"""rad"""', 'w': 'w'}), "(ra=ra, dec=dec, ra_units='rad', dec_units='rad', w=w)\n", (6507, 6561), False, 'import treecorr\n'), ((6617, 6628), 'time.time', 'time.time', ([], {}), '()\n', (6626, 6628), False, 'import time\n'), ((6676, 6687), 'time.time', 'time.time', ([], {}), '()\n', (6685, 6687), False, 'import time\n'), ((7083, 7138), 'numpy.testing.assert_allclose', 'np.testing.assert_allclose', (['cen', 'direct_cen'], {'atol': '(0.002)'}), '(cen, direct_cen, atol=0.002)\n', (7109, 7138), True, 'import numpy as np\n'), ((8255, 8266), 'time.time', 'time.time', ([], {}), '()\n', (8264, 8266), False, 'import time\n'), ((8324, 8335), 'time.time', 'time.time', ([], {}), '()\n', (8333, 8335), False, 'import time\n'), ((9252, 9263), 'time.time', 'time.time', ([], {}), '()\n', (9261, 9263), False, 'import time\n'), ((9311, 9322), 'time.time', 'time.time', ([], {}), '()\n', (9320, 9322), False, 'import time\n'), ((10211, 10241), 'numpy.random.RandomState', 'np.random.RandomState', (['(8675309)'], {}), '(8675309)\n', (10232, 10241), True, 'import numpy as np\n'), ((10390, 10426), 'treecorr.Catalog', 'treecorr.Catalog', ([], {'x': 'x', 'y': 'y', 'z': 'z', 'w': 'w'}), '(x=x, y=y, z=z, w=w)\n', (10406, 10426), False, 'import treecorr\n'), ((10482, 10493), 'time.time', 'time.time', ([], {}), '()\n', (10491, 10493), False, 'import time\n'), ((10541, 10552), 'time.time', 'time.time', ([], {}), '()\n', (10550, 10552), False, 'import time\n'), ((11442, 11484), 'coord.CelestialCoord.xyz_to_radec', 'coord.CelestialCoord.xyz_to_radec', (['x', 'y', 'z'], {}), '(x, y, z)\n', (11475, 11484), False, 'import coord\n'), ((11528, 11603), 
'treecorr.Catalog', 'treecorr.Catalog', ([], {'ra': 'ra', 'dec': 'dec', 'ra_units': '"""rad"""', 'dec_units': '"""rad"""', 'r': 'r', 'w': 'w'}), "(ra=ra, dec=dec, ra_units='rad', dec_units='rad', r=r, w=w)\n", (11544, 11603), False, 'import treecorr\n'), ((11642, 11653), 'time.time', 'time.time', ([], {}), '()\n', (11651, 11653), False, 'import time\n'), ((11702, 11713), 'time.time', 'time.time', ([], {}), '()\n', (11711, 11713), False, 'import time\n'), ((12366, 12377), 'time.time', 'time.time', ([], {}), '()\n', (12375, 12377), False, 'import time\n'), ((12435, 12446), 'time.time', 'time.time', ([], {}), '()\n', (12444, 12446), False, 'import time\n'), ((13307, 13318), 'time.time', 'time.time', ([], {}), '()\n', (13316, 13318), False, 'import time\n'), ((13366, 13377), 'time.time', 'time.time', ([], {}), '()\n', (13375, 13377), False, 'import time\n'), ((14434, 14464), 'numpy.random.RandomState', 'np.random.RandomState', (['(8675309)'], {}), '(8675309)\n', (14455, 14464), True, 'import numpy as np\n'), ((14683, 14733), 'treecorr.Catalog', 'treecorr.Catalog', ([], {'x': 'x', 'y': 'y', 'w': 'w', 'g1': 'g1', 'g2': 'g2', 'k': 'k'}), '(x=x, y=y, w=w, g1=g1, g2=g2, k=k)\n', (14699, 14733), False, 'import treecorr\n'), ((14789, 14800), 'time.time', 'time.time', ([], {}), '()\n', (14798, 14800), False, 'import time\n'), ((14848, 14859), 'time.time', 'time.time', ([], {}), '()\n', (14857, 14859), False, 'import time\n'), ((15757, 15768), 'time.time', 'time.time', ([], {}), '()\n', (15766, 15768), False, 'import time\n'), ((15826, 15837), 'time.time', 'time.time', ([], {}), '()\n', (15835, 15837), False, 'import time\n'), ((16696, 16707), 'time.time', 'time.time', ([], {}), '()\n', (16705, 16707), False, 'import time\n'), ((16755, 16766), 'time.time', 'time.time', ([], {}), '()\n', (16764, 16766), False, 'import time\n'), ((17649, 17679), 'numpy.random.RandomState', 'np.random.RandomState', (['(8675309)'], {}), '(8675309)\n', (17670, 17679), True, 'import numpy as np\n'), ((17792, 17823), 'treecorr.Catalog', 'treecorr.Catalog', ([], {'x': 'x', 'y': 'y', 'z': 'z'}), '(x=x, y=y, z=z)\n', (17808, 17823), False, 'import treecorr\n'), ((20074, 20100), 'treecorr.Catalog', 'treecorr.Catalog', ([], {'x': 'x', 'y': 'y'}), '(x=x, y=y)\n', (20090, 20100), False, 'import treecorr\n'), ((21152, 21194), 'coord.CelestialCoord.xyz_to_radec', 'coord.CelestialCoord.xyz_to_radec', (['x', 'y', 'z'], {}), '(x, y, z)\n', (21185, 21194), False, 'import coord\n'), ((21203, 21268), 'treecorr.Catalog', 'treecorr.Catalog', ([], {'ra': 'ra', 'dec': 'dec', 'ra_units': '"""rad"""', 'dec_units': '"""rad"""'}), "(ra=ra, dec=dec, ra_units='rad', dec_units='rad')\n", (21219, 21268), False, 'import treecorr\n'), ((23350, 23423), 'treecorr.Catalog', 'treecorr.Catalog', ([], {'ra': 'ra[:n]', 'dec': 'dec[:n]', 'ra_units': '"""rad"""', 'dec_units': '"""rad"""'}), "(ra=ra[:n], dec=dec[:n], ra_units='rad', dec_units='rad')\n", (23366, 23423), False, 'import treecorr\n'), ((23728, 23758), 'numpy.random.RandomState', 'np.random.RandomState', (['(8675309)'], {}), '(8675309)\n', (23749, 23758), True, 'import numpy as np\n'), ((23871, 23902), 'treecorr.Catalog', 'treecorr.Catalog', ([], {'x': 'x', 'y': 'y', 'z': 'z'}), '(x=x, y=y, z=z)\n', (23887, 23902), False, 'import treecorr\n'), ((26167, 26193), 'treecorr.Catalog', 'treecorr.Catalog', ([], {'x': 'x', 'y': 'y'}), '(x=x, y=y)\n', (26183, 26193), False, 'import treecorr\n'), ((27251, 27293), 'coord.CelestialCoord.xyz_to_radec', 'coord.CelestialCoord.xyz_to_radec', (['x', 'y', 'z'], {}), 
'(x, y, z)\n', (27284, 27293), False, 'import coord\n'), ((27302, 27367), 'treecorr.Catalog', 'treecorr.Catalog', ([], {'ra': 'ra', 'dec': 'dec', 'ra_units': '"""rad"""', 'dec_units': '"""rad"""'}), "(ra=ra, dec=dec, ra_units='rad', dec_units='rad')\n", (27318, 27367), False, 'import treecorr\n'), ((29278, 29351), 'treecorr.Catalog', 'treecorr.Catalog', ([], {'ra': 'ra[:n]', 'dec': 'dec[:n]', 'ra_units': '"""rad"""', 'dec_units': '"""rad"""'}), "(ra=ra[:n], dec=dec[:n], ra_units='rad', dec_units='rad')\n", (29294, 29351), False, 'import treecorr\n'), ((29767, 29797), 'numpy.random.RandomState', 'np.random.RandomState', (['(8675309)'], {}), '(8675309)\n', (29788, 29797), True, 'import numpy as np\n'), ((29969, 29983), 'numpy.zeros', 'np.zeros', (['ngal'], {}), '(ngal)\n', (29977, 29983), True, 'import numpy as np\n'), ((30066, 30108), 'coord.CelestialCoord.xyz_to_radec', 'coord.CelestialCoord.xyz_to_radec', (['x', 'y', 'z'], {}), '(x, y, z)\n', (30099, 30108), False, 'import coord\n'), ((30381, 30478), 'treecorr.Catalog', 'treecorr.Catalog', ([], {'ra': 'ra', 'dec': 'dec', 'ra_units': '"""rad"""', 'dec_units': '"""rad"""', 'w': 'w', 'keep_zero_weight': '(True)'}), "(ra=ra, dec=dec, ra_units='rad', dec_units='rad', w=w,\n keep_zero_weight=True)\n", (30397, 30478), False, 'import treecorr\n'), ((30506, 30533), 'treecorr.set_omp_threads', 'treecorr.set_omp_threads', (['(1)'], {}), '(1)\n', (30530, 30533), False, 'import treecorr\n'), ((30588, 30599), 'time.time', 'time.time', ([], {}), '()\n', (30597, 30599), False, 'import time\n'), ((30645, 30656), 'time.time', 'time.time', ([], {}), '()\n', (30654, 30656), False, 'import time\n'), ((31075, 31105), 'numpy.random.RandomState', 'np.random.RandomState', (['(8675309)'], {}), '(8675309)\n', (31096, 31105), True, 'import numpy as np\n'), ((31318, 31375), 'coord.CelestialCoord.xyz_to_radec', 'coord.CelestialCoord.xyz_to_radec', (['x', 'y', 'z'], {'return_r': '(True)'}), '(x, y, z, return_r=True)\n', (31351, 31375), False, 'import coord\n'), ((31665, 31754), 'treecorr.Catalog', 'treecorr.Catalog', ([], {'ra': 'ra', 'dec': 'dec', 'ra_units': '"""rad"""', 'dec_units': '"""rad"""', 'w': 'w', 'npatch': 'npatch'}), "(ra=ra, dec=dec, ra_units='rad', dec_units='rad', w=w,\n npatch=npatch)\n", (31681, 31754), False, 'import treecorr\n'), ((31761, 31772), 'time.time', 'time.time', ([], {}), '()\n', (31770, 31772), False, 'import time\n'), ((31828, 31839), 'time.time', 'time.time', ([], {}), '()\n', (31837, 31839), False, 'import time\n'), ((32235, 32290), 'numpy.testing.assert_allclose', 'np.testing.assert_allclose', (['cen', 'direct_cen'], {'atol': '(0.002)'}), '(cen, direct_cen, atol=0.002)\n', (32261, 32290), True, 'import numpy as np\n'), ((33409, 33515), 'treecorr.Catalog', 'treecorr.Catalog', ([], {'ra': 'ra', 'dec': 'dec', 'ra_units': '"""rad"""', 'dec_units': '"""rad"""', 'w': 'w', 'npatch': 'npatch', 'kmeans_alt': '(True)'}), "(ra=ra, dec=dec, ra_units='rad', dec_units='rad', w=w,\n npatch=npatch, kmeans_alt=True)\n", (33425, 33515), False, 'import treecorr\n'), ((33549, 33560), 'time.time', 'time.time', ([], {}), '()\n', (33558, 33560), False, 'import time\n'), ((33618, 33629), 'time.time', 'time.time', ([], {}), '()\n', (33627, 33629), False, 'import time\n'), ((34461, 34574), 'treecorr.Catalog', 'treecorr.Catalog', ([], {'ra': 'ra', 'dec': 'dec', 'r': 'r', 'ra_units': '"""rad"""', 'dec_units': '"""rad"""', 'w': 'w', 'patch_centers': 'cat2.patch_centers'}), "(ra=ra, dec=dec, r=r, ra_units='rad', dec_units='rad', w=w,\n 
patch_centers=cat2.patch_centers)\n", (34477, 34574), False, 'import treecorr\n'), ((34603, 34656), 'numpy.testing.assert_array_equal', 'np.testing.assert_array_equal', (['cat2.patch', 'cat3.patch'], {}), '(cat2.patch, cat3.patch)\n', (34632, 34656), True, 'import numpy as np\n'), ((34661, 34730), 'numpy.testing.assert_array_equal', 'np.testing.assert_array_equal', (['cat2.patch_centers', 'cat3.patch_centers'], {}), '(cat2.patch_centers, cat3.patch_centers)\n', (34690, 34730), True, 'import numpy as np\n'), ((34881, 34911), 'numpy.random.RandomState', 'np.random.RandomState', (['(8675309)'], {}), '(8675309)\n', (34902, 34911), True, 'import numpy as np\n'), ((35124, 35181), 'coord.CelestialCoord.xyz_to_radec', 'coord.CelestialCoord.xyz_to_radec', (['x', 'y', 'z'], {'return_r': '(True)'}), '(x, y, z, return_r=True)\n', (35157, 35181), False, 'import coord\n'), ((35471, 35565), 'treecorr.Catalog', 'treecorr.Catalog', ([], {'ra': 'ra', 'dec': 'dec', 'r': 'r', 'ra_units': '"""rad"""', 'dec_units': '"""rad"""', 'w': 'w', 'npatch': 'npatch'}), "(ra=ra, dec=dec, r=r, ra_units='rad', dec_units='rad', w=w,\n npatch=npatch)\n", (35487, 35565), False, 'import treecorr\n'), ((35599, 35610), 'time.time', 'time.time', ([], {}), '()\n', (35608, 35610), False, 'import time\n'), ((35666, 35677), 'time.time', 'time.time', ([], {}), '()\n', (35675, 35677), False, 'import time\n'), ((36139, 36194), 'numpy.testing.assert_allclose', 'np.testing.assert_allclose', (['cen', 'direct_cen'], {'atol': '(0.002)'}), '(cen, direct_cen, atol=0.002)\n', (36165, 36194), True, 'import numpy as np\n'), ((37313, 37424), 'treecorr.Catalog', 'treecorr.Catalog', ([], {'ra': 'ra', 'dec': 'dec', 'r': 'r', 'ra_units': '"""rad"""', 'dec_units': '"""rad"""', 'w': 'w', 'npatch': 'npatch', 'kmeans_alt': '(True)'}), "(ra=ra, dec=dec, r=r, ra_units='rad', dec_units='rad', w=w,\n npatch=npatch, kmeans_alt=True)\n", (37329, 37424), False, 'import treecorr\n'), ((37458, 37469), 'time.time', 'time.time', ([], {}), '()\n', (37467, 37469), False, 'import time\n'), ((37527, 37538), 'time.time', 'time.time', ([], {}), '()\n', (37536, 37538), False, 'import time\n'), ((38370, 38478), 'treecorr.Catalog', 'treecorr.Catalog', ([], {'ra': 'ra', 'dec': 'dec', 'ra_units': '"""rad"""', 'dec_units': '"""rad"""', 'w': 'w', 'patch_centers': 'cat2.patch_centers'}), "(ra=ra, dec=dec, ra_units='rad', dec_units='rad', w=w,\n patch_centers=cat2.patch_centers)\n", (38386, 38478), False, 'import treecorr\n'), ((38507, 38560), 'numpy.testing.assert_array_equal', 'np.testing.assert_array_equal', (['cat2.patch', 'cat3.patch'], {}), '(cat2.patch, cat3.patch)\n', (38536, 38560), True, 'import numpy as np\n'), ((38565, 38634), 'numpy.testing.assert_array_equal', 'np.testing.assert_array_equal', (['cat2.patch_centers', 'cat3.patch_centers'], {}), '(cat2.patch_centers, cat3.patch_centers)\n', (38594, 38634), True, 'import numpy as np\n'), ((1463, 1481), 'numpy.unique', 'np.unique', (['patches'], {}), '(patches)\n', (1472, 1481), True, 'import numpy as np\n'), ((1652, 1683), 'numpy.array', 'np.array', (['[cat.x, cat.y, cat.z]'], {}), '([cat.x, cat.y, cat.z])\n', (1660, 1683), True, 'import numpy as np\n'), ((2408, 2423), 'numpy.sum', 'np.sum', (['inertia'], {}), '(inertia)\n', (2414, 2423), True, 'import numpy as np\n'), ((2453, 2469), 'numpy.mean', 'np.mean', (['inertia'], {}), '(inertia)\n', (2460, 2469), True, 'import numpy as np\n'), ((2498, 2513), 'numpy.std', 'np.std', (['inertia'], {}), '(inertia)\n', (2504, 2513), True, 'import numpy as np\n'), ((2540, 2554), 
'numpy.mean', 'np.mean', (['sizes'], {}), '(sizes)\n', (2547, 2554), True, 'import numpy as np\n'), ((2580, 2593), 'numpy.std', 'np.std', (['sizes'], {}), '(sizes)\n', (2586, 2593), True, 'import numpy as np\n'), ((2606, 2621), 'numpy.sum', 'np.sum', (['inertia'], {}), '(inertia)\n', (2612, 2621), True, 'import numpy as np\n'), ((2697, 2712), 'numpy.std', 'np.std', (['inertia'], {}), '(inertia)\n', (2703, 2712), True, 'import numpy as np\n'), ((2780, 2793), 'numpy.std', 'np.std', (['sizes'], {}), '(sizes)\n', (2786, 2793), True, 'import numpy as np\n'), ((2967, 2982), 'numpy.mean', 'np.mean', (['counts'], {}), '(counts)\n', (2974, 2982), True, 'import numpy as np\n'), ((3010, 3024), 'numpy.min', 'np.min', (['counts'], {}), '(counts)\n', (3016, 3024), True, 'import numpy as np\n'), ((3052, 3066), 'numpy.max', 'np.max', (['counts'], {}), '(counts)\n', (3058, 3066), True, 'import numpy as np\n'), ((3728, 3743), 'numpy.sum', 'np.sum', (['inertia'], {}), '(inertia)\n', (3734, 3743), True, 'import numpy as np\n'), ((3773, 3789), 'numpy.mean', 'np.mean', (['inertia'], {}), '(inertia)\n', (3780, 3789), True, 'import numpy as np\n'), ((3818, 3833), 'numpy.std', 'np.std', (['inertia'], {}), '(inertia)\n', (3824, 3833), True, 'import numpy as np\n'), ((3860, 3874), 'numpy.mean', 'np.mean', (['sizes'], {}), '(sizes)\n', (3867, 3874), True, 'import numpy as np\n'), ((3900, 3913), 'numpy.std', 'np.std', (['sizes'], {}), '(sizes)\n', (3906, 3913), True, 'import numpy as np\n'), ((3926, 3941), 'numpy.sum', 'np.sum', (['inertia'], {}), '(inertia)\n', (3932, 3941), True, 'import numpy as np\n'), ((4017, 4032), 'numpy.std', 'np.std', (['inertia'], {}), '(inertia)\n', (4023, 4032), True, 'import numpy as np\n'), ((4106, 4119), 'numpy.std', 'np.std', (['sizes'], {}), '(sizes)\n', (4112, 4119), True, 'import numpy as np\n'), ((4280, 4295), 'numpy.mean', 'np.mean', (['counts'], {}), '(counts)\n', (4287, 4295), True, 'import numpy as np\n'), ((4323, 4337), 'numpy.min', 'np.min', (['counts'], {}), '(counts)\n', (4329, 4337), True, 'import numpy as np\n'), ((4365, 4379), 'numpy.max', 'np.max', (['counts'], {}), '(counts)\n', (4371, 4379), True, 'import numpy as np\n'), ((5188, 5203), 'numpy.sum', 'np.sum', (['inertia'], {}), '(inertia)\n', (5194, 5203), True, 'import numpy as np\n'), ((5233, 5249), 'numpy.mean', 'np.mean', (['inertia'], {}), '(inertia)\n', (5240, 5249), True, 'import numpy as np\n'), ((5278, 5293), 'numpy.std', 'np.std', (['inertia'], {}), '(inertia)\n', (5284, 5293), True, 'import numpy as np\n'), ((5320, 5334), 'numpy.mean', 'np.mean', (['sizes'], {}), '(sizes)\n', (5327, 5334), True, 'import numpy as np\n'), ((5360, 5373), 'numpy.std', 'np.std', (['sizes'], {}), '(sizes)\n', (5366, 5373), True, 'import numpy as np\n'), ((5386, 5401), 'numpy.sum', 'np.sum', (['inertia'], {}), '(inertia)\n', (5392, 5401), True, 'import numpy as np\n'), ((5420, 5435), 'numpy.std', 'np.std', (['inertia'], {}), '(inertia)\n', (5426, 5435), True, 'import numpy as np\n'), ((5507, 5520), 'numpy.std', 'np.std', (['sizes'], {}), '(sizes)\n', (5513, 5520), True, 'import numpy as np\n'), ((5572, 5587), 'numpy.mean', 'np.mean', (['counts'], {}), '(counts)\n', (5579, 5587), True, 'import numpy as np\n'), ((5615, 5629), 'numpy.min', 'np.min', (['counts'], {}), '(counts)\n', (5621, 5629), True, 'import numpy as np\n'), ((5657, 5671), 'numpy.max', 'np.max', (['counts'], {}), '(counts)\n', (5663, 5671), True, 'import numpy as np\n'), ((6711, 6723), 'numpy.unique', 'np.unique', (['p'], {}), '(p)\n', (6720, 6723), True, 'import 
numpy as np\n'), ((6876, 6907), 'numpy.array', 'np.array', (['[cat.x, cat.y, cat.z]'], {}), '([cat.x, cat.y, cat.z])\n', (6884, 6907), True, 'import numpy as np\n'), ((7460, 7475), 'numpy.sum', 'np.sum', (['inertia'], {}), '(inertia)\n', (7466, 7475), True, 'import numpy as np\n'), ((7505, 7521), 'numpy.mean', 'np.mean', (['inertia'], {}), '(inertia)\n', (7512, 7521), True, 'import numpy as np\n'), ((7550, 7565), 'numpy.std', 'np.std', (['inertia'], {}), '(inertia)\n', (7556, 7565), True, 'import numpy as np\n'), ((7578, 7593), 'numpy.sum', 'np.sum', (['inertia'], {}), '(inertia)\n', (7584, 7593), True, 'import numpy as np\n'), ((7669, 7684), 'numpy.std', 'np.std', (['inertia'], {}), '(inertia)\n', (7675, 7684), True, 'import numpy as np\n'), ((8077, 8092), 'numpy.mean', 'np.mean', (['counts'], {}), '(counts)\n', (8084, 8092), True, 'import numpy as np\n'), ((8120, 8134), 'numpy.min', 'np.min', (['counts'], {}), '(counts)\n', (8126, 8134), True, 'import numpy as np\n'), ((8162, 8176), 'numpy.max', 'np.max', (['counts'], {}), '(counts)\n', (8168, 8176), True, 'import numpy as np\n'), ((8679, 8694), 'numpy.sum', 'np.sum', (['inertia'], {}), '(inertia)\n', (8685, 8694), True, 'import numpy as np\n'), ((8724, 8740), 'numpy.mean', 'np.mean', (['inertia'], {}), '(inertia)\n', (8731, 8740), True, 'import numpy as np\n'), ((8769, 8784), 'numpy.std', 'np.std', (['inertia'], {}), '(inertia)\n', (8775, 8784), True, 'import numpy as np\n'), ((8797, 8812), 'numpy.sum', 'np.sum', (['inertia'], {}), '(inertia)\n', (8803, 8812), True, 'import numpy as np\n'), ((8888, 8903), 'numpy.std', 'np.std', (['inertia'], {}), '(inertia)\n', (8894, 8903), True, 'import numpy as np\n'), ((8993, 9008), 'numpy.mean', 'np.mean', (['counts'], {}), '(counts)\n', (9000, 9008), True, 'import numpy as np\n'), ((9036, 9050), 'numpy.min', 'np.min', (['counts'], {}), '(counts)\n', (9042, 9050), True, 'import numpy as np\n'), ((9078, 9092), 'numpy.max', 'np.max', (['counts'], {}), '(counts)\n', (9084, 9092), True, 'import numpy as np\n'), ((9742, 9757), 'numpy.sum', 'np.sum', (['inertia'], {}), '(inertia)\n', (9748, 9757), True, 'import numpy as np\n'), ((9787, 9803), 'numpy.mean', 'np.mean', (['inertia'], {}), '(inertia)\n', (9794, 9803), True, 'import numpy as np\n'), ((9832, 9847), 'numpy.std', 'np.std', (['inertia'], {}), '(inertia)\n', (9838, 9847), True, 'import numpy as np\n'), ((9860, 9875), 'numpy.sum', 'np.sum', (['inertia'], {}), '(inertia)\n', (9866, 9875), True, 'import numpy as np\n'), ((9894, 9909), 'numpy.std', 'np.std', (['inertia'], {}), '(inertia)\n', (9900, 9909), True, 'import numpy as np\n'), ((9997, 10012), 'numpy.mean', 'np.mean', (['counts'], {}), '(counts)\n', (10004, 10012), True, 'import numpy as np\n'), ((10040, 10054), 'numpy.min', 'np.min', (['counts'], {}), '(counts)\n', (10046, 10054), True, 'import numpy as np\n'), ((10082, 10096), 'numpy.max', 'np.max', (['counts'], {}), '(counts)\n', (10088, 10096), True, 'import numpy as np\n'), ((10576, 10588), 'numpy.unique', 'np.unique', (['p'], {}), '(p)\n', (10585, 10588), True, 'import numpy as np\n'), ((10684, 10703), 'numpy.array', 'np.array', (['[x, y, z]'], {}), '([x, y, z])\n', (10692, 10703), True, 'import numpy as np\n'), ((11026, 11041), 'numpy.sum', 'np.sum', (['inertia'], {}), '(inertia)\n', (11032, 11041), True, 'import numpy as np\n'), ((11071, 11087), 'numpy.mean', 'np.mean', (['inertia'], {}), '(inertia)\n', (11078, 11087), True, 'import numpy as np\n'), ((11116, 11131), 'numpy.std', 'np.std', (['inertia'], {}), '(inertia)\n', (11122, 
11131), True, 'import numpy as np\n'), ((11144, 11159), 'numpy.sum', 'np.sum', (['inertia'], {}), '(inertia)\n', (11150, 11159), True, 'import numpy as np\n'), ((11180, 11195), 'numpy.std', 'np.std', (['inertia'], {}), '(inertia)\n', (11186, 11195), True, 'import numpy as np\n'), ((11278, 11293), 'numpy.mean', 'np.mean', (['counts'], {}), '(counts)\n', (11285, 11293), True, 'import numpy as np\n'), ((11321, 11335), 'numpy.min', 'np.min', (['counts'], {}), '(counts)\n', (11327, 11335), True, 'import numpy as np\n'), ((11363, 11377), 'numpy.max', 'np.max', (['counts'], {}), '(counts)\n', (11369, 11377), True, 'import numpy as np\n'), ((11936, 11951), 'numpy.sum', 'np.sum', (['inertia'], {}), '(inertia)\n', (11942, 11951), True, 'import numpy as np\n'), ((11981, 11997), 'numpy.mean', 'np.mean', (['inertia'], {}), '(inertia)\n', (11988, 11997), True, 'import numpy as np\n'), ((12026, 12041), 'numpy.std', 'np.std', (['inertia'], {}), '(inertia)\n', (12032, 12041), True, 'import numpy as np\n'), ((12054, 12069), 'numpy.sum', 'np.sum', (['inertia'], {}), '(inertia)\n', (12060, 12069), True, 'import numpy as np\n'), ((12090, 12105), 'numpy.std', 'np.std', (['inertia'], {}), '(inertia)\n', (12096, 12105), True, 'import numpy as np\n'), ((12188, 12203), 'numpy.mean', 'np.mean', (['counts'], {}), '(counts)\n', (12195, 12203), True, 'import numpy as np\n'), ((12231, 12245), 'numpy.min', 'np.min', (['counts'], {}), '(counts)\n', (12237, 12245), True, 'import numpy as np\n'), ((12273, 12287), 'numpy.max', 'np.max', (['counts'], {}), '(counts)\n', (12279, 12287), True, 'import numpy as np\n'), ((12790, 12805), 'numpy.sum', 'np.sum', (['inertia'], {}), '(inertia)\n', (12796, 12805), True, 'import numpy as np\n'), ((12835, 12851), 'numpy.mean', 'np.mean', (['inertia'], {}), '(inertia)\n', (12842, 12851), True, 'import numpy as np\n'), ((12880, 12895), 'numpy.std', 'np.std', (['inertia'], {}), '(inertia)\n', (12886, 12895), True, 'import numpy as np\n'), ((12908, 12923), 'numpy.sum', 'np.sum', (['inertia'], {}), '(inertia)\n', (12914, 12923), True, 'import numpy as np\n'), ((12944, 12959), 'numpy.std', 'np.std', (['inertia'], {}), '(inertia)\n', (12950, 12959), True, 'import numpy as np\n'), ((13048, 13063), 'numpy.mean', 'np.mean', (['counts'], {}), '(counts)\n', (13055, 13063), True, 'import numpy as np\n'), ((13091, 13105), 'numpy.min', 'np.min', (['counts'], {}), '(counts)\n', (13097, 13105), True, 'import numpy as np\n'), ((13133, 13147), 'numpy.max', 'np.max', (['counts'], {}), '(counts)\n', (13139, 13147), True, 'import numpy as np\n'), ((13797, 13812), 'numpy.sum', 'np.sum', (['inertia'], {}), '(inertia)\n', (13803, 13812), True, 'import numpy as np\n'), ((13842, 13858), 'numpy.mean', 'np.mean', (['inertia'], {}), '(inertia)\n', (13849, 13858), True, 'import numpy as np\n'), ((13887, 13902), 'numpy.std', 'np.std', (['inertia'], {}), '(inertia)\n', (13893, 13902), True, 'import numpy as np\n'), ((13915, 13930), 'numpy.sum', 'np.sum', (['inertia'], {}), '(inertia)\n', (13921, 13930), True, 'import numpy as np\n'), ((13951, 13966), 'numpy.std', 'np.std', (['inertia'], {}), '(inertia)\n', (13957, 13966), True, 'import numpy as np\n'), ((14054, 14069), 'numpy.mean', 'np.mean', (['counts'], {}), '(counts)\n', (14061, 14069), True, 'import numpy as np\n'), ((14097, 14111), 'numpy.min', 'np.min', (['counts'], {}), '(counts)\n', (14103, 14111), True, 'import numpy as np\n'), ((14139, 14153), 'numpy.max', 'np.max', (['counts'], {}), '(counts)\n', (14145, 14153), True, 'import numpy as np\n'), ((14883, 14895), 
'numpy.unique', 'np.unique', (['p'], {}), '(p)\n', (14892, 14895), True, 'import numpy as np\n'), ((14990, 15006), 'numpy.array', 'np.array', (['[x, y]'], {}), '([x, y])\n', (14998, 15006), True, 'import numpy as np\n'), ((15328, 15343), 'numpy.sum', 'np.sum', (['inertia'], {}), '(inertia)\n', (15334, 15343), True, 'import numpy as np\n'), ((15373, 15389), 'numpy.mean', 'np.mean', (['inertia'], {}), '(inertia)\n', (15380, 15389), True, 'import numpy as np\n'), ((15418, 15433), 'numpy.std', 'np.std', (['inertia'], {}), '(inertia)\n', (15424, 15433), True, 'import numpy as np\n'), ((15446, 15461), 'numpy.sum', 'np.sum', (['inertia'], {}), '(inertia)\n', (15452, 15461), True, 'import numpy as np\n'), ((15481, 15496), 'numpy.std', 'np.std', (['inertia'], {}), '(inertia)\n', (15487, 15496), True, 'import numpy as np\n'), ((15579, 15594), 'numpy.mean', 'np.mean', (['counts'], {}), '(counts)\n', (15586, 15594), True, 'import numpy as np\n'), ((15622, 15636), 'numpy.min', 'np.min', (['counts'], {}), '(counts)\n', (15628, 15636), True, 'import numpy as np\n'), ((15664, 15678), 'numpy.max', 'np.max', (['counts'], {}), '(counts)\n', (15670, 15678), True, 'import numpy as np\n'), ((16180, 16195), 'numpy.sum', 'np.sum', (['inertia'], {}), '(inertia)\n', (16186, 16195), True, 'import numpy as np\n'), ((16225, 16241), 'numpy.mean', 'np.mean', (['inertia'], {}), '(inertia)\n', (16232, 16241), True, 'import numpy as np\n'), ((16270, 16285), 'numpy.std', 'np.std', (['inertia'], {}), '(inertia)\n', (16276, 16285), True, 'import numpy as np\n'), ((16298, 16313), 'numpy.sum', 'np.sum', (['inertia'], {}), '(inertia)\n', (16304, 16313), True, 'import numpy as np\n'), ((16333, 16348), 'numpy.std', 'np.std', (['inertia'], {}), '(inertia)\n', (16339, 16348), True, 'import numpy as np\n'), ((16437, 16452), 'numpy.mean', 'np.mean', (['counts'], {}), '(counts)\n', (16444, 16452), True, 'import numpy as np\n'), ((16480, 16494), 'numpy.min', 'np.min', (['counts'], {}), '(counts)\n', (16486, 16494), True, 'import numpy as np\n'), ((16522, 16536), 'numpy.max', 'np.max', (['counts'], {}), '(counts)\n', (16528, 16536), True, 'import numpy as np\n'), ((17185, 17200), 'numpy.sum', 'np.sum', (['inertia'], {}), '(inertia)\n', (17191, 17200), True, 'import numpy as np\n'), ((17230, 17246), 'numpy.mean', 'np.mean', (['inertia'], {}), '(inertia)\n', (17237, 17246), True, 'import numpy as np\n'), ((17275, 17290), 'numpy.std', 'np.std', (['inertia'], {}), '(inertia)\n', (17281, 17290), True, 'import numpy as np\n'), ((17303, 17318), 'numpy.sum', 'np.sum', (['inertia'], {}), '(inertia)\n', (17309, 17318), True, 'import numpy as np\n'), ((17338, 17353), 'numpy.std', 'np.std', (['inertia'], {}), '(inertia)\n', (17344, 17353), True, 'import numpy as np\n'), ((17441, 17456), 'numpy.mean', 'np.mean', (['counts'], {}), '(counts)\n', (17448, 17456), True, 'import numpy as np\n'), ((17484, 17498), 'numpy.min', 'np.min', (['counts'], {}), '(counts)\n', (17490, 17498), True, 'import numpy as np\n'), ((17526, 17540), 'numpy.max', 'np.max', (['counts'], {}), '(counts)\n', (17532, 17540), True, 'import numpy as np\n'), ((17834, 17853), 'numpy.array', 'np.array', (['[x, y, z]'], {}), '([x, y, z])\n', (17842, 17853), True, 'import numpy as np\n'), ((18134, 18147), 'numpy.unique', 'np.unique', (['p1'], {}), '(p1)\n', (18143, 18147), True, 'import numpy as np\n'), ((18440, 18455), 'numpy.std', 'np.std', (['counts1'], {}), '(counts1)\n', (18446, 18455), True, 'import numpy as np\n'), ((18486, 18502), 'numpy.sum', 'np.sum', (['inertia1'], {}), 
'(inertia1)\n', (18492, 18502), True, 'import numpy as np\n'), ((18848, 18863), 'numpy.std', 'np.std', (['counts2'], {}), '(counts2)\n', (18854, 18863), True, 'import numpy as np\n'), ((18895, 18911), 'numpy.sum', 'np.sum', (['inertia2'], {}), '(inertia2)\n', (18901, 18911), True, 'import numpy as np\n'), ((18924, 18940), 'numpy.sum', 'np.sum', (['inertia2'], {}), '(inertia2)\n', (18930, 18940), True, 'import numpy as np\n'), ((18943, 18959), 'numpy.sum', 'np.sum', (['inertia1'], {}), '(inertia1)\n', (18949, 18959), True, 'import numpy as np\n'), ((19255, 19268), 'numpy.unique', 'np.unique', (['p1'], {}), '(p1)\n', (19264, 19268), True, 'import numpy as np\n'), ((19561, 19576), 'numpy.std', 'np.std', (['counts1'], {}), '(counts1)\n', (19567, 19576), True, 'import numpy as np\n'), ((19607, 19623), 'numpy.sum', 'np.sum', (['inertia1'], {}), '(inertia1)\n', (19613, 19623), True, 'import numpy as np\n'), ((19899, 19914), 'numpy.std', 'np.std', (['counts2'], {}), '(counts2)\n', (19905, 19914), True, 'import numpy as np\n'), ((19946, 19962), 'numpy.sum', 'np.sum', (['inertia2'], {}), '(inertia2)\n', (19952, 19962), True, 'import numpy as np\n'), ((19975, 19991), 'numpy.sum', 'np.sum', (['inertia2'], {}), '(inertia2)\n', (19981, 19991), True, 'import numpy as np\n'), ((19994, 20010), 'numpy.sum', 'np.sum', (['inertia1'], {}), '(inertia1)\n', (20000, 20010), True, 'import numpy as np\n'), ((20110, 20126), 'numpy.array', 'np.array', (['[x, y]'], {}), '([x, y])\n', (20118, 20126), True, 'import numpy as np\n'), ((20321, 20334), 'numpy.unique', 'np.unique', (['p1'], {}), '(p1)\n', (20330, 20334), True, 'import numpy as np\n'), ((20626, 20641), 'numpy.std', 'np.std', (['counts1'], {}), '(counts1)\n', (20632, 20641), True, 'import numpy as np\n'), ((20672, 20688), 'numpy.sum', 'np.sum', (['inertia1'], {}), '(inertia1)\n', (20678, 20688), True, 'import numpy as np\n'), ((20963, 20978), 'numpy.std', 'np.std', (['counts2'], {}), '(counts2)\n', (20969, 20978), True, 'import numpy as np\n'), ((21010, 21026), 'numpy.sum', 'np.sum', (['inertia2'], {}), '(inertia2)\n', (21016, 21026), True, 'import numpy as np\n'), ((21039, 21055), 'numpy.sum', 'np.sum', (['inertia2'], {}), '(inertia2)\n', (21045, 21055), True, 'import numpy as np\n'), ((21058, 21074), 'numpy.sum', 'np.sum', (['inertia1'], {}), '(inertia1)\n', (21064, 21074), True, 'import numpy as np\n'), ((21279, 21310), 'numpy.array', 'np.array', (['[cat.x, cat.y, cat.z]'], {}), '([cat.x, cat.y, cat.z])\n', (21287, 21310), True, 'import numpy as np\n'), ((21505, 21518), 'numpy.unique', 'np.unique', (['p1'], {}), '(p1)\n', (21514, 21518), True, 'import numpy as np\n'), ((21811, 21826), 'numpy.std', 'np.std', (['counts1'], {}), '(counts1)\n', (21817, 21826), True, 'import numpy as np\n'), ((21857, 21873), 'numpy.sum', 'np.sum', (['inertia1'], {}), '(inertia1)\n', (21863, 21873), True, 'import numpy as np\n'), ((22149, 22164), 'numpy.std', 'np.std', (['counts2'], {}), '(counts2)\n', (22155, 22164), True, 'import numpy as np\n'), ((22196, 22212), 'numpy.sum', 'np.sum', (['inertia2'], {}), '(inertia2)\n', (22202, 22212), True, 'import numpy as np\n'), ((22225, 22241), 'numpy.sum', 'np.sum', (['inertia2'], {}), '(inertia2)\n', (22231, 22241), True, 'import numpy as np\n'), ((22244, 22260), 'numpy.sum', 'np.sum', (['inertia1'], {}), '(inertia1)\n', (22250, 22260), True, 'import numpy as np\n'), ((22271, 22296), 'test_helper.assert_raises', 'assert_raises', (['ValueError'], {}), '(ValueError)\n', (22284, 22296), False, 'from test_helper import get_from_wiki, 
CaptureLog, assert_raises, profile, timer\n'), ((22356, 22381), 'test_helper.assert_raises', 'assert_raises', (['ValueError'], {}), '(ValueError)\n', (22369, 22381), False, 'from test_helper import get_from_wiki, CaptureLog, assert_raises, profile, timer\n'), ((22456, 22481), 'test_helper.assert_raises', 'assert_raises', (['ValueError'], {}), '(ValueError)\n', (22469, 22481), False, 'from test_helper import get_from_wiki, CaptureLog, assert_raises, profile, timer\n'), ((22562, 22587), 'test_helper.assert_raises', 'assert_raises', (['ValueError'], {}), '(ValueError)\n', (22575, 22587), False, 'from test_helper import get_from_wiki, CaptureLog, assert_raises, profile, timer\n'), ((22668, 22693), 'test_helper.assert_raises', 'assert_raises', (['ValueError'], {}), '(ValueError)\n', (22681, 22693), False, 'from test_helper import get_from_wiki, CaptureLog, assert_raises, profile, timer\n'), ((22769, 22794), 'test_helper.assert_raises', 'assert_raises', (['ValueError'], {}), '(ValueError)\n', (22782, 22794), False, 'from test_helper import get_from_wiki, CaptureLog, assert_raises, profile, timer\n'), ((23088, 23102), 'numpy.zeros', 'np.zeros', (['ngal'], {}), '(ngal)\n', (23096, 23102), True, 'import numpy as np\n'), ((23913, 23932), 'numpy.array', 'np.array', (['[x, y, z]'], {}), '([x, y, z])\n', (23921, 23932), True, 'import numpy as np\n'), ((24217, 24230), 'numpy.unique', 'np.unique', (['p1'], {}), '(p1)\n', (24226, 24230), True, 'import numpy as np\n'), ((24523, 24538), 'numpy.std', 'np.std', (['counts1'], {}), '(counts1)\n', (24529, 24538), True, 'import numpy as np\n'), ((24569, 24585), 'numpy.sum', 'np.sum', (['inertia1'], {}), '(inertia1)\n', (24575, 24585), True, 'import numpy as np\n'), ((24933, 24948), 'numpy.std', 'np.std', (['counts2'], {}), '(counts2)\n', (24939, 24948), True, 'import numpy as np\n'), ((24980, 24996), 'numpy.sum', 'np.sum', (['inertia2'], {}), '(inertia2)\n', (24986, 24996), True, 'import numpy as np\n'), ((25009, 25025), 'numpy.sum', 'np.sum', (['inertia2'], {}), '(inertia2)\n', (25015, 25025), True, 'import numpy as np\n'), ((25028, 25044), 'numpy.sum', 'np.sum', (['inertia1'], {}), '(inertia1)\n', (25034, 25044), True, 'import numpy as np\n'), ((25344, 25357), 'numpy.unique', 'np.unique', (['p1'], {}), '(p1)\n', (25353, 25357), True, 'import numpy as np\n'), ((25650, 25665), 'numpy.std', 'np.std', (['counts1'], {}), '(counts1)\n', (25656, 25665), True, 'import numpy as np\n'), ((25696, 25712), 'numpy.sum', 'np.sum', (['inertia1'], {}), '(inertia1)\n', (25702, 25712), True, 'import numpy as np\n'), ((25990, 26005), 'numpy.std', 'np.std', (['counts2'], {}), '(counts2)\n', (25996, 26005), True, 'import numpy as np\n'), ((26037, 26053), 'numpy.sum', 'np.sum', (['inertia2'], {}), '(inertia2)\n', (26043, 26053), True, 'import numpy as np\n'), ((26066, 26082), 'numpy.sum', 'np.sum', (['inertia2'], {}), '(inertia2)\n', (26072, 26082), True, 'import numpy as np\n'), ((26085, 26101), 'numpy.sum', 'np.sum', (['inertia1'], {}), '(inertia1)\n', (26091, 26101), True, 'import numpy as np\n'), ((26203, 26219), 'numpy.array', 'np.array', (['[x, y]'], {}), '([x, y])\n', (26211, 26219), True, 'import numpy as np\n'), ((26416, 26429), 'numpy.unique', 'np.unique', (['p1'], {}), '(p1)\n', (26425, 26429), True, 'import numpy as np\n'), ((26721, 26736), 'numpy.std', 'np.std', (['counts1'], {}), '(counts1)\n', (26727, 26736), True, 'import numpy as np\n'), ((26767, 26783), 'numpy.sum', 'np.sum', (['inertia1'], {}), '(inertia1)\n', (26773, 26783), True, 'import numpy as np\n'), ((27060, 
27075), 'numpy.std', 'np.std', (['counts2'], {}), '(counts2)\n', (27066, 27075), True, 'import numpy as np\n'), ((27107, 27123), 'numpy.sum', 'np.sum', (['inertia2'], {}), '(inertia2)\n', (27113, 27123), True, 'import numpy as np\n'), ((27136, 27152), 'numpy.sum', 'np.sum', (['inertia2'], {}), '(inertia2)\n', (27142, 27152), True, 'import numpy as np\n'), ((27155, 27171), 'numpy.sum', 'np.sum', (['inertia1'], {}), '(inertia1)\n', (27161, 27171), True, 'import numpy as np\n'), ((27378, 27409), 'numpy.array', 'np.array', (['[cat.x, cat.y, cat.z]'], {}), '([cat.x, cat.y, cat.z])\n', (27386, 27409), True, 'import numpy as np\n'), ((27606, 27619), 'numpy.unique', 'np.unique', (['p1'], {}), '(p1)\n', (27615, 27619), True, 'import numpy as np\n'), ((27912, 27927), 'numpy.std', 'np.std', (['counts1'], {}), '(counts1)\n', (27918, 27927), True, 'import numpy as np\n'), ((27958, 27974), 'numpy.sum', 'np.sum', (['inertia1'], {}), '(inertia1)\n', (27964, 27974), True, 'import numpy as np\n'), ((28252, 28267), 'numpy.std', 'np.std', (['counts2'], {}), '(counts2)\n', (28258, 28267), True, 'import numpy as np\n'), ((28299, 28315), 'numpy.sum', 'np.sum', (['inertia2'], {}), '(inertia2)\n', (28305, 28315), True, 'import numpy as np\n'), ((28328, 28344), 'numpy.sum', 'np.sum', (['inertia2'], {}), '(inertia2)\n', (28334, 28344), True, 'import numpy as np\n'), ((28347, 28363), 'numpy.sum', 'np.sum', (['inertia1'], {}), '(inertia1)\n', (28353, 28363), True, 'import numpy as np\n'), ((28374, 28399), 'test_helper.assert_raises', 'assert_raises', (['ValueError'], {}), '(ValueError)\n', (28387, 28399), False, 'from test_helper import get_from_wiki, CaptureLog, assert_raises, profile, timer\n'), ((28482, 28507), 'test_helper.assert_raises', 'assert_raises', (['ValueError'], {}), '(ValueError)\n', (28495, 28507), False, 'from test_helper import get_from_wiki, CaptureLog, assert_raises, profile, timer\n'), ((28590, 28615), 'test_helper.assert_raises', 'assert_raises', (['ValueError'], {}), '(ValueError)\n', (28603, 28615), False, 'from test_helper import get_from_wiki, CaptureLog, assert_raises, profile, timer\n'), ((28693, 28718), 'test_helper.assert_raises', 'assert_raises', (['ValueError'], {}), '(ValueError)\n', (28706, 28718), False, 'from test_helper import get_from_wiki, CaptureLog, assert_raises, profile, timer\n'), ((29016, 29030), 'numpy.zeros', 'np.zeros', (['ngal'], {}), '(ngal)\n', (29024, 29030), True, 'import numpy as np\n'), ((30680, 30692), 'numpy.unique', 'np.unique', (['p'], {}), '(p)\n', (30689, 30692), True, 'import numpy as np\n'), ((30804, 30823), 'numpy.unique', 'np.unique', (['p[w > 0]'], {}), '(p[w > 0])\n', (30813, 30823), True, 'import numpy as np\n'), ((30851, 30871), 'numpy.unique', 'np.unique', (['p[w == 0]'], {}), '(p[w == 0])\n', (30860, 30871), True, 'import numpy as np\n'), ((31863, 31875), 'numpy.unique', 'np.unique', (['p'], {}), '(p)\n', (31872, 31875), True, 'import numpy as np\n'), ((32028, 32059), 'numpy.array', 'np.array', (['[cat.x, cat.y, cat.z]'], {}), '([cat.x, cat.y, cat.z])\n', (32036, 32059), True, 'import numpy as np\n'), ((32612, 32627), 'numpy.sum', 'np.sum', (['inertia'], {}), '(inertia)\n', (32618, 32627), True, 'import numpy as np\n'), ((32657, 32673), 'numpy.mean', 'np.mean', (['inertia'], {}), '(inertia)\n', (32664, 32673), True, 'import numpy as np\n'), ((32702, 32717), 'numpy.std', 'np.std', (['inertia'], {}), '(inertia)\n', (32708, 32717), True, 'import numpy as np\n'), ((32730, 32745), 'numpy.sum', 'np.sum', (['inertia'], {}), '(inertia)\n', (32736, 32745), 
True, 'import numpy as np\n'), ((32821, 32836), 'numpy.std', 'np.std', (['inertia'], {}), '(inertia)\n', (32827, 32836), True, 'import numpy as np\n'), ((33229, 33244), 'numpy.mean', 'np.mean', (['counts'], {}), '(counts)\n', (33236, 33244), True, 'import numpy as np\n'), ((33272, 33286), 'numpy.min', 'np.min', (['counts'], {}), '(counts)\n', (33278, 33286), True, 'import numpy as np\n'), ((33314, 33328), 'numpy.max', 'np.max', (['counts'], {}), '(counts)\n', (33320, 33328), True, 'import numpy as np\n'), ((33974, 33989), 'numpy.sum', 'np.sum', (['inertia'], {}), '(inertia)\n', (33980, 33989), True, 'import numpy as np\n'), ((34019, 34035), 'numpy.mean', 'np.mean', (['inertia'], {}), '(inertia)\n', (34026, 34035), True, 'import numpy as np\n'), ((34064, 34079), 'numpy.std', 'np.std', (['inertia'], {}), '(inertia)\n', (34070, 34079), True, 'import numpy as np\n'), ((34092, 34107), 'numpy.sum', 'np.sum', (['inertia'], {}), '(inertia)\n', (34098, 34107), True, 'import numpy as np\n'), ((34183, 34198), 'numpy.std', 'np.std', (['inertia'], {}), '(inertia)\n', (34189, 34198), True, 'import numpy as np\n'), ((34288, 34303), 'numpy.mean', 'np.mean', (['counts'], {}), '(counts)\n', (34295, 34303), True, 'import numpy as np\n'), ((34331, 34345), 'numpy.min', 'np.min', (['counts'], {}), '(counts)\n', (34337, 34345), True, 'import numpy as np\n'), ((34373, 34387), 'numpy.max', 'np.max', (['counts'], {}), '(counts)\n', (34379, 34387), True, 'import numpy as np\n'), ((35701, 35713), 'numpy.unique', 'np.unique', (['p'], {}), '(p)\n', (35710, 35713), True, 'import numpy as np\n'), ((35866, 35921), 'numpy.array', 'np.array', (['[cat.x / cat.r, cat.y / cat.r, cat.z / cat.r]'], {}), '([cat.x / cat.r, cat.y / cat.r, cat.z / cat.r])\n', (35874, 35921), True, 'import numpy as np\n'), ((36516, 36531), 'numpy.sum', 'np.sum', (['inertia'], {}), '(inertia)\n', (36522, 36531), True, 'import numpy as np\n'), ((36561, 36577), 'numpy.mean', 'np.mean', (['inertia'], {}), '(inertia)\n', (36568, 36577), True, 'import numpy as np\n'), ((36606, 36621), 'numpy.std', 'np.std', (['inertia'], {}), '(inertia)\n', (36612, 36621), True, 'import numpy as np\n'), ((36634, 36649), 'numpy.sum', 'np.sum', (['inertia'], {}), '(inertia)\n', (36640, 36649), True, 'import numpy as np\n'), ((36725, 36740), 'numpy.std', 'np.std', (['inertia'], {}), '(inertia)\n', (36731, 36740), True, 'import numpy as np\n'), ((37133, 37148), 'numpy.mean', 'np.mean', (['counts'], {}), '(counts)\n', (37140, 37148), True, 'import numpy as np\n'), ((37176, 37190), 'numpy.min', 'np.min', (['counts'], {}), '(counts)\n', (37182, 37190), True, 'import numpy as np\n'), ((37218, 37232), 'numpy.max', 'np.max', (['counts'], {}), '(counts)\n', (37224, 37232), True, 'import numpy as np\n'), ((37883, 37898), 'numpy.sum', 'np.sum', (['inertia'], {}), '(inertia)\n', (37889, 37898), True, 'import numpy as np\n'), ((37928, 37944), 'numpy.mean', 'np.mean', (['inertia'], {}), '(inertia)\n', (37935, 37944), True, 'import numpy as np\n'), ((37973, 37988), 'numpy.std', 'np.std', (['inertia'], {}), '(inertia)\n', (37979, 37988), True, 'import numpy as np\n'), ((38001, 38016), 'numpy.sum', 'np.sum', (['inertia'], {}), '(inertia)\n', (38007, 38016), True, 'import numpy as np\n'), ((38092, 38107), 'numpy.std', 'np.std', (['inertia'], {}), '(inertia)\n', (38098, 38107), True, 'import numpy as np\n'), ((38197, 38212), 'numpy.mean', 'np.mean', (['counts'], {}), '(counts)\n', (38204, 38212), True, 'import numpy as np\n'), ((38240, 38254), 'numpy.min', 'np.min', (['counts'], {}), 
'(counts)\n', (38246, 38254), True, 'import numpy as np\n'), ((38282, 38296), 'numpy.max', 'np.max', (['counts'], {}), '(counts)\n', (38288, 38296), True, 'import numpy as np\n'), ((1793, 1824), 'numpy.sum', 'np.sum', (['(direct_cen ** 2)'], {'axis': '(1)'}), '(direct_cen ** 2, axis=1)\n', (1799, 1824), True, 'import numpy as np\n'), ((2039, 2080), 'numpy.sum', 'np.sum', (['((xyz[patches == i] - cen[i]) ** 2)'], {}), '((xyz[patches == i] - cen[i]) ** 2)\n', (2045, 2080), True, 'import numpy as np\n'), ((2269, 2289), 'numpy.sum', 'np.sum', (['(patches == i)'], {}), '(patches == i)\n', (2275, 2289), True, 'import numpy as np\n'), ((2721, 2737), 'numpy.mean', 'np.mean', (['inertia'], {}), '(inertia)\n', (2728, 2737), True, 'import numpy as np\n'), ((2802, 2816), 'numpy.mean', 'np.mean', (['sizes'], {}), '(sizes)\n', (2809, 2816), True, 'import numpy as np\n'), ((3358, 3399), 'numpy.sum', 'np.sum', (['((xyz[patches == i] - cen[i]) ** 2)'], {}), '((xyz[patches == i] - cen[i]) ** 2)\n', (3364, 3399), True, 'import numpy as np\n'), ((3588, 3608), 'numpy.sum', 'np.sum', (['(patches == i)'], {}), '(patches == i)\n', (3594, 3608), True, 'import numpy as np\n'), ((4042, 4058), 'numpy.mean', 'np.mean', (['inertia'], {}), '(inertia)\n', (4049, 4058), True, 'import numpy as np\n'), ((4128, 4142), 'numpy.mean', 'np.mean', (['sizes'], {}), '(sizes)\n', (4135, 4142), True, 'import numpy as np\n'), ((4742, 4783), 'numpy.sum', 'np.sum', (['((xyz[patches == i] - cen[i]) ** 2)'], {}), '((xyz[patches == i] - cen[i]) ** 2)\n', (4748, 4783), True, 'import numpy as np\n'), ((4972, 4992), 'numpy.sum', 'np.sum', (['(patches == i)'], {}), '(patches == i)\n', (4978, 4992), True, 'import numpy as np\n'), ((5444, 5460), 'numpy.mean', 'np.mean', (['inertia'], {}), '(inertia)\n', (5451, 5460), True, 'import numpy as np\n'), ((5530, 5544), 'numpy.mean', 'np.mean', (['sizes'], {}), '(sizes)\n', (5537, 5544), True, 'import numpy as np\n'), ((6937, 6987), 'numpy.average', 'np.average', (['xyz[p == i]'], {'axis': '(0)', 'weights': 'w[p == i]'}), '(xyz[p == i], axis=0, weights=w[p == i])\n', (6947, 6987), True, 'import numpy as np\n'), ((7035, 7066), 'numpy.sum', 'np.sum', (['(direct_cen ** 2)'], {'axis': '(1)'}), '(direct_cen ** 2, axis=1)\n', (7041, 7066), True, 'import numpy as np\n'), ((7164, 7220), 'numpy.sum', 'np.sum', (['(w[p == i][:, None] * (xyz[p == i] - cen[i]) ** 2)'], {}), '(w[p == i][:, None] * (xyz[p == i] - cen[i]) ** 2)\n', (7170, 7220), True, 'import numpy as np\n'), ((7262, 7279), 'numpy.sum', 'np.sum', (['w[p == i]'], {}), '(w[p == i])\n', (7268, 7279), True, 'import numpy as np\n'), ((7693, 7709), 'numpy.mean', 'np.mean', (['inertia'], {}), '(inertia)\n', (7700, 7709), True, 'import numpy as np\n'), ((8444, 8500), 'numpy.sum', 'np.sum', (['(w[p == i][:, None] * (xyz[p == i] - cen[i]) ** 2)'], {}), '(w[p == i][:, None] * (xyz[p == i] - cen[i]) ** 2)\n', (8450, 8500), True, 'import numpy as np\n'), ((8542, 8559), 'numpy.sum', 'np.sum', (['w[p == i]'], {}), '(w[p == i])\n', (8548, 8559), True, 'import numpy as np\n'), ((8913, 8929), 'numpy.mean', 'np.mean', (['inertia'], {}), '(inertia)\n', (8920, 8929), True, 'import numpy as np\n'), ((9431, 9487), 'numpy.sum', 'np.sum', (['(w[p == i][:, None] * (xyz[p == i] - cen[i]) ** 2)'], {}), '(w[p == i][:, None] * (xyz[p == i] - cen[i]) ** 2)\n', (9437, 9487), True, 'import numpy as np\n'), ((9529, 9546), 'numpy.sum', 'np.sum', (['w[p == i]'], {}), '(w[p == i])\n', (9535, 9546), True, 'import numpy as np\n'), ((9918, 9934), 'numpy.mean', 'np.mean', (['inertia'], {}), 
'(inertia)\n', (9925, 9934), True, 'import numpy as np\n'), ((10730, 10786), 'numpy.sum', 'np.sum', (['(w[p == i][:, None] * (xyz[p == i] - cen[i]) ** 2)'], {}), '(w[p == i][:, None] * (xyz[p == i] - cen[i]) ** 2)\n', (10736, 10786), True, 'import numpy as np\n'), ((10828, 10845), 'numpy.sum', 'np.sum', (['w[p == i]'], {}), '(w[p == i])\n', (10834, 10845), True, 'import numpy as np\n'), ((11204, 11220), 'numpy.mean', 'np.mean', (['inertia'], {}), '(inertia)\n', (11211, 11220), True, 'import numpy as np\n'), ((11738, 11796), 'numpy.sum', 'np.sum', (['(w[p2 == i][:, None] * (xyz[p2 == i] - cen[i]) ** 2)'], {}), '(w[p2 == i][:, None] * (xyz[p2 == i] - cen[i]) ** 2)\n', (11744, 11796), True, 'import numpy as np\n'), ((11838, 11856), 'numpy.sum', 'np.sum', (['w[p2 == i]'], {}), '(w[p2 == i])\n', (11844, 11856), True, 'import numpy as np\n'), ((12114, 12130), 'numpy.mean', 'np.mean', (['inertia'], {}), '(inertia)\n', (12121, 12130), True, 'import numpy as np\n'), ((12555, 12611), 'numpy.sum', 'np.sum', (['(w[p == i][:, None] * (xyz[p == i] - cen[i]) ** 2)'], {}), '(w[p == i][:, None] * (xyz[p == i] - cen[i]) ** 2)\n', (12561, 12611), True, 'import numpy as np\n'), ((12653, 12670), 'numpy.sum', 'np.sum', (['w[p == i]'], {}), '(w[p == i])\n', (12659, 12670), True, 'import numpy as np\n'), ((12968, 12984), 'numpy.mean', 'np.mean', (['inertia'], {}), '(inertia)\n', (12975, 12984), True, 'import numpy as np\n'), ((13486, 13542), 'numpy.sum', 'np.sum', (['(w[p == i][:, None] * (xyz[p == i] - cen[i]) ** 2)'], {}), '(w[p == i][:, None] * (xyz[p == i] - cen[i]) ** 2)\n', (13492, 13542), True, 'import numpy as np\n'), ((13584, 13601), 'numpy.sum', 'np.sum', (['w[p == i]'], {}), '(w[p == i])\n', (13590, 13601), True, 'import numpy as np\n'), ((13975, 13991), 'numpy.mean', 'np.mean', (['inertia'], {}), '(inertia)\n', (13982, 13991), True, 'import numpy as np\n'), ((15033, 15088), 'numpy.sum', 'np.sum', (['(w[p == i][:, None] * (xy[p == i] - cen[i]) ** 2)'], {}), '(w[p == i][:, None] * (xy[p == i] - cen[i]) ** 2)\n', (15039, 15088), True, 'import numpy as np\n'), ((15130, 15147), 'numpy.sum', 'np.sum', (['w[p == i]'], {}), '(w[p == i])\n', (15136, 15147), True, 'import numpy as np\n'), ((15505, 15521), 'numpy.mean', 'np.mean', (['inertia'], {}), '(inertia)\n', (15512, 15521), True, 'import numpy as np\n'), ((15946, 16001), 'numpy.sum', 'np.sum', (['(w[p == i][:, None] * (xy[p == i] - cen[i]) ** 2)'], {}), '(w[p == i][:, None] * (xy[p == i] - cen[i]) ** 2)\n', (15952, 16001), True, 'import numpy as np\n'), ((16043, 16060), 'numpy.sum', 'np.sum', (['w[p == i]'], {}), '(w[p == i])\n', (16049, 16060), True, 'import numpy as np\n'), ((16357, 16373), 'numpy.mean', 'np.mean', (['inertia'], {}), '(inertia)\n', (16364, 16373), True, 'import numpy as np\n'), ((16875, 16930), 'numpy.sum', 'np.sum', (['(w[p == i][:, None] * (xy[p == i] - cen[i]) ** 2)'], {}), '(w[p == i][:, None] * (xy[p == i] - cen[i]) ** 2)\n', (16881, 16930), True, 'import numpy as np\n'), ((16972, 16989), 'numpy.sum', 'np.sum', (['w[p == i]'], {}), '(w[p == i])\n', (16978, 16989), True, 'import numpy as np\n'), ((17362, 17378), 'numpy.mean', 'np.mean', (['inertia'], {}), '(inertia)\n', (17369, 17378), True, 'import numpy as np\n'), ((18261, 18298), 'numpy.sum', 'np.sum', (['((xyz[p1 == i] - cen1[i]) ** 2)'], {}), '((xyz[p1 == i] - cen1[i]) ** 2)\n', (18267, 18298), True, 'import numpy as np\n'), ((18344, 18359), 'numpy.sum', 'np.sum', (['(p1 == i)'], {}), '(p1 == i)\n', (18350, 18359), True, 'import numpy as np\n'), ((18699, 18736), 'numpy.sum', 
'np.sum', (['((xyz[p2 == i] - cen2[i]) ** 2)'], {}), '((xyz[p2 == i] - cen2[i]) ** 2)\n', (18705, 18736), True, 'import numpy as np\n'), ((18782, 18797), 'numpy.sum', 'np.sum', (['(p2 == i)'], {}), '(p2 == i)\n', (18788, 18797), True, 'import numpy as np\n'), ((19382, 19419), 'numpy.sum', 'np.sum', (['((xyz[p1 == i] - cen1[i]) ** 2)'], {}), '((xyz[p1 == i] - cen1[i]) ** 2)\n', (19388, 19419), True, 'import numpy as np\n'), ((19465, 19480), 'numpy.sum', 'np.sum', (['(p1 == i)'], {}), '(p1 == i)\n', (19471, 19480), True, 'import numpy as np\n'), ((19750, 19787), 'numpy.sum', 'np.sum', (['((xyz[p2 == i] - cen2[i]) ** 2)'], {}), '((xyz[p2 == i] - cen2[i]) ** 2)\n', (19756, 19787), True, 'import numpy as np\n'), ((19833, 19848), 'numpy.sum', 'np.sum', (['(p2 == i)'], {}), '(p2 == i)\n', (19839, 19848), True, 'import numpy as np\n'), ((20448, 20484), 'numpy.sum', 'np.sum', (['((xy[p1 == i] - cen1[i]) ** 2)'], {}), '((xy[p1 == i] - cen1[i]) ** 2)\n', (20454, 20484), True, 'import numpy as np\n'), ((20530, 20545), 'numpy.sum', 'np.sum', (['(p1 == i)'], {}), '(p1 == i)\n', (20536, 20545), True, 'import numpy as np\n'), ((20815, 20851), 'numpy.sum', 'np.sum', (['((xy[p2 == i] - cen2[i]) ** 2)'], {}), '((xy[p2 == i] - cen2[i]) ** 2)\n', (20821, 20851), True, 'import numpy as np\n'), ((20897, 20912), 'numpy.sum', 'np.sum', (['(p2 == i)'], {}), '(p2 == i)\n', (20903, 20912), True, 'import numpy as np\n'), ((21632, 21669), 'numpy.sum', 'np.sum', (['((xyz[p1 == i] - cen1[i]) ** 2)'], {}), '((xyz[p1 == i] - cen1[i]) ** 2)\n', (21638, 21669), True, 'import numpy as np\n'), ((21715, 21730), 'numpy.sum', 'np.sum', (['(p1 == i)'], {}), '(p1 == i)\n', (21721, 21730), True, 'import numpy as np\n'), ((22000, 22037), 'numpy.sum', 'np.sum', (['((xyz[p2 == i] - cen2[i]) ** 2)'], {}), '((xyz[p2 == i] - cen2[i]) ** 2)\n', (22006, 22037), True, 'import numpy as np\n'), ((22083, 22098), 'numpy.sum', 'np.sum', (['(p2 == i)'], {}), '(p2 == i)\n', (22089, 22098), True, 'import numpy as np\n'), ((24344, 24381), 'numpy.sum', 'np.sum', (['((xyz[p1 == i] - cen1[i]) ** 2)'], {}), '((xyz[p1 == i] - cen1[i]) ** 2)\n', (24350, 24381), True, 'import numpy as np\n'), ((24427, 24442), 'numpy.sum', 'np.sum', (['(p1 == i)'], {}), '(p1 == i)\n', (24433, 24442), True, 'import numpy as np\n'), ((24784, 24821), 'numpy.sum', 'np.sum', (['((xyz[p2 == i] - cen2[i]) ** 2)'], {}), '((xyz[p2 == i] - cen2[i]) ** 2)\n', (24790, 24821), True, 'import numpy as np\n'), ((24867, 24882), 'numpy.sum', 'np.sum', (['(p2 == i)'], {}), '(p2 == i)\n', (24873, 24882), True, 'import numpy as np\n'), ((25471, 25508), 'numpy.sum', 'np.sum', (['((xyz[p1 == i] - cen1[i]) ** 2)'], {}), '((xyz[p1 == i] - cen1[i]) ** 2)\n', (25477, 25508), True, 'import numpy as np\n'), ((25554, 25569), 'numpy.sum', 'np.sum', (['(p1 == i)'], {}), '(p1 == i)\n', (25560, 25569), True, 'import numpy as np\n'), ((25841, 25878), 'numpy.sum', 'np.sum', (['((xyz[p2 == i] - cen2[i]) ** 2)'], {}), '((xyz[p2 == i] - cen2[i]) ** 2)\n', (25847, 25878), True, 'import numpy as np\n'), ((25924, 25939), 'numpy.sum', 'np.sum', (['(p2 == i)'], {}), '(p2 == i)\n', (25930, 25939), True, 'import numpy as np\n'), ((26543, 26579), 'numpy.sum', 'np.sum', (['((xy[p1 == i] - cen1[i]) ** 2)'], {}), '((xy[p1 == i] - cen1[i]) ** 2)\n', (26549, 26579), True, 'import numpy as np\n'), ((26625, 26640), 'numpy.sum', 'np.sum', (['(p1 == i)'], {}), '(p1 == i)\n', (26631, 26640), True, 'import numpy as np\n'), ((26912, 26948), 'numpy.sum', 'np.sum', (['((xy[p2 == i] - cen2[i]) ** 2)'], {}), '((xy[p2 == i] - cen2[i]) ** 
2)\n', (26918, 26948), True, 'import numpy as np\n'), ((26994, 27009), 'numpy.sum', 'np.sum', (['(p2 == i)'], {}), '(p2 == i)\n', (27000, 27009), True, 'import numpy as np\n'), ((27733, 27770), 'numpy.sum', 'np.sum', (['((xyz[p1 == i] - cen1[i]) ** 2)'], {}), '((xyz[p1 == i] - cen1[i]) ** 2)\n', (27739, 27770), True, 'import numpy as np\n'), ((27816, 27831), 'numpy.sum', 'np.sum', (['(p1 == i)'], {}), '(p1 == i)\n', (27822, 27831), True, 'import numpy as np\n'), ((28103, 28140), 'numpy.sum', 'np.sum', (['((xyz[p2 == i] - cen2[i]) ** 2)'], {}), '((xyz[p2 == i] - cen2[i]) ** 2)\n', (28109, 28140), True, 'import numpy as np\n'), ((28186, 28201), 'numpy.sum', 'np.sum', (['(p2 == i)'], {}), '(p2 == i)\n', (28192, 28201), True, 'import numpy as np\n'), ((32089, 32139), 'numpy.average', 'np.average', (['xyz[p == i]'], {'axis': '(0)', 'weights': 'w[p == i]'}), '(xyz[p == i], axis=0, weights=w[p == i])\n', (32099, 32139), True, 'import numpy as np\n'), ((32187, 32218), 'numpy.sum', 'np.sum', (['(direct_cen ** 2)'], {'axis': '(1)'}), '(direct_cen ** 2, axis=1)\n', (32193, 32218), True, 'import numpy as np\n'), ((32316, 32372), 'numpy.sum', 'np.sum', (['(w[p == i][:, None] * (xyz[p == i] - cen[i]) ** 2)'], {}), '(w[p == i][:, None] * (xyz[p == i] - cen[i]) ** 2)\n', (32322, 32372), True, 'import numpy as np\n'), ((32414, 32431), 'numpy.sum', 'np.sum', (['w[p == i]'], {}), '(w[p == i])\n', (32420, 32431), True, 'import numpy as np\n'), ((32845, 32861), 'numpy.mean', 'np.mean', (['inertia'], {}), '(inertia)\n', (32852, 32861), True, 'import numpy as np\n'), ((33739, 33795), 'numpy.sum', 'np.sum', (['(w[p == i][:, None] * (xyz[p == i] - cen[i]) ** 2)'], {}), '(w[p == i][:, None] * (xyz[p == i] - cen[i]) ** 2)\n', (33745, 33795), True, 'import numpy as np\n'), ((33837, 33854), 'numpy.sum', 'np.sum', (['w[p == i]'], {}), '(w[p == i])\n', (33843, 33854), True, 'import numpy as np\n'), ((34208, 34224), 'numpy.mean', 'np.mean', (['inertia'], {}), '(inertia)\n', (34215, 34224), True, 'import numpy as np\n'), ((35993, 36043), 'numpy.average', 'np.average', (['xyz[p == i]'], {'axis': '(0)', 'weights': 'w[p == i]'}), '(xyz[p == i], axis=0, weights=w[p == i])\n', (36003, 36043), True, 'import numpy as np\n'), ((36091, 36122), 'numpy.sum', 'np.sum', (['(direct_cen ** 2)'], {'axis': '(1)'}), '(direct_cen ** 2, axis=1)\n', (36097, 36122), True, 'import numpy as np\n'), ((36220, 36276), 'numpy.sum', 'np.sum', (['(w[p == i][:, None] * (xyz[p == i] - cen[i]) ** 2)'], {}), '(w[p == i][:, None] * (xyz[p == i] - cen[i]) ** 2)\n', (36226, 36276), True, 'import numpy as np\n'), ((36318, 36335), 'numpy.sum', 'np.sum', (['w[p == i]'], {}), '(w[p == i])\n', (36324, 36335), True, 'import numpy as np\n'), ((36749, 36765), 'numpy.mean', 'np.mean', (['inertia'], {}), '(inertia)\n', (36756, 36765), True, 'import numpy as np\n'), ((37648, 37704), 'numpy.sum', 'np.sum', (['(w[p == i][:, None] * (xyz[p == i] - cen[i]) ** 2)'], {}), '(w[p == i][:, None] * (xyz[p == i] - cen[i]) ** 2)\n', (37654, 37704), True, 'import numpy as np\n'), ((37746, 37763), 'numpy.sum', 'np.sum', (['w[p == i]'], {}), '(w[p == i])\n', (37752, 37763), True, 'import numpy as np\n'), ((38117, 38133), 'numpy.mean', 'np.mean', (['inertia'], {}), '(inertia)\n', (38124, 38133), True, 'import numpy as np\n'), ((2124, 2166), 'numpy.mean', 'np.mean', (['((xyz[patches == i] - cen[i]) ** 2)'], {}), '((xyz[patches == i] - cen[i]) ** 2)\n', (2131, 2166), True, 'import numpy as np\n'), ((3443, 3485), 'numpy.mean', 'np.mean', (['((xyz[patches == i] - cen[i]) ** 2)'], {}), 
'((xyz[patches == i] - cen[i]) ** 2)\n', (3450, 3485), True, 'import numpy as np\n'), ((4827, 4869), 'numpy.mean', 'np.mean', (['((xyz[patches == i] - cen[i]) ** 2)'], {}), '((xyz[patches == i] - cen[i]) ** 2)\n', (4834, 4869), True, 'import numpy as np\n'), ((6238, 6248), 'numpy.min', 'np.min', (['ra'], {}), '(ra)\n', (6244, 6248), True, 'import numpy as np\n'), ((6303, 6313), 'numpy.max', 'np.max', (['ra'], {}), '(ra)\n', (6309, 6313), True, 'import numpy as np\n'), ((6369, 6380), 'numpy.min', 'np.min', (['dec'], {}), '(dec)\n', (6375, 6380), True, 'import numpy as np\n'), ((6436, 6447), 'numpy.max', 'np.max', (['dec'], {}), '(dec)\n', (6442, 6447), True, 'import numpy as np\n'), ((30128, 30138), 'numpy.min', 'np.min', (['ra'], {}), '(ra)\n', (30134, 30138), True, 'import numpy as np\n'), ((30193, 30203), 'numpy.max', 'np.max', (['ra'], {}), '(ra)\n', (30199, 30203), True, 'import numpy as np\n'), ((30259, 30270), 'numpy.min', 'np.min', (['dec'], {}), '(dec)\n', (30265, 30270), True, 'import numpy as np\n'), ((30326, 30337), 'numpy.max', 'np.max', (['dec'], {}), '(dec)\n', (30332, 30337), True, 'import numpy as np\n'), ((31395, 31405), 'numpy.min', 'np.min', (['ra'], {}), '(ra)\n', (31401, 31405), True, 'import numpy as np\n'), ((31460, 31470), 'numpy.max', 'np.max', (['ra'], {}), '(ra)\n', (31466, 31470), True, 'import numpy as np\n'), ((31526, 31537), 'numpy.min', 'np.min', (['dec'], {}), '(dec)\n', (31532, 31537), True, 'import numpy as np\n'), ((31593, 31604), 'numpy.max', 'np.max', (['dec'], {}), '(dec)\n', (31599, 31604), True, 'import numpy as np\n'), ((35201, 35211), 'numpy.min', 'np.min', (['ra'], {}), '(ra)\n', (35207, 35211), True, 'import numpy as np\n'), ((35266, 35276), 'numpy.max', 'np.max', (['ra'], {}), '(ra)\n', (35272, 35276), True, 'import numpy as np\n'), ((35332, 35343), 'numpy.min', 'np.min', (['dec'], {}), '(dec)\n', (35338, 35343), True, 'import numpy as np\n'), ((35399, 35410), 'numpy.max', 'np.max', (['dec'], {}), '(dec)\n', (35405, 35410), True, 'import numpy as np\n')]
|
'''
This file is part of PM4Py (More Info: https://pm4py.fit.fraunhofer.de).
PM4Py is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
PM4Py is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with PM4Py. If not, see <https://www.gnu.org/licenses/>.
'''
from pm4py.objects.log.obj import EventLog, Trace, EventStream
from pm4py.util import xes_constants as xes
def sort_timestamp_trace(trace, timestamp_key=xes.DEFAULT_TIMESTAMP_KEY, reverse_sort=False):
"""
Sort a trace based on timestamp key
Parameters
-----------
trace
Trace
timestamp_key
Timestamp key
reverse_sort
        If true, the sort is done in descending instead of the default ascending order
Returns
-----------
trace
Sorted trace
"""
events = sorted(trace._list, key=lambda x: x[timestamp_key], reverse=reverse_sort)
new_trace = Trace(events, attributes=trace.attributes)
return new_trace
def sort_timestamp_stream(event_log, timestamp_key=xes.DEFAULT_TIMESTAMP_KEY, reverse_sort=False):
"""
Sort an event log based on timestamp key
Parameters
-----------
event_log
Event log
timestamp_key
Timestamp key
reverse_sort
        If true, the sort is done in descending instead of the default ascending order
Returns
-----------
event_log
Sorted event log
"""
events = sorted(event_log._list, key=lambda x: x[timestamp_key], reverse=reverse_sort)
new_stream = EventStream(events, attributes=event_log.attributes, extensions=event_log.extensions,
omni_present=event_log.omni_present, classifiers=event_log.classifiers,
properties=event_log.properties)
return new_stream
def sort_timestamp_log(event_log, timestamp_key=xes.DEFAULT_TIMESTAMP_KEY, reverse_sort=False):
"""
Sort a log based on timestamp key
Parameters
-----------
event_log
Log
timestamp_key
Timestamp key
reverse_sort
        If true, the sort is done in descending instead of the default ascending order
Returns
-----------
log
Sorted log
"""
new_log = EventLog(attributes=event_log.attributes, extensions=event_log.extensions,
omni_present=event_log.omni_present, classifiers=event_log.classifiers,
properties=event_log.properties)
for trace in event_log:
if trace:
new_log.append(sort_timestamp_trace(trace, timestamp_key=timestamp_key, reverse_sort=reverse_sort))
new_log._list.sort(key=lambda x: x[0][timestamp_key], reverse=reverse_sort)
return new_log
def sort_timestamp(log, timestamp_key=xes.DEFAULT_TIMESTAMP_KEY, reverse_sort=False):
"""
Sort a log based on timestamp key
Parameters
-----------
log
Trace/Event log
timestamp_key
Timestamp key
reverse_sort
        If true, the sort is done in descending instead of the default ascending order
Returns
-----------
log
Sorted Trace/Event log
"""
if type(log) is EventLog:
return sort_timestamp_log(log, timestamp_key=timestamp_key, reverse_sort=reverse_sort)
return sort_timestamp_stream(log, timestamp_key=timestamp_key, reverse_sort=reverse_sort)
def sort_lambda_log(event_log, sort_function, reverse=False):
"""
Sort a log based on a lambda expression
Parameters
------------
event_log
Log
sort_function
Sort function
reverse
Boolean (sort by reverse order)
Returns
------------
new_log
Sorted log
"""
traces = sorted(event_log._list, key=sort_function, reverse=reverse)
new_log = EventLog(traces, attributes=event_log.attributes, extensions=event_log.extensions,
omni_present=event_log.omni_present, classifiers=event_log.classifiers,
properties=event_log.properties)
return new_log
def sort_lambda_stream(event_log, sort_function, reverse=False):
"""
Sort a stream based on a lambda expression
Parameters
------------
event_log
Stream
sort_function
Sort function
reverse
Boolean (sort by reverse order)
Returns
------------
stream
Sorted stream
"""
events = sorted(event_log._list, key=sort_function, reverse=reverse)
new_stream = EventStream(events, attributes=event_log.attributes, extensions=event_log.extensions,
omni_present=event_log.omni_present, classifiers=event_log.classifiers,
properties=event_log.properties)
return new_stream
def sort_lambda(log, sort_function, reverse=False):
"""
Sort a log based on lambda expression
Parameters
-------------
log
Log
sort_function
Sort function
reverse
Boolean (sort by reverse order)
Returns
-------------
log
Sorted log
"""
if type(log) is EventLog:
return sort_lambda_log(log, sort_function, reverse=reverse)
return sort_lambda_stream(log, sort_function, reverse=reverse)
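# A minimal usage sketch, assuming events can be represented as plain dicts
# carrying the default XES timestamp key (pm4py's Event type is dict-like,
# so real logs behave the same way):
if __name__ == "__main__":
    from datetime import datetime

    t1 = Trace([{xes.DEFAULT_TIMESTAMP_KEY: datetime(2021, 1, 2)},
                {xes.DEFAULT_TIMESTAMP_KEY: datetime(2021, 1, 1)}])
    t2 = Trace([{xes.DEFAULT_TIMESTAMP_KEY: datetime(2020, 12, 31)}])
    log = EventLog([t1, t2])
    # Events inside each trace, and the traces themselves, end up in ascending
    # timestamp order; reverse_sort=True would sort descending instead.
    sorted_log = sort_timestamp(log)
    print([e[xes.DEFAULT_TIMESTAMP_KEY] for t in sorted_log for e in t])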
|
[
"pm4py.objects.log.obj.EventStream",
"pm4py.objects.log.obj.Trace",
"pm4py.objects.log.obj.EventLog"
] |
[((1331, 1373), 'pm4py.objects.log.obj.Trace', 'Trace', (['events'], {'attributes': 'trace.attributes'}), '(events, attributes=trace.attributes)\n', (1336, 1373), False, 'from pm4py.objects.log.obj import EventLog, Trace, EventStream\n'), ((1932, 2132), 'pm4py.objects.log.obj.EventStream', 'EventStream', (['events'], {'attributes': 'event_log.attributes', 'extensions': 'event_log.extensions', 'omni_present': 'event_log.omni_present', 'classifiers': 'event_log.classifiers', 'properties': 'event_log.properties'}), '(events, attributes=event_log.attributes, extensions=event_log.\n extensions, omni_present=event_log.omni_present, classifiers=event_log.\n classifiers, properties=event_log.properties)\n', (1943, 2132), False, 'from pm4py.objects.log.obj import EventLog, Trace, EventStream\n'), ((2618, 2805), 'pm4py.objects.log.obj.EventLog', 'EventLog', ([], {'attributes': 'event_log.attributes', 'extensions': 'event_log.extensions', 'omni_present': 'event_log.omni_present', 'classifiers': 'event_log.classifiers', 'properties': 'event_log.properties'}), '(attributes=event_log.attributes, extensions=event_log.extensions,\n omni_present=event_log.omni_present, classifiers=event_log.classifiers,\n properties=event_log.properties)\n', (2626, 2805), False, 'from pm4py.objects.log.obj import EventLog, Trace, EventStream\n'), ((4153, 4350), 'pm4py.objects.log.obj.EventLog', 'EventLog', (['traces'], {'attributes': 'event_log.attributes', 'extensions': 'event_log.extensions', 'omni_present': 'event_log.omni_present', 'classifiers': 'event_log.classifiers', 'properties': 'event_log.properties'}), '(traces, attributes=event_log.attributes, extensions=event_log.\n extensions, omni_present=event_log.omni_present, classifiers=event_log.\n classifiers, properties=event_log.properties)\n', (4161, 4350), False, 'from pm4py.objects.log.obj import EventLog, Trace, EventStream\n'), ((4844, 5044), 'pm4py.objects.log.obj.EventStream', 'EventStream', (['events'], {'attributes': 'event_log.attributes', 'extensions': 'event_log.extensions', 'omni_present': 'event_log.omni_present', 'classifiers': 'event_log.classifiers', 'properties': 'event_log.properties'}), '(events, attributes=event_log.attributes, extensions=event_log.\n extensions, omni_present=event_log.omni_present, classifiers=event_log.\n classifiers, properties=event_log.properties)\n', (4855, 5044), False, 'from pm4py.objects.log.obj import EventLog, Trace, EventStream\n')]
|
import theano
from utils import srng
def dropout(input, dropout_rate=0):
    """Inverted dropout: randomly zero units with probability dropout_rate and
    scale the survivors by 1/(1 - dropout_rate), so no rescaling is needed when
    dropout is disabled (dropout_rate=0)."""
if dropout_rate > 0:
retain = 1 - dropout_rate
d_output = (input / retain) * srng.binomial(input.shape, p=retain,
dtype='int32').astype('float32')
else:
d_output = input
return d_output
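# A minimal usage sketch, assuming utils.srng is a Theano RandomStreams
# instance; because of the inverted scaling above, the deterministic pass
# (dropout_rate=0) simply returns the input unchanged.
if __name__ == "__main__":
    import numpy as np
    import theano.tensor as T

    x = T.matrix('x')
    f = theano.function([x], dropout(x, dropout_rate=0.5))
    # Roughly half the entries come back as 0, the rest scaled to 2.0.
    print(f(np.ones((4, 4), dtype=theano.config.floatX)))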
|
[
"utils.srng.binomial"
] |
[((171, 222), 'utils.srng.binomial', 'srng.binomial', (['input.shape'], {'p': 'retain', 'dtype': '"""int32"""'}), "(input.shape, p=retain, dtype='int32')\n", (184, 222), False, 'from utils import srng\n')]
|
from .. import Explanation
from ..utils import OpChain
from . import colors
import numpy as np
import matplotlib.pyplot as pl
def convert_color(color):
try:
color = pl.get_cmap(color)
except:
pass
if color == "shap_red":
color = colors.red_rgb
elif color == "shap_blue":
color = colors.blue_rgb
return color
def convert_ordering(ordering, shap_values):
if issubclass(type(ordering), OpChain):
ordering = ordering.apply(Explanation(shap_values))
if issubclass(type(ordering), Explanation):
if "argsort" in [op["name"] for op in ordering.op_history]:
ordering = ordering.values
else:
ordering = ordering.argsort.flip.values
return ordering
def get_sort_order(dist, clust_order, cluster_threshold, feature_order):
""" Returns a sorted order of the values where we respect the clustering order when dist[i,j] < cluster_threshold
"""
#feature_imp = np.abs(values)
# if partition_tree is not None:
# new_tree = fill_internal_max_values(partition_tree, shap_values)
# clust_order = sort_inds(new_tree, np.abs(shap_values))
clust_inds = np.argsort(clust_order)
feature_order = feature_order.copy()#order.apply(Explanation(shap_values))
# print("feature_order", feature_order)
for i in range(len(feature_order)-1):
ind1 = feature_order[i]
next_ind = feature_order[i+1]
next_ind_pos = i + 1
for j in range(i+1,len(feature_order)):
ind2 = feature_order[j]
#if feature_imp[ind] >
# if ind1 == 2:
# print(ind1, ind2, dist[ind1,ind2])
if dist[ind1,ind2] <= cluster_threshold:
# if ind1 == 2:
# print(clust_inds)
# print(ind1, ind2, next_ind, dist[ind1,ind2], clust_inds[ind2], clust_inds[next_ind])
if dist[ind1,next_ind] > cluster_threshold or clust_inds[ind2] < clust_inds[next_ind]:
next_ind = ind2
next_ind_pos = j
# print("next_ind", next_ind)
# print("next_ind_pos", next_ind_pos)
# insert the next_ind next
for j in range(next_ind_pos, i+1, -1):
#print("j", j)
feature_order[j] = feature_order[j-1]
feature_order[i+1] = next_ind
#print(feature_order)
return feature_order
def merge_nodes(values, partition_tree):
""" This merges the two clustered leaf nodes with the smallest total value.
"""
M = partition_tree.shape[0] + 1
ptind = 0
min_val = np.inf
for i in range(partition_tree.shape[0]):
ind1 = int(partition_tree[i,0])
ind2 = int(partition_tree[i,1])
if ind1 < M and ind2 < M:
val = np.abs(values[ind1]) + np.abs(values[ind2])
if val < min_val:
min_val = val
ptind = i
#print("ptind", ptind, min_val)
ind1 = int(partition_tree[ptind,0])
ind2 = int(partition_tree[ptind,1])
if ind1 > ind2:
tmp = ind1
ind1 = ind2
ind2 = tmp
partition_tree_new = partition_tree.copy()
for i in range(partition_tree_new.shape[0]):
i0 = int(partition_tree_new[i,0])
i1 = int(partition_tree_new[i,1])
if i0 == ind2:
partition_tree_new[i,0] = ind1
elif i0 > ind2:
partition_tree_new[i,0] -= 1
if i0 == ptind + M:
partition_tree_new[i,0] = ind1
elif i0 > ptind + M:
partition_tree_new[i,0] -= 1
if i1 == ind2:
partition_tree_new[i,1] = ind1
elif i1 > ind2:
partition_tree_new[i,1] -= 1
if i1 == ptind + M:
partition_tree_new[i,1] = ind1
elif i1 > ptind + M:
partition_tree_new[i,1] -= 1
partition_tree_new = np.delete(partition_tree_new, ptind, axis=0)
# update the counts to be correct
fill_counts(partition_tree_new)
return partition_tree_new, ind1, ind2
def dendrogram_coords(leaf_positions, partition_tree):
""" Returns the x and y coords of the lines of a dendrogram where the leaf order is given.
Note that scipy can compute these coords as well, but it does not allow you to easily specify
a specific leaf order, hence this reimplementation.
"""
xout = []
yout = []
_dendrogram_coords_rec(partition_tree.shape[0]-1, leaf_positions, partition_tree, xout, yout)
return np.array(xout), np.array(yout)
def _dendrogram_coords_rec(pos, leaf_positions, partition_tree, xout, yout):
M = partition_tree.shape[0] + 1
if pos < 0:
return leaf_positions[pos + M], 0
left = int(partition_tree[pos, 0]) - M
right = int(partition_tree[pos, 1]) - M
x_left, y_left = _dendrogram_coords_rec(left, leaf_positions, partition_tree, xout, yout)
x_right, y_right = _dendrogram_coords_rec(right, leaf_positions, partition_tree, xout, yout)
y_curr = partition_tree[pos, 2]
xout.append([x_left, x_left, x_right, x_right])
yout.append([y_left, y_curr, y_curr, y_right])
return (x_left + x_right) / 2, y_curr
def fill_internal_max_values(partition_tree, leaf_values):
""" This fills the forth column of the partition tree matrix with the max leaf value in that cluster.
"""
M = partition_tree.shape[0] + 1
new_tree = partition_tree.copy()
for i in range(new_tree.shape[0]):
val = 0
if new_tree[i,0] < M:
ind = int(new_tree[i,0])
val = max(val, np.abs(leaf_values[ind]))
else:
ind = int(new_tree[i,0])-M
val = max(val, np.abs(new_tree[ind,3])) # / partition_tree[ind,2])
if new_tree[i,1] < M:
ind = int(new_tree[i,1])
val = max(val, np.abs(leaf_values[ind]))
else:
ind = int(new_tree[i,1])-M
val = max(val, np.abs(new_tree[ind,3])) # / partition_tree[ind,2])
new_tree[i,3] = val
return new_tree
def fill_counts(partition_tree):
""" This updates the
"""
M = partition_tree.shape[0] + 1
for i in range(partition_tree.shape[0]):
val = 0
if partition_tree[i,0] < M:
ind = int(partition_tree[i,0])
val += 1
else:
ind = int(partition_tree[i,0])-M
val += partition_tree[ind,3]
if partition_tree[i,1] < M:
ind = int(partition_tree[i,1])
val += 1
else:
ind = int(partition_tree[i,1])-M
val += partition_tree[ind,3]
partition_tree[i,3] = val
def sort_inds(partition_tree, leaf_values, pos=None, inds=None):
if inds is None:
inds = []
if pos is None:
partition_tree = fill_internal_max_values(partition_tree, leaf_values)
pos = partition_tree.shape[0]-1
M = partition_tree.shape[0] + 1
if pos < 0:
inds.append(pos + M)
return
left = int(partition_tree[pos, 0]) - M
right = int(partition_tree[pos, 1]) - M
left_val = partition_tree[left,3] if left >= 0 else leaf_values[left + M]
right_val = partition_tree[right,3] if right >= 0 else leaf_values[right + M]
if left_val < right_val:
tmp = right
right = left
left = tmp
sort_inds(partition_tree, leaf_values, left, inds)
sort_inds(partition_tree, leaf_values, right, inds)
return inds
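# A minimal sketch of the partition-tree helpers above, assuming the
# scipy-style linkage layout [left_child, right_child, height, count] with M
# leaves numbered 0..M-1 and internal nodes numbered M..2M-2; this helper is
# illustrative only and not part of the public plotting API.
def _sort_inds_example():
    # Three leaves: node 3 merges leaves 0 and 1, node 4 merges leaf 2 with node 3.
    partition_tree = np.array([[0., 1., 0.5, 2.],
                               [2., 3., 1.0, 3.]])
    leaf_values = np.array([0.1, 0.9, 0.5])
    # Leaves come back with the larger-|value| sub-cluster first while the
    # {0, 1} cluster stays contiguous: expected order [1, 0, 2].
    return sort_inds(partition_tree, leaf_values)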
|
[
"numpy.argsort",
"numpy.abs",
"numpy.array",
"numpy.delete"
] |
[((1167, 1190), 'numpy.argsort', 'np.argsort', (['clust_order'], {}), '(clust_order)\n', (1177, 1190), True, 'import numpy as np\n'), ((3993, 4037), 'numpy.delete', 'np.delete', (['partition_tree_new', 'ptind'], {'axis': '(0)'}), '(partition_tree_new, ptind, axis=0)\n', (4002, 4037), True, 'import numpy as np\n'), ((4621, 4635), 'numpy.array', 'np.array', (['xout'], {}), '(xout)\n', (4629, 4635), True, 'import numpy as np\n'), ((4637, 4651), 'numpy.array', 'np.array', (['yout'], {}), '(yout)\n', (4645, 4651), True, 'import numpy as np\n'), ((2856, 2876), 'numpy.abs', 'np.abs', (['values[ind1]'], {}), '(values[ind1])\n', (2862, 2876), True, 'import numpy as np\n'), ((2879, 2899), 'numpy.abs', 'np.abs', (['values[ind2]'], {}), '(values[ind2])\n', (2885, 2899), True, 'import numpy as np\n'), ((5708, 5732), 'numpy.abs', 'np.abs', (['leaf_values[ind]'], {}), '(leaf_values[ind])\n', (5714, 5732), True, 'import numpy as np\n'), ((5814, 5838), 'numpy.abs', 'np.abs', (['new_tree[ind, 3]'], {}), '(new_tree[ind, 3])\n', (5820, 5838), True, 'import numpy as np\n'), ((5960, 5984), 'numpy.abs', 'np.abs', (['leaf_values[ind]'], {}), '(leaf_values[ind])\n', (5966, 5984), True, 'import numpy as np\n'), ((6066, 6090), 'numpy.abs', 'np.abs', (['new_tree[ind, 3]'], {}), '(new_tree[ind, 3])\n', (6072, 6090), True, 'import numpy as np\n')]
|
# Copyright (c) 2016-2019 <NAME>
# License: MIT License
import ezdxf
dwg = ezdxf.new('R2000') # underlay requires the DXF R2000 format or newer
pdf_underlay_def = dwg.add_underlay_def(filename='underlay.pdf', name='1') # name = page to display
dwf_underlay_def = dwg.add_underlay_def(filename='underlay.dwf',
name="Underlay_R2013-Model") # don't know how to get this name
dgn_underlay_def = dwg.add_underlay_def(filename='underlay.dgn', name='default') # name = 'default' just works
# The (PDF)DEFINITION entity is like a block definition, it just defines the underlay
msp = dwg.modelspace()
# add first underlay
msp.add_underlay(pdf_underlay_def, insert=(0, 0, 0), scale=1.)
# The (PDF)UNDERLAY entity is like the INSERT entity, it creates an underlay reference,
# and there can be multiple references to the same underlay in a drawing.
msp.add_underlay(pdf_underlay_def, insert=(10, 0, 0), scale=.5, rotation=30)
# use dgn format
msp.add_underlay(dgn_underlay_def, insert=(0, 30, 0), scale=1.)
# use dwf format
msp.add_underlay(dwf_underlay_def, insert=(0, 15, 0), scale=1.)
# get existing underlay definitions, Important: UNDERLAYDEFs resides in the objects section
pdf_defs = dwg.objects.query('PDFDEFINITION') # get all pdf underlay defs in drawing
dwg.saveas("underlay.dxf")
|
[
"ezdxf.new"
] |
[((76, 94), 'ezdxf.new', 'ezdxf.new', (['"""R2000"""'], {}), "('R2000')\n", (85, 94), False, 'import ezdxf\n')]
|
import re
import string
class Reformulator(object):
def __init__(self, question, qclass, lang='en', stopwords=None):
self.__original_question = question
punctuation = re.sub(r"[-+/&']", '', string.punctuation)
self.__punctuation_re = r'[{}]'.format(punctuation)
question = question[0].lower() + question[1:]
question = re.sub(r'(?<=[A-Z])\.', 'QQQ', question)
question = re.sub(self.__punctuation_re, '', question)
self.__question = re.sub(r'QQQ', '.', question)
self.__stopwords = stopwords
self.__qclass = qclass.split(':')[1]
if lang == 'en':
question_words = ['what', 'which', 'who', 'whom', 'when', 'where', 'why', 'how']
conj_prep_words = ['of', 'not']
elif lang == 'sv':
question_words = ['vilket', 'vilken', 'vem', 'whom', 'när', 'var', 'varför', 'hur']
conj_prep_words = ['av', 'inte', 'ej']
else:
            raise NotImplementedError('This language is not available')
self.__exact_stop_words = set(stopwords) - set(conj_prep_words)
self.__expansion_rules = {
'dismed': 'disease',
'instru': 'instrument',
'lang': 'language',
'other': '',
'techmeth': 'technique',
'termeq': 'term',
'veh': 'vehicle',
'dist': 'distance',
'ord': 'order',
'perc': 'percentage',
'speed': 'speed',
'temp': 'temperature',
'volsize': 'size'
}
if qclass == 'ABBR:abb':
try:
self.__stopwords.append('abbreviation')
except:
self.__stopwords.add('abbreviation')
            self.__exact_stop_words.add('abbreviation')
def question(self):
return self.__question
def reformulate(self):
without_stopwords = [w for w in self.__question.split()
if w not in self.__stopwords]
query = without_stopwords
query.append(self.__expansion_rules.get(self.__qclass, ''))
return " ".join(query)
def reformulate_exact(self):
without_exact_stopwords = [w for w in self.__question.split()
if w not in self.__exact_stop_words]
query = without_exact_stopwords
query.append(self.__expansion_rules.get(self.__qclass, ''))
return " ".join(query)
|
[
"re.sub"
] |
[((189, 230), 're.sub', 're.sub', (['"""[-+/&\']"""', '""""""', 'string.punctuation'], {}), '("[-+/&\']", \'\', string.punctuation)\n', (195, 230), False, 'import re\n'), ((365, 405), 're.sub', 're.sub', (['"""(?<=[A-Z])\\\\."""', '"""QQQ"""', 'question'], {}), "('(?<=[A-Z])\\\\.', 'QQQ', question)\n", (371, 405), False, 'import re\n'), ((425, 468), 're.sub', 're.sub', (['self.__punctuation_re', '""""""', 'question'], {}), "(self.__punctuation_re, '', question)\n", (431, 468), False, 'import re\n'), ((495, 523), 're.sub', 're.sub', (['"""QQQ"""', '"""."""', 'question'], {}), "('QQQ', '.', question)\n", (501, 523), False, 'import re\n')]
|
#! /usr/bin/env python3.6
import pymysql
import re
import random
import datetime
import sys
import argparse
import os
#########################
# Main-Routine #
#########################
def main():
#Initialization
print('> Crawler Initialization...')
iter_num = 0
crawler_nba.init()
#Argument Parser
(password, table, max_sql_store_num, unix_socket, database_name) = ArgumentParser()
#DB Initialization
print('> DB Initialization...')
crawler_nba.MySQLDBInitialize(password, table, unix_socket, database_name)
#Sideband Setting
current_time = datetime.datetime.now()
print(f'current_time = {current_time}')
random.seed(datetime.datetime.now())
starting_url = "https://en.wikipedia.org/wiki/Kevin_Bacon"
print(f'starting_url = {starting_url}')
# Scrape articles from Wikipedia and store into MySQl Database
choose_link = starting_url
skipping = 0
while(iter_num < max_sql_store_num):
print('iter_num = {}. Get Wiki Links and store the content to MySQL...'.format(iter_num))
print(f'choose_link = {choose_link}')
all_internal_links_loop, skipping = crawler_nba.GetWikiLinksContent(choose_link, crawler_nba.cur, table)
total_num_internal_links_loop = len(all_internal_links_loop)
if(total_num_internal_links_loop > 0):
choose_link = "http://en.wikipedia.org"+all_internal_links_loop[random.randint(0, total_num_internal_links_loop-1)].attrs['href']
if(skipping == 0):
iter_num += 1
# Test to read from MySQL Database
sql_ex = 'SELECT id, title, created, LEFT(content, 32) FROM {table_name} WHERE id=4;'.format(table_name=table)
crawler_nba.cur.execute(sql_ex)
results = crawler_nba.cur.fetchall()
print(f'-------------------Execution {sql_ex}-------------------')
print(f'table = {table}')
for row in results:
id_name = str(row[0])
title_name = row[1]
created_name = str(row[2])
content_name = row[3]
print('{x:<2s}, {y:<2s}, {z:<2s}, {k:<2s}'.format(x=id_name, y=title_name, z=created_name, k=content_name))
# Close the connection of MySQL Database
crawler_nba.MySQLDBClose(crawler_nba.cur, crawler_nba.conn)
#########################
# Sub-Routine #
#########################
def ArgumentParser():
password = ""
table = ""
database_name = ""
unix_socket = ""
max_sql_store_num = 10
parser = argparse.ArgumentParser()
parser.add_argument("--mysql_password", "-sql_p", help="The password to connect to MySQL server.", required=True)
parser.add_argument("--mysql_table_name", "-sql_tn", help="The table name that will be used to store data.", required=True)
parser.add_argument("--max_sql_store_num", "-sql_mx_sn", help="The maximum number that stores in MySQL table.", required=True)
parser.add_argument("--unix_socket", "-sql_un_sock", help="The unix_socket that is used to mypysql connection.", required=True)
parser.add_argument("--database_name", "-database_name", help="The unix_socket that is used to mypysql connection.", required=True)
args = parser.parse_args()
if args.mysql_password:
password = args.mysql_password
if args.mysql_table_name:
table = args.mysql_table_name
if args.max_sql_store_num:
max_sql_store_num = int(args.max_sql_store_num)
if args.unix_socket:
unix_socket = args.unix_socket
if args.database_name:
database_name = args.database_name
return(password, table, max_sql_store_num, unix_socket, database_name)
#-----------------Execution------------------#
if __name__ == '__main__':
import sys
this_script_path = os.path.realpath(__file__)
this_script_folder = os.path.dirname(this_script_path)
crawler_nba_pkg_path = this_script_folder+'/../../crawler'
print('Add to sys.path : {x}'.format(x=crawler_nba_pkg_path))
sys.path.append(crawler_nba_pkg_path)
import package_crawler_nba.crawler_nba as crawler_nba
print('Import package_crawler_nba successfully.')
main()
|
[
"sys.path.append",
"package_crawler_nba.crawler_nba.GetWikiLinksContent",
"argparse.ArgumentParser",
"random.randint",
"package_crawler_nba.crawler_nba.init",
"os.path.realpath",
"os.path.dirname",
"package_crawler_nba.crawler_nba.cur.fetchall",
"package_crawler_nba.crawler_nba.MySQLDBInitialize",
"package_crawler_nba.crawler_nba.cur.execute",
"package_crawler_nba.crawler_nba.MySQLDBClose",
"datetime.datetime.now"
] |
[((291, 309), 'package_crawler_nba.crawler_nba.init', 'crawler_nba.init', ([], {}), '()\n', (307, 309), True, 'import package_crawler_nba.crawler_nba as crawler_nba\n'), ((484, 558), 'package_crawler_nba.crawler_nba.MySQLDBInitialize', 'crawler_nba.MySQLDBInitialize', (['password', 'table', 'unix_socket', 'database_name'], {}), '(password, table, unix_socket, database_name)\n', (513, 558), True, 'import package_crawler_nba.crawler_nba as crawler_nba\n'), ((601, 624), 'datetime.datetime.now', 'datetime.datetime.now', ([], {}), '()\n', (622, 624), False, 'import datetime\n'), ((1703, 1734), 'package_crawler_nba.crawler_nba.cur.execute', 'crawler_nba.cur.execute', (['sql_ex'], {}), '(sql_ex)\n', (1726, 1734), True, 'import package_crawler_nba.crawler_nba as crawler_nba\n'), ((1749, 1775), 'package_crawler_nba.crawler_nba.cur.fetchall', 'crawler_nba.cur.fetchall', ([], {}), '()\n', (1773, 1775), True, 'import package_crawler_nba.crawler_nba as crawler_nba\n'), ((2192, 2251), 'package_crawler_nba.crawler_nba.MySQLDBClose', 'crawler_nba.MySQLDBClose', (['crawler_nba.cur', 'crawler_nba.conn'], {}), '(crawler_nba.cur, crawler_nba.conn)\n', (2216, 2251), True, 'import package_crawler_nba.crawler_nba as crawler_nba\n'), ((2483, 2508), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (2506, 2508), False, 'import argparse\n'), ((3732, 3758), 'os.path.realpath', 'os.path.realpath', (['__file__'], {}), '(__file__)\n', (3748, 3758), False, 'import os\n'), ((3784, 3817), 'os.path.dirname', 'os.path.dirname', (['this_script_path'], {}), '(this_script_path)\n', (3799, 3817), False, 'import os\n'), ((3951, 3988), 'sys.path.append', 'sys.path.append', (['crawler_nba_pkg_path'], {}), '(crawler_nba_pkg_path)\n', (3966, 3988), False, 'import sys\n'), ((685, 708), 'datetime.datetime.now', 'datetime.datetime.now', ([], {}), '()\n', (706, 708), False, 'import datetime\n'), ((1163, 1231), 'package_crawler_nba.crawler_nba.GetWikiLinksContent', 'crawler_nba.GetWikiLinksContent', (['choose_link', 'crawler_nba.cur', 'table'], {}), '(choose_link, crawler_nba.cur, table)\n', (1194, 1231), True, 'import package_crawler_nba.crawler_nba as crawler_nba\n'), ((1425, 1477), 'random.randint', 'random.randint', (['(0)', '(total_num_internal_links_loop - 1)'], {}), '(0, total_num_internal_links_loop - 1)\n', (1439, 1477), False, 'import random\n')]
|
import autolens as al
import autolens.plot as aplt  # needed for the aplt.Grid() plotting calls below
grid = al.Grid.uniform(shape_2d=(10, 10), pixel_scales=1.0)
aplt.Grid(grid=grid)
grid = al.Grid.uniform(shape_2d=(10, 10), pixel_scales=1.0, origin=(5.0, 5.0))
aplt.Grid(grid=grid, symmetric_around_centre=False)
|
[
"autolens.Grid.uniform"
] |
[((32, 84), 'autolens.Grid.uniform', 'al.Grid.uniform', ([], {'shape_2d': '(10, 10)', 'pixel_scales': '(1.0)'}), '(shape_2d=(10, 10), pixel_scales=1.0)\n', (47, 84), True, 'import autolens as al\n'), ((119, 190), 'autolens.Grid.uniform', 'al.Grid.uniform', ([], {'shape_2d': '(10, 10)', 'pixel_scales': '(1.0)', 'origin': '(5.0, 5.0)'}), '(shape_2d=(10, 10), pixel_scales=1.0, origin=(5.0, 5.0))\n', (134, 190), True, 'import autolens as al\n')]
|
import datetime
from app import db, mail
from app.models import Member, MemberInvite
from flask import Blueprint, flash, redirect, render_template, request, url_for
from flask_login import current_user, login_required, login_user
from flask_mail import Message
from .forms import MemberInviteForm, MemberJoinForm
members = Blueprint(
"members", __name__, subdomain="<org_username>", template_folder="templates"
)
@members.route("/")
@login_required
def index(org_username):
member_page = request.args.get("member_page", 1)
members = Member.query.filter_by(organization=current_user.organization).paginate(
int(member_page), 5
)
all_invites = MemberInvite.query.filter_by(
organization=current_user.organization
).all()
return render_template(
"members/index.html", all_invites=all_invites, members=members
)
@members.route("/invite/<invite_id>/remove", methods=["POST"])
@login_required
def remove_invite(org_username, invite_id):
invite = MemberInvite.query.filter_by(
organization=current_user.organization, id=invite_id
).first()
if invite is None:
flash("Unable to find invite to remove", "error")
else:
db.session.delete(invite)
db.session.commit()
flash("Invite removed successfully", "success")
return redirect(
url_for(
"members.index",
org_username=current_user.organization.username,
)
)
@members.route("/invite/", methods=["GET", "POST"])
@login_required
def invite(org_username):
member_invite = MemberInviteForm()
if member_invite.validate_on_submit():
email = member_invite.email.data.lower()
member = Member.query.filter_by(
email=email, organization=current_user.organization
).first()
if member:
flash("Email is already a member", "error")
return redirect(
url_for(
"members.index",
org_username=current_user.organization.username,
)
)
invite = MemberInvite.query.filter_by(
organization=current_user.organization, email=email
).first()
if invite:
flash("Invite already sent.", "warning")
return redirect(
url_for(
"members.index",
org_username=current_user.organization.username,
)
)
new_invite = MemberInvite(email=email, organization=current_user.organization)
db.session.add(new_invite)
db.session.commit()
invite_link = url_for(
".join",
org_username=current_user.organization.username,
token=new_invite.token,
_external=True,
)
msg = Message(
"Worktable organization join invite",
sender="<EMAIL>",
recipients=[new_invite.email],
)
msg.body = render_template(
"members/invite_mail.txt",
join_link=invite_link,
org_name=current_user.organization.name,
)
msg.html = render_template(
"members/invite_mail.html",
join_link=invite_link,
org_name=current_user.organization.name,
)
mail.send(msg)
flash("New member invite has been sent", "success")
return redirect(
url_for("members.index", org_username=current_user.organization.username)
)
return render_template("members/invite.html", form=member_invite)
@members.route("/join/<token>", methods=["GET", "POST"])
def join(org_username, token):
invite = MemberInvite.query.filter_by(token=token).first()
if not invite:
flash("Invalid invite link", "error")
return redirect(url_for("auth.login", org_username=org_username))
member_join = MemberJoinForm()
if member_join.validate_on_submit():
new_member = Member(
first_name=member_join.first_name.data,
last_name=member_join.last_name.data,
email=invite.email,
organization=invite.organization,
)
new_member.password = member_join.password.data
db.session.add(new_member)
db.session.delete(invite)
db.session.commit()
login_user(new_member)
flash("New member invite has been sent", "success")
return redirect(
url_for("dashboard.index", org_username=invite.organization.username)
)
return render_template(
"members/join.html", invite=invite, form=member_join, token=token
)
@members.route("/disable/", methods=["POST"])
@login_required
def disable_account(org_username):
member_id = request.form.get("member_id")
member = Member.query.filter_by(
id=member_id, organization=current_user.organization
).first()
if member is None:
flash("Member account is not found", "error")
elif member.disabled_at:
flash("Member account is already disabled", "error")
elif member == current_user:
flash("You can not disabled your own account", "error")
else:
member.disabled_at = datetime.datetime.utcnow()
db.session.commit()
flash("Member account has been disabled", "success")
redirect_url = url_for(".index", org_username=current_user.organization.username)
return redirect(redirect_url)
@members.route("/enable/", methods=["POST"])
@login_required
def enable_account(org_username):
member_id = request.form.get("member_id")
member = Member.query.filter_by(
id=member_id, organization=current_user.organization
).first()
if member is None:
flash("Member account is not found", "error")
elif member.disabled_at is None:
flash("Member account is already enabled", "error")
elif member == current_user:
flash("You can not enable your own account", "error")
else:
member.disabled_at = None
db.session.commit()
flash("Member account has been enabled", "success")
redirect_url = url_for(".index", org_username=current_user.organization.username)
return redirect(redirect_url)
|
[
"flask.flash",
"flask.Blueprint",
"app.models.Member",
"flask.request.args.get",
"flask.request.form.get",
"flask.redirect",
"flask_mail.Message",
"flask_login.login_user",
"app.models.MemberInvite.query.filter_by",
"datetime.datetime.utcnow",
"flask.url_for",
"app.db.session.delete",
"app.mail.send",
"app.db.session.commit",
"flask.render_template",
"app.models.MemberInvite",
"app.models.Member.query.filter_by",
"app.db.session.add"
] |
[((326, 418), 'flask.Blueprint', 'Blueprint', (['"""members"""', '__name__'], {'subdomain': '"""<org_username>"""', 'template_folder': '"""templates"""'}), "('members', __name__, subdomain='<org_username>', template_folder=\n 'templates')\n", (335, 418), False, 'from flask import Blueprint, flash, redirect, render_template, request, url_for\n'), ((501, 535), 'flask.request.args.get', 'request.args.get', (['"""member_page"""', '(1)'], {}), "('member_page', 1)\n", (517, 535), False, 'from flask import Blueprint, flash, redirect, render_template, request, url_for\n'), ((775, 854), 'flask.render_template', 'render_template', (['"""members/index.html"""'], {'all_invites': 'all_invites', 'members': 'members'}), "('members/index.html', all_invites=all_invites, members=members)\n", (790, 854), False, 'from flask import Blueprint, flash, redirect, render_template, request, url_for\n'), ((3532, 3590), 'flask.render_template', 'render_template', (['"""members/invite.html"""'], {'form': 'member_invite'}), "('members/invite.html', form=member_invite)\n", (3547, 3590), False, 'from flask import Blueprint, flash, redirect, render_template, request, url_for\n'), ((4550, 4637), 'flask.render_template', 'render_template', (['"""members/join.html"""'], {'invite': 'invite', 'form': 'member_join', 'token': 'token'}), "('members/join.html', invite=invite, form=member_join, token\n =token)\n", (4565, 4637), False, 'from flask import Blueprint, flash, redirect, render_template, request, url_for\n'), ((4762, 4791), 'flask.request.form.get', 'request.form.get', (['"""member_id"""'], {}), "('member_id')\n", (4778, 4791), False, 'from flask import Blueprint, flash, redirect, render_template, request, url_for\n'), ((5342, 5408), 'flask.url_for', 'url_for', (['""".index"""'], {'org_username': 'current_user.organization.username'}), "('.index', org_username=current_user.organization.username)\n", (5349, 5408), False, 'from flask import Blueprint, flash, redirect, render_template, request, url_for\n'), ((5420, 5442), 'flask.redirect', 'redirect', (['redirect_url'], {}), '(redirect_url)\n', (5428, 5442), False, 'from flask import Blueprint, flash, redirect, render_template, request, url_for\n'), ((5556, 5585), 'flask.request.form.get', 'request.form.get', (['"""member_id"""'], {}), "('member_id')\n", (5572, 5585), False, 'from flask import Blueprint, flash, redirect, render_template, request, url_for\n'), ((6118, 6184), 'flask.url_for', 'url_for', (['""".index"""'], {'org_username': 'current_user.organization.username'}), "('.index', org_username=current_user.organization.username)\n", (6125, 6184), False, 'from flask import Blueprint, flash, redirect, render_template, request, url_for\n'), ((6196, 6218), 'flask.redirect', 'redirect', (['redirect_url'], {}), '(redirect_url)\n', (6204, 6218), False, 'from flask import Blueprint, flash, redirect, render_template, request, url_for\n'), ((1143, 1192), 'flask.flash', 'flash', (['"""Unable to find invite to remove"""', '"""error"""'], {}), "('Unable to find invite to remove', 'error')\n", (1148, 1192), False, 'from flask import Blueprint, flash, redirect, render_template, request, url_for\n'), ((1211, 1236), 'app.db.session.delete', 'db.session.delete', (['invite'], {}), '(invite)\n', (1228, 1236), False, 'from app import db, mail\n'), ((1245, 1264), 'app.db.session.commit', 'db.session.commit', ([], {}), '()\n', (1262, 1264), False, 'from app import db, mail\n'), ((1273, 1320), 'flask.flash', 'flash', (['"""Invite removed successfully"""', '"""success"""'], {}), "('Invite removed successfully', 'success')\n", (1278, 1320), False, 'from flask import Blueprint, flash, redirect, render_template, request, url_for\n'), ((1350, 1423), 'flask.url_for', 'url_for', (['"""members.index"""'], {'org_username': 'current_user.organization.username'}), "('members.index', org_username=current_user.organization.username)\n", (1357, 1423), False, 'from flask import Blueprint, flash, redirect, render_template, request, url_for\n'), ((2497, 2562), 'app.models.MemberInvite', 'MemberInvite', ([], {'email': 'email', 'organization': 'current_user.organization'}), '(email=email, organization=current_user.organization)\n', (2509, 2562), False, 'from app.models import Member, MemberInvite\n'), ((2571, 2597), 'app.db.session.add', 'db.session.add', (['new_invite'], {}), '(new_invite)\n', (2585, 2597), False, 'from app import db, mail\n'), ((2606, 2625), 'app.db.session.commit', 'db.session.commit', ([], {}), '()\n', (2623, 2625), False, 'from app import db, mail\n'), ((2648, 2758), 'flask.url_for', 'url_for', (['""".join"""'], {'org_username': 'current_user.organization.username', 'token': 'new_invite.token', '_external': '(True)'}), "('.join', org_username=current_user.organization.username, token=\n new_invite.token, _external=True)\n", (2655, 2758), False, 'from flask import Blueprint, flash, redirect, render_template, request, url_for\n'), ((2827, 2926), 'flask_mail.Message', 'Message', (['"""Worktable organization join invite"""'], {'sender': '"""<EMAIL>"""', 'recipients': '[new_invite.email]'}), "('Worktable organization join invite', sender='<EMAIL>', recipients=\n [new_invite.email])\n", (2834, 2926), False, 'from flask_mail import Message\n'), ((2988, 3099), 'flask.render_template', 'render_template', (['"""members/invite_mail.txt"""'], {'join_link': 'invite_link', 'org_name': 'current_user.organization.name'}), "('members/invite_mail.txt', join_link=invite_link, org_name=\n current_user.organization.name)\n", (3003, 3099), False, 'from flask import Blueprint, flash, redirect, render_template, request, url_for\n'), ((3161, 3273), 'flask.render_template', 'render_template', (['"""members/invite_mail.html"""'], {'join_link': 'invite_link', 'org_name': 'current_user.organization.name'}), "('members/invite_mail.html', join_link=invite_link, org_name\n =current_user.organization.name)\n", (3176, 3273), False, 'from flask import Blueprint, flash, redirect, render_template, request, url_for\n'), ((3324, 3338), 'app.mail.send', 'mail.send', (['msg'], {}), '(msg)\n', (3333, 3338), False, 'from app import db, mail\n'), ((3348, 3399), 'flask.flash', 'flash', (['"""New member invite has been sent"""', '"""success"""'], {}), "('New member invite has been sent', 'success')\n", (3353, 3399), False, 'from flask import Blueprint, flash, redirect, render_template, request, url_for\n'), ((3771, 3808), 'flask.flash', 'flash', (['"""Invalid invite link"""', '"""error"""'], {}), "('Invalid invite link', 'error')\n", (3776, 3808), False, 'from flask import Blueprint, flash, redirect, render_template, request, url_for\n'), ((3980, 4123), 'app.models.Member', 'Member', ([], {'first_name': 'member_join.first_name.data', 'last_name': 'member_join.last_name.data', 'email': 'invite.email', 'organization': 'invite.organization'}), '(first_name=member_join.first_name.data, last_name=member_join.\n last_name.data, email=invite.email, organization=invite.organization)\n', (3986, 4123), False, 'from app.models import Member, MemberInvite\n'), ((4242, 4268), 'app.db.session.add', 'db.session.add', (['new_member'], {}), '(new_member)\n', (4256, 4268), False, 'from app import db, mail\n'), ((4277, 4302), 'app.db.session.delete', 'db.session.delete', (['invite'], {}), '(invite)\n', (4294, 4302), False, 'from app import db, mail\n'), ((4311, 4330), 'app.db.session.commit', 'db.session.commit', ([], {}), '()\n', (4328, 4330), False, 'from app import db, mail\n'), ((4339, 4361), 'flask_login.login_user', 'login_user', (['new_member'], {}), '(new_member)\n', (4349, 4361), False, 'from flask_login import current_user, login_required, login_user\n'), ((4370, 4421), 'flask.flash', 'flash', (['"""New member invite has been sent"""', '"""success"""'], {}), "('New member invite has been sent', 'success')\n", (4375, 4421), False, 'from flask import Blueprint, flash, redirect, render_template, request, url_for\n'), ((4935, 4980), 'flask.flash', 'flash', (['"""Member account is not found"""', '"""error"""'], {}), "('Member account is not found', 'error')\n", (4940, 4980), False, 'from flask import Blueprint, flash, redirect, render_template, request, url_for\n'), ((5729, 5774), 'flask.flash', 'flash', (['"""Member account is not found"""', '"""error"""'], {}), "('Member account is not found', 'error')\n", (5734, 5774), False, 'from flask import Blueprint, flash, redirect, render_template, request, url_for\n'), ((550, 612), 'app.models.Member.query.filter_by', 'Member.query.filter_by', ([], {'organization': 'current_user.organization'}), '(organization=current_user.organization)\n', (572, 612), False, 'from app.models import Member, MemberInvite\n'), ((675, 743), 'app.models.MemberInvite.query.filter_by', 'MemberInvite.query.filter_by', ([], {'organization': 'current_user.organization'}), '(organization=current_user.organization)\n', (703, 743), False, 'from app.models import Member, MemberInvite\n'), ((1007, 1094), 'app.models.MemberInvite.query.filter_by', 'MemberInvite.query.filter_by', ([], {'organization': 'current_user.organization', 'id': 'invite_id'}), '(organization=current_user.organization, id=\n invite_id)\n', (1035, 1094), False, 'from app.models import Member, MemberInvite\n'), ((1846, 1889), 'flask.flash', 'flash', (['"""Email is already a member"""', '"""error"""'], {}), "('Email is already a member', 'error')\n", (1851, 1889), False, 'from flask import Blueprint, flash, redirect, render_template, request, url_for\n'), ((2242, 2282), 'flask.flash', 'flash', (['"""Invite already sent."""', '"""warning"""'], {}), "('Invite already sent.', 'warning')\n", (2247, 2282), False, 'from flask import Blueprint, flash, redirect, render_template, request, url_for\n'), ((3437, 3510), 'flask.url_for', 'url_for', (['"""members.index"""'], {'org_username': 'current_user.organization.username'}), "('members.index', org_username=current_user.organization.username)\n", (3444, 3510), False, 'from flask import Blueprint, flash, redirect, render_template, request, url_for\n'), ((3694, 3735), 'app.models.MemberInvite.query.filter_by', 'MemberInvite.query.filter_by', ([], {'token': 'token'}), '(token=token)\n', (3722, 3735), False, 'from app.models import Member, MemberInvite\n'), ((3833, 3881), 'flask.url_for', 'url_for', (['"""auth.login"""'], {'org_username': 'org_username'}), "('auth.login', org_username=org_username)\n", (3840, 3881), False, 'from flask import Blueprint, flash, redirect, render_template, request, url_for\n'), ((4459, 4528), 'flask.url_for', 'url_for', (['"""dashboard.index"""'], {'org_username': 'invite.organization.username'}), "('dashboard.index', org_username=invite.organization.username)\n", (4466, 4528), False, 'from flask import Blueprint, flash, redirect, render_template, request, url_for\n'), ((4805, 4881), 'app.models.Member.query.filter_by', 'Member.query.filter_by', ([], {'id': 'member_id', 'organization': 'current_user.organization'}), '(id=member_id, organization=current_user.organization)\n', (4827, 4881), False, 'from app.models import Member, MemberInvite\n'), ((5018, 5070), 'flask.flash', 'flash', (['"""Member account is already disabled"""', '"""error"""'], {}), "('Member account is already disabled', 'error')\n", (5023, 5070), False, 'from flask import Blueprint, flash, redirect, render_template, request, url_for\n'), ((5599, 5675), 'app.models.Member.query.filter_by', 'Member.query.filter_by', ([], {'id': 'member_id', 'organization': 'current_user.organization'}), '(id=member_id, organization=current_user.organization)\n', (5621, 5675), False, 'from app.models import Member, MemberInvite\n'), ((5820, 5871), 'flask.flash', 'flash', (['"""Member account is already enabled"""', '"""error"""'], {}), "('Member account is already enabled', 'error')\n", (5825, 5871), False, 'from flask import Blueprint, flash, redirect, render_template, request, url_for\n'), ((1709, 1784), 'app.models.Member.query.filter_by', 'Member.query.filter_by', ([], {'email': 'email', 'organization': 'current_user.organization'}), '(email=email, organization=current_user.organization)\n', (1731, 1784), False, 'from app.models import Member, MemberInvite\n'), ((1935, 2008), 'flask.url_for', 'url_for', (['"""members.index"""'], {'org_username': 'current_user.organization.username'}), "('members.index', org_username=current_user.organization.username)\n", (1942, 2008), False, 'from flask import Blueprint, flash, redirect, render_template, request, url_for\n'), ((2099, 2185), 'app.models.MemberInvite.query.filter_by', 'MemberInvite.query.filter_by', ([], {'organization': 'current_user.organization', 'email': 'email'}), '(organization=current_user.organization, email=\n email)\n', (2127, 2185), False, 'from app.models import Member, MemberInvite\n'), ((2328, 2401), 'flask.url_for', 'url_for', (['"""members.index"""'], {'org_username': 'current_user.organization.username'}), "('members.index', org_username=current_user.organization.username)\n", (2335, 2401), False, 'from flask import Blueprint, flash, redirect, render_template, request, url_for\n'), ((5112, 5167), 'flask.flash', 'flash', (['"""You can not disabled your own account"""', '"""error"""'], {}), "('You can not disabled your own account', 'error')\n", (5117, 5167), False, 'from flask import Blueprint, flash, redirect, render_template, request, url_for\n'), ((5207, 5233), 'datetime.datetime.utcnow', 'datetime.datetime.utcnow', ([], {}), '()\n', (5231, 5233), False, 'import datetime\n'), ((5242, 5261), 'app.db.session.commit', 'db.session.commit', ([], {}), '()\n', (5259, 5261), False, 'from app import db, mail\n'), ((5270, 5322), 'flask.flash', 'flash', (['"""Member account has been disabled"""', '"""success"""'], {}), "('Member account has been disabled', 'success')\n", (5275, 5322), False, 'from flask import Blueprint, flash, redirect, render_template, request, url_for\n'), ((5913, 5966), 'flask.flash', 'flash', (['"""You can not enable your own account"""', '"""error"""'], {}), "('You can not enable your own account', 'error')\n", (5918, 5966), False, 'from flask import Blueprint, flash, redirect, render_template, request, url_for\n'), ((6019, 6038), 'app.db.session.commit', 'db.session.commit', ([], {}), '()\n', (6036, 6038), False, 'from app import db, mail\n'), ((6047, 6098), 'flask.flash', 'flash', (['"""Member account has been enabled"""', '"""success"""'], {}), "('Member account has been enabled', 'success')\n", (6052, 6098), False, 'from flask import Blueprint, flash, redirect, render_template, request, url_for\n')]
|
# Copyright 2020 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
from typing import List
import apache_beam as beam
from apache_beam.io.filesystems import FileSystems
from sideinput_refresh import util
@beam.typehints.with_input_types(bytes)
@beam.typehints.with_output_types(beam.pvalue.TaggedOutput)
class SplitToMultiple(beam.DoFn):
"""Generates a base path for each side input type combining root path received via file notification subscription
    and side input type. The PCollection received will contain only a single element representing the base path and will
be fired once every x hours matching the side input refresh frequency
Attributes:
sideinput_types: List of Side input types
file_prefix: file_prefix matching required files. Default is * indicating all files
"""
def __init__(self, sideinput_types: List[str], file_prefix: str = "*"):
self.sideinput_types = sideinput_types
self.file_prefix = file_prefix
def process(self,
element,
timestamp=beam.DoFn.TimestampParam,
window=beam.DoFn.WindowParam,
pane_info=beam.DoFn.PaneInfoParam):
# Logging to audit triggering of side input refresh process. Statement will be logged only whenever the pubsub notification
# triggers side input refresh process (i.e normally once in every x hours)
if isinstance(window, beam.transforms.window.GlobalWindow):
logging.info(
f"(Re)loading side input data from basepath {element.decode()} for global window: {timestamp} - {window}"
)
else:
logging.info(
f"(Re)loading side input data from basepath {element.decode()} for window: {util.get_formatted_time(window.start)} - {util.get_formatted_time(window.end)}"
)
for sideinput_type in self.sideinput_types:
yield beam.pvalue.TaggedOutput(
sideinput_type,
FileSystems.join(element.decode(), sideinput_type,
self.file_prefix))
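# A minimal usage sketch (not from the original project): names such as `pipeline`,
# `subscription` and the side input types below are placeholders only. It shows how a
# DoFn like this is typically wired up so that each side input type becomes its own
# tagged output PCollection of base paths.
#
#   sideinput_types = ["products", "stores"]
#   tagged = (pipeline
#             | beam.io.ReadFromPubSub(subscription=subscription)
#             | beam.ParDo(SplitToMultiple(sideinput_types)).with_outputs(*sideinput_types))
#   product_paths = tagged.products  # one PCollection of base paths per side input type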
|
[
"sideinput_refresh.util.get_formatted_time",
"apache_beam.typehints.with_input_types",
"apache_beam.typehints.with_output_types"
] |
[((734, 772), 'apache_beam.typehints.with_input_types', 'beam.typehints.with_input_types', (['bytes'], {}), '(bytes)\n', (765, 772), True, 'import apache_beam as beam\n'), ((774, 832), 'apache_beam.typehints.with_output_types', 'beam.typehints.with_output_types', (['beam.pvalue.TaggedOutput'], {}), '(beam.pvalue.TaggedOutput)\n', (806, 832), True, 'import apache_beam as beam\n'), ((2287, 2324), 'sideinput_refresh.util.get_formatted_time', 'util.get_formatted_time', (['window.start'], {}), '(window.start)\n', (2310, 2324), False, 'from sideinput_refresh import util\n'), ((2329, 2364), 'sideinput_refresh.util.get_formatted_time', 'util.get_formatted_time', (['window.end'], {}), '(window.end)\n', (2352, 2364), False, 'from sideinput_refresh import util\n')]
|
from flask import (
render_template,
request,
redirect,
url_for)
from flask_login import login_user, logout_user
from app.helpers import BaseView
from .models import LoginForm, User
class AuthView(BaseView):
def dispatch_request(self):
form = LoginForm(request.form)
if request.method == 'GET':
return render_template(
self.template_name,
form=form)
user = User.get_username(form.username.data)
        print(user and user.check_password(form.password.data))  # guard: user may be None for unknown usernames
if user and user.check_password(form.password.data):
login_user(user, form.remember.data)
return redirect(
url_for('dashboard_bp.dashboard_page')
)
return redirect(url_for('auth_bp.auth_view'))
class LogoutView(BaseView):
def dispatch_request(self):
logout_user()
return redirect(
url_for('auth_bp.auth_view')
)
|
[
"flask.render_template",
"flask.url_for",
"flask_login.login_user",
"flask_login.logout_user"
] |
[((872, 885), 'flask_login.logout_user', 'logout_user', ([], {}), '()\n', (883, 885), False, 'from flask_login import login_user, logout_user\n'), ((352, 398), 'flask.render_template', 'render_template', (['self.template_name'], {'form': 'form'}), '(self.template_name, form=form)\n', (367, 398), False, 'from flask import render_template, request, redirect, url_for\n'), ((613, 649), 'flask_login.login_user', 'login_user', (['user', 'form.remember.data'], {}), '(user, form.remember.data)\n', (623, 649), False, 'from flask_login import login_user, logout_user\n'), ((772, 800), 'flask.url_for', 'url_for', (['"""auth_bp.auth_view"""'], {}), "('auth_bp.auth_view')\n", (779, 800), False, 'from flask import render_template, request, redirect, url_for\n'), ((923, 951), 'flask.url_for', 'url_for', (['"""auth_bp.auth_view"""'], {}), "('auth_bp.auth_view')\n", (930, 951), False, 'from flask import render_template, request, redirect, url_for\n'), ((695, 733), 'flask.url_for', 'url_for', (['"""dashboard_bp.dashboard_page"""'], {}), "('dashboard_bp.dashboard_page')\n", (702, 733), False, 'from flask import render_template, request, redirect, url_for\n')]
|
import pandas as pd
import re
import nltk
import spacy
import torch
from nltk.corpus import stopwords
from cleantext import clean
from ekphrasis.classes.preprocessor import TextPreProcessor
from torch.utils.data import Dataset
# Params for text cleaning (keys mirror cleantext's clean() keyword arguments)
clean_text_param = {
"lower":False, # lowercase text
"no_line_breaks":True, # fully strip line breaks as opposed to only normalizing them
"no_urls":False, # replace all URLs with a special token
"no_emails":False, # replace all email addresses with a special token
"no_phone_numbers":False, # replace all phone numbers with a special token
"no_numbers":False, # replace all numbers with a special token
"no_digits":False, # replace all digits with a special token
"no_currency_symbols":True, # replace all currency symbols with a special token
"no_punct":True, # remove punctuations
"replace_with_punct":"", # instead of removing punctuations you may replace them
"replace_with_number":"",
"replace_with_digit":"",
"replace_with_currency_symbol":"",
"lang":"en" # set to 'de' for German special handling
}
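# A minimal sketch, assuming these parameters are meant to be splatted into
# cleantext.clean(); the sample string is a placeholder:
# cleaned = clean("Visit https://example.com, it costs $5!", **clean_text_param)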
nlp = spacy.load('en_core_web_sm')
class Task1Dataset(Dataset):
def __init__(self, train_data, labels):
self.x_train = train_data
self.y_train = labels
def __len__(self):
return len(self.y_train)
def __getitem__(self, idx):
item = {key: torch.tensor(val[idx]) for key, val in self.x_train.items()}
item['labels'] = torch.tensor(self.y_train[idx], dtype=torch.float)
return item
class Preprocessor:
@staticmethod
def PreprocessorBuilder():
return Preprocessor()
def __init__(self):
self.transformations = []
self.text_processor = TextPreProcessor(
fix_html=True, # fix HTML tokens
# corpus from which the word statistics are going to be used
# for word segmentation
segmenter="english",
# corpus from which the word statistics are going to be used
# for spell correction
corrector="english",
unpack_hashtags=False, # perform word segmentation on hashtags
unpack_contractions=False, # Unpack contractions (can't -> can not)
spell_correct=True, # spell correction for elongated words
)
self.punct = "[\.,:;\(\)\[\]@\-\$£]"
nltk.download('stopwords')
self.stops = stopwords.words('english')
self.nlp = spacy.load('en_core_web_lg')
def _capitalisation_by_ner(self, sentence, entities=['GPE', 'ORG', 'NORP', 'PERSON']):
edited_row = []
trial_doc = self.nlp(sentence)
for tok in trial_doc:
if tok.ent_type_ in entities:
edited_row.append(tok.text)
else:
edited_row.append(tok.text.lower())
return ' '.join(edited_row)
def with_word_replacement(self):
self.transformations.append(("apply", {"func": (lambda x: re.subn("<.*/>", x[1], x[0])[0]), "axis":1}))
return self
def with_capitalisation_by_ner(self):
self.transformations.append(("apply", {"func": (lambda x: self._capitalisation_by_ner(x))}))
return self
def with_joining_contraction_tokens(self):
self.transformations.append(("str.replace", {"pat": " (?P<one>\w*'\w+)", "repl": (lambda x: x.group("one"))}))
return self
def with_spell_check(self):
self.transformations.append(("apply", {"func": (lambda x: self.text_processor.pre_process_doc(x))}))
return self
def with_space_after_hashtags(self):
self.transformations.append(("str.replace", {"pat": "#", "repl": "# "}))
return self
def with_ascii_quotes_replacement(self):
self.transformations.append(("str.replace", {"pat": "[‘’]", "repl": "'"}))
return self
def with_possessive_elimination(self):
self.transformations.append(("str.replace", {"pat": "'s", "repl": ""}))
return self
def with_punct_removal(self):
self.transformations.append(("str.replace", {"pat": self.punct, "repl": "'"}))
return self
def with_digit_removal(self):
self.transformations.append(("str.replace", {"pat": "[0-9]", "repl": ""}))
return self
def with_stopwords_removal(self):
self.transformations.append(("apply", {"func": (lambda x: " ".join([w for w in x.split(" ") if w not in self.stops]))}))
return self
def build(self):
return self
def preprocess(self, df, clean_col_name='edited_sentence'):
_df = pd.DataFrame(index=df.index, columns=[clean_col_name, 'meanGrade'])
_df['meanGrade'] = df.meanGrade
transformed_cols = df[['original', 'edit']]
for (func, params) in self.transformations:
func_to_apply = transformed_cols
for f in func.split("."):
print(f)
func_to_apply = getattr(func_to_apply, f)
transformed_cols = func_to_apply(**params)
_df[clean_col_name] = transformed_cols
return _df, clean_col_name
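# A minimal usage sketch, assuming a dataframe `df` with 'original', 'edit' and
# 'meanGrade' columns (the column names used by preprocess above); `df` is a placeholder.
# The fluent with_* methods each return self, so they are chained before build():
# pre = (Preprocessor.PreprocessorBuilder()
#        .with_ascii_quotes_replacement()
#        .with_spell_check()
#        .with_stopwords_removal()
#        .build())
# clean_df, text_col = pre.preprocess(df)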
|
[
"pandas.DataFrame",
"ekphrasis.classes.preprocessor.TextPreProcessor",
"re.subn",
"spacy.load",
"nltk.corpus.stopwords.words",
"nltk.download",
"torch.tensor"
] |
[((1261, 1289), 'spacy.load', 'spacy.load', (['"""en_core_web_sm"""'], {}), "('en_core_web_sm')\n", (1271, 1289), False, 'import spacy\n'), ((1626, 1676), 'torch.tensor', 'torch.tensor', (['self.y_train[idx]'], {'dtype': 'torch.float'}), '(self.y_train[idx], dtype=torch.float)\n', (1638, 1676), False, 'import torch\n'), ((1888, 2035), 'ekphrasis.classes.preprocessor.TextPreProcessor', 'TextPreProcessor', ([], {'fix_html': '(True)', 'segmenter': '"""english"""', 'corrector': '"""english"""', 'unpack_hashtags': '(False)', 'unpack_contractions': '(False)', 'spell_correct': '(True)'}), "(fix_html=True, segmenter='english', corrector='english',\n unpack_hashtags=False, unpack_contractions=False, spell_correct=True)\n", (1904, 2035), False, 'from ekphrasis.classes.preprocessor import TextPreProcessor\n'), ((2537, 2563), 'nltk.download', 'nltk.download', (['"""stopwords"""'], {}), "('stopwords')\n", (2550, 2563), False, 'import nltk\n'), ((2585, 2611), 'nltk.corpus.stopwords.words', 'stopwords.words', (['"""english"""'], {}), "('english')\n", (2600, 2611), False, 'from nltk.corpus import stopwords\n'), ((2632, 2660), 'spacy.load', 'spacy.load', (['"""en_core_web_lg"""'], {}), "('en_core_web_lg')\n", (2642, 2660), False, 'import spacy\n'), ((4781, 4848), 'pandas.DataFrame', 'pd.DataFrame', ([], {'index': 'df.index', 'columns': "[clean_col_name, 'meanGrade']"}), "(index=df.index, columns=[clean_col_name, 'meanGrade'])\n", (4793, 4848), True, 'import pandas as pd\n'), ((1540, 1562), 'torch.tensor', 'torch.tensor', (['val[idx]'], {}), '(val[idx])\n', (1552, 1562), False, 'import torch\n'), ((3165, 3193), 're.subn', 're.subn', (['"""<.*/>"""', 'x[1]', 'x[0]'], {}), "('<.*/>', x[1], x[0])\n", (3172, 3193), False, 'import re\n')]
|
"""projecto URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/3.1/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.contrib import admin
from django.urls import path, re_path
from app import views
urlpatterns = [
path('admin/', admin.site.urls),
path('index/', views.index, name='index'),
path('', views.index, name='index'),
path('login/', views.login, name='login'),
path('signup/', views.signup, name='signup'),
path('profile/', views.profile, name='profile'),
path('wishlist/', views.wishlist, name='wishlist'),
path('about/', views.about, name='about'),
path('model/', views.model, name='model'),
path('add_announce/', views.add_announce, name='add_announce'),
path('deleteAccount/', views.deleteAccount, name='deleteAccount'),
path('remove_announcement/', views.remove_announcement, name="remove_announcement"),
path('fav/', views.fav, name="fav"),
path('rev/', views.rev, name="rev"),
]
|
[
"django.urls.path"
] |
[((747, 778), 'django.urls.path', 'path', (['"""admin/"""', 'admin.site.urls'], {}), "('admin/', admin.site.urls)\n", (751, 778), False, 'from django.urls import path, re_path\n'), ((784, 825), 'django.urls.path', 'path', (['"""index/"""', 'views.index'], {'name': '"""index"""'}), "('index/', views.index, name='index')\n", (788, 825), False, 'from django.urls import path, re_path\n'), ((831, 866), 'django.urls.path', 'path', (['""""""', 'views.index'], {'name': '"""index"""'}), "('', views.index, name='index')\n", (835, 866), False, 'from django.urls import path, re_path\n'), ((872, 913), 'django.urls.path', 'path', (['"""login/"""', 'views.login'], {'name': '"""login"""'}), "('login/', views.login, name='login')\n", (876, 913), False, 'from django.urls import path, re_path\n'), ((919, 963), 'django.urls.path', 'path', (['"""signup/"""', 'views.signup'], {'name': '"""signup"""'}), "('signup/', views.signup, name='signup')\n", (923, 963), False, 'from django.urls import path, re_path\n'), ((969, 1016), 'django.urls.path', 'path', (['"""profile/"""', 'views.profile'], {'name': '"""profile"""'}), "('profile/', views.profile, name='profile')\n", (973, 1016), False, 'from django.urls import path, re_path\n'), ((1022, 1072), 'django.urls.path', 'path', (['"""wishlist/"""', 'views.wishlist'], {'name': '"""wishlist"""'}), "('wishlist/', views.wishlist, name='wishlist')\n", (1026, 1072), False, 'from django.urls import path, re_path\n'), ((1078, 1119), 'django.urls.path', 'path', (['"""about/"""', 'views.about'], {'name': '"""about"""'}), "('about/', views.about, name='about')\n", (1082, 1119), False, 'from django.urls import path, re_path\n'), ((1125, 1166), 'django.urls.path', 'path', (['"""model/"""', 'views.model'], {'name': '"""model"""'}), "('model/', views.model, name='model')\n", (1129, 1166), False, 'from django.urls import path, re_path\n'), ((1172, 1234), 'django.urls.path', 'path', (['"""add_announce/"""', 'views.add_announce'], {'name': '"""add_announce"""'}), "('add_announce/', views.add_announce, name='add_announce')\n", (1176, 1234), False, 'from django.urls import path, re_path\n'), ((1240, 1305), 'django.urls.path', 'path', (['"""deleteAccount/"""', 'views.deleteAccount'], {'name': '"""deleteAccount"""'}), "('deleteAccount/', views.deleteAccount, name='deleteAccount')\n", (1244, 1305), False, 'from django.urls import path, re_path\n'), ((1311, 1399), 'django.urls.path', 'path', (['"""remove_announcement/"""', 'views.remove_announcement'], {'name': '"""remove_announcement"""'}), "('remove_announcement/', views.remove_announcement, name=\n 'remove_announcement')\n", (1315, 1399), False, 'from django.urls import path, re_path\n'), ((1400, 1435), 'django.urls.path', 'path', (['"""fav/"""', 'views.fav'], {'name': '"""fav"""'}), "('fav/', views.fav, name='fav')\n", (1404, 1435), False, 'from django.urls import path, re_path\n'), ((1441, 1476), 'django.urls.path', 'path', (['"""rev/"""', 'views.rev'], {'name': '"""rev"""'}), "('rev/', views.rev, name='rev')\n", (1445, 1476), False, 'from django.urls import path, re_path\n')]
|
from models.basemodel import BaseModel
from sklearn import model_selection, svm
class SupportVectorMachineRegressionPoly3(BaseModel):
def _train(self, X_train, y_train):
parametres = {'gamma': [0.01, 0.1, 1], 'C': [1, 10, 100], 'degree': [2,3,4,5,6]}
grid_search = model_selection.GridSearchCV(svm.SVR(kernel="poly" ), parametres, n_jobs=6)
grid_search = grid_search.fit(X_train, y_train)
return grid_search
def compute_and_output_r2_metric(self, trained_grid_search, y_train, y_train_pred, y_test, y_test_pred):
self._printResults(y_train, y_train_pred, y_test, y_test_pred, str(trained_grid_search.best_params_))
|
[
"sklearn.svm.SVR"
] |
[((317, 339), 'sklearn.svm.SVR', 'svm.SVR', ([], {'kernel': '"""poly"""'}), "(kernel='poly')\n", (324, 339), False, 'from sklearn import model_selection, svm\n')]
|
# Mathematics > Number Theory > John and GCD list
# Help John in making a list from GCD list
#
# https://www.hackerrank.com/challenges/john-and-gcd-list/problem
#
import math
import functools
def gcd(*numbers):
""" greatest common divisor """
return functools.reduce(math.gcd, numbers)
def lcm(*numbers):
""" least common multiple """
return functools.reduce(lambda a, b: (a * b) // gcd(a, b), numbers, 1)
# the answer is the "sliding" lcm of the Ai
for _ in range(int(input())):
n = int(input())
A = list(map(int, input().split()))
x = A[0]
B = [x]
i = 1
while i < n:
y = A[i]
B.append(lcm(x, y))
x = y
i += 1
B.append(A[-1])
print(*B)
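# Worked example: for n = 3 and A = [1, 2, 3] the loop builds
# B = [A[0], lcm(A[0], A[1]), lcm(A[1], A[2]), A[-1]] = [1, 2, 6, 3], printed as "1 2 6 3".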
|
[
"functools.reduce"
] |
[((261, 296), 'functools.reduce', 'functools.reduce', (['math.gcd', 'numbers'], {}), '(math.gcd, numbers)\n', (277, 296), False, 'import functools\n')]
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
@author: <NAME>
"""
import melopero_RV_3028 as mp
import time
def main():
# First initialize and create the rtc device
rtc = mp.RV_3028()
# setup the rtc to use the eeprom memory (disables the automatic configuration refresh)
rtc.use_eeprom()
my_reg_address = 0x00
my_data = 0x42
# to write to ram registers you must use rtc.write_register
# to write to eeprom you must use rtc.write_eeprom_register
# user eeprom address space : [0x00 - 0x2A]
# configuration eeprom address space : [0x30 - 0x37]
rtc.write_eeprom_register(register_address=my_reg_address, value=my_data)
print("Saved {} at address {} in eeprom".format(my_data, my_reg_address))
# give some time to execute writing operation
time.sleep(1)
# to read from ram registers you must use rtc.read_register
# to write to eeprom you must use rtc.read_eeprom_register
# user eeprom address space : [0x00 - 0x2A]
# configuration eeprom address space : [0x30 - 0x37]
my_saved_data = rtc.read_eeprom_register(register_address=my_reg_address)
print("Read {} from eeprom address {}".format(my_saved_data, my_reg_address))
if __name__ == "__main__":
main()
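# A minimal sketch for the RAM-register path named in the comments inside main()
# (rtc.write_register / rtc.read_register); the keyword names simply mirror the eeprom
# calls above and the address 0x03 is an arbitrary placeholder, both assumptions:
# rtc.write_register(register_address=0x03, value=0x00)
# ram_value = rtc.read_register(register_address=0x03)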
|
[
"melopero_RV_3028.RV_3028",
"time.sleep"
] |
[((186, 198), 'melopero_RV_3028.RV_3028', 'mp.RV_3028', ([], {}), '()\n', (196, 198), True, 'import melopero_RV_3028 as mp\n'), ((805, 818), 'time.sleep', 'time.sleep', (['(1)'], {}), '(1)\n', (815, 818), False, 'import time\n')]
|
# Generated by Django 3.1.13 on 2021-11-28 15:25
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('clinical_annotations_manager', '0017_auto_20210424_1321'),
]
operations = [
migrations.RenameField(
model_name='coreannotation',
old_name='creation_start_date',
new_name='action_start_time',
),
migrations.RenameField(
model_name='focusregionannotation',
old_name='creation_start_date',
new_name='action_start_time',
),
migrations.RenameField(
model_name='gleasonelement',
old_name='creation_start_date',
new_name='action_start_time',
),
migrations.RenameField(
model_name='sliceannotation',
old_name='creation_start_date',
new_name='action_start_time',
),
]
|
[
"django.db.migrations.RenameField"
] |
[((249, 367), 'django.db.migrations.RenameField', 'migrations.RenameField', ([], {'model_name': '"""coreannotation"""', 'old_name': '"""creation_start_date"""', 'new_name': '"""action_start_time"""'}), "(model_name='coreannotation', old_name=\n 'creation_start_date', new_name='action_start_time')\n", (271, 367), False, 'from django.db import migrations\n'), ((419, 544), 'django.db.migrations.RenameField', 'migrations.RenameField', ([], {'model_name': '"""focusregionannotation"""', 'old_name': '"""creation_start_date"""', 'new_name': '"""action_start_time"""'}), "(model_name='focusregionannotation', old_name=\n 'creation_start_date', new_name='action_start_time')\n", (441, 544), False, 'from django.db import migrations\n'), ((596, 714), 'django.db.migrations.RenameField', 'migrations.RenameField', ([], {'model_name': '"""gleasonelement"""', 'old_name': '"""creation_start_date"""', 'new_name': '"""action_start_time"""'}), "(model_name='gleasonelement', old_name=\n 'creation_start_date', new_name='action_start_time')\n", (618, 714), False, 'from django.db import migrations\n'), ((766, 885), 'django.db.migrations.RenameField', 'migrations.RenameField', ([], {'model_name': '"""sliceannotation"""', 'old_name': '"""creation_start_date"""', 'new_name': '"""action_start_time"""'}), "(model_name='sliceannotation', old_name=\n 'creation_start_date', new_name='action_start_time')\n", (788, 885), False, 'from django.db import migrations\n')]
|
"""
192.168.30.22 hostA.localdomain # hostA
192.168.30.33 hostB.localdomain # hostB
192.168.30.44 hostC.localdomain # hostB
"""
"""
groups = [{"hostname": "hostA","ip": "192.168.30.22", "fqdn": "hostA.localdomain"},
{"hostname": "hostB", "ip": "192.168.30.33", "fqdn": "hostB.localdomain"},
{"hostname": "hostC", "ip": "192.168.30.44", "fqdn": "hostC.localdomain"}]
"""
from flask import Flask, request, redirect, url_for, session, render_template
# from flask import request
# from flask import redirect
# from flask import url_for
# from flask import session
# from flask import render_template
app = Flask(__name__)
app.secret_key= "random random RANDOM!"
groups = [{"hostname": "hostA","ip": "192.168.30.22", "fqdn": "hostA.localdomain"},
{"hostname": "hostB", "ip": "192.168.30.33", "fqdn": "hostB.localdomain"},
{"hostname": "hostC", "ip": "192.168.30.44", "fqdn": "hostC.localdomain"}]
@app.route("/", methods= ["GET","POST"])
def hosts():
# GET returns the rendered hosts
# POST adds new hosts, then returns rendered hosts
if "username" in session and session["username"] == "admin":
if request.method == "POST":
# pull all values from posted form
hostname = request.form.get("hostname")
ip = request.form.get("ip")
fqdn = request.form.get("fqdn")
# create a new dictionary with values, add to groups
groups.append({"hostname": hostname, "ip": ip, "fqdn": fqdn})
return render_template("hosts.j2", groups=groups)
@app.route("/form", methods=["GET","POST"])
def form():
# HTML form that collects hostname, ip, and fqdn values
if request.method == "POST":
session["username"] = request.form.get("username")
if "username" in session and session["username"] == "admin":
return render_template("formcollector.html.j2")
else:
return """
<form action = "" method = "post">
<p>Invalid Login.</p>
<p><input type = text name = username></p>
<p><input type = submit value = Login></p>
</form>
"""
@app.route("/logout")
def logout():
# accessing this page pops the value of username of the session
session.pop("username", None)
return redirect("/")
if __name__ == "__main__":
app.run(host="0.0.0.0", port=2224)
|
[
"flask.session.pop",
"flask.redirect",
"flask.request.form.get",
"flask.Flask",
"flask.render_template"
] |
[((628, 643), 'flask.Flask', 'Flask', (['__name__'], {}), '(__name__)\n', (633, 643), False, 'from flask import Flask, request, redirect, url_for, session, render_template\n'), ((1522, 1564), 'flask.render_template', 'render_template', (['"""hosts.j2"""'], {'groups': 'groups'}), "('hosts.j2', groups=groups)\n", (1537, 1564), False, 'from flask import Flask, request, redirect, url_for, session, render_template\n'), ((2214, 2243), 'flask.session.pop', 'session.pop', (['"""username"""', 'None'], {}), "('username', None)\n", (2225, 2243), False, 'from flask import Flask, request, redirect, url_for, session, render_template\n'), ((2255, 2268), 'flask.redirect', 'redirect', (['"""/"""'], {}), "('/')\n", (2263, 2268), False, 'from flask import Flask, request, redirect, url_for, session, render_template\n'), ((1745, 1773), 'flask.request.form.get', 'request.form.get', (['"""username"""'], {}), "('username')\n", (1761, 1773), False, 'from flask import Flask, request, redirect, url_for, session, render_template\n'), ((1854, 1894), 'flask.render_template', 'render_template', (['"""formcollector.html.j2"""'], {}), "('formcollector.html.j2')\n", (1869, 1894), False, 'from flask import Flask, request, redirect, url_for, session, render_template\n'), ((1259, 1287), 'flask.request.form.get', 'request.form.get', (['"""hostname"""'], {}), "('hostname')\n", (1275, 1287), False, 'from flask import Flask, request, redirect, url_for, session, render_template\n'), ((1305, 1327), 'flask.request.form.get', 'request.form.get', (['"""ip"""'], {}), "('ip')\n", (1321, 1327), False, 'from flask import Flask, request, redirect, url_for, session, render_template\n'), ((1347, 1371), 'flask.request.form.get', 'request.form.get', (['"""fqdn"""'], {}), "('fqdn')\n", (1363, 1371), False, 'from flask import Flask, request, redirect, url_for, session, render_template\n')]
|
import numpy as np
import tensorflow as tf
from read_data import get_X_y
from sklearn.preprocessing import MinMaxScaler
import matplotlib.pyplot as plt
import pickle
class NN():
def __init__(self, batch_size = 300, graph = tf.get_default_graph(),test_size = 0.1, steps_back=8, num_TCL=30):
self.num_TCL = num_TCL
with graph.as_default():
# Training Parameters
self.learning_rate = 0.1
self.num_steps = 100000
self.steps_back = steps_back
self.batch_size = batch_size
if batch_size==1:
self.test_proportion = 0
else:
self.test_proportion = test_size
self.batch_tr_size = int(self.batch_size * (1 - self.test_proportion))
self.test_size = int(self.test_proportion*self.batch_size)
self.keep_prob = tf.placeholder(tf.float32, name='keep_prob') # dropout (keep probability)
# display_step = 10
# Network Parameters
            self.cnn_num_input = num_TCL # width of the CNN input: one value per TCL
self.fc_num_input = 4
            self.num_output = 1 # single regression output
self.dropout = 0.85 # Dropout, probability to keep units
# Placeholders
self.Xb = tf.placeholder(tf.float32, [self.batch_tr_size, self.steps_back, self.cnn_num_input],name='Xb')
self.Xe = tf.placeholder(tf.float32, [self.batch_tr_size, 1, 4], name='Xe')
self.Y = tf.placeholder(tf.float32, [self.batch_tr_size, self.num_output], name='Y')
if self.test_proportion != 0:
# Test Placeholders
self.Xb_test = tf.placeholder(tf.float32, [self.test_size, self.steps_back, self.cnn_num_input],name='Xb_test')
self.Xe_test = tf.placeholder(tf.float32, [self.test_size, 1, 4], name='Xe_test')
self.Y_test = tf.placeholder(tf.float32, [self.test_size, self.num_output], name='Y_test')
# Store layers weight & bias
self.weights = {
# 5x5 conv
'wc1': tf.Variable(tf.random_normal([2, 8, 1, 32])),
# 5x5 conv, 32 inputs, 64 outputs
'wc2': tf.Variable(tf.random_normal([2, 8, 32, 64])),
# fully connected for cnn
'wd1': tf.Variable(tf.random_normal([self.steps_back*self.cnn_num_input*64//4, 1024])),
'wd11': tf.Variable(tf.random_normal([1024, 20])),
# fully connected for fl_net,
'wd2': tf.Variable(tf.random_normal([4, 20])),
# 1024+10 inputs, 1 output (class prediction)
'out': tf.Variable(tf.random_normal([20+20, 50])),
# second fuly connected layer 100 inputs and 1 output
'out2': tf.Variable(tf.random_normal([50, self.num_output]))
}
self.biases = {
'bc1': tf.Variable(tf.random_normal([32])),
'bc2': tf.Variable(tf.random_normal([64])),
'bd1': tf.Variable(tf.random_normal([1024])),
'bd11': tf.Variable(tf.random_normal([20])),
'bd2': tf.Variable(tf.random_normal([20])),
'out': tf.Variable(tf.random_normal([50])),
'out2': tf.Variable(tf.random_normal([self.num_output]))
}
# Create some wrappers for simplicity
def conv2d(self, x, W, b, strides=1):
# Conv2D wrapper, with bias and relu activation
x = tf.nn.conv2d(x, W, strides=[1, strides, strides, 1], padding='SAME')
x = tf.nn.bias_add(x, b)
return tf.nn.relu(x)
def maxpool2d(self, x, k=2):
# MaxPool2D wrapper
return tf.nn.max_pool(x, ksize=[1, k, k, 1], strides=[1, k, k, 1],
padding='SAME')
# Create model
def conv_net(self,xb):
xb = tf.reshape(xb, shape=[-1, self.steps_back, self.num_TCL, 1])
# Convolution Layer
conv1 = self.conv2d(xb, self.weights['wc1'],self.biases['bc1'])
# Max Pooling (down-sampling)
conv1 = self.maxpool2d(conv1, k=2)
# Convolution Layer
conv2 = self.conv2d(conv1, self.weights['wc2'], self.biases['bc2'])
# Max Pooling (down-sampling)
# conv2 = self.maxpool2d(conv2, k=2)
# Fully connected layer
# Reshape conv2 output to fit fully connected layer input
conv2_reshaped = tf.reshape(conv2, [-1, self.weights['wd1'].get_shape().as_list()[0]])
fc1 = tf.add(tf.matmul(conv2_reshaped, self.weights['wd1']), self.biases['bd1'])
fc1_relued = tf.nn.relu(fc1)
fc11 = tf.add(tf.matmul(fc1_relued, self.weights['wd11']), self.biases['bd11'])
fc11_relued = tf.nn.relu(fc11)
## Apply Dropout
return tf.nn.dropout(fc11_relued, self.keep_prob)
def fc_net(self,xe):
xe = tf.reshape(xe, shape=[-1, self.weights['wd2'].get_shape().as_list()[0]])
fc2 = tf.add(tf.matmul(xe, self.weights['wd2']), self.biases['bd2'])
return tf.nn.relu(fc2)
def combined_net(self, graph = tf.get_default_graph()):
with graph.as_default():
conv_component = self.conv_net(self.Xb)
fc_component = self.fc_net(self.Xe)
# concatenate the to components
fc = tf.concat([conv_component,fc_component], axis=1)
# another fc net with sigmoid
fc3 = tf.add(tf.matmul(fc, self.weights['out']), self.biases['out'])
fc3_sigmoided = tf.nn.sigmoid(fc3)
#linear fc
prediction = tf.add(tf.matmul(fc3_sigmoided, self.weights['out2']), self.biases['out2'], name="prediction")
# Define loss and optimizer
loss_op = tf.losses.mean_squared_error(predictions = prediction ,labels = self.Y)
optimizer = tf.train.AdamOptimizer(learning_rate = self.learning_rate)
train_op = optimizer.minimize(loss_op,name="train_op")
if self.test_proportion != 0:
# Test graph
conv_component_test = self.conv_net(graph.get_tensor_by_name("Xb_test:0"))
fc_component_test = self.fc_net(graph.get_tensor_by_name("Xe_test:0"))
# concatenate the to components
fc_test = tf.concat([conv_component_test, fc_component_test], axis=1)
# another fc net with sigmoid
fc3_test = tf.add(tf.matmul(fc_test, self.weights['out']), self.biases['out'])
fc3_sigmoided_test = tf.nn.sigmoid(fc3_test)
# linear fc
prediction_test = tf.add(tf.matmul(fc3_sigmoided_test, self.weights['out2']), self.biases['out2'], name="prediction_test")
loss_op_test = tf.losses.mean_squared_error(predictions=prediction_test, labels=self.Y_test)
def run_sess(self, sess, batch_xb, batch_xe, batch_y, saver, name):
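        # Split the incoming batch into a training part and a held-out test part,
        # run up to num_steps optimization steps with simple early stopping based on
        # the gap between test and training loss, then save the model checkpoint.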
graph = sess.graph
batch_xe = np.reshape(batch_xe,[-1,1,self.fc_num_input])
batch_xb = np.reshape(batch_xb, [-1, self.steps_back, self.cnn_num_input])
batch_y = np.reshape(batch_y,[-1,self.num_output])
batch_tr_xe = batch_xe[:self.batch_tr_size]
batch_test_xe = batch_xe[self.batch_tr_size:]
batch_tr_xb = batch_xb[:self.batch_tr_size]
batch_test_xb = batch_xb[self.batch_tr_size:]
batch_tr_y = batch_y[:self.batch_tr_size]
batch_test_y = batch_y[self.batch_tr_size:]
overfitting=0
for step in range(1, self.num_steps + 1):
# Run optimization op (backprop)
sess.run("train_op", feed_dict={graph.get_tensor_by_name("Xb:0"): batch_tr_xb,
graph.get_tensor_by_name("Xe:0"): batch_tr_xe,
graph.get_tensor_by_name("Y:0"): batch_tr_y,
graph.get_tensor_by_name("keep_prob:0"): self.dropout})
# Calculate batch loss
training_l = sess.run("mean_squared_error/value:0",
feed_dict={graph.get_tensor_by_name("Xb:0"): batch_tr_xb,
graph.get_tensor_by_name("Xe:0"): batch_tr_xe,
graph.get_tensor_by_name("Y:0"): batch_tr_y,
graph.get_tensor_by_name("keep_prob:0"): 1.0})
test_l = sess.run("mean_squared_error_1/value:0",
feed_dict={graph.get_tensor_by_name("Xb_test:0"): batch_test_xb,
graph.get_tensor_by_name("Xe_test:0"): batch_test_xe,
graph.get_tensor_by_name("Y_test:0"): batch_test_y,
graph.get_tensor_by_name("keep_prob:0"): 1.0})
if step % 10 == 0 or step == 1:
print("Step " + str(step) + ", Minibatch training Loss= " + str(training_l))
print("Step " + str(step) + ", Minibatch validation Loss= " + str(test_l))
            if test_l - training_l > 0.015:
                overfitting += 1
            else:
                overfitting = 0
            if overfitting >= 30 and training_l <= 0.01:
                print("condition satisfied")
                break
            if test_l < 0.009 and training_l < 0.009:
                print("condition satisfied")
                break
# self.training_loss.append(training_l)
# self.validation_loss.append(test_l)
print("Optimization Finished!")
# Save the variables to disk.
save_path = saver.save(sess, name)
print("Model saved in path: %s" % save_path)
def train(self,xb, xe, y, name = "./model0.ckpt", graph = tf.get_default_graph() ):
self.training_loss = []
self.validation_loss = []
with tf.Session(graph=graph) as sess:
saver = tf.train.Saver()
try:
saver.restore(sess, name)
except:
sess.run(tf.global_variables_initializer())
for i in range(xb.shape[0]//self.batch_size):
# Run the initializer
index = i*self.batch_size
self.run_sess(sess, xb[index:index+self.batch_size],xe[index:index+self.batch_size],y[index:index+self.batch_size], saver, name= name)
# plt.plot(range(len(self.training_loss)), self.training_loss, label='Training')
# plt.plot(range(len(self.validation_loss)), self.validation_loss, label='Validation')
# plt.xlabel('Steps')
# # plt.ylabel('Loss')
#
# plt.title("Loss function")
#
# plt.legend()
#
# plt.show()
# def retrain(self,xb, xe, y,sess):
# saver.restore(sess, "./model.ckpt")
# self.run_sess(sess,xb,xe,y)
def predict(self, xb, xe, sess):
# tf Graph input
graph = sess.graph
xb = np.reshape(xb, [-1, self.steps_back, self.cnn_num_input])
xe = np.reshape(xe, [-1, 1, self.fc_num_input])
p = sess.run("prediction:0", feed_dict={graph.get_tensor_by_name("Xb:0"): xb, graph.get_tensor_by_name("Xe:0"): xe, graph.get_tensor_by_name("keep_prob:0"): 1.0})
return p
if __name__ == '__main__':
xb, xe, y = get_X_y(steps_back=7, filename="Q_data0.csv")
neural_net = NN(batch_size = 100, steps_back=8)
scaler1 = {}
for i in range(xb.shape[1]):
scaler1[i] = MinMaxScaler(feature_range=(0,1), copy=True)
xb[:,i,:] = scaler1[i].fit_transform(xb[:,i,:])
scaler2 = MinMaxScaler(feature_range=(0,1), copy=True).fit(xe)
scaler3 = MinMaxScaler(feature_range=(0, 1), copy=True).fit(y.reshape(-1,1))
xe= scaler2.transform(xe)
y= scaler3.transform(y.reshape(-1,1))
# graph = tf.Graph()
neural_net.combined_net()
# saver = tf.train.Saver()
# keep_prob = neural_net.keep_prob
# init = tf.global_variables_initializer()
# graph = tf.get_default_graph()
neural_net.train(xb, xe, y)
|
[
"tensorflow.reshape",
"sklearn.preprocessing.MinMaxScaler",
"tensorflow.matmul",
"tensorflow.nn.conv2d",
"tensorflow.get_default_graph",
"tensorflow.nn.relu",
"tensorflow.concat",
"tensorflow.placeholder",
"numpy.reshape",
"tensorflow.nn.bias_add",
"tensorflow.losses.mean_squared_error",
"tensorflow.train.Saver",
"tensorflow.global_variables_initializer",
"tensorflow.Session",
"tensorflow.nn.max_pool",
"tensorflow.random_normal",
"read_data.get_X_y",
"tensorflow.nn.sigmoid",
"tensorflow.train.AdamOptimizer",
"tensorflow.nn.dropout"
] |
[((11564, 11609), 'read_data.get_X_y', 'get_X_y', ([], {'steps_back': '(7)', 'filename': '"""Q_data0.csv"""'}), "(steps_back=7, filename='Q_data0.csv')\n", (11571, 11609), False, 'from read_data import get_X_y\n'), ((239, 261), 'tensorflow.get_default_graph', 'tf.get_default_graph', ([], {}), '()\n', (259, 261), True, 'import tensorflow as tf\n'), ((3593, 3661), 'tensorflow.nn.conv2d', 'tf.nn.conv2d', (['x', 'W'], {'strides': '[1, strides, strides, 1]', 'padding': '"""SAME"""'}), "(x, W, strides=[1, strides, strides, 1], padding='SAME')\n", (3605, 3661), True, 'import tensorflow as tf\n'), ((3675, 3695), 'tensorflow.nn.bias_add', 'tf.nn.bias_add', (['x', 'b'], {}), '(x, b)\n', (3689, 3695), True, 'import tensorflow as tf\n'), ((3712, 3725), 'tensorflow.nn.relu', 'tf.nn.relu', (['x'], {}), '(x)\n', (3722, 3725), True, 'import tensorflow as tf\n'), ((3807, 3882), 'tensorflow.nn.max_pool', 'tf.nn.max_pool', (['x'], {'ksize': '[1, k, k, 1]', 'strides': '[1, k, k, 1]', 'padding': '"""SAME"""'}), "(x, ksize=[1, k, k, 1], strides=[1, k, k, 1], padding='SAME')\n", (3821, 3882), True, 'import tensorflow as tf\n'), ((3978, 4038), 'tensorflow.reshape', 'tf.reshape', (['xb'], {'shape': '[-1, self.steps_back, self.num_TCL, 1]'}), '(xb, shape=[-1, self.steps_back, self.num_TCL, 1])\n', (3988, 4038), True, 'import tensorflow as tf\n'), ((4723, 4738), 'tensorflow.nn.relu', 'tf.nn.relu', (['fc1'], {}), '(fc1)\n', (4733, 4738), True, 'import tensorflow as tf\n'), ((4851, 4867), 'tensorflow.nn.relu', 'tf.nn.relu', (['fc11'], {}), '(fc11)\n', (4861, 4867), True, 'import tensorflow as tf\n'), ((4910, 4952), 'tensorflow.nn.dropout', 'tf.nn.dropout', (['fc11_relued', 'self.keep_prob'], {}), '(fc11_relued, self.keep_prob)\n', (4923, 4952), True, 'import tensorflow as tf\n'), ((5164, 5179), 'tensorflow.nn.relu', 'tf.nn.relu', (['fc2'], {}), '(fc2)\n', (5174, 5179), True, 'import tensorflow as tf\n'), ((5220, 5242), 'tensorflow.get_default_graph', 'tf.get_default_graph', ([], {}), '()\n', (5240, 5242), True, 'import tensorflow as tf\n'), ((7101, 7149), 'numpy.reshape', 'np.reshape', (['batch_xe', '[-1, 1, self.fc_num_input]'], {}), '(batch_xe, [-1, 1, self.fc_num_input])\n', (7111, 7149), True, 'import numpy as np\n'), ((7167, 7230), 'numpy.reshape', 'np.reshape', (['batch_xb', '[-1, self.steps_back, self.cnn_num_input]'], {}), '(batch_xb, [-1, self.steps_back, self.cnn_num_input])\n', (7177, 7230), True, 'import numpy as np\n'), ((7250, 7292), 'numpy.reshape', 'np.reshape', (['batch_y', '[-1, self.num_output]'], {}), '(batch_y, [-1, self.num_output])\n', (7260, 7292), True, 'import numpy as np\n'), ((9992, 10014), 'tensorflow.get_default_graph', 'tf.get_default_graph', ([], {}), '()\n', (10012, 10014), True, 'import tensorflow as tf\n'), ((11212, 11269), 'numpy.reshape', 'np.reshape', (['xb', '[-1, self.steps_back, self.cnn_num_input]'], {}), '(xb, [-1, self.steps_back, self.cnn_num_input])\n', (11222, 11269), True, 'import numpy as np\n'), ((11284, 11326), 'numpy.reshape', 'np.reshape', (['xe', '[-1, 1, self.fc_num_input]'], {}), '(xe, [-1, 1, self.fc_num_input])\n', (11294, 11326), True, 'import numpy as np\n'), ((11737, 11782), 'sklearn.preprocessing.MinMaxScaler', 'MinMaxScaler', ([], {'feature_range': '(0, 1)', 'copy': '(True)'}), '(feature_range=(0, 1), copy=True)\n', (11749, 11782), False, 'from sklearn.preprocessing import MinMaxScaler\n'), ((898, 942), 'tensorflow.placeholder', 'tf.placeholder', (['tf.float32'], {'name': '"""keep_prob"""'}), "(tf.float32, name='keep_prob')\n", (912, 942), True, 'import 
tensorflow as tf\n'), ((1329, 1430), 'tensorflow.placeholder', 'tf.placeholder', (['tf.float32', '[self.batch_tr_size, self.steps_back, self.cnn_num_input]'], {'name': '"""Xb"""'}), "(tf.float32, [self.batch_tr_size, self.steps_back, self.\n cnn_num_input], name='Xb')\n", (1343, 1430), True, 'import tensorflow as tf\n'), ((1448, 1513), 'tensorflow.placeholder', 'tf.placeholder', (['tf.float32', '[self.batch_tr_size, 1, 4]'], {'name': '"""Xe"""'}), "(tf.float32, [self.batch_tr_size, 1, 4], name='Xe')\n", (1462, 1513), True, 'import tensorflow as tf\n'), ((1536, 1611), 'tensorflow.placeholder', 'tf.placeholder', (['tf.float32', '[self.batch_tr_size, self.num_output]'], {'name': '"""Y"""'}), "(tf.float32, [self.batch_tr_size, self.num_output], name='Y')\n", (1550, 1611), True, 'import tensorflow as tf\n'), ((4633, 4679), 'tensorflow.matmul', 'tf.matmul', (['conv2_reshaped', "self.weights['wd1']"], {}), "(conv2_reshaped, self.weights['wd1'])\n", (4642, 4679), True, 'import tensorflow as tf\n'), ((4762, 4805), 'tensorflow.matmul', 'tf.matmul', (['fc1_relued', "self.weights['wd11']"], {}), "(fc1_relued, self.weights['wd11'])\n", (4771, 4805), True, 'import tensorflow as tf\n'), ((5092, 5126), 'tensorflow.matmul', 'tf.matmul', (['xe', "self.weights['wd2']"], {}), "(xe, self.weights['wd2'])\n", (5101, 5126), True, 'import tensorflow as tf\n'), ((5444, 5493), 'tensorflow.concat', 'tf.concat', (['[conv_component, fc_component]'], {'axis': '(1)'}), '([conv_component, fc_component], axis=1)\n', (5453, 5493), True, 'import tensorflow as tf\n'), ((5647, 5665), 'tensorflow.nn.sigmoid', 'tf.nn.sigmoid', (['fc3'], {}), '(fc3)\n', (5660, 5665), True, 'import tensorflow as tf\n'), ((5875, 5942), 'tensorflow.losses.mean_squared_error', 'tf.losses.mean_squared_error', ([], {'predictions': 'prediction', 'labels': 'self.Y'}), '(predictions=prediction, labels=self.Y)\n', (5903, 5942), True, 'import tensorflow as tf\n'), ((5972, 6028), 'tensorflow.train.AdamOptimizer', 'tf.train.AdamOptimizer', ([], {'learning_rate': 'self.learning_rate'}), '(learning_rate=self.learning_rate)\n', (5994, 6028), True, 'import tensorflow as tf\n'), ((10102, 10125), 'tensorflow.Session', 'tf.Session', ([], {'graph': 'graph'}), '(graph=graph)\n', (10112, 10125), True, 'import tensorflow as tf\n'), ((10156, 10172), 'tensorflow.train.Saver', 'tf.train.Saver', ([], {}), '()\n', (10170, 10172), True, 'import tensorflow as tf\n'), ((11856, 11901), 'sklearn.preprocessing.MinMaxScaler', 'MinMaxScaler', ([], {'feature_range': '(0, 1)', 'copy': '(True)'}), '(feature_range=(0, 1), copy=True)\n', (11868, 11901), False, 'from sklearn.preprocessing import MinMaxScaler\n'), ((11924, 11969), 'sklearn.preprocessing.MinMaxScaler', 'MinMaxScaler', ([], {'feature_range': '(0, 1)', 'copy': '(True)'}), '(feature_range=(0, 1), copy=True)\n', (11936, 11969), False, 'from sklearn.preprocessing import MinMaxScaler\n'), ((1726, 1828), 'tensorflow.placeholder', 'tf.placeholder', (['tf.float32', '[self.test_size, self.steps_back, self.cnn_num_input]'], {'name': '"""Xb_test"""'}), "(tf.float32, [self.test_size, self.steps_back, self.\n cnn_num_input], name='Xb_test')\n", (1740, 1828), True, 'import tensorflow as tf\n'), ((1855, 1921), 'tensorflow.placeholder', 'tf.placeholder', (['tf.float32', '[self.test_size, 1, 4]'], {'name': '"""Xe_test"""'}), "(tf.float32, [self.test_size, 1, 4], name='Xe_test')\n", (1869, 1921), True, 'import tensorflow as tf\n'), ((1953, 2029), 'tensorflow.placeholder', 'tf.placeholder', (['tf.float32', '[self.test_size, self.num_output]'], 
{'name': '"""Y_test"""'}), "(tf.float32, [self.test_size, self.num_output], name='Y_test')\n", (1967, 2029), True, 'import tensorflow as tf\n'), ((5562, 5596), 'tensorflow.matmul', 'tf.matmul', (['fc', "self.weights['out']"], {}), "(fc, self.weights['out'])\n", (5571, 5596), True, 'import tensorflow as tf\n'), ((5723, 5769), 'tensorflow.matmul', 'tf.matmul', (['fc3_sigmoided', "self.weights['out2']"], {}), "(fc3_sigmoided, self.weights['out2'])\n", (5732, 5769), True, 'import tensorflow as tf\n'), ((6430, 6489), 'tensorflow.concat', 'tf.concat', (['[conv_component_test, fc_component_test]'], {'axis': '(1)'}), '([conv_component_test, fc_component_test], axis=1)\n', (6439, 6489), True, 'import tensorflow as tf\n'), ((6671, 6694), 'tensorflow.nn.sigmoid', 'tf.nn.sigmoid', (['fc3_test'], {}), '(fc3_test)\n', (6684, 6694), True, 'import tensorflow as tf\n'), ((6896, 6973), 'tensorflow.losses.mean_squared_error', 'tf.losses.mean_squared_error', ([], {'predictions': 'prediction_test', 'labels': 'self.Y_test'}), '(predictions=prediction_test, labels=self.Y_test)\n', (6924, 6973), True, 'import tensorflow as tf\n'), ((2168, 2199), 'tensorflow.random_normal', 'tf.random_normal', (['[2, 8, 1, 32]'], {}), '([2, 8, 1, 32])\n', (2184, 2199), True, 'import tensorflow as tf\n'), ((2289, 2321), 'tensorflow.random_normal', 'tf.random_normal', (['[2, 8, 32, 64]'], {}), '([2, 8, 32, 64])\n', (2305, 2321), True, 'import tensorflow as tf\n'), ((2403, 2475), 'tensorflow.random_normal', 'tf.random_normal', (['[self.steps_back * self.cnn_num_input * 64 // 4, 1024]'], {}), '([self.steps_back * self.cnn_num_input * 64 // 4, 1024])\n', (2419, 2475), True, 'import tensorflow as tf\n'), ((2509, 2537), 'tensorflow.random_normal', 'tf.random_normal', (['[1024, 20]'], {}), '([1024, 20])\n', (2525, 2537), True, 'import tensorflow as tf\n'), ((2623, 2648), 'tensorflow.random_normal', 'tf.random_normal', (['[4, 20]'], {}), '([4, 20])\n', (2639, 2648), True, 'import tensorflow as tf\n'), ((2750, 2781), 'tensorflow.random_normal', 'tf.random_normal', (['[20 + 20, 50]'], {}), '([20 + 20, 50])\n', (2766, 2781), True, 'import tensorflow as tf\n'), ((2890, 2929), 'tensorflow.random_normal', 'tf.random_normal', (['[50, self.num_output]'], {}), '([50, self.num_output])\n', (2906, 2929), True, 'import tensorflow as tf\n'), ((3013, 3035), 'tensorflow.random_normal', 'tf.random_normal', (['[32]'], {}), '([32])\n', (3029, 3035), True, 'import tensorflow as tf\n'), ((3074, 3096), 'tensorflow.random_normal', 'tf.random_normal', (['[64]'], {}), '([64])\n', (3090, 3096), True, 'import tensorflow as tf\n'), ((3135, 3159), 'tensorflow.random_normal', 'tf.random_normal', (['[1024]'], {}), '([1024])\n', (3151, 3159), True, 'import tensorflow as tf\n'), ((3199, 3221), 'tensorflow.random_normal', 'tf.random_normal', (['[20]'], {}), '([20])\n', (3215, 3221), True, 'import tensorflow as tf\n'), ((3260, 3282), 'tensorflow.random_normal', 'tf.random_normal', (['[20]'], {}), '([20])\n', (3276, 3282), True, 'import tensorflow as tf\n'), ((3321, 3343), 'tensorflow.random_normal', 'tf.random_normal', (['[50]'], {}), '([50])\n', (3337, 3343), True, 'import tensorflow as tf\n'), ((3383, 3418), 'tensorflow.random_normal', 'tf.random_normal', (['[self.num_output]'], {}), '([self.num_output])\n', (3399, 3418), True, 'import tensorflow as tf\n'), ((6572, 6611), 'tensorflow.matmul', 'tf.matmul', (['fc_test', "self.weights['out']"], {}), "(fc_test, self.weights['out'])\n", (6581, 6611), True, 'import tensorflow as tf\n'), ((6766, 6817), 'tensorflow.matmul', 
'tf.matmul', (['fc3_sigmoided_test', "self.weights['out2']"], {}), "(fc3_sigmoided_test, self.weights['out2'])\n", (6775, 6817), True, 'import tensorflow as tf\n'), ((10281, 10314), 'tensorflow.global_variables_initializer', 'tf.global_variables_initializer', ([], {}), '()\n', (10312, 10314), True, 'import tensorflow as tf\n')]
|
'''
Compare the data where they overlap in the uv plane.
No offset correction is needed.
'''
from spectral_cube import SpectralCube
import numpy as np
import astropy.units as u
import matplotlib.pyplot as plt
import os
import scipy.ndimage as nd
from uvcombine.scale_factor import find_scale_factor
from cube_analysis.feather_cubes import feather_compare_cube
from paths import (seventeenB_HI_data_02kms_path,
seventeenB_HI_data_1kms_path,
data_path, allfigs_path)
from constants import hi_freq
from plotting_styles import onecolumn_figure
# Compare with the 1 km/s cube. Higher S/N
# vla_cube = SpectralCube.read(seventeenB_HI_data_02kms_path("M33_14B_17B_HI_contsub_width_02kms.image.pbcor.fits"))
vla_cube = SpectralCube.read(seventeenB_HI_data_1kms_path("M33_14B_17B_HI_contsub_width_1kms.image.pbcor.fits"))
# pb_cube = SpectralCube.read(seventeenB_HI_data_02kms_path("M33_14B_17B_HI_contsub_width_02kms.pb.fits"))
pb_cube = SpectralCube.read(seventeenB_HI_data_1kms_path("M33_14B_17B_HI_contsub_width_1kms.pb.fits"))
# PB minimally changes over the frequency range. So just grab one plane
pb_plane = pb_cube[0]
# We need to define a tapered weighting function to ignore emission outside
# of the VLA mosaic
def taper_weights(mask, sigma, nsig_cut=3):
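    # Weights are 1 well inside the mask, fall off as a Gaussian over the
    # nsig_cut * sigma pixels nearest the mask edge, and are 0 outside the mask.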
dist = nd.distance_transform_edt(mask)
gauss_dists = np.where(np.logical_and(dist < nsig_cut * sigma, dist > 0.))
flat_dists = np.where(dist >= nsig_cut * sigma)
weight_arr = np.zeros_like(mask, dtype=float)
weight_arr[gauss_dists] = \
np.exp(- (dist[gauss_dists] - nsig_cut * sigma)**2 / (2 * sigma**2))
weight_arr[flat_dists] = 1.
return weight_arr
weight = taper_weights(np.isfinite(pb_plane), 30, nsig_cut=5)
gbt_path = os.path.join(data_path, "GBT")
# gbt_cube = SpectralCube.read(os.path.join(gbt_path, "17B-162_items/m33_gbt_vlsr_highres_Tmb_17B162_02kms.fits"))
gbt_cube = SpectralCube.read(os.path.join(gbt_path, "17B-162_items/m33_gbt_vlsr_highres_Tmb_17B162_1kms.fits"))
beam_fwhm = lambda diam: ((1.18 * hi_freq.to(u.cm, u.spectral())) / diam.to(u.cm)) * u.rad
# Already determined from the 14B HI analysis. Lowered spatial resolution
# due to lack of overlap in the GBT fields centered at M33. So the data were
# gridded with a Gaussian kernel, rather than a jinc function
gbt_eff_beam = beam_fwhm(87.5 * u.m)
# The shortest baseline in the 14B-088 data is ~44 m.
las = (hi_freq.to(u.cm, u.spectral()) / (44 * u.m)).to(u.arcsec, u.dimensionless_angles())
radii, ratios, high_pts, low_pts, chan_out = \
feather_compare_cube(vla_cube, gbt_cube, las,
num_cores=1,
lowresfwhm=gbt_eff_beam,
chunk=50,
verbose=False,
weights=weight,
relax_spectral_check=False,
# NOTE: there is an offset of ~0.4 km/s between the cubes
# The big GBT beam means this really doesn't matter (I
# manually checked). The difference is 0.36 times the
# channel size. I have no idea where this shift is coming
# from since the freq axis used in `gbt_regrid.py` matches
# the frequency in the individual channel MSs used in
# imaging. It's not even a half-channel offset like I
# would expect if the MS frequency was the channel edge...
spec_check_kwargs={'rtol': 0.4})
onecolumn_figure()
sc_factor, sc_err = find_scale_factor(np.hstack(low_pts), np.hstack(high_pts),
method='distrib',
verbose=True)
plt.grid(True)
plt.xlabel(r"ln I$_{\rm int}$ / I$_{\rm SD}$")
plt.tight_layout()
plt.savefig(allfigs_path("Imaging/ratio_hist_17B_vla_gbt_9.8arcmin_v3_w_weights.png"))
plt.savefig(allfigs_path("Imaging/ratio_hist_17B_vla_gbt_9.8arcmin_v3_w_weights.pdf"))
print("Factor: {0}+/-{1}".format(sc_factor, sc_err))
# Factor: 1.125046+/-0.00394768
# This isn't a fantastic fit, so this error was significantly underestimated
plt.close()
# Compare properties per-channel
sc_factor_chans = []
sc_err_chans = []
for low, high in zip(low_pts, high_pts):
sc_f, sc_e = \
find_scale_factor(low, high,
method='distrib',
verbose=False)
sc_factor_chans.append(sc_f)
sc_err_chans.append(sc_e)
sc_factor_chans_linfit = []
sc_err_chans_linfit = []
for low, high in zip(low_pts, high_pts):
sc_f, sc_e = \
find_scale_factor(low, high,
method='linfit',
verbose=False)
sc_factor_chans_linfit.append(sc_f)
sc_err_chans_linfit.append(sc_e)
sc_factor_chans_linfit = np.array(sc_factor_chans_linfit)
sc_err_chans_linfit = np.array(sc_err_chans_linfit)
chans = np.arange(len(low_pts))
onecolumn_figure()
plt.errorbar(chans, sc_factor_chans,
yerr=sc_err_chans,
alpha=0.5, label='Distrib Fit')
plt.errorbar(chans, sc_factor_chans_linfit,
yerr=[sc_factor_chans_linfit - sc_err_chans_linfit[:, 0],
sc_err_chans_linfit[:, 1] - sc_factor_chans_linfit],
alpha=0.5, label='Linear fit')
# plt.plot(chans, slope_lowess_85)
plt.axhline(1, linestyle='--')
plt.legend(frameon=True)
plt.ylabel(r"Scale Factor")
plt.xlabel("Channels")
plt.grid(True)
plt.tight_layout()
plt.savefig(allfigs_path("Imaging/ratio_hist_perchan_17B_vla_gbt_9.8arcmin_v3_w_weights.png"))
plt.savefig(allfigs_path("Imaging/ratio_hist_perchan_17B_vla_gbt_9.8arcmin_v3_w_weights.pdf"))
plt.close()
# Now refit with the channels near the systemic velocity, where most of the HI
# structure falls within the mosaic PB
chan_range = slice(80, 160)
onecolumn_figure()
sc_factor_chrange, sc_err_chrange = \
find_scale_factor(np.hstack(low_pts[chan_range]),
np.hstack(high_pts[chan_range]),
method='distrib',
verbose=True)
plt.grid(True)
plt.xlabel(r"ln I$_{\rm int}$ / I$_{\rm SD}$")
plt.tight_layout()
plt.savefig(allfigs_path(f"Imaging/ratio_hist_17B_vla_gbt_9.8arcmin_chan_{chan_range.start}_{chan_range.stop}_v3_w_weights.png"))
plt.savefig(allfigs_path(f"Imaging/ratio_hist_17B_vla_gbt_9.8arcmin_chan_{chan_range.start}_{chan_range.stop}_v3_w_weights.pdf"))
print("Factor: {0}+/-{1}".format(sc_factor_chrange, sc_err_chrange))
# Factor: 1.105133+/-0.00463
# Error still underestimated
# The >1 factor is due to some emission in the GBT data being cut-off by the
# PB limit of the VLA mosaic. The factor increases far from the systemic
# velocity, where bright HI gets cut-off (compared to the larger 14B data).
# So, despite the != 1 factor, no factor will be applied to the SD data.
# Besides, the 14B mosaic comparison gives a 1.0 factor with the GBT data.
# The tests here were for consistency and that's what we find.
plt.close()
|
[
"numpy.exp",
"matplotlib.pyplot.tight_layout",
"os.path.join",
"scipy.ndimage.distance_transform_edt",
"numpy.zeros_like",
"uvcombine.scale_factor.find_scale_factor",
"cube_analysis.feather_cubes.feather_compare_cube",
"matplotlib.pyplot.close",
"plotting_styles.onecolumn_figure",
"numpy.isfinite",
"astropy.units.spectral",
"astropy.units.dimensionless_angles",
"paths.allfigs_path",
"matplotlib.pyplot.errorbar",
"matplotlib.pyplot.axhline",
"matplotlib.pyplot.legend",
"numpy.hstack",
"matplotlib.pyplot.ylabel",
"matplotlib.pyplot.grid",
"paths.seventeenB_HI_data_1kms_path",
"numpy.logical_and",
"numpy.where",
"numpy.array",
"matplotlib.pyplot.xlabel"
] |
[((1775, 1805), 'os.path.join', 'os.path.join', (['data_path', '"""GBT"""'], {}), "(data_path, 'GBT')\n", (1787, 1805), False, 'import os\n'), ((2574, 2768), 'cube_analysis.feather_cubes.feather_compare_cube', 'feather_compare_cube', (['vla_cube', 'gbt_cube', 'las'], {'num_cores': '(1)', 'lowresfwhm': 'gbt_eff_beam', 'chunk': '(50)', 'verbose': '(False)', 'weights': 'weight', 'relax_spectral_check': '(False)', 'spec_check_kwargs': "{'rtol': 0.4}"}), "(vla_cube, gbt_cube, las, num_cores=1, lowresfwhm=\n gbt_eff_beam, chunk=50, verbose=False, weights=weight,\n relax_spectral_check=False, spec_check_kwargs={'rtol': 0.4})\n", (2594, 2768), False, 'from cube_analysis.feather_cubes import feather_compare_cube\n'), ((3587, 3605), 'plotting_styles.onecolumn_figure', 'onecolumn_figure', ([], {}), '()\n', (3603, 3605), False, 'from plotting_styles import onecolumn_figure\n'), ((3794, 3808), 'matplotlib.pyplot.grid', 'plt.grid', (['(True)'], {}), '(True)\n', (3802, 3808), True, 'import matplotlib.pyplot as plt\n'), ((3809, 3856), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""ln I$_{\\\\rm int}$ / I$_{\\\\rm SD}$"""'], {}), "('ln I$_{\\\\rm int}$ / I$_{\\\\rm SD}$')\n", (3819, 3856), True, 'import matplotlib.pyplot as plt\n'), ((3856, 3874), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {}), '()\n', (3872, 3874), True, 'import matplotlib.pyplot as plt\n'), ((4214, 4225), 'matplotlib.pyplot.close', 'plt.close', ([], {}), '()\n', (4223, 4225), True, 'import matplotlib.pyplot as plt\n'), ((4883, 4915), 'numpy.array', 'np.array', (['sc_factor_chans_linfit'], {}), '(sc_factor_chans_linfit)\n', (4891, 4915), True, 'import numpy as np\n'), ((4938, 4967), 'numpy.array', 'np.array', (['sc_err_chans_linfit'], {}), '(sc_err_chans_linfit)\n', (4946, 4967), True, 'import numpy as np\n'), ((5002, 5020), 'plotting_styles.onecolumn_figure', 'onecolumn_figure', ([], {}), '()\n', (5018, 5020), False, 'from plotting_styles import onecolumn_figure\n'), ((5021, 5113), 'matplotlib.pyplot.errorbar', 'plt.errorbar', (['chans', 'sc_factor_chans'], {'yerr': 'sc_err_chans', 'alpha': '(0.5)', 'label': '"""Distrib Fit"""'}), "(chans, sc_factor_chans, yerr=sc_err_chans, alpha=0.5, label=\n 'Distrib Fit')\n", (5033, 5113), True, 'import matplotlib.pyplot as plt\n'), ((5135, 5328), 'matplotlib.pyplot.errorbar', 'plt.errorbar', (['chans', 'sc_factor_chans_linfit'], {'yerr': '[sc_factor_chans_linfit - sc_err_chans_linfit[:, 0], sc_err_chans_linfit[:,\n 1] - sc_factor_chans_linfit]', 'alpha': '(0.5)', 'label': '"""Linear fit"""'}), "(chans, sc_factor_chans_linfit, yerr=[sc_factor_chans_linfit -\n sc_err_chans_linfit[:, 0], sc_err_chans_linfit[:, 1] -\n sc_factor_chans_linfit], alpha=0.5, label='Linear fit')\n", (5147, 5328), True, 'import matplotlib.pyplot as plt\n'), ((5401, 5431), 'matplotlib.pyplot.axhline', 'plt.axhline', (['(1)'], {'linestyle': '"""--"""'}), "(1, linestyle='--')\n", (5412, 5431), True, 'import matplotlib.pyplot as plt\n'), ((5432, 5456), 'matplotlib.pyplot.legend', 'plt.legend', ([], {'frameon': '(True)'}), '(frameon=True)\n', (5442, 5456), True, 'import matplotlib.pyplot as plt\n'), ((5457, 5483), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Scale Factor"""'], {}), "('Scale Factor')\n", (5467, 5483), True, 'import matplotlib.pyplot as plt\n'), ((5485, 5507), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Channels"""'], {}), "('Channels')\n", (5495, 5507), True, 'import matplotlib.pyplot as plt\n'), ((5508, 5522), 'matplotlib.pyplot.grid', 'plt.grid', (['(True)'], {}), '(True)\n', (5516, 5522), 
True, 'import matplotlib.pyplot as plt\n'), ((5524, 5542), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {}), '()\n', (5540, 5542), True, 'import matplotlib.pyplot as plt\n'), ((5734, 5745), 'matplotlib.pyplot.close', 'plt.close', ([], {}), '()\n', (5743, 5745), True, 'import matplotlib.pyplot as plt\n'), ((5894, 5912), 'plotting_styles.onecolumn_figure', 'onecolumn_figure', ([], {}), '()\n', (5910, 5912), False, 'from plotting_styles import onecolumn_figure\n'), ((6137, 6151), 'matplotlib.pyplot.grid', 'plt.grid', (['(True)'], {}), '(True)\n', (6145, 6151), True, 'import matplotlib.pyplot as plt\n'), ((6152, 6199), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""ln I$_{\\\\rm int}$ / I$_{\\\\rm SD}$"""'], {}), "('ln I$_{\\\\rm int}$ / I$_{\\\\rm SD}$')\n", (6162, 6199), True, 'import matplotlib.pyplot as plt\n'), ((6199, 6217), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {}), '()\n', (6215, 6217), True, 'import matplotlib.pyplot as plt\n'), ((7045, 7056), 'matplotlib.pyplot.close', 'plt.close', ([], {}), '()\n', (7054, 7056), True, 'import matplotlib.pyplot as plt\n'), ((774, 861), 'paths.seventeenB_HI_data_1kms_path', 'seventeenB_HI_data_1kms_path', (['"""M33_14B_17B_HI_contsub_width_1kms.image.pbcor.fits"""'], {}), "(\n 'M33_14B_17B_HI_contsub_width_1kms.image.pbcor.fits')\n", (802, 861), False, 'from paths import seventeenB_HI_data_02kms_path, seventeenB_HI_data_1kms_path, data_path, allfigs_path\n'), ((994, 1067), 'paths.seventeenB_HI_data_1kms_path', 'seventeenB_HI_data_1kms_path', (['"""M33_14B_17B_HI_contsub_width_1kms.pb.fits"""'], {}), "('M33_14B_17B_HI_contsub_width_1kms.pb.fits')\n", (1022, 1067), False, 'from paths import seventeenB_HI_data_02kms_path, seventeenB_HI_data_1kms_path, data_path, allfigs_path\n'), ((1318, 1349), 'scipy.ndimage.distance_transform_edt', 'nd.distance_transform_edt', (['mask'], {}), '(mask)\n', (1343, 1349), True, 'import scipy.ndimage as nd\n'), ((1447, 1481), 'numpy.where', 'np.where', (['(dist >= nsig_cut * sigma)'], {}), '(dist >= nsig_cut * sigma)\n', (1455, 1481), True, 'import numpy as np\n'), ((1500, 1532), 'numpy.zeros_like', 'np.zeros_like', (['mask'], {'dtype': 'float'}), '(mask, dtype=float)\n', (1513, 1532), True, 'import numpy as np\n'), ((1574, 1645), 'numpy.exp', 'np.exp', (['(-(dist[gauss_dists] - nsig_cut * sigma) ** 2 / (2 * sigma ** 2))'], {}), '(-(dist[gauss_dists] - nsig_cut * sigma) ** 2 / (2 * sigma ** 2))\n', (1580, 1645), True, 'import numpy as np\n'), ((1723, 1744), 'numpy.isfinite', 'np.isfinite', (['pb_plane'], {}), '(pb_plane)\n', (1734, 1744), True, 'import numpy as np\n'), ((1950, 2035), 'os.path.join', 'os.path.join', (['gbt_path', '"""17B-162_items/m33_gbt_vlsr_highres_Tmb_17B162_1kms.fits"""'], {}), "(gbt_path,\n '17B-162_items/m33_gbt_vlsr_highres_Tmb_17B162_1kms.fits')\n", (1962, 2035), False, 'import os\n'), ((2496, 2520), 'astropy.units.dimensionless_angles', 'u.dimensionless_angles', ([], {}), '()\n', (2518, 2520), True, 'import astropy.units as u\n'), ((3644, 3662), 'numpy.hstack', 'np.hstack', (['low_pts'], {}), '(low_pts)\n', (3653, 3662), True, 'import numpy as np\n'), ((3664, 3683), 'numpy.hstack', 'np.hstack', (['high_pts'], {}), '(high_pts)\n', (3673, 3683), True, 'import numpy as np\n'), ((3887, 3960), 'paths.allfigs_path', 'allfigs_path', (['"""Imaging/ratio_hist_17B_vla_gbt_9.8arcmin_v3_w_weights.png"""'], {}), "('Imaging/ratio_hist_17B_vla_gbt_9.8arcmin_v3_w_weights.png')\n", (3899, 3960), False, 'from paths import seventeenB_HI_data_02kms_path, 
seventeenB_HI_data_1kms_path, data_path, allfigs_path\n'), ((3974, 4047), 'paths.allfigs_path', 'allfigs_path', (['"""Imaging/ratio_hist_17B_vla_gbt_9.8arcmin_v3_w_weights.pdf"""'], {}), "('Imaging/ratio_hist_17B_vla_gbt_9.8arcmin_v3_w_weights.pdf')\n", (3986, 4047), False, 'from paths import seventeenB_HI_data_02kms_path, seventeenB_HI_data_1kms_path, data_path, allfigs_path\n'), ((4367, 4428), 'uvcombine.scale_factor.find_scale_factor', 'find_scale_factor', (['low', 'high'], {'method': '"""distrib"""', 'verbose': '(False)'}), "(low, high, method='distrib', verbose=False)\n", (4384, 4428), False, 'from uvcombine.scale_factor import find_scale_factor\n'), ((4667, 4727), 'uvcombine.scale_factor.find_scale_factor', 'find_scale_factor', (['low', 'high'], {'method': '"""linfit"""', 'verbose': '(False)'}), "(low, high, method='linfit', verbose=False)\n", (4684, 4727), False, 'from uvcombine.scale_factor import find_scale_factor\n'), ((5556, 5642), 'paths.allfigs_path', 'allfigs_path', (['"""Imaging/ratio_hist_perchan_17B_vla_gbt_9.8arcmin_v3_w_weights.png"""'], {}), "(\n 'Imaging/ratio_hist_perchan_17B_vla_gbt_9.8arcmin_v3_w_weights.png')\n", (5568, 5642), False, 'from paths import seventeenB_HI_data_02kms_path, seventeenB_HI_data_1kms_path, data_path, allfigs_path\n'), ((5651, 5737), 'paths.allfigs_path', 'allfigs_path', (['"""Imaging/ratio_hist_perchan_17B_vla_gbt_9.8arcmin_v3_w_weights.pdf"""'], {}), "(\n 'Imaging/ratio_hist_perchan_17B_vla_gbt_9.8arcmin_v3_w_weights.pdf')\n", (5663, 5737), False, 'from paths import seventeenB_HI_data_02kms_path, seventeenB_HI_data_1kms_path, data_path, allfigs_path\n'), ((5973, 6003), 'numpy.hstack', 'np.hstack', (['low_pts[chan_range]'], {}), '(low_pts[chan_range])\n', (5982, 6003), True, 'import numpy as np\n'), ((6027, 6058), 'numpy.hstack', 'np.hstack', (['high_pts[chan_range]'], {}), '(high_pts[chan_range])\n', (6036, 6058), True, 'import numpy as np\n'), ((6230, 6356), 'paths.allfigs_path', 'allfigs_path', (['f"""Imaging/ratio_hist_17B_vla_gbt_9.8arcmin_chan_{chan_range.start}_{chan_range.stop}_v3_w_weights.png"""'], {}), "(\n f'Imaging/ratio_hist_17B_vla_gbt_9.8arcmin_chan_{chan_range.start}_{chan_range.stop}_v3_w_weights.png'\n )\n", (6242, 6356), False, 'from paths import seventeenB_HI_data_02kms_path, seventeenB_HI_data_1kms_path, data_path, allfigs_path\n'), ((6360, 6486), 'paths.allfigs_path', 'allfigs_path', (['f"""Imaging/ratio_hist_17B_vla_gbt_9.8arcmin_chan_{chan_range.start}_{chan_range.stop}_v3_w_weights.pdf"""'], {}), "(\n f'Imaging/ratio_hist_17B_vla_gbt_9.8arcmin_chan_{chan_range.start}_{chan_range.stop}_v3_w_weights.pdf'\n )\n", (6372, 6486), False, 'from paths import seventeenB_HI_data_02kms_path, seventeenB_HI_data_1kms_path, data_path, allfigs_path\n'), ((1378, 1429), 'numpy.logical_and', 'np.logical_and', (['(dist < nsig_cut * sigma)', '(dist > 0.0)'], {}), '(dist < nsig_cut * sigma, dist > 0.0)\n', (1392, 1429), True, 'import numpy as np\n'), ((2455, 2467), 'astropy.units.spectral', 'u.spectral', ([], {}), '()\n', (2465, 2467), True, 'import astropy.units as u\n'), ((2085, 2097), 'astropy.units.spectral', 'u.spectral', ([], {}), '()\n', (2095, 2097), True, 'import astropy.units as u\n')]
|
# Generated by Django 3.2.5 on 2021-08-05 18:01
import django.core.validators
from django.db import migrations, models
import re
class Migration(migrations.Migration):
dependencies = [
('exams', '0019_auto_20210805_1334'),
]
operations = [
migrations.AlterField(
model_name='examattempt',
name='guess1',
field=models.IntegerField(blank=True, null=True, validators=[django.core.validators.MinValueValidator(-1000000000), django.core.validators.MaxValueValidator(1000000000)], verbose_name='Problem 1 response'),
),
migrations.AlterField(
model_name='examattempt',
name='guess2',
field=models.IntegerField(blank=True, null=True, validators=[django.core.validators.MinValueValidator(-1000000000), django.core.validators.MaxValueValidator(1000000000)], verbose_name='Problem 2 response'),
),
migrations.AlterField(
model_name='examattempt',
name='guess3',
field=models.IntegerField(blank=True, null=True, validators=[django.core.validators.MinValueValidator(-1000000000), django.core.validators.MaxValueValidator(1000000000)], verbose_name='Problem 3 response'),
),
migrations.AlterField(
model_name='examattempt',
name='guess4',
field=models.IntegerField(blank=True, null=True, validators=[django.core.validators.MinValueValidator(-1000000000), django.core.validators.MaxValueValidator(1000000000)], verbose_name='Problem 4 response'),
),
migrations.AlterField(
model_name='examattempt',
name='guess5',
field=models.IntegerField(blank=True, null=True, validators=[django.core.validators.MinValueValidator(-1000000000), django.core.validators.MaxValueValidator(1000000000)], verbose_name='Problem 5 response'),
),
migrations.AlterField(
model_name='practiceexam',
name='answer1',
field=models.CharField(default='', max_length=64, validators=[django.core.validators.RegexValidator(re.compile('^\\d+(?:,\\d+)*\\Z'), code='invalid', message='Enter only digits separated by commas.')]),
preserve_default=False,
),
migrations.AlterField(
model_name='practiceexam',
name='answer2',
field=models.CharField(default='', max_length=64, validators=[django.core.validators.RegexValidator(re.compile('^\\d+(?:,\\d+)*\\Z'), code='invalid', message='Enter only digits separated by commas.')]),
preserve_default=False,
),
migrations.AlterField(
model_name='practiceexam',
name='answer3',
field=models.CharField(default='', max_length=64, validators=[django.core.validators.RegexValidator(re.compile('^\\d+(?:,\\d+)*\\Z'), code='invalid', message='Enter only digits separated by commas.')]),
preserve_default=False,
),
migrations.AlterField(
model_name='practiceexam',
name='answer4',
field=models.CharField(default='', max_length=64, validators=[django.core.validators.RegexValidator(re.compile('^\\d+(?:,\\d+)*\\Z'), code='invalid', message='Enter only digits separated by commas.')]),
preserve_default=False,
),
migrations.AlterField(
model_name='practiceexam',
name='answer5',
field=models.CharField(default='', max_length=64, validators=[django.core.validators.RegexValidator(re.compile('^\\d+(?:,\\d+)*\\Z'), code='invalid', message='Enter only digits separated by commas.')]),
preserve_default=False,
),
]
|
[
"re.compile"
] |
[((2105, 2137), 're.compile', 're.compile', (['"""^\\\\d+(?:,\\\\d+)*\\\\Z"""'], {}), "('^\\\\d+(?:,\\\\d+)*\\\\Z')\n", (2115, 2137), False, 'import re\n'), ((2465, 2497), 're.compile', 're.compile', (['"""^\\\\d+(?:,\\\\d+)*\\\\Z"""'], {}), "('^\\\\d+(?:,\\\\d+)*\\\\Z')\n", (2475, 2497), False, 'import re\n'), ((2825, 2857), 're.compile', 're.compile', (['"""^\\\\d+(?:,\\\\d+)*\\\\Z"""'], {}), "('^\\\\d+(?:,\\\\d+)*\\\\Z')\n", (2835, 2857), False, 'import re\n'), ((3185, 3217), 're.compile', 're.compile', (['"""^\\\\d+(?:,\\\\d+)*\\\\Z"""'], {}), "('^\\\\d+(?:,\\\\d+)*\\\\Z')\n", (3195, 3217), False, 'import re\n'), ((3545, 3577), 're.compile', 're.compile', (['"""^\\\\d+(?:,\\\\d+)*\\\\Z"""'], {}), "('^\\\\d+(?:,\\\\d+)*\\\\Z')\n", (3555, 3577), False, 'import re\n')]
|
# coding=utf-8
# *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Dict, List, Mapping, Optional, Tuple, Union
from .. import _utilities, _tables
__all__ = [
'GetAppResult',
'AwaitableGetAppResult',
'get_app',
]
@pulumi.output_type
class GetAppResult:
"""
A collection of values returned by getApp.
"""
def __init__(__self__, active_only=None, description=None, id=None, label=None, label_prefix=None, name=None, status=None):
if active_only and not isinstance(active_only, bool):
raise TypeError("Expected argument 'active_only' to be a bool")
pulumi.set(__self__, "active_only", active_only)
if description and not isinstance(description, str):
raise TypeError("Expected argument 'description' to be a str")
pulumi.set(__self__, "description", description)
if id and not isinstance(id, str):
raise TypeError("Expected argument 'id' to be a str")
pulumi.set(__self__, "id", id)
if label and not isinstance(label, str):
raise TypeError("Expected argument 'label' to be a str")
pulumi.set(__self__, "label", label)
if label_prefix and not isinstance(label_prefix, str):
raise TypeError("Expected argument 'label_prefix' to be a str")
pulumi.set(__self__, "label_prefix", label_prefix)
if name and not isinstance(name, str):
raise TypeError("Expected argument 'name' to be a str")
pulumi.set(__self__, "name", name)
if status and not isinstance(status, str):
raise TypeError("Expected argument 'status' to be a str")
pulumi.set(__self__, "status", status)
@property
@pulumi.getter(name="activeOnly")
def active_only(self) -> Optional[bool]:
return pulumi.get(self, "active_only")
@property
@pulumi.getter
def description(self) -> str:
"""
`description` of application.
"""
return pulumi.get(self, "description")
@property
@pulumi.getter
def id(self) -> Optional[str]:
"""
`id` of application.
"""
return pulumi.get(self, "id")
@property
@pulumi.getter
def label(self) -> Optional[str]:
"""
`label` of application.
"""
return pulumi.get(self, "label")
@property
@pulumi.getter(name="labelPrefix")
def label_prefix(self) -> Optional[str]:
return pulumi.get(self, "label_prefix")
@property
@pulumi.getter
def name(self) -> str:
"""
`name` of application.
"""
return pulumi.get(self, "name")
@property
@pulumi.getter
def status(self) -> str:
"""
`status` of application.
"""
return pulumi.get(self, "status")
class AwaitableGetAppResult(GetAppResult):
# pylint: disable=using-constant-test
def __await__(self):
if False:
yield self
return GetAppResult(
active_only=self.active_only,
description=self.description,
id=self.id,
label=self.label,
label_prefix=self.label_prefix,
name=self.name,
status=self.status)
def get_app(active_only: Optional[bool] = None,
id: Optional[str] = None,
label: Optional[str] = None,
label_prefix: Optional[str] = None,
opts: Optional[pulumi.InvokeOptions] = None) -> AwaitableGetAppResult:
"""
    Use this data source to retrieve an application from Okta.
## Example Usage
```python
import pulumi
import pulumi_okta as okta
example = okta.app.get_app(label="Example App")
```
:param bool active_only: tells the provider to query for only `ACTIVE` applications.
:param str id: `id` of application to retrieve, conflicts with `label` and `label_prefix`.
:param str label: The label of the app to retrieve, conflicts with `label_prefix` and `id`.
:param str label_prefix: Label prefix of the app to retrieve, conflicts with `label` and `id`. This will tell the provider to do a `starts with` query as opposed to an `equals` query.
"""
__args__ = dict()
__args__['activeOnly'] = active_only
__args__['id'] = id
__args__['label'] = label
__args__['labelPrefix'] = label_prefix
if opts is None:
opts = pulumi.InvokeOptions()
if opts.version is None:
opts.version = _utilities.get_version()
__ret__ = pulumi.runtime.invoke('okta:app/getApp:getApp', __args__, opts=opts, typ=GetAppResult).value
return AwaitableGetAppResult(
active_only=__ret__.active_only,
description=__ret__.description,
id=__ret__.id,
label=__ret__.label,
label_prefix=__ret__.label_prefix,
name=__ret__.name,
status=__ret__.status)
|
[
"pulumi.get",
"pulumi.getter",
"pulumi.set",
"pulumi.InvokeOptions",
"pulumi.runtime.invoke"
] |
[((1894, 1926), 'pulumi.getter', 'pulumi.getter', ([], {'name': '"""activeOnly"""'}), "(name='activeOnly')\n", (1907, 1926), False, 'import pulumi\n'), ((2545, 2578), 'pulumi.getter', 'pulumi.getter', ([], {'name': '"""labelPrefix"""'}), "(name='labelPrefix')\n", (2558, 2578), False, 'import pulumi\n'), ((797, 845), 'pulumi.set', 'pulumi.set', (['__self__', '"""active_only"""', 'active_only'], {}), "(__self__, 'active_only', active_only)\n", (807, 845), False, 'import pulumi\n'), ((990, 1038), 'pulumi.set', 'pulumi.set', (['__self__', '"""description"""', 'description'], {}), "(__self__, 'description', description)\n", (1000, 1038), False, 'import pulumi\n'), ((1156, 1186), 'pulumi.set', 'pulumi.set', (['__self__', '"""id"""', 'id'], {}), "(__self__, 'id', id)\n", (1166, 1186), False, 'import pulumi\n'), ((1313, 1349), 'pulumi.set', 'pulumi.set', (['__self__', '"""label"""', 'label'], {}), "(__self__, 'label', label)\n", (1323, 1349), False, 'import pulumi\n'), ((1497, 1547), 'pulumi.set', 'pulumi.set', (['__self__', '"""label_prefix"""', 'label_prefix'], {}), "(__self__, 'label_prefix', label_prefix)\n", (1507, 1547), False, 'import pulumi\n'), ((1671, 1705), 'pulumi.set', 'pulumi.set', (['__self__', '"""name"""', 'name'], {}), "(__self__, 'name', name)\n", (1681, 1705), False, 'import pulumi\n'), ((1835, 1873), 'pulumi.set', 'pulumi.set', (['__self__', '"""status"""', 'status'], {}), "(__self__, 'status', status)\n", (1845, 1873), False, 'import pulumi\n'), ((1987, 2018), 'pulumi.get', 'pulumi.get', (['self', '"""active_only"""'], {}), "(self, 'active_only')\n", (1997, 2018), False, 'import pulumi\n'), ((2164, 2195), 'pulumi.get', 'pulumi.get', (['self', '"""description"""'], {}), "(self, 'description')\n", (2174, 2195), False, 'import pulumi\n'), ((2333, 2355), 'pulumi.get', 'pulumi.get', (['self', '"""id"""'], {}), "(self, 'id')\n", (2343, 2355), False, 'import pulumi\n'), ((2499, 2524), 'pulumi.get', 'pulumi.get', (['self', '"""label"""'], {}), "(self, 'label')\n", (2509, 2524), False, 'import pulumi\n'), ((2639, 2671), 'pulumi.get', 'pulumi.get', (['self', '"""label_prefix"""'], {}), "(self, 'label_prefix')\n", (2649, 2671), False, 'import pulumi\n'), ((2803, 2827), 'pulumi.get', 'pulumi.get', (['self', '"""name"""'], {}), "(self, 'name')\n", (2813, 2827), False, 'import pulumi\n'), ((2963, 2989), 'pulumi.get', 'pulumi.get', (['self', '"""status"""'], {}), "(self, 'status')\n", (2973, 2989), False, 'import pulumi\n'), ((4582, 4604), 'pulumi.InvokeOptions', 'pulumi.InvokeOptions', ([], {}), '()\n', (4602, 4604), False, 'import pulumi\n'), ((4696, 4787), 'pulumi.runtime.invoke', 'pulumi.runtime.invoke', (['"""okta:app/getApp:getApp"""', '__args__'], {'opts': 'opts', 'typ': 'GetAppResult'}), "('okta:app/getApp:getApp', __args__, opts=opts, typ=\n GetAppResult)\n", (4717, 4787), False, 'import pulumi\n')]
|
#!/usr/bin/env python
# The shebang line above is needed only for unix-based systems.
# Written by <NAME>, <NAME>, <NAME>.
# March 2020.
#
import qcportal as ptl
from qcfractal import FractalSnowflake
import pandas as pd
import argparse
parser = argparse.ArgumentParser()
parser.add_argument("-d", "--dry-run", action="store_true")
args = parser.parse_args()
SNOWFLAKE = args.dry_run
if SNOWFLAKE:
snowflake = FractalSnowflake()
client = snowflake.client()
else:
client = ptl.FractalClient.from_file()
print(client)
# The new subset you want to add.
dataset_name = "ASCDB"
ds = ptl.collections.ReactionDataset(dataset_name, client=client)
# Add the paper
ds.data.metadata["citations"] = [
ptl.models.Citation(
bibtex="""
@article{morgante2019statistically,
title={Statistically representative databases for density functional theory via data science},
author={<NAME> and <NAME>},
journal={Physical Chemistry Chemical Physics},
volume={21},
number={35},
pages={19092--19103},
year={2019},
publisher={Royal Society of Chemistry}
}
""",
acs_citation="<NAME>. & <NAME>. Statistically representative databases for density functional theory via data science. <em>Phys. Chem. Chem. Phys., </em><b>2019</b><i>, 21</i>, 19092-19103.",
url="https://pubs.rsc.org/en/content/articlehtml/2019/cp/c9cp03211h",
doi="10.1039/C9CP03211H",
)
]
# The .csv file needed to build everything.
filename = "ASCDB.csv"
# We read the ASCDB.csv file. The encoding flag is optional,
# but necessary if the csv is generated (for example) with Microsoft Excel.
#
with open(filename, "r", encoding="utf-8-sig") as handle:
rxns = [x.split(",") for x in handle.read().splitlines()]
# Where to find the geometry files (in .xyz)
gpath = "ACCDB/Geometries"
# We put names and reactions in the following lists:
contrib_name = []
contrib_value = []
for row in rxns:
# Datapoint's name.
name = row[0]
# Datapoint's reference energy.
energy = row[1]
# Datapoint's reaction: from 2 to the end of the rxns list.
rxn = row[2:]
    # Split the reaction list: the first half holds molecule names, the second half the stoichiometric coefficients.
half = len(rxn) // 2
molecules = rxn[:half]
coefs = rxn[half:]
rxn_data = []
# This loop handles the definition of a reaction, putting together molecules
# and stoichiometric coefficients.
#
for mol_name, coef in zip(molecules, coefs):
mol = ptl.Molecule.from_file(gpath + "/" + mol_name + ".xyz")
coef = float(coef)
rxn_data.append((mol, coef))
rxn = {"default": rxn_data}
# We add the reaction to the dataset.
ds.add_rxn(name, rxn)
# We store the values to add in the "Contributed value" dictionary (see below).
contrib_name.append(name)
contrib_value.append(float(energy))
# Save the new subset.
ds.save()
#
# Adding a contributed value based on the ASCDB csv file and the molecules
# handled above.
#
contrib = {
"name": "Benchmark",
"theory_level": "CCSD(T), CASPT2, Experiment (see ref)",
"values": contrib_value,
"index": contrib_name,
"theory_level_details": {"driver": "energy"},
"units": "kcal / mol",
}
ds.units = "kcal/mol"
ds.set_default_benchmark("Benchmark")
ds.add_contributed_values(contrib)
ds.save()
# Test
ds = client.get_collection("ReactionDataset", dataset_name)
print(ds.list_values())
ds._ensure_contributed_values()
print(ds.get_values(native=False))
print(ds.data.metadata['citations'])
|
[
"qcportal.models.Citation",
"argparse.ArgumentParser",
"qcfractal.FractalSnowflake",
"qcportal.collections.ReactionDataset",
"qcportal.FractalClient.from_file",
"qcportal.Molecule.from_file"
] |
[((234, 259), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (257, 259), False, 'import argparse\n'), ((580, 640), 'qcportal.collections.ReactionDataset', 'ptl.collections.ReactionDataset', (['dataset_name'], {'client': 'client'}), '(dataset_name, client=client)\n', (611, 640), True, 'import qcportal as ptl\n'), ((403, 421), 'qcfractal.FractalSnowflake', 'FractalSnowflake', ([], {}), '()\n', (419, 421), False, 'from qcfractal import FractalSnowflake\n'), ((473, 502), 'qcportal.FractalClient.from_file', 'ptl.FractalClient.from_file', ([], {}), '()\n', (500, 502), True, 'import qcportal as ptl\n'), ((696, 1371), 'qcportal.models.Citation', 'ptl.models.Citation', ([], {'bibtex': '"""\n@article{morgante2019statistically,\n title={Statistically representative databases for density functional theory via data science},\n author={<NAME> and <NAME>},\n journal={Physical Chemistry Chemical Physics},\n volume={21},\n number={35},\n pages={19092--19103},\n year={2019},\n publisher={Royal Society of Chemistry}\n}\n"""', 'acs_citation': '"""<NAME>. & <NAME>. Statistically representative databases for density functional theory via data science. <em>Phys. Chem. Chem. Phys., </em><b>2019</b><i>, 21</i>, 19092-19103."""', 'url': '"""https://pubs.rsc.org/en/content/articlehtml/2019/cp/c9cp03211h"""', 'doi': '"""10.1039/C9CP03211H"""'}), '(bibtex=\n """\n@article{morgante2019statistically,\n title={Statistically representative databases for density functional theory via data science},\n author={<NAME> and <NAME>},\n journal={Physical Chemistry Chemical Physics},\n volume={21},\n number={35},\n pages={19092--19103},\n year={2019},\n publisher={Royal Society of Chemistry}\n}\n"""\n , acs_citation=\n \'<NAME>. & <NAME>. Statistically representative databases for density functional theory via data science. <em>Phys. Chem. Chem. Phys., </em><b>2019</b><i>, 21</i>, 19092-19103.\'\n , url=\'https://pubs.rsc.org/en/content/articlehtml/2019/cp/c9cp03211h\',\n doi=\'10.1039/C9CP03211H\')\n', (715, 1371), True, 'import qcportal as ptl\n'), ((2397, 2452), 'qcportal.Molecule.from_file', 'ptl.Molecule.from_file', (["(gpath + '/' + mol_name + '.xyz')"], {}), "(gpath + '/' + mol_name + '.xyz')\n", (2419, 2452), True, 'import qcportal as ptl\n')]
|
import pygame
class Font():
def __init__(self, name, size):
self._name = name
self._size = size
self._font = pygame.font.SysFont(name, size)
#TODO Add support for font files
def getFontName(self):
return self._name
def getFontSize(self):
return self._size
def render(self, text, antialias, color, background=None):
return self._font.render(text, antialias, color, background)
def size(self, text):
return self._font.size(text)
def get_height(self):
return self._font.get_height()
|
[
"pygame.font.SysFont"
] |
[((141, 172), 'pygame.font.SysFont', 'pygame.font.SysFont', (['name', 'size'], {}), '(name, size)\n', (160, 172), False, 'import pygame\n')]
|
from mite.utils import pack_msg
class DirectReceiverMock:
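    # Test double for a receiver: forwards each message to every registered
    # listener, and the pack_msg-encoded form to every raw listener.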
def __init__(self):
self._listeners = []
self._raw_listeners = []
def __call__(self, msg):
return
def add_listener(self, listener):
self._listeners.append(listener)
def add_raw_listener(self, raw_listener):
self._raw_listeners.append(raw_listener)
def recieve(self, msg):
for listener in self._listeners:
listener(msg)
packed_msg = pack_msg(msg)
for raw_listener in self._raw_listeners:
raw_listener(packed_msg)
|
[
"mite.utils.pack_msg"
] |
[((484, 497), 'mite.utils.pack_msg', 'pack_msg', (['msg'], {}), '(msg)\n', (492, 497), False, 'from mite.utils import pack_msg\n')]
|
from django.contrib.auth import authenticate, login
import graphene
import django_filters
from graphene_django import DjangoObjectType, DjangoListField
from graphene_django.filter import DjangoFilterConnectionField
from graphql_jwt.decorators import login_required
from .models import User
class UserType(DjangoObjectType):
class Meta:
model = User
fields = '__all__'
filter_fields = ['email']
interfaces = (graphene.relay.Node, )
class UserInput(graphene.InputObjectType):
email = graphene.String(required=True)
password = graphene.String(required=True)
first_name = graphene.String()
last_name = graphene.String()
class Query:
me = graphene.Field(UserType)
all_users = DjangoFilterConnectionField(UserType)
@login_required
def resolve_me(self, info):
user = info.context.user
return user
class RegisterUserMutation(graphene.Mutation):
class Arguments:
user_data = UserInput(required=True)
user = graphene.Field(UserType)
def mutate(self, info, user_data=None):
user = User.objects.create(
email=user_data.email
)
user.set_password(user_data.password)
user.save()
return RegisterUserMutation(user=user)
class CreateUserMutation(graphene.Mutation):
class Arguments:
user_data = UserInput(required=True)
user = graphene.Field(UserType)
def mutate(self, info, user_data=None):
user = User.objects.create(
email=user_data.email,
first_name=user_data.first_name,
last_name=user_data.last_name
)
user.set_password(user_data.password)
user.save()
return CreateUserMutation(user=user)
class UpdateUserMutation(graphene.Mutation):
class Arguments:
user_data = UserInput(required=True)
user = graphene.Field(UserType)
def mutate(self, info, user_data):
user = User.objects.get(email=user_data.email)
user.first_name = user_data.first_name
user.last_name = user_data.last_name
user.save()
return UpdateUserMutation(user=user)
class DeleteUserMutation(graphene.Mutation):
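    # Soft delete: marks the user inactive instead of removing the record.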
class Arguments:
email = graphene.String(required=True)
user = graphene.Field(UserType)
def mutate(self, info, email):
user = User.objects.get(email=email)
user.is_active = False
#shop.slug = slug+'_deleted'
user.save()
return DeleteUserMutation(user=user)
class UndeleteUserMutation(graphene.Mutation):
class Arguments:
email = graphene.String(required=True)
user = graphene.Field(UserType)
def mutate(self, info, email):
user = User.objects.get(email=email)
user.is_active = True
#shop.slug = slug.replace('_deleted', '')
user.save()
return UndeleteUserMutation(user=user)
class Mutation(graphene.ObjectType):
register_user = RegisterUserMutation.Field()
create_user = CreateUserMutation.Field()
update_user = UpdateUserMutation.Field()
delete_user = DeleteUserMutation.Field()
undelete_user = UndeleteUserMutation.Field()
|
[
"graphene.String",
"graphene.Field",
"graphene_django.filter.DjangoFilterConnectionField"
] |
[((527, 557), 'graphene.String', 'graphene.String', ([], {'required': '(True)'}), '(required=True)\n', (542, 557), False, 'import graphene\n'), ((573, 603), 'graphene.String', 'graphene.String', ([], {'required': '(True)'}), '(required=True)\n', (588, 603), False, 'import graphene\n'), ((621, 638), 'graphene.String', 'graphene.String', ([], {}), '()\n', (636, 638), False, 'import graphene\n'), ((655, 672), 'graphene.String', 'graphene.String', ([], {}), '()\n', (670, 672), False, 'import graphene\n'), ((697, 721), 'graphene.Field', 'graphene.Field', (['UserType'], {}), '(UserType)\n', (711, 721), False, 'import graphene\n'), ((738, 775), 'graphene_django.filter.DjangoFilterConnectionField', 'DjangoFilterConnectionField', (['UserType'], {}), '(UserType)\n', (765, 775), False, 'from graphene_django.filter import DjangoFilterConnectionField\n'), ((1010, 1034), 'graphene.Field', 'graphene.Field', (['UserType'], {}), '(UserType)\n', (1024, 1034), False, 'import graphene\n'), ((1399, 1423), 'graphene.Field', 'graphene.Field', (['UserType'], {}), '(UserType)\n', (1413, 1423), False, 'import graphene\n'), ((1878, 1902), 'graphene.Field', 'graphene.Field', (['UserType'], {}), '(UserType)\n', (1892, 1902), False, 'import graphene\n'), ((2284, 2308), 'graphene.Field', 'graphene.Field', (['UserType'], {}), '(UserType)\n', (2298, 2308), False, 'import graphene\n'), ((2653, 2677), 'graphene.Field', 'graphene.Field', (['UserType'], {}), '(UserType)\n', (2667, 2677), False, 'import graphene\n'), ((2241, 2271), 'graphene.String', 'graphene.String', ([], {'required': '(True)'}), '(required=True)\n', (2256, 2271), False, 'import graphene\n'), ((2610, 2640), 'graphene.String', 'graphene.String', ([], {'required': '(True)'}), '(required=True)\n', (2625, 2640), False, 'import graphene\n')]
|
"""Procedures to define the Command Line Interface (cli)"""
from pathlib import Path
import click
from conference_scheduler.scheduler import event_schedule_difference
from conference_scheduler.converter import solution_to_schedule
from conference_scheduler.validator import (
is_valid_solution, solution_violations)
import daiquiri
import scheduler.calculate as calc
from scheduler.decorators import timed
import scheduler.define as defn
from scheduler import convert, io, logging, session
logger = daiquiri.getLogger(__name__)
def events_and_slots(resources):
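    """Build events and slots from the resource definitions, attaching
    unavailability, clash and unsuitability constraints to the events."""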
slots = defn.slots(resources)
events = defn.events(resources)
unavailability = defn.unavailability(resources, slots)
clashes = defn.clashes(resources)
unsuitability = defn.unsuitability(resources, slots)
defn.add_unavailability_to_events(events, slots, unavailability)
defn.add_clashes_to_events(events, clashes)
defn.add_unsuitability_to_events(events, slots, unsuitability)
return events, slots
@click.version_option(message='%(prog)s %(version)s :: UK Python Association')
@click.group()
@click.option(
'--verbosity', '-v', default='info',
type=click.Choice(['critical', 'error', 'warning', 'info', 'debug']),
help='Logging verbosity')
def scheduler(verbosity):
pass
@scheduler.command()
@click.option(
'--verbosity', '-v', default='info',
type=click.Choice(['critical', 'error', 'warning', 'info', 'debug']),
help='Logging verbosity')
@click.option(
'--algorithm', '-a', default='pulp_cbc_cmd',
type=click.Choice(
['pulp_cbc_cmd', 'glpk', 'hill_climber', 'simulated_annealing']),
help='Solver algorithm')
@click.option(
'--objective', '-o', default=None,
type=click.Choice(['efficiency', 'equity', 'consistency']),
help='Objective Function')
@click.option('--diff/--no-diff', default=False, help='Show schedule diff')
@click.option(
'--input_dir', '-i', default=None, help='Directory for input files')
@click.option(
'--solution_dir', '-s', default=None, help='Directory for solution files')
@click.option(
'--build_dir', '-b', default=None, help='Directory for output yaml files')
@timed
def build(
verbosity, algorithm, objective, diff, input_dir, solution_dir, build_dir
):
logging.setup(verbosity)
if input_dir:
session.folders['input'] = Path(input_dir)
if solution_dir:
session.folders['solution'] = Path(solution_dir)
if build_dir:
session.folders['build'] = Path(build_dir)
resources = defn.resources()
events, slots = events_and_slots(resources)
slots_by_index = {
idx: f'{slot.starts_at} {slot.venue}'
for idx, slot in enumerate(slots)}
logger.debug(f'\nSlots List:\n{slots_by_index}')
kwargs = {}
if objective == 'consistency' or algorithm == 'simulated_annealing' or diff:
original_solution = io.import_solution(session.folders['solution'])
revised_solution = [
item for item in original_solution
if item[0] < len(events)]
original_schedule = solution_to_schedule(
revised_solution, events, slots)
diff = True
kwargs['original_schedule'] = original_schedule
solution = calc.solution(events, slots, algorithm, objective, **kwargs)
if diff:
schedule = solution_to_schedule(solution, events, slots)
event_diff = event_schedule_difference(original_schedule, schedule)
logger.debug(f'\nevent_diff:')
for item in event_diff:
logger.debug(f'{item.event.name} has moved from {item.old_slot.venue} at {item.old_slot.starts_at} to {item.new_slot.venue} at {item.new_slot.starts_at}')
if solution is not None:
allocations = defn.allocations(resources)
unbounded = defn.unbounded(resources)
defn.add_allocations(events, slots, solution, allocations, unbounded)
logger.debug(convert.schedule_to_text(solution, events, slots))
io.export_solution_and_definition(
resources, events, slots, allocations, solution,
session.folders['solution'])
@scheduler.command()
@click.option(
'--verbosity', '-v', default='info',
type=click.Choice(['critical', 'error', 'warning', 'info', 'debug']),
help='Logging verbosity')
@click.option(
'--input_dir', '-i', default=None, help='Directory for input files')
@click.option(
'--solution_dir', '-s', default=None, help='Directory for solution files')
@click.option(
'--reload/--no-reload', default=False, help='Reload YAML definition')
@timed
def validate(verbosity, input_dir, solution_dir, reload):
logging.setup(verbosity)
if solution_dir:
session.folders['solution'] = Path(solution_dir)
solution = io.import_solution(session.folders['solution'])
if reload:
resources = defn.resources()
events, slots = events_and_slots(resources)
original_solution = io.import_solution(session.folders['solution'])
solution = [
item for item in original_solution
if item[0] < len(events)]
else:
solution = io.import_solution(session.folders['solution'])
definition = io.import_schedule_definition(session.folders['solution'])
events = definition['events']
slots = definition['slots']
logger.info('Validating schedule...')
if is_valid_solution(solution, events, slots):
logger.info('Imported solution is valid')
else:
for v in solution_violations(
solution, definition['events'], definition['slots']):
logger.error(v)
|
[
"click.version_option",
"scheduler.define.add_unavailability_to_events",
"click.option",
"scheduler.define.add_clashes_to_events",
"scheduler.define.add_allocations",
"scheduler.io.export_solution_and_definition",
"pathlib.Path",
"daiquiri.getLogger",
"scheduler.define.add_unsuitability_to_events",
"scheduler.calculate.solution",
"click.Choice",
"conference_scheduler.scheduler.event_schedule_difference",
"conference_scheduler.validator.solution_violations",
"scheduler.logging.setup",
"scheduler.define.events",
"scheduler.define.unavailability",
"click.group",
"scheduler.define.unbounded",
"scheduler.define.resources",
"conference_scheduler.validator.is_valid_solution",
"scheduler.define.clashes",
"scheduler.convert.schedule_to_text",
"scheduler.io.import_schedule_definition",
"scheduler.define.unsuitability",
"scheduler.define.slots",
"scheduler.define.allocations",
"scheduler.io.import_solution",
"conference_scheduler.converter.solution_to_schedule"
] |
[((506, 534), 'daiquiri.getLogger', 'daiquiri.getLogger', (['__name__'], {}), '(__name__)\n', (524, 534), False, 'import daiquiri\n'), ((1007, 1084), 'click.version_option', 'click.version_option', ([], {'message': '"""%(prog)s %(version)s :: UK Python Association"""'}), "(message='%(prog)s %(version)s :: UK Python Association')\n", (1027, 1084), False, 'import click\n'), ((1086, 1099), 'click.group', 'click.group', ([], {}), '()\n', (1097, 1099), False, 'import click\n'), ((1818, 1892), 'click.option', 'click.option', (['"""--diff/--no-diff"""'], {'default': '(False)', 'help': '"""Show schedule diff"""'}), "('--diff/--no-diff', default=False, help='Show schedule diff')\n", (1830, 1892), False, 'import click\n'), ((1894, 1980), 'click.option', 'click.option', (['"""--input_dir"""', '"""-i"""'], {'default': 'None', 'help': '"""Directory for input files"""'}), "('--input_dir', '-i', default=None, help=\n 'Directory for input files')\n", (1906, 1980), False, 'import click\n'), ((1982, 2074), 'click.option', 'click.option', (['"""--solution_dir"""', '"""-s"""'], {'default': 'None', 'help': '"""Directory for solution files"""'}), "('--solution_dir', '-s', default=None, help=\n 'Directory for solution files')\n", (1994, 2074), False, 'import click\n'), ((2076, 2168), 'click.option', 'click.option', (['"""--build_dir"""', '"""-b"""'], {'default': 'None', 'help': '"""Directory for output yaml files"""'}), "('--build_dir', '-b', default=None, help=\n 'Directory for output yaml files')\n", (2088, 2168), False, 'import click\n'), ((4297, 4383), 'click.option', 'click.option', (['"""--input_dir"""', '"""-i"""'], {'default': 'None', 'help': '"""Directory for input files"""'}), "('--input_dir', '-i', default=None, help=\n 'Directory for input files')\n", (4309, 4383), False, 'import click\n'), ((4385, 4477), 'click.option', 'click.option', (['"""--solution_dir"""', '"""-s"""'], {'default': 'None', 'help': '"""Directory for solution files"""'}), "('--solution_dir', '-s', default=None, help=\n 'Directory for solution files')\n", (4397, 4477), False, 'import click\n'), ((4479, 4566), 'click.option', 'click.option', (['"""--reload/--no-reload"""'], {'default': '(False)', 'help': '"""Reload YAML definition"""'}), "('--reload/--no-reload', default=False, help=\n 'Reload YAML definition')\n", (4491, 4566), False, 'import click\n'), ((582, 603), 'scheduler.define.slots', 'defn.slots', (['resources'], {}), '(resources)\n', (592, 603), True, 'import scheduler.define as defn\n'), ((617, 639), 'scheduler.define.events', 'defn.events', (['resources'], {}), '(resources)\n', (628, 639), True, 'import scheduler.define as defn\n'), ((661, 698), 'scheduler.define.unavailability', 'defn.unavailability', (['resources', 'slots'], {}), '(resources, slots)\n', (680, 698), True, 'import scheduler.define as defn\n'), ((713, 736), 'scheduler.define.clashes', 'defn.clashes', (['resources'], {}), '(resources)\n', (725, 736), True, 'import scheduler.define as defn\n'), ((757, 793), 'scheduler.define.unsuitability', 'defn.unsuitability', (['resources', 'slots'], {}), '(resources, slots)\n', (775, 793), True, 'import scheduler.define as defn\n'), ((799, 863), 'scheduler.define.add_unavailability_to_events', 'defn.add_unavailability_to_events', (['events', 'slots', 'unavailability'], {}), '(events, slots, unavailability)\n', (832, 863), True, 'import scheduler.define as defn\n'), ((868, 911), 'scheduler.define.add_clashes_to_events', 'defn.add_clashes_to_events', (['events', 'clashes'], {}), '(events, clashes)\n', (894, 911), True, 
'import scheduler.define as defn\n'), ((916, 978), 'scheduler.define.add_unsuitability_to_events', 'defn.add_unsuitability_to_events', (['events', 'slots', 'unsuitability'], {}), '(events, slots, unsuitability)\n', (948, 978), True, 'import scheduler.define as defn\n'), ((2272, 2296), 'scheduler.logging.setup', 'logging.setup', (['verbosity'], {}), '(verbosity)\n', (2285, 2296), False, 'from scheduler import convert, io, logging, session\n'), ((2532, 2548), 'scheduler.define.resources', 'defn.resources', ([], {}), '()\n', (2546, 2548), True, 'import scheduler.define as defn\n'), ((3238, 3298), 'scheduler.calculate.solution', 'calc.solution', (['events', 'slots', 'algorithm', 'objective'], {}), '(events, slots, algorithm, objective, **kwargs)\n', (3251, 3298), True, 'import scheduler.calculate as calc\n'), ((4636, 4660), 'scheduler.logging.setup', 'logging.setup', (['verbosity'], {}), '(verbosity)\n', (4649, 4660), False, 'from scheduler import convert, io, logging, session\n'), ((4755, 4802), 'scheduler.io.import_solution', 'io.import_solution', (["session.folders['solution']"], {}), "(session.folders['solution'])\n", (4773, 4802), False, 'from scheduler import convert, io, logging, session\n'), ((5371, 5413), 'conference_scheduler.validator.is_valid_solution', 'is_valid_solution', (['solution', 'events', 'slots'], {}), '(solution, events, slots)\n', (5388, 5413), False, 'from conference_scheduler.validator import is_valid_solution, solution_violations\n'), ((1165, 1228), 'click.Choice', 'click.Choice', (["['critical', 'error', 'warning', 'info', 'debug']"], {}), "(['critical', 'error', 'warning', 'info', 'debug'])\n", (1177, 1228), False, 'import click\n'), ((2350, 2365), 'pathlib.Path', 'Path', (['input_dir'], {}), '(input_dir)\n', (2354, 2365), False, 'from pathlib import Path\n'), ((2426, 2444), 'pathlib.Path', 'Path', (['solution_dir'], {}), '(solution_dir)\n', (2430, 2444), False, 'from pathlib import Path\n'), ((2499, 2514), 'pathlib.Path', 'Path', (['build_dir'], {}), '(build_dir)\n', (2503, 2514), False, 'from pathlib import Path\n'), ((2889, 2936), 'scheduler.io.import_solution', 'io.import_solution', (["session.folders['solution']"], {}), "(session.folders['solution'])\n", (2907, 2936), False, 'from scheduler import convert, io, logging, session\n'), ((3079, 3132), 'conference_scheduler.converter.solution_to_schedule', 'solution_to_schedule', (['revised_solution', 'events', 'slots'], {}), '(revised_solution, events, slots)\n', (3099, 3132), False, 'from conference_scheduler.converter import solution_to_schedule\n'), ((3332, 3377), 'conference_scheduler.converter.solution_to_schedule', 'solution_to_schedule', (['solution', 'events', 'slots'], {}), '(solution, events, slots)\n', (3352, 3377), False, 'from conference_scheduler.converter import solution_to_schedule\n'), ((3399, 3453), 'conference_scheduler.scheduler.event_schedule_difference', 'event_schedule_difference', (['original_schedule', 'schedule'], {}), '(original_schedule, schedule)\n', (3424, 3453), False, 'from conference_scheduler.scheduler import event_schedule_difference\n'), ((3744, 3771), 'scheduler.define.allocations', 'defn.allocations', (['resources'], {}), '(resources)\n', (3760, 3771), True, 'import scheduler.define as defn\n'), ((3792, 3817), 'scheduler.define.unbounded', 'defn.unbounded', (['resources'], {}), '(resources)\n', (3806, 3817), True, 'import scheduler.define as defn\n'), ((3826, 3895), 'scheduler.define.add_allocations', 'defn.add_allocations', (['events', 'slots', 'solution', 'allocations', 
'unbounded'], {}), '(events, slots, solution, allocations, unbounded)\n', (3846, 3895), True, 'import scheduler.define as defn\n'), ((3976, 4091), 'scheduler.io.export_solution_and_definition', 'io.export_solution_and_definition', (['resources', 'events', 'slots', 'allocations', 'solution', "session.folders['solution']"], {}), "(resources, events, slots, allocations,\n solution, session.folders['solution'])\n", (4009, 4091), False, 'from scheduler import convert, io, logging, session\n'), ((1383, 1446), 'click.Choice', 'click.Choice', (["['critical', 'error', 'warning', 'info', 'debug']"], {}), "(['critical', 'error', 'warning', 'info', 'debug'])\n", (1395, 1446), False, 'import click\n'), ((1551, 1628), 'click.Choice', 'click.Choice', (["['pulp_cbc_cmd', 'glpk', 'hill_climber', 'simulated_annealing']"], {}), "(['pulp_cbc_cmd', 'glpk', 'hill_climber', 'simulated_annealing'])\n", (1563, 1628), False, 'import click\n'), ((1731, 1784), 'click.Choice', 'click.Choice', (["['efficiency', 'equity', 'consistency']"], {}), "(['efficiency', 'equity', 'consistency'])\n", (1743, 1784), False, 'import click\n'), ((4720, 4738), 'pathlib.Path', 'Path', (['solution_dir'], {}), '(solution_dir)\n', (4724, 4738), False, 'from pathlib import Path\n'), ((4839, 4855), 'scheduler.define.resources', 'defn.resources', ([], {}), '()\n', (4853, 4855), True, 'import scheduler.define as defn\n'), ((4936, 4983), 'scheduler.io.import_solution', 'io.import_solution', (["session.folders['solution']"], {}), "(session.folders['solution'])\n", (4954, 4983), False, 'from scheduler import convert, io, logging, session\n'), ((5119, 5166), 'scheduler.io.import_solution', 'io.import_solution', (["session.folders['solution']"], {}), "(session.folders['solution'])\n", (5137, 5166), False, 'from scheduler import convert, io, logging, session\n'), ((5188, 5246), 'scheduler.io.import_schedule_definition', 'io.import_schedule_definition', (["session.folders['solution']"], {}), "(session.folders['solution'])\n", (5217, 5246), False, 'from scheduler import convert, io, logging, session\n'), ((5492, 5564), 'conference_scheduler.validator.solution_violations', 'solution_violations', (['solution', "definition['events']", "definition['slots']"], {}), "(solution, definition['events'], definition['slots'])\n", (5511, 5564), False, 'from conference_scheduler.validator import is_valid_solution, solution_violations\n'), ((4201, 4264), 'click.Choice', 'click.Choice', (["['critical', 'error', 'warning', 'info', 'debug']"], {}), "(['critical', 'error', 'warning', 'info', 'debug'])\n", (4213, 4264), False, 'import click\n'), ((3917, 3966), 'scheduler.convert.schedule_to_text', 'convert.schedule_to_text', (['solution', 'events', 'slots'], {}), '(solution, events, slots)\n', (3941, 3966), False, 'from scheduler import convert, io, logging, session\n')]
|
# AUTOGENERATED! DO NOT EDIT! File to edit: utils.ipynb (unless otherwise specified).
__all__ = ['load_config', 'config', 'load_yaml', 'default_yaml', 'dict_to_paras']
# Cell
import pkg_resources
import configparser
import yaml
# Cell
def load_config(*configs):
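    # Read one or more INI files into a single ConfigParser instance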
config = configparser.ConfigParser()
config.read(configs)
return config
def config(new_config=None):
default_config=pkg_resources.resource_filename('pybiotools4p','default.ini')
if None is new_config:
print('loading default_config['+default_config+']')
return load_config(default_config)
else:
print('loading default_config and '+ new_config)
return load_config(pkg_resources.resource_filename('pybiotools4p','default.ini'),new_config)
def load_yaml(*yamls):
my_dict={}
for y in yamls:
with open(y,'r') as yf:
my_dict.update(yaml.load(yf))
return my_dict
def default_yaml(new_yaml=None):
default_config=pkg_resources.resource_filename('pybiotools4p','default.yaml')
if None is new_yaml:
print('loading default_config['+default_config+']')
return load_yaml(default_config)
else:
print('loading default_config and '+ new_yaml)
return load_yaml(pkg_resources.resource_filename('pybiotools4p','default.yaml'),new_yaml)
def dict_to_paras(mydict):
'''
using dict to store extension parameters
'''
return ' '.join([f'{i} {mydict[i]}' for i in mydict.keys()])
|
[
"yaml.load",
"configparser.ConfigParser",
"pkg_resources.resource_filename"
] |
[((280, 307), 'configparser.ConfigParser', 'configparser.ConfigParser', ([], {}), '()\n', (305, 307), False, 'import configparser\n'), ((400, 462), 'pkg_resources.resource_filename', 'pkg_resources.resource_filename', (['"""pybiotools4p"""', '"""default.ini"""'], {}), "('pybiotools4p', 'default.ini')\n", (431, 462), False, 'import pkg_resources\n'), ((966, 1029), 'pkg_resources.resource_filename', 'pkg_resources.resource_filename', (['"""pybiotools4p"""', '"""default.yaml"""'], {}), "('pybiotools4p', 'default.yaml')\n", (997, 1029), False, 'import pkg_resources\n'), ((686, 748), 'pkg_resources.resource_filename', 'pkg_resources.resource_filename', (['"""pybiotools4p"""', '"""default.ini"""'], {}), "('pybiotools4p', 'default.ini')\n", (717, 748), False, 'import pkg_resources\n'), ((1245, 1308), 'pkg_resources.resource_filename', 'pkg_resources.resource_filename', (['"""pybiotools4p"""', '"""default.yaml"""'], {}), "('pybiotools4p', 'default.yaml')\n", (1276, 1308), False, 'import pkg_resources\n'), ((879, 892), 'yaml.load', 'yaml.load', (['yf'], {}), '(yf)\n', (888, 892), False, 'import yaml\n')]
|
import os
import hashlib
def generate_password_hash(password):
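    # Derive a key with PBKDF2-HMAC-SHA256 (100,000 iterations) over a fresh 32-byte random salt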
salt = os.urandom(32)
key = hashlib.pbkdf2_hmac('sha256', password.encode('utf-8'), salt, 100000)
return (salt, key)
def check_password_hash(password, salt, key):
# Use the exact same setup you used to generate the key, but this time put in the password to check
new_key = hashlib.pbkdf2_hmac(
'sha256',
password.encode('utf-8'), # Convert the password to bytes
salt,
100000
)
success = False
if new_key == key:
success = True
print('Password is correct')
else:
print('Password is incorrect')
return success
|
[
"os.urandom"
] |
[((76, 90), 'os.urandom', 'os.urandom', (['(32)'], {}), '(32)\n', (86, 90), False, 'import os\n')]
|
#!/usr/bin/env python
# This file is part of the straitjacket project
# https://gitlab.com/mbarkhau/straitjacket
#
# Copyright (c) 2018 <NAME> (<EMAIL>) - MIT License
# SPDX-License-Identifier: MIT
try:
import backtrace
# To enable pretty tracebacks:
# echo "export ENABLE_BACKTRACE=1;" >> ~/.bashrc
backtrace.hook(align=True, strip_path=True, enable_on_envvar_only=True)
except ImportError:
pass
import sys
from . import sjfmt
if __name__ == '__main__':
sjfmt.main()
sys.exit(0)
|
[
"backtrace.hook",
"sys.exit"
] |
[((320, 391), 'backtrace.hook', 'backtrace.hook', ([], {'align': '(True)', 'strip_path': '(True)', 'enable_on_envvar_only': '(True)'}), '(align=True, strip_path=True, enable_on_envvar_only=True)\n', (334, 391), False, 'import backtrace\n'), ((505, 516), 'sys.exit', 'sys.exit', (['(0)'], {}), '(0)\n', (513, 516), False, 'import sys\n')]
|
"""terraform_to_ansible/parser.py"""
import json
import logging
import os
import subprocess
import sys
class Parser:
"""Main Terraform tfstate parser."""
def __init__(self, args):
"""Init a thing."""
# Define dictionary to hold all parsed resources
self.all_resources = {}
# Define Terraform tfstate file to load
self.tfstate = args.tfstate
# Define Terraform tfstate directory to load
self.tfstatedir = args.tfstatedir
# Setup logging
self.logger = logging.getLogger(__name__)
def load(self):
"""Load Terraform tfstate file."""
# Attempt to load tfstate file directly
if self.tfstate is not None:
# Log tfstate file path
self.logger.info("Loading --tfstate %s", self.tfstate)
try:
# Open tfstate file
with open(self.tfstate, "r") as stream:
# Load JSON data
try:
data = json.load(stream)
# Log and exit if JSON data not found
except json.JSONDecodeError as error:
self.logger.error(error)
sys.exit(1)
# Log and exit if file not found
except FileNotFoundError as error:
self.logger.error(error)
sys.exit(1)
# Attempt to load tfstate from directory using terraform state pull
else:
# Log tfstate directory
self.logger.info("Loading --tfstatedir %s", self.tfstatedir)
try:
# Capture current working directory prior to changing to the
                # tfstate directory. So, we can change back.
current_dir = os.getcwd()
# Change to the tfstate directory
os.chdir(self.tfstatedir)
try:
# Try to load JSON output from terraform state pull command
data = json.loads(
subprocess.getoutput("terraform state pull")
)
# Log and exit if JSON data not found
except json.decoder.JSONDecodeError as error:
self.logger.error(error)
sys.exit(1)
# Change back to the original current working directory
os.chdir(current_dir)
# Log and exit if file/directory not found
except FileNotFoundError as error:
self.logger.error(error)
sys.exit(1)
# Capture Terraform version from tfstate
terraform_version = data.get("terraform_version")
# Log Terraform version for additional logic if needed. Not used at
# this time.
self.logger.info("terraform_version: %s", terraform_version)
# Capture resources to parse
resources = data.get("resources")
if resources is None:
resources = []
modules = data.get("modules")
if modules is not None:
resources = modules[0].get("resources")
return resources
def parse(self):
"""Parse Terraform tfstate file."""
# Load resources up
resources = self.load()
# Check if resources are a list - newer Terraform versions
if isinstance(resources, list):
for resource in resources:
self.resource_types(resource)
instances = resource.get("instances")
if instances is not None:
for instance in instances:
self.all_resources[resource["type"]].append(
instance["attributes"]
)
# Check if resources are a dict - older Terraform versions
elif isinstance(resources, dict):
for resource, resource_config in resources.items():
self.resource_types(resource_config)
self.all_resources[resource_config["type"]].append(
resource_config["primary"]["attributes"]
)
return self.all_resources
def resource_types(self, resource):
"""Populate resource types."""
# Check to see if all_resources is already populated with resource type
resource_type_lookup = self.all_resources.get(resource["type"])
# Add resource type to all resources if not found in lookup
if resource_type_lookup is None:
self.all_resources[resource["type"]] = []
|
[
"json.load",
"os.getcwd",
"logging.getLogger",
"subprocess.getoutput",
"os.chdir",
"sys.exit"
] |
[((537, 564), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (554, 564), False, 'import logging\n'), ((1791, 1802), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (1800, 1802), False, 'import os\n'), ((1870, 1895), 'os.chdir', 'os.chdir', (['self.tfstatedir'], {}), '(self.tfstatedir)\n', (1878, 1895), False, 'import os\n'), ((2411, 2432), 'os.chdir', 'os.chdir', (['current_dir'], {}), '(current_dir)\n', (2419, 2432), False, 'import os\n'), ((1391, 1402), 'sys.exit', 'sys.exit', (['(1)'], {}), '(1)\n', (1399, 1402), False, 'import sys\n'), ((2593, 2604), 'sys.exit', 'sys.exit', (['(1)'], {}), '(1)\n', (2601, 2604), False, 'import sys\n'), ((1021, 1038), 'json.load', 'json.load', (['stream'], {}), '(stream)\n', (1030, 1038), False, 'import json\n'), ((2061, 2105), 'subprocess.getoutput', 'subprocess.getoutput', (['"""terraform state pull"""'], {}), "('terraform state pull')\n", (2081, 2105), False, 'import subprocess\n'), ((2310, 2321), 'sys.exit', 'sys.exit', (['(1)'], {}), '(1)\n', (2318, 2321), False, 'import sys\n'), ((1229, 1240), 'sys.exit', 'sys.exit', (['(1)'], {}), '(1)\n', (1237, 1240), False, 'import sys\n')]
|
#!/usr/bin/env python3
# Set this to True to enable building extensions using Cython.
# Set it to False to build extensions from the C file (that
# was previously created using Cython).
# Set it to 'auto' to build with Cython if available, otherwise
# from the C file.
import sys
from setuptools import setup, find_packages, Extension
from distutils.command.sdist import sdist as _sdist
import numpy
USE_CYTHON = "auto"
if USE_CYTHON:
try:
from Cython.Distutils import build_ext
from Cython.Build import cythonize
except ImportError:
if USE_CYTHON == "auto":
USE_CYTHON = False
else:
raise
class CythonModule(object):
def __init__(self, name: str, path: str):
self.name = name
self.path = path
@property
def name(self) -> str:
return self._name
@name.setter
def name(self, name: str) -> None:
self._name = name
@property
def path(self) -> str:
return self._path
@path.setter
def path(self, path: str) -> None:
self._path = path
@property
def pyx(self) -> str:
return self.path + ".pyx"
@property
def c(self) -> str:
return self.path + ".c"
cython_modules = [
CythonModule(
name="tyme.base_forecasters.exponential_smoothing_cy",
path="src/cython/exponential_smoothing_cy",
),
CythonModule(
name="tyme.base_forecasters.robust_exponential_smoothing_cy",
path="src/cython/robust_exponential_smoothing_cy",
),
]
if sys.version_info[0] == 2:
raise Exception("Python 2.x is no longer supported")
if USE_CYTHON:
class sdist(_sdist):
def run(self):
# Make sure the compiled Cython files in the distribution are up-to-date
cythonize([module.pyx for module in cython_modules])
_sdist.run(self)
ext_modules = [
Extension(module.name, [module.pyx]) for module in cython_modules
]
cmdclass = dict(build_ext=build_ext, sdist=sdist)
else:
ext_modules = [
Extension(module.name, [module.c]) for module in cython_modules
]
cmdclass = {}
requirements = [
"Bottleneck",
"cycler",
"kiwisolver",
"numpy",
"pandas",
"Pillow",
"pyparsing",
"python-dateutil",
"pytz",
"six",
"scipy",
"Cython",
]
requirements_dev = ["pytest", "pytest-cov", "Cython", "pre-commit", "tox"]
setup(
name="tyme",
# version="0.1.0",
description="A timeseries forecasting package, specialised in forecasting grouped timeseries",
author="<NAME>",
author_email="<EMAIL>",
url="http://github.com/sam-bailey/tyme",
packages=find_packages(where="src"),
package_dir={"": "src"},
cmdclass=cmdclass,
ext_modules=ext_modules,
include_dirs=[numpy.get_include()],
long_description=open("README.md").read(),
install_requires=requirements,
extras_require={"dev": requirements_dev},
license="MIT",
classifiers=[
"Development Status :: 4 - Beta",
"Intended Audience :: Developers",
"License :: OSI Approved :: MIT License",
"Programming Language :: Python",
"Programming Language :: Python :: 3",
"Programming Language :: Python :: 3.1",
"Programming Language :: Python :: 3.2",
"Programming Language :: Python :: 3.3",
"Programming Language :: Python :: 3.4",
"Programming Language :: Python :: 3.5",
"Programming Language :: Python :: 3.6",
"Programming Language :: Python :: 3.7",
"Programming Language :: Python :: 3.8",
"Programming Language :: Cython",
"Topic :: Scientific/Engineering :: Mathematics",
],
keywords="timeseries forecast forecasting time",
)
|
[
"setuptools.Extension",
"Cython.Build.cythonize",
"distutils.command.sdist.sdist.run",
"numpy.get_include",
"setuptools.find_packages"
] |
[((1910, 1946), 'setuptools.Extension', 'Extension', (['module.name', '[module.pyx]'], {}), '(module.name, [module.pyx])\n', (1919, 1946), False, 'from setuptools import setup, find_packages, Extension\n'), ((2070, 2104), 'setuptools.Extension', 'Extension', (['module.name', '[module.c]'], {}), '(module.name, [module.c])\n', (2079, 2104), False, 'from setuptools import setup, find_packages, Extension\n'), ((2689, 2715), 'setuptools.find_packages', 'find_packages', ([], {'where': '"""src"""'}), "(where='src')\n", (2702, 2715), False, 'from setuptools import setup, find_packages, Extension\n'), ((1799, 1851), 'Cython.Build.cythonize', 'cythonize', (['[module.pyx for module in cython_modules]'], {}), '([module.pyx for module in cython_modules])\n', (1808, 1851), False, 'from Cython.Build import cythonize\n'), ((1864, 1880), 'distutils.command.sdist.sdist.run', '_sdist.run', (['self'], {}), '(self)\n', (1874, 1880), True, 'from distutils.command.sdist import sdist as _sdist\n'), ((2816, 2835), 'numpy.get_include', 'numpy.get_include', ([], {}), '()\n', (2833, 2835), False, 'import numpy\n')]
|
from typing import Optional
from peek_plugin_base.PluginCommonEntryHookABC import PluginCommonEntryHookABC
from peek_plugin_base.agent.PeekAgentPlatformHookABC import PeekAgentPlatformHookABC
class PluginAgentEntryHookABC(PluginCommonEntryHookABC):
def __init__(self, pluginName: str, pluginRootDir: str, platform: PeekAgentPlatformHookABC):
PluginCommonEntryHookABC.__init__(self, pluginName=pluginName, pluginRootDir=pluginRootDir)
self._platform = platform
@property
def platform(self) -> PeekAgentPlatformHookABC:
return self._platform
@property
def publishedAgentApi(self) -> Optional[object]:
return None
|
[
"peek_plugin_base.PluginCommonEntryHookABC.PluginCommonEntryHookABC.__init__"
] |
[((358, 453), 'peek_plugin_base.PluginCommonEntryHookABC.PluginCommonEntryHookABC.__init__', 'PluginCommonEntryHookABC.__init__', (['self'], {'pluginName': 'pluginName', 'pluginRootDir': 'pluginRootDir'}), '(self, pluginName=pluginName,\n pluginRootDir=pluginRootDir)\n', (391, 453), False, 'from peek_plugin_base.PluginCommonEntryHookABC import PluginCommonEntryHookABC\n')]
|
import os, math, yaml
from emonitor.extensions import db
class Struct(dict):
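    # Exposes the given keyword entries as instance attributes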
def __init__(self, **entries):
self.__dict__.update(entries)
class Settings(db.Model):
"""Settings class"""
__tablename__ = 'settings'
__table_args__ = {'extend_existing': True}
id = db.Column(db.Integer, primary_key=True)
name = db.Column(db.String(64))
_value = db.Column('value', db.Text)
def __init__(self, name, value=""):
self.name = name
self._value = value
@property
def value(self):
return yaml.load(self._value)
@value.setter
def value(self, val):
self._value = yaml.safe_dump(val, encoding='utf-8')
@staticmethod
def num2deg(xtile, ytile, zoom=17 or db.config.get('DEFAULTZOOM')):
"""
Translate tile into coordinate (lat, lon)
:param xtile: x-coordinate of tile
:param ytile: y-coordinate of tile
:param zoom: zoom level
:return: lat, lon tuple
"""
n = 2.0 ** zoom
lon_deg = xtile / n * 360.0 - 180.0
lat_deg = math.degrees(math.atan(math.sinh(math.pi * (1 - 2 * ytile / n))))
return lat_deg, lon_deg
def getCarTypeNames(self):
return self.value
@staticmethod
def getCarTypes():
ctypes = Settings.query.filter_by(name='cartypes')
if ctypes.count():
return ctypes.one().value
return ""
@staticmethod
def get_byType(type):
return Settings.query.filter_by(name=type).first() or ""
@staticmethod
def getMapTiles(mid=0, zoom=17 or db.app.config.get('DEFAULTZOOM')):
from emonitor.modules.maps.map import Map
_map = Map.getMaps(mid)
tiles = []
try:
for ts in [f for f in os.listdir(_map.path + str(zoom) + '/') if f.endswith('png')]:
tiles.append(ts.replace('-', '/'))
except:
pass
return tiles
@staticmethod
def getFrontendSettings(area=""):
s = Settings.query.filter_by(name='frontend.default')
if s.count() == 1:
if area == "":
return s.first().value
elif area in s.first().value.keys():
return s.first().value[area]
return {'module': 'default', 'width': '.2', 'visible': '0', 'center': {'module': 'default'}, 'west': {'module': 'default', 'width': '.2'}, 'east': {'module': 'default', 'width': '.2'}}
@staticmethod
def get(option, default=''):
"""
Getter for option values
:param option: name as string
:param optional default: default value if not found in database
:return: value of option
"""
s = Settings.query.filter_by(name=option)
if s.count() == 1: # update
return s.first().value
return default # deliver default value
@staticmethod
def set(option, val):
"""
Setter for option
:param option: name as string
:param val: value of option
:return: value of option
"""
s = Settings.query.filter_by(name=option).first()
if s: # update settings
s.value = val
else: # add value
s = Settings(option, yaml.safe_dump(val, encoding='utf-8'))
db.session.add(s)
db.session.commit()
return s
@staticmethod
def getIntList(option, default=[]):
try:
return map(int, Settings.get(option, '').split(','))
except ValueError:
return default
@staticmethod
def getYaml(option):
try:
return Struct(**(Settings.get(option)))
except TypeError:
return Struct()
|
[
"yaml.load",
"emonitor.extensions.db.Column",
"yaml.safe_dump",
"emonitor.extensions.db.app.config.get",
"emonitor.extensions.db.session.commit",
"emonitor.extensions.db.session.add",
"emonitor.extensions.db.String",
"emonitor.extensions.db.config.get",
"emonitor.modules.maps.map.Map.getMaps",
"math.sinh"
] |
[((293, 332), 'emonitor.extensions.db.Column', 'db.Column', (['db.Integer'], {'primary_key': '(True)'}), '(db.Integer, primary_key=True)\n', (302, 332), False, 'from emonitor.extensions import db\n'), ((382, 409), 'emonitor.extensions.db.Column', 'db.Column', (['"""value"""', 'db.Text'], {}), "('value', db.Text)\n", (391, 409), False, 'from emonitor.extensions import db\n'), ((354, 367), 'emonitor.extensions.db.String', 'db.String', (['(64)'], {}), '(64)\n', (363, 367), False, 'from emonitor.extensions import db\n'), ((555, 577), 'yaml.load', 'yaml.load', (['self._value'], {}), '(self._value)\n', (564, 577), False, 'import os, math, yaml\n'), ((645, 682), 'yaml.safe_dump', 'yaml.safe_dump', (['val'], {'encoding': '"""utf-8"""'}), "(val, encoding='utf-8')\n", (659, 682), False, 'import os, math, yaml\n'), ((1692, 1708), 'emonitor.modules.maps.map.Map.getMaps', 'Map.getMaps', (['mid'], {}), '(mid)\n', (1703, 1708), False, 'from emonitor.modules.maps.map import Map\n'), ((3322, 3341), 'emonitor.extensions.db.session.commit', 'db.session.commit', ([], {}), '()\n', (3339, 3341), False, 'from emonitor.extensions import db\n'), ((743, 771), 'emonitor.extensions.db.config.get', 'db.config.get', (['"""DEFAULTZOOM"""'], {}), "('DEFAULTZOOM')\n", (756, 771), False, 'from emonitor.extensions import db\n'), ((1592, 1624), 'emonitor.extensions.db.app.config.get', 'db.app.config.get', (['"""DEFAULTZOOM"""'], {}), "('DEFAULTZOOM')\n", (1609, 1624), False, 'from emonitor.extensions import db\n'), ((3296, 3313), 'emonitor.extensions.db.session.add', 'db.session.add', (['s'], {}), '(s)\n', (3310, 3313), False, 'from emonitor.extensions import db\n'), ((1108, 1148), 'math.sinh', 'math.sinh', (['(math.pi * (1 - 2 * ytile / n))'], {}), '(math.pi * (1 - 2 * ytile / n))\n', (1117, 1148), False, 'import os, math, yaml\n'), ((3245, 3282), 'yaml.safe_dump', 'yaml.safe_dump', (['val'], {'encoding': '"""utf-8"""'}), "(val, encoding='utf-8')\n", (3259, 3282), False, 'import os, math, yaml\n')]
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.4'
# jupytext_version: 1.1.5
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # s_checklist_scenariobased_step04 [<img src="https://www.arpm.co/lab/icons/icon_permalink.png" width=30 height=30 style="display: inline;">](https://www.arpm.co/lab/redirect.php?code=s_checklist_scenariobased_step04&codeLang=Python)
# For details, see [here](https://www.arpm.co/lab/redirect.php?permalink=ex-vue-4-copmarg).
# +
import numpy as np
import pandas as pd
from scipy.stats import t as tstu
import matplotlib.pyplot as plt
from pandas.plotting import register_matplotlib_converters
register_matplotlib_converters()
from arpym.statistics import quantile_sp, simulate_markov_chain_multiv, \
simulate_t, project_trans_matrix
from arpym.tools import histogram_sp, add_logo
# -
# ## [Input parameters](https://www.arpm.co/lab/redirect.php?permalink=s_checklist_scenariobased_step04-parameters)
# t_now is 31-Aug-2012. Set t_hor>t_now
t_hor = np.datetime64('2012-10-26') # the future investment horizon
j_ = 5000 # number of scenarios
d_plot = 97 # projected risk driver to plot
# ## [Step 0](https://www.arpm.co/lab/redirect.php?permalink=s_checklist_scenariobased_step04-implementation-step00): Load data
# +
path = '../../../databases/temporary-databases/'
# Risk drivers identification
# realizations of risk drivers up to and including time t_now
db_riskdrivers_series = pd.read_csv(path + 'db_riskdrivers_series.csv',
index_col=0, parse_dates=True)
x = db_riskdrivers_series.values
risk_drivers_names = db_riskdrivers_series.columns
# additional information
db_riskdrivers_tools = pd.read_csv(path + 'db_riskdrivers_tools.csv')
d_ = int(db_riskdrivers_tools.d_.dropna())
d_credit = int(db_riskdrivers_tools.d_credit.dropna())
n_stocks = int(db_riskdrivers_tools.n_stocks.dropna())
d_implvol = int(db_riskdrivers_tools.d_implvol.dropna())
n_bonds = int(db_riskdrivers_tools.n_bonds.dropna())
i_bonds = n_bonds * 4 # 4 NS parameters x n_bonds
c_ = int(db_riskdrivers_tools.c_.dropna())
ratings_tnow = np.array(db_riskdrivers_tools.ratings_tnow.dropna())
t_now = np.datetime64(db_riskdrivers_tools.t_now[0], 'D')
# Quest for invariance
# values of invariants
db_invariants_series = pd.read_csv(path + 'db_invariants_series.csv',
index_col=0, parse_dates=True)
epsi = db_invariants_series.values
t_, i_ = np.shape(epsi)
# next step models
db_invariants_nextstep = pd.read_csv(path + 'db_invariants_nextstep.csv')
# parameters for next step models
db_invariants_param = pd.read_csv(path + 'db_invariants_param.csv', index_col=0)
# parameters for GARCH(1,1) next step models
db_garch_sig2 = pd.read_csv(path + 'db_garch_sig2.csv', index_col=0,
parse_dates=True)
# estimated annual credit transition matrix
p_credit = pd.read_csv(path +
'db_invariants_p_credit.csv').values.reshape(c_ + 1, c_ + 1)
# Estimation
# parameters for invariants modeled using Student t distribution
db_estimation_parametric = pd.read_csv(path + 'db_estimation_parametric.csv',
index_col=0)
# estimated probabilities for nonparametric distributions
db_estimation_nonparametric = pd.read_csv(path + 'db_estimation_nonparametric.csv',
index_col=False)
p_marginal = db_estimation_nonparametric.values
# parameters for estimated Student t copula
db_estimation_copula = pd.read_csv(path + 'db_estimation_copula.csv')
nu_copula = int(db_estimation_copula['nu'].iloc[0])
rho2_copula = np.array(db_estimation_copula['rho2']).reshape(i_, i_)
# parameters for the credit copula
db_estimation_credit_copula = pd.read_csv(path + 'db_estimation_credit_copula.csv')
rho2_credit = db_estimation_credit_copula.rho2_credit.values.reshape(2, 2)
nu_credit = db_estimation_credit_copula.nu_credit[0]
# -
# ## [Step 1](https://www.arpm.co/lab/redirect.php?permalink=s_checklist_scenariobased_step04-implementation-step01): Determine number of projection steps and scenario probabilities
# number of monitoring times
m_ = np.busday_count(t_now, t_hor)
# projection scenario probabilities
p = np.ones(j_) / j_
# invariants modeled parametrically
ind_parametric = np.arange(n_stocks + 1 + d_implvol,
n_stocks + 1 + d_implvol + i_bonds)
# invariants modeled nonparametrically
ind_nonparametric = list(set(range(i_)) - set(ind_parametric))
# ## [Step 2](https://www.arpm.co/lab/redirect.php?permalink=s_checklist_scenariobased_step04-implementation-step02): Projection of invariants
# +
epsi_proj = np.zeros((j_, m_, i_))
for m in range(m_):
# copula scenarios
# simulate standardized invariants scenarios for copula
epsi_tilde_proj = simulate_t(np.zeros(i_), rho2_copula, nu_copula, j_)
# generate invariants scenarios
# invariants modeled nonparametrically
for i in ind_nonparametric:
# project t-copula standardized invariants scenarios
u_proj = tstu.cdf(epsi_tilde_proj[:, i], nu_copula)
epsi_proj[:, m, i] = quantile_sp(u_proj, epsi[:, i], p_marginal[:, i])
# invariants modeled parametrically (estimated as Student t distributed)
for i in ind_parametric:
# project t-copula standardized invariants scenarios
u_proj = tstu.cdf(epsi_tilde_proj[:, i], nu_copula)
mu_marg = db_estimation_parametric.loc['mu', str(i)]
sig2_marg = db_estimation_parametric.loc['sig2', str(i)]
nu_marg = db_estimation_parametric.loc['nu', str(i)]
epsi_proj[:, m, i] = mu_marg + np.sqrt(sig2_marg) * tstu.ppf(u_proj, nu_marg)
# -
# ## [Step 3](https://www.arpm.co/lab/redirect.php?permalink=s_checklist_scenariobased_step04-implementation-step03): Projection of risk drivers
# +
x_proj = np.zeros((j_, m_ + 1, d_))
dx_proj = np.zeros((j_, m_ + 1, d_))
sig2_garch = np.zeros((j_, m_ + 1, d_))
a_garch = db_invariants_param.loc['a'].values
b_garch = db_invariants_param.loc['b'].values
c_garch = db_invariants_param.loc['c'].values
mu_garch = db_invariants_param.loc['mu'].values
# risk drivers at time t_now are the starting values for all scenarios
x_proj[:, 0, :] = db_riskdrivers_series.iloc[-1, :]
# initialize parameters for GARCH(1,1) projection
d_garch = [d for d in range(d_)
if db_invariants_nextstep.iloc[0, d] == 'GARCH(1,1)']
for d in d_garch:
sig2_garch[:, 0, d] = db_garch_sig2.iloc[-1, d]
dx_proj[:, 0, d] = x[-1, d] - x[-2, d]
# project daily scenarios
for m in range(1, m_ + 1):
for d in range(d_):
# risk drivers modeled as random walk
if db_invariants_nextstep.iloc[0, d] == 'Random walk':
x_proj[:, m, d] = x_proj[:, m - 1, d] + epsi_proj[:, m - 1, d]
# risk drivers modeled as GARCH(1,1)
elif db_invariants_nextstep.iloc[0, d] == 'GARCH(1,1)':
sig2_garch[:, m, d] = c_garch[d] + \
b_garch[d] * sig2_garch[:, m - 1, d] + \
a_garch[d] * (dx_proj[:, m - 1, d] - mu_garch[d]) ** 2
dx_proj[:, m, d] = mu_garch[d] + \
np.sqrt(sig2_garch[:, m, d]) * epsi_proj[:, m - 1, d]
x_proj[:, m, d] = x_proj[:, m - 1, d] + dx_proj[:, m, d]
# risk drivers modeled as AR(1)
elif db_invariants_nextstep.iloc[0, d] == 'AR(1)':
b_ar1 = db_invariants_param.loc['b'][d]
x_proj[:, m, d] = b_ar1 * x_proj[:, m - 1, d] + epsi_proj[:, m - 1, d]
# -
# ## [Step 4](https://www.arpm.co/lab/redirect.php?permalink=s_checklist_scenariobased_step04-implementation-step04): Projection of credit ratings
# +
# compute the daily credit transition matrix
p_credit_daily = project_trans_matrix(p_credit, 1 / 252)
# project ratings
ratings_proj = simulate_markov_chain_multiv(ratings_tnow, p_credit_daily,
m_, rho2=rho2_credit,
nu=nu_credit, j_=j_)
# -
# ## [Step 5](https://www.arpm.co/lab/redirect.php?permalink=s_checklist_scenariobased_step04-implementation-step05): Save databases
# +
# delete big files
del dx_proj, sig2_garch
# projected risk drivers
out = pd.DataFrame({risk_drivers_names[d]:
x_proj[:, :, d].reshape((j_ * (m_ + 1),))
for d in range(d_)})
out = out[list(risk_drivers_names[:d_].values)]
out.to_csv(path + 'db_projection_riskdrivers.csv', index=None)
del out
# projected credit ratings
out = pd.DataFrame({'GE': ratings_proj[:, :, 0].reshape((j_ * (m_ + 1),)),
'JPM': ratings_proj[:, :, 1].reshape((j_ * (m_ + 1),))})
out.to_csv(path + 'db_projection_ratings.csv', index=None)
del out
# number of scenarios and future investment horizon
out = pd.DataFrame({'j_': pd.Series(j_),
't_hor': pd.Series(t_hor)})
out.to_csv(path + 'db_projection_tools.csv', index=None)
del out
# projected scenario probabilities
out = pd.DataFrame({'p': pd.Series(p)})
out.to_csv(path + 'db_scenario_probs.csv', index=None)
del out
# -
# ## Plots
# +
plt.style.use('arpm')
# number of paths to plot
num_plot = min(j_, 20)
# market risk driver path
fig1 = plt.figure(figsize=(1280.0 / 72.0, 720.0 / 72.0), dpi=72.0)
# plot historical series
f1 = plt.plot(np.arange(t_ + 1), db_riskdrivers_series.iloc[:, d_plot - 1], lw=1)
# plot projected series
for j in range(num_plot):
f1 = plt.plot(np.arange(t_ + 1, t_ + 1 + m_ + 1), x_proj[j, :, d_plot - 1], lw=1)
f, xp = histogram_sp(x_proj[:, -1, d_plot - 1], k_=10 * np.log(j_))
f1 = plt.barh(xp, f / 10, height=xp[1] - xp[0], left=t_ + 1 + m_,
facecolor=[.3, .3, .3], edgecolor='k')
plt.title('Projected path: ' + risk_drivers_names[d_plot - 1],
fontweight='bold', fontsize=20)
plt.xlabel('t (days)', fontsize=17)
plt.xticks(fontsize=14)
plt.yticks(fontsize=14)
add_logo(fig1, set_fig_size=False)
fig1.tight_layout()
# plot projected ratings
# select paths with rating changes
ind_j_plot_GE = np.zeros(1)
ind_j_plot_GE[0] = 0
k = 0
while k < num_plot:
k = k + 1
for j in range(j_):
if (j not in ind_j_plot_GE and
ratings_proj[j, -1, 0] != ratings_proj[k, -1, 0]):
ind_j_plot_GE = np.append(ind_j_plot_GE, j)
break
ind_j_plot_JPM = np.zeros(1)
ind_j_plot_JPM[0] = 0
k = 0
while k < num_plot:
k = k + 1
for j in range(j_):
if (j not in ind_j_plot_JPM and
ratings_proj[j, -1, 1] != ratings_proj[k, -1, 1]):
ind_j_plot_JPM = np.append(ind_j_plot_JPM, j)
break
fig2, ax = plt.subplots(2, 1, figsize=(1280.0 / 72.0, 720.0 / 72.0), dpi=72.0)
plt.sca(ax[0])
for j in ind_j_plot_GE:
f5 = plt.plot(np.arange(m_ + 1), ratings_proj[int(j), :, 0] + 1)
plt.title('Projected rating GE', fontweight='bold', fontsize=20)
plt.yticks(np.arange(10), fontsize=14)
ax[0].set_yticklabels(['', 'AAA', 'AA', 'A', 'BBB', 'BB', 'B', 'CCC', 'D', ''])
plt.gca().invert_yaxis()
plt.sca(ax[1])
for j in ind_j_plot_JPM:
plt.plot(np.arange(m_ + 1), ratings_proj[int(j), :, 1] + 1)
plt.title('Projected rating JPM', fontweight='bold', fontsize=20)
plt.yticks(np.arange(10), fontsize=14)
ax[1].set_yticklabels(['', 'AAA', 'AA', 'A', 'BBB', 'BB', 'B', 'CCC', 'D', ''])
plt.gca().invert_yaxis()
add_logo(fig2, set_fig_size=False)
fig2.tight_layout()
|
[
"matplotlib.pyplot.title",
"pandas.read_csv",
"numpy.ones",
"numpy.shape",
"matplotlib.pyplot.style.use",
"matplotlib.pyplot.figure",
"numpy.arange",
"matplotlib.pyplot.gca",
"numpy.sqrt",
"scipy.stats.t.cdf",
"arpym.statistics.quantile_sp",
"matplotlib.pyplot.yticks",
"arpym.tools.add_logo",
"numpy.append",
"arpym.statistics.project_trans_matrix",
"scipy.stats.t.ppf",
"matplotlib.pyplot.xticks",
"matplotlib.pyplot.subplots",
"matplotlib.pyplot.barh",
"pandas.Series",
"numpy.log",
"numpy.datetime64",
"numpy.zeros",
"pandas.plotting.register_matplotlib_converters",
"arpym.statistics.simulate_markov_chain_multiv",
"numpy.array",
"matplotlib.pyplot.sca",
"matplotlib.pyplot.xlabel",
"numpy.busday_count"
] |
[((763, 795), 'pandas.plotting.register_matplotlib_converters', 'register_matplotlib_converters', ([], {}), '()\n', (793, 795), False, 'from pandas.plotting import register_matplotlib_converters\n'), ((1126, 1153), 'numpy.datetime64', 'np.datetime64', (['"""2012-10-26"""'], {}), "('2012-10-26')\n", (1139, 1153), True, 'import numpy as np\n'), ((1565, 1643), 'pandas.read_csv', 'pd.read_csv', (["(path + 'db_riskdrivers_series.csv')"], {'index_col': '(0)', 'parse_dates': '(True)'}), "(path + 'db_riskdrivers_series.csv', index_col=0, parse_dates=True)\n", (1576, 1643), True, 'import pandas as pd\n'), ((1813, 1859), 'pandas.read_csv', 'pd.read_csv', (["(path + 'db_riskdrivers_tools.csv')"], {}), "(path + 'db_riskdrivers_tools.csv')\n", (1824, 1859), True, 'import pandas as pd\n'), ((2293, 2342), 'numpy.datetime64', 'np.datetime64', (['db_riskdrivers_tools.t_now[0]', '"""D"""'], {}), "(db_riskdrivers_tools.t_now[0], 'D')\n", (2306, 2342), True, 'import numpy as np\n'), ((2413, 2490), 'pandas.read_csv', 'pd.read_csv', (["(path + 'db_invariants_series.csv')"], {'index_col': '(0)', 'parse_dates': '(True)'}), "(path + 'db_invariants_series.csv', index_col=0, parse_dates=True)\n", (2424, 2490), True, 'import pandas as pd\n'), ((2570, 2584), 'numpy.shape', 'np.shape', (['epsi'], {}), '(epsi)\n', (2578, 2584), True, 'import numpy as np\n'), ((2630, 2678), 'pandas.read_csv', 'pd.read_csv', (["(path + 'db_invariants_nextstep.csv')"], {}), "(path + 'db_invariants_nextstep.csv')\n", (2641, 2678), True, 'import pandas as pd\n'), ((2736, 2794), 'pandas.read_csv', 'pd.read_csv', (["(path + 'db_invariants_param.csv')"], {'index_col': '(0)'}), "(path + 'db_invariants_param.csv', index_col=0)\n", (2747, 2794), True, 'import pandas as pd\n'), ((2857, 2927), 'pandas.read_csv', 'pd.read_csv', (["(path + 'db_garch_sig2.csv')"], {'index_col': '(0)', 'parse_dates': '(True)'}), "(path + 'db_garch_sig2.csv', index_col=0, parse_dates=True)\n", (2868, 2927), True, 'import pandas as pd\n'), ((3221, 3284), 'pandas.read_csv', 'pd.read_csv', (["(path + 'db_estimation_parametric.csv')"], {'index_col': '(0)'}), "(path + 'db_estimation_parametric.csv', index_col=0)\n", (3232, 3284), True, 'import pandas as pd\n'), ((3413, 3483), 'pandas.read_csv', 'pd.read_csv', (["(path + 'db_estimation_nonparametric.csv')"], {'index_col': '(False)'}), "(path + 'db_estimation_nonparametric.csv', index_col=False)\n", (3424, 3483), True, 'import pandas as pd\n'), ((3642, 3688), 'pandas.read_csv', 'pd.read_csv', (["(path + 'db_estimation_copula.csv')"], {}), "(path + 'db_estimation_copula.csv')\n", (3653, 3688), True, 'import pandas as pd\n'), ((3876, 3929), 'pandas.read_csv', 'pd.read_csv', (["(path + 'db_estimation_credit_copula.csv')"], {}), "(path + 'db_estimation_credit_copula.csv')\n", (3887, 3929), True, 'import pandas as pd\n'), ((4280, 4309), 'numpy.busday_count', 'np.busday_count', (['t_now', 't_hor'], {}), '(t_now, t_hor)\n', (4295, 4309), True, 'import numpy as np\n'), ((4420, 4491), 'numpy.arange', 'np.arange', (['(n_stocks + 1 + d_implvol)', '(n_stocks + 1 + d_implvol + i_bonds)'], {}), '(n_stocks + 1 + d_implvol, n_stocks + 1 + d_implvol + i_bonds)\n', (4429, 4491), True, 'import numpy as np\n'), ((4782, 4804), 'numpy.zeros', 'np.zeros', (['(j_, m_, i_)'], {}), '((j_, m_, i_))\n', (4790, 4804), True, 'import numpy as np\n'), ((5960, 5986), 'numpy.zeros', 'np.zeros', (['(j_, m_ + 1, d_)'], {}), '((j_, m_ + 1, d_))\n', (5968, 5986), True, 'import numpy as np\n'), ((5997, 6023), 'numpy.zeros', 'np.zeros', (['(j_, m_ + 1, d_)'], {}), 
'((j_, m_ + 1, d_))\n', (6005, 6023), True, 'import numpy as np\n'), ((6037, 6063), 'numpy.zeros', 'np.zeros', (['(j_, m_ + 1, d_)'], {}), '((j_, m_ + 1, d_))\n', (6045, 6063), True, 'import numpy as np\n'), ((7876, 7915), 'arpym.statistics.project_trans_matrix', 'project_trans_matrix', (['p_credit', '(1 / 252)'], {}), '(p_credit, 1 / 252)\n', (7896, 7915), False, 'from arpym.statistics import quantile_sp, simulate_markov_chain_multiv, simulate_t, project_trans_matrix\n'), ((7950, 8056), 'arpym.statistics.simulate_markov_chain_multiv', 'simulate_markov_chain_multiv', (['ratings_tnow', 'p_credit_daily', 'm_'], {'rho2': 'rho2_credit', 'nu': 'nu_credit', 'j_': 'j_'}), '(ratings_tnow, p_credit_daily, m_, rho2=\n rho2_credit, nu=nu_credit, j_=j_)\n', (7978, 8056), False, 'from arpym.statistics import quantile_sp, simulate_markov_chain_multiv, simulate_t, project_trans_matrix\n'), ((9235, 9256), 'matplotlib.pyplot.style.use', 'plt.style.use', (['"""arpm"""'], {}), "('arpm')\n", (9248, 9256), True, 'import matplotlib.pyplot as plt\n'), ((9341, 9400), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(1280.0 / 72.0, 720.0 / 72.0)', 'dpi': '(72.0)'}), '(figsize=(1280.0 / 72.0, 720.0 / 72.0), dpi=72.0)\n', (9351, 9400), True, 'import matplotlib.pyplot as plt\n'), ((9719, 9825), 'matplotlib.pyplot.barh', 'plt.barh', (['xp', '(f / 10)'], {'height': '(xp[1] - xp[0])', 'left': '(t_ + 1 + m_)', 'facecolor': '[0.3, 0.3, 0.3]', 'edgecolor': '"""k"""'}), "(xp, f / 10, height=xp[1] - xp[0], left=t_ + 1 + m_, facecolor=[0.3,\n 0.3, 0.3], edgecolor='k')\n", (9727, 9825), True, 'import matplotlib.pyplot as plt\n'), ((9833, 9932), 'matplotlib.pyplot.title', 'plt.title', (["('Projected path: ' + risk_drivers_names[d_plot - 1])"], {'fontweight': '"""bold"""', 'fontsize': '(20)'}), "('Projected path: ' + risk_drivers_names[d_plot - 1], fontweight=\n 'bold', fontsize=20)\n", (9842, 9932), True, 'import matplotlib.pyplot as plt\n'), ((9938, 9973), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""t (days)"""'], {'fontsize': '(17)'}), "('t (days)', fontsize=17)\n", (9948, 9973), True, 'import matplotlib.pyplot as plt\n'), ((9974, 9997), 'matplotlib.pyplot.xticks', 'plt.xticks', ([], {'fontsize': '(14)'}), '(fontsize=14)\n', (9984, 9997), True, 'import matplotlib.pyplot as plt\n'), ((9998, 10021), 'matplotlib.pyplot.yticks', 'plt.yticks', ([], {'fontsize': '(14)'}), '(fontsize=14)\n', (10008, 10021), True, 'import matplotlib.pyplot as plt\n'), ((10022, 10056), 'arpym.tools.add_logo', 'add_logo', (['fig1'], {'set_fig_size': '(False)'}), '(fig1, set_fig_size=False)\n', (10030, 10056), False, 'from arpym.tools import histogram_sp, add_logo\n'), ((10154, 10165), 'numpy.zeros', 'np.zeros', (['(1)'], {}), '(1)\n', (10162, 10165), True, 'import numpy as np\n'), ((10449, 10460), 'numpy.zeros', 'np.zeros', (['(1)'], {}), '(1)\n', (10457, 10460), True, 'import numpy as np\n'), ((10742, 10809), 'matplotlib.pyplot.subplots', 'plt.subplots', (['(2)', '(1)'], {'figsize': '(1280.0 / 72.0, 720.0 / 72.0)', 'dpi': '(72.0)'}), '(2, 1, figsize=(1280.0 / 72.0, 720.0 / 72.0), dpi=72.0)\n', (10754, 10809), True, 'import matplotlib.pyplot as plt\n'), ((10810, 10824), 'matplotlib.pyplot.sca', 'plt.sca', (['ax[0]'], {}), '(ax[0])\n', (10817, 10824), True, 'import matplotlib.pyplot as plt\n'), ((11131, 11145), 'matplotlib.pyplot.sca', 'plt.sca', (['ax[1]'], {}), '(ax[1])\n', (11138, 11145), True, 'import matplotlib.pyplot as plt\n'), ((11450, 11484), 'arpym.tools.add_logo', 'add_logo', (['fig2'], {'set_fig_size': '(False)'}), '(fig2, 
set_fig_size=False)\n', (11458, 11484), False, 'from arpym.tools import histogram_sp, add_logo\n'), ((4350, 4361), 'numpy.ones', 'np.ones', (['j_'], {}), '(j_)\n', (4357, 4361), True, 'import numpy as np\n'), ((9441, 9458), 'numpy.arange', 'np.arange', (['(t_ + 1)'], {}), '(t_ + 1)\n', (9450, 9458), True, 'import numpy as np\n'), ((10922, 10986), 'matplotlib.pyplot.title', 'plt.title', (['"""Projected rating GE"""'], {'fontweight': '"""bold"""', 'fontsize': '(20)'}), "('Projected rating GE', fontweight='bold', fontsize=20)\n", (10931, 10986), True, 'import matplotlib.pyplot as plt\n'), ((10998, 11011), 'numpy.arange', 'np.arange', (['(10)'], {}), '(10)\n', (11007, 11011), True, 'import numpy as np\n'), ((11240, 11305), 'matplotlib.pyplot.title', 'plt.title', (['"""Projected rating JPM"""'], {'fontweight': '"""bold"""', 'fontsize': '(20)'}), "('Projected rating JPM', fontweight='bold', fontsize=20)\n", (11249, 11305), True, 'import matplotlib.pyplot as plt\n'), ((11317, 11330), 'numpy.arange', 'np.arange', (['(10)'], {}), '(10)\n', (11326, 11330), True, 'import numpy as np\n'), ((3755, 3793), 'numpy.array', 'np.array', (["db_estimation_copula['rho2']"], {}), "(db_estimation_copula['rho2'])\n", (3763, 3793), True, 'import numpy as np\n'), ((4942, 4954), 'numpy.zeros', 'np.zeros', (['i_'], {}), '(i_)\n', (4950, 4954), True, 'import numpy as np\n'), ((5174, 5216), 'scipy.stats.t.cdf', 'tstu.cdf', (['epsi_tilde_proj[:, i]', 'nu_copula'], {}), '(epsi_tilde_proj[:, i], nu_copula)\n', (5182, 5216), True, 'from scipy.stats import t as tstu\n'), ((5246, 5295), 'arpym.statistics.quantile_sp', 'quantile_sp', (['u_proj', 'epsi[:, i]', 'p_marginal[:, i]'], {}), '(u_proj, epsi[:, i], p_marginal[:, i])\n', (5257, 5295), False, 'from arpym.statistics import quantile_sp, simulate_markov_chain_multiv, simulate_t, project_trans_matrix\n'), ((5480, 5522), 'scipy.stats.t.cdf', 'tstu.cdf', (['epsi_tilde_proj[:, i]', 'nu_copula'], {}), '(epsi_tilde_proj[:, i], nu_copula)\n', (5488, 5522), True, 'from scipy.stats import t as tstu\n'), ((8947, 8960), 'pandas.Series', 'pd.Series', (['j_'], {}), '(j_)\n', (8956, 8960), True, 'import pandas as pd\n'), ((8991, 9007), 'pandas.Series', 'pd.Series', (['t_hor'], {}), '(t_hor)\n', (9000, 9007), True, 'import pandas as pd\n'), ((9136, 9148), 'pandas.Series', 'pd.Series', (['p'], {}), '(p)\n', (9145, 9148), True, 'import pandas as pd\n'), ((9577, 9611), 'numpy.arange', 'np.arange', (['(t_ + 1)', '(t_ + 1 + m_ + 1)'], {}), '(t_ + 1, t_ + 1 + m_ + 1)\n', (9586, 9611), True, 'import numpy as np\n'), ((10867, 10884), 'numpy.arange', 'np.arange', (['(m_ + 1)'], {}), '(m_ + 1)\n', (10876, 10884), True, 'import numpy as np\n'), ((11106, 11115), 'matplotlib.pyplot.gca', 'plt.gca', ([], {}), '()\n', (11113, 11115), True, 'import matplotlib.pyplot as plt\n'), ((11185, 11202), 'numpy.arange', 'np.arange', (['(m_ + 1)'], {}), '(m_ + 1)\n', (11194, 11202), True, 'import numpy as np\n'), ((11425, 11434), 'matplotlib.pyplot.gca', 'plt.gca', ([], {}), '()\n', (11432, 11434), True, 'import matplotlib.pyplot as plt\n'), ((3012, 3060), 'pandas.read_csv', 'pd.read_csv', (["(path + 'db_invariants_p_credit.csv')"], {}), "(path + 'db_invariants_p_credit.csv')\n", (3023, 3060), True, 'import pandas as pd\n'), ((9702, 9712), 'numpy.log', 'np.log', (['j_'], {}), '(j_)\n', (9708, 9712), True, 'import numpy as np\n'), ((10385, 10412), 'numpy.append', 'np.append', (['ind_j_plot_GE', 'j'], {}), '(ind_j_plot_GE, j)\n', (10394, 10412), True, 'import numpy as np\n'), ((10683, 10711), 'numpy.append', 
'np.append', (['ind_j_plot_JPM', 'j'], {}), '(ind_j_plot_JPM, j)\n', (10692, 10711), True, 'import numpy as np\n'), ((5749, 5767), 'numpy.sqrt', 'np.sqrt', (['sig2_marg'], {}), '(sig2_marg)\n', (5756, 5767), True, 'import numpy as np\n'), ((5770, 5795), 'scipy.stats.t.ppf', 'tstu.ppf', (['u_proj', 'nu_marg'], {}), '(u_proj, nu_marg)\n', (5778, 5795), True, 'from scipy.stats import t as tstu\n'), ((7299, 7327), 'numpy.sqrt', 'np.sqrt', (['sig2_garch[:, m, d]'], {}), '(sig2_garch[:, m, d])\n', (7306, 7327), True, 'import numpy as np\n')]
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Thu Mar 4 08:24:48 2021
proto_optim.py
- Find the protoimage vectors
"""
import torch
import torch.optim as optim
import torch.nn as nn
import math
from tqdm import tqdm
from . import utils
#Optimization to get the protoimages
def proto_optim(og_latents,recovered_latents, model, model_type, device, lr = 0.1, Lambda = 3.0, epsilon = 0.0001, minibatch_size = 16):
'''
Lambda: weight for the cosine similarity loss (3.0 for celeba_pgan, 5.0 for church_pgan, 10.0 for celebaHQ_pgan)
'''
print('Searching for the protoimage latent vector...')
#Get the number of samples and the dimensionality
num_samples, num_dims = og_latents.size()
#Number of batches needed
num_batches = int(math.ceil(num_samples/minibatch_size))
#Vector for the found protoimage latent vectors
protoLatents = torch.zeros(num_samples,num_dims)
for batch_num in tqdm(range(num_batches)):
#Check if there are fewer than minibatch_size images left
if (batch_num + 1) * minibatch_size > num_samples:
end = num_samples + 1
else:
end = (batch_num + 1) * minibatch_size
#Original latent vectors
x = og_latents[batch_num * minibatch_size: end].to(device)
batch_size,_ = x.size()
#Recovered latent vectors
y = recovered_latents[batch_num * minibatch_size: end].to(device)
#Put both on the device
x = x.detach().requires_grad_(False).to(device)
y = y.detach().requires_grad_(False).to(device)
og_x = x * 1.0
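        #Per-dimension scaling coefficients; the hard-coded 512 assumes the latent size of the PGANs listed in the docstring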
alpha = torch.ones(batch_size,512,device=device)
alpha = alpha.requires_grad_(True)
#Initial direction
diff = y - x
opt = optim.Adam([alpha], lr = lr)
cosSim = nn.CosineSimilarity()
#Learning rate scheduler
sched = optim.lr_scheduler.StepLR(optimizer = opt, step_size = 200, gamma = 0.1)
oldLoss = 0
for i in range(501):
#Zero the gradients
opt.zero_grad()
#Move the direction of the difference vector
ynew = y + (torch.mm(alpha,diff.t()).diagonal().unsqueeze(1) * ((diff / (diff.norm(dim=1).unsqueeze(1)**2))))
ynew = utils.normalize(ynew)
#Get the images of the current latent vectors
currImgs = model.netG(ynew)
#Get the discriminator score
discrimScore = model.netD(currImgs,getFeature = False)
#Calculate the loss
if model_type == 'wgangp':
loss = discrimScore.mean() + 0.2*cosSim(ynew,og_x).mean() + 1.0*discrimScore.std() + 3.0*cosSim(ynew,og_x).std()
else:
loss = discrimScore.mean() + Lambda*cosSim(ynew,og_x).mean()
#Backpropagate the error
loss.backward()
#Take a step with the optimizer
opt.step()
sched.step()
#Early stopping condition
if abs(loss-oldLoss) < epsilon:
break
else:
oldLoss = loss
x = y * 1.0
y = ynew.detach()
diff = y - x
#Show the progress
# if i % 1 == 0:
# print('Iterations: ' + str(i))
# print('Loss: ' + str(loss))
protoLatents[batch_num * minibatch_size: end] = ynew.detach().cpu()
return protoLatents
|
[
"torch.ones",
"torch.optim.lr_scheduler.StepLR",
"math.ceil",
"torch.nn.CosineSimilarity",
"torch.optim.Adam",
"torch.zeros"
] |
[((916, 950), 'torch.zeros', 'torch.zeros', (['num_samples', 'num_dims'], {}), '(num_samples, num_dims)\n', (927, 950), False, 'import torch\n'), ((801, 840), 'math.ceil', 'math.ceil', (['(num_samples / minibatch_size)'], {}), '(num_samples / minibatch_size)\n', (810, 840), False, 'import math\n'), ((1703, 1745), 'torch.ones', 'torch.ones', (['batch_size', '(512)'], {'device': 'device'}), '(batch_size, 512, device=device)\n', (1713, 1745), False, 'import torch\n'), ((1864, 1890), 'torch.optim.Adam', 'optim.Adam', (['[alpha]'], {'lr': 'lr'}), '([alpha], lr=lr)\n', (1874, 1890), True, 'import torch.optim as optim\n'), ((1919, 1940), 'torch.nn.CosineSimilarity', 'nn.CosineSimilarity', ([], {}), '()\n', (1938, 1940), True, 'import torch.nn as nn\n'), ((1999, 2065), 'torch.optim.lr_scheduler.StepLR', 'optim.lr_scheduler.StepLR', ([], {'optimizer': 'opt', 'step_size': '(200)', 'gamma': '(0.1)'}), '(optimizer=opt, step_size=200, gamma=0.1)\n', (2024, 2065), True, 'import torch.optim as optim\n')]
|
import bblfsh_sonar_checks.utils as utils
import bblfsh
def check(uast):
findings = []
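    # UAST/XPath query matching direct System.gc() invocations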
fin_calls = bblfsh.filter(uast, "//MethodInvocation//"
"Identifier[@roleCall and @roleReceiver and @Name='System']/parent::MethodInvocation/"
"Identifier[@roleCall and @roleCallee and @Name='gc']/parent::MethodInvocation")
if len(list(fin_calls)):
findings.append({"msg": "Don't use System.gc()", "pos": None})
fin_calls = bblfsh.filter(uast, "//MethodInvocation//"
"Identifier[@roleCall and @roleReceiver and @Name='Runtime']/parent::MethodInvocation//"
"Identifier[@roleCall and @roleCallee and @Name='getRuntime']/parent::MethodInvocation/parent::MethodInvocation//"
"Identifier[@roleCall and @roleCallee and @Name='gc']/parent::MethodInvocation")
if len(list(fin_calls)):
findings.append({"msg": "Don't use Runtime.getRuntime().gc(})", "pos": None})
return findings
if __name__ == '__main__': utils.run_default_fixture(__file__, check)
|
[
"bblfsh.filter",
"bblfsh_sonar_checks.utils.run_default_fixture"
] |
[((110, 323), 'bblfsh.filter', 'bblfsh.filter', (['uast', '"""//MethodInvocation//Identifier[@roleCall and @roleReceiver and @Name=\'System\']/parent::MethodInvocation/Identifier[@roleCall and @roleCallee and @Name=\'gc\']/parent::MethodInvocation"""'], {}), '(uast,\n "//MethodInvocation//Identifier[@roleCall and @roleReceiver and @Name=\'System\']/parent::MethodInvocation/Identifier[@roleCall and @roleCallee and @Name=\'gc\']/parent::MethodInvocation"\n )\n', (123, 323), False, 'import bblfsh\n'), ((463, 790), 'bblfsh.filter', 'bblfsh.filter', (['uast', '"""//MethodInvocation//Identifier[@roleCall and @roleReceiver and @Name=\'Runtime\']/parent::MethodInvocation//Identifier[@roleCall and @roleCallee and @Name=\'getRuntime\']/parent::MethodInvocation/parent::MethodInvocation//Identifier[@roleCall and @roleCallee and @Name=\'gc\']/parent::MethodInvocation"""'], {}), '(uast,\n "//MethodInvocation//Identifier[@roleCall and @roleReceiver and @Name=\'Runtime\']/parent::MethodInvocation//Identifier[@roleCall and @roleCallee and @Name=\'getRuntime\']/parent::MethodInvocation/parent::MethodInvocation//Identifier[@roleCall and @roleCallee and @Name=\'gc\']/parent::MethodInvocation"\n )\n', (476, 790), False, 'import bblfsh\n'), ((992, 1034), 'bblfsh_sonar_checks.utils.run_default_fixture', 'utils.run_default_fixture', (['__file__', 'check'], {}), '(__file__, check)\n', (1017, 1034), True, 'import bblfsh_sonar_checks.utils as utils\n')]
|
# -*- coding: UTF-8 -*-
"""
This file is part of RT1.
(c) 2016- <NAME>
For COPYING and LICENSE details, please refer to the LICENSE file
"""
from setuptools import setup
#from setuptools import find_packages
from rt1 import __version__
setup(name='rt1',
version=__version__,
description='RT1 - bistatic single scattering radiative transfer model',
packages=['rt1'],
package_dir={'rt1': 'rt1'},
include_package_data=False,
author="<NAME>",
author_email='<EMAIL>',
maintainer='<NAME>',
maintainer_email='<EMAIL>',
#~ license='APACHE 2',
url='https://github.com/TUW-GEO/rt1',
long_description=('A module to perform forward-simulation and ' +
'parameter-inversion of incidence-angle dependent ' +
'backscatter observations based on a first-order ' +
                        'radiative-transfer model describing a rough surface ' +
                        'covered by a homogeneous layer of scattering ' +
'media.'),
install_requires=["numpy>=1.16", "sympy>=1.4", "scipy>=1.2",
"pandas>=0.24", "matplotlib>=3.0"],
extras_require={'symengine' : ["symengine>=0.4"]},
keywords=["physics", "radiative transfer"],
# See https://pypi.python.org/pypi?%3Aaction=list_classifiers
classifiers=[
'Intended Audience :: Science/Research',
'Topic :: Scientific/Engineering :: Atmospheric Science',
# Pick your license as you wish (should match "license" above)
#~ 'License :: OSI Approved :: MIT License',
'Programming Language :: Python :: 3.7'
],
)
|
[
"setuptools.setup"
] |
[((239, 1250), 'setuptools.setup', 'setup', ([], {'name': '"""rt1"""', 'version': '__version__', 'description': '"""RT1 - bistatic single scattering radiative transfer model"""', 'packages': "['rt1']", 'package_dir': "{'rt1': 'rt1'}", 'include_package_data': '(False)', 'author': '"""<NAME>"""', 'author_email': '"""<EMAIL>"""', 'maintainer': '"""<NAME>"""', 'maintainer_email': '"""<EMAIL>"""', 'url': '"""https://github.com/TUW-GEO/rt1"""', 'long_description': "('A module to perform forward-simulation and ' +\n 'parameter-inversion of incidence-angle dependent ' +\n 'backscatter observations based on a first-order ' +\n 'radiative-transfer model describing a rough surface' +\n 'covered by a homogeneous layer of scattering' + 'media.')", 'install_requires': "['numpy>=1.16', 'sympy>=1.4', 'scipy>=1.2', 'pandas>=0.24', 'matplotlib>=3.0']", 'extras_require': "{'symengine': ['symengine>=0.4']}", 'keywords': "['physics', 'radiative transfer']", 'classifiers': "['Intended Audience :: Science/Research',\n 'Topic :: Scientific/Engineering :: Atmospheric Science',\n 'Programming Language :: Python :: 3.7']"}), "(name='rt1', version=__version__, description=\n 'RT1 - bistatic single scattering radiative transfer model', packages=[\n 'rt1'], package_dir={'rt1': 'rt1'}, include_package_data=False, author=\n '<NAME>', author_email='<EMAIL>', maintainer='<NAME>', maintainer_email\n ='<EMAIL>', url='https://github.com/TUW-GEO/rt1', long_description=\n 'A module to perform forward-simulation and ' +\n 'parameter-inversion of incidence-angle dependent ' +\n 'backscatter observations based on a first-order ' +\n 'radiative-transfer model describing a rough surface' +\n 'covered by a homogeneous layer of scattering' + 'media.',\n install_requires=['numpy>=1.16', 'sympy>=1.4', 'scipy>=1.2',\n 'pandas>=0.24', 'matplotlib>=3.0'], extras_require={'symengine': [\n 'symengine>=0.4']}, keywords=['physics', 'radiative transfer'],\n classifiers=['Intended Audience :: Science/Research',\n 'Topic :: Scientific/Engineering :: Atmospheric Science',\n 'Programming Language :: Python :: 3.7'])\n", (244, 1250), False, 'from setuptools import setup\n')]
|
# coding=utf-8
# *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from .. import _utilities
from . import outputs
from ._inputs import *
__all__ = ['VPCArgs', 'VPC']
@pulumi.input_type
class VPCArgs:
def __init__(__self__, *,
cidr_blocks: pulumi.Input[Sequence[pulumi.Input[str]]],
name: Optional[pulumi.Input[str]] = None,
remark: Optional[pulumi.Input[str]] = None,
tag: Optional[pulumi.Input[str]] = None):
"""
The set of arguments for constructing a VPC resource.
:param pulumi.Input[Sequence[pulumi.Input[str]]] cidr_blocks: The CIDR blocks of VPC.
:param pulumi.Input[str] remark: The remarks of the VPC. (Default: `""`).
        :param pulumi.Input[str] tag: A tag assigned to VPC, which contains at most 63 characters and only supports Chinese, English, numbers, '-', '_', and '.'. If it is not filled in or an empty string is filled in, then the default tag will be assigned. (Default: `Default`).
"""
pulumi.set(__self__, "cidr_blocks", cidr_blocks)
if name is not None:
pulumi.set(__self__, "name", name)
if remark is not None:
pulumi.set(__self__, "remark", remark)
if tag is not None:
pulumi.set(__self__, "tag", tag)
@property
@pulumi.getter(name="cidrBlocks")
def cidr_blocks(self) -> pulumi.Input[Sequence[pulumi.Input[str]]]:
"""
The CIDR blocks of VPC.
"""
return pulumi.get(self, "cidr_blocks")
@cidr_blocks.setter
def cidr_blocks(self, value: pulumi.Input[Sequence[pulumi.Input[str]]]):
pulumi.set(self, "cidr_blocks", value)
@property
@pulumi.getter
def name(self) -> Optional[pulumi.Input[str]]:
return pulumi.get(self, "name")
@name.setter
def name(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "name", value)
@property
@pulumi.getter
def remark(self) -> Optional[pulumi.Input[str]]:
"""
The remarks of the VPC. (Default: `""`).
"""
return pulumi.get(self, "remark")
@remark.setter
def remark(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "remark", value)
@property
@pulumi.getter
def tag(self) -> Optional[pulumi.Input[str]]:
"""
        A tag assigned to VPC, which contains at most 63 characters and only supports Chinese, English, numbers, '-', '_', and '.'. If it is not filled in or an empty string is filled in, then the default tag will be assigned. (Default: `Default`).
"""
return pulumi.get(self, "tag")
@tag.setter
def tag(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "tag", value)
@pulumi.input_type
class _VPCState:
def __init__(__self__, *,
cidr_blocks: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
create_time: Optional[pulumi.Input[str]] = None,
name: Optional[pulumi.Input[str]] = None,
network_infos: Optional[pulumi.Input[Sequence[pulumi.Input['VPCNetworkInfoArgs']]]] = None,
remark: Optional[pulumi.Input[str]] = None,
tag: Optional[pulumi.Input[str]] = None,
update_time: Optional[pulumi.Input[str]] = None):
"""
Input properties used for looking up and filtering VPC resources.
:param pulumi.Input[Sequence[pulumi.Input[str]]] cidr_blocks: The CIDR blocks of VPC.
:param pulumi.Input[str] create_time: The time of creation for VPC, formatted in RFC3339 time string.
        :param pulumi.Input[Sequence[pulumi.Input['VPCNetworkInfoArgs']]] network_infos: It is a nested type which is documented below.
:param pulumi.Input[str] remark: The remarks of the VPC. (Default: `""`).
        :param pulumi.Input[str] tag: A tag assigned to VPC, which contains at most 63 characters and only supports Chinese, English, numbers, '-', '_', and '.'. If it is not filled in or an empty string is filled in, then the default tag will be assigned. (Default: `Default`).
:param pulumi.Input[str] update_time: The time whenever there is a change made to VPC, formatted in RFC3339 time string.
"""
if cidr_blocks is not None:
pulumi.set(__self__, "cidr_blocks", cidr_blocks)
if create_time is not None:
pulumi.set(__self__, "create_time", create_time)
if name is not None:
pulumi.set(__self__, "name", name)
if network_infos is not None:
pulumi.set(__self__, "network_infos", network_infos)
if remark is not None:
pulumi.set(__self__, "remark", remark)
if tag is not None:
pulumi.set(__self__, "tag", tag)
if update_time is not None:
pulumi.set(__self__, "update_time", update_time)
@property
@pulumi.getter(name="cidrBlocks")
def cidr_blocks(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:
"""
The CIDR blocks of VPC.
"""
return pulumi.get(self, "cidr_blocks")
@cidr_blocks.setter
def cidr_blocks(self, value: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]):
pulumi.set(self, "cidr_blocks", value)
@property
@pulumi.getter(name="createTime")
def create_time(self) -> Optional[pulumi.Input[str]]:
"""
The time of creation for VPC, formatted in RFC3339 time string.
"""
return pulumi.get(self, "create_time")
@create_time.setter
def create_time(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "create_time", value)
@property
@pulumi.getter
def name(self) -> Optional[pulumi.Input[str]]:
return pulumi.get(self, "name")
@name.setter
def name(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "name", value)
@property
@pulumi.getter(name="networkInfos")
def network_infos(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['VPCNetworkInfoArgs']]]]:
"""
        It is a nested type which is documented below.
"""
return pulumi.get(self, "network_infos")
@network_infos.setter
def network_infos(self, value: Optional[pulumi.Input[Sequence[pulumi.Input['VPCNetworkInfoArgs']]]]):
pulumi.set(self, "network_infos", value)
@property
@pulumi.getter
def remark(self) -> Optional[pulumi.Input[str]]:
"""
The remarks of the VPC. (Default: `""`).
"""
return pulumi.get(self, "remark")
@remark.setter
def remark(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "remark", value)
@property
@pulumi.getter
def tag(self) -> Optional[pulumi.Input[str]]:
"""
        A tag assigned to VPC, which contains at most 63 characters and only supports Chinese, English, numbers, '-', '_', and '.'. If it is not filled in or an empty string is filled in, then the default tag will be assigned. (Default: `Default`).
"""
return pulumi.get(self, "tag")
@tag.setter
def tag(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "tag", value)
@property
@pulumi.getter(name="updateTime")
def update_time(self) -> Optional[pulumi.Input[str]]:
"""
The time whenever there is a change made to VPC, formatted in RFC3339 time string.
"""
return pulumi.get(self, "update_time")
@update_time.setter
def update_time(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "update_time", value)
class VPC(pulumi.CustomResource):
@overload
def __init__(__self__,
resource_name: str,
opts: Optional[pulumi.ResourceOptions] = None,
cidr_blocks: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
name: Optional[pulumi.Input[str]] = None,
remark: Optional[pulumi.Input[str]] = None,
tag: Optional[pulumi.Input[str]] = None,
__props__=None):
"""
Provides a VPC resource.
        > **Note** The network segment can only be created or deleted; both operations cannot be performed at the same time.
## Example Usage
```python
import pulumi
import pulumi_ucloud as ucloud
example = ucloud.vpc.VPC("example",
cidr_blocks=["192.168.0.0/16"],
tag="tf-example")
```
## Import
VPC can be imported using the `id`, e.g.
```sh
$ pulumi import ucloud:vpc/vPC:VPC example uvnet-abc123456
```
:param str resource_name: The name of the resource.
:param pulumi.ResourceOptions opts: Options for the resource.
:param pulumi.Input[Sequence[pulumi.Input[str]]] cidr_blocks: The CIDR blocks of VPC.
:param pulumi.Input[str] remark: The remarks of the VPC. (Default: `""`).
        :param pulumi.Input[str] tag: A tag assigned to VPC, which contains at most 63 characters and only supports Chinese, English, numbers, '-', '_', and '.'. If it is not filled in or an empty string is filled in, then the default tag will be assigned. (Default: `Default`).
"""
...
@overload
def __init__(__self__,
resource_name: str,
args: VPCArgs,
opts: Optional[pulumi.ResourceOptions] = None):
"""
Provides a VPC resource.
        > **Note** The network segment can only be created or deleted; both operations cannot be performed at the same time.
## Example Usage
```python
import pulumi
import pulumi_ucloud as ucloud
example = ucloud.vpc.VPC("example",
cidr_blocks=["192.168.0.0/16"],
tag="tf-example")
```
## Import
VPC can be imported using the `id`, e.g.
```sh
$ pulumi import ucloud:vpc/vPC:VPC example uvnet-abc123456
```
:param str resource_name: The name of the resource.
:param VPCArgs args: The arguments to use to populate this resource's properties.
:param pulumi.ResourceOptions opts: Options for the resource.
"""
...
def __init__(__self__, resource_name: str, *args, **kwargs):
resource_args, opts = _utilities.get_resource_args_opts(VPCArgs, pulumi.ResourceOptions, *args, **kwargs)
if resource_args is not None:
__self__._internal_init(resource_name, opts, **resource_args.__dict__)
else:
__self__._internal_init(resource_name, *args, **kwargs)
def _internal_init(__self__,
resource_name: str,
opts: Optional[pulumi.ResourceOptions] = None,
cidr_blocks: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
name: Optional[pulumi.Input[str]] = None,
remark: Optional[pulumi.Input[str]] = None,
tag: Optional[pulumi.Input[str]] = None,
__props__=None):
if opts is None:
opts = pulumi.ResourceOptions()
if not isinstance(opts, pulumi.ResourceOptions):
raise TypeError('Expected resource options to be a ResourceOptions instance')
if opts.version is None:
opts.version = _utilities.get_version()
if opts.id is None:
if __props__ is not None:
raise TypeError('__props__ is only valid when passed in combination with a valid opts.id to get an existing resource')
__props__ = VPCArgs.__new__(VPCArgs)
if cidr_blocks is None and not opts.urn:
raise TypeError("Missing required property 'cidr_blocks'")
__props__.__dict__["cidr_blocks"] = cidr_blocks
__props__.__dict__["name"] = name
__props__.__dict__["remark"] = remark
__props__.__dict__["tag"] = tag
__props__.__dict__["create_time"] = None
__props__.__dict__["network_infos"] = None
__props__.__dict__["update_time"] = None
super(VPC, __self__).__init__(
'ucloud:vpc/vPC:VPC',
resource_name,
__props__,
opts)
@staticmethod
def get(resource_name: str,
id: pulumi.Input[str],
opts: Optional[pulumi.ResourceOptions] = None,
cidr_blocks: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
create_time: Optional[pulumi.Input[str]] = None,
name: Optional[pulumi.Input[str]] = None,
network_infos: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['VPCNetworkInfoArgs']]]]] = None,
remark: Optional[pulumi.Input[str]] = None,
tag: Optional[pulumi.Input[str]] = None,
update_time: Optional[pulumi.Input[str]] = None) -> 'VPC':
"""
Get an existing VPC resource's state with the given name, id, and optional extra
properties used to qualify the lookup.
:param str resource_name: The unique name of the resulting resource.
:param pulumi.Input[str] id: The unique provider ID of the resource to lookup.
:param pulumi.ResourceOptions opts: Options for the resource.
:param pulumi.Input[Sequence[pulumi.Input[str]]] cidr_blocks: The CIDR blocks of VPC.
:param pulumi.Input[str] create_time: The time of creation for VPC, formatted in RFC3339 time string.
        :param pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['VPCNetworkInfoArgs']]]] network_infos: It is a nested type which is documented below.
:param pulumi.Input[str] remark: The remarks of the VPC. (Default: `""`).
        :param pulumi.Input[str] tag: A tag assigned to VPC, which contains at most 63 characters and only supports Chinese, English, numbers, '-', '_', and '.'. If it is not filled in or an empty string is filled in, then the default tag will be assigned. (Default: `Default`).
:param pulumi.Input[str] update_time: The time whenever there is a change made to VPC, formatted in RFC3339 time string.
"""
opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))
__props__ = _VPCState.__new__(_VPCState)
__props__.__dict__["cidr_blocks"] = cidr_blocks
__props__.__dict__["create_time"] = create_time
__props__.__dict__["name"] = name
__props__.__dict__["network_infos"] = network_infos
__props__.__dict__["remark"] = remark
__props__.__dict__["tag"] = tag
__props__.__dict__["update_time"] = update_time
return VPC(resource_name, opts=opts, __props__=__props__)
@property
@pulumi.getter(name="cidrBlocks")
def cidr_blocks(self) -> pulumi.Output[Sequence[str]]:
"""
The CIDR blocks of VPC.
"""
return pulumi.get(self, "cidr_blocks")
@property
@pulumi.getter(name="createTime")
def create_time(self) -> pulumi.Output[str]:
"""
The time of creation for VPC, formatted in RFC3339 time string.
"""
return pulumi.get(self, "create_time")
@property
@pulumi.getter
def name(self) -> pulumi.Output[str]:
return pulumi.get(self, "name")
@property
@pulumi.getter(name="networkInfos")
def network_infos(self) -> pulumi.Output[Sequence['outputs.VPCNetworkInfo']]:
"""
        It is a nested type which is documented below.
"""
return pulumi.get(self, "network_infos")
@property
@pulumi.getter
def remark(self) -> pulumi.Output[str]:
"""
The remarks of the VPC. (Default: `""`).
"""
return pulumi.get(self, "remark")
@property
@pulumi.getter
def tag(self) -> pulumi.Output[Optional[str]]:
"""
        A tag assigned to VPC, which contains at most 63 characters and only supports Chinese, English, numbers, '-', '_', and '.'. If it is not filled in or an empty string is filled in, then the default tag will be assigned. (Default: `Default`).
"""
return pulumi.get(self, "tag")
@property
@pulumi.getter(name="updateTime")
def update_time(self) -> pulumi.Output[str]:
"""
The time whenever there is a change made to VPC, formatted in RFC3339 time string.
"""
return pulumi.get(self, "update_time")
|
[
"pulumi.get",
"pulumi.getter",
"pulumi.ResourceOptions",
"pulumi.set"
] |
[((1567, 1599), 'pulumi.getter', 'pulumi.getter', ([], {'name': '"""cidrBlocks"""'}), "(name='cidrBlocks')\n", (1580, 1599), False, 'import pulumi\n'), ((5128, 5160), 'pulumi.getter', 'pulumi.getter', ([], {'name': '"""cidrBlocks"""'}), "(name='cidrBlocks')\n", (5141, 5160), False, 'import pulumi\n'), ((5525, 5557), 'pulumi.getter', 'pulumi.getter', ([], {'name': '"""createTime"""'}), "(name='createTime')\n", (5538, 5557), False, 'import pulumi\n'), ((6153, 6187), 'pulumi.getter', 'pulumi.getter', ([], {'name': '"""networkInfos"""'}), "(name='networkInfos')\n", (6166, 6187), False, 'import pulumi\n'), ((7439, 7471), 'pulumi.getter', 'pulumi.getter', ([], {'name': '"""updateTime"""'}), "(name='updateTime')\n", (7452, 7471), False, 'import pulumi\n'), ((14917, 14949), 'pulumi.getter', 'pulumi.getter', ([], {'name': '"""cidrBlocks"""'}), "(name='cidrBlocks')\n", (14930, 14949), False, 'import pulumi\n'), ((15132, 15164), 'pulumi.getter', 'pulumi.getter', ([], {'name': '"""createTime"""'}), "(name='createTime')\n", (15145, 15164), False, 'import pulumi\n'), ((15493, 15527), 'pulumi.getter', 'pulumi.getter', ([], {'name': '"""networkInfos"""'}), "(name='networkInfos')\n", (15506, 15527), False, 'import pulumi\n'), ((16339, 16371), 'pulumi.getter', 'pulumi.getter', ([], {'name': '"""updateTime"""'}), "(name='updateTime')\n", (16352, 16371), False, 'import pulumi\n'), ((1267, 1315), 'pulumi.set', 'pulumi.set', (['__self__', '"""cidr_blocks"""', 'cidr_blocks'], {}), "(__self__, 'cidr_blocks', cidr_blocks)\n", (1277, 1315), False, 'import pulumi\n'), ((1743, 1774), 'pulumi.get', 'pulumi.get', (['self', '"""cidr_blocks"""'], {}), "(self, 'cidr_blocks')\n", (1753, 1774), False, 'import pulumi\n'), ((1885, 1923), 'pulumi.set', 'pulumi.set', (['self', '"""cidr_blocks"""', 'value'], {}), "(self, 'cidr_blocks', value)\n", (1895, 1923), False, 'import pulumi\n'), ((2024, 2048), 'pulumi.get', 'pulumi.get', (['self', '"""name"""'], {}), "(self, 'name')\n", (2034, 2048), False, 'import pulumi\n'), ((2131, 2162), 'pulumi.set', 'pulumi.set', (['self', '"""name"""', 'value'], {}), "(self, 'name', value)\n", (2141, 2162), False, 'import pulumi\n'), ((2338, 2364), 'pulumi.get', 'pulumi.get', (['self', '"""remark"""'], {}), "(self, 'remark')\n", (2348, 2364), False, 'import pulumi\n'), ((2451, 2484), 'pulumi.set', 'pulumi.set', (['self', '"""remark"""', 'value'], {}), "(self, 'remark', value)\n", (2461, 2484), False, 'import pulumi\n'), ((2851, 2874), 'pulumi.get', 'pulumi.get', (['self', '"""tag"""'], {}), "(self, 'tag')\n", (2861, 2874), False, 'import pulumi\n'), ((2955, 2985), 'pulumi.set', 'pulumi.set', (['self', '"""tag"""', 'value'], {}), "(self, 'tag', value)\n", (2965, 2985), False, 'import pulumi\n'), ((5314, 5345), 'pulumi.get', 'pulumi.get', (['self', '"""cidr_blocks"""'], {}), "(self, 'cidr_blocks')\n", (5324, 5345), False, 'import pulumi\n'), ((5466, 5504), 'pulumi.set', 'pulumi.set', (['self', '"""cidr_blocks"""', 'value'], {}), "(self, 'cidr_blocks', value)\n", (5476, 5504), False, 'import pulumi\n'), ((5727, 5758), 'pulumi.get', 'pulumi.get', (['self', '"""create_time"""'], {}), "(self, 'create_time')\n", (5737, 5758), False, 'import pulumi\n'), ((5855, 5893), 'pulumi.set', 'pulumi.set', (['self', '"""create_time"""', 'value'], {}), "(self, 'create_time', value)\n", (5865, 5893), False, 'import pulumi\n'), ((5994, 6018), 'pulumi.get', 'pulumi.get', (['self', '"""name"""'], {}), "(self, 'name')\n", (6004, 6018), False, 'import pulumi\n'), ((6101, 6132), 'pulumi.set', 'pulumi.set', (['self', 
'"""name"""', 'value'], {}), "(self, 'name', value)\n", (6111, 6132), False, 'import pulumi\n'), ((6380, 6413), 'pulumi.get', 'pulumi.get', (['self', '"""network_infos"""'], {}), "(self, 'network_infos')\n", (6390, 6413), False, 'import pulumi\n'), ((6555, 6595), 'pulumi.set', 'pulumi.set', (['self', '"""network_infos"""', 'value'], {}), "(self, 'network_infos', value)\n", (6565, 6595), False, 'import pulumi\n'), ((6771, 6797), 'pulumi.get', 'pulumi.get', (['self', '"""remark"""'], {}), "(self, 'remark')\n", (6781, 6797), False, 'import pulumi\n'), ((6884, 6917), 'pulumi.set', 'pulumi.set', (['self', '"""remark"""', 'value'], {}), "(self, 'remark', value)\n", (6894, 6917), False, 'import pulumi\n'), ((7284, 7307), 'pulumi.get', 'pulumi.get', (['self', '"""tag"""'], {}), "(self, 'tag')\n", (7294, 7307), False, 'import pulumi\n'), ((7388, 7418), 'pulumi.set', 'pulumi.set', (['self', '"""tag"""', 'value'], {}), "(self, 'tag', value)\n", (7398, 7418), False, 'import pulumi\n'), ((7660, 7691), 'pulumi.get', 'pulumi.get', (['self', '"""update_time"""'], {}), "(self, 'update_time')\n", (7670, 7691), False, 'import pulumi\n'), ((7788, 7826), 'pulumi.set', 'pulumi.set', (['self', '"""update_time"""', 'value'], {}), "(self, 'update_time', value)\n", (7798, 7826), False, 'import pulumi\n'), ((15080, 15111), 'pulumi.get', 'pulumi.get', (['self', '"""cidr_blocks"""'], {}), "(self, 'cidr_blocks')\n", (15090, 15111), False, 'import pulumi\n'), ((15325, 15356), 'pulumi.get', 'pulumi.get', (['self', '"""create_time"""'], {}), "(self, 'create_time')\n", (15335, 15356), False, 'import pulumi\n'), ((15448, 15472), 'pulumi.get', 'pulumi.get', (['self', '"""name"""'], {}), "(self, 'name')\n", (15458, 15472), False, 'import pulumi\n'), ((15701, 15734), 'pulumi.get', 'pulumi.get', (['self', '"""network_infos"""'], {}), "(self, 'network_infos')\n", (15711, 15734), False, 'import pulumi\n'), ((15901, 15927), 'pulumi.get', 'pulumi.get', (['self', '"""remark"""'], {}), "(self, 'remark')\n", (15911, 15927), False, 'import pulumi\n'), ((16295, 16318), 'pulumi.get', 'pulumi.get', (['self', '"""tag"""'], {}), "(self, 'tag')\n", (16305, 16318), False, 'import pulumi\n'), ((16551, 16582), 'pulumi.get', 'pulumi.get', (['self', '"""update_time"""'], {}), "(self, 'update_time')\n", (16561, 16582), False, 'import pulumi\n'), ((1357, 1391), 'pulumi.set', 'pulumi.set', (['__self__', '"""name"""', 'name'], {}), "(__self__, 'name', name)\n", (1367, 1391), False, 'import pulumi\n'), ((1435, 1473), 'pulumi.set', 'pulumi.set', (['__self__', '"""remark"""', 'remark'], {}), "(__self__, 'remark', remark)\n", (1445, 1473), False, 'import pulumi\n'), ((1514, 1546), 'pulumi.set', 'pulumi.set', (['__self__', '"""tag"""', 'tag'], {}), "(__self__, 'tag', tag)\n", (1524, 1546), False, 'import pulumi\n'), ((4531, 4579), 'pulumi.set', 'pulumi.set', (['__self__', '"""cidr_blocks"""', 'cidr_blocks'], {}), "(__self__, 'cidr_blocks', cidr_blocks)\n", (4541, 4579), False, 'import pulumi\n'), ((4628, 4676), 'pulumi.set', 'pulumi.set', (['__self__', '"""create_time"""', 'create_time'], {}), "(__self__, 'create_time', create_time)\n", (4638, 4676), False, 'import pulumi\n'), ((4718, 4752), 'pulumi.set', 'pulumi.set', (['__self__', '"""name"""', 'name'], {}), "(__self__, 'name', name)\n", (4728, 4752), False, 'import pulumi\n'), ((4803, 4855), 'pulumi.set', 'pulumi.set', (['__self__', '"""network_infos"""', 'network_infos'], {}), "(__self__, 'network_infos', network_infos)\n", (4813, 4855), False, 'import pulumi\n'), ((4899, 4937), 'pulumi.set', 
'pulumi.set', (['__self__', '"""remark"""', 'remark'], {}), "(__self__, 'remark', remark)\n", (4909, 4937), False, 'import pulumi\n'), ((4978, 5010), 'pulumi.set', 'pulumi.set', (['__self__', '"""tag"""', 'tag'], {}), "(__self__, 'tag', tag)\n", (4988, 5010), False, 'import pulumi\n'), ((5059, 5107), 'pulumi.set', 'pulumi.set', (['__self__', '"""update_time"""', 'update_time'], {}), "(__self__, 'update_time', update_time)\n", (5069, 5107), False, 'import pulumi\n'), ((11324, 11348), 'pulumi.ResourceOptions', 'pulumi.ResourceOptions', ([], {}), '()\n', (11346, 11348), False, 'import pulumi\n'), ((14393, 14422), 'pulumi.ResourceOptions', 'pulumi.ResourceOptions', ([], {'id': 'id'}), '(id=id)\n', (14415, 14422), False, 'import pulumi\n')]
|
import os
import sys
sys.path.append(os.getcwd())
import tensorflow as tf
tf.get_logger().setLevel('ERROR')
from tensorflow import keras
from common.inputs.voc2010 import voc_parts
from common import layers, losses, utils, train, attacks
from common.ops.routing import activated_entropy, coupling_entropy
import numpy as np
import config
WEIGHT_DECAY = 1e-4
kernel_regularizer = keras.regularizers.l2(WEIGHT_DECAY)
kernel_initializer = keras.initializers.he_normal()
BASE_NAME = 'ex4_3'
def build_model_name(params):
model_name = BASE_NAME
model_name += '_{}'.format(params.model.backbone)
model_name += '_fine{}'.format(params.model.fine)
model_name += '_part{}'.format(params.caps.parts)
model_name += '_{}'.format(params.routing.type)
if params.routing.type == 'DR' or params.routing.type == 'EM':
model_name += '_iter{}'.format(params.routing.iter_num)
model_name += '_temper{}'.format(params.routing.temper)
model_name += '_atoms{}'.format(params.caps.atoms)
model_name += '_trial{}'.format(str(params.training.idx))
model_name += '_bs{}'.format(str(params.training.batch_size))
if params.dataset.flip:
model_name += '_flip'
if params.dataset.crop:
model_name += '_crop'
return model_name
def get_loss_opt(type):
optimizer = keras.optimizers.Adam(0.0001)
if type == 'DR' or type == 'EM':
loss = losses.MarginLoss(sparse=False, upper_margin=0.9, bottom_margin=0.1, down_weight=0.5)
else:
loss = keras.losses.CategoricalCrossentropy(from_logits=True)
return loss, optimizer
def build_model(num_out, params):
model_name = build_model_name(params)
inputs, probs, tensor_log = build(num_out,
params.model.backbone,
params.model.fine,
params.routing.type,
params.routing.iter_num,
params.routing.temper,
params.caps.parts,
params.caps.atoms
)
model = keras.Model(inputs=inputs, outputs=probs, name=model_name)
log_model = keras.Model(inputs=inputs, outputs=tensor_log.get_outputs(), name=model_name + '_log')
tensor_log.set_model(log_model)
loss, optimizer = get_loss_opt(params.routing.type)
model.compile(optimizer=optimizer,
loss=loss,
metrics=[])
model.summary()
model.callbacks = []
return model, tensor_log
def build(num_out, backbone, fine, routing, iter_num, temper, parts, atoms):
log = utils.TensorLog()
if backbone == 'VGG16':
in_shape = (224, 224, 3)
base = keras.applications.VGG16(include_top=False, input_shape=in_shape)
elif backbone == 'VGG19':
in_shape = (224, 224, 3)
base = keras.applications.VGG19(include_top=False, input_shape=in_shape)
elif backbone == 'InceptionV3':
in_shape = (299, 299, 3)
base = keras.applications.InceptionV3(include_top=False, input_shape=in_shape)
elif backbone == 'ResNet50':
in_shape = (224, 224, 3)
base = keras.applications.ResNet50(include_top=False, input_shape=in_shape)
else:
in_shape = (299, 299, 3)
base = keras.applications.InceptionV3(include_top=False, input_shape=in_shape)
layer_num = len(base.layers)
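    # freeze everything except the last `fine` layers; kernels that stay trainable get L2 weight decay re-attached below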
for i, layer in enumerate(base.layers):
if i < layer_num-fine:
layer.trainable = False
else:
            for w in layer.weights:
                if 'kernel' in w.name:
                    # bind the weight via a default argument so each closure regularizes its own kernel lazily
                    layer.add_loss(lambda w=w: kernel_regularizer(w))
inputs = keras.Input(in_shape)
features = base(inputs)
interpretable = keras.layers.Conv2D(filters=parts,
kernel_size=1,
activation='relu',
kernel_initializer=kernel_initializer,
kernel_regularizer=kernel_regularizer)(features)
shape = interpretable.get_shape().as_list()
if routing == 'avg':
pool = keras.layers.GlobalAveragePooling2D()(interpretable)
output = keras.layers.Dense(num_out)(pool)
elif routing == 'max':
pool = keras.layers.GlobalMaxPooling2D()(interpretable)
output = keras.layers.Dense(num_out)(pool)
elif routing == 'DR':
child_pose, child_prob = layers.CapsuleGroups(height=shape[1], width=shape[2], channel=shape[3],
atoms=16,
method='channel',
activation='squash')(interpretable)
log.add_hist('child_activation', child_prob)
transformed_caps = layers.CapsuleTransformDense(num_out=num_out,
out_atom=atoms,
share_weights=False,
initializer=keras.initializers.glorot_normal(),
regularizer=kernel_regularizer)(child_pose)
parent_poses, parent_probs, cs = layers.DynamicRouting(num_routing=iter_num,
softmax_in=False,
temper=temper,
activation='squash',
pooling=False,
log=log)((transformed_caps, child_prob))
log.add_hist('parent_activation', parent_probs[-1])
output = parent_probs[-1]
return inputs, output, log
def main():
args, params = config.parse_args()
if params.task == 'train':
params.dataset.name = 'voc2010'
if params.model.backbone == 'InceptionV3':
data_shape = (299, 299, 3)
else:
data_shape = (224, 224, 3)
train_set, test_set, info = voc_parts.build_dataset3(batch_size=params.training.batch_size,
shape=data_shape,
arch=params.model.backbone)
model, tensor_log = build_model(num_out=info.features['label'].num_classes,
params=params)
trainer = train.Trainer(model, params, info, tensor_log, finetune=True, inference_label=False, max_save=1)
trainer.metrics['accuracy'] = tf.keras.metrics.CategoricalAccuracy(name='accuracy')
if args.train:
trainer.fit(train_set, test_set)
else:
trainer.evaluate(test_set)
elif params.task == 'attack':
do_adv(os.getcwd())
elif params.task == 'score':
compute_entropies(os.getcwd())
def load_ckpt(model, model_dir):
model.compile(optimizer=keras.optimizers.Adam(0.0001),
loss=keras.losses.CategoricalCrossentropy(from_logits=False),
metrics=[])
ckpt = tf.train.Checkpoint(optimizer=model.optimizer, net=model)
manager = tf.train.CheckpointManager(ckpt, model_dir, max_to_keep=3)
ckpt.restore(manager.latest_checkpoint)
if manager.latest_checkpoint:
print("Restored from {}".format(manager.latest_checkpoint))
def get_model_dir(backbone, log='log', routing='avg', dataset='voc2010',
iter_num=None, temper=None, atoms=None,
finetune=0, parts=128, bs=32, idx=1):
model_dir = '{}/{}/{}_{}_fine{}_part{}_{}'.format(log, dataset, BASE_NAME, backbone, finetune, parts, routing)
if routing == 'DR' or routing == 'EM':
model_dir += '_iter{}'.format(iter_num)
model_dir += '_temper{}'.format(temper)
model_dir += '_atoms{}'.format(atoms)
model_dir += '_trial{}_bs{}_flip_crop'.format(idx, bs)
if not os.path.exists(model_dir):
raise Exception('model not exist:{}'.format(model_dir))
return model_dir
def load_model(backbone, iter_num, temper, atoms=16,
log='log', routing='DR',
finetune=0, parts=128, bs=128, idx=1):
data_shape = utils.get_shape(backbone)
model_dir = get_model_dir(backbone=backbone,
log=log,
routing=routing,
finetune=finetune,
parts=parts,
bs=bs,
iter_num=iter_num,
temper=temper,
atoms=atoms,
idx=idx)
inputs, probs, log = build(6, backbone, finetune, routing, iter_num, temper, parts, atoms)
model = keras.Model(inputs=inputs, outputs=probs, name='x')
load_ckpt(model, model_dir)
return model, data_shape, model_dir
def evaluate_attack(epsilons, root='', log='log', backbone='InceptionV3', metric='acc', all_target=False,
method='FGSM', steps=10,
finetune=0, routing='DR', black_box=False, iter_num=10, temper=1.0, atoms=16, parts=128, bs=64, idx=1):
model, data_shape, model_dir = load_model(log=root + log,
backbone=backbone,
routing=routing,
iter_num=iter_num,
temper=temper,
atoms=atoms,
parts=parts,
bs=bs,
finetune=finetune,
idx=idx)
if black_box:
print('load black box source model')
model_src, data_shape, model_dir = load_model(log=root + log,
backbone=backbone,
routing=routing,
iter_num=iter_num,
temper=temper,
atoms=atoms,
parts=parts,
bs=bs,
finetune=finetune,
idx=2)
else:
model_src = model
loss, _ = get_loss_opt(routing)
_, test_set, info = voc_parts.build_dataset3(root + 'data', batch_size=32, shape=data_shape)
acc_adv = keras.metrics.CategoricalAccuracy(name='acc_adv')
if metric == 'acc':
results = attacks.evaluate_model_after_attacks(epsilons, acc_adv, test_set, model, loss, method=method, steps=steps, label_sparse=False, cost=True, model_src=model_src)
elif metric == 'success':
if all_target:
categories = [i for i in range(6)]
results = attacks.evaluate_attacks_success_rate_all_target(epsilons, test_set, model, loss, categories, method=method, steps=steps, label_sparse=False, cost=True, model_src=model_src)
else:
results = attacks.evaluate_attacks_success_rate(epsilons, test_set, model, loss, method=method, steps=steps, label_sparse=False, cost=True, model_src=model_src)
return results
def do_adv(root):
epsilons = [0.1, 0.2, 0.3]
tempers = [0.0, 20.0, 40.0, 60.0, 80.0]
parts_list = [128]
all_target = False
black_box = False
methods = ['PGD', 'BIM', 'FGSM']
backbones = ['InceptionV3']
routing = 'DR'
for backbone in backbones:
print('backbone:', backbone)
for parts in parts_list:
print('parts:', parts)
for method in methods:
print('method:', method)
if routing == 'avg' or routing == 'max':
tempers = [-1]
for temper in tempers:
print('temper:', temper)
if all_target:
epsilons = [0.1]
evaluate_attack(epsilons,
root=root,
backbone=backbone,
metric='success',
all_target=all_target,
method=method,
steps=5,
routing=routing,
black_box=black_box,
parts=parts,
iter_num=2,
temper=temper,
atoms=16,
bs=64,
idx=1)
def compute_entropy(root,
backbone='InceptionV3',
iter_num=2,
activated=True,
temper=10.0,
atoms=16,
routing='DR',
finetune=0,
parts=128,
bs=32):
model, data_shape, model_dir = load_model(log=root + 'log',
backbone=backbone,
iter_num=iter_num,
temper=temper,
atoms=atoms,
routing=routing,
finetune=finetune,
parts=parts,
bs=bs)
train_set, test_set, info = voc_parts.build_dataset3(root + 'data', batch_size=32, shape=data_shape)
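    # tap the CapsuleGroups and DynamicRouting layers (indices 3 and 5 of the DR model built above)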
test_model = keras.Model(model.layers[0].input, [model.layers[3].output, model.layers[5].output])
results = []
for images, labels in test_set:
(child_poses, child_probs), (parent_poses, parent_probs, cs) = test_model(images)
c = cs[-1]
if activated:
entropy = activated_entropy(c, child_probs)
else:
entropy = coupling_entropy(c)
results.append(entropy)
results = np.concatenate(results, 0)
mean = np.mean(results)
std = np.std(results)
print('{:.4}/{:.3}'.format(mean, std))
def compute_entropies(root):
tempers = [0.0, 20.0, 40.0, 60.0, 80.0]
for temper in tempers:
print('temper:{}'.format(temper))
compute_entropy(root,
backbone='InceptionV3',
iter_num=2,
temper=temper,
atoms=16,
routing='DR',
finetune=0,
parts=128,
bs=64)
if __name__ == "__main__":
main()
|
[
"tensorflow.keras.applications.VGG19",
"tensorflow.keras.layers.Dense",
"tensorflow.keras.metrics.CategoricalAccuracy",
"common.attacks.evaluate_attacks_success_rate",
"tensorflow.keras.losses.CategoricalCrossentropy",
"common.train.Trainer",
"numpy.mean",
"config.parse_args",
"common.utils.get_shape",
"tensorflow.keras.applications.VGG16",
"tensorflow.keras.regularizers.l2",
"tensorflow.get_logger",
"tensorflow.train.Checkpoint",
"numpy.std",
"tensorflow.keras.Input",
"os.path.exists",
"common.layers.CapsuleGroups",
"tensorflow.keras.applications.InceptionV3",
"common.attacks.evaluate_attacks_success_rate_all_target",
"tensorflow.keras.optimizers.Adam",
"tensorflow.keras.applications.ResNet50",
"tensorflow.keras.layers.GlobalAveragePooling2D",
"common.utils.TensorLog",
"common.inputs.voc2010.voc_parts.build_dataset3",
"tensorflow.keras.Model",
"common.losses.MarginLoss",
"common.ops.routing.activated_entropy",
"numpy.concatenate",
"tensorflow.keras.initializers.glorot_normal",
"tensorflow.keras.layers.GlobalMaxPooling2D",
"tensorflow.keras.layers.Conv2D",
"os.getcwd",
"common.ops.routing.coupling_entropy",
"common.layers.DynamicRouting",
"tensorflow.train.CheckpointManager",
"common.attacks.evaluate_model_after_attacks",
"tensorflow.keras.initializers.he_normal"
] |
[((384, 419), 'tensorflow.keras.regularizers.l2', 'keras.regularizers.l2', (['WEIGHT_DECAY'], {}), '(WEIGHT_DECAY)\n', (405, 419), False, 'from tensorflow import keras\n'), ((441, 471), 'tensorflow.keras.initializers.he_normal', 'keras.initializers.he_normal', ([], {}), '()\n', (469, 471), False, 'from tensorflow import keras\n'), ((38, 49), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (47, 49), False, 'import os\n'), ((1329, 1358), 'tensorflow.keras.optimizers.Adam', 'keras.optimizers.Adam', (['(0.0001)'], {}), '(0.0001)\n', (1350, 1358), False, 'from tensorflow import keras\n'), ((2195, 2253), 'tensorflow.keras.Model', 'keras.Model', ([], {'inputs': 'inputs', 'outputs': 'probs', 'name': 'model_name'}), '(inputs=inputs, outputs=probs, name=model_name)\n', (2206, 2253), False, 'from tensorflow import keras\n'), ((2710, 2727), 'common.utils.TensorLog', 'utils.TensorLog', ([], {}), '()\n', (2725, 2727), False, 'from common import layers, losses, utils, train, attacks\n'), ((3789, 3810), 'tensorflow.keras.Input', 'keras.Input', (['in_shape'], {}), '(in_shape)\n', (3800, 3810), False, 'from tensorflow import keras\n'), ((6016, 6035), 'config.parse_args', 'config.parse_args', ([], {}), '()\n', (6033, 6035), False, 'import config\n'), ((7323, 7380), 'tensorflow.train.Checkpoint', 'tf.train.Checkpoint', ([], {'optimizer': 'model.optimizer', 'net': 'model'}), '(optimizer=model.optimizer, net=model)\n', (7342, 7380), True, 'import tensorflow as tf\n'), ((7395, 7453), 'tensorflow.train.CheckpointManager', 'tf.train.CheckpointManager', (['ckpt', 'model_dir'], {'max_to_keep': '(3)'}), '(ckpt, model_dir, max_to_keep=3)\n', (7421, 7453), True, 'import tensorflow as tf\n'), ((8439, 8464), 'common.utils.get_shape', 'utils.get_shape', (['backbone'], {}), '(backbone)\n', (8454, 8464), False, 'from common import layers, losses, utils, train, attacks\n'), ((9012, 9063), 'tensorflow.keras.Model', 'keras.Model', ([], {'inputs': 'inputs', 'outputs': 'probs', 'name': '"""x"""'}), "(inputs=inputs, outputs=probs, name='x')\n", (9023, 9063), False, 'from tensorflow import keras\n'), ((10865, 10937), 'common.inputs.voc2010.voc_parts.build_dataset3', 'voc_parts.build_dataset3', (["(root + 'data')"], {'batch_size': '(32)', 'shape': 'data_shape'}), "(root + 'data', batch_size=32, shape=data_shape)\n", (10889, 10937), False, 'from common.inputs.voc2010 import voc_parts\n'), ((10953, 11002), 'tensorflow.keras.metrics.CategoricalAccuracy', 'keras.metrics.CategoricalAccuracy', ([], {'name': '"""acc_adv"""'}), "(name='acc_adv')\n", (10986, 11002), False, 'from tensorflow import keras\n'), ((14082, 14154), 'common.inputs.voc2010.voc_parts.build_dataset3', 'voc_parts.build_dataset3', (["(root + 'data')"], {'batch_size': '(32)', 'shape': 'data_shape'}), "(root + 'data', batch_size=32, shape=data_shape)\n", (14106, 14154), False, 'from common.inputs.voc2010 import voc_parts\n'), ((14172, 14261), 'tensorflow.keras.Model', 'keras.Model', (['model.layers[0].input', '[model.layers[3].output, model.layers[5].output]'], {}), '(model.layers[0].input, [model.layers[3].output, model.layers[5]\n .output])\n', (14183, 14261), False, 'from tensorflow import keras\n'), ((14599, 14625), 'numpy.concatenate', 'np.concatenate', (['results', '(0)'], {}), '(results, 0)\n', (14613, 14625), True, 'import numpy as np\n'), ((14637, 14653), 'numpy.mean', 'np.mean', (['results'], {}), '(results)\n', (14644, 14653), True, 'import numpy as np\n'), ((14664, 14679), 'numpy.std', 'np.std', (['results'], {}), '(results)\n', (14670, 14679), True, 'import numpy 
as np\n'), ((76, 91), 'tensorflow.get_logger', 'tf.get_logger', ([], {}), '()\n', (89, 91), True, 'import tensorflow as tf\n'), ((1411, 1500), 'common.losses.MarginLoss', 'losses.MarginLoss', ([], {'sparse': '(False)', 'upper_margin': '(0.9)', 'bottom_margin': '(0.1)', 'down_weight': '(0.5)'}), '(sparse=False, upper_margin=0.9, bottom_margin=0.1,\n down_weight=0.5)\n', (1428, 1500), False, 'from common import layers, losses, utils, train, attacks\n'), ((1522, 1576), 'tensorflow.keras.losses.CategoricalCrossentropy', 'keras.losses.CategoricalCrossentropy', ([], {'from_logits': '(True)'}), '(from_logits=True)\n', (1558, 1576), False, 'from tensorflow import keras\n'), ((2804, 2869), 'tensorflow.keras.applications.VGG16', 'keras.applications.VGG16', ([], {'include_top': '(False)', 'input_shape': 'in_shape'}), '(include_top=False, input_shape=in_shape)\n', (2828, 2869), False, 'from tensorflow import keras\n'), ((3859, 4014), 'tensorflow.keras.layers.Conv2D', 'keras.layers.Conv2D', ([], {'filters': 'parts', 'kernel_size': '(1)', 'activation': '"""relu"""', 'kernel_initializer': 'kernel_initializer', 'kernel_regularizer': 'kernel_regularizer'}), "(filters=parts, kernel_size=1, activation='relu',\n kernel_initializer=kernel_initializer, kernel_regularizer=\n kernel_regularizer)\n", (3878, 4014), False, 'from tensorflow import keras\n'), ((6286, 6400), 'common.inputs.voc2010.voc_parts.build_dataset3', 'voc_parts.build_dataset3', ([], {'batch_size': 'params.training.batch_size', 'shape': 'data_shape', 'arch': 'params.model.backbone'}), '(batch_size=params.training.batch_size, shape=\n data_shape, arch=params.model.backbone)\n', (6310, 6400), False, 'from common.inputs.voc2010 import voc_parts\n'), ((6664, 6764), 'common.train.Trainer', 'train.Trainer', (['model', 'params', 'info', 'tensor_log'], {'finetune': '(True)', 'inference_label': '(False)', 'max_save': '(1)'}), '(model, params, info, tensor_log, finetune=True,\n inference_label=False, max_save=1)\n', (6677, 6764), False, 'from common import layers, losses, utils, train, attacks\n'), ((6799, 6852), 'tensorflow.keras.metrics.CategoricalAccuracy', 'tf.keras.metrics.CategoricalAccuracy', ([], {'name': '"""accuracy"""'}), "(name='accuracy')\n", (6835, 6852), True, 'import tensorflow as tf\n'), ((8161, 8186), 'os.path.exists', 'os.path.exists', (['model_dir'], {}), '(model_dir)\n', (8175, 8186), False, 'import os\n'), ((11045, 11211), 'common.attacks.evaluate_model_after_attacks', 'attacks.evaluate_model_after_attacks', (['epsilons', 'acc_adv', 'test_set', 'model', 'loss'], {'method': 'method', 'steps': 'steps', 'label_sparse': '(False)', 'cost': '(True)', 'model_src': 'model_src'}), '(epsilons, acc_adv, test_set, model,\n loss, method=method, steps=steps, label_sparse=False, cost=True,\n model_src=model_src)\n', (11081, 11211), False, 'from common import layers, losses, utils, train, attacks\n'), ((2948, 3013), 'tensorflow.keras.applications.VGG19', 'keras.applications.VGG19', ([], {'include_top': '(False)', 'input_shape': 'in_shape'}), '(include_top=False, input_shape=in_shape)\n', (2972, 3013), False, 'from tensorflow import keras\n'), ((4265, 4302), 'tensorflow.keras.layers.GlobalAveragePooling2D', 'keras.layers.GlobalAveragePooling2D', ([], {}), '()\n', (4300, 4302), False, 'from tensorflow import keras\n'), ((4335, 4362), 'tensorflow.keras.layers.Dense', 'keras.layers.Dense', (['num_out'], {}), '(num_out)\n', (4353, 4362), False, 'from tensorflow import keras\n'), ((7171, 7200), 'tensorflow.keras.optimizers.Adam', 'keras.optimizers.Adam', 
(['(0.0001)'], {}), '(0.0001)\n', (7192, 7200), False, 'from tensorflow import keras\n'), ((7225, 7280), 'tensorflow.keras.losses.CategoricalCrossentropy', 'keras.losses.CategoricalCrossentropy', ([], {'from_logits': '(False)'}), '(from_logits=False)\n', (7261, 7280), False, 'from tensorflow import keras\n'), ((14463, 14496), 'common.ops.routing.activated_entropy', 'activated_entropy', (['c', 'child_probs'], {}), '(c, child_probs)\n', (14480, 14496), False, 'from common.ops.routing import activated_entropy, coupling_entropy\n'), ((14533, 14552), 'common.ops.routing.coupling_entropy', 'coupling_entropy', (['c'], {}), '(c)\n', (14549, 14552), False, 'from common.ops.routing import activated_entropy, coupling_entropy\n'), ((3098, 3169), 'tensorflow.keras.applications.InceptionV3', 'keras.applications.InceptionV3', ([], {'include_top': '(False)', 'input_shape': 'in_shape'}), '(include_top=False, input_shape=in_shape)\n', (3128, 3169), False, 'from tensorflow import keras\n'), ((4411, 4444), 'tensorflow.keras.layers.GlobalMaxPooling2D', 'keras.layers.GlobalMaxPooling2D', ([], {}), '()\n', (4442, 4444), False, 'from tensorflow import keras\n'), ((4477, 4504), 'tensorflow.keras.layers.Dense', 'keras.layers.Dense', (['num_out'], {}), '(num_out)\n', (4495, 4504), False, 'from tensorflow import keras\n'), ((7023, 7034), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (7032, 7034), False, 'import os\n'), ((11326, 11508), 'common.attacks.evaluate_attacks_success_rate_all_target', 'attacks.evaluate_attacks_success_rate_all_target', (['epsilons', 'test_set', 'model', 'loss', 'categories'], {'method': 'method', 'steps': 'steps', 'label_sparse': '(False)', 'cost': '(True)', 'model_src': 'model_src'}), '(epsilons, test_set, model,\n loss, categories, method=method, steps=steps, label_sparse=False, cost=\n True, model_src=model_src)\n', (11374, 11508), False, 'from common import layers, losses, utils, train, attacks\n'), ((11536, 11695), 'common.attacks.evaluate_attacks_success_rate', 'attacks.evaluate_attacks_success_rate', (['epsilons', 'test_set', 'model', 'loss'], {'method': 'method', 'steps': 'steps', 'label_sparse': '(False)', 'cost': '(True)', 'model_src': 'model_src'}), '(epsilons, test_set, model, loss,\n method=method, steps=steps, label_sparse=False, cost=True, model_src=\n model_src)\n', (11573, 11695), False, 'from common import layers, losses, utils, train, attacks\n'), ((3251, 3319), 'tensorflow.keras.applications.ResNet50', 'keras.applications.ResNet50', ([], {'include_top': '(False)', 'input_shape': 'in_shape'}), '(include_top=False, input_shape=in_shape)\n', (3278, 3319), False, 'from tensorflow import keras\n'), ((3378, 3449), 'tensorflow.keras.applications.InceptionV3', 'keras.applications.InceptionV3', ([], {'include_top': '(False)', 'input_shape': 'in_shape'}), '(include_top=False, input_shape=in_shape)\n', (3408, 3449), False, 'from tensorflow import keras\n'), ((4570, 4694), 'common.layers.CapsuleGroups', 'layers.CapsuleGroups', ([], {'height': 'shape[1]', 'width': 'shape[2]', 'channel': 'shape[3]', 'atoms': '(16)', 'method': '"""channel"""', 'activation': '"""squash"""'}), "(height=shape[1], width=shape[2], channel=shape[3],\n atoms=16, method='channel', activation='squash')\n", (4590, 4694), False, 'from common import layers, losses, utils, train, attacks\n'), ((5388, 5513), 'common.layers.DynamicRouting', 'layers.DynamicRouting', ([], {'num_routing': 'iter_num', 'softmax_in': '(False)', 'temper': 'temper', 'activation': '"""squash"""', 'pooling': '(False)', 'log': 'log'}), 
"(num_routing=iter_num, softmax_in=False, temper=temper,\n activation='squash', pooling=False, log=log)\n", (5409, 5513), False, 'from common import layers, losses, utils, train, attacks\n'), ((7095, 7106), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (7104, 7106), False, 'import os\n'), ((5211, 5245), 'tensorflow.keras.initializers.glorot_normal', 'keras.initializers.glorot_normal', ([], {}), '()\n', (5243, 5245), False, 'from tensorflow import keras\n')]
|
# -*- coding: utf-8 -*-
"""
Displays some of the augmented images. Can be used to visually check that augmentation is working fine.
Created on Wed May 8 12:12:26 2019
@author: lbechberger
"""
import argparse, pickle
import matplotlib.pyplot as plt
import tensorflow as tf
parser = argparse.ArgumentParser(description='Visualizing augmented images')
parser.add_argument('input_file', help = 'pickle file containing the augmented images to display')
parser.add_argument('-r', '--rows', type = int, help = 'number of rows', default = 3)
parser.add_argument('-c', '--columns', type = int, help = 'number of columns', default = 4)
args = parser.parse_args()
with open(args.input_file, "rb") as f:
images = pickle.load(f)
# need to convert tensorflow string representation into numbers
tf_image_string = tf.placeholder(tf.string)
decoder = tf.image.decode_jpeg(tf_image_string)
fig = plt.figure(figsize=(16,10))
with tf.Session() as session:
session.run(tf.global_variables_initializer())
for i in range(args.rows * args.columns):
ax = fig.add_subplot(args.rows, args.columns, i+1)
img = session.run(decoder, feed_dict = {tf_image_string : images[i]})
# deal with greyscale images
if img.shape[2] == 1:
img = img.reshape((img.shape[0], img.shape[1]))
ax.imshow(img, cmap = "gray")
else:
ax.imshow(img)
plt.show()
|
[
"matplotlib.pyplot.show",
"argparse.ArgumentParser",
"tensorflow.global_variables_initializer",
"tensorflow.Session",
"tensorflow.placeholder",
"matplotlib.pyplot.figure",
"pickle.load",
"tensorflow.image.decode_jpeg"
] |
[((286, 353), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""Visualizing augmented images"""'}), "(description='Visualizing augmented images')\n", (309, 353), False, 'import argparse, pickle\n'), ((809, 834), 'tensorflow.placeholder', 'tf.placeholder', (['tf.string'], {}), '(tf.string)\n', (823, 834), True, 'import tensorflow as tf\n'), ((845, 882), 'tensorflow.image.decode_jpeg', 'tf.image.decode_jpeg', (['tf_image_string'], {}), '(tf_image_string)\n', (865, 882), True, 'import tensorflow as tf\n'), ((890, 918), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(16, 10)'}), '(figsize=(16, 10))\n', (900, 918), True, 'import matplotlib.pyplot as plt\n'), ((1412, 1422), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (1420, 1422), True, 'import matplotlib.pyplot as plt\n'), ((711, 725), 'pickle.load', 'pickle.load', (['f'], {}), '(f)\n', (722, 725), False, 'import argparse, pickle\n'), ((924, 936), 'tensorflow.Session', 'tf.Session', ([], {}), '()\n', (934, 936), True, 'import tensorflow as tf\n'), ((965, 998), 'tensorflow.global_variables_initializer', 'tf.global_variables_initializer', ([], {}), '()\n', (996, 998), True, 'import tensorflow as tf\n')]
|
from teca import *
import numpy as np
import sys
def get_request_callback(rank, var_names):
def request(port, md_in, req_in):
sys.stderr.write('descriptive_stats::request MPI %d\n'%(rank))
req = teca_metadata(req_in)
req['arrays'] = var_names
return [req]
return request
def get_execute_callback(rank, var_names):
def execute(port, data_in, req):
sys.stderr.write('descriptive_stats::execute MPI %d\n'%(rank))
mesh = as_teca_cartesian_mesh(data_in[0])
table = teca_table.New()
table.declare_columns(['step','time'], ['ul','d'])
table << mesh.get_time_step() << mesh.get_time()
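        # for each requested variable, declare min/avg/max/std and quartile columns and stream the computed values into the row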
for var_name in var_names:
table.declare_columns(['min '+var_name, 'avg '+var_name, \
'max '+var_name, 'std '+var_name, 'low_q '+var_name, \
'med '+var_name, 'up_q '+var_name], ['d']*7)
var = mesh.get_point_arrays().get(var_name).as_array()
table << float(np.min(var)) << float(np.average(var)) \
<< float(np.max(var)) << float(np.std(var)) \
<< map(float, np.percentile(var, [25.,50.,75.]))
return table
return execute
|
[
"numpy.average",
"numpy.std",
"numpy.percentile",
"numpy.max",
"numpy.min",
"sys.stderr.write"
] |
[((139, 201), 'sys.stderr.write', 'sys.stderr.write', (["('descriptive_stats::request MPI %d\\n' % rank)"], {}), "('descriptive_stats::request MPI %d\\n' % rank)\n", (155, 201), False, 'import sys\n'), ((401, 463), 'sys.stderr.write', 'sys.stderr.write', (["('descriptive_stats::execute MPI %d\\n' % rank)"], {}), "('descriptive_stats::execute MPI %d\\n' % rank)\n", (417, 463), False, 'import sys\n'), ((1134, 1172), 'numpy.percentile', 'np.percentile', (['var', '[25.0, 50.0, 75.0]'], {}), '(var, [25.0, 50.0, 75.0])\n', (1147, 1172), True, 'import numpy as np\n'), ((1089, 1100), 'numpy.std', 'np.std', (['var'], {}), '(var)\n', (1095, 1100), True, 'import numpy as np\n'), ((1067, 1078), 'numpy.max', 'np.max', (['var'], {}), '(var)\n', (1073, 1078), True, 'import numpy as np\n'), ((1023, 1038), 'numpy.average', 'np.average', (['var'], {}), '(var)\n', (1033, 1038), True, 'import numpy as np\n'), ((1001, 1012), 'numpy.min', 'np.min', (['var'], {}), '(var)\n', (1007, 1012), True, 'import numpy as np\n')]
|
# Copyright 2019-2020 by <NAME>, MGLAND animation studio. All rights reserved.
# This file is part of IUTest, and is released under the "MIT License Agreement".
# Please see the LICENSE file that should have been included as part of this package.
import os
from iutest.core import pathutils
from iutest.qt import iconFromPath
def _iconDir():
return os.path.join(pathutils.iutestPackageDir(), "icons")
def iconPath(iconName):
return os.path.join(_iconDir(), iconName)
def iconPathSet(iconName, suffixes):
iconDir = _iconDir()
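    # partition "name.ext" into (name, ".", ext) and reserve a slot so each suffix is spliced in just before the extension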
nameParts = list(iconName.partition("."))
nameParts.insert(1, None)
paths = []
for suffix in suffixes:
nameParts[1] = suffix
fileName = "".join(nameParts)
paths.append(os.path.join(iconDir, fileName))
return paths
def initSingleClassIcon(obj, objAttributeName, iconFileName):
path = iconPath(iconFileName)
setattr(obj, objAttributeName, iconFromPath(path))
|
[
"iutest.core.pathutils.iutestPackageDir",
"os.path.join",
"iutest.qt.iconFromPath"
] |
[((369, 397), 'iutest.core.pathutils.iutestPackageDir', 'pathutils.iutestPackageDir', ([], {}), '()\n', (395, 397), False, 'from iutest.core import pathutils\n'), ((936, 954), 'iutest.qt.iconFromPath', 'iconFromPath', (['path'], {}), '(path)\n', (948, 954), False, 'from iutest.qt import iconFromPath\n'), ((752, 783), 'os.path.join', 'os.path.join', (['iconDir', 'fileName'], {}), '(iconDir, fileName)\n', (764, 783), False, 'import os\n')]
|
#!/usr/bin/env python
'''
Search a p4 for good indices. This imports the file specified by
the modulepath option, reads a function called "f" from it,
and filters the frames using it. Outputs a numpy array of
good trajectories.
Usage:
./search.py [options] <input> <hashd> <output>
Options:
--help -h Print this help.
--modulepath=M Set the path to the file to read "f"
from. [default: ./scanner.py]
'''
from lspreader import read;
from pys import load_pickle;
from lspreader.pmovie import filter_hashes_from_file;
import numpy as np;
import re;
import imp;
if __name__ == "__main__":
from docopt import docopt;
opts=docopt(__doc__,help=True);
fname = opts['--modulepath'];
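    # split the module path into its directory and bare module name so imp can locate and load it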
m=re.search(r'(^.*)/(\w+)\.py$', fname);
if not m:
raise ValueError("module should be well named!");
path =m.group(1);
mname=m.group(2);
fp, path,desc = imp.find_module(mname, [path]);
try:
f=imp.load_module(mname, fp, path, desc).f
finally:
if fp:
fp.close();
hashd = load_pickle(opts['<hashd>']);
np.save(
opts['<output>'],
filter_hashes_from_file(opts['<input>'], f, **hashd));
|
[
"lspreader.pmovie.filter_hashes_from_file",
"imp.find_module",
"docopt.docopt",
"imp.load_module",
"re.search",
"pys.load_pickle"
] |
[((705, 731), 'docopt.docopt', 'docopt', (['__doc__'], {'help': '(True)'}), '(__doc__, help=True)\n', (711, 731), False, 'from docopt import docopt\n'), ((772, 810), 're.search', 're.search', (['"""(^.*)/(\\\\w+)\\\\.py$"""', 'fname'], {}), "('(^.*)/(\\\\w+)\\\\.py$', fname)\n", (781, 810), False, 'import re\n'), ((947, 977), 'imp.find_module', 'imp.find_module', (['mname', '[path]'], {}), '(mname, [path])\n', (962, 977), False, 'import imp\n'), ((1103, 1131), 'pys.load_pickle', 'load_pickle', (["opts['<hashd>']"], {}), "(opts['<hashd>'])\n", (1114, 1131), False, 'from pys import load_pickle\n'), ((1180, 1232), 'lspreader.pmovie.filter_hashes_from_file', 'filter_hashes_from_file', (["opts['<input>']", 'f'], {}), "(opts['<input>'], f, **hashd)\n", (1203, 1232), False, 'from lspreader.pmovie import filter_hashes_from_file\n'), ((998, 1036), 'imp.load_module', 'imp.load_module', (['mname', 'fp', 'path', 'desc'], {}), '(mname, fp, path, desc)\n', (1013, 1036), False, 'import imp\n')]
|
import asyncio
import dataclasses
import json
import re
import typing
from pathlib import Path
import aiohttp
from loguru import logger
from .utils import download_url, get_request, get_original_filename
# Format string linking to the download of a vscode extension .vsix file.
MARKETPLACE_DOWNLOAD_LINK = '''
https://marketplace.visualstudio.com/_apis/public/gallery/publishers/{publisher_name}/vsextensions/{extension_name}/{version}/vspackage
'''.strip()
# Format string linking to the marketplace page of some extension.
MARKETPLACE_PAGE_LINK = '''
https://marketplace.visualstudio.com/items?itemName={extension_id}
'''.strip()
# Regex used to extract the exact version of an extension from it's marketplace page.
VERSION_REGEX = re.compile(r'"Version":"(.*?)"')
@dataclasses.dataclass
class ExtensionPath:
"""
Dataclass for storing info regarding a certain VSCode extension.
"""
path: Path # Extension final save path.
extension_id: str # Extension ID.
version: str = 'latest' # Extension version.
def _build_extension_download_url(
extension_name: str, publisher_name: str, version: str
) -> str:
"""
Build the download url for the given parameters.
Just a shortcut for the string formatting.
:param extension_name: Desired extension name.
:type extension_name: str
:param publisher_name: Desired extension publisher's name.
:type publisher_name: str
:param version: Desired extension version.
:type version: str
:return: The formatted download url.
:rtype: str
"""
return MARKETPLACE_DOWNLOAD_LINK.format(
extension_name=extension_name, publisher_name=publisher_name, version=version
)
def _build_extension_download_url_from_ext_path(ext_path: ExtensionPath) -> str:
"""
Build the download url for the given parameters.
:param ext_path: A spec object describing the desired extension.
:type ext_path: ExtensionPath
:return: The formatted download url.
:rtype: str
"""
publisher_name, extension_name = ext_path.extension_id.split('.')
return _build_extension_download_url(extension_name, publisher_name, ext_path.version)
async def _download_extension(
session: aiohttp.ClientSession,
extension_name: str,
publisher_name: str,
version: str,
save_path: Path,
) -> None:
"""
Download an extension according to the given parameters.
When one needs to be a tiny bit more verbose than the `by_id` version.
:param session: An aiohttp session object to use.
:type session: aiohttp.ClientSession
:param extension_name: Desired extension name.
:type extension_name: str
:param publisher_name: Desired extension publisher's name.
:type publisher_name: str
:param version: Desired extension version.
:type version: str
:param save_path: Save path to downloaded the desired extension to.
:type save_path: Path
:return: None.
:rtype: None
"""
logger.info(f'Downloading {extension_name}...')
url = _build_extension_download_url(extension_name, publisher_name, version)
await download_url(session, url, save_path, return_type=bytes)
logger.info(f'Downloaded {extension_name} to {save_path}.')
async def download_extension_by_id(
session: aiohttp.ClientSession, extension_id: str, version: str, save_path: Path
) -> None:
"""
Download an extension according to the given parameters.
:param session: An aiohttp session object to use.
:type session: aiohttp.ClientSession
:param extension_id: Desired extension ID.
:type extension_id: str
:param version: Desired extension version.
:type version: str
:param save_path: Save path to downloaded the desired extension to.
:type save_path: Path
:return: None.
:rtype: None
"""
publisher_name, extension_name = extension_id.split('.')
await _download_extension(session, extension_name, publisher_name, version, save_path)
def _recursive_parse_to_dict(
root_dict: typing.Dict[str, typing.Union[str, typing.Dict]],
) -> typing.List[ExtensionPath]:
"""
Recursively parse the given config data:
If the value of a key is a dict, treat it like a directory and delve one level deeper into the value.
    If the value of a key is a string, create a spec object from it and give it its "path" down the hierarchy.
:param root_dict: The current "root" of our config.
:type root_dict: typing.Dict[str, typing.Union[str, typing.Dict]]
:raises ValueError: A given key had an empty value.
:raises TypeError: A given key was neither a str or a dict.
:return: List of spec objects parsed from the initial config.
:rtype: typing.List[ExtensionPath]
"""
path_list = []
for key, value in root_dict.items():
if isinstance(value, str):
if not value:
raise ValueError(f'Value for key {key} was empty.')
path_list.append(ExtensionPath(Path(key) / f'{value}', value))
elif isinstance(value, dict):
for ext_path in _recursive_parse_to_dict(value):
ext_path.path = Path(key, ext_path.path)
path_list.append(ext_path)
else:
raise TypeError(f'Value for key {key} was neither str or dict.')
return path_list
def parse_extensions_json(
json_data: typing.Union[typing.Dict[str, str], Path],
) -> typing.List[ExtensionPath]:
"""
    Decide whether the data provided was a Path or not and act accordingly:
If it's valid json format data, parse it and return a list of specs.
If it's a Path, open it and then do the same thing.
    :param json_data: Either a path to a json config file or its raw data (dict / list).
:type json_data: typing.Union[typing.Dict[str, str], Path]
:return: List of spec objects describing the given extensions.
:rtype: typing.List[ExtensionPath]
"""
if isinstance(json_data, Path):
with json_data.open() as json_file:
json_data = json.load(json_file)['extensions']
return _recursive_parse_to_dict(json_data)
async def get_extension_version(session: aiohttp.ClientSession, extension_id: str) -> str:
"""
Get the latest version of an extension on the marketplace.
:param session: An aiohttp session object to use.
:type session: aiohttp.ClientSession
:param extension_id: Desired marketplace extension to get the version of.
:type extension_id: str
:raises ValueError: Can't find the extension version.
:return: String of the extension's latest version.
:rtype: str
"""
logger.debug(f'Requesting version of extension {extension_id}...')
url = MARKETPLACE_PAGE_LINK.format(extension_id=extension_id)
try:
text: str = await get_request(session, url, return_type=str)
match = re.search(r'"Version":"(.*?)"', text)
if not match:
raise ValueError('Extension marketplace page data doesn\'t contain a version.')
version = match.group(1) # The captured version specifier.
except Exception as error:
logger.debug(error)
logger.warning('Can\'t get extension version, setting version to \'latest\'...')
version = 'latest'
logger.debug(f'Extension {extension_id} is of version {version}.')
return version
async def versionize_extension_paths(
session: aiohttp.ClientSession, extension_paths: typing.List[ExtensionPath]
) -> None:
"""
Add the `version` attributes to the extensions spec objects.
:param session: An aiohttp session object to use.
:type session: aiohttp.ClientSession
:param extension_paths: List of extension spec objects to patch.
:type extension_paths: typing.List[ExtensionPath]
:return: None, this patches the existing objects.
:rtype: None
"""
get_version_tasks = [
get_extension_version(session, ext_path.extension_id) for ext_path in extension_paths
]
versions = await asyncio.gather(*get_version_tasks)
for ext_path, version in zip(extension_paths, versions):
ext_path.version = version
async def patch_extension_paths(
session: aiohttp.ClientSession,
extension_paths: typing.List[ExtensionPath],
*,
versionize: bool = True,
) -> None:
"""
Fix up the extension paths by altering their name.
Basic functionality is to get the real names of extensions.
Can also append the current version number.
:param session: An aiohttp session object to use.
:type session: aiohttp.ClientSession
:param extension_paths: List of extension spec objects to patch.
:type extension_paths: typing.List[ExtensionPath]
    :param versionize: Whether to append version names to the paths, defaults to True
:type versionize: bool, optional
:return: None, this patches the existing objects.
:rtype: None
"""
if versionize:
await versionize_extension_paths(session, extension_paths)
real_name_tasks = [
get_original_filename(session, _build_extension_download_url_from_ext_path(ext_path))
for ext_path in extension_paths
]
original_filenames = await asyncio.gather(*real_name_tasks)
for filename, ext_path in zip(original_filenames, extension_paths):
ext_path.path = ext_path.path.with_name(filename)
async def download_extensions_json(
json_data: typing.Union[typing.Dict[str, str], Path],
save_path: Path,
*,
real_name: typing.Optional[bool] = None,
versionize: typing.Optional[bool] = None,
) -> None:
"""
Parse the given json data and download the given VSCode extensions into the save path.
    :param json_data: Either a path to a json config file or its raw data (dict / list).
:type json_data: typing.Union[typing.Dict[str, str], Path]
:param save_path: Save path for all the downloaded VSCode binaries.
:type save_path: Path
    :param real_name: Whether to patch the real filenames of the extensions, defaults to None (True)
:type real_name: typing.Optional[bool], optional
    :param versionize: Whether to patch the current version of the extensions, has no effect without `real_name`, defaults to None (True)
:type versionize: typing.Optional[bool], optional
:return: None.
:rtype: None
"""
if real_name is None:
real_name = True
if versionize is None:
versionize = True
extension_paths = parse_extensions_json(json_data)
async with aiohttp.ClientSession() as session:
if real_name:
await patch_extension_paths(session, extension_paths, versionize=versionize)
download_extension_tasks = []
for ext_path in extension_paths:
extension_full_save_path = save_path / ext_path.path.with_suffix('.vsix')
extension_full_save_path.parent.mkdir(parents=True, exist_ok=True)
download_extension_tasks.append(
download_extension_by_id(
session, ext_path.extension_id, ext_path.version, extension_full_save_path
)
)
await asyncio.gather(*download_extension_tasks)
|
[
"asyncio.gather",
"json.load",
"loguru.logger.warning",
"aiohttp.ClientSession",
"loguru.logger.info",
"pathlib.Path",
"loguru.logger.debug",
"re.search",
"re.compile"
] |
[((747, 778), 're.compile', 're.compile', (['""""Version":"(.*?)\\""""'], {}), '(\'"Version":"(.*?)"\')\n', (757, 778), False, 'import re\n'), ((2980, 3027), 'loguru.logger.info', 'logger.info', (['f"""Downloading {extension_name}..."""'], {}), "(f'Downloading {extension_name}...')\n", (2991, 3027), False, 'from loguru import logger\n'), ((3180, 3239), 'loguru.logger.info', 'logger.info', (['f"""Downloaded {extension_name} to {save_path}."""'], {}), "(f'Downloaded {extension_name} to {save_path}.')\n", (3191, 3239), False, 'from loguru import logger\n'), ((6607, 6673), 'loguru.logger.debug', 'logger.debug', (['f"""Requesting version of extension {extension_id}..."""'], {}), "(f'Requesting version of extension {extension_id}...')\n", (6619, 6673), False, 'from loguru import logger\n'), ((7233, 7299), 'loguru.logger.debug', 'logger.debug', (['f"""Extension {extension_id} is of version {version}."""'], {}), "(f'Extension {extension_id} is of version {version}.')\n", (7245, 7299), False, 'from loguru import logger\n'), ((6834, 6870), 're.search', 're.search', (['""""Version":"(.*?)\\""""', 'text'], {}), '(\'"Version":"(.*?)"\', text)\n', (6843, 6870), False, 'import re\n'), ((7968, 8002), 'asyncio.gather', 'asyncio.gather', (['*get_version_tasks'], {}), '(*get_version_tasks)\n', (7982, 8002), False, 'import asyncio\n'), ((9142, 9174), 'asyncio.gather', 'asyncio.gather', (['*real_name_tasks'], {}), '(*real_name_tasks)\n', (9156, 9174), False, 'import asyncio\n'), ((10444, 10467), 'aiohttp.ClientSession', 'aiohttp.ClientSession', ([], {}), '()\n', (10465, 10467), False, 'import aiohttp\n'), ((7093, 7112), 'loguru.logger.debug', 'logger.debug', (['error'], {}), '(error)\n', (7105, 7112), False, 'from loguru import logger\n'), ((7121, 7198), 'loguru.logger.warning', 'logger.warning', (['"""Can\'t get extension version, setting version to \'latest\'..."""'], {}), '("Can\'t get extension version, setting version to \'latest\'...")\n', (7135, 7198), False, 'from loguru import logger\n'), ((11063, 11104), 'asyncio.gather', 'asyncio.gather', (['*download_extension_tasks'], {}), '(*download_extension_tasks)\n', (11077, 11104), False, 'import asyncio\n'), ((6018, 6038), 'json.load', 'json.load', (['json_file'], {}), '(json_file)\n', (6027, 6038), False, 'import json\n'), ((5134, 5158), 'pathlib.Path', 'Path', (['key', 'ext_path.path'], {}), '(key, ext_path.path)\n', (5138, 5158), False, 'from pathlib import Path\n'), ((4971, 4980), 'pathlib.Path', 'Path', (['key'], {}), '(key)\n', (4975, 4980), False, 'from pathlib import Path\n')]
|
from textwrap import dedent
def test_code_block(script):
"""
Test code block.
"""
script.set_content(
dedent(
"""
```python
m = {}
m["x"] = 1
```
"""
)
)
assert (
'<ac:structured-macro ac:name="code" ac:schema-version="1">'
'<ac:parameter ac:name="language">python</ac:parameter>'
"<ac:plain-text-body><![CDATA["
"m = {}\n"
'm["x"] = 1\n'
"]]></ac:plain-text-body>"
"</ac:structured-macro>"
) in script.run()
def test_code_block_default_language(script):
"""
Test code block with a default language.
"""
script.set_content(
dedent(
"""
```
cd $HOME
```
"""
)
)
assert (
'<ac:structured-macro ac:name="code" ac:schema-version="1">'
'<ac:parameter ac:name="language">bash</ac:parameter>'
"<ac:plain-text-body><![CDATA["
"cd $HOME\n"
"]]></ac:plain-text-body>"
"</ac:structured-macro>"
) in script.run()
def test_code_block_avoid_escape(script):
"""
Avoid escaping code.
"""
script.set_content(
dedent(
"""
```yaml
'test': '<[{}]>'
```
"""
)
)
assert (
'<ac:structured-macro ac:name="code" ac:schema-version="1">'
'<ac:parameter ac:name="language">yaml</ac:parameter>'
"<ac:plain-text-body><![CDATA["
"'test': '<[{}]>'\n"
"]]></ac:plain-text-body>"
"</ac:structured-macro>"
) in script.run()
def test_code_block_escape(script):
"""
If code contains "]]>" (CDATA end), split it into multiple CDATA sections.
"""
script.set_content(
dedent(
"""
```xml
<![CDATA[TEST]]>
```
"""
)
)
assert (
'<ac:structured-macro ac:name="code" ac:schema-version="1">'
'<ac:parameter ac:name="language">xml</ac:parameter>'
"<ac:plain-text-body><![CDATA[<![CDATA[TEST]]>]]><![CDATA[\n]]></ac:plain-text-body>"
"</ac:structured-macro>"
) in script.run()
|
[
"textwrap.dedent"
] |
[((128, 261), 'textwrap.dedent', 'dedent', (['"""\n ```python\n m = {}\n m["x"] = 1\n ```\n """'], {}), '(\n """\n ```python\n m = {}\n m["x"] = 1\n ```\n """\n )\n', (134, 261), False, 'from textwrap import dedent\n'), ((740, 842), 'textwrap.dedent', 'dedent', (['"""\n ```\n cd $HOME\n ```\n """'], {}), '(\n """\n ```\n cd $HOME\n ```\n """\n )\n', (746, 842), False, 'from textwrap import dedent\n'), ((1274, 1388), 'textwrap.dedent', 'dedent', (['"""\n ```yaml\n \'test\': \'<[{}]>\'\n ```\n """'], {}), '(\n """\n ```yaml\n \'test\': \'<[{}]>\'\n ```\n """\n )\n', (1280, 1388), False, 'from textwrap import dedent\n'), ((1876, 1989), 'textwrap.dedent', 'dedent', (['"""\n ```xml\n <![CDATA[TEST]]>\n ```\n """'], {}), '(\n """\n ```xml\n <![CDATA[TEST]]>\n ```\n """\n )\n', (1882, 1989), False, 'from textwrap import dedent\n')]
|
from threading import Timer
import logging
from igmp.packet.PacketIGMPHeader import PacketIGMPHeader
from igmp.packet.ReceivedPacket import ReceivedPacket
from igmp.rwlock.RWLock import RWLockWrite
from igmp.utils import TYPE_CHECKING
from . import igmp_globals
from .GroupState import GroupState
from .querier.Querier import Querier
from .nonquerier.NonQuerier import NonQuerier
if TYPE_CHECKING:
from igmp.InterfaceIGMP import InterfaceIGMP
class RouterState(object):
ROUTER_STATE_LOGGER = logging.getLogger('igmp.igmpv2.RouterState')
def __init__(self, interface: 'InterfaceIGMP'):
#logger
logger_extra = dict()
logger_extra['vif'] = interface.vif_index
logger_extra['interfacename'] = interface.interface_name
self.router_state_logger = logging.LoggerAdapter(RouterState.ROUTER_STATE_LOGGER, logger_extra)
# interface of the router connected to the network
self.interface = interface
# state of the router (Querier/NonQuerier)
self.interface_state = Querier
# state of each group
# Key: GroupIPAddress, Value: GroupState object
self.group_state = {}
self.group_state_lock = RWLockWrite()
# send general query
packet = PacketIGMPHeader(type=igmp_globals.MEMBERSHIP_QUERY,
max_resp_time=igmp_globals.QUERY_RESPONSE_INTERVAL * 10)
self.interface.send(packet.bytes())
# set initial general query timer
timer = Timer(igmp_globals.QUERY_INTERVAL, self.general_query_timeout)
timer.start()
self.general_query_timer = timer
# present timer
self.other_querier_present_timer = None
# Send packet via interface
def send(self, data: bytes, address: str):
self.interface.send(data, address)
############################################
# interface_state methods
############################################
def print_state(self):
return self.interface_state.state_name()
def set_general_query_timer(self):
"""
Set general query timer
"""
self.clear_general_query_timer()
general_query_timer = Timer(igmp_globals.QUERY_INTERVAL, self.general_query_timeout)
general_query_timer.start()
self.general_query_timer = general_query_timer
def clear_general_query_timer(self):
"""
Stop general query timer
"""
if self.general_query_timer is not None:
self.general_query_timer.cancel()
def set_other_querier_present_timer(self):
"""
Set other querier present timer
"""
self.clear_other_querier_present_timer()
other_querier_present_timer = Timer(igmp_globals.OTHER_QUERIER_PRESENT_INTERVAL, self.other_querier_present_timeout)
other_querier_present_timer.start()
self.other_querier_present_timer = other_querier_present_timer
def clear_other_querier_present_timer(self):
"""
Stop other querier present timer
"""
if self.other_querier_present_timer is not None:
self.other_querier_present_timer.cancel()
def general_query_timeout(self):
"""
General Query timer has expired
"""
self.interface_state.general_query_timeout(self)
def other_querier_present_timeout(self):
"""
Other Querier Present timer has expired
"""
self.interface_state.other_querier_present_timeout(self)
def change_interface_state(self, querier: bool):
"""
Change state regarding querier state machine (Querier/NonQuerier)
"""
if querier:
self.interface_state = Querier
self.router_state_logger.debug('change querier state to -> Querier')
else:
self.interface_state = NonQuerier
self.router_state_logger.debug('change querier state to -> NonQuerier')
############################################
# group state methods
############################################
def get_group_state(self, group_ip):
"""
Get object that monitors a given group (with group_ip IP address)
"""
with self.group_state_lock.genRlock():
if group_ip in self.group_state:
return self.group_state[group_ip]
with self.group_state_lock.genWlock():
if group_ip in self.group_state:
group_state = self.group_state[group_ip]
else:
group_state = GroupState(self, group_ip)
self.group_state[group_ip] = group_state
return group_state
def receive_v1_membership_report(self, packet: ReceivedPacket):
"""
Received IGMP Version 1 Membership Report packet
"""
igmp_group = packet.payload.group_address
self.get_group_state(igmp_group).receive_v1_membership_report()
def receive_v2_membership_report(self, packet: ReceivedPacket):
"""
Received IGMP Membership Report packet
"""
igmp_group = packet.payload.group_address
self.get_group_state(igmp_group).receive_v2_membership_report()
def receive_leave_group(self, packet: ReceivedPacket):
"""
Received IGMP Leave packet
"""
igmp_group = packet.payload.group_address
self.get_group_state(igmp_group).receive_leave_group()
def receive_query(self, packet: ReceivedPacket):
"""
Received IGMP Query packet
"""
self.interface_state.receive_query(self, packet)
igmp_group = packet.payload.group_address
# process group specific query
if igmp_group != "0.0.0.0" and igmp_group in self.group_state:
max_response_time = packet.payload.max_resp_time
self.get_group_state(igmp_group).receive_group_specific_query(max_response_time)
def remove(self):
"""
Remove this IGMP interface
Clear all state
"""
for group in self.group_state.values():
group.remove()
|
[
"threading.Timer",
"igmp.rwlock.RWLock.RWLockWrite",
"igmp.packet.PacketIGMPHeader.PacketIGMPHeader",
"logging.LoggerAdapter",
"logging.getLogger"
] |
[((505, 549), 'logging.getLogger', 'logging.getLogger', (['"""igmp.igmpv2.RouterState"""'], {}), "('igmp.igmpv2.RouterState')\n", (522, 549), False, 'import logging\n'), ((799, 867), 'logging.LoggerAdapter', 'logging.LoggerAdapter', (['RouterState.ROUTER_STATE_LOGGER', 'logger_extra'], {}), '(RouterState.ROUTER_STATE_LOGGER, logger_extra)\n', (820, 867), False, 'import logging\n'), ((1203, 1216), 'igmp.rwlock.RWLock.RWLockWrite', 'RWLockWrite', ([], {}), '()\n', (1214, 1216), False, 'from igmp.rwlock.RWLock import RWLockWrite\n'), ((1264, 1378), 'igmp.packet.PacketIGMPHeader.PacketIGMPHeader', 'PacketIGMPHeader', ([], {'type': 'igmp_globals.MEMBERSHIP_QUERY', 'max_resp_time': '(igmp_globals.QUERY_RESPONSE_INTERVAL * 10)'}), '(type=igmp_globals.MEMBERSHIP_QUERY, max_resp_time=\n igmp_globals.QUERY_RESPONSE_INTERVAL * 10)\n', (1280, 1378), False, 'from igmp.packet.PacketIGMPHeader import PacketIGMPHeader\n'), ((1511, 1573), 'threading.Timer', 'Timer', (['igmp_globals.QUERY_INTERVAL', 'self.general_query_timeout'], {}), '(igmp_globals.QUERY_INTERVAL, self.general_query_timeout)\n', (1516, 1573), False, 'from threading import Timer\n'), ((2205, 2267), 'threading.Timer', 'Timer', (['igmp_globals.QUERY_INTERVAL', 'self.general_query_timeout'], {}), '(igmp_globals.QUERY_INTERVAL, self.general_query_timeout)\n', (2210, 2267), False, 'from threading import Timer\n'), ((2752, 2843), 'threading.Timer', 'Timer', (['igmp_globals.OTHER_QUERIER_PRESENT_INTERVAL', 'self.other_querier_present_timeout'], {}), '(igmp_globals.OTHER_QUERIER_PRESENT_INTERVAL, self.\n other_querier_present_timeout)\n', (2757, 2843), False, 'from threading import Timer\n')]
|
###
# Copyright 2021 New H3C Technologies Co., Ltd.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
###
# -*- coding: utf-8 -*-
from exception.ToolException import FailException
from utils.client import RedfishClient, RestfulClient
from utils.common import Constant
from utils.model import BaseModule
from utils import globalvar
from utils.predo import GetVersion
class Controller:
def __init__(self):
self.member_id = None
self.manufacturer = None
self.model = None
self.supported_device_protocols = None
self.sas_address = None
self.firmware_version = None
self.maintain_pd_fail_history = None
self.copy_back_state = None
self.jbod_state = None
self.min_stripe_size_bytes = None
self.max_stripe_size_bytes = None
self.memory_size_mib = None
self.supported_raid_levels = None
self.ddrecc_count = None
self.temperature_celsius = None
self.package_version = None
@property
def dict(self):
return {
"MemberId": self.member_id,
"Manufacturer": self.manufacturer,
"Model": self.model,
"SupportedDeviceProtocols": self.supported_device_protocols,
"SASAddress": self.sas_address,
"FirmwareVersion": self.firmware_version,
"MaintainPDFailHistory": self.maintain_pd_fail_history,
"CopyBackState": self.copy_back_state,
"JBODState": self.jbod_state,
"MinStripeSizeBytes": self.min_stripe_size_bytes,
"MaxStripeSizeBytes": self.max_stripe_size_bytes,
"MemorySizeMiB": self.memory_size_mib,
"SupportedRAIDLevels": self.supported_raid_levels,
"DDRECCCount": self.ddrecc_count,
"TemperatureCelsius": self.temperature_celsius,
"PackageVersion": self.package_version
}
def pack_ctrl(self, controller):
self.member_id = controller.get("MemberId", None)
self.manufacturer = controller.get("Manufacturer", None)
self.model = controller.get("Name", None)
self.supported_device_protocols = (
controller.get("SupportedDeviceProtocols", None))
self.firmware_version = controller.get("FirmwareVersion", None)
self.maintain_pd_fail_history = controller.get("MaintainPDFailHistory")
self.copy_back_state = controller.get("CopyBackState", None)
if (controller.get("Oem", None) and
isinstance(controller["Oem"].get("Public", None), dict)):
oem_info = controller["Oem"]["Public"]
self.jbod_state = oem_info.get("JBODState", None)
self.package_version = oem_info.get("PackageVersion", None)
self.min_stripe_size_bytes = oem_info.get("MinStripeSizeBytes",
None)
self.max_stripe_size_bytes = oem_info.get("MaxStripeSizeBytes",
None)
if self.maintain_pd_fail_history is None:
self.maintain_pd_fail_history = oem_info.get(
"MaintainPDFailHistory", None)
if self.copy_back_state is None:
self.copy_back_state = oem_info.get("CopyBackState", None)
if oem_info.get("DDRECCCount", None) is not None:
self.ddrecc_count = oem_info.get("DDRECCCount")
else:
self.ddrecc_count = controller.get("DDRECCCount", None)
self.memory_size_mib = oem_info.get("MemorySizeMiB", None)
if oem_info.get("SupportedRAIDLevels", None) is not None:
self.supported_raid_levels = (
", ".join(oem_info["SupportedRAIDLevels"]))
self.sas_address = oem_info.get("SASAddress", None)
self.temperature_celsius = controller.get("TemperatureCelsius", None)
class Raid:
def __init__(self):
self.name = None
self.location = "mainboard"
self.manufacturer = None
self.serial_number = None
self.state = None
self.health = None
self.controller = []
@property
def dict(self):
return {
"Name": self.name,
"Location": self.location,
"Manufacturer": self.manufacturer,
"SerialNumber": self.serial_number,
"State": self.state,
"Health": self.health,
"Controller": self.controller
}
def pack_raid_resource(self, resp):
self.name = resp.get("Name", None)
raid_ctrls = resp.get("StorageControllers", None)
if isinstance(raid_ctrls, list):
for controller in raid_ctrls:
ctrl = Controller()
ctrl.pack_ctrl(controller)
self.controller.append(ctrl)
self.serial_number = controller.get("SerialNumber", None)
self.manufacturer = controller.get("Manufacturer", None)
if controller.get("Status", None):
self.state = controller["Status"].get("State", None)
self.health = controller["Status"].get("Health", None)
class GetRaid(BaseModule):
def __init__(self):
super().__init__()
self.overall_health = None
self.maximum = None
self.raids = []
@property
def dict(self):
return {
"OverallHealth": self.overall_health,
"Maximum": None,
"Raids": self.raids
}
@GetVersion()
def run(self, args):
is_adapt_b01 = globalvar.IS_ADAPT_B01
if is_adapt_b01:
client = RestfulClient(args)
try:
self._get_b01_raid(client)
finally:
if client.cookie:
client.delete_session()
else:
client = RedfishClient(args)
self._get_raid(client)
if self.suc_list:
return self.suc_list
client = RestfulClient(args)
try:
self._get_health_info(client)
finally:
if client.cookie:
client.delete_session()
return self.suc_list
def _get_health_info(self, client):
status_dict = {
"0": "OK",
"1": "Caution",
"2": "Warning",
"3": "Critical"
}
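        # /api/health_info reports disk health as a numeric code; map it to a readable status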
url = "/api/health_info"
resp = client.send_request("GET", url)
if (isinstance(resp, dict) and
Constant.SUCCESS_0 == resp.get("cc", None)):
raid_health = status_dict.get(str(resp.get("disk", None)), None)
self.overall_health = raid_health
else:
self.err_list.append("Failure: failed to get overall health "
"status information")
raise FailException(*self.err_list)
def _get_raid(self, client):
systems_id = client.get_systems_id()
url = "/redfish/v1/Systems/%s/Storages" % systems_id
resp = client.send_request("GET", url)
if (isinstance(resp, dict) and
resp.get("status_code", None) in Constant.SUC_CODE):
raid_members = resp["resource"].get("Members", None)
if not raid_members:
self.suc_list.append("Success: raid card resource is empty")
return
for member in raid_members:
url = member.get("@odata.id", None)
resp = client.send_request("GET", url)
if (isinstance(resp, dict) and
resp.get("status_code", None) in Constant.SUC_CODE):
raid = Raid()
raid.pack_raid_resource(resp["resource"])
self.raids.append(raid)
else:
self.err_list.append("Failure: failed to get raid card "
"details")
raise FailException(*self.err_list)
else:
self.err_list.append("Failure: failed to get raid card"
" collection information")
raise FailException(*self.err_list)
def _get_b01_raid(self, client):
try:
url = "/api/settings/storageinfo"
resp1 = client.send_request("GET", url)
if isinstance(resp1, dict) and \
Constant.SUCCESS_0 == resp1.get("cc"):
raid_members = resp1.get("adapter")
if not raid_members:
self.suc_list.append(
"Success: raid card resource is empty")
return
raid = Raid()
ctrl = Controller()
name = raid_members.get("type")
raid.name = name
raid.serial_number = raid_members.get("serial")
url = "/api/system/pcie"
resp2 = client.send_request("GET", url)
if isinstance(resp2, dict) and Constant.SUCCESS_0 == \
resp1.get("cc"):
pcie_members = resp2.get("pcie_info", None)
for member in pcie_members:
if member.get("produce_name") == name:
raid.location = member.get("slot", None)
ctrl.member_id = member.get("device_id", None)
ctrl.model = name
ctrl.memory_size_mib = \
raid_members.get("ddr_size", None)
raid.controller.append(ctrl)
self.raids.append(raid)
else:
self.err_list.append("Failure: failed to get raid card"
" collection information")
raise FailException(*self.err_list)
finally:
if client.cookie:
client.delete_session()
|
[
"utils.client.RedfishClient",
"utils.client.RestfulClient",
"exception.ToolException.FailException",
"utils.predo.GetVersion"
] |
[((6216, 6228), 'utils.predo.GetVersion', 'GetVersion', ([], {}), '()\n', (6226, 6228), False, 'from utils.predo import GetVersion\n'), ((6708, 6727), 'utils.client.RestfulClient', 'RestfulClient', (['args'], {}), '(args)\n', (6721, 6727), False, 'from utils.client import RedfishClient, RestfulClient\n'), ((6352, 6371), 'utils.client.RestfulClient', 'RestfulClient', (['args'], {}), '(args)\n', (6365, 6371), False, 'from utils.client import RedfishClient, RestfulClient\n'), ((6573, 6592), 'utils.client.RedfishClient', 'RedfishClient', (['args'], {}), '(args)\n', (6586, 6592), False, 'from utils.client import RedfishClient, RestfulClient\n'), ((7573, 7602), 'exception.ToolException.FailException', 'FailException', (['*self.err_list'], {}), '(*self.err_list)\n', (7586, 7602), False, 'from exception.ToolException import FailException\n'), ((8903, 8932), 'exception.ToolException.FailException', 'FailException', (['*self.err_list'], {}), '(*self.err_list)\n', (8916, 8932), False, 'from exception.ToolException import FailException\n'), ((10625, 10654), 'exception.ToolException.FailException', 'FailException', (['*self.err_list'], {}), '(*self.err_list)\n', (10638, 10654), False, 'from exception.ToolException import FailException\n'), ((8709, 8738), 'exception.ToolException.FailException', 'FailException', (['*self.err_list'], {}), '(*self.err_list)\n', (8722, 8738), False, 'from exception.ToolException import FailException\n')]
|
import torch
import argparse
import logging
from utils import corpora2idx, normalizeString
from const import *
class Dictionary(object):
def __init__(self):
self.word2idx = {
WORD[BOS]: BOS,
WORD[EOS]: EOS,
WORD[PAD]: PAD,
WORD[UNK]: UNK
}
self.idx = 4
def add(self, word):
if self.word2idx.get(word) is None:
self.word2idx[word] = self.idx
self.idx += 1
def __call__(self, sents, min_count):
words = [word for sent in sents for word in sent]
word_count = {w: 0 for w in set(words)}
for w in words: word_count[w]+=1
ignored_word_count = 0
for word, count in word_count.items():
if count <= min_count:
ignored_word_count += 1
continue
self.add(word)
return ignored_word_count
def __len__(self):
return self.idx
def __str__(self):
return "%s(size = %d)".format(self.__class__.__name__, len(self.idx))
class Corpus(object):
def __init__(self, save_data, max_len=20, min_word_count=1):
self._save_data = save_data
self._max_len = max_len
self._min_word_count = min_word_count
self.src_sents = None
self.tgt_sents = None
self.src_valid_sents = None
self.tgt_valid_sents = None
self.src_dict = Dictionary()
self.tgt_dict = Dictionary()
def parse(self):
def gather_file(file_, max_len):
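            # each corpus line holds an English and a French sentence separated by a tab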
en_sents, fra_sents, en_cut_count, fra_cut_count = [], [], 0, 0
for sentences in open(file_):
en_, fra_ = [normalizeString(s) for s in sentences.strip().split('\t')]
en_ws = [word for word in en_.strip().split()]
fra_ws = [word for word in fra_.strip().split()]
if len(en_ws) > max_len:
en_cut_count += 1
en_ws = en_ws[:max_len]
en_sents.append([WORD[BOS]] + en_ws + [WORD[EOS]])
if len(fra_ws) > max_len:
fra_cut_count += 1
fra_ws = fra_ws[:max_len]
fra_sents.append([WORD[BOS]] + fra_ws + [WORD[EOS]])
return fra_sents, en_sents, fra_cut_count, en_cut_count
max_len = self._max_len - 2
src_train, tgt_train, fra_cut_count, en_cut_count = gather_file('data/train', max_len)
src_valid, tgt_valid, _, _ = gather_file('data/test', max_len)
print("English data`s length out of range numbers - [{}]".format(en_cut_count))
print("French data`s length out of range numbers - [{}]".format(fra_cut_count))
src_ignore = self.src_dict(src_train, self._min_word_count)
tgt_ignore = self.tgt_dict(tgt_train, self._min_word_count)
if src_ignore != 0:
print("Ignored src word counts - [{}]".format(src_ignore))
if tgt_ignore != 0:
print("Ignored tgt word counts - [{}]".format(tgt_ignore))
self.src_train = src_train
self.tgt_train = tgt_train
self.src_valid = src_valid
self.tgt_valid = tgt_valid
def save(self):
data = {
'max_word_len': self._max_len,
'dict': {
'src': self.src_dict.word2idx,
'src_size': len(self.src_dict),
'tgt': self.tgt_dict.word2idx,
'tgt_size': len(self.tgt_dict)
},
'train': {
'src': corpora2idx(self.src_train, self.src_dict.word2idx),
'tgt': corpora2idx(self.tgt_train, self.tgt_dict.word2idx)
},
'valid': {
'src': corpora2idx(self.src_valid, self.src_dict.word2idx),
'tgt': corpora2idx(self.tgt_valid, self.tgt_dict.word2idx)
}
}
torch.save(data, self._save_data)
print('src corpora length - [{}] | target corpora length - [{}]'.format(len(self.src_dict), len(self.tgt_dict)))
def process(self):
self.parse()
self.save()
if __name__ == "__main__":
    parser = argparse.ArgumentParser(description='seq2seq corpora')
parser.add_argument('--save-data', type=str, default='data/seq2seq.pt',
help='path to save processed data')
parser.add_argument('--max-lenth', type=int, default=20,
help='max length of sentence')
parser.add_argument('--min-word-count', type=int, default=1,
help='min corpora count to discard')
args = parser.parse_args()
corpus = Corpus(args.save_data, args.max_lenth, args.min_word_count)
corpus.process()
|
[
"torch.save",
"utils.normalizeString",
"utils.corpora2idx",
"argparse.ArgumentParser"
] |
[((4136, 4190), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""seq2sqe corpora"""'}), "(description='seq2sqe corpora')\n", (4159, 4190), False, 'import argparse\n'), ((3875, 3908), 'torch.save', 'torch.save', (['data', 'self._save_data'], {}), '(data, self._save_data)\n', (3885, 3908), False, 'import torch\n'), ((3525, 3576), 'utils.corpora2idx', 'corpora2idx', (['self.src_train', 'self.src_dict.word2idx'], {}), '(self.src_train, self.src_dict.word2idx)\n', (3536, 3576), False, 'from utils import corpora2idx, normalizeString\n'), ((3601, 3652), 'utils.corpora2idx', 'corpora2idx', (['self.tgt_train', 'self.tgt_dict.word2idx'], {}), '(self.tgt_train, self.tgt_dict.word2idx)\n', (3612, 3652), False, 'from utils import corpora2idx, normalizeString\n'), ((3714, 3765), 'utils.corpora2idx', 'corpora2idx', (['self.src_valid', 'self.src_dict.word2idx'], {}), '(self.src_valid, self.src_dict.word2idx)\n', (3725, 3765), False, 'from utils import corpora2idx, normalizeString\n'), ((3790, 3841), 'utils.corpora2idx', 'corpora2idx', (['self.tgt_valid', 'self.tgt_dict.word2idx'], {}), '(self.tgt_valid, self.tgt_dict.word2idx)\n', (3801, 3841), False, 'from utils import corpora2idx, normalizeString\n'), ((1671, 1689), 'utils.normalizeString', 'normalizeString', (['s'], {}), '(s)\n', (1686, 1689), False, 'from utils import corpora2idx, normalizeString\n')]
|
import streamlit as st
import os
from streamlit_img_label import st_img_label
from streamlit_img_label.manage import ImageManager, ImageDirManager
def run(img_dir, labels):
st.set_option("deprecation.showfileUploaderEncoding", False)
idm = ImageDirManager(img_dir)
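    # st.session_state persists across reruns, so scan the image directory only once per session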
if "files" not in st.session_state:
st.session_state["files"] = idm.get_all_files()
st.session_state["annotation_files"] = idm.get_exist_annotation_files()
st.session_state["image_index"] = 0
else:
idm.set_all_files(st.session_state["files"])
idm.set_annotation_files(st.session_state["annotation_files"])
def refresh():
st.session_state["files"] = idm.get_all_files()
st.session_state["annotation_files"] = idm.get_exist_annotation_files()
st.session_state["image_index"] = 0
def next_image():
image_index = st.session_state["image_index"]
if image_index < len(st.session_state["files"]) - 1:
st.session_state["image_index"] += 1
else:
st.warning('This is the last image.')
def previous_image():
image_index = st.session_state["image_index"]
if image_index > 0:
st.session_state["image_index"] -= 1
else:
st.warning('This is the first image.')
def next_annotate_file():
image_index = st.session_state["image_index"]
next_image_index = idm.get_next_annotation_image(image_index)
if next_image_index:
st.session_state["image_index"] = idm.get_next_annotation_image(image_index)
else:
st.warning("All images are annotated.")
next_image()
def go_to_image():
file_index = st.session_state["files"].index(st.session_state["file"])
st.session_state["image_index"] = file_index
# Sidebar: show status
n_files = len(st.session_state["files"])
n_annotate_files = len(st.session_state["annotation_files"])
st.sidebar.write("Total files:", n_files)
st.sidebar.write("Total annotate files:", n_annotate_files)
st.sidebar.write("Remaining files:", n_files - n_annotate_files)
st.sidebar.selectbox(
"Files",
st.session_state["files"],
index=st.session_state["image_index"],
on_change=go_to_image,
key="file",
)
col1, col2 = st.sidebar.columns(2)
with col1:
st.button(label="Previous image", on_click=previous_image)
with col2:
st.button(label="Next image", on_click=next_image)
st.sidebar.button(label="Next need annotate", on_click=next_annotate_file)
st.sidebar.button(label="Refresh", on_click=refresh)
# Main content: annotate images
img_file_name = idm.get_image(st.session_state["image_index"])
img_path = os.path.join(img_dir, img_file_name)
im = ImageManager(img_path)
img = im.get_img()
resized_img = im.resizing_img()
resized_rects = im.get_resized_rects()
rects = st_img_label(resized_img, box_color="red", rects=resized_rects)
def annotate():
im.save_annotation()
image_annotate_file_name = img_file_name.split(".")[0] + ".xml"
if image_annotate_file_name not in st.session_state["annotation_files"]:
st.session_state["annotation_files"].append(image_annotate_file_name)
next_annotate_file()
if rects:
st.button(label="Save", on_click=annotate)
preview_imgs = im.init_annotation(rects)
for i, prev_img in enumerate(preview_imgs):
prev_img[0].thumbnail((200, 200))
col1, col2 = st.columns(2)
with col1:
col1.image(prev_img[0])
with col2:
default_index = 0
if prev_img[1]:
default_index = labels.index(prev_img[1])
select_label = col2.selectbox(
"Label", labels, key=f"label_{i}", index=default_index
)
im.set_annotation(i, select_label)
if __name__ == "__main__":
custom_labels = ["", "dog", "cat"]
run("img_dir", custom_labels)
|
[
"streamlit_img_label.st_img_label",
"streamlit.columns",
"streamlit.set_option",
"streamlit.sidebar.write",
"os.path.join",
"streamlit.sidebar.selectbox",
"streamlit.button",
"streamlit.sidebar.columns",
"streamlit_img_label.manage.ImageManager",
"streamlit.warning",
"streamlit.sidebar.button",
"streamlit_img_label.manage.ImageDirManager"
] |
[((178, 238), 'streamlit.set_option', 'st.set_option', (['"""deprecation.showfileUploaderEncoding"""', '(False)'], {}), "('deprecation.showfileUploaderEncoding', False)\n", (191, 238), True, 'import streamlit as st\n'), ((249, 273), 'streamlit_img_label.manage.ImageDirManager', 'ImageDirManager', (['img_dir'], {}), '(img_dir)\n', (264, 273), False, 'from streamlit_img_label.manage import ImageManager, ImageDirManager\n'), ((1969, 2010), 'streamlit.sidebar.write', 'st.sidebar.write', (['"""Total files:"""', 'n_files'], {}), "('Total files:', n_files)\n", (1985, 2010), True, 'import streamlit as st\n'), ((2015, 2074), 'streamlit.sidebar.write', 'st.sidebar.write', (['"""Total annotate files:"""', 'n_annotate_files'], {}), "('Total annotate files:', n_annotate_files)\n", (2031, 2074), True, 'import streamlit as st\n'), ((2079, 2143), 'streamlit.sidebar.write', 'st.sidebar.write', (['"""Remaining files:"""', '(n_files - n_annotate_files)'], {}), "('Remaining files:', n_files - n_annotate_files)\n", (2095, 2143), True, 'import streamlit as st\n'), ((2149, 2284), 'streamlit.sidebar.selectbox', 'st.sidebar.selectbox', (['"""Files"""', "st.session_state['files']"], {'index': "st.session_state['image_index']", 'on_change': 'go_to_image', 'key': '"""file"""'}), "('Files', st.session_state['files'], index=st.\n session_state['image_index'], on_change=go_to_image, key='file')\n", (2169, 2284), True, 'import streamlit as st\n'), ((2344, 2365), 'streamlit.sidebar.columns', 'st.sidebar.columns', (['(2)'], {}), '(2)\n', (2362, 2365), True, 'import streamlit as st\n'), ((2526, 2600), 'streamlit.sidebar.button', 'st.sidebar.button', ([], {'label': '"""Next need annotate"""', 'on_click': 'next_annotate_file'}), "(label='Next need annotate', on_click=next_annotate_file)\n", (2543, 2600), True, 'import streamlit as st\n'), ((2605, 2657), 'streamlit.sidebar.button', 'st.sidebar.button', ([], {'label': '"""Refresh"""', 'on_click': 'refresh'}), "(label='Refresh', on_click=refresh)\n", (2622, 2657), True, 'import streamlit as st\n'), ((2777, 2813), 'os.path.join', 'os.path.join', (['img_dir', 'img_file_name'], {}), '(img_dir, img_file_name)\n', (2789, 2813), False, 'import os\n'), ((2823, 2845), 'streamlit_img_label.manage.ImageManager', 'ImageManager', (['img_path'], {}), '(img_path)\n', (2835, 2845), False, 'from streamlit_img_label.manage import ImageManager, ImageDirManager\n'), ((2960, 3023), 'streamlit_img_label.st_img_label', 'st_img_label', (['resized_img'], {'box_color': '"""red"""', 'rects': 'resized_rects'}), "(resized_img, box_color='red', rects=resized_rects)\n", (2972, 3023), False, 'from streamlit_img_label import st_img_label\n'), ((2389, 2447), 'streamlit.button', 'st.button', ([], {'label': '"""Previous image"""', 'on_click': 'previous_image'}), "(label='Previous image', on_click=previous_image)\n", (2398, 2447), True, 'import streamlit as st\n'), ((2471, 2521), 'streamlit.button', 'st.button', ([], {'label': '"""Next image"""', 'on_click': 'next_image'}), "(label='Next image', on_click=next_image)\n", (2480, 2521), True, 'import streamlit as st\n'), ((3361, 3403), 'streamlit.button', 'st.button', ([], {'label': '"""Save"""', 'on_click': 'annotate'}), "(label='Save', on_click=annotate)\n", (3370, 3403), True, 'import streamlit as st\n'), ((1046, 1083), 'streamlit.warning', 'st.warning', (['"""This is the last image."""'], {}), "('This is the last image.')\n", (1056, 1083), True, 'import streamlit as st\n'), ((1268, 1306), 'streamlit.warning', 'st.warning', (['"""This is the first image."""'], 
{}), "('This is the first image.')\n", (1278, 1306), True, 'import streamlit as st\n'), ((1606, 1645), 'streamlit.warning', 'st.warning', (['"""All images are annotated."""'], {}), "('All images are annotated.')\n", (1616, 1645), True, 'import streamlit as st\n'), ((3577, 3590), 'streamlit.columns', 'st.columns', (['(2)'], {}), '(2)\n', (3587, 3590), True, 'import streamlit as st\n')]
|
r"""
Library routines for minimally 3-connected graph generation.
This program requires cython.
"""
import pyximport
pyximport.install(language_level=3)
|
[
"pyximport.install"
] |
[((130, 165), 'pyximport.install', 'pyximport.install', ([], {'language_level': '(3)'}), '(language_level=3)\n', (147, 165), False, 'import pyximport\n')]
|
from PyQt5 import QtCore, QtNetwork
import random
from gpusim_utils import smiles_to_fingerprint_bin
def parse_args():
import argparse
parser = argparse.ArgumentParser(description="Sample GPUSim Server - "
"run an HTTP server that loads fingerprint data onto GPU and " #noqa
"responds to queries to find most similar fingperints.") #noqa
parser.add_argument('dbname', help=".fsim file containing fingerprint "
"data to be searched")
parser.add_argument('dbkey', default="", help="Key for fsim file")
return parser.parse_args()
def main():
args = parse_args()
app = QtCore.QCoreApplication([])
socket = QtNetwork.QLocalSocket(app)
smiles = input("Smiles: ")
dbcount = 1
dbname = args.dbname
dbkey = args.dbkey
socket.connectToServer('gpusimilarity')
while smiles and smiles.lower() not in ('quit', 'exit'):
return_count = 20
similarity_cutoff = 0
fp_binary, _ = smiles_to_fingerprint_bin(smiles)
fp_qba = QtCore.QByteArray(fp_binary)
output_qba = QtCore.QByteArray()
output_qds = QtCore.QDataStream(output_qba, QtCore.QIODevice.WriteOnly)
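        # request layout: db count, db name, db key, request id, max results, similarity cutoff, then the fingerprint bytes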
output_qds.writeInt(dbcount)
output_qds.writeString(dbname.encode())
output_qds.writeString(dbkey.encode())
request_num = random.randint(0, 2**31)
output_qds.writeInt(request_num)
output_qds.writeInt(return_count)
output_qds.writeFloat(similarity_cutoff)
output_qds << fp_qba
socket.write(output_qba)
socket.flush()
socket.waitForReadyRead(30000)
output_qba = socket.readAll()
smiles = []
scores = []
ids = []
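        # response layout: echoed request id, result count, approximate total matches, then SMILES, IDs and scores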
data_reader = QtCore.QDataStream(output_qba)
returned_request = data_reader.readInt()
if request_num != returned_request:
raise RuntimeError("Incorrect result ID returned!")
return_count = data_reader.readInt()
approximate_matches = data_reader.readUInt64()
for i in range(return_count):
smiles.append(data_reader.readString())
for i in range(return_count):
ids.append(data_reader.readString())
for i in range(return_count):
scores.append(data_reader.readFloat())
print("Approximate total matches: {0}, returning {1}".format(
approximate_matches, return_count))
for cid, smi, score in zip(ids, smiles, scores):
print("{0} {1}: {2}".format(cid, smi, score))
smiles = input("Smiles: ")
if __name__ == '__main__':
main()
|
[
"argparse.ArgumentParser",
"random.randint",
"PyQt5.QtCore.QCoreApplication",
"PyQt5.QtCore.QByteArray",
"PyQt5.QtNetwork.QLocalSocket",
"gpusim_utils.smiles_to_fingerprint_bin",
"PyQt5.QtCore.QDataStream"
] |
[((155, 340), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""Sample GPUSim Server - run an HTTP server that loads fingerprint data onto GPU and responds to queries to find most similar fingperints."""'}), "(description=\n 'Sample GPUSim Server - run an HTTP server that loads fingerprint data onto GPU and responds to queries to find most similar fingperints.'\n )\n", (178, 340), False, 'import argparse\n'), ((646, 673), 'PyQt5.QtCore.QCoreApplication', 'QtCore.QCoreApplication', (['[]'], {}), '([])\n', (669, 673), False, 'from PyQt5 import QtCore, QtNetwork\n'), ((688, 715), 'PyQt5.QtNetwork.QLocalSocket', 'QtNetwork.QLocalSocket', (['app'], {}), '(app)\n', (710, 715), False, 'from PyQt5 import QtCore, QtNetwork\n'), ((997, 1030), 'gpusim_utils.smiles_to_fingerprint_bin', 'smiles_to_fingerprint_bin', (['smiles'], {}), '(smiles)\n', (1022, 1030), False, 'from gpusim_utils import smiles_to_fingerprint_bin\n'), ((1048, 1076), 'PyQt5.QtCore.QByteArray', 'QtCore.QByteArray', (['fp_binary'], {}), '(fp_binary)\n', (1065, 1076), False, 'from PyQt5 import QtCore, QtNetwork\n'), ((1099, 1118), 'PyQt5.QtCore.QByteArray', 'QtCore.QByteArray', ([], {}), '()\n', (1116, 1118), False, 'from PyQt5 import QtCore, QtNetwork\n'), ((1140, 1198), 'PyQt5.QtCore.QDataStream', 'QtCore.QDataStream', (['output_qba', 'QtCore.QIODevice.WriteOnly'], {}), '(output_qba, QtCore.QIODevice.WriteOnly)\n', (1158, 1198), False, 'from PyQt5 import QtCore, QtNetwork\n'), ((1355, 1381), 'random.randint', 'random.randint', (['(0)', '(2 ** 31)'], {}), '(0, 2 ** 31)\n', (1369, 1381), False, 'import random\n'), ((1756, 1786), 'PyQt5.QtCore.QDataStream', 'QtCore.QDataStream', (['output_qba'], {}), '(output_qba)\n', (1774, 1786), False, 'from PyQt5 import QtCore, QtNetwork\n')]
|
import click
import pendulum
import subprocess
import os
from pathlib import Path
from aoc.script import Script
import aoc.paths
@click.command()
@click.option("-y", "--year", type=str)
@click.option("-d", "--day", type=str)
def new(year: str, day: str):
"""Create new script for AOC"""
if not year:
year = click.prompt(f"Year", default=_get_year())
if not day:
day = click.prompt(f"Day", default=_get_day(year))
script_file = _new_script(year=year, day=day)
print(f"Created script {script_file}!")
if "EDITOR" in os.environ:
subprocess.Popen(
f"$EDITOR {script_file}",
shell=True,
)
def _get_year() -> int:
east = "US/Eastern"
now = pendulum.now(tz=east)
if now.month == 12:
if now.hour == 23:
# if it's right before 12AM in December, use tomorrow as the default date
# because it's almost AOC time
return pendulum.tomorrow(east).year
elif now.hour == 0:
            # if it's after 12AM in December, use yesterday as the default date because
            # you probably still want yesterday's date
return pendulum.today(east).year
return int(os.environ.get("AOC_YEAR", 0)) or now.year
def _get_day(year: str) -> str:
year_dir = Path(__file__).parent.parent.parent.parent / str(year)
if not year_dir.exists():
return "1"
else:
return str(max([int(p.stem) for p in year_dir.iterdir()]) + 1)
def _new_script(year: str, day: str, overwrite: bool = False) -> Path:
day = day.zfill(2)
script = Script.from_year_day(year=year, day=day)
script_dir = script.path.parent
if script_dir.parent.exists() and not overwrite:
if pendulum.now() > pendulum.datetime(year=int(year), month=12, day=int(day)):
print("Allow override of solution file, because script date in the future")
else:
raise RuntimeError(f"Script already exists for {year}-{day}!!!")
script_dir.mkdir(parents=True, exist_ok=True)
script.path.touch(exist_ok=True)
script.path.write_text(
(aoc.paths.AOC_PKG / "templates" / "script" / script.path.name).read_text()
)
return script.path
|
[
"subprocess.Popen",
"pendulum.tomorrow",
"click.option",
"click.command",
"pendulum.now",
"os.environ.get",
"pathlib.Path",
"pendulum.today",
"aoc.script.Script.from_year_day"
] |
[((148, 163), 'click.command', 'click.command', ([], {}), '()\n', (161, 163), False, 'import click\n'), ((165, 203), 'click.option', 'click.option', (['"""-y"""', '"""--year"""'], {'type': 'str'}), "('-y', '--year', type=str)\n", (177, 203), False, 'import click\n'), ((205, 242), 'click.option', 'click.option', (['"""-d"""', '"""--day"""'], {'type': 'str'}), "('-d', '--day', type=str)\n", (217, 242), False, 'import click\n'), ((748, 769), 'pendulum.now', 'pendulum.now', ([], {'tz': 'east'}), '(tz=east)\n', (760, 769), False, 'import pendulum\n'), ((1617, 1657), 'aoc.script.Script.from_year_day', 'Script.from_year_day', ([], {'year': 'year', 'day': 'day'}), '(year=year, day=day)\n', (1637, 1657), False, 'from aoc.script import Script\n'), ((597, 651), 'subprocess.Popen', 'subprocess.Popen', (['f"""$EDITOR {script_file}"""'], {'shell': '(True)'}), "(f'$EDITOR {script_file}', shell=True)\n", (613, 651), False, 'import subprocess\n'), ((1229, 1258), 'os.environ.get', 'os.environ.get', (['"""AOC_YEAR"""', '(0)'], {}), "('AOC_YEAR', 0)\n", (1243, 1258), False, 'import os\n'), ((1759, 1773), 'pendulum.now', 'pendulum.now', ([], {}), '()\n', (1771, 1773), False, 'import pendulum\n'), ((970, 993), 'pendulum.tomorrow', 'pendulum.tomorrow', (['east'], {}), '(east)\n', (987, 993), False, 'import pendulum\n'), ((1187, 1207), 'pendulum.today', 'pendulum.today', (['east'], {}), '(east)\n', (1201, 1207), False, 'import pendulum\n'), ((1321, 1335), 'pathlib.Path', 'Path', (['__file__'], {}), '(__file__)\n', (1325, 1335), False, 'from pathlib import Path\n')]
|
# coding: utf-8
import setuptools
setuptools.setup(
name = 'Pynames',
version = '0.1.0',
author = '<NAME>',
author_email = '<EMAIL>',
packages = setuptools.find_packages(),
url = 'https://github.com/Tiendil/pynames',
license = 'LICENSE',
description = "characters' name generation library",
long_description = open('README.md').read(),
include_package_data = True, # setuptools-git MUST be installed
test_suite = 'tests',
install_requires = ['unicodecsv'],
# package_data = { '': ['*.json'] }
)
|
[
"setuptools.find_packages"
] |
[((166, 192), 'setuptools.find_packages', 'setuptools.find_packages', ([], {}), '()\n', (190, 192), False, 'import setuptools\n')]
|
from setuptools import setup, find_packages
setup(
name='TracSoftDueDate', version='1.0',
packages=find_packages(exclude=['*.tests*']),
entry_points = {
'trac.plugins': [
'softduedate = softduedate',
],
},
)
|
[
"setuptools.find_packages"
] |
[((102, 137), 'setuptools.find_packages', 'find_packages', ([], {'exclude': "['*.tests*']"}), "(exclude=['*.tests*'])\n", (115, 137), False, 'from setuptools import setup, find_packages\n')]
|
import os
from urllib.parse import urlparse
import requests
from PyPDF2 import PdfFileReader
def download_pdf(url):
parse = urlparse(url)
base_url = parse.scheme + '://' + parse.netloc
try:
redirect = requests.get(url, allow_redirects=False)
except requests.exceptions.ConnectionError as e:
print(e, 2)
raise
if redirect.status_code == 302:
url = base_url + redirect.headers['location']
else:
pass
filename = url.split('/')[-1]
if not is_pdf(filename):
return None
if os.path.isfile(filename):
return filename.strip()
else:
print(filename, 'downloading')
request = requests.get(url)
# https://stackoverflow.com/questions/34503412/download-and-save-pdf-file-with-python-requests-module
with open(filename, 'wb') as f:
f.write(request.content)
return filename.strip()
def is_pdf(filename):
if filename[-4:] != '.pdf':
return False
else:
return True
def get_pdf_title(filename):
# http://www.blog.pythonlibrary.org/2018/04/10/extracting-pdf-metadata-and-text-with-python/
with open(filename, 'rb') as f:
pdf = PdfFileReader(f)
info = pdf.getDocumentInfo()
pdf.getNumPages()
title = info.title if info.title else filename
return title.strip()
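# Example usage (the URL is illustrative; any direct link to a PDF should work):
#   name = download_pdf("https://example.com/some-paper.pdf")
#   if name:
#       print(get_pdf_title(name))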
|
[
"requests.get",
"os.path.isfile",
"PyPDF2.PdfFileReader",
"urllib.parse.urlparse"
] |
[((131, 144), 'urllib.parse.urlparse', 'urlparse', (['url'], {}), '(url)\n', (139, 144), False, 'from urllib.parse import urlparse\n'), ((559, 583), 'os.path.isfile', 'os.path.isfile', (['filename'], {}), '(filename)\n', (573, 583), False, 'import os\n'), ((224, 264), 'requests.get', 'requests.get', (['url'], {'allow_redirects': '(False)'}), '(url, allow_redirects=False)\n', (236, 264), False, 'import requests\n'), ((684, 701), 'requests.get', 'requests.get', (['url'], {}), '(url)\n', (696, 701), False, 'import requests\n'), ((1206, 1222), 'PyPDF2.PdfFileReader', 'PdfFileReader', (['f'], {}), '(f)\n', (1219, 1222), False, 'from PyPDF2 import PdfFileReader\n')]
|
#!/usr/bin/env python
r'''
Compare two datasets to determine whether there is a significant
difference between them for a specific confidence level using the
t-test methodology for unpaired observations.
Please note that this is not, strictly, a t-test because it switches
over to the standard normal distribution (SND) when the number of
effective degrees of freedom (DOF) is larger than 32.
It is really useful for determining whether runtime or memory use has
changed between two different versions of software. The datasets are
completely independent of the program (i.e. the data values are
created by tools like /usr/bin/time) so they can be used in a black
box testing environment.
Each dataset contains a series of numbers to be compared. The numbers
must be greater than 0. That is a reasonable constraint given that
they typically represent something like elapsed time or memory used.
The size of the datasets can be different because we are treating
the samples as unpaired observations (t-test) but the smallest one
must have more than 2 entries. Typically you would like to have
at least 50 entries in each dataset.
You must specify the confidence level that you want to use to
determine whether the datasets differ. Typical confidence levels are 0.90
(90%), 0.95 (95%) and 0.99 (99%). The tool will automatically
determine the associated z-value based on the confidence level and the
number of effective degrees of freedom. No table look ups are
necessary. The methodology used to calculate the z-value is described
in detail here: https://github.com/jlinoff/ztables.
EXAMPLE 1 - two datasets in one file
Here is an example to make sense of it all.
We want to compare two versions of the foobar program to see if the
second version is faster than the first for the same inputs. The
versions are 1.1 and 1.2. The program takes about 2 minutes to run
(120 seconds) and we want to determine whether v1.2 is faster.
The table below shows sample data 10 runs for each version.
# Run time data collected for v1.1 and v1.2.
#
# Num v1.1 v1.2
# === ======= =======
1 119.041 117.038
2 119.670 119.733
3 120.675 118.346
4 118.628 117.261
5 120.363 118.863
6 118.076 117.545
7 120.539 119.751
8 118.880 119.042
9 120.164 116.203
10 119.134 118.049
For this example we assume that the data is stored in a single file
but normally it is easier to have it exist in two separate files
because, by default, the tool looks at the first token on each line
and collects it if the token is a floating point number. When the data
is not in a single column in a file, you must explicitly specify
which column to collect. In this case, the first dataset is in column
2 and the second dataset is in column 3 of the same file. Blank lines
and lines where the token is not a floating point number are ignored.
Here is what the run looks like:
$ ./cmpds.py -c 0.95 -k 2 3 data.txt
With 95.0% confidence, dataset-2 is smaller than dataset-1 by about 1.1%.
As you can see, dataset-2 (v1.2) is slightly faster.
Note that we use -k to specify the columns because -c is already
reserved for specifying the confidence level.
If you reverse the columns, you will get the opposite result:
$ ./cmpds.py -c 0.95 -k 3 2 data.txt
With 95.0% confidence, dataset-2 is larger than dataset-1 by about 1.1%.
EXAMPLE 2 - datasets in separate files
A more realistic example would be running a program called blackbox-v1
50 times and collecting the timing output to a file and then running
blackbox-v2 and collecting its output. Here is how you might do it:
    $ rm -f /tmp/v1.out /tmp/v2.out
$ for((i=1;i<=50;i++)) ; do printf '\nExp %03d\n' $i ; /usr/bin/time -p blackbox-v1 >> /tmp/v1.out ; done
$ for((i=1;i<=50;i++)) ; do printf '\nExp %03d\n' $i ; /usr/bin/time -p blackbox-v2 >> /tmp/v2.out ; done
We can now capture the real run time data by simply grepping out the
data like this:
$ grep -w ^real /tmp/v1.out > /tmp/v1.ds
$ grep -w ^real /tmp/v2.out > /tmp/v2.ds
The above command takes advantage of the fact that posix time format
(-p) outputs the time data on 3 separate lines as shown in this simple
example:
$ /usr/bin/time -p sleep 0.3
real 0.30
user 0.00
sys 0.00
At this point we have the unpaired observations from both runs in two
different files so we can use cmpds.py to figure out whether v2 is
faster than v1 at a 95% confidence level.
$ ./cmpds.py -c 0.95 /tmp/v1.ds /tmp/v2.ds
With 95.0% confidence, dataset-2 is smaller than dataset-1 by about 1.3%.
That tells us that v2 is indeed slightly faster.
'''
# License: MIT Open Source
# Copyright (c) 2016 by <NAME>
# REFERENCES:
# <NAME> (1991). "The Art of Computer Systems Performance Analysis", Wiley and Sons, New York.
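#
# For readability, here is a summary of the formulas implemented in ttest()
# below (unpaired observations, following the reference above); these comments
# are documentation only and do not change the program's behaviour:
#
#   mean difference:          md    = mean(a) - mean(b)
#   sample variance:          s_a^2 = sum((x - mean(a))^2) / (n_a - 1)      (likewise for b)
#   stddev of the mean diff:  s_md  = sqrt(s_a^2/n_a + s_b^2/n_b)
#   effective DOF:            dof   = (s_a^2/n_a + s_b^2/n_b)^2
#                                     / ((s_a^2/n_a)^2/(n_a + 1) + (s_b^2/n_b)^2/(n_b + 1)) - 2
#   confidence interval:      md +/- z * s_md, where z is the (1 + cl)/2 quantile
#                             of the t distribution with dof degrees of freedom
#                             (or of the SND once dof exceeds --snd-threshold)
#
# The difference is reported as significant iff that interval does not contain zero.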
import argparse
import datetime
import inspect
import math
import os
import sys
from functools import reduce  # not a builtin on Python 3; needed by gamma() below
#VERSION='0.1' # Initial load.
VERSION='0.2' # Made the std dev calculation simpler.
# ================================================================
#
# Message utility functions.
#
# ================================================================
def _msg(prefix, frame, msg, ofp=sys.stdout):
'''
Base for printing messages.
'''
lineno = inspect.stack()[frame][2]
now = datetime.datetime.now()
ofp.write('{!s:<26} {} {:>5} - {}\n'.format(now, prefix, lineno, msg))
def info(msg, f=1):
'''
Write an info message to stdout.
'''
_msg('INFO', f+1, msg)
def infov(opts, msg, f=1):
'''
Write an info message to stdout.
'''
if opts.verbose > 0:
_msg('INFO', f+1, msg)
def warn(msg, f=1):
'''
Write a warning message to stdout.
'''
_msg('WARNING', f+1, msg)
def err(msg, f=1):
'''
Write an error message to stderr and exit.
'''
_msg('ERROR', f+1, msg, sys.stderr)
sys.exit(1)
# ================================================================
#
# Statistical utility functions.
# See https://github.com/jlinoff/ztables for background.
#
# ================================================================
def gamma(x):
'''
Gamma function.
Uses the Lanczos approximation and natural logarithms.
For integer values of x we can use the exact value of (x-1)!.
gamma(1/2) = 1.77245385091
gamma(3/2) = 0.886226925453
gamma(5/2) = 1.32934038818
gamma(7/2) = 3.32335097045
gamma(4) = 6.0
'''
if (x - int(x)) == 0:
# Optimization for integer values: (x-1)!.
return reduce(lambda a, b: a * b, [float(i) for i in range(1, int(x))])
# Lanczos approximation, page 214 of Numerical Recipes in C.
c = [76.18009172947146,
-86.50532032941677,
24.01409824083091,
-1.231739572450155,
0.1208650973866179e-2,
-0.5395239384953e-5,
]
c0 = 1.000000000190015
c1 = 2.5066282746310005
x1 = float(x) + 5.5
x2 = (float(x) + 0.5) * math.log(x1)
x3 = x1 - x2
x4 = c0
x5 = float(x)
for i in range(6):
x5 += 1.0
x4 += c[i] / x5
x6 = math.log((c1 * x4) / float(x))
x7 = -x3 + x6 # ln(gamma(x))
g = math.exp(x7)
return g
def pdf_t(x, dof):
'''
Calculate the probability density function (PDF) at x for a
student-t distribution with dof degrees of freedom.
This is basically the height of the curve at x.
'''
assert dof > 2
x1 = gamma((float(dof) + 1.0) / 2.0)
x2 = math.sqrt(dof * math.pi) * gamma((float(dof) / 2.0))
x3 = 1.0 + (float((x ** 2)) / float(dof))
x4 = float((dof + 1)) / 2.0
x5 = x3 ** -x4
y = (x1 * x5) / x2
return y
def pdf_nd(x, s=1.0, u=0.0):
'''
Calculate the probability density function (PDF) for a normal
distribution.
s = standard deviation (1 for a standard normal distribution)
u = mean (0 for a standard normal distribution)
This is the height of the curve at x.
'''
dx = float(x) - float(u)
dx2 = dx * dx
xden = 2 * (s ** 2)
den = s * math.sqrt(2 * math.pi)
exp = math.e ** ( -dx2 / xden )
y = exp / den
return y
def pdf_snd(x):
'''
Calculate the probability density function (PDF) for a standard
normal distribution.
s = standard deviation (1 for a standard normal distribution)
u = mean (0 for a standard normal distribution)
This is the height of the curve at x.
It is exactly the same as pdf_nd(x, 1, 0) but is somewhat more
efficient.
'''
dx2 = float(x) ** 2
den = math.sqrt(2 * math.pi)
exp = math.e ** - (dx2 / 2)
y = exp / den
return y
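# Sanity check (documentation only, not executed): pdf_snd(0) should be
# 1/sqrt(2*pi) ~= 0.3989, the peak height of the standard normal curve.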
def area_under_curve(x1, x2, intervals, fct, *args, **kwargs):
'''
Calculate the approximate area under a curve using trapezoidal
approximation.
It breaks the interval between x1 and x2 into trapezoids whose
width is fixed (proportional to how the interval is sliced). The
height of each rectangle is the pdf function value for x at the
start of the interval. The accumulation of the areas provides an
estimate of the area under the curve.
The greater the number of intervals the better the estimate is at
the cost of performance.
'''
assert x2 > x1 # just a sanity check
assert intervals > 1 # another sanity check
total_area = 0.0
width = (float(x2) - float(x1)) / float(intervals)
x = float(x1)
py = float(fct(x, *args, **kwargs))
for i in range(intervals):
y = float(fct(x, *args, **kwargs))
rectangle_area = width * y # area of rectangle at x with height y
triangle_area = ((y - py) * width) / 2.0 # adjustment based on height change
total_area += rectangle_area + triangle_area # trapezoid area
x += width # advance to the next edge
py = y # remember the previous height
return total_area
def binary_search_for_z(probability, tolerance, maxtop, minval, iterations, v, fct, *args):
'''
Get the z value that matches the specified percentage.
'''
# Binary search to find the closest value.
z = 0.0
adjustment = float(maxtop) / 2.0
top = maxtop
bot = 0.0
diff = tolerance * 2 # start the loop
while diff > tolerance:
mid = bot + ((top - bot) / 2.0)
z = mid - adjustment
q = area_under_curve(minval, z, iterations, fct, *args)
cp = 1.0 - (2.0 * (1.0 - q))
diff = abs(cp - probability)
if v:
info('p={}, cp={}, t={:f}, mt={}, mv={}, i={}, top={}, bot={}, mid={}, z={}, q={}'.format(
probability, cp, tolerance, maxtop, minval, iterations, top, bot, mid, z, q))
if probability < cp:
# It is to the right.
top = mid
elif probability > cp:
# It is to the left.
bot = mid
else:
break
# Sanity checks.
assert top <= maxtop
assert bot >= 0
return z
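# Illustrative check (documentation only, not executed by this program): with the
# default internal parameters used by ttest() below, the call
#   binary_search_for_z(0.95, 0.00001, 14.0, -14.0, 10000, False, pdf_snd)
# should return a value close to the familiar z ~= 1.96 for a 95% confidence level.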
# ================================================================
#
# t-test implementation
#
# ================================================================
def ttest(a, b, opts):
'''
Analyze unpaired observations to determine whether they are
significantly different.
'''
cl = opts.conf
infov(opts, 'a: {:>3} {}'.format(len(a), a))
infov(opts, 'b: {:>3} {}'.format(len(b), b))
infov(opts, 'confidence level: {:.1f}%'.format(100.*cl))
na = float(len(a))
nb = float(len(b))
infov(opts, 'na: {}'.format(na))
infov(opts, 'nb: {}'.format(nb))
# means
ma = sum(a) / na
mb = sum(b) / nb
infov(opts, 'mean a: {:.3f}'.format(ma))
infov(opts, 'mean b: {:.3f}'.format(mb))
# variances
vara = sum([(xa - ma) ** 2 for xa in a]) / float(na - 1.)
varb = sum([(xb - mb) ** 2 for xb in b]) / float(nb - 1.)
infov(opts, 'variance a: {:.3f}'.format(vara))
infov(opts, 'variance b: {:.3f}'.format(varb))
# standard deviations
stddeva = math.sqrt(vara)
stddevb = math.sqrt(varb)
infov(opts, 'stddev a: {:.3f}'.format(stddeva))
infov(opts, 'stddev b: {:.3f}'.format(stddevb))
# mean difference
md = ma - mb
infov(opts, 'mean diff: {:.3f}'.format(md))
# standard deviation of the mean difference
sa2qna = stddeva**2 / na
sb2qnb = stddevb**2 / nb
sdmd = math.sqrt(sa2qna + sb2qnb)
infov(opts, 'stddev of the mean diff: {:.3f}'.format(sdmd))
# effective degrees of freedom
dof_num = (sa2qna + sb2qnb)**2
dof_dena = (1. / (na + 1.)) * sa2qna**2
dof_denb = (1. / (nb + 1.)) * sb2qnb**2
dof = (dof_num / (dof_dena + dof_denb)) - 2.0
infov(opts, 'effective DOF: {:.2f}'.format(dof))
dofr = int('{:.0f}'.format(dof))
infov(opts, 'effective DOF (rounded): {}'.format(dofr))
# confidence interval for the mean difference
z = 0.0
# allow the user to play with the parameters
t = opts.internal[0]
lb = opts.internal[1]
ub = opts.internal[2]
intervals = int(opts.internal[3])
maxv = 2 * round(abs(lb) + ub + 0.5, 0)
minv = -maxv
infov(opts, 'internal threshold: {:.1f}'.format(t))
infov(opts, 'internal lower bound: {}'.format(lb))
infov(opts, 'internal upper bound: {}'.format(ub))
infov(opts, 'internal intervals: {}'.format(intervals))
infov(opts, 'internal minval: {}'.format(minv))
infov(opts, 'internal maxval: {}'.format(maxv))
v = True if opts.verbose > 1 else False
if dofr > opts.snd_threshold:
# use standard normal distribution (SND)
infov(opts, 'use standard normal distribution (SND)')
z = binary_search_for_z(cl, t, maxv, minv, intervals, v, pdf_snd)
else:
infov(opts, 'use t-{} distribution'.format(dofr))
z = binary_search_for_z(cl, t, maxv, minv, intervals, v, pdf_t, dof)
x = (1. - cl) / 2.
q = cl + x
infov(opts, '{:.3f}-quantile of t-variate with {} degrees of freedom: {:.2f}'.format(q, dofr, z))
cllb = md - z * sdmd
club = md + z * sdmd
infov(opts, '{:.1f}% confidence interval for difference: [{:3f} .. {:3f}]'.format(100.*cl, cllb, club))
crosses_zero = cllb < 0 < club
significant = not crosses_zero
infov(opts, 'crosses zero: {}'.format(crosses_zero))
infov(opts, 'reject the null hypothesis: {}'.format(significant))
# Report the result.
clp = cl * 100.
if significant:
per = 100. * abs(md) / ma
infov(opts, 'percentage: {}'.format(per))
if club < 0:
print('With {:.1f}% confidence, dataset-2 is larger than dataset-1 by about {:,.1f}%.'.format(clp, per))
else:
print('With {:.1f}% confidence, dataset-2 is smaller than dataset-1 by about {:,.1f}%.'.format(clp, per))
else:
print('With {:.1f}% confidence, there is no significant difference between the datasets.'.format(clp))
# ================================================================
#
# Options
#
# ================================================================
def getopts():
'''
Get the command line options using argparse.
'''
# Make sure that the confidence level is in the proper range.
def get_conf_level():
class GetConfLevel(argparse.Action):
def __call__(self, parser, args, values, option_string=None):
if 0. < values < 1.0:
setattr(args, self.dest, values)
else:
msg = 'argument "{}" out of range (0..1)'.format(self.dest)
parser.error(msg)
return GetConfLevel
# Trick to capitalize the built-in headers.
# Unfortunately I can't get rid of the ":" reliably.
def gettext(s):
lookup = {
'usage: ': 'USAGE:',
'positional arguments': 'POSITIONAL ARGUMENTS',
'optional arguments': 'OPTIONAL ARGUMENTS',
'show this help message and exit': 'Show this help message and exit.\n ',
}
return lookup.get(s, s)
argparse._ = gettext # to capitalize help headers
base = os.path.basename(sys.argv[0])
name = os.path.splitext(base)[0]
usage = '\n {0} [OPTIONS] <DATASET-1> [<DATASET-2>]'.format(base)
desc = 'DESCRIPTION:{0}'.format('\n '.join(__doc__.split('\n')))
epilog = r'''
EXAMPLES:
# Example 1: help
$ {0} -h
# Example 2: No significant difference with 95% confidence.
    #            The same dataset is used for both arguments.
$ ./gends.py 10 100 120 > ds-10-100-120.txt
$ {0} ds-10-100-120.txt ds-10-100-120.txt
With 95.0% confidence, there is no significant difference between the datasets.
# Example 3: Dataset-2 is slightly smaller (has faster runtime) with 95% confidence.
# Both runs have 50 samples.
# The data is specifically generated to show the difference.
$ ./gends.py 50 110 112 > ds-50-110-112.txt
$ ./gends.py 50 108 112 > ds-50-108-112.txt
$ {0} ds-50-110-112.txt ds-50-108-112.txt
With 95.0% confidence, dataset-2 is smaller than dataset-1 by about 0.8%.
# Example 4: Dataset-2 is slightly smaller (has faster runtime) with 99% confidence.
# Both runs have 50 samples.
    $ {0} -c 0.99 ds-50-110-112.txt ds-50-108-112.txt
With 99.0% confidence, dataset-2 is smaller than dataset-1 by about 0.8%.
# Example 5: Dataset-1 and dataset-2 are in the same file.
$ cat data.txt
# v1.1 v1.2
# ======= =======
1 119.041 117.038
2 119.670 119.733
3 120.675 118.346
4 118.628 117.261
5 120.363 118.863
6 118.076 117.545
7 120.539 119.751
8 118.880 119.042
9 120.164 116.203
10 119.134 118.049
$ {0} --cols 2 3 data.txt
With 95.0% confidence, dataset-2 is smaller than dataset-1 by about 1.1%.
'''.format(base)
afc = argparse.RawTextHelpFormatter
parser = argparse.ArgumentParser(formatter_class=afc,
description=desc[:-2],
usage=usage,
epilog=epilog)
parser.add_argument('-c', '--conf',
type=float,
default=0.95,
action=get_conf_level(),
metavar=('FLOAT'),
help='''The confidence level such that 0 < c < 1.
The default is %(default)s.
''')
parser.add_argument('--internal',
type=float,
nargs=4,
default=[0.00001, -3.4, 3.4, 10000],
metavar=('TOLERANCE', 'LOWER', 'UPPER', 'INTERVALS'),
help='''Factors used for internal computations.
You should never need to change these.
Defaults: %(default)s.
''')
parser.add_argument('-k', '--cols',
nargs=2,
type=int,
default=[1,1],
metavar=('COL1', 'COL2'),
help='''The columns that define each dataset.
The first column is for the first dataset.
The second column is for the second dataset.
If the value in the column is not a floating point
number it is ignored.
The default is column 1 for both datasets.
''')
parser.add_argument('-s', '--snd-threshold',
type=int,
default=32,
metavar=('UINT'),
help='''The standard normal distribution (SND) threshold.
When the number of effective degrees of freedom (DOF)
exceeds this threshold, the SND is used instead of a
t-distribution.
The default is %(default)s.
''')
parser.add_argument('-v', '--verbose',
action='count',
default=0,
help='''Increase the level of verbosity.
Specify -v to see the values that make up the computation.
Specify -v -v to see internal details about the z value lookup and
values that were discarded during file reads.
''')
parser.add_argument('-V', '--version',
action='version',
version='%(prog)s v{0}'.format(VERSION),
help="""Show program's version number and exit.
""")
# Positional arguments at the end.
parser.add_argument('FILES',
nargs='+',
help='''The files with the run time data.
The data must be organized in columns with one entry per line.
Non-numeric data is ignored which allows you to add comments
and blank spaces.
You can see the ignored data in verbose mode.
If only one file is specified, it is used for both datasets.
''')
opts = parser.parse_args()
if opts.cols[0] < 1:
        parser.error('column 1 must be greater than 0')
    if opts.cols[1] < 1:
        parser.error('column 2 must be greater than 0')
if len(opts.FILES) > 2:
parser.error('only 1 or 2 files may be specified')
if opts.snd_threshold < 30:
parser.error('it does not make sense to use SND for {} elements'.format(opts.snd_threshold))
return opts
# ================================================================
#
# Read file data.
#
# ================================================================
def read_file(opts, fn, col):
'''
Read column data from the file.
'''
ds = []
try:
with open(fn, 'r') as ifp:
ln = 0
for line in ifp.readlines():
ln += 1
line = line.strip()
tokens = line.split()
if len(tokens) < col:
continue
token = tokens[col-1]
try:
f = float(token)
if f < 0.0001: # avoid divide by 0 errors
if opts.verbose > 1:
info('skipping line {} in {}: number is too small {}'.format(ln, fn, token))
continue
ds.append(f)
except ValueError:
if opts.verbose > 1:
info('skipping line {} in {}: not a number: {}'.format(ln, fn, token))
continue
except IOError:
err('could not read file: {}'.format(fn))
if len(ds) < 3:
err('too few data points at column {}, found {}, need at least 3 in file: {}'.format(col, len(ds), fn))
return ds
# ================================================================
#
# Main
#
# ================================================================
def main():
opts = getopts()
af = opts.FILES[0]
bf = opts.FILES[1] if len(opts.FILES) == 2 else af
ac = opts.cols[0]
bc = opts.cols[1]
infov(opts, 'dataset-1 file: {}'.format(af))
infov(opts, 'dataset-2 file: {}'.format(bf))
infov(opts, 'dataset-1 col: {}'.format(ac))
infov(opts, 'dataset-2 col: {}'.format(bc))
a = read_file(opts, af, ac)
b = read_file(opts, bf, bc)
ttest(a, b, opts)
if __name__ == '__main__':
main()
|
[
"math.exp",
"inspect.stack",
"argparse.ArgumentParser",
"math.sqrt",
"os.path.basename",
"os.path.splitext",
"math.log",
"datetime.datetime.now",
"sys.exit"
] |
[((5739, 5762), 'datetime.datetime.now', 'datetime.datetime.now', ([], {}), '()\n', (5760, 5762), False, 'import datetime\n'), ((6313, 6324), 'sys.exit', 'sys.exit', (['(1)'], {}), '(1)\n', (6321, 6324), False, 'import sys\n'), ((7613, 7625), 'math.exp', 'math.exp', (['x7'], {}), '(x7)\n', (7621, 7625), False, 'import math\n'), ((8981, 9003), 'math.sqrt', 'math.sqrt', (['(2 * math.pi)'], {}), '(2 * math.pi)\n', (8990, 9003), False, 'import math\n'), ((12407, 12422), 'math.sqrt', 'math.sqrt', (['vara'], {}), '(vara)\n', (12416, 12422), False, 'import math\n'), ((12437, 12452), 'math.sqrt', 'math.sqrt', (['varb'], {}), '(varb)\n', (12446, 12452), False, 'import math\n'), ((12763, 12789), 'math.sqrt', 'math.sqrt', (['(sa2qna + sb2qnb)'], {}), '(sa2qna + sb2qnb)\n', (12772, 12789), False, 'import math\n'), ((16470, 16499), 'os.path.basename', 'os.path.basename', (['sys.argv[0]'], {}), '(sys.argv[0])\n', (16486, 16499), False, 'import os\n'), ((18259, 18359), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'formatter_class': 'afc', 'description': 'desc[:-2]', 'usage': 'usage', 'epilog': 'epilog'}), '(formatter_class=afc, description=desc[:-2], usage=\n usage, epilog=epilog)\n', (18282, 18359), False, 'import argparse\n'), ((7406, 7418), 'math.log', 'math.log', (['x1'], {}), '(x1)\n', (7414, 7418), False, 'import math\n'), ((7919, 7943), 'math.sqrt', 'math.sqrt', (['(dof * math.pi)'], {}), '(dof * math.pi)\n', (7928, 7943), False, 'import math\n'), ((8484, 8506), 'math.sqrt', 'math.sqrt', (['(2 * math.pi)'], {}), '(2 * math.pi)\n', (8493, 8506), False, 'import math\n'), ((16511, 16533), 'os.path.splitext', 'os.path.splitext', (['base'], {}), '(base)\n', (16527, 16533), False, 'import os\n'), ((5703, 5718), 'inspect.stack', 'inspect.stack', ([], {}), '()\n', (5716, 5718), False, 'import inspect\n')]
|
#!/usr/bin/env python
#
# Check trace components in FreeType 2 source.
# Author: <NAME>, 2009, 2013
#
# This code is explicitly into the public domain.
import sys
import os
import re
SRC_FILE_LIST = []
USED_COMPONENT = {}
KNOWN_COMPONENT = {}
SRC_FILE_DIRS = ["src"]
TRACE_DEF_FILES = ["include/freetype/internal/fttrace.h"]
# --------------------------------------------------------------
# Parse command line options
#
for i in range(1, len(sys.argv)):
if sys.argv[i].startswith("--help"):
        print("Usage: %s [option]" % sys.argv[0])
        print("Search used-but-not-defined and defined-but-not-used trace_XXX macros")
        print("")
        print("  --help:")
        print("    Show this help")
        print("")
        print("  --src-dirs=dir1:dir2:...")
        print("    Specify the directories of C source files to be checked")
        print("    Default is %s" % ":".join(SRC_FILE_DIRS))
        print("")
        print("  --def-files=file1:file2:...")
        print("    Specify the header files including FT_TRACE_DEF()")
        print("    Default is %s" % ":".join(TRACE_DEF_FILES))
        print("")
exit(0)
if sys.argv[i].startswith("--src-dirs="):
SRC_FILE_DIRS = sys.argv[i].replace("--src-dirs=", "", 1).split(":")
elif sys.argv[i].startswith("--def-files="):
TRACE_DEF_FILES = sys.argv[i].replace("--def-files=", "", 1).split(":")
# --------------------------------------------------------------
# Scan C source and header files using trace macros.
#
c_pathname_pat = re.compile('^.*\.[ch]$', re.IGNORECASE)
trace_use_pat = re.compile('^[ \t]*#define[ \t]+FT_COMPONENT[ \t]+trace_')
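# For reference, trace_use_pat matches lines such as the following (illustrative
# example, not taken from a specific FreeType source file):
#   #define FT_COMPONENT  trace_memory
# from which the component name "memory" is recorded by the walk below.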
for d in SRC_FILE_DIRS:
for (p, dlst, flst) in os.walk(d):
for f in flst:
if c_pathname_pat.match(f) != None:
src_pathname = os.path.join(p, f)
line_num = 0
for src_line in open(src_pathname, 'r'):
line_num = line_num + 1
src_line = src_line.strip()
if trace_use_pat.match(src_line) != None:
component_name = trace_use_pat.sub('', src_line)
if component_name in USED_COMPONENT:
USED_COMPONENT[component_name].append("%s:%d" % (src_pathname, line_num))
else:
USED_COMPONENT[component_name] = ["%s:%d" % (src_pathname, line_num)]
# --------------------------------------------------------------
# Scan header file(s) defining trace macros.
#
trace_def_pat_opn = re.compile('^.*FT_TRACE_DEF[ \t]*\([ \t]*')
trace_def_pat_cls = re.compile('[ \t\)].*$')
for f in TRACE_DEF_FILES:
line_num = 0
for hdr_line in open(f, 'r'):
line_num = line_num + 1
hdr_line = hdr_line.strip()
if trace_def_pat_opn.match(hdr_line) != None:
component_name = trace_def_pat_opn.sub('', hdr_line)
component_name = trace_def_pat_cls.sub('', component_name)
if component_name in KNOWN_COMPONENT:
                print("trace component %s is defined twice, see %s and fttrace.h:%d" %
                      (component_name, KNOWN_COMPONENT[component_name], line_num))
else:
KNOWN_COMPONENT[component_name] = "%s:%d" % \
(os.path.basename(f), line_num)
# --------------------------------------------------------------
# Compare the used and defined trace macros.
#
print("# Trace component used in the implementations but not defined in fttrace.h.")
cmpnt = sorted(USED_COMPONENT.keys())
for c in cmpnt:
if c not in KNOWN_COMPONENT:
        print("Trace component %s (used in %s) is not defined." % (c, ", ".join(USED_COMPONENT[c])))
print("# Trace component is defined but not used in the implementations.")
cmpnt = sorted(KNOWN_COMPONENT.keys())
for c in cmpnt:
if c not in USED_COMPONENT:
if c != "any":
            print("Trace component %s (defined in %s) is not used." % (c, KNOWN_COMPONENT[c]))
|
[
"os.path.basename",
"os.walk",
"os.path.join",
"re.compile"
] |
[((1658, 1698), 're.compile', 're.compile', (['"""^.*\\\\.[ch]$"""', 're.IGNORECASE'], {}), "('^.*\\\\.[ch]$', re.IGNORECASE)\n", (1668, 1698), False, 'import re\n'), ((1714, 1772), 're.compile', 're.compile', (['"""^[ \t]*#define[ \t]+FT_COMPONENT[ \t]+trace_"""'], {}), "('^[ \\t]*#define[ \\t]+FT_COMPONENT[ \\t]+trace_')\n", (1724, 1772), False, 'import re\n'), ((2697, 2741), 're.compile', 're.compile', (['"""^.*FT_TRACE_DEF[ \t]*\\\\([ \t]*"""'], {}), "('^.*FT_TRACE_DEF[ \\t]*\\\\([ \\t]*')\n", (2707, 2741), False, 'import re\n'), ((2761, 2786), 're.compile', 're.compile', (['"""[ \t\\\\)].*$"""'], {}), "('[ \\t\\\\)].*$')\n", (2771, 2786), False, 'import re\n'), ((1825, 1835), 'os.walk', 'os.walk', (['d'], {}), '(d)\n', (1832, 1835), False, 'import os\n'), ((1939, 1957), 'os.path.join', 'os.path.join', (['p', 'f'], {}), '(p, f)\n', (1951, 1957), False, 'import os\n'), ((3484, 3503), 'os.path.basename', 'os.path.basename', (['f'], {}), '(f)\n', (3500, 3503), False, 'import os\n')]
|
import os
import torch
import utils
import torchvision.transforms as T
from torch.utils import data
from PIL import Image
# Define a custom dataset
class MyDataSet(data.Dataset):
def __init__(self,root,transform):
        # Absolute paths of all images
imgs=os.listdir(root)
self.imgs=[os.path.join(root,k) for k in imgs]
self.transform=transform
def __getitem__(self, index):
img_path = self.imgs[index]
# 1. Load the image
pil_img = Image.open(img_path)
# 2. Resize and normalize the images using torchvision.
img = self.transform(pil_img)
return img
def __len__(self):
return len(self.imgs)
# Define a custom dataset that pairs each image with its saliency map
class DataSetWithSalieny(data.Dataset):
def __init__(self,root,saliency_root,transform,transform_saliency):
        # Absolute paths of all images
self.imgs=os.listdir(root)
self.imgs=[os.path.join(root,k) for k in self.imgs]
self.root = root
self.saliency_root = saliency_root
self.transform=transform
self.transform_saliency = transform_saliency
def __getitem__(self, index):
img_path = self.imgs[index]
img_name = self.imgs[index].split('/')[-1]
saliency_name = img_name.split('.')[0]+".png"
saliency_path = os.path.join(self.saliency_root, saliency_name)
# 1. Load the image
pil_img = Image.open(img_path)
pil_saliency = Image.open(saliency_path)
# 2. Resize and normalize the images using torchvision.
img = self.transform(pil_img)
saliency_1channel = self.transform_saliency(pil_saliency)
saliency = utils.get_saleincy_2channel(saliency_1channel)
return img, saliency
def __len__(self):
return len(self.imgs)
def get_saliency_dataloader(image_dir,saliency_dir, img_size, batch_size):
compose = [
T.Resize((img_size[0], img_size[1])),
        T.ToTensor(),  # scale to [0, 1]
        T.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))  # map [0, 1] to [-1, 1]
]
transform = T.Compose(compose)
compose_saliency = [
T.Resize((img_size[0], img_size[1])),
T.Grayscale(num_output_channels=1),
        T.ToTensor(),  # scale to [0, 1]
]
transform_saliency = T.Compose(compose_saliency)
dataset = DataSetWithSalieny(image_dir,saliency_dir,transform,transform_saliency)
dataloader = iter(torch.utils.data.DataLoader(dataset,
batch_size,
num_workers = 1))
    return dataloader  # returns an iterator over the DataLoader
def get_gray_dataloader(image_dir, img_size, batch_size):
compose = [
T.Resize((img_size[0], img_size[1])),
T.Grayscale(num_output_channels=3),
T.ToTensor(),
        T.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))  # map [0, 1] to [-1, 1]
]
transform = T.Compose(compose)
dataset = MyDataSet(image_dir,transform)
dataloader = iter(torch.utils.data.DataLoader(dataset,
batch_size,
num_workers = 1))
    return dataloader  # returns an iterator over the DataLoader
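# Example usage (the paths and sizes are illustrative; adjust them to your data):
#   loader = get_saliency_dataloader("images/", "saliency/", img_size=(256, 256), batch_size=4)
#   img_batch, saliency_batch = next(loader)  # the helpers return iterators, so advance them with next()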
# if __name__ == '__main__':
# pil_saliency = Image.open(saliency_path)
# # 2. Resize and normalize the images using torchvision.
# img = self.transform(pil_img)
# saliency_1channel = self.transform_saliency(pil_saliency)
# saliency = get_saleincy_2channel(saliency_1channel)
# return img, saliency
|
[
"torch.utils.data.DataLoader",
"utils.get_saleincy_2channel",
"PIL.Image.open",
"torchvision.transforms.ToTensor",
"torchvision.transforms.Compose",
"torchvision.transforms.Grayscale",
"torchvision.transforms.Normalize",
"os.path.join",
"os.listdir",
"torchvision.transforms.Resize"
] |
[((1995, 2013), 'torchvision.transforms.Compose', 'T.Compose', (['compose'], {}), '(compose)\n', (2004, 2013), True, 'import torchvision.transforms as T\n'), ((2192, 2219), 'torchvision.transforms.Compose', 'T.Compose', (['compose_saliency'], {}), '(compose_saliency)\n', (2201, 2219), True, 'import torchvision.transforms as T\n'), ((2821, 2839), 'torchvision.transforms.Compose', 'T.Compose', (['compose'], {}), '(compose)\n', (2830, 2839), True, 'import torchvision.transforms as T\n'), ((237, 253), 'os.listdir', 'os.listdir', (['root'], {}), '(root)\n', (247, 253), False, 'import os\n'), ((459, 479), 'PIL.Image.open', 'Image.open', (['img_path'], {}), '(img_path)\n', (469, 479), False, 'from PIL import Image\n'), ((825, 841), 'os.listdir', 'os.listdir', (['root'], {}), '(root)\n', (835, 841), False, 'import os\n'), ((1256, 1303), 'os.path.join', 'os.path.join', (['self.saliency_root', 'saliency_name'], {}), '(self.saliency_root, saliency_name)\n', (1268, 1303), False, 'import os\n'), ((1350, 1370), 'PIL.Image.open', 'Image.open', (['img_path'], {}), '(img_path)\n', (1360, 1370), False, 'from PIL import Image\n'), ((1394, 1419), 'PIL.Image.open', 'Image.open', (['saliency_path'], {}), '(saliency_path)\n', (1404, 1419), False, 'from PIL import Image\n'), ((1607, 1653), 'utils.get_saleincy_2channel', 'utils.get_saleincy_2channel', (['saliency_1channel'], {}), '(saliency_1channel)\n', (1634, 1653), False, 'import utils\n'), ((1838, 1874), 'torchvision.transforms.Resize', 'T.Resize', (['(img_size[0], img_size[1])'], {}), '((img_size[0], img_size[1]))\n', (1846, 1874), True, 'import torchvision.transforms as T\n'), ((1884, 1896), 'torchvision.transforms.ToTensor', 'T.ToTensor', ([], {}), '()\n', (1894, 1896), True, 'import torchvision.transforms as T\n'), ((1915, 1960), 'torchvision.transforms.Normalize', 'T.Normalize', (['(0.5, 0.5, 0.5)', '(0.5, 0.5, 0.5)'], {}), '((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))\n', (1926, 1960), True, 'import torchvision.transforms as T\n'), ((2048, 2084), 'torchvision.transforms.Resize', 'T.Resize', (['(img_size[0], img_size[1])'], {}), '((img_size[0], img_size[1]))\n', (2056, 2084), True, 'import torchvision.transforms as T\n'), ((2094, 2128), 'torchvision.transforms.Grayscale', 'T.Grayscale', ([], {'num_output_channels': '(1)'}), '(num_output_channels=1)\n', (2105, 2128), True, 'import torchvision.transforms as T\n'), ((2138, 2150), 'torchvision.transforms.ToTensor', 'T.ToTensor', ([], {}), '()\n', (2148, 2150), True, 'import torchvision.transforms as T\n'), ((2329, 2392), 'torch.utils.data.DataLoader', 'torch.utils.data.DataLoader', (['dataset', 'batch_size'], {'num_workers': '(1)'}), '(dataset, batch_size, num_workers=1)\n', (2356, 2392), False, 'import torch\n'), ((2628, 2664), 'torchvision.transforms.Resize', 'T.Resize', (['(img_size[0], img_size[1])'], {}), '((img_size[0], img_size[1]))\n', (2636, 2664), True, 'import torchvision.transforms as T\n'), ((2674, 2708), 'torchvision.transforms.Grayscale', 'T.Grayscale', ([], {'num_output_channels': '(3)'}), '(num_output_channels=3)\n', (2685, 2708), True, 'import torchvision.transforms as T\n'), ((2718, 2730), 'torchvision.transforms.ToTensor', 'T.ToTensor', ([], {}), '()\n', (2728, 2730), True, 'import torchvision.transforms as T\n'), ((2740, 2785), 'torchvision.transforms.Normalize', 'T.Normalize', (['(0.5, 0.5, 0.5)', '(0.5, 0.5, 0.5)'], {}), '((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))\n', (2751, 2785), True, 'import torchvision.transforms as T\n'), ((2908, 2971), 'torch.utils.data.DataLoader', 'torch.utils.data.DataLoader', 
(['dataset', 'batch_size'], {'num_workers': '(1)'}), '(dataset, batch_size, num_workers=1)\n', (2935, 2971), False, 'import torch\n'), ((273, 294), 'os.path.join', 'os.path.join', (['root', 'k'], {}), '(root, k)\n', (285, 294), False, 'import os\n'), ((861, 882), 'os.path.join', 'os.path.join', (['root', 'k'], {}), '(root, k)\n', (873, 882), False, 'import os\n')]
|
# -*- coding: utf-8 -*-
# preggy assertions
# https://github.com/heynemann/preggy
# Licensed under the MIT license:
# http://www.opensource.org/licenses/mit-license
# Copyright (c) 2013 <NAME> <EMAIL>
from preggy import expect
#-----------------------------------------------------------------------------
def test_to_be_null():
expect(None).to_be_null()
try:
expect(None).not_to_be_null()
except AssertionError:
return
assert False, 'Should not have gotten this far'
def test_not_to_be_null():
expect('something').Not.to_be_null()
expect('something').not_to_be_null()
try:
expect('something').to_be_null()
except AssertionError:
return
assert False, 'Should not have gotten this far'
|
[
"preggy.expect"
] |
[((338, 350), 'preggy.expect', 'expect', (['None'], {}), '(None)\n', (344, 350), False, 'from preggy import expect\n'), ((581, 600), 'preggy.expect', 'expect', (['"""something"""'], {}), "('something')\n", (587, 600), False, 'from preggy import expect\n'), ((382, 394), 'preggy.expect', 'expect', (['None'], {}), '(None)\n', (388, 394), False, 'from preggy import expect\n'), ((540, 559), 'preggy.expect', 'expect', (['"""something"""'], {}), "('something')\n", (546, 559), False, 'from preggy import expect\n'), ((636, 655), 'preggy.expect', 'expect', (['"""something"""'], {}), "('something')\n", (642, 655), False, 'from preggy import expect\n')]
|
"""Test dtool_lookup_server.utils.dataset_info_is_valid helper function."""
# Minimum data required to register a dataset.
INFO = {
"uuid": "af6727bf-29c7-43dd-b42f-a5d7ede28337",
"type": "dataset",
"uri": "file:///tmp/a_dataset",
"name": "my-dataset",
"readme": {"description": "test dataset"},
"manifest": {
"dtoolcore_version": "3.7.0",
"hash_function": "md5sum_hexdigest",
"items": {}
},
"base_uri": "file:///tmp",
"creator_username": "olssont",
"frozen_at": 1536238185.881941,
"annotations": {"stars": 5},
"tags": ["empty", "dataset"],
}
def test_dataset_info_is_valid_returns_true_on_valid_info():
from dtool_lookup_server.utils import dataset_info_is_valid
info = INFO.copy()
assert dataset_info_is_valid(info)
def test_dataset_info_returns_false_when_key_data_is_missing():
from dtool_lookup_server.utils import dataset_info_is_valid
for key in INFO.keys():
info = INFO.copy()
del info[key]
assert not dataset_info_is_valid(info), key
def test_dataset_info_returns_false_when_type_is_not_dataset():
from dtool_lookup_server.utils import dataset_info_is_valid
info = INFO.copy()
info["type"] = "protodataset"
assert not dataset_info_is_valid(info)
def test_dataset_info_returns_false_if_uuid_looks_invalid():
from dtool_lookup_server.utils import dataset_info_is_valid
info = INFO.copy()
info["uuid"] = "af6727bf-29c7-43dd-b42f"
assert not dataset_info_is_valid(info)
def test_dataset_info_is_valid_returns_false_if_base_uri_ends_with_slash():
from dtool_lookup_server.utils import dataset_info_is_valid
info = INFO.copy()
info["base_uri"] = "file:///tmp/"
assert not dataset_info_is_valid(info)
|
[
"dtool_lookup_server.utils.dataset_info_is_valid"
] |
[((778, 805), 'dtool_lookup_server.utils.dataset_info_is_valid', 'dataset_info_is_valid', (['info'], {}), '(info)\n', (799, 805), False, 'from dtool_lookup_server.utils import dataset_info_is_valid\n'), ((1270, 1297), 'dtool_lookup_server.utils.dataset_info_is_valid', 'dataset_info_is_valid', (['info'], {}), '(info)\n', (1291, 1297), False, 'from dtool_lookup_server.utils import dataset_info_is_valid\n'), ((1509, 1536), 'dtool_lookup_server.utils.dataset_info_is_valid', 'dataset_info_is_valid', (['info'], {}), '(info)\n', (1530, 1536), False, 'from dtool_lookup_server.utils import dataset_info_is_valid\n'), ((1756, 1783), 'dtool_lookup_server.utils.dataset_info_is_valid', 'dataset_info_is_valid', (['info'], {}), '(info)\n', (1777, 1783), False, 'from dtool_lookup_server.utils import dataset_info_is_valid\n'), ((1034, 1061), 'dtool_lookup_server.utils.dataset_info_is_valid', 'dataset_info_is_valid', (['info'], {}), '(info)\n', (1055, 1061), False, 'from dtool_lookup_server.utils import dataset_info_is_valid\n')]
|
from django.contrib import admin
from .models import Activity, Fitbit
class ActivityAdmin(admin.ModelAdmin):
fieldsets = [
('Date Information', {'fields': ['entry_date']}),
('Fitbit Data', {'fields': ['steps', 'distance'], 'classes' : ['collapse']}),
]
list_display = ('entry_date' , 'steps', 'distance')
class FitbitAdmin(admin.ModelAdmin):
fieldsets = [
('Date Information', {'fields': ['entry_date']}),
('Fitbit Data', {'fields': ['steps', 'distance', 'active_minutes', 'weight'], 'classes' : ['collapse']}),
]
list_display = ('entry_date', 'steps', 'distance', 'active_minutes', 'weight')
# Register your models here.
admin.site.register(Activity)
admin.site.register(Fitbit)
|
[
"django.contrib.admin.site.register"
] |
[((640, 669), 'django.contrib.admin.site.register', 'admin.site.register', (['Activity'], {}), '(Activity)\n', (659, 669), False, 'from django.contrib import admin\n'), ((670, 697), 'django.contrib.admin.site.register', 'admin.site.register', (['Fitbit'], {}), '(Fitbit)\n', (689, 697), False, 'from django.contrib import admin\n')]
|
import logging
import os
import sys
import tempfile
from contextlib import contextmanager
from typing import Callable, Dict, List, Optional, Type, Union
import pytest
from pydantic import BaseSettings, validator
from selenium import webdriver
from .browser import BrowserError, BrowserRecorder, Chrome, Remote
from .models import Outcome, Report, ReportResult, TestResult, Timed
from .report_exporter import ReportExporter
_here = os.path.abspath(os.path.dirname(__file__))
logger = logging.getLogger(__name__)
class EnvSettings(BaseSettings):
"""
Automatically derives from environment variables and
translates truthy/falsey strings into bools. Only required
for code that must be conditionally loaded; all others
should be part of 'pytest_addoption()'
"""
# If set to True, will generate a new browser instance within every request
# for a given scope, instead of only creating a single instance and generating
# contexts for each test.
# This has a significant performance impact,
# but sometimes cannot be avoided.
disable_session_browser: Optional[bool] = False
@validator("*", pre=True, always=True)
def handle_empty_string(cls, v):
if not v:
return None
return v
_SETTINGS = EnvSettings()
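# Example (the shell syntax is illustrative): running the test suite as
#   disable_session_browser=1 pytest ...
# makes _SETTINGS.disable_session_browser truthy, so every scope gets a fresh
# browser instance instead of a tab context on the shared session browser.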
def pytest_addoption(parser):
group = parser.getgroup("webdriver_recorder")
group.addoption(
"--selenium-server",
action="store",
dest="selenium_server",
default=os.environ.get("REMOTE_SELENIUM"),
help="Remote selenium webdriver to connect to (eg localhost:4444)",
)
group.addoption(
"--report-dir",
action="store",
dest="report_dir",
default=os.environ.get("REPORT_DIR", os.path.join(os.getcwd(), "webdriver-report")),
help="The path to the directory where artifacts should be stored.",
)
group.addoption(
"--jinja-template",
action="store",
dest="report_template",
default=os.path.join(_here, "report.template.html"),
)
group.addoption(
"--report-title",
action="store",
dest="report_title",
default="Webdriver Recorder Summary",
help="An optional title for your report; if not provided, a default will be used. "
"You may also provide a constant default by overriding the report_title fixture.",
)
@pytest.fixture(scope="session", autouse=True)
def clean_screenshots(report_dir):
screenshots_dir = os.path.join(report_dir, "screenshots")
if os.path.exists(screenshots_dir):
old_screenshots = os.listdir(screenshots_dir)
for png in old_screenshots:
os.remove(os.path.join(screenshots_dir, png))
@pytest.fixture(scope="session", autouse=True)
def test_report(report_title) -> Report:
args = []
if len(sys.argv) > 1:
args.extend(sys.argv[1:])
return Report(
arguments=" ".join(args),
outcome=Outcome.never_started,
title=report_title,
)
@pytest.fixture(scope="session")
def selenium_server(request) -> Optional[str]:
"""Returns a non-empty string or None"""
value = request.config.getoption("selenium_server")
if value:
return value.strip()
return None
@pytest.fixture(scope="session")
def chrome_options() -> webdriver.ChromeOptions:
"""
An extensible instance of ChromeOptions with default
options configured for a balance between performance
and test isolation.
You can extend this:
@pytest.fixture(scope='session')
def chrome_options(chrome_options) -> ChromeOptions:
chrome_options.add_argument("--option-name")
return chrome_options
or override it entirely:
@pytest.fixture(scope='session')
def chrome_options() -> ChromeOptions:
return ChromeOptions()
"""
options = webdriver.ChromeOptions()
# Our default options promote a balance between
# performance and test isolation.
options.add_argument("--headless")
options.add_argument("--incognito")
options.add_argument("--disable-application-cache")
options.add_argument("--no-sandbox")
options.add_argument("--disable-dev-shm-usage")
return options
@pytest.fixture(scope="session")
def browser_args(selenium_server, chrome_options) -> Dict[str, Optional[Union[webdriver.ChromeOptions, str]]]:
args = {"options": chrome_options}
if selenium_server:
args["command_executor"] = f"http://{selenium_server}/wd/hub"
return args
@pytest.fixture(scope="session")
def browser_class(browser_args) -> Type[BrowserRecorder]:
if browser_args.get("command_executor"):
return Remote
return Chrome
@pytest.fixture(scope="session")
def build_browser(browser_args, browser_class) -> Callable[..., BrowserRecorder]:
logger.info(
"Browser generator will build instances using the following settings:\n"
f" Browser class: {browser_class.__name__}\n"
f" Browser args: {dict(browser_args)}"
)
def inner() -> BrowserRecorder:
return browser_class(**browser_args)
return inner
@pytest.fixture(scope="session")
def session_browser(build_browser) -> BrowserRecorder:
"""
A browser instance that is kept open for the entire test run.
Only instantiated if it is used, but by default will be used in both the
'browser' and 'class_browser' fixtures, unless "disable_session_browser=1"
is set in the environment.
"""
browser = build_browser()
try:
yield browser
finally:
browser.quit()
@pytest.fixture(scope="session")
def browser_context() -> Callable[..., Chrome]:
"""
This fixture allows you to create a fresh context for a given
browser instance.
The default behavior of the `browser` fixture is to always run in a context of the session scope, so
you only need to use this if you are not using (or are overriding) the `browser` fixture.
The fixture itself simply passes the context manager, so you can use it like so:
def test_something(browser_context):
with browser_context() as browser:
browser.get('https://www.uw.edu')
You may also provide a list of urls to visit to clear cookies at the end of your session,
if the default 'delete_all_cookies' behavior is not enough to cover your use case.
"""
@contextmanager
def inner(browser: BrowserRecorder, cookie_urls: Optional[List[str]] = None) -> BrowserRecorder:
browser.open_tab()
cookie_urls = cookie_urls or []
try:
yield browser
finally:
browser.delete_all_cookies()
for url in cookie_urls:
browser.get(url)
browser.delete_all_cookies()
browser.close_tab()
return inner
if _SETTINGS.disable_session_browser:
logger.warning("Disabling auto-use of 'session_browser', this may significantly decrease test performance.")
@pytest.fixture(scope="session")
def session_browser_disabled() -> bool:
return True
@pytest.fixture
def browser(build_browser) -> BrowserRecorder:
"""Creates a fresh instance of the browser using the configured chrome_options fixture."""
browser = build_browser()
try:
yield browser
finally:
browser.quit()
@pytest.fixture(scope="class")
def class_browser(build_browser, request) -> BrowserRecorder:
"""
        Creates a fresh instance of the browser for use in the requesting class, using the configured
chrome_options fixture.
"""
browser = build_browser()
request.cls.browser = browser
try:
yield browser
finally:
browser.quit()
else:
logger.info(
"Enabling auto-use of 'session_browser'; if your tests appear stuck, try disabling "
"by setting 'disable_session_browser=1' in your environment."
)
@pytest.fixture
def browser(session_browser, browser_context) -> BrowserRecorder:
"""
Creates a function-scoped tab context for the session_browser which cleans
up after itself (to the best of its ability). If you need a fresh instance
each test, you can set `disable_session_browser=1` in your environment.
"""
with browser_context(session_browser) as browser:
yield browser
@pytest.fixture(scope="class")
def class_browser(request, session_browser, browser_context) -> BrowserRecorder:
"""
Creates a class-scoped tab context and binds it to the requesting class
as 'self.browser'; this tab will close once all tests in the class have run,
and will clean up after itself (to the best of its ability). If you need
a fresh browser instance for each class, you can set `disable_session_browser=1` in your
environment.
"""
with browser_context(session_browser) as browser:
request.cls.browser = browser
yield browser
@pytest.fixture(scope="session")
def session_browser_disabled() -> bool:
return False
@pytest.fixture(scope="session")
def report_dir(request):
dir_ = request.config.getoption("report_dir")
os.makedirs(dir_, exist_ok=True)
return dir_
@pytest.fixture(scope="session", autouse=True)
def report_generator(report_dir, test_report):
with tempfile.NamedTemporaryFile(prefix="worker.", dir=report_dir) as worker_file:
suffix = ".".join(worker_file.name.split(".")[1:])
yield
test_report.stop_timer()
exporter = ReportExporter()
workers = list(f for f in os.listdir(report_dir) if f.startswith("worker."))
worker_results = list(f for f in os.listdir(report_dir) if f.endswith(".result.json"))
if not workers:
test_report.outcome = Outcome.success
# Aggregate worker reports into this "root" report.
for result_file in [os.path.join(report_dir, f) for f in worker_results]:
worker_report = Report.parse_file(result_file)
test_report.results.extend(worker_report.results)
os.remove(result_file)
exporter.export_all(test_report, report_dir)
else:
# If there are other workers, only export the report json of the
# current worker. The last worker running will be responsible for aggregating and reporting results.
exporter.export_json(test_report, report_dir, dest_filename=f"{suffix}.result.json")
@pytest.fixture(autouse=True)
def report_test(report_generator, request, test_report):
"""
Print the results to report_file after a test run. Without this, the results of the test will not be saved.
"""
tb = None
console_logs = []
timer: Timed
with Timed() as timer:
yield
call_summary = getattr(request.node, "report_result", None)
if call_summary:
doc = call_summary.doc
test_name = call_summary.report.nodeid
outcome = Outcome.failure if call_summary.report.failed else Outcome.success
if call_summary and call_summary.excinfo and not tb:
outcome = Outcome.failure
exception: BaseException = call_summary.excinfo.value
exception_msg = f"{exception.__class__.__name__}: {str(exception)}"
if isinstance(exception, BrowserError):
if exception.orig:
tb = f"{exception_msg}\n{exception.orig=}"
console_logs = [log.get("message", "") for log in exception.logs]
if not tb:
tb = f"{exception_msg}\n(No traceback is available)"
else:
logging.error(
f"Test {request.node} reported no outcomes; "
f"this usually indicates a fixture caused an error when setting up the test."
)
doc = None
test_name = f"{request.node.name}"
outcome = Outcome.never_started
# TODO: Figure out a way to include class docs if they exist
# class TestFooBar:
# """
# When Foo is bar
# """
# def test_a_baz(self):
# """and baz is bop"""
# do_work('bop')
# The report output should then read "When foo is bar and baz is bop"
result = TestResult(
pngs=BrowserRecorder.pngs,
test_name=test_name,
test_description=doc,
outcome=outcome,
start_time=timer.start_time,
end_time=timer.end_time,
traceback=tb,
console_errors=console_logs,
)
BrowserRecorder.pngs = []
test_report.results.append(result)
@pytest.hookimpl(tryfirst=True, hookwrapper=True)
def pytest_runtest_makereport(item, call):
"""
This gives us hooks from which to report status post test-run.
"""
outcome = yield
report = outcome.get_result()
if report.when == "call":
doc = getattr(getattr(item, "function", None), "__doc__", None)
item.report_result = ReportResult(report=report, excinfo=call.excinfo, doc=doc)
@pytest.fixture(scope="session")
def report_title(request) -> str:
return request.config.getoption("report_title")
|
[
"tempfile.NamedTemporaryFile",
"logging.error",
"os.remove",
"os.makedirs",
"os.getcwd",
"pytest.hookimpl",
"os.path.dirname",
"pytest.fixture",
"os.path.exists",
"os.environ.get",
"pydantic.validator",
"selenium.webdriver.ChromeOptions",
"os.path.join",
"os.listdir",
"logging.getLogger"
] |
[((486, 513), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (503, 513), False, 'import logging\n'), ((2391, 2436), 'pytest.fixture', 'pytest.fixture', ([], {'scope': '"""session"""', 'autouse': '(True)'}), "(scope='session', autouse=True)\n", (2405, 2436), False, 'import pytest\n'), ((2725, 2770), 'pytest.fixture', 'pytest.fixture', ([], {'scope': '"""session"""', 'autouse': '(True)'}), "(scope='session', autouse=True)\n", (2739, 2770), False, 'import pytest\n'), ((3016, 3047), 'pytest.fixture', 'pytest.fixture', ([], {'scope': '"""session"""'}), "(scope='session')\n", (3030, 3047), False, 'import pytest\n'), ((3258, 3289), 'pytest.fixture', 'pytest.fixture', ([], {'scope': '"""session"""'}), "(scope='session')\n", (3272, 3289), False, 'import pytest\n'), ((4246, 4277), 'pytest.fixture', 'pytest.fixture', ([], {'scope': '"""session"""'}), "(scope='session')\n", (4260, 4277), False, 'import pytest\n'), ((4541, 4572), 'pytest.fixture', 'pytest.fixture', ([], {'scope': '"""session"""'}), "(scope='session')\n", (4555, 4572), False, 'import pytest\n'), ((4719, 4750), 'pytest.fixture', 'pytest.fixture', ([], {'scope': '"""session"""'}), "(scope='session')\n", (4733, 4750), False, 'import pytest\n'), ((5145, 5176), 'pytest.fixture', 'pytest.fixture', ([], {'scope': '"""session"""'}), "(scope='session')\n", (5159, 5176), False, 'import pytest\n'), ((5601, 5632), 'pytest.fixture', 'pytest.fixture', ([], {'scope': '"""session"""'}), "(scope='session')\n", (5615, 5632), False, 'import pytest\n'), ((9182, 9213), 'pytest.fixture', 'pytest.fixture', ([], {'scope': '"""session"""'}), "(scope='session')\n", (9196, 9213), False, 'import pytest\n'), ((9345, 9390), 'pytest.fixture', 'pytest.fixture', ([], {'scope': '"""session"""', 'autouse': '(True)'}), "(scope='session', autouse=True)\n", (9359, 9390), False, 'import pytest\n'), ((10536, 10564), 'pytest.fixture', 'pytest.fixture', ([], {'autouse': '(True)'}), '(autouse=True)\n', (10550, 10564), False, 'import pytest\n'), ((12668, 12716), 'pytest.hookimpl', 'pytest.hookimpl', ([], {'tryfirst': '(True)', 'hookwrapper': '(True)'}), '(tryfirst=True, hookwrapper=True)\n', (12683, 12716), False, 'import pytest\n'), ((13091, 13122), 'pytest.fixture', 'pytest.fixture', ([], {'scope': '"""session"""'}), "(scope='session')\n", (13105, 13122), False, 'import pytest\n'), ((450, 475), 'os.path.dirname', 'os.path.dirname', (['__file__'], {}), '(__file__)\n', (465, 475), False, 'import os\n'), ((1127, 1164), 'pydantic.validator', 'validator', (['"""*"""'], {'pre': '(True)', 'always': '(True)'}), "('*', pre=True, always=True)\n", (1136, 1164), False, 'from pydantic import BaseSettings, validator\n'), ((2494, 2533), 'os.path.join', 'os.path.join', (['report_dir', '"""screenshots"""'], {}), "(report_dir, 'screenshots')\n", (2506, 2533), False, 'import os\n'), ((2541, 2572), 'os.path.exists', 'os.path.exists', (['screenshots_dir'], {}), '(screenshots_dir)\n', (2555, 2572), False, 'import os\n'), ((3879, 3904), 'selenium.webdriver.ChromeOptions', 'webdriver.ChromeOptions', ([], {}), '()\n', (3902, 3904), False, 'from selenium import webdriver\n'), ((7005, 7036), 'pytest.fixture', 'pytest.fixture', ([], {'scope': '"""session"""'}), "(scope='session')\n", (7019, 7036), False, 'import pytest\n'), ((7395, 7424), 'pytest.fixture', 'pytest.fixture', ([], {'scope': '"""class"""'}), "(scope='class')\n", (7409, 7424), False, 'import pytest\n'), ((8447, 8476), 'pytest.fixture', 'pytest.fixture', ([], {'scope': '"""class"""'}), "(scope='class')\n", (8461, 
8476), False, 'import pytest\n'), ((9082, 9113), 'pytest.fixture', 'pytest.fixture', ([], {'scope': '"""session"""'}), "(scope='session')\n", (9096, 9113), False, 'import pytest\n'), ((9293, 9325), 'os.makedirs', 'os.makedirs', (['dir_'], {'exist_ok': '(True)'}), '(dir_, exist_ok=True)\n', (9304, 9325), False, 'import os\n'), ((2600, 2627), 'os.listdir', 'os.listdir', (['screenshots_dir'], {}), '(screenshots_dir)\n', (2610, 2627), False, 'import os\n'), ((9447, 9508), 'tempfile.NamedTemporaryFile', 'tempfile.NamedTemporaryFile', ([], {'prefix': '"""worker."""', 'dir': 'report_dir'}), "(prefix='worker.', dir=report_dir)\n", (9474, 9508), False, 'import tempfile\n'), ((11682, 11826), 'logging.error', 'logging.error', (['f"""Test {request.node} reported no outcomes; this usually indicates a fixture caused an error when setting up the test."""'], {}), "(\n f'Test {request.node} reported no outcomes; this usually indicates a fixture caused an error when setting up the test.'\n )\n", (11695, 11826), False, 'import logging\n'), ((1493, 1526), 'os.environ.get', 'os.environ.get', (['"""REMOTE_SELENIUM"""'], {}), "('REMOTE_SELENIUM')\n", (1507, 1526), False, 'import os\n'), ((2002, 2045), 'os.path.join', 'os.path.join', (['_here', '"""report.template.html"""'], {}), "(_here, 'report.template.html')\n", (2014, 2045), False, 'import os\n'), ((9985, 10012), 'os.path.join', 'os.path.join', (['report_dir', 'f'], {}), '(report_dir, f)\n', (9997, 10012), False, 'import os\n'), ((10172, 10194), 'os.remove', 'os.remove', (['result_file'], {}), '(result_file)\n', (10181, 10194), False, 'import os\n'), ((2686, 2720), 'os.path.join', 'os.path.join', (['screenshots_dir', 'png'], {}), '(screenshots_dir, png)\n', (2698, 2720), False, 'import os\n'), ((9689, 9711), 'os.listdir', 'os.listdir', (['report_dir'], {}), '(report_dir)\n', (9699, 9711), False, 'import os\n'), ((9777, 9799), 'os.listdir', 'os.listdir', (['report_dir'], {}), '(report_dir)\n', (9787, 9799), False, 'import os\n'), ((1764, 1775), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (1773, 1775), False, 'import os\n')]
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# (c) University of St Andrews 2020-2021
# (c) University of Strathclyde 2020-2021
# (c) James Hutton Institute 2020-2021
#
# Author:
# <NAME>
#
# Contact
# <EMAIL>
#
# <NAME>,
# Biomolecular Sciences Building,
# University of St Andrews,
# <NAME>,
# St Andrews,
# KY16 9ST
# Scotland,
# UK
#
# The MIT License
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
"""Script containing functions to retrieve paths to files and directories"""
from pathlib import Path
def get_file_paths(directory, prefixes=None, suffixes=None):
"""Retrieve paths to all files in input dir.
:param directory: Path, path to directory from which files are to be retrieved
:param prefixes: List of Str, prefixes of the file names to be retrieved
:param suffixes: List of Str, suffixes of the file names to be retrieved
Returns list of paths to fasta files.
"""
# create empty list to store the file entries, to allow checking if no files returned
file_paths = []
# retrieve all files from input directory
files_in_entries = (entry for entry in Path(directory).iterdir() if entry.is_file())
if prefixes is None and suffixes is None:
for item in files_in_entries:
file_paths.append(item)
elif prefixes is not None and suffixes is None:
for item in files_in_entries:
for prefix in prefixes:
if item.name.startswith(prefix):
file_paths.append(item)
elif prefixes is None and suffixes is not None:
for item in files_in_entries:
for suffix in suffixes:
if item.name.endswith(suffix):
file_paths.append(item)
else:
for item in files_in_entries:
for suffix in suffixes:
for prefix in prefixes:
if item.name.startswith(prefix) and item.name.endswith(suffix):
file_paths.append(item)
return file_paths
def get_dir_paths(directory, prefixes=None, suffixes=None):
"""Retrieve paths to all directories in input dir.
:param directory: Path, path to directory from which files are to be retrieved
:param prefixes: List of Str, prefixes of the file names to be retrieved
:param suffixes: List of Str, suffixes of the file names to be retrieved
    Returns list of paths to directories.
"""
# create empty list to store the file entries, to allow checking if no files returned
dir_paths = []
# retrieve all files from input directory
files_in_entries = (entry for entry in Path(directory).iterdir() if entry.is_dir())
if prefixes is None and suffixes is None:
for item in files_in_entries:
dir_paths.append(item)
elif prefixes is not None and suffixes is None:
for item in files_in_entries:
for prefix in prefixes:
if item.name.startswith(prefix):
dir_paths.append(item)
elif prefixes is None and suffixes is not None:
for item in files_in_entries:
for suffix in suffixes:
if item.name.endswith(suffix):
dir_paths.append(item)
else:
for item in files_in_entries:
for suffix in suffixes:
for prefix in prefixes:
if item.name.startswith(prefix) and item.name.endswith(suffix):
dir_paths.append(item)
return dir_paths
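# Illustrative usage (not part of the original module); the directory names,
# prefixes and suffixes below are assumptions for the example only:
# fasta_files = get_file_paths(Path("input_genomes"), suffixes=[".fasta", ".fa"])
# output_dirs = get_dir_paths(Path("cazy_output"), prefixes=["GCA_", "GCF_"])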
|
[
"pathlib.Path"
] |
[((2179, 2194), 'pathlib.Path', 'Path', (['directory'], {}), '(directory)\n', (2183, 2194), False, 'from pathlib import Path\n'), ((3718, 3733), 'pathlib.Path', 'Path', (['directory'], {}), '(directory)\n', (3722, 3733), False, 'from pathlib import Path\n')]
|
import tensorflow as tf
loss_object = tf.keras.losses.SparseCategoricalCrossentropy(
    from_logits=True, reduction='none')  # per-token losses are needed for the masking below
def loss_function(real, pred):
mask = tf.math.logical_not(tf.math.equal(real, 0))
loss_ = loss_object(real, pred)
mask = tf.cast(mask, dtype=loss_.dtype)
loss_ *= mask
return tf.reduce_sum(loss_)/tf.reduce_sum(mask)
def accuracy_function(real, pred):
accuracies = tf.equal(real, tf.argmax(pred, axis=1))
mask = tf.math.logical_not(tf.math.equal(real, 0))
accuracies = tf.math.logical_and(mask, accuracies)
accuracies = tf.cast(accuracies, dtype=tf.float32)
mask = tf.cast(mask, dtype=tf.float32)
return tf.reduce_sum(accuracies) / tf.reduce_sum(mask)
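# Illustrative smoke test (not part of the original file). It assumes `real`
# holds integer token ids with 0 marking padding and `pred` holds per-token
# logits of shape (num_tokens, vocab_size), matching the axis=1 argmax above:
# real = tf.constant([3, 1, 0, 0], dtype=tf.int64)
# pred = tf.random.uniform((4, 8))
# print(loss_function(real, pred).numpy(), accuracy_function(real, pred).numpy())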
|
[
"tensorflow.keras.losses.SparseCategoricalCrossentropy",
"tensorflow.reduce_sum",
"tensorflow.math.logical_and",
"tensorflow.argmax",
"tensorflow.math.equal",
"tensorflow.cast"
] |
[((39, 102), 'tensorflow.keras.losses.SparseCategoricalCrossentropy', 'tf.keras.losses.SparseCategoricalCrossentropy', ([], {'from_logits': '(True)'}), '(from_logits=True)\n', (84, 102), True, 'import tensorflow as tf\n'), ((241, 273), 'tensorflow.cast', 'tf.cast', (['mask'], {'dtype': 'loss_.dtype'}), '(mask, dtype=loss_.dtype)\n', (248, 273), True, 'import tensorflow as tf\n'), ((507, 544), 'tensorflow.math.logical_and', 'tf.math.logical_and', (['mask', 'accuracies'], {}), '(mask, accuracies)\n', (526, 544), True, 'import tensorflow as tf\n'), ((563, 600), 'tensorflow.cast', 'tf.cast', (['accuracies'], {'dtype': 'tf.float32'}), '(accuracies, dtype=tf.float32)\n', (570, 600), True, 'import tensorflow as tf\n'), ((612, 643), 'tensorflow.cast', 'tf.cast', (['mask'], {'dtype': 'tf.float32'}), '(mask, dtype=tf.float32)\n', (619, 643), True, 'import tensorflow as tf\n'), ((173, 195), 'tensorflow.math.equal', 'tf.math.equal', (['real', '(0)'], {}), '(real, 0)\n', (186, 195), True, 'import tensorflow as tf\n'), ((300, 320), 'tensorflow.reduce_sum', 'tf.reduce_sum', (['loss_'], {}), '(loss_)\n', (313, 320), True, 'import tensorflow as tf\n'), ((321, 340), 'tensorflow.reduce_sum', 'tf.reduce_sum', (['mask'], {}), '(mask)\n', (334, 340), True, 'import tensorflow as tf\n'), ((409, 432), 'tensorflow.argmax', 'tf.argmax', (['pred'], {'axis': '(1)'}), '(pred, axis=1)\n', (418, 432), True, 'import tensorflow as tf\n'), ((466, 488), 'tensorflow.math.equal', 'tf.math.equal', (['real', '(0)'], {}), '(real, 0)\n', (479, 488), True, 'import tensorflow as tf\n'), ((655, 680), 'tensorflow.reduce_sum', 'tf.reduce_sum', (['accuracies'], {}), '(accuracies)\n', (668, 680), True, 'import tensorflow as tf\n'), ((683, 702), 'tensorflow.reduce_sum', 'tf.reduce_sum', (['mask'], {}), '(mask)\n', (696, 702), True, 'import tensorflow as tf\n')]
|
import numpy as np
from numba import cuda, float32
import time
@cuda.jit
def matmul(A, B, C):
"""Perform square matrix multiplication of C = A * B
"""
i, j = cuda.grid(2)
if i < C.shape[0] and j < C.shape[1]:
tmp = 0.
for k in range(A.shape[1]):
tmp += A[i, k] * B[k, j]
C[i, j] = tmp
start=time.time()
# Host-side setup (reconstructed; the 1024x1024 size and the launch configuration are illustrative assumptions)
A = np.random.rand(1024, 1024).astype(np.float32)
B = np.random.rand(1024, 1024).astype(np.float32)
C = np.zeros((1024, 1024), dtype=np.float32)
threads_per_block = (16, 16)
blocks_per_grid = (64, 64)  # 64 * 16 = 1024 threads per axis, covering C exactly
matmul[blocks_per_grid, threads_per_block](A, B, C)
cuda.synchronize()  # kernel launches are asynchronous; wait before stopping the timer
end=time.time()
print(f"Runtime of the program is {end - start} s")
|
[
"numba.cuda.grid",
"time.time"
] |
[((360, 371), 'time.time', 'time.time', ([], {}), '()\n', (369, 371), False, 'import time\n'), ((405, 416), 'time.time', 'time.time', ([], {}), '()\n', (414, 416), False, 'import time\n'), ((172, 184), 'numba.cuda.grid', 'cuda.grid', (['(2)'], {}), '(2)\n', (181, 184), False, 'from numba import cuda, float32\n')]
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
import urllib.request
import urllib.parse
import sys
from datetime import datetime
url = 'http://zzzzzz/api/upload.php'
def sendmessage(message):
print(message)
params = urllib.parse.urlencode(message)
params = params.encode("ascii")
req = urllib.request.Request(url, data=params, headers={'content-type': 'application/x-www-form-urlencoded'})
with urllib.request.urlopen(req) as response:
#print(response.read().decode("unicode_escape"))
#print(response.getcode())
pass
args = sys.argv
msg = {"act": "serverwarning",
"time": datetime.now().strftime('%Y-%m-%d %H:%M:%S'), }
for line in sys.stdin:
if line is not None and line.strip() != "":
k, v = line.split(":")
msg[k] = v.strip()
sendmessage(msg)
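# Illustrative invocation (not part of the original script): any "key: value"
# lines piped on stdin are added to the message, e.g.
#   echo "msg: disk almost full" | python3 <this_script>.py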
|
[
"datetime.datetime.now"
] |
[((630, 644), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (642, 644), False, 'from datetime import datetime\n')]
|
import numpy as np
def time_between_values(df, cols):
gap_df = df[cols].dropna(how='any')
return gap_df.index.to_series().diff(-1).dt.total_seconds().abs()
def distance_to_monitor(df):
dist = np.sqrt(
df.left_gaze_origin_in_user_coordinate_system_x ** 2
+ df.left_gaze_origin_in_user_coordinate_system_y ** 2
+ df.left_gaze_origin_in_user_coordinate_system_z ** 2
)
dist.index = df.time
return dist
def group_by_hour_of_day(series):
return series.groupby(series.index.to_series().dt.hour)
def blinks_per_minute_by_hour_of_day(df):
gaps = time_between_values(
df.set_index('time'), ['left_pupil_diameter', 'right_pupil_diameter'])
blinks = gaps[(gaps < 0.5) & (gaps > 0.1)]
blinks_per_hour_of_day = group_by_hour_of_day(blinks).count()
seconds_recorded_per_hour_of_day = (
group_by_hour_of_day(gaps).count()
/ 60 # Divide by Frequency
)
return blinks_per_hour_of_day / seconds_recorded_per_hour_of_day * 60
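# Illustrative usage (not part of the original module); assumes a 60 Hz gaze
# recording in a DataFrame with 'time', 'left_pupil_diameter' and
# 'right_pupil_diameter' columns:
# blink_rate = blinks_per_minute_by_hour_of_day(gaze_df)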
|
[
"numpy.sqrt"
] |
[((208, 389), 'numpy.sqrt', 'np.sqrt', (['(df.left_gaze_origin_in_user_coordinate_system_x ** 2 + df.\n left_gaze_origin_in_user_coordinate_system_y ** 2 + df.\n left_gaze_origin_in_user_coordinate_system_z ** 2)'], {}), '(df.left_gaze_origin_in_user_coordinate_system_x ** 2 + df.\n left_gaze_origin_in_user_coordinate_system_y ** 2 + df.\n left_gaze_origin_in_user_coordinate_system_z ** 2)\n', (215, 389), True, 'import numpy as np\n')]
|
import pandas as pd
from mindsdb.api.mysql.mysql_proxy.datahub.datanodes.datanode import DataNode
class IntegrationDataNode(DataNode):
type = 'integration'
def __init__(self, integration_name, data_store):
self.integration_name = integration_name
self.data_store = data_store
def get_type(self):
return self.type
def get_tables(self):
return []
def has_table(self, tableName):
return True
def get_table_columns(self, tableName):
return []
def select(self, query):
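        # Run the raw SQL against the integration's datasource and return the rows
        # as dicts; datetime-like columns are converted to POSIX timestamps.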
sql_query = str(query)
dso, _creation_info = self.data_store.create_datasource(self.integration_name, {'query': sql_query})
data = dso.df.to_dict(orient='records')
column_names = list(dso.df.columns)
for column_name in column_names:
if pd.core.dtypes.common.is_datetime_or_timedelta_dtype(dso.df[column_name]):
pass_data = dso.df[column_name].dt.to_pydatetime()
for i, rec in enumerate(data):
rec[column_name] = pass_data[i].timestamp()
return data, column_names
|
[
"pandas.core.dtypes.common.is_datetime_or_timedelta_dtype"
] |
[((839, 912), 'pandas.core.dtypes.common.is_datetime_or_timedelta_dtype', 'pd.core.dtypes.common.is_datetime_or_timedelta_dtype', (['dso.df[column_name]'], {}), '(dso.df[column_name])\n', (891, 912), True, 'import pandas as pd\n')]
|
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from django.http import Http404
from django.utils.translation import pgettext_lazy
from django.utils.translation import ugettext_lazy as _
from django.utils.translation import ungettext_lazy
from horizon import messages
from horizon import tables
from openstack_dashboard import policy
from tacker_horizon.openstack_dashboard import api
from tackerclient.common.exceptions import NotFound
class VNFFGManagerItem(object):
def __init__(self, id, name, description, status):
self.id = id
self.name = name
self.description = description
self.status = status
class VNFFGManagerItemList(object):
VNFFGLIST_P = []
@classmethod
def get_obj_given_id(cls, vnffg_id):
for obj in cls.VNFFGLIST_P:
if obj.id == vnffg_id:
return obj
@classmethod
def add_item(cls, item):
cls.VNFFGLIST_P.append(item)
@classmethod
def clear_list(cls):
cls.VNFFGLIST_P = []
class MyFilterAction(tables.FilterAction):
name = "myfilter"
class VNFFGUpdateRow(tables.Row):
ajax = True
def can_be_selected(self, datum):
return datum.status != 'DELETE_COMPLETE'
def get_data(self, request, vnffg_id):
try:
item = VNFFGManagerItemList.get_obj_given_id(vnffg_id)
vnffg_instance = api.tacker.get_vnffg(request, vnffg_id)
if not vnffg_instance and not item:
# TODO(NAME) - bail with error
return None
if not vnffg_instance and item:
# API failure, just keep the current state
return item
vnffg = vnffg_instance['vnffg']
try:
vnffg_desc_str = vnffg['description']
except KeyError:
vnffg_desc_str = ""
if not item:
# Add an item entry
                item = VNFFGManagerItem(vnffg['id'], vnffg['name'],
                                        vnffg_desc_str, vnffg['status'])
else:
item.description = vnffg_desc_str
item.status = vnffg['status']
return item
except (Http404, NotFound):
raise Http404
except Exception as e:
messages.error(request, e)
raise
class DeleteVNFFG(policy.PolicyTargetMixin, tables.DeleteAction):
@staticmethod
def action_present(count):
return ungettext_lazy(
u"Terminate VNFFG",
u"Terminate VNFFGs",
count
)
@staticmethod
def action_past(count):
return ungettext_lazy(
u"Terminate VNFFG",
u"Terminate VNFFGs",
count
)
def action(self, request, obj_id):
api.tacker.delete_vnffg(request, obj_id)
class DeployVNFFG(tables.LinkAction):
name = "deployvnffg"
verbose_name = _("Deploy VNFFG")
classes = ("ajax-modal",)
icon = "plus"
url = "horizon:nfv:vnffgmanager:deployvnffg"
class VNFFGManagerTable(tables.DataTable):
STATUS_CHOICES = (
("ACTIVE", True),
("ERROR", False),
)
STACK_STATUS_DISPLAY_CHOICES = (
("init_in_progress", pgettext_lazy("current status of stack",
u"Init In Progress")),
("init_complete", pgettext_lazy("current status of stack",
u"Init Complete")),
("init_failed", pgettext_lazy("current status of stack",
u"Init Failed")),
("create_in_progress", pgettext_lazy("current status of stack",
u"Create In Progress")),
("create_complete", pgettext_lazy("current status of stack",
u"Create Complete")),
("create_failed", pgettext_lazy("current status of stack",
u"Create Failed")),
("delete_in_progress", pgettext_lazy("current status of stack",
u"Delete In Progress")),
("delete_complete", pgettext_lazy("current status of stack",
u"Delete Complete")),
("delete_failed", pgettext_lazy("current status of stack",
u"Delete Failed")),
("update_in_progress", pgettext_lazy("current status of stack",
u"Update In Progress")),
("update_complete", pgettext_lazy("current status of stack",
u"Update Complete")),
("update_failed", pgettext_lazy("current status of stack",
u"Update Failed")),
("rollback_in_progress", pgettext_lazy("current status of stack",
u"Rollback In Progress")),
("rollback_complete", pgettext_lazy("current status of stack",
u"Rollback Complete")),
("rollback_failed", pgettext_lazy("current status of stack",
u"Rollback Failed")),
("suspend_in_progress", pgettext_lazy("current status of stack",
u"Suspend In Progress")),
("suspend_complete", pgettext_lazy("current status of stack",
u"Suspend Complete")),
("suspend_failed", pgettext_lazy("current status of stack",
u"Suspend Failed")),
("resume_in_progress", pgettext_lazy("current status of stack",
u"Resume In Progress")),
("resume_complete", pgettext_lazy("current status of stack",
u"Resume Complete")),
("resume_failed", pgettext_lazy("current status of stack",
u"Resume Failed")),
("adopt_in_progress", pgettext_lazy("current status of stack",
u"Adopt In Progress")),
("adopt_complete", pgettext_lazy("current status of stack",
u"Adopt Complete")),
("adopt_failed", pgettext_lazy("current status of stack",
u"Adopt Failed")),
("snapshot_in_progress", pgettext_lazy("current status of stack",
u"Snapshot In Progress")),
("snapshot_complete", pgettext_lazy("current status of stack",
u"Snapshot Complete")),
("snapshot_failed", pgettext_lazy("current status of stack",
u"Snapshot Failed")),
("check_in_progress", pgettext_lazy("current status of stack",
u"Check In Progress")),
("check_complete", pgettext_lazy("current status of stack",
u"Check Complete")),
("check_failed", pgettext_lazy("current status of stack",
u"Check Failed")),
)
name = tables.Column("name",
link="horizon:nfv:vnffgmanager:detail",
verbose_name=_("VNFFG Name"))
description = tables.Column("description",
verbose_name=_("Description"))
status = tables.Column("status",
hidden=False,
status=True,
status_choices=STATUS_CHOICES)
class Meta(object):
name = "vnffgmanager"
verbose_name = _("VNFFGManager")
status_columns = ["status", ]
row_class = VNFFGUpdateRow
table_actions = (DeployVNFFG, DeleteVNFFG, MyFilterAction,)
|
[
"tacker_horizon.openstack_dashboard.api.tacker.delete_vnffg",
"horizon.messages.error",
"django.utils.translation.pgettext_lazy",
"horizon.tables.Column",
"django.utils.translation.ungettext_lazy",
"django.utils.translation.ugettext_lazy",
"tacker_horizon.openstack_dashboard.api.tacker.get_vnffg"
] |
[((3426, 3443), 'django.utils.translation.ugettext_lazy', '_', (['"""Deploy VNFFG"""'], {}), "('Deploy VNFFG')\n", (3427, 3443), True, 'from django.utils.translation import ugettext_lazy as _\n'), ((8015, 8101), 'horizon.tables.Column', 'tables.Column', (['"""status"""'], {'hidden': '(False)', 'status': '(True)', 'status_choices': 'STATUS_CHOICES'}), "('status', hidden=False, status=True, status_choices=\n STATUS_CHOICES)\n", (8028, 8101), False, 'from horizon import tables\n'), ((2973, 3035), 'django.utils.translation.ungettext_lazy', 'ungettext_lazy', (['u"""Terminate VNFFG"""', 'u"""Terminate VNFFGs"""', 'count'], {}), "(u'Terminate VNFFG', u'Terminate VNFFGs', count)\n", (2987, 3035), False, 'from django.utils.translation import ungettext_lazy\n'), ((3144, 3206), 'django.utils.translation.ungettext_lazy', 'ungettext_lazy', (['u"""Terminate VNFFG"""', 'u"""Terminate VNFFGs"""', 'count'], {}), "(u'Terminate VNFFG', u'Terminate VNFFGs', count)\n", (3158, 3206), False, 'from django.utils.translation import ungettext_lazy\n'), ((3301, 3341), 'tacker_horizon.openstack_dashboard.api.tacker.delete_vnffg', 'api.tacker.delete_vnffg', (['request', 'obj_id'], {}), '(request, obj_id)\n', (3324, 3341), False, 'from tacker_horizon.openstack_dashboard import api\n'), ((8256, 8273), 'django.utils.translation.ugettext_lazy', '_', (['"""VNFFGManager"""'], {}), "('VNFFGManager')\n", (8257, 8273), True, 'from django.utils.translation import ugettext_lazy as _\n'), ((1873, 1912), 'tacker_horizon.openstack_dashboard.api.tacker.get_vnffg', 'api.tacker.get_vnffg', (['request', 'vnffg_id'], {}), '(request, vnffg_id)\n', (1893, 1912), False, 'from tacker_horizon.openstack_dashboard import api\n'), ((3733, 3794), 'django.utils.translation.pgettext_lazy', 'pgettext_lazy', (['"""current status of stack"""', 'u"""Init In Progress"""'], {}), "('current status of stack', u'Init In Progress')\n", (3746, 3794), False, 'from django.utils.translation import pgettext_lazy\n'), ((3866, 3924), 'django.utils.translation.pgettext_lazy', 'pgettext_lazy', (['"""current status of stack"""', 'u"""Init Complete"""'], {}), "('current status of stack', u'Init Complete')\n", (3879, 3924), False, 'from django.utils.translation import pgettext_lazy\n'), ((3991, 4047), 'django.utils.translation.pgettext_lazy', 'pgettext_lazy', (['"""current status of stack"""', 'u"""Init Failed"""'], {}), "('current status of stack', u'Init Failed')\n", (4004, 4047), False, 'from django.utils.translation import pgettext_lazy\n'), ((4119, 4182), 'django.utils.translation.pgettext_lazy', 'pgettext_lazy', (['"""current status of stack"""', 'u"""Create In Progress"""'], {}), "('current status of stack', u'Create In Progress')\n", (4132, 4182), False, 'from django.utils.translation import pgettext_lazy\n'), ((4258, 4318), 'django.utils.translation.pgettext_lazy', 'pgettext_lazy', (['"""current status of stack"""', 'u"""Create Complete"""'], {}), "('current status of stack', u'Create Complete')\n", (4271, 4318), False, 'from django.utils.translation import pgettext_lazy\n'), ((4389, 4447), 'django.utils.translation.pgettext_lazy', 'pgettext_lazy', (['"""current status of stack"""', 'u"""Create Failed"""'], {}), "('current status of stack', u'Create Failed')\n", (4402, 4447), False, 'from django.utils.translation import pgettext_lazy\n'), ((4521, 4584), 'django.utils.translation.pgettext_lazy', 'pgettext_lazy', (['"""current status of stack"""', 'u"""Delete In Progress"""'], {}), "('current status of stack', u'Delete In Progress')\n", (4534, 4584), False, 'from 
django.utils.translation import pgettext_lazy\n'), ((4660, 4720), 'django.utils.translation.pgettext_lazy', 'pgettext_lazy', (['"""current status of stack"""', 'u"""Delete Complete"""'], {}), "('current status of stack', u'Delete Complete')\n", (4673, 4720), False, 'from django.utils.translation import pgettext_lazy\n'), ((4791, 4849), 'django.utils.translation.pgettext_lazy', 'pgettext_lazy', (['"""current status of stack"""', 'u"""Delete Failed"""'], {}), "('current status of stack', u'Delete Failed')\n", (4804, 4849), False, 'from django.utils.translation import pgettext_lazy\n'), ((4923, 4986), 'django.utils.translation.pgettext_lazy', 'pgettext_lazy', (['"""current status of stack"""', 'u"""Update In Progress"""'], {}), "('current status of stack', u'Update In Progress')\n", (4936, 4986), False, 'from django.utils.translation import pgettext_lazy\n'), ((5062, 5122), 'django.utils.translation.pgettext_lazy', 'pgettext_lazy', (['"""current status of stack"""', 'u"""Update Complete"""'], {}), "('current status of stack', u'Update Complete')\n", (5075, 5122), False, 'from django.utils.translation import pgettext_lazy\n'), ((5193, 5251), 'django.utils.translation.pgettext_lazy', 'pgettext_lazy', (['"""current status of stack"""', 'u"""Update Failed"""'], {}), "('current status of stack', u'Update Failed')\n", (5206, 5251), False, 'from django.utils.translation import pgettext_lazy\n'), ((5327, 5392), 'django.utils.translation.pgettext_lazy', 'pgettext_lazy', (['"""current status of stack"""', 'u"""Rollback In Progress"""'], {}), "('current status of stack', u'Rollback In Progress')\n", (5340, 5392), False, 'from django.utils.translation import pgettext_lazy\n'), ((5472, 5534), 'django.utils.translation.pgettext_lazy', 'pgettext_lazy', (['"""current status of stack"""', 'u"""Rollback Complete"""'], {}), "('current status of stack', u'Rollback Complete')\n", (5485, 5534), False, 'from django.utils.translation import pgettext_lazy\n'), ((5609, 5669), 'django.utils.translation.pgettext_lazy', 'pgettext_lazy', (['"""current status of stack"""', 'u"""Rollback Failed"""'], {}), "('current status of stack', u'Rollback Failed')\n", (5622, 5669), False, 'from django.utils.translation import pgettext_lazy\n'), ((5746, 5810), 'django.utils.translation.pgettext_lazy', 'pgettext_lazy', (['"""current status of stack"""', 'u"""Suspend In Progress"""'], {}), "('current status of stack', u'Suspend In Progress')\n", (5759, 5810), False, 'from django.utils.translation import pgettext_lazy\n'), ((5888, 5949), 'django.utils.translation.pgettext_lazy', 'pgettext_lazy', (['"""current status of stack"""', 'u"""Suspend Complete"""'], {}), "('current status of stack', u'Suspend Complete')\n", (5901, 5949), False, 'from django.utils.translation import pgettext_lazy\n'), ((6022, 6081), 'django.utils.translation.pgettext_lazy', 'pgettext_lazy', (['"""current status of stack"""', 'u"""Suspend Failed"""'], {}), "('current status of stack', u'Suspend Failed')\n", (6035, 6081), False, 'from django.utils.translation import pgettext_lazy\n'), ((6156, 6219), 'django.utils.translation.pgettext_lazy', 'pgettext_lazy', (['"""current status of stack"""', 'u"""Resume In Progress"""'], {}), "('current status of stack', u'Resume In Progress')\n", (6169, 6219), False, 'from django.utils.translation import pgettext_lazy\n'), ((6295, 6355), 'django.utils.translation.pgettext_lazy', 'pgettext_lazy', (['"""current status of stack"""', 'u"""Resume Complete"""'], {}), "('current status of stack', u'Resume Complete')\n", (6308, 6355), 
False, 'from django.utils.translation import pgettext_lazy\n'), ((6426, 6484), 'django.utils.translation.pgettext_lazy', 'pgettext_lazy', (['"""current status of stack"""', 'u"""Resume Failed"""'], {}), "('current status of stack', u'Resume Failed')\n", (6439, 6484), False, 'from django.utils.translation import pgettext_lazy\n'), ((6557, 6619), 'django.utils.translation.pgettext_lazy', 'pgettext_lazy', (['"""current status of stack"""', 'u"""Adopt In Progress"""'], {}), "('current status of stack', u'Adopt In Progress')\n", (6570, 6619), False, 'from django.utils.translation import pgettext_lazy\n'), ((6693, 6752), 'django.utils.translation.pgettext_lazy', 'pgettext_lazy', (['"""current status of stack"""', 'u"""Adopt Complete"""'], {}), "('current status of stack', u'Adopt Complete')\n", (6706, 6752), False, 'from django.utils.translation import pgettext_lazy\n'), ((6821, 6878), 'django.utils.translation.pgettext_lazy', 'pgettext_lazy', (['"""current status of stack"""', 'u"""Adopt Failed"""'], {}), "('current status of stack', u'Adopt Failed')\n", (6834, 6878), False, 'from django.utils.translation import pgettext_lazy\n'), ((6953, 7018), 'django.utils.translation.pgettext_lazy', 'pgettext_lazy', (['"""current status of stack"""', 'u"""Snapshot In Progress"""'], {}), "('current status of stack', u'Snapshot In Progress')\n", (6966, 7018), False, 'from django.utils.translation import pgettext_lazy\n'), ((7098, 7160), 'django.utils.translation.pgettext_lazy', 'pgettext_lazy', (['"""current status of stack"""', 'u"""Snapshot Complete"""'], {}), "('current status of stack', u'Snapshot Complete')\n", (7111, 7160), False, 'from django.utils.translation import pgettext_lazy\n'), ((7235, 7295), 'django.utils.translation.pgettext_lazy', 'pgettext_lazy', (['"""current status of stack"""', 'u"""Snapshot Failed"""'], {}), "('current status of stack', u'Snapshot Failed')\n", (7248, 7295), False, 'from django.utils.translation import pgettext_lazy\n'), ((7370, 7432), 'django.utils.translation.pgettext_lazy', 'pgettext_lazy', (['"""current status of stack"""', 'u"""Check In Progress"""'], {}), "('current status of stack', u'Check In Progress')\n", (7383, 7432), False, 'from django.utils.translation import pgettext_lazy\n'), ((7506, 7565), 'django.utils.translation.pgettext_lazy', 'pgettext_lazy', (['"""current status of stack"""', 'u"""Check Complete"""'], {}), "('current status of stack', u'Check Complete')\n", (7519, 7565), False, 'from django.utils.translation import pgettext_lazy\n'), ((7634, 7691), 'django.utils.translation.pgettext_lazy', 'pgettext_lazy', (['"""current status of stack"""', 'u"""Check Failed"""'], {}), "('current status of stack', u'Check Failed')\n", (7647, 7691), False, 'from django.utils.translation import pgettext_lazy\n'), ((7875, 7890), 'django.utils.translation.ugettext_lazy', '_', (['"""VNFFG Name"""'], {}), "('VNFFG Name')\n", (7876, 7890), True, 'from django.utils.translation import ugettext_lazy as _\n'), ((7984, 8000), 'django.utils.translation.ugettext_lazy', '_', (['"""Description"""'], {}), "('Description')\n", (7985, 8000), True, 'from django.utils.translation import ugettext_lazy as _\n'), ((2796, 2822), 'horizon.messages.error', 'messages.error', (['request', 'e'], {}), '(request, e)\n', (2810, 2822), False, 'from horizon import messages\n')]
|
import torch
import Config
class Buffer:
    # Since the environment we use has multiple agents working in parallel, and PPO requires whole episodes to be
    # stored in the buffer so the advantage can be calculated, each agent gets a separate episode buffer that stores
    # every step of its own episode. When an agent's episode ends, its whole episode buffer is inserted into the main buffer.
def __init__(self, num_workers, state_shape, action_shape, episode_length):
self.device = 'cuda' if torch.cuda.is_available() else 'cpu'
self.buffer_index = 0
self.episode_length = episode_length
#------------------------------------------------- MAIN BUFFER -------------------------------------------------
self.states = torch.zeros(Config.batch_size, state_shape).to(self.device)
self.actions = torch.zeros(Config.batch_size, action_shape).to(self.device)
self.logprob = torch.zeros(Config.batch_size, action_shape).to(self.device)
self.rewards = torch.zeros(Config.batch_size).to(self.device)
self.new_states = torch.zeros(Config.batch_size, state_shape).to(self.device)
self.dones = torch.zeros(Config.batch_size).to(self.device)
#----------------------------------------------- EPISODE BUFFER ------------------------------------------------
self.states_episode = torch.zeros(num_workers, self.episode_length, state_shape).to(self.device)
self.actions_episode = torch.zeros(num_workers, self.episode_length, action_shape).to(self.device)
self.logprob_episode = torch.zeros(num_workers, self.episode_length, action_shape).to(self.device)
self.rewards_episode = torch.zeros(num_workers, self.episode_length).to(self.device)
self.new_states_episode = torch.zeros(num_workers, self.episode_length, state_shape).to(self.device)
self.dones_episode = torch.zeros(num_workers, self.episode_length).to(self.device)
self.episode_step = torch.zeros(num_workers, dtype=torch.long).to(self.device)
self.gt = torch.zeros(Config.batch_size + 1).to(self.device)
self.advantages = torch.zeros(Config.batch_size + 1).to(self.device)
self.full = False
def add_old(self, decision_steps, actions, logprob):
cnt = 0
actionsTensor = torch.Tensor(actions).to(self.device)
for obs, a_id in zip(decision_steps.obs[0], decision_steps.agent_id):
self.states_episode[a_id, self.episode_step[a_id]] = torch.from_numpy(obs)
self.actions_episode[a_id, self.episode_step[a_id]] = actionsTensor[cnt]
self.logprob_episode[a_id, self.episode_step[a_id]] = logprob[cnt]
cnt += 1
def add(self, decision_steps, terminal_steps):
for obs, a_id in zip(decision_steps.obs[0], decision_steps.agent_id):
            if decision_steps.reward[a_id] == 0:  # terminal step for this agent, skip it
continue
self.rewards_episode[a_id, self.episode_step[a_id]] = 0.1
self.new_states_episode[a_id, self.episode_step[a_id]] = torch.from_numpy(obs)
self.dones_episode[a_id, self.episode_step[a_id]] = 0
self.episode_step[a_id] += 1
for obs, a_id in zip(terminal_steps.obs[0], terminal_steps.agent_id):
self.rewards_episode[a_id, self.episode_step[a_id]] = -1
self.new_states_episode[a_id, self.episode_step[a_id]] = torch.from_numpy(obs)
self.dones_episode[a_id, self.episode_step[a_id]] = 1
self.episode_step[a_id] += 1
if not self.full:
last_index = min(self.buffer_index + self.episode_step[a_id], Config.batch_size)
self.states[self.buffer_index: last_index] = self.states_episode[a_id, : last_index - self.buffer_index]
self.actions[self.buffer_index: last_index] = self.actions_episode[a_id, : last_index - self.buffer_index]
self.logprob[self.buffer_index: last_index] = self.logprob_episode[a_id, : last_index - self.buffer_index]
self.rewards[self.buffer_index: last_index] = self.rewards_episode[a_id, : last_index - self.buffer_index]
self.new_states[self.buffer_index: last_index] = self.new_states_episode[a_id, : last_index - self.buffer_index]
self.dones[self.buffer_index: last_index] = self.dones_episode[a_id, : last_index - self.buffer_index]
self.buffer_index = last_index % Config.batch_size
if self.buffer_index == 0:
self.full = True
self.episode_step[a_id] = 0
def advantage(self, state_values, last_state_value):
self.full = False
gt = last_state_value
for i in reversed(range(Config.batch_size)):
gt = self.rewards[i] + Config.gamma * gt * (1 - self.dones[i])
self.gt[i] = gt
self.advantages[i] = gt - state_values[i]
def gae_advantage(self, state_values, new_state_values):
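        # GAE (Generalized Advantage Estimation), computed backwards over the batch:
        #   delta_t = r_t + gamma * V(s_{t+1}) * (1 - done_t) - V(s_t)
        #   A_t = delta_t + gamma * lambda * (1 - done_t) * A_{t+1}
        # self.gt accumulates the discounted returns used as the critic target.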
self.full = False
self.gt[Config.batch_size] = new_state_values[-1]
for i in reversed(range(Config.batch_size)):
delta = self.rewards[i] + Config.gamma * new_state_values[i] * (1 - self.dones[i]) - state_values[i]
self.advantages[i] = delta + Config.gae_lambda * Config.gamma * self.advantages[i+1] * (1 - self.dones[i])
# For critic
self.gt[i] = self.rewards[i] + Config.gamma * self.gt[i+1] * (1 - self.dones[i])
def reset(self, full=False):
if full:
self.buffer_index = 0
self.episode_step[self.episode_step != 0] = 0
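# Illustrative usage sketch (not part of the original file). The `env`, `agent`
# and `behavior_name` objects are assumptions (a Unity ML-Agents style loop);
# only the Buffer calls mirror the class above:
# buffer = Buffer(num_workers=8, state_shape=24, action_shape=2, episode_length=500)
# while not buffer.full:
#     decision_steps, terminal_steps = env.get_steps(behavior_name)
#     actions, logprob = agent.act(decision_steps)
#     buffer.add_old(decision_steps, actions, logprob)
#     env.set_actions(behavior_name, actions)
#     env.step()
#     next_decision, next_terminal = env.get_steps(behavior_name)
#     buffer.add(next_decision, next_terminal)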
|
[
"torch.zeros",
"torch.Tensor",
"torch.cuda.is_available",
"torch.from_numpy"
] |
[((524, 549), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (547, 549), False, 'import torch\n'), ((2530, 2551), 'torch.from_numpy', 'torch.from_numpy', (['obs'], {}), '(obs)\n', (2546, 2551), False, 'import torch\n'), ((3125, 3146), 'torch.from_numpy', 'torch.from_numpy', (['obs'], {}), '(obs)\n', (3141, 3146), False, 'import torch\n'), ((3477, 3498), 'torch.from_numpy', 'torch.from_numpy', (['obs'], {}), '(obs)\n', (3493, 3498), False, 'import torch\n'), ((783, 826), 'torch.zeros', 'torch.zeros', (['Config.batch_size', 'state_shape'], {}), '(Config.batch_size, state_shape)\n', (794, 826), False, 'import torch\n'), ((867, 911), 'torch.zeros', 'torch.zeros', (['Config.batch_size', 'action_shape'], {}), '(Config.batch_size, action_shape)\n', (878, 911), False, 'import torch\n'), ((952, 996), 'torch.zeros', 'torch.zeros', (['Config.batch_size', 'action_shape'], {}), '(Config.batch_size, action_shape)\n', (963, 996), False, 'import torch\n'), ((1037, 1067), 'torch.zeros', 'torch.zeros', (['Config.batch_size'], {}), '(Config.batch_size)\n', (1048, 1067), False, 'import torch\n'), ((1111, 1154), 'torch.zeros', 'torch.zeros', (['Config.batch_size', 'state_shape'], {}), '(Config.batch_size, state_shape)\n', (1122, 1154), False, 'import torch\n'), ((1193, 1223), 'torch.zeros', 'torch.zeros', (['Config.batch_size'], {}), '(Config.batch_size)\n', (1204, 1223), False, 'import torch\n'), ((1393, 1451), 'torch.zeros', 'torch.zeros', (['num_workers', 'self.episode_length', 'state_shape'], {}), '(num_workers, self.episode_length, state_shape)\n', (1404, 1451), False, 'import torch\n'), ((1500, 1559), 'torch.zeros', 'torch.zeros', (['num_workers', 'self.episode_length', 'action_shape'], {}), '(num_workers, self.episode_length, action_shape)\n', (1511, 1559), False, 'import torch\n'), ((1608, 1667), 'torch.zeros', 'torch.zeros', (['num_workers', 'self.episode_length', 'action_shape'], {}), '(num_workers, self.episode_length, action_shape)\n', (1619, 1667), False, 'import torch\n'), ((1716, 1761), 'torch.zeros', 'torch.zeros', (['num_workers', 'self.episode_length'], {}), '(num_workers, self.episode_length)\n', (1727, 1761), False, 'import torch\n'), ((1813, 1871), 'torch.zeros', 'torch.zeros', (['num_workers', 'self.episode_length', 'state_shape'], {}), '(num_workers, self.episode_length, state_shape)\n', (1824, 1871), False, 'import torch\n'), ((1918, 1963), 'torch.zeros', 'torch.zeros', (['num_workers', 'self.episode_length'], {}), '(num_workers, self.episode_length)\n', (1929, 1963), False, 'import torch\n'), ((2009, 2051), 'torch.zeros', 'torch.zeros', (['num_workers'], {'dtype': 'torch.long'}), '(num_workers, dtype=torch.long)\n', (2020, 2051), False, 'import torch\n'), ((2089, 2123), 'torch.zeros', 'torch.zeros', (['(Config.batch_size + 1)'], {}), '(Config.batch_size + 1)\n', (2100, 2123), False, 'import torch\n'), ((2167, 2201), 'torch.zeros', 'torch.zeros', (['(Config.batch_size + 1)'], {}), '(Config.batch_size + 1)\n', (2178, 2201), False, 'import torch\n'), ((2347, 2368), 'torch.Tensor', 'torch.Tensor', (['actions'], {}), '(actions)\n', (2359, 2368), False, 'import torch\n')]
|
import os
from concurrent.futures import ProcessPoolExecutor
import itertools
import yaml
import sys
import copy
import numpy as np
import pandas as pd
from lib.constants import *
from lib.utils import *
TOP_N = 15
loader = yaml.SafeLoader
loader.add_implicit_resolver(
u'tag:yaml.org,2002:float',
re.compile(u'''^(?:
[-+]?(?:[0-9][0-9_]*)\\.[0-9_]*(?:[eE][-+]?[0-9]+)?
|[-+]?(?:[0-9][0-9_]*)(?:[eE][-+]?[0-9]+)
|\\.[0-9_]+(?:[eE][-+][0-9]+)?
|[-+]?[0-9][0-9_]*(?::[0-5]?[0-9])+\\.[0-9_]*
|[-+]?\\.(?:inf|Inf|INF)
|\\.(?:nan|NaN|NAN))$''', re.X),
list(u'-+0123456789.'))
parser = argparse.ArgumentParser()
parser.add_argument('--config_file','-c',
default="config.yaml",
type=str,
help="Configuration file.")
args = parser.parse_args()
f = open(args.config_file)
config = yaml.load(f,Loader=loader)
to_search = {
'pheromony_policies': {'AntSystem':{"rho": [0.3,0.5,0.7],
"Q": [75, 100, 125]}},
"selection":{"beta": [3,5,7]},
'parameters':{
# "instance_name": ['lau15','sgb128'],
"eid": list(range(1,NUM_EXECUTIONS+1))},
}
# parameters_names=['rho','Q','betas','eid']
keys_to_value, combinations=utils.get_names_combinations(config,to_search)
result_df = pd.DataFrame(columns=
[keys[-1] for keys in keys_to_value])
parameters_names = [i[-1] for i in keys_to_value]
i = 0
for combination in combinations:
for keys, v in zip(keys_to_value,combination):
tmp = config
for k in keys[:-1]:
tmp = tmp[k]
tmp[keys[-1]] = v
result_df.loc[i,keys[-1]] = v
ac = AntColony(pheromony_kwargs=config['pheromony_policies'][config['parameters']['pheromony_policy']],
selection_policy_kwargs=config['selection'],
**config['parameters'])
df = ac.load_results()
result_df.loc[i,parameters_names] = combination
result_df.loc[i,'Best fitness global'] = df.iloc[-1]['Best fitness global']
result_df.loc[i,'Best fitness'] = df.iloc[-1]['Best fitness']
result_df.loc[i,'Mean fitness'] = df.iloc[-1]['Mean fitness']
result_df.loc[i,'Median fitness'] = df.iloc[-1]['Median fitness']
result_df.loc[i,'Worst fitness'] = df.iloc[-1]['Worst fitness']
i += 1
result_df['eid']=pd.to_numeric(result_df['eid'])
# print('Top best fitness')
with pd.option_context('display.max_rows', None, 'display.max_columns', None):
# print(result_df)
pd.set_option('display.expand_frame_repr', False)
tmp = copy.copy(parameters_names)
tmp.remove('eid')
a=result_df.groupby(list(set(result_df.columns)-{'Best fitness global','Best fitness','Mean fitness','Median fitness','Worst fitness', 'eid'})).\
agg({i: ['mean','std'] for i in {'Best fitness global', 'Best fitness','Mean fitness','Median fitness','Worst fitness', 'eid'}}).\
sort_values(by=[('Best fitness global','mean')],ascending=True).reset_index()[tmp+['Best fitness global','Best fitness','Mean fitness','Median fitness','Worst fitness',]].head(TOP_N)
open(f"../doc/{config['parameters']['instance_name']}_output.tex",'w').write(a.to_latex())
# print('Top mean fitness')
# print(result_df.groupby(list(set(result_df.columns)-{'Best fitness','Mean fitness', 'eid'})).\
# agg({i: ['mean','median','std'] for i in {'Best fitness','Mean fitness', 'eid'}}).\
# sort_values(by=[('Mean fitness','mean')],ascending=True).reset_index()[list(set(to_update.keys())-{'eid'})+['Best fitness','Mean fitness']].head(TOP_N))
|
[
"pandas.DataFrame",
"yaml.load",
"pandas.option_context",
"copy.copy",
"pandas.set_option",
"pandas.to_numeric"
] |
[((895, 922), 'yaml.load', 'yaml.load', (['f'], {'Loader': 'loader'}), '(f, Loader=loader)\n', (904, 922), False, 'import yaml\n'), ((1358, 1416), 'pandas.DataFrame', 'pd.DataFrame', ([], {'columns': '[keys[-1] for keys in keys_to_value]'}), '(columns=[keys[-1] for keys in keys_to_value])\n', (1370, 1416), True, 'import pandas as pd\n'), ((2405, 2436), 'pandas.to_numeric', 'pd.to_numeric', (["result_df['eid']"], {}), "(result_df['eid'])\n", (2418, 2436), True, 'import pandas as pd\n'), ((2470, 2542), 'pandas.option_context', 'pd.option_context', (['"""display.max_rows"""', 'None', '"""display.max_columns"""', 'None'], {}), "('display.max_rows', None, 'display.max_columns', None)\n", (2487, 2542), True, 'import pandas as pd\n'), ((2571, 2620), 'pandas.set_option', 'pd.set_option', (['"""display.expand_frame_repr"""', '(False)'], {}), "('display.expand_frame_repr', False)\n", (2584, 2620), True, 'import pandas as pd\n'), ((2631, 2658), 'copy.copy', 'copy.copy', (['parameters_names'], {}), '(parameters_names)\n', (2640, 2658), False, 'import copy\n')]
|
from Main_algorithm_GCN.CR_MGC import CR_MGC
from Configurations import *
import matplotlib.pyplot as plt
from copy import deepcopy
from torch.optim import Adam
import Utils
# the range of the number of remained UAVs
meta_type = [i for i in range(2, 201)]
print("Meta Learning Starts...")
print("-----------------------------------")
for mt in meta_type:
meta_cr_gcm_n = CR_MGC()
# list of tuples [('', ...), ('',...)]
meta_params = dict(meta_cr_gcm_n.gcn_network.named_parameters())
# param name list
param_name = meta_cr_gcm_n.gcn_network.state_dict().keys()
# meta training
num_remain = mt
meta_seed = 0
loss_list = []
for epi in range(config_meta_training_epi):
# create the training gcn
training_cr_gcm_n = CR_MGC()
training_cr_gcm_n.optimizer = Adam(training_cr_gcm_n.gcn_network.parameters(), lr=0.001)
# decrease the learning rate as the meta learning moves on
if epi > 100:
training_cr_gcm_n.optimizer = Adam(training_cr_gcm_n.gcn_network.parameters(), lr=0.0001)
if epi > 250:
training_cr_gcm_n.optimizer = Adam(training_cr_gcm_n.gcn_network.parameters(), lr=0.00001)
# generate the support set of the training task
meta_training_support = np.zeros((num_remain, 3))
while True:
meta_training_support[:, 0] = np.random.rand(num_remain) * config_width
meta_training_support[:, 1] = np.random.rand(num_remain) * config_length
meta_training_support[:, 2] = np.random.rand(num_remain) * config_height
meta_seed += 1
np.random.seed(meta_seed)
cf, nc = Utils.check_if_a_connected_graph(meta_training_support, num_remain)
if not cf:
# print(cf)
break
# endow the initial values of the GCN with the meta parameter
for key in training_cr_gcm_n.gcn_network.state_dict().keys():
training_cr_gcm_n.gcn_network.state_dict()[key].copy_(meta_params[key].data)
# train the network on the support set
training_cr_gcm_n.train_support_set_single(meta_training_support, num_remain)
# generate the query set of the training task
meta_training_query = np.zeros((num_remain, 3))
while True:
meta_training_query[:, 0] = np.random.rand(num_remain) * config_width
meta_training_query[:, 1] = np.random.rand(num_remain) * config_length
meta_training_query[:, 2] = np.random.rand(num_remain) * config_height
meta_seed += 1
np.random.seed(meta_seed)
cf, nc = Utils.check_if_a_connected_graph(meta_training_query, num_remain)
if not cf:
# print(cf)
break
# train on the query set and return the gradient
gradient, loss = training_cr_gcm_n.train_query_set_single(meta_training_query, num_remain)
print("%d episode %d remain UAVs -- destroy %d UAVs -- loss %f" % (
epi, num_remain, config_num_of_agents - num_remain, loss))
loss_list.append(deepcopy(loss))
# update the meta parameter
for key in param_name:
meta_params[key].data += gradient[key].data
if epi >= 1:
x_axis = [i for i in range(epi + 1)]
fig = plt.figure()
plt.plot(x_axis, loss_list, linewidth=2.0)
plt.xlim((0, epi + 1))
plt.ylim((0, 1400))
plt.savefig('Meta_Learning_Results/meta_loss_pic/meta_%d.png' % num_remain)
plt.close()
# plt.show()
for key in meta_params.keys():
meta_params[key] = meta_params[key].cpu().data.numpy()
np.save('Meta_Learning_Results/meta_parameters/meta_%d.npy' % num_remain, meta_params)
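# Loading a saved meta initialisation later (illustrative, not in the original
# script): np.save stores the dict through pickle, so allow_pickle is required:
# meta_params = np.load('Meta_Learning_Results/meta_parameters/meta_50.npy',
#                       allow_pickle=True).item()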
|
[
"matplotlib.pyplot.xlim",
"copy.deepcopy",
"matplotlib.pyplot.plot",
"matplotlib.pyplot.ylim",
"Utils.check_if_a_connected_graph",
"matplotlib.pyplot.close",
"Main_algorithm_GCN.CR_MGC.CR_MGC",
"matplotlib.pyplot.figure",
"matplotlib.pyplot.savefig"
] |
[((378, 386), 'Main_algorithm_GCN.CR_MGC.CR_MGC', 'CR_MGC', ([], {}), '()\n', (384, 386), False, 'from Main_algorithm_GCN.CR_MGC import CR_MGC\n'), ((772, 780), 'Main_algorithm_GCN.CR_MGC.CR_MGC', 'CR_MGC', ([], {}), '()\n', (778, 780), False, 'from Main_algorithm_GCN.CR_MGC import CR_MGC\n'), ((1669, 1736), 'Utils.check_if_a_connected_graph', 'Utils.check_if_a_connected_graph', (['meta_training_support', 'num_remain'], {}), '(meta_training_support, num_remain)\n', (1701, 1736), False, 'import Utils\n'), ((2644, 2709), 'Utils.check_if_a_connected_graph', 'Utils.check_if_a_connected_graph', (['meta_training_query', 'num_remain'], {}), '(meta_training_query, num_remain)\n', (2676, 2709), False, 'import Utils\n'), ((3115, 3129), 'copy.deepcopy', 'deepcopy', (['loss'], {}), '(loss)\n', (3123, 3129), False, 'from copy import deepcopy\n'), ((3342, 3354), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (3352, 3354), True, 'import matplotlib.pyplot as plt\n'), ((3367, 3409), 'matplotlib.pyplot.plot', 'plt.plot', (['x_axis', 'loss_list'], {'linewidth': '(2.0)'}), '(x_axis, loss_list, linewidth=2.0)\n', (3375, 3409), True, 'import matplotlib.pyplot as plt\n'), ((3422, 3444), 'matplotlib.pyplot.xlim', 'plt.xlim', (['(0, epi + 1)'], {}), '((0, epi + 1))\n', (3430, 3444), True, 'import matplotlib.pyplot as plt\n'), ((3457, 3476), 'matplotlib.pyplot.ylim', 'plt.ylim', (['(0, 1400)'], {}), '((0, 1400))\n', (3465, 3476), True, 'import matplotlib.pyplot as plt\n'), ((3489, 3564), 'matplotlib.pyplot.savefig', 'plt.savefig', (["('Meta_Learning_Results/meta_loss_pic/meta_%d.png' % num_remain)"], {}), "('Meta_Learning_Results/meta_loss_pic/meta_%d.png' % num_remain)\n", (3500, 3564), True, 'import matplotlib.pyplot as plt\n'), ((3577, 3588), 'matplotlib.pyplot.close', 'plt.close', ([], {}), '()\n', (3586, 3588), True, 'import matplotlib.pyplot as plt\n')]
|
#!/usr/bin/env python
__author__ = '<NAME>'
import os
import argparse
import yaml
from collections import namedtuple
import logging
from wielder.util.arguer import LogLevel, convert_log_level
from wielder.util.log_util import setup_logging
class Conf:
def __init__(self):
self.template_ignore_dirs = []
def attr_list(self, should_print=False):
items = self.__dict__.items()
if should_print:
logging.debug("Conf items:\n______\n")
[logging.debug(f"attribute: {k} value: {v}") for k, v in items]
return items
def get_datalake_parser():
parser = argparse.ArgumentParser(description=
'Data Orchestration Reactive Framework.')
parser.add_argument(
'-cf', '--conf_file',
type=str,
help='Full path to config file with all arguments.\nCommandline args override those in the file.'
)
parser.add_argument(
'-pl', '--plan',
type=bool,
default=False,
help='plan means to create template instances/files but not deploy them e.g. conf.yml.tmpl => conf.yml.'
)
parser.add_argument(
'-e', '--env',
type=str,
default='qe',
help='Deployment environment local means dev refers to git branches ...'
)
parser.add_argument(
'-re', '--runtime_env',
type=str,
default='local-docker',
help='Runtime environment eg local-docker, local, gcp, gcp-shared-vpc etc...'
)
parser.add_argument(
'-cpr', '--cloud_provider',
type=str,
choices=['gcp', 'aws', 'azure'],
help='Cloud provider will only mean something if not local:'
)
parser.add_argument(
'-edb', '--enable_debug',
type=bool,
help='Enabling Debug ports for remote debugging:'
)
parser.add_argument(
'-ll', '--log_level',
type=LogLevel,
choices=list(LogLevel),
help='LogLevel: as in Python logging',
default=LogLevel.INFO
)
return parser
def extract_gcp_to_conf(conf):
raw = conf.raw_config_args['gcp']
gcp = Conf()
gcp.gcp_project = raw['project']
gcp.gcp_image_repo_zone = raw['image_repo_zone']
gcp.is_shared_vpc = raw['is_shared_vpc']
gcp.region = raw['region']
gcp.zone = raw['zone']
gcp.image_repo_zone = raw['image_repo_zone']
gcp.service_accounts = raw['service_accounts']
gcp.network = raw['network']
gcp.subnetwork = raw['subnetwork']
conf.gcp = gcp
gcp_services = raw['services']
if 'dataproc' in gcp_services:
raw_dataproc = gcp_services['dataproc']
dataproc = Conf()
dataproc.high_availability = raw_dataproc['high_availability']
dataproc.extra_tags = raw_dataproc['extra_tags']
dataproc.region = raw_dataproc['region']
dataproc.zone = raw_dataproc['zone']
dataproc.internal_ip_only = raw_dataproc['internal_ip_only']
dataproc.master_machine_type = raw_dataproc['master_machine_type']
dataproc.worker_machine_type = raw_dataproc['worker_machine_type']
dataproc.master_boot_disk_size = raw_dataproc['master_boot_disk_size']
dataproc.worker_boot_disk_size = raw_dataproc['worker_boot_disk_size']
dataproc.num_worker_nodes = raw_dataproc['num_worker_nodes']
conf.gcp.dataproc = dataproc
def process_args(cmd_args):
if cmd_args.conf_file is None:
dir_path = os.path.dirname(os.path.realpath(__file__))
cmd_args.conf_file = dir_path + '/data_conf.yaml'
log_level = convert_log_level(cmd_args.log_level)
logging.basicConfig(
format='%(asctime)s %(levelname)s :%(message)s',
level=log_level,
datefmt='%m/%d/%Y %I:%M:%S %p'
)
with open(cmd_args.conf_file, 'r') as yaml_file:
conf_args = yaml.load(yaml_file, Loader=yaml.FullLoader)
if not hasattr(conf_args, 'plan'):
conf_args['plan'] = False
logging.debug('Configuration File Arguments:')
config_items = cmd_args.__dict__.items()
for k, v in config_items:
if v is not None:
conf_args[k] = v
named_tuple = namedtuple("Conf1", conf_args.keys())(*conf_args.values())
conf = Conf()
conf.plan = named_tuple.plan
conf.conf_file = named_tuple.conf_file
conf.deploy_env = named_tuple.deploy_env
conf.enable_debug = named_tuple.enable_debug
conf.enable_dev = named_tuple.enable_dev
conf.deploy_strategy = named_tuple.deploy_strategy
conf.supported_deploy_envs = named_tuple.supported_deploy_envs
conf.cloud_provider = named_tuple.cloud_provider
conf.template_ignore_dirs = named_tuple.template_ignore_dirs
conf.template_variables = named_tuple.template_variables
conf.script_variables = named_tuple.script_variables
conf.git_super_repo = named_tuple.git_super_repo
conf.git_branch = named_tuple.git_branch
conf.git_commit = named_tuple.git_commit
conf.raw_config_args = conf_args
if conf.cloud_provider == 'gcp':
extract_gcp_to_conf(conf)
conf.attr_list(True)
return conf
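# Minimal sketch of a matching data_conf.yaml (illustrative; the top-level keys
# mirror the named-tuple attributes read above, the values are placeholders):
#
# plan: false
# deploy_env: dev
# enable_debug: true
# enable_dev: true
# deploy_strategy: lean
# supported_deploy_envs: [dev, qe, prod]
# cloud_provider: gcp
# template_ignore_dirs: []
# template_variables: {}
# script_variables: {}
# git_super_repo: my-super-repo
# git_branch: dev
# git_commit: none
# gcp:              # read by extract_gcp_to_conf when cloud_provider is gcp
#   project: my-project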
if __name__ == "__main__":
setup_logging(log_level=logging.DEBUG)
datalake_args, other_args = get_datalake_parser().parse_known_args()
_conf = process_args(datalake_args)
logging.debug('break point')
logging.info(f"datalake_args:\n{datalake_args}\n")
logging.info(f"other_args:\n{other_args}")
|
[
"yaml.load",
"logging.debug",
"argparse.ArgumentParser",
"logging.basicConfig",
"os.path.realpath",
"logging.info",
"wielder.util.log_util.setup_logging",
"wielder.util.arguer.convert_log_level"
] |
[((628, 705), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""Data Orchestration Reactive Framework."""'}), "(description='Data Orchestration Reactive Framework.')\n", (651, 705), False, 'import argparse\n'), ((3613, 3650), 'wielder.util.arguer.convert_log_level', 'convert_log_level', (['cmd_args.log_level'], {}), '(cmd_args.log_level)\n', (3630, 3650), False, 'from wielder.util.arguer import LogLevel, convert_log_level\n'), ((3656, 3778), 'logging.basicConfig', 'logging.basicConfig', ([], {'format': '"""%(asctime)s %(levelname)s :%(message)s"""', 'level': 'log_level', 'datefmt': '"""%m/%d/%Y %I:%M:%S %p"""'}), "(format='%(asctime)s %(levelname)s :%(message)s', level=\n log_level, datefmt='%m/%d/%Y %I:%M:%S %p')\n", (3675, 3778), False, 'import logging\n'), ((4002, 4048), 'logging.debug', 'logging.debug', (['"""Configuration File Arguments:"""'], {}), "('Configuration File Arguments:')\n", (4015, 4048), False, 'import logging\n'), ((5185, 5223), 'wielder.util.log_util.setup_logging', 'setup_logging', ([], {'log_level': 'logging.DEBUG'}), '(log_level=logging.DEBUG)\n', (5198, 5223), False, 'from wielder.util.log_util import setup_logging\n'), ((5344, 5372), 'logging.debug', 'logging.debug', (['"""break point"""'], {}), "('break point')\n", (5357, 5372), False, 'import logging\n'), ((5378, 5430), 'logging.info', 'logging.info', (['f"""datalake_args:\n{datalake_args}\n"""'], {}), '(f"""datalake_args:\n{datalake_args}\n""")\n', (5390, 5430), False, 'import logging\n'), ((5433, 5478), 'logging.info', 'logging.info', (['f"""other_args:\n{other_args}"""'], {}), '(f"""other_args:\n{other_args}""")\n', (5445, 5478), False, 'import logging\n'), ((3878, 3922), 'yaml.load', 'yaml.load', (['yaml_file'], {'Loader': 'yaml.FullLoader'}), '(yaml_file, Loader=yaml.FullLoader)\n', (3887, 3922), False, 'import yaml\n'), ((445, 485), 'logging.debug', 'logging.debug', (['"""Conf items:\n______\n"""'], {}), '("""Conf items:\n______\n""")\n', (458, 485), False, 'import logging\n'), ((3509, 3535), 'os.path.realpath', 'os.path.realpath', (['__file__'], {}), '(__file__)\n', (3525, 3535), False, 'import os\n'), ((497, 543), 'logging.debug', 'logging.debug', (['f"""attribute: {k} value: {v}"""'], {}), "(f'attribute: {k} value: {v}')\n", (510, 543), False, 'import logging\n')]
|
"""Kernels for advecting particles in Parcels"""
from parcels import (JITParticle, Variable)
import numpy as np
class unbeachableBoundedParticle(JITParticle):
# Beaching dynamics from https://github.com/OceanParcels/Parcelsv2.0PaperNorthSeaScripts
# beached : 0 sea, 1 beached, 2 after non-beach dyn, 3 after beach dyn, 4 please unbeach
beached = Variable('beached', dtype=np.int32, initial=0.)
unbeachCount = Variable('unbeachCount', dtype=np.int32, initial=0.)
# inBounds : 1 yes, 0 no
inBounds = Variable('inBounds', dtype=np.int32, initial=1.)
class unbeachableParticle(JITParticle):
# Beaching dynamics from https://github.com/OceanParcels/Parcelsv2.0PaperNorthSeaScripts
# beached : 0 sea, 1 beached, 2 after non-beach dyn, 3 after beach dyn, 4 please unbeach
beached = Variable('beached', dtype=np.int32, initial=0.)
unbeachCount = Variable('unbeachCount', dtype=np.int32, initial=0.)
class boundedParticle(JITParticle):
# inBounds : 1 yes, 0 no
inBounds = Variable('inBounds', dtype=np.int32, initial=1.)
# Kernels for circular boundary
def wrapLon(particle, fieldset, time):
if particle.lon > 180.:
particle.lon = particle.lon - 360.
if particle.lon < -180.:
particle.lon = particle.lon + 360.
def northPolePushBack(particle, fieldset, time):
if particle.lat > 89.915:
particle.lat = 89.915
# Freeze particles that get out of bounds
def freezeOutOfBoundsWedge(particle, fieldset, time):
lon, lat = (particle.lon, particle.lat)
if lon > 65. or lon < -45. or lat > 85. or lat < 60.:
particle.inBounds = 0
# Freeze particles that get out of bounds
def freezeOutOfBoundsArctic(particle, fieldset, time):
lon, lat = (particle.lon, particle.lat)
if lat < 60.:
particle.inBounds = 0
def freezeOutOfBoundsArctic65(particle, fieldset, time):
lon, lat = (particle.lon, particle.lat)
if lat < 65.:
particle.inBounds = 0
def freezeOutOfBoundsArctic70(particle, fieldset, time):
lon, lat = (particle.lon, particle.lat)
if lat < 70.:
particle.inBounds = 0
# Advection kernel. Checks first whether a particle is within bounds and whether it is not beached.
def UnbeachBoundedAdvectionRK4(particle, fieldset, time):
if particle.inBounds == 1:
if particle.beached == 0:
(u1, v1) = fieldset.UV[time, particle.depth, particle.lat, particle.lon]
lon1, lat1 = (particle.lon + u1*.5*particle.dt, particle.lat + v1*.5*particle.dt)
(u2, v2) = fieldset.UV[time + .5 * particle.dt, particle.depth, lat1, lon1]
lon2, lat2 = (particle.lon + u2*.5*particle.dt, particle.lat + v2*.5*particle.dt)
(u3, v3) = fieldset.UV[time + .5 * particle.dt, particle.depth, lat2, lon2]
lon3, lat3 = (particle.lon + u3*particle.dt, particle.lat + v3*particle.dt)
(u4, v4) = fieldset.UV[time + particle.dt, particle.depth, lat3, lon3]
particle.lon += (u1 + 2*u2 + 2*u3 + u4) / 6. * particle.dt
particle.lat += (v1 + 2*v2 + 2*v3 + v4) / 6. * particle.dt
particle.beached = 2
def UnbeachAdvectionRK4(particle, fieldset, time):
if particle.beached == 0:
(u1, v1) = fieldset.UV[time, particle.depth, particle.lat, particle.lon]
lon1, lat1 = (particle.lon + u1*.5*particle.dt, particle.lat + v1*.5*particle.dt)
(u2, v2) = fieldset.UV[time + .5 * particle.dt, particle.depth, lat1, lon1]
lon2, lat2 = (particle.lon + u2*.5*particle.dt, particle.lat + v2*.5*particle.dt)
(u3, v3) = fieldset.UV[time + .5 * particle.dt, particle.depth, lat2, lon2]
lon3, lat3 = (particle.lon + u3*particle.dt, particle.lat + v3*particle.dt)
(u4, v4) = fieldset.UV[time + particle.dt, particle.depth, lat3, lon3]
particle.lon += (u1 + 2*u2 + 2*u3 + u4) / 6. * particle.dt
particle.lat += (v1 + 2*v2 + 2*v3 + v4) / 6. * particle.dt
particle.beached = 2
def BoundedAdvectionRK4(particle, fieldset, time):
if particle.inBounds == 1:
(u1, v1) = fieldset.UV[time, particle.depth, particle.lat, particle.lon]
lon1, lat1 = (particle.lon + u1*.5*particle.dt, particle.lat + v1*.5*particle.dt)
(u2, v2) = fieldset.UV[time + .5 * particle.dt, particle.depth, lat1, lon1]
lon2, lat2 = (particle.lon + u2*.5*particle.dt, particle.lat + v2*.5*particle.dt)
(u3, v3) = fieldset.UV[time + .5 * particle.dt, particle.depth, lat2, lon2]
lon3, lat3 = (particle.lon + u3*particle.dt, particle.lat + v3*particle.dt)
(u4, v4) = fieldset.UV[time + particle.dt, particle.depth, lat3, lon3]
particle.lon += (u1 + 2*u2 + 2*u3 + u4) / 6. * particle.dt
particle.lat += (v1 + 2*v2 + 2*v3 + v4) / 6. * particle.dt
def deleteParticle(particle, fieldset, time):
print(f"Particle {particle.id} deleted: ({particle.lon}, {particle.lat} at {particle.time})")
particle.delete()
# Beaching dynamics from https://github.com/OceanParcels/Parcelsv2.0PaperNorthSeaScripts
def beachTesting(particle, fieldset, time):
if particle.beached == 2 or particle.beached == 3:
(u, v) = fieldset.UV[time, particle.depth, particle.lat, particle.lon]
if fabs(u) < 1e-14 and fabs(v) < 1e-14:
if particle.beached == 2:
particle.beached = 4
else:
particle.beached = 1
else:
particle.beached = 0
# Beaching dynamics from https://github.com/OceanParcels/Parcelsv2.0PaperNorthSeaScripts
def unBeaching(particle, fieldset, time):
if particle.beached == 4:
(ub, vb) = fieldset.UVunbeach[time, particle.depth, particle.lat, particle.lon]
particle.lon += ub * particle.dt
particle.lat += vb * particle.dt
particle.beached = 0
particle.unbeachCount += 1
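# Illustrative usage (not part of the original sample): a minimal sketch, assuming
# Parcels v2, of how the particle classes and kernels above are typically combined.
# The flow field, particle positions and runtime are made-up placeholders, and the
# unBeaching kernel is left out because it also requires a fieldset.UVunbeach field.
if __name__ == '__main__':
    import numpy as np
    from datetime import timedelta
    from parcels import FieldSet, ParticleSet
    lons = np.linspace(-45., 65., 50)
    lats = np.linspace(60., 85., 30)
    # Zero-velocity toy field on a regular lon/lat grid, just to make the sketch runnable.
    data = {'U': np.zeros((lats.size, lons.size)), 'V': np.zeros((lats.size, lons.size))}
    fieldset = FieldSet.from_data(data, {'lon': lons, 'lat': lats})
    pset = ParticleSet(fieldset=fieldset, pclass=boundedParticle, lon=[10.], lat=[75.])
    # Chain the advection kernel with the boundary/bookkeeping kernels defined above.
    kernels = (pset.Kernel(BoundedAdvectionRK4) + pset.Kernel(wrapLon)
               + pset.Kernel(northPolePushBack) + pset.Kernel(freezeOutOfBoundsWedge))
    pset.execute(kernels, runtime=timedelta(days=5), dt=timedelta(minutes=20))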
|
[
"parcels.Variable"
] |
[((380, 428), 'parcels.Variable', 'Variable', (['"""beached"""'], {'dtype': 'np.int32', 'initial': '(0.0)'}), "('beached', dtype=np.int32, initial=0.0)\n", (388, 428), False, 'from parcels import JITParticle, Variable\n'), ((451, 504), 'parcels.Variable', 'Variable', (['"""unbeachCount"""'], {'dtype': 'np.int32', 'initial': '(0.0)'}), "('unbeachCount', dtype=np.int32, initial=0.0)\n", (459, 504), False, 'from parcels import JITParticle, Variable\n'), ((556, 605), 'parcels.Variable', 'Variable', (['"""inBounds"""'], {'dtype': 'np.int32', 'initial': '(1.0)'}), "('inBounds', dtype=np.int32, initial=1.0)\n", (564, 605), False, 'from parcels import JITParticle, Variable\n'), ((866, 914), 'parcels.Variable', 'Variable', (['"""beached"""'], {'dtype': 'np.int32', 'initial': '(0.0)'}), "('beached', dtype=np.int32, initial=0.0)\n", (874, 914), False, 'from parcels import JITParticle, Variable\n'), ((937, 990), 'parcels.Variable', 'Variable', (['"""unbeachCount"""'], {'dtype': 'np.int32', 'initial': '(0.0)'}), "('unbeachCount', dtype=np.int32, initial=0.0)\n", (945, 990), False, 'from parcels import JITParticle, Variable\n'), ((1079, 1128), 'parcels.Variable', 'Variable', (['"""inBounds"""'], {'dtype': 'np.int32', 'initial': '(1.0)'}), "('inBounds', dtype=np.int32, initial=1.0)\n", (1087, 1128), False, 'from parcels import JITParticle, Variable\n')]
|
import click
import configparser
import hmac
import os
from flask import Flask
from flask import request
from flask import render_template
from .ghia_patterns import GhiaPatterns
from .ghia_requests import GhiaRequests
from .ghia_issue import Issue
BAD_REQUEST = 400
ALLOWED_ACTIONS = ["opened", "edited", "transferred", "reopened", "assigned", "unassigned", "labeled", "unlabeled"]
def prepare_app():
env_conf = os.getenv('GHIA_CONFIG')
if env_conf is None:
raise click.BadParameter("GHIA_CONFIG is missing from the environment.")
conf_paths = env_conf.split(":")
config_content = ""
for path in conf_paths:
with open(path, 'r') as file:
config_content += file.read() + "\n"
config = configparser.ConfigParser()
config.optionxform = str # maintain case sensitivity in keys
config.read_string(config_content)
if "github" not in config or "token" not in config["github"]:
raise click.BadParameter(GhiaPatterns.CONFIG_VALIDATION_ERR)
if "patterns" not in config:
raise click.BadParameter(GhiaPatterns.CONFIG_VALIDATION_ERR)
if "secret" not in config["github"]:
secret = None
else:
secret = config["github"]["secret"]
token = config["github"]["token"]
return token, secret, config
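# Illustrative GHIA_CONFIG layout (file names and values below are hypothetical; only
# the keys checked above are shown, and the [patterns] entries are consumed by GhiaPatterns):
#   export GHIA_CONFIG=credentials.cfg:rules.cfg
# credentials.cfg:
#   [github]
#   token = <personal access token>
#   secret = <webhook secret>        (optional; enables signature verification)
# rules.cfg:
#   [patterns]
#   ... assignment rules in the format expected by GhiaPatterns ...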
def prepare_app_test(conf):
session = conf["session"]
config = conf["config"]
token = conf["TOKEN"]
secret = conf["SECRET"]
return token, secret, config, session
def create_app(conf):
app = Flask(__name__)
if conf and "test" in conf and conf["test"]:
token, secret, config, session = prepare_app_test(conf)
else:
token, secret, config = prepare_app()
session = None
ghia_patterns = GhiaPatterns(config)
ghia_patterns.set_strategy('append')
req = GhiaRequests(token, session=session)
user = req.get_user()
def github_verify_request():
github_signed = request.headers.get('X-Hub-Signature')
if github_signed is None and secret is None:
# Signature check is skipped only if the secret is missing in the ghia-config and in the webhook config
return True
elif github_signed is None or secret is None:
# GitHub request has signature but ghia-config is missing the secret
# or ghia-config has secret but webhook doesn't send signed request
raise ValueError("Signature verification failed.")
try:
hash_name, hash_value = github_signed.split('=', maxsplit=2)
except ValueError:
raise ValueError("Signature header has incorrect format.")
if hash_name != 'sha1':
raise ValueError("GitHub signatures are expected to use SHA1.")
computed_hash = hmac.new(
bytearray(secret, "utf-8"), # get the secret as bytes
digestmod='sha1',
msg=request.get_data()
)
if computed_hash.hexdigest() != hash_value:
raise RuntimeError("The request signature is wrong.")
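    # For reference, a correctly signed request computes the same HMAC over the raw
    # body (hypothetical values, mirroring the check above):
    #   sig = 'sha1=' + hmac.new(secret.encode('utf-8'), payload_bytes, 'sha1').hexdigest()
    #   headers = {'X-Hub-Signature': sig, 'X-Github-Event': 'issues'}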
def process_issues():
data = request.get_json(silent=True)
if data is None:
return "Webhook request missing JSON data.", BAD_REQUEST
if data["issue"]["state"] == "closed":
return "Closed issue is ignored."
action = data["action"]
if action not in ALLOWED_ACTIONS:
return "This issue action is ignored."
issue = Issue(data["issue"])
req.slug = data["repository"]["full_name"]
updated_issue = ghia_patterns.apply_to(issue)
if updated_issue:
req.update_issue(updated_issue)
return "Issue update done."
def process_webhook():
event_type = request.headers.get('X-Github-Event')
try:
github_verify_request()
        except (RuntimeError, ValueError) as e:
return str(e), BAD_REQUEST
if event_type == "issues":
return process_issues()
elif event_type == "ping":
return "Ping OK"
else:
return "Event type ignored."
@app.route('/', methods=['POST', 'GET'])
def index():
if request.method == 'POST':
return process_webhook()
return render_template('index.html', user=user, patterns=ghia_patterns)
return app
|
[
"click.BadParameter",
"flask.request.headers.get",
"flask.Flask",
"flask.request.get_json",
"flask.request.get_data",
"flask.render_template",
"configparser.ConfigParser",
"os.getenv"
] |
[((420, 444), 'os.getenv', 'os.getenv', (['"""GHIA_CONFIG"""'], {}), "('GHIA_CONFIG')\n", (429, 444), False, 'import os\n'), ((742, 769), 'configparser.ConfigParser', 'configparser.ConfigParser', ([], {}), '()\n', (767, 769), False, 'import configparser\n'), ((1522, 1537), 'flask.Flask', 'Flask', (['__name__'], {}), '(__name__)\n', (1527, 1537), False, 'from flask import Flask\n'), ((484, 550), 'click.BadParameter', 'click.BadParameter', (['"""GHIA_CONFIG is missing from the environment."""'], {}), "('GHIA_CONFIG is missing from the environment.')\n", (502, 550), False, 'import click\n'), ((956, 1010), 'click.BadParameter', 'click.BadParameter', (['GhiaPatterns.CONFIG_VALIDATION_ERR'], {}), '(GhiaPatterns.CONFIG_VALIDATION_ERR)\n', (974, 1010), False, 'import click\n'), ((1059, 1113), 'click.BadParameter', 'click.BadParameter', (['GhiaPatterns.CONFIG_VALIDATION_ERR'], {}), '(GhiaPatterns.CONFIG_VALIDATION_ERR)\n', (1077, 1113), False, 'import click\n'), ((1946, 1984), 'flask.request.headers.get', 'request.headers.get', (['"""X-Hub-Signature"""'], {}), "('X-Hub-Signature')\n", (1965, 1984), False, 'from flask import request\n'), ((3090, 3119), 'flask.request.get_json', 'request.get_json', ([], {'silent': '(True)'}), '(silent=True)\n', (3106, 3119), False, 'from flask import request\n'), ((3733, 3770), 'flask.request.headers.get', 'request.headers.get', (['"""X-Github-Event"""'], {}), "('X-Github-Event')\n", (3752, 3770), False, 'from flask import request\n'), ((4239, 4303), 'flask.render_template', 'render_template', (['"""index.html"""'], {'user': 'user', 'patterns': 'ghia_patterns'}), "('index.html', user=user, patterns=ghia_patterns)\n", (4254, 4303), False, 'from flask import render_template\n'), ((2900, 2918), 'flask.request.get_data', 'request.get_data', ([], {}), '()\n', (2916, 2918), False, 'from flask import request\n')]
|
import numpy as np
from datetime import datetime
from astropy.io import ascii
from astropy.time import Time
from argparse import ArgumentParser
from antares_client.search import search, download
def build_query(ra0, dec0, fov, date):
"""Generate a query (a Python dictionary) to submit to the ANTARES client.
Parameters
----------
ra0 : float or None
Central RA for object search, in deg.
dec0 : float or None
Central declination for object search, in deg.
fov : float
Side length of box for search, in deg.
date : str
Start date for search; format is YYYY-MM-DD.
Returns
-------
query : dict
An ElasticSearch dictionary.
"""
# Build up the query.
query = { 'query': { 'bool': { 'must': [] } } }
# desi_candidate_test data stream:
# snfilter_last_proc_status should have a string like "Locus has two or
# more detections and is in DESI brightness range. Triggering."
query['query']['bool']['must'].append(
{ 'match':{ 'properties.snfilter_last_proc_status': '*DESI*' } })
# Set up the declination search.
if dec0 is not None:
ddec = 0.5 * fov
# dra / cos(dec) ensures an equal-area search rectangle.
dra = 0.5*fov / np.cos(np.radians(dec0))
query['query']['bool']['must'].append(
{'range': {'dec':{ 'gte':dec0-ddec, 'lte':dec0+ddec, } } })
else:
dra = 0.5*fov
# Set up the RA search.
if ra0 is not None:
query['query']['bool']['must'].append(
{'range': {'ra':{ 'gte':(ra0-dra)%360., 'lte':(ra0+dra)%360., } } })
# Set up the cumulative date search.
if date is not None:
tobs = Time(date).mjd
query['query']['bool']['must'].append(
{'range': {'mjd':{ 'gte':tobs, } } })
return query
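# Illustrative result (values rounded): build_query(ra0=150., dec0=30., fov=3.2, date='2020-01-01')
# returns roughly
#   {'query': {'bool': {'must': [
#       {'match': {'properties.snfilter_last_proc_status': '*DESI*'}},
#       {'range': {'dec': {'gte': 28.4, 'lte': 31.6}}},
#       {'range': {'ra': {'gte': 148.2, 'lte': 151.8}}},
#       {'range': {'mjd': {'gte': 58849.0}}}]}}}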
if __name__ == '__main__':
today = datetime.today()
parser = ArgumentParser(description='Client API to query ANTARES alert DB')
parser.add_argument('--ra', default=None, type=float,
help='RA (J2000), in deg')
parser.add_argument('--dec', default=None, type=float,
help='Dec (J2000), in deg')
parser.add_argument('--tobs', default=datetime.today().strftime('%Y-%m-%d'),
help='Obs date [YYYY-MM-DD]')
args = parser.parse_args()
# Create query dict for ANTARES stream search.
query = build_query(ra0=args.ra, dec0=args.dec, fov=3.2, date=args.tobs)
print(query)
#result_set = search(query)
#print(result_set)
outfile = 'results_antares'
if args.ra is not None:
outfile = '{}_ra{:03.1f}'.format(outfile, args.ra)
if args.dec is not None:
outfile = '{}_dec{:03.1f}'.format(outfile, args.dec)
if args.tobs is not None:
outfile = '{}_{}'.format(outfile, args.tobs)
outfile += '.csv'
result_set = download(query, outfile, output_format='csv', decompress=True)
|
[
"numpy.radians",
"antares_client.search.download",
"datetime.datetime.today",
"argparse.ArgumentParser",
"astropy.time.Time"
] |
[((1882, 1898), 'datetime.datetime.today', 'datetime.today', ([], {}), '()\n', (1896, 1898), False, 'from datetime import datetime\n'), ((1913, 1979), 'argparse.ArgumentParser', 'ArgumentParser', ([], {'description': '"""Client API to query ANTARES alert DB"""'}), "(description='Client API to query ANTARES alert DB')\n", (1927, 1979), False, 'from argparse import ArgumentParser\n'), ((2901, 2963), 'antares_client.search.download', 'download', (['query', 'outfile'], {'output_format': '"""csv"""', 'decompress': '(True)'}), "(query, outfile, output_format='csv', decompress=True)\n", (2909, 2963), False, 'from antares_client.search import search, download\n'), ((1711, 1721), 'astropy.time.Time', 'Time', (['date'], {}), '(date)\n', (1715, 1721), False, 'from astropy.time import Time\n'), ((1278, 1294), 'numpy.radians', 'np.radians', (['dec0'], {}), '(dec0)\n', (1288, 1294), True, 'import numpy as np\n'), ((2242, 2258), 'datetime.datetime.today', 'datetime.today', ([], {}), '()\n', (2256, 2258), False, 'from datetime import datetime\n')]
|
"""
Creates a MobileNetV2 Model as defined in:
<NAME>, <NAME>, <NAME>, <NAME>, <NAME>. (2018).
MobileNetV2: Inverted Residuals and Linear Bottlenecks
arXiv preprint arXiv:1801.04381.
import from https://github.com/tonylins/pytorch-mobilenet-v2
"""
import torch.nn as nn
import math
import torch
from . import my_op
__all__ = ['mobilenetv2']
def _make_divisible(v, divisor, min_value=None):
"""
This function is taken from the original tf repo.
It ensures that all layers have a channel number that is divisible by 8
It can be seen here:
https://github.com/tensorflow/models/blob/master/research/slim/nets/mobilenet/mobilenet.py
:param v:
:param divisor:
:param min_value:
:return:
"""
if min_value is None:
min_value = divisor
new_v = max(min_value, int(v + divisor / 2) // divisor * divisor)
# Make sure that round down does not go down by more than 10%.
if new_v < 0.9 * v:
new_v += divisor
return new_v
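# Worked examples (illustrative):
#   _make_divisible(30, 8) -> 32   (rounded up to the nearest multiple of 8)
#   _make_divisible(24, 8) -> 24   (already divisible, unchanged)
#   _make_divisible(17, 8) -> 16   (rounded down, since the loss stays under 10%)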
def conv_3x3_bn(inp, oup, stride):
return nn.Sequential(
nn.Conv2d(inp, oup, 3, stride, 1, bias=False),
nn.BatchNorm2d(oup),
nn.ReLU6(inplace=True)
)
def conv_1x1_bn(inp, oup):
return nn.Sequential(
nn.Conv2d(inp, oup, 1, 1, 0, bias=False),
nn.BatchNorm2d(oup),
nn.ReLU6(inplace=True)
)
class InvertedResidual(nn.Module):
def __init__(self, block_id, inp, oup, stride, expand_ratio):
super(InvertedResidual, self).__init__()
assert stride in [1, 2]
hidden_dim = round(inp * expand_ratio)
self.expand_ratio = expand_ratio
self.identity = stride == 1 and inp == oup
self.ReLU = nn.ReLU6(inplace=True)
if expand_ratio == 1:
self.conv1 = nn.Conv2d(inp, hidden_dim, 3, stride, 1, groups=hidden_dim, bias=False)
self.bn1 = nn.BatchNorm2d(hidden_dim)
self.conv2 = nn.Conv2d(hidden_dim, oup, 1, 1, 0, bias=False)
self.bn2 = nn.BatchNorm2d(oup)
else:
self.block_id = block_id
self.activation_size_list = [112, 56, 56, 28, 28, 28, 14, 14, 14, 14, 14, 14, 14, 7, 7, 7]
self.AP = my_op.APLayer(hidden_dim, hidden_dim, activation_size=self.activation_size_list[block_id], max_ks=2,
layer_id=block_id)
# hidden layer of each block
# 96, 144, 144, 192, 192, 192, 384, 384, 384, 384, 576, 576, 576, 960, 960, 960]
self.conv1 = nn.Conv2d(inp, hidden_dim, 1, 1, 0, bias=False)
self.bn1 = nn.BatchNorm2d(hidden_dim)
self.conv2 = nn.Conv2d(hidden_dim, hidden_dim, 3, stride, 1, groups=hidden_dim, bias=False)
self.bn2 = nn.BatchNorm2d(hidden_dim)
self.conv3 = nn.Conv2d(hidden_dim, oup, 1, 1, 0, bias=False)
self.bn3 = nn.BatchNorm2d(oup)
self.index_code = [] # the generated index code
self.scale_factor = 0.1
self.channel_index = [] # binary index code for evaluation
def forward(self, x):
output = x
if self.expand_ratio == 1:
x = self.ReLU(self.bn1(self.conv1(x)))
x = self.bn2(self.conv2(x))
else:
x = self.ReLU(self.bn1(self.conv1(x)))
x_scale = self.AP(x, self.scale_factor, self.channel_index)
self.index_code = x_scale
x = my_op.MyScale.apply(x, x_scale)
x = self.ReLU(self.bn2(self.conv2(x)))
x = my_op.MyScale.apply(x, x_scale)
x = self.bn3(self.conv3(x))
if self.identity:
return x + output
else:
return x
class MobileNetV2(nn.Module):
def __init__(self, model_path, num_classes=1000, width_mult=1.):
super(MobileNetV2, self).__init__()
# setting of inverted residual blocks
self.cfgs = [
# t, c, n, s
[1, 16, 1, 1],
[6, 24, 2, 2],
[6, 32, 3, 2],
[6, 64, 4, 2],
[6, 96, 3, 1],
[6, 160, 3, 2],
[6, 320, 1, 1],
]
# building first layer
input_channel = _make_divisible(32 * width_mult, 4 if width_mult == 0.1 else 8)
layers = [conv_3x3_bn(3, input_channel, 2)]
# building inverted residual blocks
block = InvertedResidual
block_id = -1
for t, c, n, s in self.cfgs:
output_channel = _make_divisible(c * width_mult, 4 if width_mult == 0.1 else 8)
for i in range(n):
layers.append(block(block_id, input_channel, output_channel, s if i == 0 else 1, t))
input_channel = output_channel
block_id += 1
self.features = nn.Sequential(*layers)
# building last several layers
output_channel = _make_divisible(1280 * width_mult, 4 if width_mult == 0.1 else 8) if width_mult > 1.0 else 1280
self.conv = conv_1x1_bn(input_channel, output_channel)
self.avgpool = nn.AdaptiveAvgPool2d((1, 1))
self.classifier = nn.Linear(output_channel, num_classes)
self._initialize_weights(model_path)
def forward(self, x, scale_factor=1.0, channel_index=None):
self.set_scale_factor(scale_factor)
if not self.training:
self.set_channel_index(channel_index)
x = self.features(x)
x = self.conv(x)
x = self.avgpool(x)
x = x.view(x.size(0), -1)
x = self.classifier(x)
index_code = self.get_index_code()
return x, index_code
def set_scale_factor(self, scale_factor):
for item in self.features._modules:
if item == '0' or item == '1':
continue # pass the first two blocks
block = self.features._modules[item]
block.scale_factor = scale_factor
def set_channel_index(self, channel_index):
for item in self.features._modules:
if item == '0' or item == '1':
continue # pass the first two blocks
block = self.features._modules[item]
block.channel_index = channel_index
def get_index_code(self):
index_code = []
for item in self.features._modules:
if item == '0' or item == '1':
continue # pass the first two blocks
block = self.features._modules[item]
index_code.append(block.index_code)
return index_code
def _initialize_weights(self, model_path):
model_weight = torch.load(model_path)
for m in self.modules():
if isinstance(m, nn.Conv2d):
n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
m.weight.data.normal_(0, math.sqrt(2. / n))
if m.bias is not None:
m.bias.data.zero_()
elif isinstance(m, nn.BatchNorm2d):
m.weight.data.fill_(1)
m.bias.data.zero_()
elif isinstance(m, nn.Linear):
m.weight.data.normal_(0, 0.01)
m.bias.data.zero_()
my_weight = self.state_dict()
my_keys = list(my_weight)
new_keys = []
for item in my_keys:
if 'AP' not in item:
new_keys.append(item)
for i, (k, v) in enumerate(model_weight.items()):
my_weight[new_keys[i]] = v
self.load_state_dict(my_weight)
def mobilenetv2(**kwargs):
"""
Constructs a MobileNet V2 model
"""
return MobileNetV2(**kwargs)
if __name__ == '__main__':
import os
os.environ['CUDA_VISIBLE_DEVICES'] = '0'
model_path = '/mnt/data3/luojh/project/6_CURL/Journal/pretrained_model/ImageNet/mobilenetv2_1.0-0c6065bc.pth'
model = MobileNetV2(model_path).cuda()
input = torch.zeros((1, 3, 224, 224)).cuda()
output = model(input)
a=1
|
[
"torch.nn.AdaptiveAvgPool2d",
"torch.nn.ReLU6",
"math.sqrt",
"torch.nn.Sequential",
"torch.load",
"torch.nn.Conv2d",
"torch.nn.BatchNorm2d",
"torch.nn.Linear",
"torch.zeros"
] |
[((1058, 1103), 'torch.nn.Conv2d', 'nn.Conv2d', (['inp', 'oup', '(3)', 'stride', '(1)'], {'bias': '(False)'}), '(inp, oup, 3, stride, 1, bias=False)\n', (1067, 1103), True, 'import torch.nn as nn\n'), ((1113, 1132), 'torch.nn.BatchNorm2d', 'nn.BatchNorm2d', (['oup'], {}), '(oup)\n', (1127, 1132), True, 'import torch.nn as nn\n'), ((1142, 1164), 'torch.nn.ReLU6', 'nn.ReLU6', ([], {'inplace': '(True)'}), '(inplace=True)\n', (1150, 1164), True, 'import torch.nn as nn\n'), ((1234, 1274), 'torch.nn.Conv2d', 'nn.Conv2d', (['inp', 'oup', '(1)', '(1)', '(0)'], {'bias': '(False)'}), '(inp, oup, 1, 1, 0, bias=False)\n', (1243, 1274), True, 'import torch.nn as nn\n'), ((1284, 1303), 'torch.nn.BatchNorm2d', 'nn.BatchNorm2d', (['oup'], {}), '(oup)\n', (1298, 1303), True, 'import torch.nn as nn\n'), ((1313, 1335), 'torch.nn.ReLU6', 'nn.ReLU6', ([], {'inplace': '(True)'}), '(inplace=True)\n', (1321, 1335), True, 'import torch.nn as nn\n'), ((1687, 1709), 'torch.nn.ReLU6', 'nn.ReLU6', ([], {'inplace': '(True)'}), '(inplace=True)\n', (1695, 1709), True, 'import torch.nn as nn\n'), ((4724, 4746), 'torch.nn.Sequential', 'nn.Sequential', (['*layers'], {}), '(*layers)\n', (4737, 4746), True, 'import torch.nn as nn\n'), ((4993, 5021), 'torch.nn.AdaptiveAvgPool2d', 'nn.AdaptiveAvgPool2d', (['(1, 1)'], {}), '((1, 1))\n', (5013, 5021), True, 'import torch.nn as nn\n'), ((5048, 5086), 'torch.nn.Linear', 'nn.Linear', (['output_channel', 'num_classes'], {}), '(output_channel, num_classes)\n', (5057, 5086), True, 'import torch.nn as nn\n'), ((6501, 6523), 'torch.load', 'torch.load', (['model_path'], {}), '(model_path)\n', (6511, 6523), False, 'import torch\n'), ((1766, 1837), 'torch.nn.Conv2d', 'nn.Conv2d', (['inp', 'hidden_dim', '(3)', 'stride', '(1)'], {'groups': 'hidden_dim', 'bias': '(False)'}), '(inp, hidden_dim, 3, stride, 1, groups=hidden_dim, bias=False)\n', (1775, 1837), True, 'import torch.nn as nn\n'), ((1861, 1887), 'torch.nn.BatchNorm2d', 'nn.BatchNorm2d', (['hidden_dim'], {}), '(hidden_dim)\n', (1875, 1887), True, 'import torch.nn as nn\n'), ((1913, 1960), 'torch.nn.Conv2d', 'nn.Conv2d', (['hidden_dim', 'oup', '(1)', '(1)', '(0)'], {'bias': '(False)'}), '(hidden_dim, oup, 1, 1, 0, bias=False)\n', (1922, 1960), True, 'import torch.nn as nn\n'), ((1984, 2003), 'torch.nn.BatchNorm2d', 'nn.BatchNorm2d', (['oup'], {}), '(oup)\n', (1998, 2003), True, 'import torch.nn as nn\n'), ((2495, 2542), 'torch.nn.Conv2d', 'nn.Conv2d', (['inp', 'hidden_dim', '(1)', '(1)', '(0)'], {'bias': '(False)'}), '(inp, hidden_dim, 1, 1, 0, bias=False)\n', (2504, 2542), True, 'import torch.nn as nn\n'), ((2566, 2592), 'torch.nn.BatchNorm2d', 'nn.BatchNorm2d', (['hidden_dim'], {}), '(hidden_dim)\n', (2580, 2592), True, 'import torch.nn as nn\n'), ((2618, 2696), 'torch.nn.Conv2d', 'nn.Conv2d', (['hidden_dim', 'hidden_dim', '(3)', 'stride', '(1)'], {'groups': 'hidden_dim', 'bias': '(False)'}), '(hidden_dim, hidden_dim, 3, stride, 1, groups=hidden_dim, bias=False)\n', (2627, 2696), True, 'import torch.nn as nn\n'), ((2720, 2746), 'torch.nn.BatchNorm2d', 'nn.BatchNorm2d', (['hidden_dim'], {}), '(hidden_dim)\n', (2734, 2746), True, 'import torch.nn as nn\n'), ((2772, 2819), 'torch.nn.Conv2d', 'nn.Conv2d', (['hidden_dim', 'oup', '(1)', '(1)', '(0)'], {'bias': '(False)'}), '(hidden_dim, oup, 1, 1, 0, bias=False)\n', (2781, 2819), True, 'import torch.nn as nn\n'), ((2843, 2862), 'torch.nn.BatchNorm2d', 'nn.BatchNorm2d', (['oup'], {}), '(oup)\n', (2857, 2862), True, 'import torch.nn as nn\n'), ((7762, 7791), 'torch.zeros', 'torch.zeros', (['(1, 3, 224, 224)'], {}), '((1, 3, 224, 224))\n', (7773, 7791), False, 'import torch\n'), ((6712, 6730), 'math.sqrt', 'math.sqrt', (['(2.0 / n)'], {}), '(2.0 / n)\n', (6721, 6730), False, 'import math\n')]
|