"""
Created on 16:58, Apr. 22nd, 2021
Author: fassial
Filename: AlphaSyn.py
"""
import brainpy as bp
__all__ = [
"AlphaSyn",
]
class AlphaSyn(bp.TwoEndConn):
target_backend = "general"
@staticmethod
def derivative(s, x, t, tau):
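# Alpha-function kernel s(t) = (t/tau) * exp(-t/tau), written as the 2nd-order ODE
# tau**2 * s'' + 2*tau*s' + s = 0 and integrated here as two first-order equations.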
dxdt = (-2 * tau * x - s) / (tau ** 2)
dsdt = x
return dsdt, dxdt
def __init__(self, pre, post, conn, weight = .2, delay = 0., tau = 2., **kwargs):
# init params
self.tau = tau
self.delay = delay
self.weight = weight
# init connections
self.conn = conn(pre.size, post.size)
self.conn_mat = self.conn.requires("conn_mat")
self.size = bp.ops.shape(self.conn_mat)
# init vars
self.s = bp.ops.zeros(self.size)
self.x = bp.ops.zeros(self.size)
self.w = bp.ops.ones(self.size) * self.weight
self.Isyn = self.register_constant_delay("Isyn",
size = self.size,
delay_time = self.delay
)
# init integral
self.integral = bp.odeint(
f = AlphaSyn.derivative,
method = "euler"
)
# init super
super(AlphaSyn, self).__init__(pre = pre, post = post, **kwargs)
def update(self, _t):
self.s, self.x = self.integral(self.s, self.x, _t, self.tau)
self.x += bp.ops.unsqueeze(self.pre.spike, 1) * self.conn_mat
self.Isyn.push(self.w * self.s)
self.post.input += bp.ops.sum(self.Isyn.pull(), axis = 0)
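# Hypothetical wiring sketch (commented out; assumes a BrainPy 1.x LIF neuron group class named `LIF`):
# pre = LIF(100, monitors=['spike'])
# post = LIF(100, monitors=['V'])
# syn = AlphaSyn(pre=pre, post=post, conn=bp.connect.All2All(), weight=.2, delay=1., tau=2.)
# net = bp.Network(pre, syn, post)
# net.run(200., inputs=(pre, 'input', 26.))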
|
import tellcore.constants as const
import json
def __parseDeviceData__(device):
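# Summarize a device as a plain dict; it is reported as switched on when the last
# command sent to it was TELLSTICK_TURNON.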
lastCommand = device.last_sent_command(const.TELLSTICK_TURNON | const.TELLSTICK_TURNOFF | const.TELLSTICK_DIM)
return {'id': device.id, 'name': device.name, 'switchedOn': lastCommand == const.TELLSTICK_TURNON}
def __getDevice__(deviceId, devices):
filteredDevices = filter(lambda d: d.id == deviceId, devices)
return next(filteredDevices)
def __turn_on_device__(deviceId, devices):
device = __getDevice__(deviceId, devices)
device.turn_on()
def __turn_off_device__(deviceId, devices):
device = __getDevice__(deviceId, devices)
device.turn_off()
def turn_off(deviceIds, devices):
for deviceId in deviceIds:
__turn_off_device__(deviceId, devices)
def turn_on(deviceIds, devices):
for deviceId in deviceIds:
__turn_on_device__(deviceId, devices)
def list_devices(devices):
individualDevices = filter(lambda d: d.type != const.TELLSTICK_TYPE_GROUP, devices)
deviceGroups = filter(lambda d: d.type == const.TELLSTICK_TYPE_GROUP, devices)
individualDevicesData = list(map(__parseDeviceData__, individualDevices))
deviceGroupsData = list(map(__parseDeviceData__, deviceGroups))
allDevicesData = {'devices': individualDevicesData, 'groups': deviceGroupsData}
return json.dumps(allDevicesData, ensure_ascii=False).encode('utf8')
|
# Code for custom code recipe acf_amazon_cloud_watch (imported from a Python recipe)
# J.L.G.
# import the classes for accessing DSS objects from the recipe
import dataiku
# Import the helpers for custom recipes
from dataiku.customrecipe import *
# Output dataset, containing the token retrieved from last API call to Amazon CloudWatch:
output_feedbacks = get_output_names_for_role('feedback_output')
feedback_output_dataset = [dataiku.Dataset(name) for name in output_feedbacks]
# The configuration consists of the parameters set up by the user in the recipe Settings tab.
# Retrieve parameter values from the of map of parameters
access_key = get_recipe_config()['access_key']
secret_key = get_recipe_config()['secret_key']
##############################################
# Recipe
##############################################
# -*- coding: utf-8 -*-
import dataiku
import pandas as pd, numpy as np
import boto3
import datetime
import ast
import json  # needed by build_dimensions below
# Initialization of boto3's cloudwatch object to push data to Amazon CloudWatch
cloudwatch = boto3.client('cloudwatch',
region_name='eu-west-1', # Change the Amazon CloudWatch Location when required.
aws_access_key_id=access_key,
aws_secret_access_key=secret_key)
# Class to process rows that need to be sent to CloudWatch
class row_content:
namespace = ""
timestamp = ""
metric_name = ""
value = ""
dimensions = ""
##############################################
# Function to put metrics to CloudWatch
##############################################
def put_metric_to_cloudwatch(row_data):
response = cloudwatch.put_metric_data(
Namespace = row_data.namespace,
MetricData = [
{
'Timestamp': row_data.timestamp,
'MetricName': row_data.metric_name,
'Dimensions': row_data.dimensions,
'Value': row_data.value,
'Unit': 'None'
},
]
)
##############################################
# Function to get token value
##############################################
def get_token(dataset_df, key):
if dataset_df is None:
return None
key_condition = (dataset_df['key'] == key)
dataset_row = dataset_df[key_condition]
if len(dataset_row.value) == 1:
token_value = dataset_row.value.tolist()[0]
else:
token_value = -1
return token_value
##############################################
# Function to process data
##############################################
def process_dataset (dataset_df, token_value):
if token_value is not None:
filter_cond = (dataset_df['Timestamp'] >= token_value)
procesed_dataset_df = dataset_df[filter_cond]
else:
procesed_dataset_df = dataset_df
print('Processing columns: %s' % list(dataset_df.columns))
count = 0
errors = 0
row_data = row_content()
for index, row in procesed_dataset_df.iterrows():
# Manipulate dataset here, and call the put_metric_to_cloudwatch function for each row.
row_data.namespace = row.Namespace
row_data.timestamp = row.Timestamp
row_data.metric_name = row.MetricName
row_data.value = row.MetricValue
try:
row_data.dimensions = ast.literal_eval(row.MetricDimensions)
except:
row_data.dimensions = []
try:
put_metric_to_cloudwatch(row_data)
count += 1
except Exception as ex:
errors += 1
print(ex)
print('[ACF_CLOUDWATCH plugin] - %d errors' % errors)
return (count, errors)
#############################################################
# Function to convert all non-standard columns into dimensions
#############################################################
def build_dimensions(indsdf):
# Basic columns. Value and Unit prefixed with Metric
metricColumns = \
["Namespace","MetricName","Timestamp","MetricValue","MetricUnit"]
metricDimensions = \
indsdf[[c for c in indsdf.columns if not c in metricColumns]]
print('metricColumns: %s' % metricColumns)
print('metricDimensions: %s' % list(metricDimensions.columns))
# Dimensions generator
def process_dimensions(row):
arr = [json.dumps({"Name": c, "Value": str(row[c])}) \
for ix,c \
in enumerate(metricDimensions.columns) if str(row[c]) != "nan"]
dms = "[%s]" % ",".join(arr)
return dms
indsdf["MetricDimensions"] = indsdf.apply(process_dimensions, axis=1)
# Drop dimension columns
indsdf = indsdf.drop(metricDimensions.columns, axis=1)
return indsdf
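# Example (hypothetical input row): columns Namespace, MetricName, Timestamp, MetricValue plus an
# extra column host="web-1" become MetricDimensions='[{"Name": "host", "Value": "web-1"}]',
# and the host column is dropped from the output frame.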
def load_feedback_dataset():
try:
input_feedback = get_output_names_for_role('feedback_output')
if len(input_feedback) != 0:
feedback = dataiku.Dataset(input_feedback[0])
return feedback.get_dataframe()
except:
pass
return None
#############################################################
# Function to iterate over all inputs
#############################################################
def process_inputs():
# Get feedback
feedback_df = load_feedback_dataset()
names = get_input_names_for_role('metrics')
for name in names:
process_input(name, feedback_df)
# Write the output to the output dataset
feedback_output_dataset[0].write_with_schema(pd.DataFrame(output_feedback))
def process_input(dsname, feedback_df):
print('Processing dataset: %s' % dsname)
token_value = get_token(feedback_df, dsname)
df = dataiku.Dataset(dsname).get_dataframe()
count = 0
errors = 0
if not df.empty:
df = build_dimensions(df)
count, errors = process_dataset(df, token_value)
token_value = datetime.datetime.now().isoformat()
print('[ACF_CLOUDWATCH plugin] - ' + str(count) + ' rows successfully processed. Token value: ' + token_value)
output_feedback.append({ "key": dsname, "value": token_value, "count": count, "errors": errors })
print('%s input processed correctly.' % dsname)
##############################################
# Main
##############################################
output_feedback = []
process_inputs()
|
import responses
import unittest
from tests.support import with_resource, with_fixture, characters
from twitter_ads.account import Account
from twitter_ads.creative import Tweets
from twitter_ads.client import Client
from twitter_ads.cursor import Cursor
from twitter_ads.enum import TWEET_TYPE
from twitter_ads import API_VERSION
@responses.activate
def test_tweets_get_all():
responses.add(responses.GET,
with_resource('/' + API_VERSION + '/accounts/2iqph'),
body=with_fixture('accounts_load'),
content_type='application/json')
responses.add(responses.GET,
with_resource('/' + API_VERSION + '/accounts/2iqph/tweets'),
body=with_fixture('tweets_get'),
content_type='application/json')
client = Client(
characters(40),
characters(40),
characters(40),
characters(40)
)
account = Account.load(client, '2iqph')
tweets = Tweets.all(
account,
tweet_ids=['1166476031668015104'],
tweet_type=TWEET_TYPE.PUBLISHED,
trim_user=True
)
assert tweets is not None
assert isinstance(tweets, Cursor)
assert tweets.count == 1
assert tweets.first['tweet_id'] == '1166476031668015104'
|
import os
import urllib2
import time
import multiprocessing.dummy as multiprocessing
import string
from random import choice
import socket
from ctypes import c_int
import tempfile
import dummy
from logger import log
"Smart Downloading Module. Written by Itay Brandes."
shared_bytes_var = multiprocessing.Value(c_int, 0) # a ctypes var that counts the bytes already downloaded
def DownloadFile(url, path, startByte=0, endByte=None, ShowProgress=True):
'''
Function downloads file.
@param url: File url address.
@param path: Destination file path.
@param startByte: Start byte.
@param endByte: End byte. Will work only if server supports HTTPRange headers.
@param ShowProgress: If true, shows textual progress bar.
@return path: Destination file path.
'''
url = url.replace(' ', '%20')
headers = {}
if endByte is not None:
headers['Range'] = 'bytes=%d-%d' % (startByte,endByte)
req = urllib2.Request(url, headers=headers)
try:
urlObj = urllib2.urlopen(req, timeout=4)
except urllib2.HTTPError, e:
if "HTTP Error 416" in str(e):
# HTTP 416 Error: Requested Range Not Satisfiable. Happens when we ask
# for a range that is not available on the server. It will happen when
# the server will try to send us a .html page that means something like
# "you opened too many connections to our server". If this happens, we
# will wait for the other threads to finish their connections and try again.
log.warning("Thread didn't get the file it was expecting. Retrying...")
time.sleep(5)
return DownloadFile(url, path, startByte, endByte, ShowProgress)
else:
raise e
f = open(path, 'wb')
meta = urlObj.info()
try:
filesize = int(meta.getheaders("Content-Length")[0])
except IndexError:
log.warning("Server did not send Content-Length.")
ShowProgress=False
filesize_dl = 0
block_sz = 8192
while True:
try:
buff = urlObj.read(block_sz)
except (socket.timeout, socket.error, urllib2.HTTPError), e:
dummy.shared_bytes_var.value -= filesize_dl
raise e
if not buff:
break
filesize_dl += len(buff)
try:
dummy.shared_bytes_var.value += len(buff)
except AttributeError:
pass
f.write(buff)
if ShowProgress:
status = r"%.2f MB / %.2f MB %s [%3.2f%%]" % (filesize_dl / 1024.0 / 1024.0,
filesize / 1024.0 / 1024.0, progress_bar(1.0*filesize_dl/filesize),
filesize_dl * 100.0 / filesize)
status += chr(8)*(len(status)+1)
print status,
if ShowProgress:
print "\n"
f.close()
return path
def DownloadFile_Parall(url, path=None, processes=6,
minChunkFile=1024**2, nonBlocking=False):
'''
Function downloads a file in parallel.
@param url: File url address.
@param path: Destination file path.
@param processes: Number of processes to use in the pool.
@param minChunkFile: Minimum chunk file in bytes.
@param nonBlocking: If true, returns (mapObj, pool). A list of file parts will be returned
from the mapObj results, and the developer must join them himself.
Developer also must close and join the pool.
@return mapObj: Only if nonBlocking is True. A multiprocessing.pool.AsyncResult object.
@return pool: Only if nonBlocking is True. A multiprocessing.pool object.
'''
from HTTPQuery import Is_ServerSupportHTTPRange
global shared_bytes_var
shared_bytes_var.value = 0
url = url.replace(' ', '%20')
if not path:
path = get_rand_filename(os.environ['temp'])
if not os.path.exists(os.path.dirname(path)):
os.makedirs(os.path.dirname(path))
log.debug("Downloading to %s..." % path)
urlObj = urllib2.urlopen(url)
meta = urlObj.info()
filesize = int(meta.getheaders("Content-Length")[0])
if filesize/processes > minChunkFile and Is_ServerSupportHTTPRange(url):
args = []
pos = 0
chunk = filesize/processes
for i in range(processes):
startByte = pos
endByte = pos + chunk
if endByte > filesize-1:
endByte = filesize-1
args.append([url, path+".%.3d" % i, startByte, endByte, False])
pos += chunk+1
else:
args = [[url, path+".000", None, None, False]]
log.debug("Running %d processes..." % processes)
pool = multiprocessing.Pool(processes, initializer=_initProcess,initargs=(shared_bytes_var,))
mapObj = pool.map_async(lambda x: DownloadFile(*x) , args)
if nonBlocking:
return mapObj, pool
while not mapObj.ready():
status = r"%.2f MB / %.2f MB %s [%3.2f%%]" % (shared_bytes_var.value / 1024.0 / 1024.0,
filesize / 1024.0 / 1024.0, progress_bar(1.0*shared_bytes_var.value/filesize),
shared_bytes_var.value * 100.0 / filesize)
status = status + chr(8)*(len(status)+1)
print status,
time.sleep(0.1)
file_parts = mapObj.get()
pool.terminate()
pool.join()
combine_files(file_parts, path)
def combine_files(parts, path):
'''
Function combines file parts.
@param parts: List of file paths.
@param path: Destination path.
'''
with open(path,'wb') as output:
for part in parts:
with open(part,'rb') as f:
output.writelines(f.readlines())
os.remove(part)
def progress_bar(progress, length=20):
'''
Function creates a textual progress bar.
@param progress: Float number between 0 and 1 describes the progress.
@param length: The length of the progress bar in chars. Default is 20.
'''
length -= 2 # The bracket are 2 chars long.
return "[" + "#"*int(progress*length) + "-"*(length-int(progress*length)) + "]"
def get_rand_filename(dir_=os.getcwd()):
"Function returns a non-existent random filename."
return tempfile.mkstemp('.tmp', '', dir_)[1]
def _initProcess(x):
dummy.shared_bytes_var = x
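# Minimal usage sketch (commented out; hypothetical URL and Windows temp path, Python 2 environment assumed):
# if __name__ == '__main__':
#     DownloadFile('http://example.com/file.bin', r'C:\temp\file.bin')  # single connection
#     DownloadFile_Parall('http://example.com/file.bin', r'C:\temp\file.bin', processes=4)  # split download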
|
def secondhighest(listA):
# Track the largest and second-largest values seen so far.
# Duplicates count, so the second highest of [..., 6, 6] is 6.
best = max(listA[0], listA[1])
secbest = min(listA[0], listA[1])
for i in range(2, len(listA)):
if listA[i] > best:
secbest = best
best = listA[i]
elif listA[i] > secbest:
secbest = listA[i]
return secbest
if __name__ == "__main__":
A = [1,2,3,1,2,5,6,6]
print("second highest in array A = ",A, "is ",secondhighest(A))
|
# Copyright 2020,2021 Sony Corporation.
# Copyright 2021 Sony Group Corporation.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import nnabla as nn
import numpy as np
from nnabla.ext_utils import get_extension_context
from nnabla.monitor import Monitor, MonitorImage
from tqdm import tqdm
from dataset import DTUMVSDataSource
from helper import generate_raydir_camloc, generate_all_pixels
from network import render as _render
def render(pose, intrinsic, mask_obj, conf):
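# Render the full H x W image in horizontal strips of `batch_height` rows so each forward pass stays within memory.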
assert conf.height % conf.batch_height == 0, \
f"conf.height ({conf.height}) % conf.batch_height ({conf.batch_height}) != 0"
W, H = conf.width, conf.height
bh = conf.batch_height
xy = generate_all_pixels(W, H)
xy = xy.reshape((1, H, W, 2))
camloc = nn.Variable([1, 3])
raydir = nn.Variable([1, bh * W, 3])
with nn.auto_forward(False):
color_pred = _render(camloc, raydir, conf).reshape((1, bh, W, 3))
rimage = np.ndarray([1, H, W, 3])
for h in tqdm(range(0, H, bh), desc="Rendering"):
xy_h = xy[:, h:h+bh, :, :].reshape((1, bh * W, 2))
raydir.d, camloc.d = generate_raydir_camloc(pose, intrinsic, xy_h)
color_pred.forward(clear_buffer=True)
rimage[0, h:h+bh, :, :] = color_pred.d.copy()
rimage = rimage * mask_obj
return rimage.transpose((0, 3, 1, 2)) # NCHW
def main(args):
# Setting
device_id = args.device_id
conf = args.conf
path = conf.data_path
B = conf.batch_size
R = conf.n_rays
L = conf.layers
D = conf.depth
feature_size = conf.feature_size
ctx = get_extension_context('cudnn', device_id=device_id)
nn.set_default_context(ctx)
# Dataset
ds = DTUMVSDataSource(path, R, shuffle=True)
# Monitor
monitor_path = "/".join(args.model_load_path.split("/")[0:-1])
monitor = Monitor(monitor_path)
monitor_image = MonitorImage(
f"Rendered image synthesis", monitor, interval=1)
# Load model
nn.load_parameters(args.model_load_path)
# Render
pose = ds.poses[conf.valid_index:conf.valid_index+1, ...]
intrinsic = ds.intrinsics[conf.valid_index:conf.valid_index+1, ...]
mask_obj = ds.masks[conf.valid_index:conf.valid_index+1, ...]
image = render(pose, intrinsic, mask_obj, conf)
monitor_image.add(conf.valid_index, image)
if __name__ == '__main__':
import argparse
from ruamel.yaml import YAML
from collections import namedtuple
parser = argparse.ArgumentParser(
description="Implicit Differentiable Renderer Training.")
parser.add_argument('--device-id', '-d', type=int, default=0)
parser.add_argument('--model-load-path', type=str, required=True)
parser.add_argument('--config', type=str,
default="conf/default.yaml", required=True)
args = parser.parse_args()
with open(args.config, "r") as f:
conf = YAML(typ='safe').load(f)
conf = namedtuple("Conf", conf)(**conf)
args.conf = conf
main(args)
|
from setuptools import setup, find_packages
with open('README.rst', encoding='UTF-8') as f:
readme = f.read()
setup(
name='hr',
version='1.0.0',
description='Command line user export utility',
long_description=readme,
author='Your Name',
author_email='[email protected]',
packages=find_packages('src'),
package_dir={'': 'src'},
install_requires=[],
entry_points={
'console_scripts': 'hr=hr.cli:main',
},
)
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Fri Jul 12 08:41:10 2019
@author: yuhanyao
"""
import numpy as np
import pandas as pd
import extinction
from copy import deepcopy
from astropy.io import fits
import astropy.io.ascii as asci
from astropy.table import Table, vstack
from astropy.cosmology import FlatLambdaCDM
cosmo = FlatLambdaCDM(H0=70., Om0=0.275)
from collections import OrderedDict as odict
from helper.app2abs import get_date_span, add_datecol, add_physcol
def get_at2019dge(colorplt=False):
t_max = 58583.2
z = 0.0213
ebv = 0.022
tspecs = np.array([58583.59659, # Keck spec JD at midpoint
58597.46300334, # DBSP spec
58582.146159,
58583.129278,
58595.213889,
58668.492577])
# LT, SEDM, P48
tb = pd.read_csv('../data/otherSN/Yao2020/lc_at2019dge.csv')
result = odict([('z', z),
('ebv', ebv),
('t_max', t_max),
('tspecs', tspecs),
("tb", tb)])
tb = tb[tb.instrument!="P60+SEDM"]
if colorplt==False:
return result
else:
ix = np.any([tb["instrument"].values == "P48",
tb["instrument"].values == "LT+IOO"], axis=0)
tb = tb[ix]
ix = np.in1d(tb["filter"].values, np.array(['g', 'r', 'i', 'z']))
tb = tb[ix]
dates = get_date_span(tb)
datesave = []
for i in range(len(dates)):
x = dates[i]
ix = tb["date"].values == x
tbsub = tb[ix]
if len(tbsub)!=0:
flts = tbsub['filter'].values
if "r" in flts and np.sum(np.unique(flts))!=1:
datesave.append(x)
datesave = np.array(datesave)
mcolor = []
mcolor_unc = []
mjds = []
colorname = []
for i in range(len(datesave)):
rmag = 99
gmag = 99
imag = 99
zmag = 99
x = datesave[i]
ix = tb["date"].values == x
tbsub = tb[ix]
gtb = tbsub[tbsub["filter"].values=="g"]
rtb = tbsub[tbsub["filter"].values=="r"]
itb = tbsub[tbsub["filter"].values=="i"]
ztb = tbsub[tbsub["filter"].values=="z"]
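# Combine same-night detections per band with an inverse-variance weighted mean (weights 1/emag**2);
# the combined uncertainty is 1/sqrt(sum of weights).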
if len(gtb)!=0:
gmjds = gtb["mjd"].values
gmags = gtb["mag0"].values
gemags = gtb["emag"].values
gwtgs = 1/gemags**2
gmag = np.sum(gmags * gwtgs) / np.sum(gwtgs)
gmjd = np.sum(gmjds * gwtgs) / np.sum(gwtgs)
gemag = 1/ np.sqrt(np.sum(gwtgs))
if len(rtb)!=0:
rmjds = rtb["mjd"].values
rmags = rtb["mag0"].values
remags = rtb["emag"].values
rwtgs = 1/remags**2
rmag = np.sum(rmags * rwtgs) / np.sum(rwtgs)
rmjd = np.sum(rmjds * rwtgs) / np.sum(rwtgs)
remag = 1/ np.sqrt(np.sum(rwtgs))
if len(itb)!=0:
imjds = itb["mjd"].values
imags = itb["mag0"].values
iemags = itb["emag"].values
iwtgs = 1/iemags**2
imag = np.sum(imags * iwtgs) / np.sum(iwtgs)
imjd = np.sum(imjds * iwtgs) / np.sum(iwtgs)
iemag = 1/ np.sqrt(np.sum(iwtgs))
if len(ztb)!=0:
zmjds = ztb["mjd"].values
zmags = ztb["mag0"].values
zemags = ztb["emag"].values
zwtgs = 1/zemags**2
zmag = np.sum(zmags * zwtgs) / np.sum(zwtgs)
zmjd = np.sum(zmjds * zwtgs) / np.sum(zwtgs)
zemag = 1/ np.sqrt(np.sum(zwtgs))
if len(gtb)!=0 and len(rtb)!=0:
mcolor.append(gmag - rmag)
mjds.append( 0.5 * (gmjd + rmjd) )
mcolor_unc.append( np.sqrt(gemag**2 + remag**2) )
colorname.append("gmr")
if len(rtb)!=0 and len(itb)!=0:
mcolor.append(rmag - imag)
mjds.append( 0.5 * (rmjd + imjd) )
mcolor_unc.append( np.sqrt(remag**2 + iemag**2) )
colorname.append("rmi")
if len(itb)!=0 and len(ztb)!=0:
mcolor.append(imag - zmag)
mjds.append( 0.5 * (imjd + zmjd) )
mcolor_unc.append( np.sqrt(iemag**2 + zemag**2) )
colorname.append("imz")
ctb = Table(data = [mjds, mcolor, mcolor_unc, colorname],
names = ["mjd", "c", "ec", "cname"])
ctb['tmax_rf'] = (ctb['mjd'] - t_max) / (1+z)
ctb = ctb.to_pandas()
result.update({"ctb": ctb})
return result
def get_iPTF14gqr(colorplt=False):
"""
De+18, Table S1, already corrected for extinction
"""
z = 0.063
# ebv = 0.082
D = cosmo.luminosity_distance([z])[0].value * 1e+6 # in pc
dis_mod = 5*np.log10(D / 10)
t_exp = 56943.74 #
t_max = 56950.26 # g band max light + 3
tb = Table(fits.open('../data/otherSN/De2018/tables1.fit')[1].data)
tb.rename_column('MJD' , 'mjd')
tb['texp_rf'] = (tb['mjd'] - t_exp) / (1+z)
tb['tmax_rf'] = (tb['mjd'] - t_max) / (1+z)
# tb = tb[tb["Filt"]=="g "]
tb = tb[~np.isnan(tb['e_mag'])]
tb.rename_column('Filt' , 'filter')
tb.rename_column('e_mag' , 'emag')
tb.rename_column('mag' , 'mag0')
ixg = tb['filter']=="g "
ixB = tb['filter']=="B "
ixV = tb['filter']=="V "
ixr = tb['filter']=="r "
ixi = tb['filter']=="i "
ixUVW1 = tb['filter']=="UVW1"
ixUVW2 = tb['filter']=="UVW2"
tb['wave'] = np.zeros(len(tb))
tb['wave'][ixUVW2] = 2079
tb['wave'][ixUVW1] = 2614
tb['wave'][ixB] = 4359
tb['wave'][ixg] = 4814
tb['wave'][ixV] = 5430
tb['wave'][ixr] = 6422
tb['wave'][ixi] = 7883
tb['mag0_abs'] = tb['mag0'] - dis_mod
tb = tb.to_pandas()
tb["texp_rf"] = tb["Phase"]
tb = tb.drop(columns=["recno", "Phase", "l_mag"])
"""
ix = np.any([tb['Tel'].values=="P60 ",
tb["filter"].values=='g '], axis=0)
tb = tb[ix]
"""
tb = add_datecol(tb)
tb = add_physcol(tb)
tt = tb["tmax_rf"].values
epochs = [" " for x in range(len(tt))]
epochs = np.array(epochs)
"""
ix = (tt>-5.6)&(tt<-5.55)
epochs[ix] = "epoch 01"
"""
ix = (tt>-5.55)&(tt<-5.50)
epochs[ix] = "epoch 02"
ix = (tt>-5.50)&(tt<-5.45)
epochs[ix] = "epoch 03"
ix = (tt>-5.2)&(tt<-5.0)
epochs[ix] = "epoch 04"
ix = (tt>-5.0)&(tt<-4.7)
epochs[ix] = "epoch 05"
ix = (tt>-4.7)&(tt<-4.5)
epochs[ix] = "epoch 06"
ix = (tt>-4.5)&(tt<-3.5)
epochs[ix] = "epoch 07"
ix = (tt>-3.5)&(tt<-2.5)
epochs[ix] = "epoch 08"
ix = (tt>-1.5)&(tt<-1)
epochs[ix] = "epoch 09"
ix = (tt>-1)&(tt<-0.82)
epochs[ix] = "epoch 10"
ix = (tt>-0.82)&(tt<-0.6)
epochs[ix] = "epoch 11"
ix = (tt>-0.5)&(tt<0.5)
epochs[ix] = "epoch 12"
ix = (tt>0.5)&(tt<1.5)
epochs[ix] = "epoch 13"
ix = (tt>1.5)&(tt<2.5)
epochs[ix] = "epoch 14"
ix = (tt>3.5)&(tt<4.5)
epochs[ix] = "epoch 15"
ix = (tt>4.5)&(tt<5)
epochs[ix] = "epoch 16"
ix = (tt>5)&(tt<5.6)
epochs[ix] = "epoch 17"
ix = (tt>5.6)&(tt<5.8)
epochs[ix] = "epoch 18"
ix = (tt>6)&(tt<7)
epochs[ix] = "epoch 19"
ix = (tt>7)&(tt<8)
epochs[ix] = "epoch 20"
ix = (tt>8)&(tt<9)
epochs[ix] = "epoch 21"
tb["epoch"] = epochs
if colorplt==False:
return tb
else:
tb = add_datecol(tb)
ix = np.in1d(tb["filter"].values, np.array(['g ', 'r ', 'i ']))
tb = tb[ix]
dates = get_date_span(tb)
datesave = []
for i in range(len(dates)):
x = dates[i]
ix = tb["date"].values == x
tbsub = tb[ix]
if len(tbsub)!=0:
flts = tbsub['filter'].values
if "r " in flts and np.sum(np.unique(flts))!=1:
datesave.append(x)
datesave = np.array(datesave)
mcolor = []
mcolor_unc = []
mjds = []
colorname = []
for i in range(len(datesave)):
x = datesave[i]
ix = tb["date"].values == x
tbsub = tb[ix]
gtb = tbsub[tbsub["filter"].values=="g "]
rtb = tbsub[tbsub["filter"].values=="r "]
itb = tbsub[tbsub["filter"].values=="i "]
if len(gtb)!=0:
gmjds = gtb["mjd"].values
gmags = gtb["mag0"].values
gemags = gtb["emag"].values
gwtgs = 1/gemags**2
gmag = np.sum(gmags * gwtgs) / np.sum(gwtgs)
gmjd = np.sum(gmjds * gwtgs) / np.sum(gwtgs)
gemag = 1/ np.sqrt(np.sum(gwtgs))
if len(rtb)!=0:
rmjds = rtb["mjd"].values
rmags = rtb["mag0"].values
remags = rtb["emag"].values
rwtgs = 1/remags**2
rmag = np.sum(rmags * rwtgs) / np.sum(rwtgs)
rmjd = np.sum(rmjds * rwtgs) / np.sum(rwtgs)
remag = 1/ np.sqrt(np.sum(rwtgs))
if len(itb)!=0:
imjds = itb["mjd"].values
imags = itb["mag0"].values
iemags = itb["emag"].values
iwtgs = 1/iemags**2
imag = np.sum(imags * iwtgs) / np.sum(iwtgs)
imjd = np.sum(imjds * iwtgs) / np.sum(iwtgs)
iemag = 1/ np.sqrt(np.sum(iwtgs))
if len(gtb)!=0 and len(rtb)!=0:
mcolor.append(gmag - rmag)
mjds.append( 0.5 * (gmjd + rmjd) )
mcolor_unc.append( np.sqrt(gemag**2 + remag**2) )
colorname.append("gmr")
if len(rtb)!=0 and len(itb)!=0:
mcolor.append(rmag - imag)
mjds.append( 0.5 * (rmjd + imjd) )
mcolor_unc.append( np.sqrt(remag**2 + iemag**2) )
colorname.append("rmi")
ctb = Table(data = [mjds, mcolor, mcolor_unc, colorname],
names = ["mjd", "c", "ec", "cname"])
ctb['tmax_rf'] = (ctb['mjd'] - t_max) / (1+z)
ctb = ctb.to_pandas()
return ctb
def get_sn2005ek(colorplt=False):
"""
Drout+13, Table 1, not corrected for extinction
"""
z = 0.016551
ebv = 0.210
D = cosmo.luminosity_distance([z])[0].value * 1e+6 # in pc
dis_mod = 5*np.log10(D / 10)
t_max = 53639.9
print ("adopt r band t_max from Drout+13")
# tb = pd.read_csv('/Users/yuhanyao/Desktop/ZTF18abfcmjw/data/Drout2013/table1', sep='\t')
# tb = tb.drop(columns=["Unnamed: 6"])
mjds = np.array([53639.3, 53640.3, 53641.3, 53642.2, 53643.2, 53645.3,
53646.5, 53648.0, 53649.2, 53650.4, 53651.3, 53652.5,
53654.2, 53655.2, 53656.2, 53657.2])
Bmags = np.array([18.25, 18.38, 18.65, np.nan, 19.10, 19.71,
20.07, np.nan, 20.67, 20.90, 21.05, np.nan,
21.74, np.nan, np.nan, np.nan])
Bmag_uncs = np.array([0.02, 0.03, 0.02, np.nan, 0.05, 0.07,
0.07, np.nan, 0.04, 0.04, 0.04, np.nan,
0.12, np.nan, np.nan, np.nan])
Vmags = np.array([17.83, 18.03, 17.92, np.nan, 18.24, 18.66,
18.93, 19.48, 19.63, 19.86, 19.98, 20.35,
20.60, 20.74, 20.88, 21.22])
Vmag_uncs = np.array([0.02, 0.03, 0.01, np.nan, 0.02, 0.02,
0.02, 0.06, 0.03, 0.03, 0.04, 0.05,
0.08, 0.10, 0.08, 0.13])
Rmags = np.array([17.46, 17.41, 17.60, 17.69, 17.86, 18.18,
np.nan, 18.83, 19.03, 19.26, 19.48, 19.75,
20.08, np.nan, 20.47, np.nan])
Rmag_uncs = np.array([0.01, 0.02, 0.01, 0.02, 0.01, 0.01,
np.nan, 0.03, 0.02, 0.02, 0.02, 0.04,
0.05, np.nan, 0.08, np.nan])
Imags = np.array([17.20, 17.13, 17.18, np.nan, 17.47, 17.71,
np.nan, 18.13, 18.26, 18.51, 18.61, 18.74,
19.01, np.nan, 19.47, np.nan])
Imag_uncs = np.array([0.02, 0.04, 0.02, np.nan, 0.03, 0.02,
np.nan, 0.06, 0.02, 0.02, 0.02, 0.03,
0.05, np.nan, 0.06, np.nan])
mymjds = np.hstack([mjds, mjds, mjds, mjds])
mymags = np.hstack([Bmags, Vmags, Rmags, Imags])
myemags = np.hstack([Bmag_uncs, Vmag_uncs, Rmag_uncs, Imag_uncs])
myfilts = np.hstack([ np.repeat("B", len(Bmags)),
np.repeat("V", len(Bmags)),
np.repeat("R", len(Rmags)),
np.repeat("I", len(Imags)) ])
ix = ~np.isnan(mymags)
tb = pd.DataFrame({'mjd': mymjds[ix],
'mag': mymags[ix],
'emag': myemags[ix],
"filter": myfilts[ix]})
ixB = tb['filter'].values=="B"
ixV = tb['filter'].values=="V"
ixR = tb['filter'].values=="R"
ixI = tb['filter'].values=="I"
tb['wave'] = np.zeros(len(tb))
tb['wave'].values[ixB] = 4359
tb['wave'].values[ixV] = 5430
tb['wave'].values[ixR] = 6349
tb['wave'].values[ixI] = 8797
tb['mag0'] = tb['mag'] - extinction.ccm89(tb['wave'].values, 3.1*ebv, 3.1)
tb['mag0_abs'] = tb['mag0'] - dis_mod
tb['tmax_rf'] = (tb['mjd'] - t_max) / (1+z)
if colorplt==False:
return tb
else:
tb = add_datecol(tb)
ix = np.in1d(tb["filter"].values, np.array(['B', 'R', 'I']))
tb = tb[ix]
dates = get_date_span(tb)
datesave = []
for i in range(len(dates)):
x = dates[i]
ix = tb["date"].values == x
tbsub = tb[ix]
if len(tbsub)!=0:
flts = tbsub['filter'].values
if "R" in flts and np.sum(np.unique(flts))!=1:
datesave.append(x)
datesave = np.array(datesave)
mcolor = []
mcolor_unc = []
mjds = []
colorname = []
for i in range(len(datesave)):
x = datesave[i]
ix = tb["date"].values == x
tbsub = tb[ix]
gtb = tbsub[tbsub["filter"].values=="B"]
rtb = tbsub[tbsub["filter"].values=="R"]
itb = tbsub[tbsub["filter"].values=="I"]
if len(gtb)!=0:
gmjds = gtb["mjd"].values
gmags = gtb["mag0"].values
gemags = gtb["emag"].values
gwtgs = 1/gemags**2
gmag = np.sum(gmags * gwtgs) / np.sum(gwtgs)
gmjd = np.sum(gmjds * gwtgs) / np.sum(gwtgs)
gemag = 1/ np.sqrt(np.sum(gwtgs))
if len(rtb)!=0:
rmjds = rtb["mjd"].values
rmags = rtb["mag0"].values
remags = rtb["emag"].values
rwtgs = 1/remags**2
rmag = np.sum(rmags * rwtgs) / np.sum(rwtgs)
rmjd = np.sum(rmjds * rwtgs) / np.sum(rwtgs)
remag = 1/ np.sqrt(np.sum(rwtgs))
if len(itb)!=0:
imjds = itb["mjd"].values
imags = itb["mag0"].values
iemags = itb["emag"].values
iwtgs = 1/iemags**2
imag = np.sum(imags * iwtgs) / np.sum(iwtgs)
imjd = np.sum(imjds * iwtgs) / np.sum(iwtgs)
iemag = 1/ np.sqrt(np.sum(iwtgs))
if len(gtb)!=0 and len(rtb)!=0:
mcolor.append(gmag - rmag)
mjds.append( 0.5 * (gmjd + rmjd) )
mcolor_unc.append( np.sqrt(gemag**2 + remag**2) )
colorname.append("BmR")
if len(rtb)!=0 and len(itb)!=0:
mcolor.append(rmag - imag)
mjds.append( 0.5 * (rmjd + imjd) )
mcolor_unc.append( np.sqrt(remag**2 + iemag**2) )
colorname.append("RmI")
ctb = Table(data = [mjds, mcolor, mcolor_unc, colorname],
names = ["mjd", "c", "ec", "cname"])
ctb['tmax_rf'] = (ctb['mjd'] - t_max) / (1+z)
ctb = ctb.to_pandas()
return ctb
def get_sn2018gep():
z = 0.03154
ebv = 0.01
D = cosmo.luminosity_distance([z])[0].value * 1e+6 # in pc
dis_mod = 5*np.log10(D / 10)
tb = asci.read('../data/otherSN/SN2018gep/table5.txt')
tb = tb.to_pandas()
tb = tb.rename(columns={'col1' : 'jd',
'col2': 'phase',
'col3': 'instrument',
'col4': 'filter',
'col5': 'mag',
'col6': 'emag'})
tb = tb[tb.instrument == "P48+ZTF"]
ixg = tb['filter'].values == "g"
ixr = tb['filter'].values == "r"
ixi = tb['filter'].values == "i"
tb['wave'] = np.zeros(len(tb))
tb['wave'].values[ixg] = 4814
tb['wave'].values[ixr] = 6422
tb['wave'].values[ixi] = 7883
tb['mag0'] = tb['mag'] - extinction.ccm89(tb['wave'].values, 3.1*ebv, 3.1)
tb['mag0_abs'] = tb['mag0'] - dis_mod
tb = tb[tb.wave!=0]
tb["mjd"] = tb["jd"] - 2400000.5
t_max = 2458374.6845 - 2400000.5 # from my eye-inspection
tb['tmax_rf'] = (tb['mjd'] - t_max) / (1+z)
return tb
def get_iPTF16asu():
"""
table already corrected for galactic extinction
"""
z = 0.187
ebv = 0.0
D = cosmo.luminosity_distance([z])[0].value * 1e+6 # in pc
dis_mod = 5*np.log10(D / 10)
tb = asci.read('../data/otherSN/Whitesides2017/table1.txt')
tb = tb.to_pandas()
tb = tb[tb["col4"].values!=">"]
tb = tb.rename(columns={'col1' : 'mjd',
'col2': 'tmax_rf',
'col3': 'filter',
"col4": 'mag',
'col5': 'emag',
'col6': 'instrument'})
ixg = tb['filter'].values == "g"
ixr = tb['filter'].values == "r"
ixi = tb['filter'].values == "i"
tb['wave'] = np.zeros(len(tb))
tb['wave'].values[ixg] = 4814
tb['wave'].values[ixr] = 6422
tb['wave'].values[ixi] = 7883
tb["mag"] = np.array(tb["mag"].values, dtype = np.float)
#tb["emag"] = np.array(tb["emag"].values, dtype = np.float)
tb['mag0'] = tb['mag'] - extinction.ccm89(tb['wave'].values, 3.1*ebv, 3.1)
tb['mag0_abs'] = tb['mag0'] - dis_mod
tb = tb[tb.wave!=0]
return tb
def get_iPTF16hgs(colorplt = False):
"""
De+18, Table 1, already corrected for extinction
"""
z = 0.017
ebv = 0
D = cosmo.luminosity_distance([z])[0].value * 1e+6 # in pc
dis_mod = 5*np.log10(D / 10)
tb = pd.read_csv('../data/otherSN/iPTF16hgs/table1.txt', sep="\t")
tb = tb.drop(columns=["Unnamed: 5"])
tb = tb.rename(columns={'Filter' : 'filter',
'MJD': 'mjd'})
tb = tb[~np.array([x[0]=='>' for x in tb['Magnitude'].values])]
tb['mag'] = np.array([float(x.split(" +or-")[0]) for x in tb['Magnitude'].values])
tb['emag'] = np.array([float(x.split(" +or-")[1]) for x in tb['Magnitude'].values])
tb = tb.drop(columns=["Magnitude"])
ixg = tb['filter'].values == "g"
ixr = tb['filter'].values == "r"
ixi = tb['filter'].values == "i"
tb['wave'] = np.zeros(len(tb))
tb['wave'].values[ixg] = 4814
tb['wave'].values[ixr] = 6422
tb['wave'].values[ixi] = 7883
tb['mag0'] = tb['mag'] - extinction.ccm89(tb['wave'].values, 3.1*ebv, 3.1)
tb['mag0_abs'] = tb['mag0'] - dis_mod
t_max = 57691.59 # from the paper
tb['tmax_of'] = tb['mjd'] - t_max
tb['tmax_rf'] = (tb['mjd'] - t_max) / (1+z)
"""
plt.errorbar(tb["tmax_rf"].values[ixg], tb["mag"].values[ixg], tb["emag"].values[ixg], fmt=".g")
plt.errorbar(tb["tmax_rf"].values[ixr], tb["mag"].values[ixr], tb["emag"].values[ixr], fmt=".r")
plt.errorbar(tb["tmax_rf"].values[ixi], tb["mag"].values[ixi], tb["emag"].values[ixi], fmt=".y")
"""
tb = add_datecol(tb)
tb = add_physcol(tb)
#tb = tb.drop(columns=["datetime64"])
if colorplt==False:
return tb
else:
#tb = tb[tb.mjd > 55352.5]
#tb = tb[tb.mjd < 55593.5]
dates = get_date_span(tb)
datesave = []
for i in range(len(dates)):
x = dates[i]
ix = tb["date"].values == x
tbsub = tb[ix]
if len(tbsub)!=0:
flts = tbsub['filter'].values
if "r" in flts and np.sum(np.unique(flts))!=1:
datesave.append(x)
datesave = np.array(datesave)
mcolor = []
mcolor_unc = []
mjds = []
colorname = []
for i in range(len(datesave)):
x = datesave[i]
ix = tb["date"].values == x
tbsub = tb[ix]
gtb = tbsub[tbsub["filter"].values=="g"]
rtb = tbsub[tbsub["filter"].values=="r"]
itb = tbsub[tbsub["filter"].values=="i"]
if len(gtb)!=0:
gmjds = gtb["mjd"].values
gmags = gtb["mag0"].values
gemags = gtb["emag"].values
gwtgs = 1/gemags**2
gmag = np.sum(gmags * gwtgs) / np.sum(gwtgs)
gmjd = np.sum(gmjds * gwtgs) / np.sum(gwtgs)
gemag = 1/ np.sqrt(np.sum(gwtgs))
if len(rtb)!=0:
rmjds = rtb["mjd"].values
rmags = rtb["mag0"].values
remags = rtb["emag"].values
rwtgs = 1/remags**2
rmag = np.sum(rmags * rwtgs) / np.sum(rwtgs)
rmjd = np.sum(rmjds * rwtgs) / np.sum(rwtgs)
remag = 1/ np.sqrt(np.sum(rwtgs))
if len(itb)!=0:
imjds = itb["mjd"].values
imags = itb["mag0"].values
iemags = itb["emag"].values
iwtgs = 1/iemags**2
imag = np.sum(imags * iwtgs) / np.sum(iwtgs)
imjd = np.sum(imjds * iwtgs) / np.sum(iwtgs)
iemag = 1/ np.sqrt(np.sum(iwtgs))
if len(gtb)!=0 and len(rtb)!=0:
mcolor.append(gmag - rmag)
mjds.append( 0.5 * (gmjd + rmjd) )
mcolor_unc.append( np.sqrt(gemag**2 + remag**2) )
colorname.append("gmr")
if len(rtb)!=0 and len(itb)!=0:
mcolor.append(rmag - imag)
mjds.append( 0.5 * (rmjd + imjd) )
mcolor_unc.append( np.sqrt(remag**2 + iemag**2) )
colorname.append("rmi")
ctb = Table(data = [mjds, mcolor, mcolor_unc, colorname],
names = ["mjd", "c", "ec", "cname"])
ctb['tmax_rf'] = (ctb['mjd'] - t_max) / (1+z)
ctb = ctb.to_pandas()
return ctb
def get_ptf10iuv(colorplt = False):
"""
Kasliwal+12, Table 3, not corrected for extinction
"""
z = 0.0251485
ebv = 0.0371 # SFD
D = cosmo.luminosity_distance([z])[0].value * 1e+6 # in pc
dis_mod = 5*np.log10(D / 10)
print ("adopt g band t_max estimated by myself")
t_max = 55357.387
tb = pd.read_csv('../data/otherSN/Kasliwal2012/PTF10iuv', sep='\t')
tb = tb.drop(columns=["Unnamed: 4"])
tb = tb.rename(columns={'Filter' : 'filter',
'MJD': 'mjd'})
tb = tb[~np.array([x[0]=='>' for x in tb['Mag'].values])]
tb['mag'] = np.array([float(x.split(" +or-")[0]) for x in tb['Mag'].values])
tb['emag'] = np.array([float(x.split(" +or-")[1]) for x in tb['Mag'].values])
tb = tb.drop(columns=["Mag"])
ixg = tb['filter'].values == "g"
ixr = tb['filter'].values == "r"
ixi = tb['filter'].values == "i"
ixz = tb['filter'].values == "z"
ixB = tb['filter'].values == "B"
tb['wave'] = np.zeros(len(tb))
tb['wave'].values[ixB] = 4359
tb['wave'].values[ixg] = 4814
tb['wave'].values[ixr] = 6422
tb['wave'].values[ixi] = 7883
tb['wave'].values[ixz] = 9670
tb['mag0'] = tb['mag'] - extinction.ccm89(tb['wave'].values, 3.1*ebv, 3.1)
tb['mag0_abs'] = tb['mag0'] - dis_mod
tb['tmax_rf'] = (tb['mjd'] - t_max) / (1+z)
tb = tb.sort_values(by = "mjd")
if colorplt==False:
return tb
else:
tb = add_datecol(tb)
ix = np.in1d(tb["filter"].values, np.array(['g', 'r', 'i']))
tb = tb[ix]
tb = tb[tb.mjd > 55352.5]
tb = tb[tb.mjd < 55593.5]
dates = get_date_span(tb)
datesave = []
for i in range(len(dates)):
x = dates[i]
ix = tb["date"].values == x
tbsub = tb[ix]
if len(tbsub)!=0:
flts = tbsub['filter'].values
if "r" in flts and np.sum(np.unique(flts))!=1:
datesave.append(x)
datesave = np.array(datesave)
mcolor = []
mcolor_unc = []
mjds = []
colorname = []
for i in range(len(datesave)):
x = datesave[i]
ix = tb["date"].values == x
tbsub = tb[ix]
gtb = tbsub[tbsub["filter"].values=="g"]
rtb = tbsub[tbsub["filter"].values=="r"]
itb = tbsub[tbsub["filter"].values=="i"]
if len(gtb)!=0:
gmjds = gtb["mjd"].values
gmags = gtb["mag0"].values
gemags = gtb["emag"].values
gwtgs = 1/gemags**2
gmag = np.sum(gmags * gwtgs) / np.sum(gwtgs)
gmjd = np.sum(gmjds * gwtgs) / np.sum(gwtgs)
gemag = 1/ np.sqrt(np.sum(gwtgs))
if len(rtb)!=0:
rmjds = rtb["mjd"].values
rmags = rtb["mag0"].values
remags = rtb["emag"].values
rwtgs = 1/remags**2
rmag = np.sum(rmags * rwtgs) / np.sum(rwtgs)
rmjd = np.sum(rmjds * rwtgs) / np.sum(rwtgs)
remag = 1/ np.sqrt(np.sum(rwtgs))
if len(itb)!=0:
imjds = itb["mjd"].values
imags = itb["mag0"].values
iemags = itb["emag"].values
iwtgs = 1/iemags**2
imag = np.sum(imags * iwtgs) / np.sum(iwtgs)
imjd = np.sum(imjds * iwtgs) / np.sum(iwtgs)
iemag = 1/ np.sqrt(np.sum(iwtgs))
if len(gtb)!=0 and len(rtb)!=0:
mcolor.append(gmag - rmag)
mjds.append( 0.5 * (gmjd + rmjd) )
mcolor_unc.append( np.sqrt(gemag**2 + remag**2) )
colorname.append("gmr")
if len(rtb)!=0 and len(itb)!=0:
mcolor.append(rmag - imag)
mjds.append( 0.5 * (rmjd + imjd) )
mcolor_unc.append( np.sqrt(remag**2 + iemag**2) )
colorname.append("rmi")
ctb = Table(data = [mjds, mcolor, mcolor_unc, colorname],
names = ["mjd", "c", "ec", "cname"])
ctb['tmax_rf'] = (ctb['mjd'] - t_max) / (1+z)
ctb = ctb.to_pandas()
return ctb
def get_sn2010X(colorplt = False):
"""
Kasliwal+10
"""
ebv = 0.1249 # SFD
z = 0.015
D = cosmo.luminosity_distance([z])[0].value * 1e+6 # in pc
dis_mod = 5*np.log10(D / 10)
t_max = 55239.2
tb = pd.read_csv("../data/otherSN/Kasliwal2010/photometry.csv")
tb = tb.drop(columns=["source", "event", "instrument"])
tb = tb[tb.upperlimit=="F"]
tb = tb.drop(columns=["upperlimit"])
tb = tb.rename(columns={'magnitude' : 'mag',
'e_magnitude': 'emag',
'band': 'filter',
'time': 'mjd'})
ixr = tb['filter'].values == "r"
ixg = tb['filter'].values == "g"
ixi = tb['filter'].values == "i"
tb['wave'] = np.zeros(len(tb))
tb['wave'].values[ixg] = 4814
tb['wave'].values[ixr] = 6422
tb['wave'].values[ixi] = 7883
tb['mag0'] = tb['mag'] - extinction.ccm89(tb['wave'].values, 3.1*ebv, 3.1)
tb['mag0_abs'] = tb['mag0'] - dis_mod
tb['tmax_rf'] = (tb['mjd'] - t_max) / (1+z)
if colorplt==False:
return tb
else:
tb = add_datecol(tb)
dates = get_date_span(tb)
datesave = []
for i in range(len(dates)):
x = dates[i]
ix = tb["date"].values == x
tbsub = tb[ix]
if len(tbsub)!=0:
flts = tbsub['filter'].values
if "r" in flts and np.sum(np.unique(flts))!=1:
datesave.append(x)
datesave = np.array(datesave)
mcolor = []
mcolor_unc = []
mjds = []
colorname = []
for i in range(len(datesave)):
x = datesave[i]
ix = tb["date"].values == x
tbsub = tb[ix]
gtb = tbsub[tbsub["filter"].values=="g"]
rtb = tbsub[tbsub["filter"].values=="r"]
itb = tbsub[tbsub["filter"].values=="i"]
if len(gtb)!=0:
gmjds = gtb["mjd"].values
gmags = gtb["mag"].values
gemags = gtb["emag"].values
gwtgs = 1/gemags**2
gmag = np.sum(gmags * gwtgs) / np.sum(gwtgs)
gmjd = np.sum(gmjds * gwtgs) / np.sum(gwtgs)
gemag = 1/ np.sqrt(np.sum(gwtgs))
else:
gmag = 0
if len(rtb)!=0:
rmjds = rtb["mjd"].values
rmags = rtb["mag"].values
remags = rtb["emag"].values
rwtgs = 1/remags**2
rmag = np.sum(rmags * rwtgs) / np.sum(rwtgs)
rmjd = np.sum(rmjds * rwtgs) / np.sum(rwtgs)
remag = 1/ np.sqrt(np.sum(rwtgs))
else:
rmag = 0
if len(itb)!=0:
imjds = itb["mjd"].values
imags = itb["mag"].values
iemags = itb["emag"].values
iwtgs = 1/iemags**2
imag = np.sum(imags * iwtgs) / np.sum(iwtgs)
imjd = np.sum(imjds * iwtgs) / np.sum(iwtgs)
iemag = 1/ np.sqrt(np.sum(iwtgs))
else:
imag = 0
if gmag and rmag:
mcolor.append(gmag - rmag)
mjds.append( 0.5 * (gmjd + rmjd) )
mcolor_unc.append( np.sqrt(gemag**2 + remag**2) )
colorname.append("gmr")
if rmag and imag:
mcolor.append(rmag - imag)
mjds.append( 0.5 * (rmjd + imjd) )
mcolor_unc.append( np.sqrt(remag**2 + iemag**2) )
colorname.append("rmi")
ctb = Table(data = [mjds, mcolor, mcolor_unc, colorname],
names = ["mjd", "c", "ec", "cname"])
ctb['tmax_rf'] = (ctb['mjd'] - t_max) / (1+z)
ctb = ctb.to_pandas()
return ctb
def digital_latex(mjds, phases, rs, insts):
ix = np.array(["\\pm" in x for x in rs])
mjds = np.array(mjds)[ix]
phases = np.array(phases)[ix]
insts = np.array(insts)[ix]
rs = np.array(rs)[ix]
mags = np.array([float(x.split("\\pm")[0]) for x in rs])
emags = np.array([float(x.split("\\pm")[1][:5]) for x in rs])
ix1 = np.array([x.split(" ")[1]!="LCOGT" for x in insts])
ix2 = np.array([x.split(" ")[1]!="P60" for x in insts])
ix3 = emags<0.5
ix = ix1&ix2&ix3
return mjds[ix], phases[ix], mags[ix], emags[ix]
def get_sn2018kzr(colorplt = False):
"""
Owen R. Mcbrien 2019
"""
ebv = 0.113/3.1
z = 0.053
D = cosmo.luminosity_distance([z])[0].value * 1e+6 # in pc
dis_mod = 5*np.log10(D / 10)
t_max = 58480.422+0.1
f = open('../data/otherSN/Mcbrien2019/table1.tex')
lines = f.readlines()
f.close()
lines = lines[:-4]
dates = [x.split("&")[0] for x in lines]
mjds = [float(x.split("&")[1]) for x in lines]
phases = [float(x.split("&")[2].replace('$', '').replace('\t', '')) for x in lines]
gs = [x.split("&")[3].replace('$', '') for x in lines]
rs = [x.split("&")[4].replace('$', '') for x in lines]
iis = [x.split("&")[5].replace('$', '') for x in lines]
zs = [x.split("&")[6].replace('$', '') for x in lines]
insts = [x.split("&")[7] for x in lines]
dtg = digital_latex(mjds, phases, gs, insts)
dtr = digital_latex(mjds, phases, rs, insts)
dti = digital_latex(mjds, phases, iis, insts)
filt = np.hstack([np.repeat("g", len(dtg[0])),
np.repeat("r", len(dtr[0])),
np.repeat("i", len(dti[0]))])
phase = np.hstack([dtg[1], dtr[1], dti[1]])
mag = np.hstack([dtg[2], dtr[2], dti[2]])
emag = np.hstack([dtg[3], dtr[3], dti[3]])
mjd = np.hstack([dtg[0], dtr[0], dti[0]])
tb = Table(data = [(mjd - t_max) / (1+z), mag, emag, filt],
names = ['tmax_rf', 'mag', 'emag', 'filter'])
ixr = tb['filter'] == "r"
ixg = tb['filter'] == "g"
ixi = tb['filter'] == "i"
tb['wave'] = np.zeros(len(tb))
tb['wave'][ixg] = 4814
tb['wave'][ixr] = 6422
tb['wave'][ixi] = 7883
tb['mag0'] = tb['mag'] - extinction.ccm89(tb['wave'], 3.1*ebv, 3.1)
tb['mag0_abs'] = tb['mag0'] - dis_mod
tb = tb.to_pandas()
return tb
def _read_2019bkc_band():
tb = pd.read_csv('../data/otherSN/Chen2020/table2', sep='\t')
tb = tb.drop(columns = ['JD -', 'Telescope', 'Unnamed: 13', 'J', 'H'])
tb = tb.rename(columns={'#Date': 'Date'})
colpool = ['B', 'V', 'R', 'I', 'g', 'r', 'i']
for magcol in colpool:
tb1 = tb[tb[magcol].values!='cdots']
tb1.insert(2, "filter", np.repeat(magcol,len(tb1)))
mags= tb1[magcol]
xx = [float(x.split("(")[0]) for x in mags]
exx = [float(x.split("(")[1].split(")")[0])/100 for x in mags]
tb1.insert(2, "mag", xx)
tb1.insert(2, "emag", exx)
tb1 = tb1.drop(columns = colpool)
if magcol == "B":
tb1['wave'] = np.ones(len(tb1))* 4450
tb2 = deepcopy(tb1)
else:
if magcol == "r" or magcol == "R":
tb1['wave'] = np.ones(len(tb1))* 6422
elif magcol == "i" or magcol == "I":
tb1['wave'] = np.ones(len(tb1))* 7500
elif magcol == "g":
df = pd.DataFrame({"Date":['2019 Feb 28'],
"Phase":[-5.5],
"emag":[0.03],
"mag": [18.7],
"filter": ["g"]})
tb1 = pd.concat([tb1, df])
tb1['wave'] = np.ones(len(tb1))* 4810
elif magcol == "V":
tb1['wave'] = np.ones(len(tb1))* 5510
tb2 = pd.concat([tb2, tb1])
return tb2
def get_sn2019bkc(colorplt = False):
"""
Chen 2019, Figure 5
"""
ebv = 0.06 # SFD2011
z = 0.020
D = cosmo.luminosity_distance([z])[0].value * 1e+6 # in pc
dis_mod = 5*np.log10(D / 10)
tb = _read_2019bkc_band()
tb['mag0'] = tb['mag'].values- extinction.ccm89(tb['wave'].values, 3.1*ebv, 3.1)
tb['mag0_abs'] = tb['mag0'].values - dis_mod
tb['tmax_rf'] = tb['Phase'].values / (1+z)
if colorplt==False:
return tb
else:
#tb = add_datecol(tb)
tb['date'] = np.floor(tb['tmax_rf'].values)
datesave = np.array(tb['date'].values)
mcolor = []
mcolor_unc = []
mjds = []
colorname = []
for i in range(len(datesave)):
x = datesave[i]
ix = tb["date"].values == x
tbsub = tb[ix]
gtb = tbsub[tbsub["filter"].values=="g"]
rtb = tbsub[tbsub["filter"].values=="r"]
itb = tbsub[tbsub["filter"].values=="i"]
if len(gtb)!=0:
gmjds = gtb["tmax_rf"].values
gmags = gtb["mag0"].values
gemags = np.ones(len(gtb)) * 0.1
gwtgs = 1/gemags**2
gmag = np.sum(gmags * gwtgs) / np.sum(gwtgs)
gmjd = np.sum(gmjds * gwtgs) / np.sum(gwtgs)
gemag = 1/ np.sqrt(np.sum(gwtgs))
else:
gmag = 0
if len(rtb)!=0:
rmjds = rtb["tmax_rf"].values
rmags = rtb["mag0"].values
remags = np.ones(len(rtb)) * 0.1
rwtgs = 1/remags**2
rmag = np.sum(rmags * rwtgs) / np.sum(rwtgs)
rmjd = np.sum(rmjds * rwtgs) / np.sum(rwtgs)
remag = 1/ np.sqrt(np.sum(rwtgs))
else:
rmag = 0
if len(itb)!=0:
imjds = itb["tmax_rf"].values
imags = itb["mag0"].values
iemags = np.ones(len(itb)) * 0.1
iwtgs = 1/iemags**2
imag = np.sum(imags * iwtgs) / np.sum(iwtgs)
imjd = np.sum(imjds * iwtgs) / np.sum(iwtgs)
iemag = 1/ np.sqrt(np.sum(iwtgs))
else:
imag = 0
if gmag and rmag:
mcolor.append(gmag - rmag)
mjds.append( 0.5 * (gmjd + rmjd) )
mcolor_unc.append( np.sqrt(gemag**2 + remag**2) )
colorname.append("gmr")
if rmag and imag:
mcolor.append(rmag - imag)
mjds.append( 0.5 * (rmjd + imjd) )
mcolor_unc.append( np.sqrt(remag**2 + iemag**2) )
colorname.append("rmi")
ctb = Table(data = [mjds, mcolor, mcolor_unc, colorname],
names = ["tmax_rf", "c", "ec", "cname"])
ctb = ctb.to_pandas()
return ctb
def get_ptf09dav():
tb = asci.read('../data/otherSN/Sullivan2011/ptf09dav')
tb.rename_column('band', 'filter')
tb.rename_column('magnitude', 'mag')
tb.rename_column('e_magnitude', 'emag')
tb.remove_column("instrument")
tb = tb[tb['mag']>19.7]
ix = np.any([tb['filter']=='r', tb['filter']=='R'], axis=0)
tb = tb[ix]
tb['filter'] = 'r'  # normalize R-band labels to 'r'
z = 0.0359
D = cosmo.luminosity_distance([z])[0].value * 1e+6 # in pc
ebv = 0.044
dis_mod = 5*np.log10(D / 10)
t_max = 55054 # r band maximum
tb['wave'] = np.ones(len(tb))* 6422
tb['mag0'] = tb['mag'] - extinction.ccm89(tb['wave'], 3.1*ebv, 3.1)
tb['mag0_abs'] = tb['mag0'] - dis_mod
tb['tmax_rf'] = (tb['time'] - t_max) / (1+z)
tb['emag'] = np.ones(len(tb))*0.1
tb.remove_row(2)
tb['mag0_abs'][1] = -15.4
tb = tb.to_pandas()
return tb
def get_sn2002bj(colorplt = False):
z = 0.012029
D = cosmo.luminosity_distance([z])[0].value * 1e+6 # in pc
ebv = 0.0787
dis_mod = 5*np.log10(D / 10)
tb = asci.read('../data/otherSN/Poznanski2010/sn2002bj')
tb.remove_columns(['source', 'upperlimit', 'event', 'instrument'])
tb.rename_column('band', 'filter')
tb.rename_column('magnitude', 'mag')
tb.rename_column('e_magnitude', 'emag')
tb.rename_column('time', 'mjd')
ix = tb['filter']=='R'
tb["filter"][ix] = "r"
ix = np.any([tb['filter']=='r', tb['filter']=='B', tb['filter']=='I'], axis=0)
tb = tb[ix]
tb = tb[~tb['emag'].mask]
t_max = 52335.79-2 # r band maximum
ixr = tb["filter"] == "r"
ixB = tb["filter"] == "B"
ixI = tb["filter"] == "I"
tb['wave'] = np.ones(len(tb))
tb['wave'][ixr] = 6422
tb['wave'][ixB] = 4450
tb['wave'][ixI] = 8060
tb['mag0'] = tb['mag'] - extinction.ccm89(tb['wave'], 3.1*ebv, 3.1)
tb['mag0_abs'] = tb['mag0'] - dis_mod
tb['tmax_rf'] = (tb['mjd'] - t_max) / (1+z)
tb = tb.to_pandas()
if colorplt == False:
return tb
else:
tb = add_datecol(tb)
dates = get_date_span(tb)
datesave = []
for i in range(len(dates)):
x = dates[i]
ix = tb["date"].values == x
tbsub = tb[ix]
if len(tbsub)!=0:
flts = tbsub['filter'].values
if "r" in flts and np.sum(np.unique(flts))!=1:
datesave.append(x)
datesave = np.array(datesave)
mcolor = []
mcolor_unc = []
mjds = []
colorname = []
for i in range(len(datesave)):
x = datesave[i]
ix = tb["date"].values == x
tbsub = tb[ix]
gtb = tbsub[tbsub["filter"].values=="B"]
rtb = tbsub[tbsub["filter"].values=="r"]
itb = tbsub[tbsub["filter"].values=="I"]
if len(gtb)!=0:
gmjds = gtb["mjd"].values
gmags = gtb["mag0"].values
gemags = gtb["emag"].values
gwtgs = 1/gemags**2
gmag = np.sum(gmags * gwtgs) / np.sum(gwtgs)
gmjd = np.sum(gmjds * gwtgs) / np.sum(gwtgs)
gemag = 1/ np.sqrt(np.sum(gwtgs))
else:
gmag=0
if len(rtb)!=0:
rmjds = rtb["mjd"].values
rmags = rtb["mag0"].values
remags = rtb["emag"].values
rwtgs = 1/remags**2
rmag = np.sum(rmags * rwtgs) / np.sum(rwtgs)
rmjd = np.sum(rmjds * rwtgs) / np.sum(rwtgs)
remag = 1/ np.sqrt(np.sum(rwtgs))
else:
rmag = 0
if len(itb)!=0:
imjds = itb["mjd"].values
imags = itb["mag0"].values
iemags = itb["emag"].values
iwtgs = 1/iemags**2
imag = np.sum(imags * iwtgs) / np.sum(iwtgs)
imjd = np.sum(imjds * iwtgs) / np.sum(iwtgs)
iemag = 1/ np.sqrt(np.sum(iwtgs))
else:
imag = 0
if gmag and rmag:
mcolor.append(gmag - rmag)
mjds.append( 0.5 * (gmjd + rmjd) )
mcolor_unc.append( np.sqrt(gemag**2 + remag**2) )
colorname.append("BmR")
if rmag and imag:
mcolor.append(rmag - imag)
mjds.append( 0.5 * (rmjd + imjd) )
mcolor_unc.append( np.sqrt(remag**2 + iemag**2) )
colorname.append("RmI")
ctb = Table(data = [mjds, mcolor, mcolor_unc, colorname],
names = ["mjd", "c", "ec", "cname"])
ctb['tmax_rf'] = (ctb['mjd'] - t_max) / (1+z)
ctb = ctb.to_pandas()
return ctb
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import matplotlib.pyplot as plt
import numpy as np
# 50 mm rise in 1.5 s
# return in 2 s using the cycloidal motion scheme
# dwell
Ts = [1.5, 2.0, 0.75]
# total time
SigmaT = 0
for i in range(len(Ts)):
SigmaT += Ts[i]
# time Ti - interval
# SigmaT = 4.25
SigmaTi = np.linspace(0, SigmaT, int(SigmaT*100))
# angular velocity of the cam (rev/s); converted to rpm below
wleva = 1 / SigmaT
wleva_rpm = wleva * (60)
# cam rotation for each interval of follower motion
Betas = []
for i in range(len(Ts)):
Betas.append(wleva * Ts[i])
#-----------showing results--------
print("Times T:")
for i in range(len(Ts)):
print("T{} = {}".format(i, Ts[i]))
print("\nSuma total T = {}".format(SigmaT))
print("\nW de leva: {}rpm".format(wleva_rpm))
print("\nRotaciones de levas en intervalos Ti:")
for i in range(len(Betas)):
angle = Betas[i] * 360 # convert fraction of a revolution to degrees
print("B{} = {} rev -> {}°".format(i, Betas[i], angle))
#-----------plotting the intervals
# first interval
H1 = 50 #mm
DeltaR1 = (H1*SigmaTi) / Ts[0]
### second interval -> cycloidal return #############
H2 = 50 #mm
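# Cycloidal return law: s(t) = H2*(1 - t/T + sin(2*pi*t/T)/(2*pi)), shifted up by the 50 mm rise of the first interval.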
DeltaR2 = (H2 * (1-(SigmaTi/Ts[1])+((1/(2*np.pi))*
np.sin((2*np.pi*SigmaTi)/Ts[1])) )) + 50
########################################################
# third interval: dwell
DeltaR3 = 0
def convertion(d1,d2,d3,s):
lim1 = 0
lim2 = 0
for i in range(len(d1)):
#print("d2[{}]: {}".format(i, d2[i]))
if SigmaTi[i] <= Ts[0]:
lim1 = i
#print(SigmaTi[i])
if SigmaTi[i]> Ts[0] and SigmaTi[i] <= (Ts[0]+Ts[1]):
lim2 = i
#print(SigmaTi[i])
#print("-----indice de Ts[0]: {}".format(lim1))
result = np.array(d1[0:lim1])
# agregando los 50 de avance en y que dimos
result2 = np.array(d2[lim1+50:lim2+51])
result3 = np.array([0 for i in range(0, len(SigmaTi)-lim2-1)])
res = np.concatenate((result, result2), axis = None)
res = np.concatenate((res, result3), axis = None)
return res
res = convertion(DeltaR1, DeltaR2, DeltaR3, SigmaTi)
for i in range(len(res)):
#print(res[i])
pass
#for i in SigmaTi:
# print(i)
#print(len(res))
#print(len(SigmaTi))
# plotting
plt.ion()
fig = plt.figure()
ax = fig.add_subplot(111)
ax2 = fig.add_subplot(111)
line1, = ax.plot(SigmaTi, res, 'b-')
line2, = ax2.plot(SigmaTi, res, 'r-')
timing = 0
try:
while True:
for t in np.linspace(0, SigmaT, int(SigmaT*100)):
line1.set_ydata(res[timing])
line2.set_ydata(res)
fig.canvas.draw()
timing+=1;
# restart
timing = 0
except Exception as e:
print("some error:{}".format(e))
|
# Chap02-03/twitter_get_user_timeline_daterange.py
import sys
import json
from tweepy import Cursor
from twitter_client import get_twitter_client
from argparse import ArgumentParser
from datetime import datetime
from datetime import timezone
def get_parser():
parser = ArgumentParser("Clustering of followers")
parser.add_argument('--username')
parser.add_argument('--startdate')
return parser
if __name__ == '__main__':
parser = get_parser()
args = parser.parse_args()
print("Length of Arguments {}".format(args))
user = args.username
startdate = datetime.strptime(args.startdate, "%Y-%m-%d").date()
client = get_twitter_client()
fname = "user_timeline_{}.jsonl".format(user)
with open(fname, 'w') as f:
for page in Cursor(client.user_timeline, screen_name=user, count=200).pages(16):
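# 200 statuses per page x 16 pages covers roughly the ~3,200 most recent tweets the API exposes for a user timeline.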
for status in page:
created_at_time = datetime.strptime(status._json['created_at'], '%a %b %d %H:%M:%S %z %Y').replace(tzinfo=timezone.utc).astimezone(tz=None).date()
if created_at_time >= startdate:
f.write(json.dumps(status._json)+"\n")
|
"""Parse size notations."""
import re
import spacy
from traiter.const import FLOAT_RE
from traiter.const import INT_RE
from traiter.patterns.matcher_patterns import MatcherPatterns
from traiter.util import to_positive_float
from traiter.util import to_positive_int
from anoplura.pylib.const import COMMON_PATTERNS
from anoplura.pylib.const import REPLACE
from anoplura.pylib.const import TERMS
def list_to_re_choice(values):
"""Convert a list of values into a regex choice."""
values = sorted(values, key=lambda v: -len(v))
values = [re.escape(v) for v in values]
pattern = "|".join(values)
pattern = rf"({pattern})"
return pattern
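# e.g. list_to_re_choice(["mm", "cm"]) -> "(mm|cm)"; longer alternatives are sorted first so they win over shorter prefixes.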
UNITS_RE = [t["pattern"] for t in TERMS if t["label"] == "metric_length"]
UNITS_RE = "(?<![A-Za-z])" + list_to_re_choice(UNITS_RE) + r"\b"
BODY_PART_ENTITIES = """ body_part setae setae_abbrev seta seta_abbrev """.split()
LENGTH_ENTITIES = """ measurement mean sample """.split()
LENGTH_WORDS = """ length len """.split()
MAXIMUM = """ maximum max """.split()
WIDTH = """ width """.split()
DECODER = COMMON_PATTERNS | {
"bar": {"LOWER": {"IN": ["bar", "bars"]}},
"mean_word": {"LOWER": "mean"},
"punct": {"IS_PUNCT": True},
"n": {"LOWER": "n"},
"measurement": {"ENT_TYPE": "measurement"},
"mean": {"ENT_TYPE": "mean"},
"sample": {"ENT_TYPE": "sample"},
"total": {"LOWER": "total", "OP": "?"},
"part": {"ENT_TYPE": {"IN": BODY_PART_ENTITIES}},
"len": {"LOWER": {"IN": LENGTH_WORDS}},
"non_ent": {"ENT_TYPE": ""},
"max": {"LOWER": {"IN": MAXIMUM}},
"width": {"LOWER": {"IN": WIDTH}},
}
MEASUREMENT = MatcherPatterns(
"measurement",
decoder=DECODER,
patterns=[
"99.9 cm",
"99.9 - 99.9 cm",
],
)
MEAN = MatcherPatterns(
"mean",
decoder=DECODER,
patterns=["mean_word punct? 99.9 cm?"],
)
SAMPLE = MatcherPatterns(
"sample",
decoder=DECODER,
patterns=["n = 99"],
)
LENGTH = MatcherPatterns(
"length",
on_match="anoplura.length.v1",
decoder=DECODER,
patterns=[
"part len punct? measurement punct? mean? punct* sample? punct?",
(
"total? part len non_ent? non_ent? bar? punct* "
"measurement punct? mean? punct* sample? punct?"
),
],
)
MAX_WIDTH = MatcherPatterns(
"max_width",
on_match="anoplura.max_width.v1",
decoder=DECODER,
patterns=[
(
"max part width non_ent? non_ent? bar? punct* "
"measurement punct? mean? punct* sample? punct?"
),
],
)
@spacy.registry.misc(MAX_WIDTH.on_match)
def max_width(ent):
"""Enrich the match."""
measurement_parts(ent)
@spacy.registry.misc(LENGTH.on_match)
def length(ent):
"""Enrich a size match."""
measurement_parts(ent)
if ent.text.lower().find("total") > -1:
ent._.new_label = "total_length"
def measurement_parts(ent):
"""Fill in the measurement parts."""
data = {}
for token in ent:
label = token._.cached_label
if label in BODY_PART_ENTITIES:
data |= {label: REPLACE.get(token.lower_, token.lower_)}
if label == "measurement":
data |= measurement(token)
elif label == "mean":
data |= mean(token)
elif label == "sample":
data |= sample(token)
ent._.data = data
def measurement(token):
"""Enrich a measurement match."""
values = re.findall(FLOAT_RE, token.text)
values = [to_positive_float(v) for v in values]
data = {k: v for k, v in zip(["low", "high"], values)}
match = re.search(UNITS_RE, token.text)
units = match.group(0)
data["length_units"] = units
return data
def mean(token):
"""Convert the span into a single float."""
match = re.search(FLOAT_RE, token.text)
value = match.group(0)
match = re.search(UNITS_RE, token.text.lower())
units = match.group(0) if match else None
data = {"mean": to_positive_float(value)}
if units:
data["mean_units"] = units
return data
def sample(token):
"""Convert the span into a single integer."""
match = re.search(INT_RE, token.text)
value = match.group(0)
return {"n": to_positive_int(value)}
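# Sketch of what the helpers above return (token texts are illustrative and assume
# FLOAT_RE/UNITS_RE match plain decimals and "mm" via the metric_length terms):
# measurement(token) for a token spanning "1.5 - 2.0 mm" -> {"low": 1.5, "high": 2.0, "length_units": "mm"}
# mean(token)        for a token spanning "mean 1.8 mm"  -> {"mean": 1.8, "mean_units": "mm"}
# sample(token)      for a token spanning "n = 12"       -> {"n": 12}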
|
# -*- coding: utf-8 -*-
# Generated by the protocol buffer compiler. DO NOT EDIT!
# source: terra/wasm/v1beta1/query.proto
"""Generated protocol buffer code."""
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
from cosmos.base.v1beta1 import coin_pb2 as cosmos_dot_base_dot_v1beta1_dot_coin__pb2
from gogoproto import gogo_pb2 as gogoproto_dot_gogo__pb2
from google.api import annotations_pb2 as google_dot_api_dot_annotations__pb2
from terra.wasm.v1beta1 import wasm_pb2 as terra_dot_wasm_dot_v1beta1_dot_wasm__pb2
DESCRIPTOR = _descriptor.FileDescriptor(
name="terra/wasm/v1beta1/query.proto",
package="terra.wasm.v1beta1",
syntax="proto3",
serialized_options=b"Z(github.com/terra-money/core/x/wasm/types",
create_key=_descriptor._internal_create_key,
serialized_pb=b'\n\x1eterra/wasm/v1beta1/query.proto\x12\x12terra.wasm.v1beta1\x1a\x14gogoproto/gogo.proto\x1a\x1cgoogle/api/annotations.proto\x1a\x1dterra/wasm/v1beta1/wasm.proto\x1a\x1e\x63osmos/base/v1beta1/coin.proto"1\n\x14QueryCodeInfoRequest\x12\x0f\n\x07\x63ode_id\x18\x01 \x01(\x04:\x08\xe8\xa0\x1f\x00\x88\xa0\x1f\x00"N\n\x15QueryCodeInfoResponse\x12\x35\n\tcode_info\x18\x01 \x01(\x0b\x32\x1c.terra.wasm.v1beta1.CodeInfoB\x04\xc8\xde\x1f\x00"1\n\x14QueryByteCodeRequest\x12\x0f\n\x07\x63ode_id\x18\x01 \x01(\x04:\x08\xe8\xa0\x1f\x00\x88\xa0\x1f\x00"*\n\x15QueryByteCodeResponse\x12\x11\n\tbyte_code\x18\x01 \x01(\x0c">\n\x18QueryContractInfoRequest\x12\x18\n\x10\x63ontract_address\x18\x01 \x01(\t:\x08\xe8\xa0\x1f\x00\x88\xa0\x1f\x00"Z\n\x19QueryContractInfoResponse\x12=\n\rcontract_info\x18\x01 \x01(\x0b\x32 .terra.wasm.v1beta1.ContractInfoB\x04\xc8\xde\x1f\x00"p\n\x19QueryContractStoreRequest\x12\x18\n\x10\x63ontract_address\x18\x01 \x01(\t\x12/\n\tquery_msg\x18\x02 \x01(\x0c\x42\x1c\xfa\xde\x1f\x18\x65ncoding/json.RawMessage:\x08\xe8\xa0\x1f\x00\x88\xa0\x1f\x00"P\n\x1aQueryContractStoreResponse\x12\x32\n\x0cquery_result\x18\x01 \x01(\x0c\x42\x1c\xfa\xde\x1f\x18\x65ncoding/json.RawMessage"G\n\x14QueryRawStoreRequest\x12\x18\n\x10\x63ontract_address\x18\x01 \x01(\t\x12\x0b\n\x03key\x18\x02 \x01(\x0c:\x08\xe8\xa0\x1f\x00\x88\xa0\x1f\x00"%\n\x15QueryRawStoreResponse\x12\x0c\n\x04\x64\x61ta\x18\x01 \x01(\x0c"\x14\n\x12QueryParamsRequest"G\n\x13QueryParamsResponse\x12\x30\n\x06params\x18\x01 \x01(\x0b\x32\x1a.terra.wasm.v1beta1.ParamsB\x04\xc8\xde\x1f\x00\x32\xab\x07\n\x05Query\x12\x8c\x01\n\x08\x43odeInfo\x12(.terra.wasm.v1beta1.QueryCodeInfoRequest\x1a).terra.wasm.v1beta1.QueryCodeInfoResponse"+\x82\xd3\xe4\x93\x02%\x12#/terra/wasm/v1beta1/codes/{code_id}\x12\x96\x01\n\x08\x42yteCode\x12(.terra.wasm.v1beta1.QueryByteCodeRequest\x1a).terra.wasm.v1beta1.QueryByteCodeResponse"5\x82\xd3\xe4\x93\x02/\x12-/terra/wasm/v1beta1/codes/{code_id}/byte_code\x12\xa5\x01\n\x0c\x43ontractInfo\x12,.terra.wasm.v1beta1.QueryContractInfoRequest\x1a-.terra.wasm.v1beta1.QueryContractInfoResponse"8\x82\xd3\xe4\x93\x02\x32\x12\x30/terra/wasm/v1beta1/contracts/{contract_address}\x12\xad\x01\n\rContractStore\x12-.terra.wasm.v1beta1.QueryContractStoreRequest\x1a..terra.wasm.v1beta1.QueryContractStoreResponse"=\x82\xd3\xe4\x93\x02\x37\x12\x35/terra/wasm/v1beta1/contract/{contract_address}/store\x12\xa2\x01\n\x08RawStore\x12(.terra.wasm.v1beta1.QueryRawStoreRequest\x1a).terra.wasm.v1beta1.QueryRawStoreResponse"A\x82\xd3\xe4\x93\x02;\x12\x39/terra/wasm/v1beta1/contract/{contract_address}/store/raw\x12}\n\x06Params\x12&.terra.wasm.v1beta1.QueryParamsRequest\x1a\'.terra.wasm.v1beta1.QueryParamsResponse""\x82\xd3\xe4\x93\x02\x1c\x12\x1a/terra/wasm/v1beta1/paramsB*Z(github.com/terra-money/core/x/wasm/typesb\x06proto3',
dependencies=[
gogoproto_dot_gogo__pb2.DESCRIPTOR,
google_dot_api_dot_annotations__pb2.DESCRIPTOR,
terra_dot_wasm_dot_v1beta1_dot_wasm__pb2.DESCRIPTOR,
cosmos_dot_base_dot_v1beta1_dot_coin__pb2.DESCRIPTOR,
],
)
_QUERYCODEINFOREQUEST = _descriptor.Descriptor(
name="QueryCodeInfoRequest",
full_name="terra.wasm.v1beta1.QueryCodeInfoRequest",
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
_descriptor.FieldDescriptor(
name="code_id",
full_name="terra.wasm.v1beta1.QueryCodeInfoRequest.code_id",
index=0,
number=1,
type=4,
cpp_type=4,
label=1,
has_default_value=False,
default_value=0,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
create_key=_descriptor._internal_create_key,
),
],
extensions=[],
nested_types=[],
enum_types=[],
serialized_options=b"\350\240\037\000\210\240\037\000",
is_extendable=False,
syntax="proto3",
extension_ranges=[],
oneofs=[],
serialized_start=169,
serialized_end=218,
)
_QUERYCODEINFORESPONSE = _descriptor.Descriptor(
name="QueryCodeInfoResponse",
full_name="terra.wasm.v1beta1.QueryCodeInfoResponse",
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
_descriptor.FieldDescriptor(
name="code_info",
full_name="terra.wasm.v1beta1.QueryCodeInfoResponse.code_info",
index=0,
number=1,
type=11,
cpp_type=10,
label=1,
has_default_value=False,
default_value=None,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=b"\310\336\037\000",
file=DESCRIPTOR,
create_key=_descriptor._internal_create_key,
),
],
extensions=[],
nested_types=[],
enum_types=[],
serialized_options=None,
is_extendable=False,
syntax="proto3",
extension_ranges=[],
oneofs=[],
serialized_start=220,
serialized_end=298,
)
_QUERYBYTECODEREQUEST = _descriptor.Descriptor(
name="QueryByteCodeRequest",
full_name="terra.wasm.v1beta1.QueryByteCodeRequest",
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
_descriptor.FieldDescriptor(
name="code_id",
full_name="terra.wasm.v1beta1.QueryByteCodeRequest.code_id",
index=0,
number=1,
type=4,
cpp_type=4,
label=1,
has_default_value=False,
default_value=0,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
create_key=_descriptor._internal_create_key,
),
],
extensions=[],
nested_types=[],
enum_types=[],
serialized_options=b"\350\240\037\000\210\240\037\000",
is_extendable=False,
syntax="proto3",
extension_ranges=[],
oneofs=[],
serialized_start=300,
serialized_end=349,
)
_QUERYBYTECODERESPONSE = _descriptor.Descriptor(
name="QueryByteCodeResponse",
full_name="terra.wasm.v1beta1.QueryByteCodeResponse",
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
_descriptor.FieldDescriptor(
name="byte_code",
full_name="terra.wasm.v1beta1.QueryByteCodeResponse.byte_code",
index=0,
number=1,
type=12,
cpp_type=9,
label=1,
has_default_value=False,
default_value=b"",
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
create_key=_descriptor._internal_create_key,
),
],
extensions=[],
nested_types=[],
enum_types=[],
serialized_options=None,
is_extendable=False,
syntax="proto3",
extension_ranges=[],
oneofs=[],
serialized_start=351,
serialized_end=393,
)
_QUERYCONTRACTINFOREQUEST = _descriptor.Descriptor(
name="QueryContractInfoRequest",
full_name="terra.wasm.v1beta1.QueryContractInfoRequest",
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
_descriptor.FieldDescriptor(
name="contract_address",
full_name="terra.wasm.v1beta1.QueryContractInfoRequest.contract_address",
index=0,
number=1,
type=9,
cpp_type=9,
label=1,
has_default_value=False,
default_value=b"".decode("utf-8"),
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
create_key=_descriptor._internal_create_key,
),
],
extensions=[],
nested_types=[],
enum_types=[],
serialized_options=b"\350\240\037\000\210\240\037\000",
is_extendable=False,
syntax="proto3",
extension_ranges=[],
oneofs=[],
serialized_start=395,
serialized_end=457,
)
_QUERYCONTRACTINFORESPONSE = _descriptor.Descriptor(
name="QueryContractInfoResponse",
full_name="terra.wasm.v1beta1.QueryContractInfoResponse",
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
_descriptor.FieldDescriptor(
name="contract_info",
full_name="terra.wasm.v1beta1.QueryContractInfoResponse.contract_info",
index=0,
number=1,
type=11,
cpp_type=10,
label=1,
has_default_value=False,
default_value=None,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=b"\310\336\037\000",
file=DESCRIPTOR,
create_key=_descriptor._internal_create_key,
),
],
extensions=[],
nested_types=[],
enum_types=[],
serialized_options=None,
is_extendable=False,
syntax="proto3",
extension_ranges=[],
oneofs=[],
serialized_start=459,
serialized_end=549,
)
_QUERYCONTRACTSTOREREQUEST = _descriptor.Descriptor(
name="QueryContractStoreRequest",
full_name="terra.wasm.v1beta1.QueryContractStoreRequest",
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
_descriptor.FieldDescriptor(
name="contract_address",
full_name="terra.wasm.v1beta1.QueryContractStoreRequest.contract_address",
index=0,
number=1,
type=9,
cpp_type=9,
label=1,
has_default_value=False,
default_value=b"".decode("utf-8"),
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
create_key=_descriptor._internal_create_key,
),
_descriptor.FieldDescriptor(
name="query_msg",
full_name="terra.wasm.v1beta1.QueryContractStoreRequest.query_msg",
index=1,
number=2,
type=12,
cpp_type=9,
label=1,
has_default_value=False,
default_value=b"",
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=b"\372\336\037\030encoding/json.RawMessage",
file=DESCRIPTOR,
create_key=_descriptor._internal_create_key,
),
],
extensions=[],
nested_types=[],
enum_types=[],
serialized_options=b"\350\240\037\000\210\240\037\000",
is_extendable=False,
syntax="proto3",
extension_ranges=[],
oneofs=[],
serialized_start=551,
serialized_end=663,
)
_QUERYCONTRACTSTORERESPONSE = _descriptor.Descriptor(
name="QueryContractStoreResponse",
full_name="terra.wasm.v1beta1.QueryContractStoreResponse",
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
_descriptor.FieldDescriptor(
name="query_result",
full_name="terra.wasm.v1beta1.QueryContractStoreResponse.query_result",
index=0,
number=1,
type=12,
cpp_type=9,
label=1,
has_default_value=False,
default_value=b"",
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=b"\372\336\037\030encoding/json.RawMessage",
file=DESCRIPTOR,
create_key=_descriptor._internal_create_key,
),
],
extensions=[],
nested_types=[],
enum_types=[],
serialized_options=None,
is_extendable=False,
syntax="proto3",
extension_ranges=[],
oneofs=[],
serialized_start=665,
serialized_end=745,
)
_QUERYRAWSTOREREQUEST = _descriptor.Descriptor(
name="QueryRawStoreRequest",
full_name="terra.wasm.v1beta1.QueryRawStoreRequest",
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
_descriptor.FieldDescriptor(
name="contract_address",
full_name="terra.wasm.v1beta1.QueryRawStoreRequest.contract_address",
index=0,
number=1,
type=9,
cpp_type=9,
label=1,
has_default_value=False,
default_value=b"".decode("utf-8"),
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
create_key=_descriptor._internal_create_key,
),
_descriptor.FieldDescriptor(
name="key",
full_name="terra.wasm.v1beta1.QueryRawStoreRequest.key",
index=1,
number=2,
type=12,
cpp_type=9,
label=1,
has_default_value=False,
default_value=b"",
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
create_key=_descriptor._internal_create_key,
),
],
extensions=[],
nested_types=[],
enum_types=[],
serialized_options=b"\350\240\037\000\210\240\037\000",
is_extendable=False,
syntax="proto3",
extension_ranges=[],
oneofs=[],
serialized_start=747,
serialized_end=818,
)
_QUERYRAWSTORERESPONSE = _descriptor.Descriptor(
name="QueryRawStoreResponse",
full_name="terra.wasm.v1beta1.QueryRawStoreResponse",
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
_descriptor.FieldDescriptor(
name="data",
full_name="terra.wasm.v1beta1.QueryRawStoreResponse.data",
index=0,
number=1,
type=12,
cpp_type=9,
label=1,
has_default_value=False,
default_value=b"",
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
create_key=_descriptor._internal_create_key,
),
],
extensions=[],
nested_types=[],
enum_types=[],
serialized_options=None,
is_extendable=False,
syntax="proto3",
extension_ranges=[],
oneofs=[],
serialized_start=820,
serialized_end=857,
)
_QUERYPARAMSREQUEST = _descriptor.Descriptor(
name="QueryParamsRequest",
full_name="terra.wasm.v1beta1.QueryParamsRequest",
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[],
extensions=[],
nested_types=[],
enum_types=[],
serialized_options=None,
is_extendable=False,
syntax="proto3",
extension_ranges=[],
oneofs=[],
serialized_start=859,
serialized_end=879,
)
_QUERYPARAMSRESPONSE = _descriptor.Descriptor(
name="QueryParamsResponse",
full_name="terra.wasm.v1beta1.QueryParamsResponse",
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
_descriptor.FieldDescriptor(
name="params",
full_name="terra.wasm.v1beta1.QueryParamsResponse.params",
index=0,
number=1,
type=11,
cpp_type=10,
label=1,
has_default_value=False,
default_value=None,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=b"\310\336\037\000",
file=DESCRIPTOR,
create_key=_descriptor._internal_create_key,
),
],
extensions=[],
nested_types=[],
enum_types=[],
serialized_options=None,
is_extendable=False,
syntax="proto3",
extension_ranges=[],
oneofs=[],
serialized_start=881,
serialized_end=952,
)
_QUERYCODEINFORESPONSE.fields_by_name[
"code_info"
].message_type = terra_dot_wasm_dot_v1beta1_dot_wasm__pb2._CODEINFO
_QUERYCONTRACTINFORESPONSE.fields_by_name[
"contract_info"
].message_type = terra_dot_wasm_dot_v1beta1_dot_wasm__pb2._CONTRACTINFO
_QUERYPARAMSRESPONSE.fields_by_name[
"params"
].message_type = terra_dot_wasm_dot_v1beta1_dot_wasm__pb2._PARAMS
DESCRIPTOR.message_types_by_name["QueryCodeInfoRequest"] = _QUERYCODEINFOREQUEST
DESCRIPTOR.message_types_by_name["QueryCodeInfoResponse"] = _QUERYCODEINFORESPONSE
DESCRIPTOR.message_types_by_name["QueryByteCodeRequest"] = _QUERYBYTECODEREQUEST
DESCRIPTOR.message_types_by_name["QueryByteCodeResponse"] = _QUERYBYTECODERESPONSE
DESCRIPTOR.message_types_by_name["QueryContractInfoRequest"] = _QUERYCONTRACTINFOREQUEST
DESCRIPTOR.message_types_by_name[
"QueryContractInfoResponse"
] = _QUERYCONTRACTINFORESPONSE
DESCRIPTOR.message_types_by_name[
"QueryContractStoreRequest"
] = _QUERYCONTRACTSTOREREQUEST
DESCRIPTOR.message_types_by_name[
"QueryContractStoreResponse"
] = _QUERYCONTRACTSTORERESPONSE
DESCRIPTOR.message_types_by_name["QueryRawStoreRequest"] = _QUERYRAWSTOREREQUEST
DESCRIPTOR.message_types_by_name["QueryRawStoreResponse"] = _QUERYRAWSTORERESPONSE
DESCRIPTOR.message_types_by_name["QueryParamsRequest"] = _QUERYPARAMSREQUEST
DESCRIPTOR.message_types_by_name["QueryParamsResponse"] = _QUERYPARAMSRESPONSE
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
QueryCodeInfoRequest = _reflection.GeneratedProtocolMessageType(
"QueryCodeInfoRequest",
(_message.Message,),
{
"DESCRIPTOR": _QUERYCODEINFOREQUEST,
"__module__": "terra.wasm.v1beta1.query_pb2"
# @@protoc_insertion_point(class_scope:terra.wasm.v1beta1.QueryCodeInfoRequest)
},
)
_sym_db.RegisterMessage(QueryCodeInfoRequest)
QueryCodeInfoResponse = _reflection.GeneratedProtocolMessageType(
"QueryCodeInfoResponse",
(_message.Message,),
{
"DESCRIPTOR": _QUERYCODEINFORESPONSE,
"__module__": "terra.wasm.v1beta1.query_pb2"
# @@protoc_insertion_point(class_scope:terra.wasm.v1beta1.QueryCodeInfoResponse)
},
)
_sym_db.RegisterMessage(QueryCodeInfoResponse)
QueryByteCodeRequest = _reflection.GeneratedProtocolMessageType(
"QueryByteCodeRequest",
(_message.Message,),
{
"DESCRIPTOR": _QUERYBYTECODEREQUEST,
"__module__": "terra.wasm.v1beta1.query_pb2"
# @@protoc_insertion_point(class_scope:terra.wasm.v1beta1.QueryByteCodeRequest)
},
)
_sym_db.RegisterMessage(QueryByteCodeRequest)
QueryByteCodeResponse = _reflection.GeneratedProtocolMessageType(
"QueryByteCodeResponse",
(_message.Message,),
{
"DESCRIPTOR": _QUERYBYTECODERESPONSE,
"__module__": "terra.wasm.v1beta1.query_pb2"
# @@protoc_insertion_point(class_scope:terra.wasm.v1beta1.QueryByteCodeResponse)
},
)
_sym_db.RegisterMessage(QueryByteCodeResponse)
QueryContractInfoRequest = _reflection.GeneratedProtocolMessageType(
"QueryContractInfoRequest",
(_message.Message,),
{
"DESCRIPTOR": _QUERYCONTRACTINFOREQUEST,
"__module__": "terra.wasm.v1beta1.query_pb2"
# @@protoc_insertion_point(class_scope:terra.wasm.v1beta1.QueryContractInfoRequest)
},
)
_sym_db.RegisterMessage(QueryContractInfoRequest)
QueryContractInfoResponse = _reflection.GeneratedProtocolMessageType(
"QueryContractInfoResponse",
(_message.Message,),
{
"DESCRIPTOR": _QUERYCONTRACTINFORESPONSE,
"__module__": "terra.wasm.v1beta1.query_pb2"
# @@protoc_insertion_point(class_scope:terra.wasm.v1beta1.QueryContractInfoResponse)
},
)
_sym_db.RegisterMessage(QueryContractInfoResponse)
QueryContractStoreRequest = _reflection.GeneratedProtocolMessageType(
"QueryContractStoreRequest",
(_message.Message,),
{
"DESCRIPTOR": _QUERYCONTRACTSTOREREQUEST,
"__module__": "terra.wasm.v1beta1.query_pb2"
# @@protoc_insertion_point(class_scope:terra.wasm.v1beta1.QueryContractStoreRequest)
},
)
_sym_db.RegisterMessage(QueryContractStoreRequest)
QueryContractStoreResponse = _reflection.GeneratedProtocolMessageType(
"QueryContractStoreResponse",
(_message.Message,),
{
"DESCRIPTOR": _QUERYCONTRACTSTORERESPONSE,
"__module__": "terra.wasm.v1beta1.query_pb2"
# @@protoc_insertion_point(class_scope:terra.wasm.v1beta1.QueryContractStoreResponse)
},
)
_sym_db.RegisterMessage(QueryContractStoreResponse)
QueryRawStoreRequest = _reflection.GeneratedProtocolMessageType(
"QueryRawStoreRequest",
(_message.Message,),
{
"DESCRIPTOR": _QUERYRAWSTOREREQUEST,
"__module__": "terra.wasm.v1beta1.query_pb2"
# @@protoc_insertion_point(class_scope:terra.wasm.v1beta1.QueryRawStoreRequest)
},
)
_sym_db.RegisterMessage(QueryRawStoreRequest)
QueryRawStoreResponse = _reflection.GeneratedProtocolMessageType(
"QueryRawStoreResponse",
(_message.Message,),
{
"DESCRIPTOR": _QUERYRAWSTORERESPONSE,
"__module__": "terra.wasm.v1beta1.query_pb2"
# @@protoc_insertion_point(class_scope:terra.wasm.v1beta1.QueryRawStoreResponse)
},
)
_sym_db.RegisterMessage(QueryRawStoreResponse)
QueryParamsRequest = _reflection.GeneratedProtocolMessageType(
"QueryParamsRequest",
(_message.Message,),
{
"DESCRIPTOR": _QUERYPARAMSREQUEST,
"__module__": "terra.wasm.v1beta1.query_pb2"
# @@protoc_insertion_point(class_scope:terra.wasm.v1beta1.QueryParamsRequest)
},
)
_sym_db.RegisterMessage(QueryParamsRequest)
QueryParamsResponse = _reflection.GeneratedProtocolMessageType(
"QueryParamsResponse",
(_message.Message,),
{
"DESCRIPTOR": _QUERYPARAMSRESPONSE,
"__module__": "terra.wasm.v1beta1.query_pb2"
# @@protoc_insertion_point(class_scope:terra.wasm.v1beta1.QueryParamsResponse)
},
)
_sym_db.RegisterMessage(QueryParamsResponse)
DESCRIPTOR._options = None
_QUERYCODEINFOREQUEST._options = None
_QUERYCODEINFORESPONSE.fields_by_name["code_info"]._options = None
_QUERYBYTECODEREQUEST._options = None
_QUERYCONTRACTINFOREQUEST._options = None
_QUERYCONTRACTINFORESPONSE.fields_by_name["contract_info"]._options = None
_QUERYCONTRACTSTOREREQUEST.fields_by_name["query_msg"]._options = None
_QUERYCONTRACTSTOREREQUEST._options = None
_QUERYCONTRACTSTORERESPONSE.fields_by_name["query_result"]._options = None
_QUERYRAWSTOREREQUEST._options = None
_QUERYPARAMSRESPONSE.fields_by_name["params"]._options = None
_QUERY = _descriptor.ServiceDescriptor(
name="Query",
full_name="terra.wasm.v1beta1.Query",
file=DESCRIPTOR,
index=0,
serialized_options=None,
create_key=_descriptor._internal_create_key,
serialized_start=955,
serialized_end=1894,
methods=[
_descriptor.MethodDescriptor(
name="CodeInfo",
full_name="terra.wasm.v1beta1.Query.CodeInfo",
index=0,
containing_service=None,
input_type=_QUERYCODEINFOREQUEST,
output_type=_QUERYCODEINFORESPONSE,
serialized_options=b"\202\323\344\223\002%\022#/terra/wasm/v1beta1/codes/{code_id}",
create_key=_descriptor._internal_create_key,
),
_descriptor.MethodDescriptor(
name="ByteCode",
full_name="terra.wasm.v1beta1.Query.ByteCode",
index=1,
containing_service=None,
input_type=_QUERYBYTECODEREQUEST,
output_type=_QUERYBYTECODERESPONSE,
serialized_options=b"\202\323\344\223\002/\022-/terra/wasm/v1beta1/codes/{code_id}/byte_code",
create_key=_descriptor._internal_create_key,
),
_descriptor.MethodDescriptor(
name="ContractInfo",
full_name="terra.wasm.v1beta1.Query.ContractInfo",
index=2,
containing_service=None,
input_type=_QUERYCONTRACTINFOREQUEST,
output_type=_QUERYCONTRACTINFORESPONSE,
serialized_options=b"\202\323\344\223\0022\0220/terra/wasm/v1beta1/contracts/{contract_address}",
create_key=_descriptor._internal_create_key,
),
_descriptor.MethodDescriptor(
name="ContractStore",
full_name="terra.wasm.v1beta1.Query.ContractStore",
index=3,
containing_service=None,
input_type=_QUERYCONTRACTSTOREREQUEST,
output_type=_QUERYCONTRACTSTORERESPONSE,
serialized_options=b"\202\323\344\223\0027\0225/terra/wasm/v1beta1/contract/{contract_address}/store",
create_key=_descriptor._internal_create_key,
),
_descriptor.MethodDescriptor(
name="RawStore",
full_name="terra.wasm.v1beta1.Query.RawStore",
index=4,
containing_service=None,
input_type=_QUERYRAWSTOREREQUEST,
output_type=_QUERYRAWSTORERESPONSE,
serialized_options=b"\202\323\344\223\002;\0229/terra/wasm/v1beta1/contract/{contract_address}/store/raw",
create_key=_descriptor._internal_create_key,
),
_descriptor.MethodDescriptor(
name="Params",
full_name="terra.wasm.v1beta1.Query.Params",
index=5,
containing_service=None,
input_type=_QUERYPARAMSREQUEST,
output_type=_QUERYPARAMSRESPONSE,
serialized_options=b"\202\323\344\223\002\034\022\032/terra/wasm/v1beta1/params",
create_key=_descriptor._internal_create_key,
),
],
)
_sym_db.RegisterServiceDescriptor(_QUERY)
DESCRIPTOR.services_by_name["Query"] = _QUERY
# @@protoc_insertion_point(module_scope)
|
from django.db import models
# Create your models here.
class Sample_Data(models.Model):
name = models.CharField(max_length=100)
age = models.IntegerField(null=True)
|
from PyQt6.QtCore import *
from PyQt6.QtGui import *
class QXPixmap(QPixmap):
"""
extension of QPixmap
contains cached scaled versions
cached grayscaled
cached QIcon
"""
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self._cache = {}
    def scaled_cached(self, width: int, height: int, aspectRatioMode: Qt.AspectRatioMode = Qt.AspectRatioMode.KeepAspectRatio) -> 'QXPixmap':
"""
get scaled version from cache or create.
"""
key = (width, height)
pixmap = self._cache.get(key, None)
if pixmap is None:
pixmap = self._cache[key] = QXPixmap( self.scaled(width, height, aspectRatioMode=aspectRatioMode, transformMode=Qt.TransformationMode.SmoothTransformation) )
return pixmap
def as_QIcon(self) -> QIcon:
icon = self._cache.get( QIcon, None )
if icon is None:
icon = self._cache[QIcon] = QIcon(self)
return icon
def grayscaled_cached(self) -> 'QXPixmap':
"""
get grayscaled version from cache or create.
"""
key = 'grayscaled'
pixmap = self._cache.get(key, None)
if pixmap is None:
pixmap = QXPixmap(self)
qp = QPainter(pixmap)
qp.setCompositionMode(QPainter.CompositionMode.CompositionMode_SourceIn)
qp.fillRect( pixmap.rect(), QColor(127,127,127,255) )
qp.end()
pixmap = self._cache[key] = pixmap
return pixmap
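# Minimal usage sketch (assumes a running Qt application; the image path is hypothetical):
# app = QGuiApplication([])            # a Qt application must exist before pixmaps are created
# pix = QXPixmap("image.png")
# thumb = pix.scaled_cached(64, 64)    # a second call with (64, 64) returns the cached pixmap
# gray = pix.grayscaled_cached()
# icon = pix.as_QIcon()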
|
import os
import tarfile
from time import time
import glob
import gdown
folders = {
1999: '1ObZwt6cb97XogQoKu5vQvcnOzeQX4h_5',
2001: '1ishYRAffV0pdos8wufxQwC4y00b89sd4',
2002: '1_yoN_uqIcPv976jTJ3Aqyk1nuOPOaAvP',
2004: '1bnRoWv3jtXTJrd1waVsqiWd-1k24XtyB',
2005: '1g9Ap2i-yVqia1SQ0R4pAmdB_jvRBqogj',
2007: '1eScmenYHUFvPxL_fe1UmT_QiK8M_VZ6c',
2008: '1eScmenYHUFvPxL_fe1UmT_QiK8M_VZ6c',
2009: '1ov0hQ3uDlg63K-zIbisE58QYyYgMtzOJ',
2010: '1b6qydpGDjSuLcA5E_E2gQi2zQYmrzeNn',
2011: '1R1I03oyEhk8WaAWhnYNsfsR9gDmfVugL',
2013: '1se8x4MWy-dbK5UrPvNoRmRELAK8n0C6r',
2014: '1HyY0PAzzlTG2ciy_zKJ1MOgsNIeNvLWi',
2015: '1Ou2jROi2gwXkknQChtjNWn35dVktxIBC'
}
if not os.path.exists('dataset'):
os.mkdir('dataset')
# Download the tar files
for YEAR in folders.keys():
try:
print("-------------------------------------")
print("Downloading year ", YEAR)
output = f'dataset/{YEAR}.tar'
url = "https://drive.google.com/uc?id={id}".format(id=folders[YEAR])
gdown.download(url, output, quiet=False)
    except Exception:
print(f"Unable to download {YEAR}")
print("-------------------------------------")
# Extract the tar files
for f in glob.glob('dataset/*.tar'):
t1 = time()
with tarfile.open(f'{f}', 'r') as fh:
fh.extractall()
print(f"Extracted {f}.tar in {time()-t1} s")
|
import os
import time
import datetime
import numpy as np
import cv2
import torch
import torch.nn as nn
import torch.backends.cudnn as cudnn
from torch.utils.data import DataLoader
#import network
import segment_network
#import resnet_version
import segment_test_dataset
import segment_utils
import torch.autograd as autograd
from torch.autograd import Variable
import nibabel as nib
import csv
from scipy import ndimage
from skimage import measure
from medpy.io import load,save
import SimpleITK as sitk
from scipy.ndimage import label
import random
def seg_tester_baseline(opt, epoch, segmentor):
results_path = opt.sample_path + '/' + 'epoch%d' % (epoch)
if os.path.isdir(results_path) is False:
os.mkdir(results_path)
# Define the dataset
testset = segment_test_dataset.SegmentTestDataset(opt)
print('The overall number of images equals to %d' % len(testset))
# Define the dataloader
dataloader = DataLoader(testset, batch_size = 1, shuffle = False, num_workers = 0, pin_memory = True)
# ----------------------------------------
# Testing
# ----------------------------------------
# Testing loop
for batch_idx, (img, liver, file_name) in enumerate(dataloader):
img = img.cuda()
liver = liver.cuda()
file_name = file_name[0]
# Generator output
with torch.no_grad():
# sent to network
seg_input = img
seg_output = segmentor(seg_input)
seg_result = torch.sigmoid(seg_output)
seg_result[seg_result>opt.threshold]=1
seg_result[seg_result!=1]=0
seg_result = seg_result * liver
segment_utils.save_sample_png(sample_folder = results_path, sample_name = file_name, img=seg_result, pixel_max_cnt = 255)
print('---------------------' + file_name + ' has been finished----------------------')
# inpaint results to nii.gz label
out_folder = opt.sample_path + '/' + 'epoch%d_out_nii' % (epoch)
if not os.path.exists(out_folder):
os.makedirs(out_folder)
segment_utils.pngs_2_niigz(sample_folder = results_path, liver_nii_folder=os.path.join(opt.baseroot_test, 'Liver_nii') , out_folder=out_folder, d08=(opt.dataset==8))
# evaluate result
segment_utils.evaluate_nii(dir_out=out_folder, gt_nii_path=os.path.join(opt.baseroot_test, 'Gt_nii'), epoch_name=str(epoch))
def seg_tester_co_ttt(opt, checkpoint_name, lr, iters, generator_path, segmentor_path, reconstructor_path, threshold=0.5):
# reproduce
cv2.setNumThreads(0)
cv2.ocl.setUseOpenCL(False)
seed = 66
print("[ Using Seed : ", seed, " ]")
torch.manual_seed(seed)
torch.cuda.manual_seed_all(seed)
torch.cuda.manual_seed(seed)
np.random.seed(seed)
random.seed(seed)
torch.backends.cudnn.deterministic = True
torch.backends.cudnn.benchmark = False
os.environ["PYTHONHASHSEED"] = str(seed)
# set path
results_path = opt.sample_path + checkpoint_name + '/' + 'out_png'
if os.path.isdir(results_path) is False:
os.mkdir(results_path)
# Define the dataset
trainset = segment_test_dataset.SegmentTestDataset(opt)
print('The overall number of images equals to %d' % len(trainset))
# Define the dataloader
dataloader = DataLoader(trainset, batch_size = 1, shuffle = False, num_workers = 0, pin_memory = True)
#-------------------------------------------------------------------------------------------------------------------------
# Build networks
generator = segment_utils.create_Unet(opt, n_type='U64')
reconstructor = segment_utils.create_Unet(opt, n_type='U64', in_channels=2)
segmentor = segment_utils.create_Unet(opt, n_type='U64')
# To device
generator = generator.cuda()
segmentor = segmentor.cuda()
reconstructor = reconstructor.cuda()
# load parameters
generator.load_state_dict(torch.load(generator_path))
segmentor.load_state_dict(torch.load(segmentor_path))
reconstructor.load_state_dict(torch.load(reconstructor_path))
# Optimizers
parameterg = list(generator.parameters())
optimizer_g = torch.optim.Adam(parameterg, lr = lr, betas=(0.9, 0.99))
parameters = list(segmentor.parameters())
optimizer_s = torch.optim.Adam(parameters, lr = lr, betas=(0.9, 0.99))
#-------------------------------------------------------------------------------------------------------------------------
# ----------------------------------------
# Testing
# ----------------------------------------
# Testing loop
for batch_idx, (img, liver, lesion, file_name) in enumerate(dataloader):
img = img.cuda()
lesion = lesion.cuda()
liver = liver.cuda()
file_name = file_name[0]
# load parameters
generator.load_state_dict(torch.load(generator_path))
segmentor.load_state_dict(torch.load(segmentor_path))
reconstructor.load_state_dict(torch.load(reconstructor_path))
generator.eval()
segmentor.eval()
reconstructor.eval()
# get original images
with torch.no_grad():
# generator
gen_output = torch.sigmoid(generator(img))
# segmentor
seg_output = segmentor(img)
#-----------------------------sign--------------------------------------
sign_img = torch.sign(img - gen_output)*liver[:,0:1,:,:]
# reconstructor
re_output = torch.sigmoid(reconstructor(torch.cat((gen_output, torch.sigmoid(seg_output)), 1)))
seg_result = torch.sigmoid(seg_output)
seg_result[seg_result>threshold]=1
seg_result[seg_result!=1]=0
seg_result_ori = seg_result * liver
gen_output_ori = img * (1 - liver) + gen_output * liver
re_output_ori = img * (1 - liver) + re_output * liver
generator.eval()
segmentor.eval()
reconstructor.eval()
# update segmentor
segmentor.train()
for n_iter in range(iters):
optimizer_s.zero_grad()
# generator
gen_output = torch.sigmoid(generator(img))
# segmentor
seg_output_tumor = segmentor(img)
# reconstructor
re_output = torch.sigmoid(reconstructor(torch.cat((gen_output.detach(), torch.sigmoid(seg_output_tumor)), 1)))
# refine reoutput
re_output_liver = img * (1 - liver) + re_output * liver
# calculate reconstruction loss
loss_criterion_r = torch.nn.L1Loss()
loss_r = loss_criterion_r(re_output_liver, img)
loss_total_s = loss_r
loss_total_s.backward()
optimizer_s.step()
print("\r[Batch %d/%d] [R Loss: %.5f] " % ((batch_idx+1), len(dataloader), loss_total_s.item()))
# start test
generator.eval()
segmentor.eval()
reconstructor.eval()
with torch.no_grad():
# generator
gen_output = torch.sigmoid(generator(img))
# segmentor
seg_output = segmentor(img)
# reconstructor
re_output = torch.sigmoid(reconstructor(torch.cat((gen_output, torch.sigmoid(seg_output)), 1)))
seg_result = torch.sigmoid(seg_output)
seg_result[seg_result>threshold]=1
seg_result[seg_result!=1]=0
seg_result = seg_result * liver
re_output = img * (1 - liver) + re_output * liver
segment_utils.save_sample_png(sample_folder = results_path, sample_name = file_name, img=seg_result, pixel_max_cnt = 255)
print('---------------------' + file_name + ' has been finished----------------------')
# inpaint results to nii.gz label
out_folder = opt.sample_path + checkpoint_name + '/' + 'out_nii'
if not os.path.exists(out_folder):
os.makedirs(out_folder)
segment_utils.pngs_2_niigz(sample_folder = results_path, liver_nii_folder=os.path.join(opt.baseroot_test, 'Liver_nii') , out_folder=out_folder, d08=(opt.dataset==8))
# evaluate result
segment_utils.evaluate_nii(dir_out=out_folder, gt_nii_path=os.path.join(opt.baseroot_test, 'Gt_nii'), epoch_name='12')
|
import subprocess
from datetime import datetime
import time
import linecache
while True:
host = []
with open("ip_server.txt") as f:
try:
for line in f:
line = line.rstrip("\n")
line = line.split(",")
host.append(line)
                break  # so that only the first line is read
        except (ValueError, IOError):
fecha = datetime.now().isoformat()
z = open("Error.txt","a")
z.write("Revisar el Archivo ip_server.txt,{} \n".format(fecha))
z.close()
    # test the internet connection
c = subprocess.call("ping -c 1 8.8.8.8 > /dev/null 2>&1&",shell=True)
    if c != 0:
fecha = datetime.now().isoformat()
z = open("Error.txt","a")
z.write("No hay conexion a internet,{} \n",format(fecha))
z.close()
linecache.checkcache("ip_server.txt")
activar = linecache.getline("ip_server.txt",2)
activar = activar.split(",")
activar = activar[1].rstrip("\n")
#flag = False
if activar == "si": flag = True
elif activar == "no":flag = False
print activar
linecache.clearcache()
c = 0
while flag:
        # try to connect for up to 5 minutes, then give up
c += 1
        a = subprocess.call(["vtund", host[0][0], host[0][1]], shell=False)  # use the return code, not the output
        if (c == 30): break  # 5 minutes elapsed and the tunnel still does not work
        if (a == 0): break  # the tunnel was opened correctly
        elif (a != 0): continue  # keep trying to establish the tunnel
time.sleep(10)
time.sleep(10)
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright 1999-2018 Alibaba Group Holding Ltd.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
from .... import operands
from ..utils import infer_dtype
from ...core import Tensor
from .core import TensorUnaryOp
from .utils import arithmetic_operand
@arithmetic_operand(sparse_mode='unary')
class TensorNanToNum(operands.NanToNum, TensorUnaryOp):
pass
@infer_dtype(np.nan_to_num)
def nan_to_num(x, copy=True, **kwargs):
"""
Replace nan with zero and inf with large finite numbers.
If `x` is inexact, NaN is replaced by zero, and infinity and -infinity
replaced by the respectively largest and most negative finite floating
point values representable by ``x.dtype``.
For complex dtypes, the above is applied to each of the real and
imaginary components of `x` separately.
If `x` is not inexact, then no replacements are made.
Parameters
----------
x : array_like
Input data.
copy : bool, optional
Whether to create a copy of `x` (True) or to replace values
in-place (False). The in-place operation only occurs if
casting to an array does not require a copy.
Default is True.
Returns
-------
out : Tensor
`x`, with the non-finite values replaced. If `copy` is False, this may
be `x` itself.
See Also
--------
isinf : Shows which elements are positive or negative infinity.
isneginf : Shows which elements are negative infinity.
isposinf : Shows which elements are positive infinity.
isnan : Shows which elements are Not a Number (NaN).
isfinite : Shows which elements are finite (not NaN, not infinity)
Notes
-----
Mars uses the IEEE Standard for Binary Floating-Point for Arithmetic
(IEEE 754). This means that Not a Number is not equivalent to infinity.
Examples
--------
>>> import mars.tensor as mt
>>> x = mt.array([mt.inf, -mt.inf, mt.nan, -128, 128])
>>> mt.nan_to_num(x).execute()
array([ 1.79769313e+308, -1.79769313e+308, 0.00000000e+000,
-1.28000000e+002, 1.28000000e+002])
>>> y = mt.array([complex(mt.inf, mt.nan), mt.nan, complex(mt.nan, mt.inf)])
>>> mt.nan_to_num(y).execute()
array([ 1.79769313e+308 +0.00000000e+000j,
0.00000000e+000 +0.00000000e+000j,
0.00000000e+000 +1.79769313e+308j])
"""
op = TensorNanToNum(**kwargs)
ret = op(x)
if copy:
return ret
# set back, make sure x is a Tensor
if not isinstance(x, Tensor):
raise ValueError('`x` must be a Tensor, got {0} instead'.format(type(x)))
x.data = ret.data
return x
|
km = int(input('Qual a distância da viagem(km): '))
'''if km <= 200:
valor = km * 0.50
else:
valor = km * 0.45'''
valor = km * 0.50 if km <= 200 else km * 0.45
print(f'O valor da viagem é R${valor:.2f}') |
from django.conf.urls.defaults import patterns, include, url
from django.contrib.staticfiles.urls import staticfiles_urlpatterns
from test_app.views import Edit
# Uncomment the next two lines to enable the admin:
from django.contrib import admin
admin.autodiscover()
urlpatterns = patterns('',
# Examples:
# url(r'^$', 'test_project.views.home', name='home'),
# url(r'^test_project/', include('test_project.foo.urls')),
# Uncomment the admin/doc line below to enable admin documentation:
# url(r'^admin/doc/', include('django.contrib.admindocs.urls')),
# Uncomment the next line to enable the admin:
url(r'^admin/', include(admin.site.urls)),
url(r'^edit/(?P<author_id>[0-9]+)$', Edit.as_view(), name="edit"),
)
urlpatterns += staticfiles_urlpatterns() |
#used as a configuration file
class fan:
    pin = 4
    dependencies = ["python3", "gpiozero"]
|
#!/usr/bin/python3
'''
Test harness: this is just a wrapper for qualiatas_test,
adapted for the "other" (or non-Qualitas) suites.
'''
import sys
import os
import qualitas_test as qt
# This is where I put the other suites:
OTHER_ROOT = '/media/passport/bigApps/corpus-python/jese-2018-versions/'
# The names of the non-qualitas suites:
OTHER_PROGS = ['linchen', 'destefanis', 'anaconda3',
'getpython3', 'newer-qualitas']
def root_for(suite):
'''Return the root directory for a given suite'''
if suite == 'anaconda3': # Go down a layer to get SciPy sub-packages
return os.path.join(OTHER_ROOT, suite, 'pkgs')
if suite in OTHER_PROGS:
return os.path.join(OTHER_ROOT, suite)
print_usage('suite "{}"'.format(suite))
# Like qt.print_latex_table, but sort in descending order of size
# and ignore sub-packages with no files.
# Latex 'longtable' package splits tables over multiple pages.
def print_long_latex_table(pyVersions, appnames, testroot, percs):
# First column of table should be the application names:
    row_data = [[t.replace('_', r'\_')] for t in appnames]
# Data columns are the percentages for each version:
for i, plist in enumerate(percs):
row_data[i].extend([qt.print_perc(p) for p in plist])
# Last column should be totals for each application:
for i,testdir in enumerate(appnames):
testpath = os.path.join(testroot,testdir)
row_data[i].append('%5d' % qt.qualitas.count_python_files(testpath))
# Now print the table, row-by-row:
print('\n\n\n')
print('\\begin{longtable}{l*{%d}{c}c}' % len(pyVersions))
print(qt.latex_table_row(['Application'] +
[p for p in pyVersions] + ['Files'], 'bf', True))
for row in sorted(row_data, key=lambda r:int(r[-1]), reverse=True):
if int(row[-1]) > 0:
qt.safe_print(qt.latex_table_row(row))
print('\\hline')
print('\\end{longtable}')
def print_usage(msg):
print('Unknown argument:', msg)
print('Usage:', sys.argv[0], '<suite> <pyver> <pyver> ...')
sys.exit(1)
# On the command line you can specify
# (1) the suite
# (2) the apps from the suite (optional)
# (3) the Python versions (optional)
if __name__ == '__main__':
suite = sys.argv[1]
suite_root = root_for(suite)
versions, appnames = qt.get_pyvers_appnames(sys.argv[2:], suite_root)
percs = qt.test_all(versions, appnames, suite_root)
if suite == 'anaconda3':
print_long_latex_table(versions, appnames, suite_root, percs)
else:
qt.print_latex_table(versions, appnames, suite_root, percs)
|
# -*- coding: utf-8 -*-
"""Wrapper for actually running ASCAT"""
from snakemake import shell
__author__ = "Manuel Holtgrewe <[email protected]>"
shell.executable("/bin/bash")
shell(
r"""
set -x
export TMPDIR=$(mktemp -d)
trap "rm -rf $TMPDIR" EXIT
# Also pipe stderr to log file
if [[ -n "{snakemake.log}" ]]; then
if [[ "$(set +e; tty; set -e)" != "" ]]; then
rm -f "{snakemake.log}" && mkdir -p $(dirname {snakemake.log})
exec 2> >(tee -a "{snakemake.log}" >&2)
else
rm -f "{snakemake.log}" && mkdir -p $(dirname {snakemake.log})
echo "No tty, logging disabled" >"{snakemake.log}"
fi
fi
# -------------------------------------------------------------------------------------------------
# Build input files
#
grep -v '^.chrs' {snakemake.input.baf_tumor} > $TMPDIR/baf_tumor.raw.txt
grep -v '^.chrs' {snakemake.input.cnv_tumor} > $TMPDIR/cnv_tumor.raw.txt
grep -v '^.chrs' {snakemake.input.baf_normal} > $TMPDIR/baf_normal.raw.txt
grep -v '^.chrs' {snakemake.input.cnv_normal} > $TMPDIR/cnv_normal.raw.txt
comm -12 \
<( \
comm -12 \
<(cut -f 1 $TMPDIR/baf_tumor.raw.txt | sort) \
<(cut -f 1 $TMPDIR/cnv_tumor.raw.txt | sort) \
) \
<( \
comm -12 \
<(cut -f 1 $TMPDIR/baf_normal.raw.txt | sort) \
<(cut -f 1 $TMPDIR/cnv_normal.raw.txt | sort) \
) \
| grep -v '^$' \
> $TMPDIR/ids.txt
for name in baf_tumor baf_normal cnv_tumor cnv_normal; do
echo -e "\tchrs\tpos\t{snakemake.wildcards.tumor_library_name}" > $TMPDIR/$name.txt
grep -w -F -f $TMPDIR/ids.txt $TMPDIR/$name.raw.txt >> $TMPDIR/$name.txt
done
# -------------------------------------------------------------------------------------------------
# Write ASCAT script and call R
#
cat <<"EOF" > $TMPDIR/run_ascat.R
library(ASCAT)
GC_CONTENT_FILE = "/fast/users/mholtgr/Data/ASCAT/GC_AffySNP6_102015.txt";
ascat.bc = ascat.loadData(
"cnv_tumor.txt",
"baf_tumor.txt",
"cnv_normal.txt",
"baf_normal.txt");
#ascat.bc = ascat.GCcorrect(ascat.bc, GC_CONTENT_FILE)
ascat.plotRawData(ascat.bc)
ascat.plotRawData(ascat.bc)
ascat.bc = ascat.aspcf(ascat.bc)
ascat.plotSegmentedData(ascat.bc)
ascat.output = ascat.runAscat(ascat.bc)
# Write out results.
write.table(ascat.output$nA, "{snakemake.wildcards.tumor_library_name}_na.txt");
write.table(ascat.output$nB, "{snakemake.wildcards.tumor_library_name}_nb.txt");
write.table(ascat.output$goodnessOfFit, "{snakemake.wildcards.tumor_library_name}_goodness_of_fit.txt");
write.table(ascat.output$ploidy, "{snakemake.wildcards.tumor_library_name}_ploidy.txt");
write.table(ascat.output$psi, "{snakemake.wildcards.tumor_library_name}_psi.txt");
write.table(ascat.output$segments, "{snakemake.wildcards.tumor_library_name}_segments.txt");
write.table(ascat.output$segments_raw, "{snakemake.wildcards.tumor_library_name}_segments_raw.txt");
write.table(ascat.output$aberrantcellfraction, "{snakemake.wildcards.tumor_library_name}_aberrantcellfraction.txt");
EOF
out_dir=$(readlink -f $(dirname {snakemake.output.done}))
cd $TMPDIR
R --vanilla < run_ascat.R
# -------------------------------------------------------------------------------------------------
# Move out output
#
for suffix in .ASCATprofile.png .ASPCF.png .germline.png .rawprofile.png .sunrise.png .tumour.png \
_na.txt _nb.txt _goodness_of_fit.txt _ploidy.txt _segments.txt _segments_raw.txt \
_psi.txt _aberrantcellfraction.txt; do
if [ -f "{snakemake.wildcards.tumor_library_name}$suffix" ]; then
cp \
{snakemake.wildcards.tumor_library_name}$suffix \
$out_dir
pushd $out_dir && \
md5sum {snakemake.wildcards.tumor_library_name}$suffix \
> {snakemake.wildcards.tumor_library_name}$suffix.md5 && \
popd
else
echo "WARNING {snakemake.wildcards.tumor_library_name}$suffix -- File does not exist"
fi
done
"""
)
|
from collections import defaultdict
dice = defaultdict(int)
for d1 in range(1, 4):
for d2 in range(1, 4):
for d3 in range(1, 4):
dice[d1 + d2 + d3] += 1
player = 0
p1 = 7
p2 = 3
s1 = 0
s2 = 0
universes = defaultdict(int)
universes[(p1, p2, s1, s2)] = 1
q = True
while q:
q = False
next = defaultdict(int)
for uni in universes:
p1, p2, s1, s2 = uni
if max(s1, s2) < 21:
q = True
for roll in dice:
p1, p2, s1, s2 = uni
if player == 0:
p1 = 1 + (p1 + roll - 1) % 10
s1 += p1
else:
p2 = 1 + (p2 + roll - 1) % 10
s2 += p2
next[(p1, p2, s1, s2)] += dice[roll] * universes[uni]
else:
next[(p1, p2, s1, s2)] += universes[uni]
universes = next
player = (player + 1) % 2
wins1 = 0
wins2 = 0
for uni in universes:
p1, p2, s1, s2 = uni
if s1 >= 21: wins1 += universes[uni]
if s2 >= 21: wins2 += universes[uni]
print(max(wins1, wins2))
|
example = """16,1,2,0,4,2,7,1,2,14"""
import numpy as np
def preprocess(data: str) -> list:
return [int(x) for x in data.split(',')]
def min_fuel(crabs: list) -> int:
crabs = sorted(crabs)
med = crabs[len(crabs)//2]
return sum(abs(x-med) for x in crabs)
crabs = preprocess(example)
min_fuel(crabs=crabs) == 37
# Part-2
def min_fuel_2(crabs: list) -> int:
scores = list()
# The minimum fuel location seems to be shifting from the median to the mean as distance is more costly now (squared)
med = np.median(crabs)
print(f"The Median: {med}")
avg = np.mean(crabs)
print(f"The Average: {avg}")
    # Scan ~20% of the (max - min) range around the rounded mean (a scan as small as 1% suffices for the part-2 data)
pct = 0.20
space = (max(crabs)-min(crabs))*(pct/2)
space = round(space)
for x in range(int(avg)-space, int(avg)+space):
s = 0
for crab in crabs:
s += abs(x-crab)*(abs(x-crab)+1)/2
scores.append(int(s))
print("The location for the least fuel is {}".format(int(avg)-space + np.argmin(scores)) )
return min(scores)
assert min_fuel_2(crabs=crabs) == 168
if __name__ == '__main__':
with open('d7.txt') as f:
content = f.read()
CRABS = preprocess(content)
print("The fuel consumption for the 1st part: {} \n".format(min_fuel(crabs= CRABS)))
print("The fuel consumption for the 2nd part: {}".format(min_fuel_2(crabs= CRABS)))
|
import unittest
from checkov.terraform.checks.resource.aws.ECRPolicy import check
from checkov.terraform.models.enums import CheckResult
class TestECRPolicy(unittest.TestCase):
def test_failure(self):
resource_conf = {'repository': ['public_repo'], 'policy': [
'{\n "Version": "2008-10-17",\n "Statement": [\n {\n "Sid": "new policy",'
'\n "Effect": "Allow",\n "Principal": "*",\n "Action": [\n '
' "ecr:GetDownloadUrlForLayer",\n "ecr:BatchGetImage",\n '
'"ecr:BatchCheckLayerAvailability",\n "ecr:PutImage",\n '
'"ecr:InitiateLayerUpload",\n "ecr:UploadLayerPart",\n '
'"ecr:CompleteLayerUpload",\n "ecr:DescribeRepositories",\n '
'"ecr:GetRepositoryPolicy",\n "ecr:ListImages",\n "ecr:DeleteRepository",'
'\n "ecr:BatchDeleteImage",\n "ecr:SetRepositoryPolicy",\n '
'"ecr:DeleteRepositoryPolicy"\n ]\n }\n ]\n}']}
scan_result = check.scan_resource_conf(conf=resource_conf)
self.assertEqual(CheckResult.FAILED, scan_result)
def test_success(self):
resource_conf = {'repository': ['private_repo'], 'policy': [
'{\n "Version": "2008-10-17",\n "Statement": [\n {\n "Sid": "new policy",'
'\n "Effect": "Allow",\n "Principal": {\n "AWS": [\n '
' "arn:aws:iam::123456789012:user/pull-user-1",\n '
'"arn:aws:iam::123456789012:user/pull-user-2"\n ]\n },\n "Action": ['
'\n "ecr:GetDownloadUrlForLayer",\n "ecr:BatchGetImage",\n '
'"ecr:BatchCheckLayerAvailability",\n "ecr:PutImage",\n '
'"ecr:InitiateLayerUpload",\n "ecr:UploadLayerPart",\n '
'"ecr:CompleteLayerUpload",\n "ecr:DescribeRepositories",\n '
'"ecr:GetRepositoryPolicy",\n "ecr:ListImages",\n "ecr:DeleteRepository",'
'\n "ecr:BatchDeleteImage",\n "ecr:SetRepositoryPolicy",\n '
'"ecr:DeleteRepositoryPolicy"\n ]\n }\n ]\n}']}
scan_result = check.scan_resource_conf(conf=resource_conf)
self.assertEqual(CheckResult.PASSED, scan_result)
if __name__ == '__main__':
unittest.main()
|
from pyspark import keyword_only
from pyspark.ml.param.shared import HasInputCols
from pyspark.ml.util import DefaultParamsReadable, DefaultParamsWritable
from pyspark.sql import DataFrame
from pyspark.ml import Transformer
from typing import List
class DropColumns(Transformer,
HasInputCols,
DefaultParamsReadable,
DefaultParamsWritable
):
"""Transformer that drops specified columns."""
@keyword_only
def __init__(self, inputCols: List[str] = None):
super(DropColumns, self).__init__()
kwargs = self._input_kwargs
self.setParams(**kwargs)
@keyword_only
def setParams(self, inputCols: List[str] = None):
kwargs = self._input_kwargs
return self._set(**kwargs)
def _transform(self, dataset: DataFrame):
return dataset.drop(*self.getInputCols())
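# Usage sketch (assumes an active SparkSession named `spark`; column names are illustrative):
# df = spark.createDataFrame([(1, "a", 0.5)], ["id", "label", "score"])
# dropper = DropColumns(inputCols=["label", "score"])
# dropper.transform(df).columns  # -> ["id"]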
|
#!/usr/bin/python3
# mari von steinkirch @2013
# steinkirch at gmail
def convert_from_decimal_larger_bases(number, base):
''' convert any decimal number to a number in a base up to 20'''
strings = "0123456789ABCDEFGHIJ"
result = ""
while number > 0:
digit = number%base
result = strings[digit] + result
number = number//base
return result
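# Illustrative values (not part of the original file):
# convert_from_decimal_larger_bases(255, 16) -> 'FF'
# convert_from_decimal_larger_bases(10, 2)   -> '1010'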
def test_convert_from_decimal_larger_bases():
number, base = 31, 16
assert(convert_from_decimal_larger_bases(number, base) == '1F')
print('Tests passed!')
if __name__ == '__main__':
test_convert_from_decimal_larger_bases()
|
"""Automatically serve ML model as a REST API"""
__version__ = "0.1.1"
from typing import List, Dict, Union, Callable, Tuple
import pandas as pd
import sklearn
from flask import Flask
from flask_restful import Api
from scikit_rest.resource import Prediction
from scikit_rest.types import numpy_dict
from scikit_rest.validator import validate_col_types
def infer(input_df: pd.DataFrame) -> Tuple[List[str], Dict[str, Union[List, type]]]:
"""
Automatically infer the column list and column types from the input DataFrame
Args:
input_df: DataFrame, where the column list and column types will be inferred from.
Returns:
col_list: List of Column names, where the order of the values will dictate the order within the pandas DataFrame
col_types: Dictionary of Column Names and the type of the variable, used for input Validation. If the values
of the dictionary is instead a list, We assume that any input for the variable can only be any of
the ones listed within the list
"""
df = input_df.copy().infer_objects()
col_list = df.columns.tolist()
col_types = {}
for key, value in df.dtypes.to_dict().items():
col_types[key] = numpy_dict[value.type]
return col_list, col_types
def serve(
col_list: List[str],
col_types: Dict[str, Union[List, type]],
transform_fn: Callable,
predict_fn: Union[Callable, sklearn.base.BaseEstimator],
port: int = 1234,
is_nullable: bool = False,
name: str = "model",
):
"""
Setting up ML model as a REST API server
Args:
col_list: List of Column names, where the order of the values will dictate the order within the pandas DataFrame
col_types: Dictionary of Column Names and the type of the variable, used for input Validation. If the values
of the dictionary is instead a list, We assume that any input for the variable can only be any of
the ones listed within the list
transform_fn: Function which convert the input dataframe into test dataframe,
where we can call model.predict upon to get the final result
predict_fn: Function which convert the test dataframe into result. If a ML model instance is passed in, we will
instead try to call model.predict_proba / model.predict to get the result
port: Port Number where the REST API should be served upon
is_nullable: Whether input API can be nullable
name: Name of the program
"""
validate_col_types(col_types)
app = Flask(name)
api = Api(app)
api.add_resource(
Prediction,
"/",
resource_class_kwargs={
"col_list": col_list,
"col_types": col_types,
"transform_fn": transform_fn,
"predict_fn": predict_fn,
"is_nullable": is_nullable,
},
)
app.config["BUNDLE_ERRORS"] = True
app.run(host="0.0.0.0", port=port)
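# End-to-end sketch (model and DataFrame are hypothetical, not part of this module):
# col_list, col_types = infer(train_df)            # infer the schema from a training DataFrame
# serve(col_list, col_types,
#       transform_fn=lambda df: df[col_list],      # select/reorder the expected columns
#       predict_fn=trained_model,                  # fitted sklearn estimator or any callable
#       port=1234)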
|
"""
The Metric class handles running metrics throughout a simulation.
To define a Metric, it needs only a name and an update function. The update function will access any of:
environment.action_history
meta_vars
environment.state
"""
class Metric(object):
def __init__(self, update_func):
# update_func should be a callable that takes a dict of parameters
self.update_func = update_func
self.vals = []
def update(self, update_args):
"""
:param update_args: Dict of parameters. Schema:
action_history: List of dicts corresponding to actions taken by agents in the environment
environment_state: Current state of the environment
meta_vars (Optional): Any meta variables global to the simulation
:return:
"""
self.vals.append(self.update_func(update_args))
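# Usage sketch (hypothetical update function that counts the actions taken so far):
if __name__ == "__main__":
    action_count = Metric(lambda args: len(args["action_history"]))
    action_count.update({"action_history": [{"agent": 0, "action": "noop"}],
                         "environment_state": {}})
    print(action_count.vals)  # -> [1]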
|
# Given a string s and a string array dictionary,
# return the longest string in the dictionary that can be formed by deleting some of the given string characters.
# If there is more than one possible result,
# return the longest word with the smallest lexicographical order.
# If there is no possible result, return the empty string.
def findLongestWord(s, dictionary):
    dictionary.sort(key=lambda x: (-len(x), x))  # longest first, then lexicographically smallest
for word in dictionary:
if isSubSeq(word,s):
return word
return ''
def isSubSeq(word,s):
i = 0
for char in s:
if char == word[i]:
i += 1
if i == len(word):
return True
return False
# s = "abpcplea"
# dictionary = ["a","b","c"]
s = "abpcplea"
dictionary = ["ale","apple","monkey","plea"]
print(findLongestWord(s, dictionary))
|
from threading import Lock
from lib.caching.fingerprint import is_similar
from lib.caching.policy import Policy
class TranscriptCache:
def __init__(self, policy: Policy):
self.policy = policy
self.cache = dict()
self.cache_lk = Lock()
def add(self, key: str, value: object) -> None:
if key is not None:
self.cache[key] = value
# evicted = self.policy.evict(key)
# if evicted is not None:
# return self.cache.pop(evicted)
# return {}
def get(self, fp_key) -> object:
resolution = {}
# cached = self.policy.resolve(key)
# if cached:
# resolution = self.cache[key]
self.cache_lk.acquire()
cache_instance = self.cache.copy()
self.cache_lk.release()
for key in cache_instance:
match = is_similar(key, fp_key)
if match:
return cache_instance[key]
return resolution
|
from tensorflow.compat.v1 import ConfigProto
from tensorflow.compat.v1 import InteractiveSession
config = ConfigProto(allow_soft_placement=True, log_device_placement=True)
config.gpu_options.allow_growth = True
session = InteractiveSession(config=config)
import tensorflow as tf
import tensorlayer as tl
import time
import os
import numpy as np
from dataset import make_val_dataset
from model import get_model
net = get_model()
val_ds = make_val_dataset()
model = 'weights/net_weights_e8.h5' # modify this to load trained model
net.load_weights(model)
net.eval()
model = os.path.basename(model)
step_time = time.time()
val_loss, val_acc, vn_iter = 0, 0, 0
for img, aud, out in val_ds:
_logits = net([img, aud])
val_loss += tl.cost.cross_entropy(_logits, out, name='eval_loss')
val_acc += np.mean(np.equal(np.argmax(_logits, 1), out))
vn_iter += 1
print("[model {}] took: {:3f}, vn_iter: {}, val_loss: {:5f}, val_acc: {:5f}"
.format(model, time.time() - step_time, vn_iter, val_loss / vn_iter, val_acc / vn_iter))
|
# Generated by Django 3.1 on 2021-01-21 11:31
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('pathtests', '0003_auto_20210121_1747'),
('analysis', '0015_auto_20210121_2201'),
('annotation', '0016_one_off_move_ontology'),
]
operations = [
migrations.AlterUniqueTogether(
name='hposynonym',
unique_together=None,
),
migrations.RemoveField(
model_name='hposynonym',
name='hpo',
),
migrations.RemoveField(
model_name='humanphenotypeontology',
name='children',
),
migrations.AlterUniqueTogether(
name='mimgene',
unique_together=None,
),
migrations.RemoveField(
model_name='mimgene',
name='gene',
),
migrations.RemoveField(
model_name='mimgene',
name='mim_morbid',
),
migrations.RemoveField(
model_name='mimmorbidalias',
name='mim_morbid',
),
migrations.AlterUniqueTogether(
name='phenotypemim',
unique_together=None,
),
migrations.RemoveField(
model_name='phenotypemim',
name='hpo',
),
migrations.RemoveField(
model_name='phenotypemim',
name='mim_morbid',
),
migrations.RemoveField(
model_name='textphenotypematch',
name='hpo',
),
migrations.RemoveField(
model_name='textphenotypematch',
name='omim_alias',
),
migrations.DeleteModel(
name='HPOEdge',
),
migrations.DeleteModel(
name='HPOSynonym',
),
migrations.DeleteModel(
name='HumanPhenotypeOntology',
),
migrations.DeleteModel(
name='MIMGene',
),
migrations.DeleteModel(
name='MIMMorbid',
),
migrations.DeleteModel(
name='MIMMorbidAlias',
),
migrations.DeleteModel(
name='PhenotypeMIM',
),
]
|
from distutils.core import setup
def readme():
with open('README.md') as file:
README = file.read()
return README
setup(
name = 'TOPSIS-Utkarsh-101803613',
packages = ['TOPSIS-Utkarsh-101803613'],
version = '0.1',
license='MIT',
description = 'Topsis Assignment',
author = 'Utkarsh Sinha',
author_email = '[email protected]',
url = 'https://github.com/utkarsh-1106',
download_url = 'https://github.com/utkarsh-1106', #will update soon.
keywords = ['TOPSIS', 'PYTHON', 'ASSIGNMENT'],
install_requires=[
'numpy',
'scrapeasy',
'pandas',
],
classifiers=[
'Development Status :: 3 - Alpha',
'Intended Audience :: Developers',
'Topic :: Software Development :: Build Tools',
'License :: OSI Approved :: MIT License',
'Programming Language :: Python :: 3.4',
'Programming Language :: Python :: 3.5',
'Programming Language :: Python :: 3.6',
'Programming Language :: Python :: 3.7',
'Programming Language :: Python :: 3.8',
],
) |
"""Add_token_hash_User
Revision ID: b9586e96ee77
Revises: 28fb5b1eaf5d
Create Date: 2018-12-20 16:56:32.260456
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = 'b9586e96ee77'
down_revision = '28fb5b1eaf5d'
branch_labels = None
depends_on = None
def upgrade():
# ### commands auto generated by Alembic - please adjust! ###
with op.batch_alter_table('user', schema=None) as batch_op:
batch_op.add_column(sa.Column('token_hash', sa.String(length=128), nullable=True))
# ### end Alembic commands ###
def downgrade():
# ### commands auto generated by Alembic - please adjust! ###
with op.batch_alter_table('user', schema=None) as batch_op:
batch_op.drop_column('token_hash')
# ### end Alembic commands ###
|
#!/usr/bin/env python3
import re
import warnings
warnings.simplefilter(action='ignore', category=UserWarning)
import numpy as np
import pandas as pd
import pyranges as pr
from src import logger
from src.query import bam
__all__ = ["Interval", "Regions"]
tag_parser = re.compile(r"(?P<chrom>chr.{1,2}):(?P<start>\d*)-(?P<end>\d*)_?(?P<strand>[+-]?)")
#--------------------------------------------------------------------------------------------------#
class Interval:
"""Class for performing basic queries and manipulations on a single `Interval`."""
def __init__(self, chrom, start, end, strand=None, name=None):
self.chrom = chrom if chrom.startswith("chr") else f"chr{chrom}"
self.start = int(start)
self.end = int(end)
self.strand = strand
self.name = self.tag if name is None else name
self._validate()
self.is_stranded = self.strand == "+" or self.strand == "-"
# position at genomic features
self.mid = (self.start + self.end) // 2
if self.is_stranded:
self.tss = self.start if self.strand == "+" else self.end
self.tes = self.start if self.strand == "-" else self.end
def _validate(self):
"""Check validity of constructor arguments."""
assert self.end > self.start
assert self.strand in ["+", "-", ".", None]
# TODO: check bounds are within chromosome sizes
@classmethod
def load_tag(cls, tag):
parsed_tag = tag_parser.match(tag).groupdict()
parsed_tag["start"], parsed_tag["end"] = int(parsed_tag["start"]), int(parsed_tag["end"])
if parsed_tag["strand"] == "": parsed_tag["strand"] = None
return cls(**parsed_tag)
@classmethod
def load_ensg(cls, gene):
from src.load import aals
assert gene in aals.gene_coords.index
chrom, start, end, strand = aals.gene_coords.loc[gene]
return cls(chrom, start, end, strand, name=gene)
@classmethod
def load(cls, *args):
"""Lazy loading."""
if len(args) == 1 and isinstance(args[0], Interval):
return args[0]
elif len(args) == 1 and isinstance(args[0], str):
if args[0].startswith("chr"):
return cls.load_tag(args[0])
elif args[0].startswith("ENSG"):
return cls.load_ensg(args[0])
else:
raise ValueError("Could not load Interval.")
elif len(args) == 1 and isinstance(args[0], pd.Series):
return cls(**args[0], name=args[0].name)
else:
return cls(*args[0])
#----------------------------------------------------------------------------------------------------#
# Access genomic features as `Interval` objects
#----------------------------------------------------------------------------------------------------#
@property
def as_start(self):
return Interval(self.chrom, self.start, self.start+1, self.strand)
@property
def as_end(self):
return Interval(self.chrom, self.end-1, self.end, self.strand)
@property
def as_mid(self):
return Interval(self.chrom, self.mid, self.mid+1, self.strand)
@property
def as_tss(self):
return Interval(self.chrom, self.tss, self.tss+1, self.strand)
@property
def as_tes(self):
return Interval(self.chrom, self.tes-1, self.tes, self.strand)
def get_pos(self, gf):
"""Access position by string or returns default genomic feature."""
if gf == "ref": gf = "tss" if self.is_stranded else "mid"
return getattr(self, f"{gf}")
#----------------------------------------------------------------------------------------------------#
# Operations to generate new `Interval` instances relative to `self`
#----------------------------------------------------------------------------------------------------#
def widen(self, w):
return Interval(self.chrom, self.start-w, self.end+w, self.strand)
def slide(self, s, wrt_strand=None):
if self.is_stranded and s != 0 and wrt_strand is None:
raise ValueError("`wrt_strand` must be explicit if `Interval` is stranded.")
if wrt_strand and self.strand == "-": s = -s
return Interval(self.chrom, self.start+s, self.end+s, self.strand)
def transform(self, w=0, s=0, wrt_strand=None):
"""Expand the region by `window`, shift the region downstream (3' direction) by `shift`. """
return self.widen(w=w).slide(s=s, wrt_strand=wrt_strand)
#----------------------------------------------------------------------------------------------------#
# Queries
#----------------------------------------------------------------------------------------------------#
def get_genotypes(self):
from src.analysis import vcf
return vcf.query_interval(self.chrom, self.start, self.end)
def get_rna_coverages(self):
coverages = self.as_Regions().get_rna_coverages()
return coverages.iloc[0]
def get_atac_coverages(self):
coverages = self.as_Regions().get_atac_coverages()
return coverages.iloc[0]
def get_pileups(self):
# TODO
# get_pileups_in_interval
pass
#----------------------------------------------------------------------------------------------------#
# Output formats
#----------------------------------------------------------------------------------------------------#
@property
def tag(self):
return coords_to_tag(self.chrom, self.start, self.end)
    def unstrand(self):
        return Interval(self.chrom, self.start, self.end, name=self.name)
def as_tuple3(self):
return self.chrom, self.start, self.end
def as_tuple(self):
return self.chrom, self.start, self.end, self.strand
def as_dict(self):
return {"chrom": self.chrom, "start": self.start, "end": self.end, "strand": self.strand}
def as_Regions(self):
        interval_s = pd.Series(self.as_dict(), name=self.tag)
return Regions(interval_s.to_frame().T)
def __repr__(self):
if self.is_stranded:
return f"{self.tag}_{self.strand}"
else:
return self.tag
def length(self):
return self.end - self.start
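# Illustrative usage sketch (my own addition, not part of the original module);
# it assumes the tag format accepted by `tag_parser`, e.g. "chr1:100-200_+":
#
#   region = Interval.load("chr1:100-200_+")
#   region.mid                          # 150
#   region.widen(50)                    # chr1:50-250_+
#   region.slide(10, wrt_strand=True)   # chr1:110-210_+, shifted 10 bp downstream
#   region.as_tss.tag                   # "chr1:100-101"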
# class Peak(Interval):
# def __init__(self, peak_id):
# parsed_tag = tag_parser.match(peak_id).groupdict()
# chrom, start, end = parsed_tag["chrom"], int(parsed_tag["start"]), int(parsed_tag["end"])
# super().__init__(chrom, start, end)
# class Gene(Interval):
# def __init__(self, gene_id):
# coords = aals.gene_coords.loc[gene_id]
# super().__init__(**coords)
#----------------------------------------------------------------------------------------------------#
# Regions subclass
#----------------------------------------------------------------------------------------------------#
class Regions(pd.DataFrame):
_df,_pr = None,None
@property
def _constructor(self):
# return Regions
if "chrom" in self.columns and "start" in self.columns and "end" in self.columns:
return Regions
else:
logger.update("Not formatted as `Regions`. Falling back to dataframe.")
return pd.DataFrame
@property
def is_stranded(self):
return "strand" in self.columns
@property
def is_sorted(self):
shifted = self.shift(fill_value=0)
return ((self["start"] > shifted["start"]) | (self["chrom"] != shifted["chrom"])).all()
#----------------------------------------------------------------------------------------------------#
# Queries
#----------------------------------------------------------------------------------------------------#
    def get_rna_coverages(self, max_size=10):
        from src.load import aals
        return bam.get_coverages_in_regions(aals.rna_bams, self)
    def get_atac_coverages(self, max_size=10):
        from src.load import aals
        return bam.get_coverages_in_regions(aals.atac_bams, self)
#----------------------------------------------------------------------------------------------------#
# Intersect with other regions
#----------------------------------------------------------------------------------------------------#
def in_interval(self, chrom, start, end):
return self[(self["chrom"] == "chrom") & (self["end"] > start) & (self["start"] < end)]
def overlap_with(self, other):
"""Reports features that overlap with other."""
other = _format_input_as_pyranges(other)
overlap_idx = self.pr.overlap(other).__getattr__(self.index.name)
return self.reindex(overlap_idx)
def overlapping_idx(self, other, col_names=None, **kwargs):
"""Reports indices of overlapping intervals in self and other."""
return _get_overlapping_regions(self, other, col_names=col_names)
def adjacent_idx(self, hops):
"""Reports pairs indices of adjacent intervals. Distance is set by `hops`."""
assert isinstance(hops, int) and hops != 0
pos_hops = abs(hops)
chrom_vals = self["chrom"].values
chroms_1, chroms_2 = chrom_vals[:-pos_hops], chrom_vals[pos_hops:]
same_chrom = chroms_1 == chroms_2
names = self.index.name, f"{hops}_hop"
if hops > 0:
return pd.DataFrame((row[:2] for row in zip(self.index[:-pos_hops], self.index[pos_hops:], same_chrom) if row[2]), columns=names)
else:
return pd.DataFrame((row[:2] for row in zip(self.index[pos_hops:], self.index[:-pos_hops], same_chrom) if row[2]), columns=names)
def k_adjacent(self, interval, k=5, gf=None, report_distance=True):
"""Gets the k nearest intervals in either direction."""
        interval = unpack_interval_arg(interval, regions=self)
contig_features = self[self["chrom"] == interval.chrom]
nearest_feature = contig_features.k_nearest(interval, k=1, gf=gf, report_distance=False).index[0]
nearest_idx = np.where(contig_features.index == nearest_feature)[0][0]
lower_idx, upper_idx = max(nearest_idx-k, 0), min(nearest_idx+k, len(contig_features))
regions = contig_features.iloc[lower_idx:upper_idx]
if report_distance: regions = regions.assign(distance=self.distances_from_interval(interval, gf=gf))
return regions
def k_nearest(self, interval, k=10, gf=None, report_distance=True):
"""Gets k nearest features by absolute distance."""
        interval = unpack_interval_arg(interval, regions=self)
distances = self.distances_from_interval(interval, gf=gf)
regions = self.reindex(distances.abs().sort_values()[:k].index).sort()
if report_distance: regions = regions.assign(distance=self.distances_from_interval(interval, gf=gf))
return regions
def previous_feature(self, interval, n=1, gf=None, report_distance=True):
"""Gets k nearest features by absolute distance."""
interval = unpack_interval_arg(interval)
adjacent_intervals = self.k_adjacent(interval, k=1, gf=gf, report_distance=False)
return Interval.load(adjacent_intervals.iloc[0])
#----------------------------------------------------------------------------------------------------#
# Converters and I/O
#----------------------------------------------------------------------------------------------------#
@property
def annotate(self):
# if self.omic == "rna":
# return self.assign(symbol=self["gene_id"].map(data.ensg))
pass
@property
def pr(self):
if self._pr is None:
self._pr = df_to_pr(self.df)
return self._pr
def bed(self, strand_fill="."):
"""Convert `Regions` object to BED format."""
pass
@property
def df(self):
if self._df is None:
self._df = pd.DataFrame(self)
return self._df
def write_bed(self, path):
# TODO
pass
#----------------------------------------------------------------------------------------------------#
# Utility methods
#----------------------------------------------------------------------------------------------------#
@property
def tags(self):
return self["chrom"] + ":" + self["start"].astype(str) + "-" + self["end"].astype(str)
def set_index_to_tags(self, name="peak_id"):
new_regions = self.copy()
new_regions.index = self.tags
new_regions.index.name = name
return new_regions
def sort(self):
return sort_regions(self)
#----------------------------------------------------------------------------------------------------#
# Constructors
#----------------------------------------------------------------------------------------------------#
def unstrand(self):
return self.drop(columns=["strand"], errors="ignore")
def widen(self, w):
"""Expand region by w."""
new_regions = self.copy()
new_regions["start"] -= w
new_regions["end"] += w
return new_regions
def slide(self, s):
"""Slide region by s."""
new_regions = self.copy()
if self.is_stranded:
s = self["strand"].replace({"+":s, "-":-s})
new_regions["start"] += s
new_regions["end"] += s
return new_regions
def transform(self, w=0, s=0):
new_regions = self.copy()
if self.is_stranded:
s = self["strand"].replace({"+":s, "-":-s})
new_regions["start"] += s - w
new_regions["end"] += s + w
return new_regions
#----------------------------------------------------------------------------------------------------#
# Access positions
#----------------------------------------------------------------------------------------------------#
@property
def start(self):
new_regions = self.copy()
new_regions["end"] = new_regions["start"] + 1
return new_regions
@property
def end(self):
new_regions = self.copy()
new_regions["start"] = new_regions["end"] - 1
return new_regions
@property
def mid(self):
new_regions = self.copy()
new_regions["start"] = (new_regions["start"] + new_regions["end"]) // 2
new_regions["end"] = new_regions["start"]
return new_regions
@property
def tss(self):
new_regions = self.copy()
new_regions["start"] = np.where(new_regions["strand"] == "+", new_regions["start"], new_regions["end"]-1)
new_regions["end"] = new_regions["start"] + 1
return new_regions
@property
def tes(self):
new_regions = self.copy()
new_regions["start"] = np.where(new_regions["strand"] == "-", new_regions["start"], new_regions["end"]-1)
new_regions["end"] = new_regions["start"] + 1
return new_regions
@property
def start_pos(self):
return self["start"].copy()
@property
def end_pos(self):
return self["end"].copy()
@property
def mid_pos(self):
return ((self["start"] + self["end"]) // 2).rename("mid")
@property
def tss_pos(self):
tss_pos = np.where(self["strand"] == "+", self["start"], self["end"])
return pd.Series(data=tss_pos, index=self.index, name="tss")
@property
def tes_pos(self):
tes_pos = np.where(self["strand"] == "-", self["start"], self["end"])
return pd.Series(data=tes_pos, index=self.index, name="tes")
def get_pos(self, gf=None):
"""Access position by string or returns default genomic feature."""
if gf is None: gf = "tss" if self.is_stranded else "mid"
return getattr(self, f"{gf}_pos")
def distances_from_interval(self, interval, gf):
        interval = unpack_interval_arg(interval, regions=self)
target_positions = self[self["chrom"] == interval.chrom].get_pos(gf=gf)
distances = target_positions - interval.get_pos(gf=None)
return distances*-1 if interval.strand == "-" else distances
def unpack_interval_arg(arg, regions=None):
    if isinstance(arg, str) and regions is not None and arg in regions.index:
return Interval.load(regions.loc[arg])
else:
return Interval.load(arg)
#----------------------------------------------------------------------------------------------------#
# BED operations
#----------------------------------------------------------------------------------------------------#
def _get_overlapping_regions(regions1, regions2, col_names=None, **kwargs):
regions1_id = _get_regions_id(regions1)
regions2_id = _get_regions_id(regions2)
regions1 = _format_input_as_pyranges(regions1)
regions2 = _format_input_as_pyranges(regions2)
joined = regions1.join(regions2, **kwargs).as_df()
if regions1_id == regions2_id: regions2_id = regions2_id + "_b"
pairs = joined[[regions1_id, regions2_id]]
if col_names: pairs.columns = col_names
return pairs
def _get_regions_id(obj):
"""Get id from regions."""
if isinstance(obj, pr.PyRanges):
additional_cols = obj.columns.drop(["Chromosome", "Start", "End", "Strand"], errors="ignore")
assert len(additional_cols) == 1, "cannot determine regions id"
return additional_cols[0]
else:
assert isinstance(obj, Regions), "input not formatted as pyranges or Regions object"
return obj.index.name
def _format_input_as_pyranges(obj):
"""Formats pyranges or Regions object as pyranges"""
if isinstance(obj, pr.PyRanges):
return obj
else:
assert isinstance(obj, Regions), "input not formatted as pyranges or Regions object"
return obj.pr
#----------------------------------------------------------------------------------------------------#
# Utility functions
#----------------------------------------------------------------------------------------------------#
def df_to_pr(df):
"""Convert dataframe of regions to pyranges"""
pos_columns = ["chrom", "start", "end", "strand"] if "strand" in df.columns else ["chrom", "start", "end"]
# reorder columns to place positions first
df = df.reset_index()
df = df[pos_columns + df.columns.drop(pos_columns, errors="ignore").tolist()]
df = df.rename(columns={"chrom": "Chromosome", "start": "Start", "end": "End", "strand": "Strand"})
return pr.PyRanges(df)
def coords_to_tag(chrom, start, end):
return f"{chrom}:{start}-{end}"
def sort_regions(regions_df):
"""Lexicographical sort on bed-style regions."""
from src.load import hg38
tmp_regions = regions_df.copy()
tmp_regions["chrom_tag"] = tmp_regions["chrom"].str[3:]
tmp_regions["not_standard_chrom"] = ~tmp_regions["chrom"].isin(hg38.chroms)
sorted_idx = tmp_regions.sort_values(["not_standard_chrom", "chrom_tag", "start", "end"]).index
return regions_df.reindex(sorted_idx)
def parse_coords_index(coords):
regions = coords.str.extract(tag_parser)
regions["start"] = pd.to_numeric(regions["start"], downcast="unsigned")
regions["end"] = pd.to_numeric(regions["end"], downcast="unsigned")
regions.index = coords
return regions
# #----------------------------------------------------------------------------------------------------#
# # Regions Accessor
# #----------------------------------------------------------------------------------------------------#
# @pd.api.extensions.register_dataframe_accessor("bed")
# class RegionsAccessor:
# def __init__(self, regions):
# self._validate(regions)
# self._regions = regions
# self._pos = None
# self._pr = None
# @staticmethod
# def _validate(regions):
# assert "chrom" in regions.columns
# assert "start" in regions.columns
# assert "end" in regions.columns
# if "strand" in regions:
# assert regions["strand"].isin(["+", "-"]).all()
# #----------------------------------------------------------------------------------------------------#
# # Regions properties
# #----------------------------------------------------------------------------------------------------#
# @property
# def is_stranded(self):
# return "strand" in self._regions.columns
# @property
# def unstrand(self):
# return self._regions.drop(columns=["strand"], errors="ignore")
# @property
# def pos_columns(self):
# return ["chrom", "start", "end", "strand"] if self.is_stranded else ["chrom", "start", "end"]
# @property
# def pos(self):
# """Returns a dataframe with additional position columns."""
# if self._pos is None:
# positions = self._regions.copy()
# positions["mid"] = (positions["start"] + positions["end"]) // 2
# if self.is_stranded:
# positions["tss"] = np.where(positions["strand"] == "+", positions["start"], positions["end"])
# positions["tes"] = np.where(positions["strand"] == "-", positions["start"], positions["end"])
# self._pos = positions
# return self._pos
# #----------------------------------------------------------------------------------------------------#
# # Operations to generate new regions
# #----------------------------------------------------------------------------------------------------#
# def recenter(self, feature):
# """Returns bed dataframe centered around a genomic feature."""
# new_regions = self._regions.copy()
# if feature == "start":
# new_regions["end"] = new_regions["start"] + 1
# elif feature == "end":
# new_regions["start"] = new_regions["end"] - 1
# elif feature == "mid":
# pos = self.pos["mid"]
# new_regions["start"], new_regions["end"] = pos, pos + 1
# elif feature == "tss":
# pos = self.pos["tss"] + new_regions["strand"].replace({"+":0, "-":-1})
# new_regions["start"], new_regions["end"] = pos, pos + 1
# elif feature == "tes":
# pos = self.pos["tes"] + new_regions["strand"].replace({"+":0, "-":1})
# new_regions["start"], new_regions["end"] = pos-1, pos
# else:
# raise ValueError
# return new_regions
# def expand(self, w):
# new_regions = self._regions.copy()
# new_regions["start"] -= w
# new_regions["end"] += w
# return new_regions
# def shift(self, s):
# new_regions = self._regions.copy()
# if self.is_stranded:
# s = self._regions["strand"].replace({"+":s, "-":-s})
# new_regions["start"] += s
# new_regions["end"] += s
# return new_regions
# def transform(self, w=0, s=0):
# new_regions = self._regions.copy()
# if self.is_stranded:
# s = self._regions["strand"].replace({"+":s, "-":-s})
# new_regions["start"] += s - w
# new_regions["end"] += s + w
# return new_regions
# #----------------------------------------------------------------------------------------------------#
# # Make queries from other data
# #----------------------------------------------------------------------------------------------------#
# def get_coverages(self, omic, max_size=10):
# assert self.regions.shape[1] <= max_size
# if omic == "atac":
# return get_coverages_in_regions(aals.atac_bams, self.regions)
# elif omic == "rna":
# return get_coverages_in_regions(aals.atac_bams, self.regions)
# else:
# raise ValueError
# #----------------------------------------------------------------------------------------------------#
# # Utility methods
# #----------------------------------------------------------------------------------------------------#
# @property
# def tags(self):
# return self._regions["chrom"] + ":" + self._regions["start"].astype(str) + "-" + self._regions["end"].astype(str)
# def set_index_to_tags(self, name="peak_id"):
# new_regions = self._regions.copy()
# new_regions.index = self.tags
# new_regions.index.name = name
# return new_regions
# def sort(self):
# pass
# # return sort_regions(self)
# def as_pr(self):
# if self._pr is None:
# self._pr = df_to_pr(self._regions)
# return self._pr
# def as_bed(self, strand_fill="."):
# """Convert `Regions` object to BED format."""
# pass
# # regions_bed = pd.DataFrame(self.reset_index())
# # index_name = regions_bed.columns[0]
# # if "score" not in regions_bed.columns:
# # regions_bed["score"] = "."
# # if "strand" not in regions_bed.columns:
# # regions_bed["strand"] = strand_fill
# # return regions_bed[["chrom", "start", "end", index_name, "score", "strand"]]
# def to_bed(self, path):
# # TODO
# pass
|
class OptimizerLog(object):
def __init__(self):
pass
def error(self, errorstring):
pass
def notify(self,what):
pass
|
import os
import random
import ray
from alpha_zero_general import Coach
from alpha_zero_general import DotDict
from alpha_zero_general.coach import ModelTrainer
from alpha_zero_general.coach import ReplayBuffer
from alpha_zero_general.coach import SelfPlay
from alpha_zero_general.coach import SharedStorage
from example.othello.game import OthelloGame
from example.othello.keras import OthelloNNet
args = DotDict(
{
"numIters": 2,
"numEps": 2, # Number of complete self-play games to simulate during a new iteration.
"tempThreshold": 15, #
"updateThreshold": 0.6, # During arena playoff, new neural net will be accepted if threshold or more of games are won.
"maxlenOfQueue": 10, # Number of game examples to train the neural networks.
"numMCTSSims": 2, # Number of games moves for MCTS to simulate.
"arenaCompare": 2, # Number of games to play during arena play to determine if new net will be accepted.
"cpuct": 1,
"checkpoint": "/tmp/alpha_zero_general/",
"load_model": False,
"load_folder_file": ("/tmp/alpha_zero_general/", "best.pth.tar"),
"numItersForTrainExamplesHistory": 20,
"nr_actors": 2, # Number of self play episodes executed in parallel
}
)
def test_shared_storage(local_ray):
init_weights = [0, 0]
init_revision = 1
s = SharedStorage.remote(init_weights, revision=init_revision)
assert ray.get(s.get_revision.remote()) == init_revision
assert ray.get(s.get_weights.remote()) == (init_weights, init_revision)
next_weights = [1, 1]
next_revision = ray.get(s.set_weights.remote(next_weights, 0.5, 0.2))
assert next_revision == init_revision + 1
assert ray.get(s.get_weights.remote()) == (next_weights, next_revision)
assert ray.get(s.get_infos.remote()) == {
"trained_enough": False,
"policy_loss": 0.5,
"value_loss": 0.2,
}
assert ray.get(s.get_weights.remote(revision=next_revision + 1)) == (
None,
next_revision,
)
ray.get(s.set_info.remote("trained_enough", True))
assert ray.get(s.trained_enough.remote()) is True
def test_replay_buffer(local_ray, tmpdir):
def mock_game_examples(game=1, size=10):
return [game] * size
r = ReplayBuffer.remote(games_to_use=5, folder=tmpdir)
assert ray.get(r.get_number_of_games_played.remote()) == 0
game_1 = mock_game_examples(game=1)
r.add_game_examples.remote(game_1)
assert ray.get(r.get_number_of_games_played.remote()) == 1
assert os.path.isfile(os.path.join(tmpdir, f"game_{1:08d}"))
assert ray.get(ray.get(r.get_examples.remote())) == [game_1]
for game in range(2, 7):
r.add_game_examples.remote(mock_game_examples(game=game))
assert ray.get(r.get_number_of_games_played.remote()) == 6
games = ray.get(ray.get(r.get_examples.remote()))
assert len(games) == 5
assert games[0][0] == 2
assert games[-1][0] == 6
assert os.path.isfile(os.path.join(tmpdir, f"game_{6:08d}"))
r = ReplayBuffer.remote(games_to_use=5, folder=tmpdir)
assert ray.get(r.load.remote()) == 6
games = ray.get(ray.get(r.get_examples.remote()))
assert len(games) == 5
assert games[0][0] == 2
assert games[-1][0] == 6
r = ReplayBuffer.remote(games_to_play=5, games_to_use=5, folder=tmpdir)
assert ray.get(r.load.remote()) == 6
assert ray.get(r.played_enough.remote()) is True
r = ReplayBuffer.remote(games_to_play=10, games_to_use=5, folder=tmpdir)
assert ray.get(r.load.remote()) == 6
assert ray.get(r.played_enough.remote()) is False
def test_self_play(local_ray, tmpdir):
game = OthelloGame(6)
nnet = OthelloNNet(game)
s = SharedStorage.remote(nnet.get_weights())
r = ReplayBuffer.remote(games_to_play=1, games_to_use=1, folder=tmpdir)
assert ray.get(r.get_number_of_games_played.remote()) == 0
self_play = SelfPlay.remote(r, s, game, nnet.__class__, dict(args))
ray.get(self_play.start.remote())
assert ray.get(r.get_number_of_games_played.remote()) == 1
assert ray.get(r.played_enough.remote()) is True
games = ray.get(ray.get(r.get_examples.remote()))
assert len(games) == 1
examples = games[0]
assert len(examples) > 2
board, policy, winner = examples[0]
assert isinstance(board, type(game.get_init_board()))
assert len(policy) == game.get_action_size()
assert all(0 <= value <= 1 for value in policy)
assert winner in [1, -1]
def mock_example_data(game):
board = game.get_init_board()
pi = [random.random() for _ in range(game.get_action_size())]
player = random.choice([1, -1])
return [(b, p, player) for b, p in game.get_symmetries(board, pi)]
@ray.remote
class MockedReplayBuffer(ReplayBuffer.__ray_actor_class__): # type: ignore
"""A replay buffer that behaves so that we'll go through all branches
of ModelTrainer.start()."""
played_enough_return_values = [False, False, False, True]
def played_enough(self):
"""Returns preset values useful in this test."""
return self.played_enough_return_values.pop(0)
games_played_return_values = [0, 2, 4, 8]
def get_number_of_games_played(self):
"""Returns preset values useful in this test."""
return self.games_played_return_values.pop(0)
def test_model_trainer_loop(local_ray, tmpdir):
game = OthelloGame(6)
nnet = OthelloNNet(game)
s = SharedStorage.remote(nnet.get_weights())
assert ray.get(s.get_revision.remote()) == 0
r = MockedReplayBuffer.remote(
games_to_play=4, games_to_use=4, folder=tmpdir
)
r.add_game_examples.remote(mock_example_data(game))
model_trainer = ModelTrainer.options(num_gpus=0).remote(
r, s, game, nnet.__class__, dict(args), selfplay_training_ratio=1
)
ray.get(model_trainer.start.remote())
assert ray.get(s.get_revision.remote()) > 0
assert ray.get(s.trained_enough.remote()) is True
def test_model_trainer_pit_accept_model(capsys, local_ray, tmpdir):
game = OthelloGame(6)
nnet = OthelloNNet(game)
s = SharedStorage.remote(nnet.get_weights())
assert ray.get(s.get_revision.remote()) == 0
r = ReplayBuffer.remote(games_to_play=2, games_to_use=2, folder=tmpdir)
r.add_game_examples.remote(mock_example_data(game))
# provoke model acceptance by tweaking updateThreshold to pass
custom_args = dict(args, updateThreshold=-0.1)
model_trainer = ModelTrainer.options(num_gpus=0).remote(
r, s, game, nnet.__class__, custom_args, pit_against_old_model=True
)
ray.get(model_trainer.train.remote())
assert ray.get(s.get_revision.remote()) == 1
out, _err = capsys.readouterr()
assert "PITTING AGAINST PREVIOUS VERSION" in out
assert "ACCEPTING NEW MODEL" in out
def test_model_trainer_pit_reject_model(capsys, local_ray, tmpdir):
game = OthelloGame(6)
nnet = OthelloNNet(game)
s = SharedStorage.remote(nnet.get_weights())
assert ray.get(s.get_revision.remote()) == 0
r = ReplayBuffer.remote(games_to_play=2, games_to_use=2, folder=tmpdir)
r.add_game_examples.remote(mock_example_data(game))
# provoke model rejection by tweaking updateThreshold to fail
custom_args = dict(args, updateThreshold=1.1)
model_trainer = ModelTrainer.options(num_gpus=0).remote(
r, s, game, nnet.__class__, custom_args, pit_against_old_model=True
)
ray.get(model_trainer.train.remote())
assert ray.get(s.get_revision.remote()) == 0
out, _err = capsys.readouterr()
assert "PITTING AGAINST PREVIOUS VERSION" in out
assert "REJECTING NEW MODEL" in out
def test_coach(capsys, tmpdir):
args.checkpoint = tmpdir
game = OthelloGame(6)
nnet = OthelloNNet(game)
coach = Coach(game, nnet, args)
coach.learn()
|
from django.conf import settings
from django.conf.urls import include, url
from django.core import urlresolvers
from django.utils.html import format_html, format_html_join
from django.utils.translation import ugettext_lazy as _
from wagtail.wagtailcore import hooks
from wagtail.wagtailadmin.menu import MenuItem
from wagtail.wagtailimages import urls
def register_admin_urls():
return [
url(r'^images/', include(urls)),
]
hooks.register('register_admin_urls', register_admin_urls)
def construct_main_menu(request, menu_items):
if request.user.has_perm('wagtailimages.add_image'):
menu_items.append(
MenuItem(_('Images'), urlresolvers.reverse('wagtailimages_index'), classnames='icon icon-image', order=300)
)
hooks.register('construct_main_menu', construct_main_menu)
def editor_js():
js_files = [
'wagtailimages/js/hallo-plugins/hallo-wagtailimage.js',
'wagtailimages/js/image-chooser.js',
]
js_includes = format_html_join('\n', '<script src="{0}{1}"></script>',
((settings.STATIC_URL, filename) for filename in js_files)
)
return js_includes + format_html(
"""
<script>
window.chooserUrls.imageChooser = '{0}';
registerHalloPlugin('hallowagtailimage');
</script>
""",
urlresolvers.reverse('wagtailimages_chooser')
)
hooks.register('insert_editor_js', editor_js)
|
#!/usr/bin/env python
from utils.munin.base import MuninGraph
class NBMuninGraph(MuninGraph):
@property
def graph_config(self):
return {
'graph_category' : 'NewsBlur',
'graph_title' : 'NewsBlur Feeds & Subscriptions',
'graph_vlabel' : 'Feeds & Subscribers',
'graph_args' : '-l 0',
'feeds.label': 'feeds',
'subscriptions.label': 'subscriptions',
'profiles.label': 'profiles',
'social_subscriptions.label': 'social_subscriptions',
}
def calculate_metrics(self):
from apps.rss_feeds.models import Feed
from apps.reader.models import UserSubscription
from apps.social.models import MSocialProfile, MSocialSubscription
return {
'feeds': Feed.objects.latest('pk').pk,
'subscriptions': UserSubscription.objects.latest('pk').pk,
'profiles': MSocialProfile.objects.count(),
'social_subscriptions': MSocialSubscription.objects.count(),
}
if __name__ == '__main__':
NBMuninGraph().run()
|
import datetime
import pytest
from jason import props
def test_validates():
assert (
props.Date().load(datetime.date.fromisoformat("1970-01-01")).isoformat()
== "1970-01-01"
)
def test_true_from_string():
assert props.Date().load("1970-01-01").isoformat() == "1970-01-01"
def test_from_invalid_string():
with pytest.raises(props.PropertyValidationError):
props.Date().load("nope")
def test_allow_strings_is_false():
with pytest.raises(props.PropertyValidationError):
props.Date(allow_strings=False).load("1970-01-01")
def test_nullable():
props.Date(nullable=True).load(None)
def test_not_nullable():
with pytest.raises(props.PropertyValidationError):
props.Date().load(None)
def test_wrong_type():
with pytest.raises(props.PropertyValidationError):
props.Date().load(12345)
def test_default():
    assert props.Date(default="1970-01-01").load(None).isoformat() == "1970-01-01"
|
from .base_request import BaseRequest
from .token import Token
from .settings import Settings
from . import exceptions
class Auth(object):
"""
This class implements all authentication functions for Resin Python SDK.
"""
def __init__(self):
self.base_request = BaseRequest()
self.settings = Settings()
self.token = Token()
def login(self, **credentials):
"""
This function is used for logging into Resin.io using email and password.
Args:
**credentials: credentials keyword arguments.
username (str): Resin.io email.
password (str): Password.
Returns:
            This function saves the Auth Token to Settings and returns nothing.
Raises:
LoginFailed: if the email or password is invalid.
Examples:
>>> from resin import Resin
>>> resin = Resin()
            >>> credentials = {'username': '<your email>', 'password': '<your password>'}
>>> resin.auth.login(**credentials)
(Empty Return)
"""
token = self.authenticate(**credentials)
if self.token.is_valid_token(token):
self.token.set(token)
else:
raise exceptions.LoginFailed()
def login_with_token(self, token):
"""
This function is used for logging into Resin.io using Auth Token.
Auth Token can be found in Preferences section on Resin.io Dashboard.
Args:
token (str): Auth Token.
Returns:
            This function saves the Auth Token to Settings and returns nothing.
Raises:
MalformedToken: if token is invalid.
Examples:
>>> from resin import Resin
>>> resin = Resin()
>>> auth_token = <your token>
>>> resin.auth.login_with_token(auth_token)
(Empty Return)
"""
if self.token.is_valid_token(token):
self.token.set(token)
else:
raise exceptions.MalformedToken(token)
def who_am_i(self):
"""
        This function retrieves the username of the logged in user.
Returns:
str: username.
Raises:
NotLoggedIn: if there is no user logged in.
Examples:
>>> resin.auth.who_am_i()
u'g_trong_nghia_nguyen'
"""
return self.token.get_username()
def authenticate(self, **credentials):
"""
        This function authenticates the provided credentials.
        You should use Auth.login when possible, as it takes care of saving the Auth Token and username as well.
Args:
**credentials: credentials keyword arguments.
username (str): Resin.io username.
password (str): Password.
Returns:
            str: Auth Token.
Raises:
LoginFailed: if the username or password is invalid.
Examples:
>>> resin.auth.authenticate(username='<your email>', password='<your password>')
'eyJ0eXAiOiJKV1QiLCJhbGciOiJIUzI1NiJ9.eyJpZCI6NTM5NywidXNlcm5hbWUiOiJnX3Ryb25nX25naGlhX25ndXllbiIsImVtYWlsIjoicmVzaW5weXRob25zZGt0ZXN0QGdtYWlsLmNvbSIsInNvY2lhbF9zZXJ2aWNlX2FjY291bnQiOlt7ImNyZWF0ZWRfYXQiOiIyMDE1LTExLTIzVDAzOjMwOjE0LjU3MloiLCJpZCI6MTE2NiwidXNlciI6eyJfX2RlZmVycmVkIjp7InVyaSI6Ii9ld2EvdXNlcig1Mzk3KSJ9LCJfX2lkIjo1Mzk3fSwicHJvdmlkZXIiOiJnb29nbGUiLCJyZW1vdGVfaWQiOiIxMDE4OTMzNzc5ODQ3NDg1NDMwMDIiLCJkaXNwbGF5X25hbWUiOiJUcm9uZyBOZ2hpYSBOZ3V5ZW4iLCJfX21ldGFkYXRhIjp7InVyaSI6Ii9ld2Evc29jaWFsX3NlcnZpY2VfYWNjb3VudCgxMTY2KSIsInR5cGUiOiIifX1dLCJoYXNfZGlzYWJsZWRfbmV3c2xldHRlciI6ZmFsc2UsImp3dF9zZWNyZXQiOiI0UDVTQzZGV1pIVU5JR0NDT1dJQUtST0tST0RMUTRNVSIsImhhc1Bhc3N3b3JkU2V0Ijp0cnVlLCJuZWVkc1Bhc3N3b3JkUmVzZXQiOmZhbHNlLCJwdWJsaWNfa2V5Ijp0cnVlLCJmZWF0dXJlcyI6W10sImludGVyY29tVXNlck5hbWUiOiJnX3Ryb25nX25naGlhX25ndXllbiIsImludGVyY29tVXNlckhhc2giOiI5YTM0NmUwZTgzNjk0MzYxODU3MTdjNWRhZTZkZWZhZDdiYmM4YzZkOGNlMzgxYjhhYTY5YWRjMTRhYWZiNGU0IiwicGVybWlzc2lvbnMiOltdLCJpYXQiOjE0NDgyNTYzMDYsImV4cCI6MTQ0ODg2MTEwNn0.U9lfEpPHBRvGQSayASE-glI-lQtAjyIFYd00uXOUzLI'
"""
return self.base_request.request(
'login_', 'POST', data=credentials,
endpoint=self.settings.get('api_endpoint'), auth=False
)
def is_logged_in(self):
"""
This function checks if you're logged in
Returns:
bool: True if logged in, False otherwise.
Examples:
# Check if user logged in.
>>> if resin.auth.is_logged_in():
... print('You are logged in!')
... else:
... print('You are not logged in!')
"""
try:
self.base_request.request(
'/whoami', 'GET', endpoint=self.settings.get('api_endpoint')
)
return True
except exceptions.RequestError:
return False
def get_token(self):
"""
This function retrieves Auth Token.
Returns:
str: Auth Token.
Raises:
InvalidOption: if not logged in and there is no token in Settings.
Examples:
# If you are logged in.
>>> resin.auth.get_token()
'eyJ0eXAiOiJKV1QiLCJhbGciOiJIUzI1NiJ9.eyJpZCI6NTM5NywidXNlcm5hbWUiOiJnX3Ryb25nX25naGlhX25ndXllbiIsImVtYWlsIjoicmVzaW5weXRob25zZGt0ZXN0QGdtYWlsLmNvbSIsInNvY2lhbF9zZXJ2aWNlX2FjY291bnQiOlt7ImNyZWF0ZWRfYXQiOiIyMDE1LTExLTIzVDAzOjMwOjE0LjU3MloiLCJpZCI6MTE2NiwidXNlciI6eyJfX2RlZmVycmVkIjp7InVyaSI6Ii9ld2EvdXNlcig1Mzk3KSJ9LCJfX2lkIjo1Mzk3fSwicHJvdmlkZXIiOiJnb29nbGUiLCJyZW1vdGVfaWQiOiIxMDE4OTMzNzc5ODQ3NDg1NDMwMDIiLCJkaXNwbGF5X25hbWUiOiJUcm9uZyBOZ2hpYSBOZ3V5ZW4iLCJfX21ldGFkYXRhIjp7InVyaSI6Ii9ld2Evc29jaWFsX3NlcnZpY2VfYWNjb3VudCgxMTY2KSIsInR5cGUiOiIifX1dLCJoYXNfZGlzYWJsZWRfbmV3c2xldHRlciI6ZmFsc2UsImp3dF9zZWNyZXQiOiI0UDVTQzZGV1pIVU5JR0NDT1dJQUtST0tST0RMUTRNVSIsImhhc1Bhc3N3b3JkU2V0Ijp0cnVlLCJuZWVkc1Bhc3N3b3JkUmVzZXQiOmZhbHNlLCJwdWJsaWNfa2V5Ijp0cnVlLCJmZWF0dXJlcyI6W10sImludGVyY29tVXNlck5hbWUiOiJnX3Ryb25nX25naGlhX25ndXllbiIsImludGVyY29tVXNlckhhc2giOiI5YTM0NmUwZTgzNjk0MzYxODU3MTdjNWRhZTZkZWZhZDdiYmM4YzZkOGNlMzgxYjhhYTY5YWRjMTRhYWZiNGU0IiwicGVybWlzc2lvbnMiOltdLCJpYXQiOjE0NDgyNTY2ODMsImV4cCI6MTQ0ODg2MTQ4M30.oqq4DUI4cTbhzYznSwODZ_4zLOeGiJYuZRn82gTfQ6o'
"""
return self.token.get()
def get_user_id(self):
"""
This function retrieves current logged in user's id.
Returns:
str: user id.
Raises:
InvalidOption: if not logged in.
Examples:
# If you are logged in.
>>> resin.auth.get_user_id()
5397
"""
return self.token.get_user_id()
def get_email(self):
"""
        This function retrieves the current logged in user's email.
Returns:
str: user email.
Raises:
InvalidOption: if not logged in.
Examples:
# If you are logged in.
>>> resin.auth.get_email()
u'[email protected]'
"""
return self.token.get_email()
def log_out(self):
"""
This function is used for logging out from Resin.io.
Returns:
bool: True if successful, False otherwise.
Examples:
# If you are logged in.
>>> resin.auth.log_out()
True
"""
return self.token.remove()
def register(self, **credentials):
"""
This function is used for registering to Resin.io.
Args:
**credentials: credentials keyword arguments.
email (str): email to register.
password (str): Password.
Returns:
str: Auth Token for new account.
Raises:
RequestError: if error occurs during registration.
Examples:
>>> credentials = {'email': '<your email>', 'password': '<your password>'}
>>> resin.auth.register(**credentials)
'eyJ0eXAiOiJKV1QiLCJhbGciOiJIUzI1NiJ9.eyJpZCI6NTM5OCwidXNlcm5hbWUiOiJ0ZXN0MjcxMCIsImVtYWlsIjoidGVzdDI3MTBAZ21haWwuY29tIiwic29jaWFsX3NlcnZpY2VfYWNjb3VudCI6bnVsbCwiaGFzX2Rpc2FibGVkX25ld3NsZXR0ZXIiOmZhbHNlLCJqd3Rfc2VjcmV0IjoiQlJXR0ZIVUgzNVBKT0VKTVRSSVo2MjdINjVKVkJKWDYiLCJoYXNQYXNzd29yZFNldCI6dHJ1ZSwibmVlZHNQYXNzd29yZFJlc2V0IjpmYWxzZSwicHVibGljX2tleSI6ZmFsc2UsImZlYXR1cmVzIjpbXSwiaW50ZXJjb21Vc2VyTmFtZSI6InRlc3QyNzEwIiwiaW50ZXJjb21Vc2VySGFzaCI6IjNiYTRhZDRkZjk4MDQ1OTc1YmU2ZGUwYWJmNjFiYjRmYWY4ZmEzYTljZWI0YzE4Y2QxOGU1NmViNmI1NzkxZDAiLCJwZXJtaXNzaW9ucyI6W10sImlhdCI6MTQ0ODI1NzgyOCwiZXhwIjoxNDQ4ODYyNjI4fQ.chhf6deZ9BNDMmPr1Hm-SlRoWkK7t_4cktAPo12aCoE'
"""
return self.base_request.request(
'user/register', 'POST', data=credentials,
endpoint=self.settings.get('api_endpoint'), auth=False
)
|
# This is a generated file! Please edit source .ksy file and use kaitai-struct-compiler to rebuild
from pkg_resources import parse_version
import kaitaistruct
from kaitaistruct import KaitaiStruct, KaitaiStream, BytesIO
if parse_version(kaitaistruct.__version__) < parse_version('0.9'):
raise Exception("Incompatible Kaitai Struct Python API: 0.9 or later is required, but you have %s" % (kaitaistruct.__version__))
class SwitchIntegers2(KaitaiStruct):
def __init__(self, _io, _parent=None, _root=None):
self._io = _io
self._parent = _parent
self._root = _root if _root else self
self._read()
def _read(self):
self.code = self._io.read_u1()
_on = self.code
if _on == 1:
self.len = self._io.read_u1()
elif _on == 2:
self.len = self._io.read_u2le()
elif _on == 4:
self.len = self._io.read_u4le()
elif _on == 8:
self.len = self._io.read_u8le()
self.ham = self._io.read_bytes(self.len)
if self.len > 3:
self.padding = self._io.read_u1()
@property
def len_mod_str(self):
if hasattr(self, '_m_len_mod_str'):
return self._m_len_mod_str if hasattr(self, '_m_len_mod_str') else None
self._m_len_mod_str = str(((self.len * 2) - 1))
return self._m_len_mod_str if hasattr(self, '_m_len_mod_str') else None
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
from setuptools import setup, find_packages
setup(name='ibmBluegroup',
version='1.05',
description="IBM Bluegroup API",
packages=find_packages(),
keywords='Bluegroup',
author='ThomasIBM',
author_email='[email protected]',
license="Apache License, Version 2.0",
url='https://github.com/ThomasIBM/ibmBluegroup',
include_package_data=True,
zip_safe=False,
install_requires=[
'httplib2',
],
) |
import sys
from base64 import b64encode, b64decode
class base_conv(object):
def __init__(self):
self.chars = 'abcdefghijklmnopqrstuvwxyz'
self.base = len(self.chars)
self.splitter = "!"
self.debug = False
@staticmethod
def base_alpha_encode(chars, base, binary):
encoded = ''
while int(binary) > 0:
binary, remainder = divmod(binary, base)
encoded = chars[remainder] + encoded
return encoded
@staticmethod
def base_alpha_decode(chars, base, charset):
i = 1
res = 0
for char in charset:
res += chars.index(char) * i
i *= base
return chr(res)
def to_base(self, string):
res = ''
for char in string:
res += self.base_alpha_encode(self.chars, self.base, ord(char)) + self.splitter
return res
def from_base(self, enc):
res = ''
char_list = enc.split(self.splitter)
char_list.pop()
for word in char_list:
res += self.base_alpha_decode(self.chars, self.base, word[::-1])
return res
class Enigma(object):
def __init__(self):
self.rotor = []
self.chars = 'abcdefghijklmnopqrstuvwxyz'
self.rotor.append('ekmflgdqvzntowyhxuspaibrcj') # rotor I
self.rotor.append('ajdksiruxblhwtmcqgznpyfvoe') # rotor II
self.rotor.append('bdfhjlcprtxvznyeiwgakmusqo') # rotor III
self.reflector = 'yruhqsldpxngokmiebfzcwvjat' # reflector b
# For the sake of simplification we will use
# knock-ups on each 26th symbol reached; right-to-left
self.rotor_index = [0, 0, 0]
self.decrypt_index = [0, 0, 0]
@staticmethod
def rotate_index(rotor_index):
rotor_index[2] += 1
if rotor_index[2] == 26:
rotor_index[2] = 0
rotor_index[1] += 1
if rotor_index[1] == 26:
rotor_index[1] = 0
rotor_index[0] += 1
if rotor_index[0] == 26:
rotor_index[0] = 0
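    # Illustrative note (my own addition): with knock-ups on every 26th step,
    # rotate_index behaves like an odometer, e.g. [0, 0, 25] -> [0, 1, 0]
    # and [0, 25, 25] -> [1, 0, 0] after a single call.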
def crypt(self, char, splitter, rotor_index):
if char == splitter:
return splitter
self.rotate_index(rotor_index)
input, output = [], []
input.append((self.chars.index(char) + rotor_index[2]) % len(self.chars))
output.append(self.rotor[0][input[-1]])
input.append((self.chars.index(output[-1]) + rotor_index[1] - rotor_index[2]) % len(self.chars))
output.append(self.rotor[1][input[-1]])
input.append((self.chars.index(output[-1]) + rotor_index[0] - rotor_index[1]) % len(self.chars))
output.append(self.rotor[2][input[-1]])
input.append((self.chars.index(output[-1]) - rotor_index[0]) % len(self.chars))
output.append(self.reflector[input[-1]])
input.append((self.chars.index(output[-1]) + rotor_index[0]) % len(self.chars))
current_char = self.chars[input[-1]]
output.append(self.rotor[2].index(current_char))
input.append((output[-1] + rotor_index[1] - rotor_index[0]) % len(self.chars))
current_char = self.chars[input[-1]]
output.append(self.rotor[1].index(current_char))
input.append((output[-1] + rotor_index[2] - rotor_index[1]) % len(self.chars))
        current_char = self.chars[input[-1]]
        output.append(self.rotor[0].index(current_char))
input.append((output[-1] - rotor_index[2]) % len(self.chars))
current_char = self.chars[input[-1]]
output.append(self.chars.index(current_char))
return self.chars[output[-1]]
def crypt_string(self, string, splitter):
result = ""
for char in string:
result += self.crypt(char, splitter, self.rotor_index)
return result
def decrypt_string(self, string, splitter):
result = ""
for char in string:
result += self.crypt(char, splitter, self.decrypt_index)
return result
if __name__ == '__main__':
str = "Hello World! \U00012300"
print("Source : ", str)
basis = base_conv()
enigma = Enigma()
enc = basis.to_base(str)
print("Encoded string: ", enc, "\nLength: ", len(enc))
enigmed = enigma.crypt_string(enc, basis.splitter)
print("Crypted string: ", enigmed)
de_enigmed = enigma.decrypt_string(enigmed, basis.splitter)
print("Decrypted string: ", de_enigmed)
decode = basis.from_base(de_enigmed)
print("Decoded: ", decode)
'''str = "Hello World!\n"
print("Source : ", str)
enigma = Enigma()
enigmed = enigma.crypt_string(str, '!')
print("Crypted string: ", enigmed)
de_enigmed = enigma.decrypt_string(enigmed, '!')
print("Decrypted string: ", de_enigmed)''' |
# -*- coding: utf-8 -*-
from openprocurement.auctions.core.utils import (
opresource,
)
from openprocurement.auctions.insider.views.other.question import (
AuctionQuestionResource,
)
@opresource(name='dgfFinancialAssets:Auction Questions',
collection_path='/auctions/{auction_id}/questions',
path='/auctions/{auction_id}/questions/{question_id}',
auctionsprocurementMethodType="dgfFinancialAssets",
description="dgfFinancialAssets:Auction questions")
class FinancialAuctionQuestionResource(AuctionQuestionResource):
pass
|
import pafy
import tkinter as tk
root = tk.Tk()
root.title("youtube-py-dl")
canvas = tk.Canvas(root, width=400, height=200, relief='raised')
canvas.pack()
label = tk.Label(root, text='Enter YouTube URL')
label.config(font=('verdana', 12))
canvas.create_window(200, 100, window=label)
entry = tk.Entry(root)
canvas.create_window(200, 140, window=entry)
def download():
video = pafy.new(entry.get())
best = video.getbest()
best.download()
button = tk.Button(text='Download', command=download, bg='white', fg='black', font=('verdana', 12))
canvas.create_window(200, 180, window=button)
root.mainloop() |
import psycopg2
import json
#
from bridgebots import Deal
class HandDao:
def __init__(self):
self.connection = psycopg2.connect("dbname=bridge user=bridgebot host=localhost password=bidslam")
def close(self):
self.connection.close()
def get_processed_files(self):
with self.connection.cursor() as cursor:
cursor.execute("SELECT file_name FROM recap_files")
processed_files_tuples = cursor.fetchall()
return [x[0] for x in processed_files_tuples]
    def record_processed_file(self, file_name):
with self.connection.cursor() as cursor:
cursor.execute("INSERT INTO recap_files VALUES (%s)", (file_name,))
self.connection.commit()
def write_hand(self, hand):
print("Writing hand")
with self.connection.cursor() as cursor:
north = hand.north_hand
east = hand.east_hand
south = hand.south_hand
west = hand.west_hand
try:
cursor.execute(
"INSERT INTO hands (dealer, ew_vulnerable, ns_vulnerable, "
"nspades, nhearts, ndiamonds, nclubs, "
"espades, ehearts, ediamonds, eclubs, "
"sspades, shearts, sdiamonds, sclubs, "
"wspades, whearts, wdiamonds, wclubs) "
"VALUES (%s, %s, %s, "
"%s, %s, %s, %s, "
"%s, %s, %s, %s, "
"%s, %s, %s, %s, "
"%s, %s, %s, %s);",
(
hand.dealer,
hand.ew_vulnerable,
hand.ns_vulnerable,
north.spades,
north.hearts,
north.diamonds,
north.clubs,
east.spades,
east.hearts,
east.diamonds,
east.clubs,
south.spades,
south.hearts,
south.diamonds,
south.clubs,
west.spades,
west.hearts,
west.diamonds,
west.clubs,
),
)
self.connection.commit()
except Exception as e:
print(e)
def write_double_dummy(self, deal: Deal, analysis: json):
pass
hand_dao = HandDao()
hand_dao.record_processed_file("test_file.txt")
print(hand_dao.get_processed_files())
|
import os
import sys
from glob import glob
sys.path.append(os.path.dirname(os.path.realpath(os.path.dirname(__file__))))
import torch
import torch.backends.cudnn as cudnn
from torch.utils.tensorboard import SummaryWriter
import numpy as np
from arguments import get_args
from dataloader import get_dataloader
from relationnet import RelationNetwork, Embedding
from utils.train_utils import AverageMeter, save_checkpoint, plot_classes_preds
from utils.common import split_support_query_set
best_acc1 = 0
device = 'cuda' if torch.cuda.is_available() else 'cpu'
args = get_args()
writer = SummaryWriter(args.log_dir)
def main():
global args, best_acc1, device
# Init seed
np.random.seed(args.manual_seed)
torch.manual_seed(args.manual_seed)
torch.cuda.manual_seed(args.manual_seed)
if args.dataset == 'miniImageNet':
train_loader, val_loader = get_dataloader(args, 'train', 'val')
in_channel = 3
feature_dim = 64 * 3 * 3
elif args.dataset == 'omniglot':
train_loader, val_loader = get_dataloader(args, 'trainval', 'test')
in_channel = 1
feature_dim = 64
else:
raise ValueError(f"Dataset {args.dataset} is not supported")
embedding = Embedding(in_channel).to(device)
model = RelationNetwork(feature_dim).to(device)
criterion = torch.nn.MSELoss()
embed_optimizer = torch.optim.Adam(embedding.parameters(), args.lr)
model_optimizer = torch.optim.Adam(model.parameters(), args.lr)
cudnn.benchmark = True
if args.resume:
try:
checkpoint = torch.load(sorted(glob(f'{args.log_dir}/checkpoint_*.pth'), key=len)[-1])
except Exception:
checkpoint = torch.load(args.log_dir + '/model_best.pth')
model.load_state_dict(checkpoint['model_state_dict'])
embedding.load_state_dict(checkpoint['embedding_state_dict'])
model_optimizer.load_state_dict(checkpoint['model_optimizer_state_dict'])
embed_optimizer.load_state_dict(checkpoint['embed_optimizer_state_dict'])
start_epoch = checkpoint['epoch']
best_acc1 = checkpoint['best_acc1']
print(f"load checkpoint {args.exp_name}")
else:
start_epoch = 1
embed_scheduler = torch.optim.lr_scheduler.MultiplicativeLR(optimizer=embed_optimizer,
lr_lambda=lambda epoch: 0.5)
model_scheduler = torch.optim.lr_scheduler.MultiplicativeLR(optimizer=model_optimizer,
lr_lambda=lambda epoch: 0.5)
for _ in range(start_epoch):
embed_scheduler.step()
model_scheduler.step()
print(f"model parameter : {sum(p.numel() for p in model.parameters() if p.requires_grad)}")
for epoch in range(start_epoch, args.epochs + 1):
train_loss = train(train_loader, model, embedding, model_optimizer, embed_optimizer, criterion, epoch)
is_test = False if epoch % args.test_iter else True
if is_test or epoch == args.epochs or epoch == 1:
val_loss, acc1 = validate(val_loader, model, embedding, criterion, epoch)
if acc1 >= best_acc1:
is_best = True
best_acc1 = acc1
else:
is_best = False
save_checkpoint({
'model_state_dict': model.state_dict(),
'embedding_state_dict': embedding.state_dict(),
'model_optimizer_state_dict': model_optimizer.state_dict(),
'embed_optimizer_state_dict': embed_optimizer.state_dict(),
'best_acc1': best_acc1,
'epoch': epoch,
}, is_best, args)
if is_best:
writer.add_scalar("BestAcc", acc1, epoch)
print(f"[{epoch}/{args.epochs}] {train_loss:.3f}, {val_loss:.3f}, {acc1:.3f}, # {best_acc1:.3f}")
else:
print(f"[{epoch}/{args.epochs}] {train_loss:.3f}")
embed_scheduler.step()
model_scheduler.step()
writer.close()
def train(train_loader, model, embedding, model_optimizer, embed_optimizer, criterion, epoch):
losses = AverageMeter()
num_class = args.classes_per_it_tr
num_support = args.num_support_tr
num_query = args.num_query_tr
total_epoch = len(train_loader) * (epoch - 1)
model.train()
embedding.train()
for i, data in enumerate(train_loader):
x, y = data[0].to(device), data[1].to(device)
x_support, x_query, y_support, y_query = split_support_query_set(x, y, num_class, num_support, num_query)
support_vector = embedding(x_support)
query_vector = embedding(x_query)
_size = support_vector.size()
support_vector = support_vector.view(num_class, num_support, _size[1], _size[2], _size[3]).sum(dim=1)
support_vector = support_vector.repeat(num_class * num_query, 1, 1, 1)
query_vector = torch.stack([x for x in query_vector for _ in range(num_class)])
_concat = torch.cat((support_vector, query_vector), dim=1)
y_pred = model(_concat).view(-1, num_class)
y_one_hot = torch.zeros(num_query * num_class, num_class).to(device).scatter_(1, y_query.unsqueeze(1), 1)
loss = criterion(y_pred, y_one_hot)
losses.update(loss.item(), y_pred.size(0))
model_optimizer.zero_grad()
embed_optimizer.zero_grad()
loss.backward()
model_optimizer.step()
embed_optimizer.step()
if i == 0:
y_hat = y_pred.argmax(1)
writer.add_figure('y_prediction vs. y/Train',
plot_classes_preds(y_hat, y_pred, [x_support, x_query],
[y_support, y_query], num_class, num_support, num_query),
global_step=total_epoch)
writer.add_scalar("Loss/Train", loss.item(), total_epoch + i)
return losses.avg
@torch.no_grad()
def validate(val_loader, model, embedding, criterion, epoch):
losses = AverageMeter()
accuracies = AverageMeter()
num_class = args.classes_per_it_val
num_support = args.num_support_val
num_query = args.num_query_val
total_epoch = len(val_loader) * (epoch - 1)
model.eval()
embedding.eval()
for i, data in enumerate(val_loader):
x, y = data[0].to(device), data[1].to(device)
x_support, x_query, y_support, y_query = split_support_query_set(x, y, num_class, num_support, num_query)
support_vector = embedding(x_support)
query_vector = embedding(x_query)
_size = support_vector.size()
support_vector = support_vector.view(num_class, num_support, _size[1], _size[2], _size[3]).sum(dim=1)
support_vector = support_vector.repeat(num_class * num_query, 1, 1, 1)
query_vector = torch.stack([x for x in query_vector for _ in range(num_class)])
_concat = torch.cat((support_vector, query_vector), dim=1)
y_pred = model(_concat).view(-1, num_class)
y_one_hot = torch.zeros(num_query * num_class, num_class).to(device).scatter_(1, y_query.unsqueeze(1), 1)
loss = criterion(y_pred, y_one_hot)
losses.update(loss.item(), y_pred.size(0))
y_hat = y_pred.argmax(1)
accuracy = y_hat.eq(y_query).float().mean()
accuracies.update(accuracy)
if i == 0:
y_hat = y_pred.argmax(1)
writer.add_figure('y_prediction vs. y/Val',
plot_classes_preds(y_hat, y_pred, [x_support, x_query],
[y_support, y_query], num_class, num_support, num_query),
global_step=total_epoch)
writer.add_scalar("Loss/Val", loss.item(), total_epoch + i)
writer.add_scalar("Acc/Val", accuracy, total_epoch + i)
return losses.avg, accuracies.avg
if __name__ == '__main__':
main()
|
from .problem import Problem
from .submission import Submission
from .user import User
|
from django.test.testcases import TestCase
from dbfiles.models import DBFile
class DBFileTests(TestCase):
def test_db_file_save(self):
content = b"Hello World!"
name = "my-files/hello-world.txt"
db_file = DBFile.objects.create(content=content, name=name)
self.assertEqual(db_file.content, content)
self.assertEqual(db_file.name, name)
self.assertEqual(db_file.size, len(content))
self.assertTrue(db_file.created_on)
self.assertTrue(db_file.updated_on)
self.assertEqual(str(db_file), name)
|
# LIBTBX_SET_DISPATCHER_NAME dxtbx.show_mask_info
import argparse
import sys
import dxtbx.util
from dxtbx.model.experiment_list import ExperimentListFactory
def run(args=None):
dxtbx.util.encode_output_as_utf8()
parser = argparse.ArgumentParser()
parser.add_argument("filenames", metavar="IMAGE", nargs="+")
options = parser.parse_args(args)
try:
el = ExperimentListFactory.from_filenames(options.filenames)
except FileNotFoundError as e:
sys.exit(str(e))
dxtbx.util.show_mask_info(el)
if __name__ == "__main__":
run()
|
# coding: utf-8
# In[ ]:
# Exercise 1: text encryption and decryption (first review ASCII codes and the code table; see Wikipedia or Baidu Baike)
# Input: a txt file (assume it contains only alphabetic English words separated by single spaces, with words at most 10 letters long)
# Encryption: take the length n of each word, randomly generate a 9-digit number, and concatenate n-1 with that
# 9-digit number to form a 10-digit number used as the key. Shift each letter of the word backward by the key digit
# at the corresponding position (e.g. if a key digit is 2 and the letter at that position is a, it is encrypted as c;
# if the shift passes z, wrap around to a and keep shifting). For words shorter than 10 letters, pad the shifted word
# with random letters up to 10 characters, producing encrypted text made of 10-letter words separated by single spaces,
# and write it to a file.
# Decryption: given that text file and the key (a 10-digit number), recover the original text.
# (Hint: use the ord() and chr() functions. ord(x) returns the ASCII code of character x, and chr(n) returns the
# character corresponding to integer n (an ASCII code). For example, ord('a') is 97 and chr(97) is 'a', since the
# ASCII code of the letter a is 97.)
fh = open(r'd:\temp\words.txt')
text = fh.read()
fh.close()
print(len(text))
print(text)
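# A minimal sketch of one possible reading of the exercise above (assumptions:
# per-word keys, lower-case input, shifts wrap around within 'a'-'z'; the
# function names are mine). This is an illustration, not the official solution.
import random
import string

def encrypt_word(word):
    n = len(word)                       # word length, assumed to be 1..10
    key = str(n - 1) + ''.join(random.choice(string.digits) for _ in range(9))
    shifted = [chr((ord(c) - ord('a') + int(k)) % 26 + ord('a'))
               for c, k in zip(word, key)]
    padding = [random.choice(string.ascii_lowercase) for _ in range(10 - n)]
    return ''.join(shifted + padding), key

def decrypt_word(cipher, key):
    n = int(key[0]) + 1                 # the first key digit stores n-1
    return ''.join(chr((ord(c) - ord('a') - int(k)) % 26 + ord('a'))
                   for c, k in zip(cipher[:n], key[:n]))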
|
#!/usr/bin/env python
# Copyright 2018 Xiaohui Zhang
# Apache 2.0.
from __future__ import print_function
from collections import defaultdict
import argparse
import sys
import math
def GetArgs():
parser = argparse.ArgumentParser(
description = "Use a greedy framework to select pronunciation candidates"
"from three sources: a reference lexicon, G2P lexicon and phonetic-decoding"
"(PD) lexicon. Basically, this script implements the Alg. 1 in the paper:"
"Acoustic data-driven lexicon learning based on a greedy pronunciation "
"selection framework, by X. Zhang, V. Mahonar, D. Povey and S. Khudanpur,"
"Interspeech 2017. The inputs are an arc-stats file, containing "
"acoustic evidence (tau_{uwb} in the paper) and three source lexicons "
"(phonetic-decoding(PD)/G2P/ref). The outputs is the learned lexicon for"
"all words in the arc_stats (acoustic evidence) file.",
epilog = "See subtools/kaldi/steps/dict/learn_lexicon_greedy.sh for example.")
parser.add_argument("--alpha", type = str, default = "0,0,0",
help = "Scaling factors for the likelihood reduction threshold."
"of three pronunciaiton candidate sources: phonetic-decoding (PD),"
"G2P and reference. The valid range of each dimension is [0, 1], and"
"a large value means we prune pronunciations from this source more"
"aggressively. Setting a dimension to zero means we never want to remove"
"pronunciaiton from that source. See Section 4.3 in the paper for details.")
parser.add_argument("--beta", type = str, default = "0,0,0",
help = "smoothing factors for the likelihood reduction term."
"of three pronunciaiton candidate sources: phonetic-decoding (PD),"
"G2P and reference. The valid range of each dimension is [0, 100], and"
"a large value means we prune pronunciations from this source more"
"aggressively. See Section 4.3 in the paper for details.")
parser.add_argument("--delta", type = float, default = 0.000000001,
help = "Floor value of the pronunciation posterior statistics."
"The valid range is (0, 0.01),"
"See Section 3 in the paper for details.")
parser.add_argument("silence_phones_file", metavar = "<silphone-file>", type = str,
help = "File containing a list of silence phones.")
parser.add_argument("arc_stats_file", metavar = "<arc-stats-file>", type = str,
help = "File containing word-pronunciation statistics obtained from lattices; "
"each line must be <word> <utt-id> <start-frame> <count> <phones>")
parser.add_argument("word_counts_file", metavar = "<counts-file>", type = str,
help = "File containing word counts in acoustic training data; "
"each line must be <word> <count>.")
parser.add_argument("ref_lexicon", metavar = "<reference-lexicon>", type = str,
help = "The reference lexicon (most probably hand-derived)."
"Each line must be <word> <phones>")
parser.add_argument("g2p_lexicon", metavar = "<g2p-expanded-lexicon>", type = str,
help = "Candidate ronouciations from G2P results."
"Each line must be <word> <phones>")
parser.add_argument("pd_lexicon", metavar = "<phonetic-decoding-lexicon>", type = str,
help = "Candidate ronouciations from phonetic decoding results."
"Each line must be <word> <phones>")
parser.add_argument("learned_lexicon", metavar = "<learned-lexicon>", type = str,
help = "Learned lexicon.")
print (' '.join(sys.argv), file=sys.stderr)
args = parser.parse_args()
args = CheckArgs(args)
return args
def CheckArgs(args):
args.silence_phones_file_handle = open(args.silence_phones_file)
if args.arc_stats_file == "-":
args.arc_stats_file_handle = sys.stdin
else:
args.arc_stats_file_handle = open(args.arc_stats_file)
args.word_counts_file_handle = open(args.word_counts_file)
args.ref_lexicon_handle = open(args.ref_lexicon)
args.g2p_lexicon_handle = open(args.g2p_lexicon)
args.pd_lexicon_handle = open(args.pd_lexicon)
args.learned_lexicon_handle = open(args.learned_lexicon, "w")
alpha = args.alpha.strip().split(',')
if len(alpha) != 3:
raise Exception('Invalid alpha ', args.alpha)
for i in range(0,3):
if float(alpha[i]) < 0 or float(alpha[i]) > 1:
raise Exception('alpha ', alpha[i],
' is invalid, it must be within [0, 1].')
if float(alpha[i]) == 0:
alpha[i] = -1e-3
# The absolute likelihood loss (search for loss_abs) is supposed to be positive.
# But it could be negative near zero because of numerical precision limit.
# In this case, even if alpha is set to be zero, which means we never want to
# remove pronunciation from that source, the quality score (search for q_b)
# could still be negative, which means this pron could be potentially removed.
# To prevent this, we set alpha as a negative value near zero to ensure
# q_b is always positive.
args.alpha = [float(alpha[0]), float(alpha[1]), float(alpha[2])]
print("[alpha_{pd}, alpha_{g2p}, alpha_{ref}] is: ", args.alpha)
beta = args.beta.strip().split(',')
if len(beta) != 3:
raise Exception('Invalid beta ', args.beta)
for i in range(0,3):
if float(beta[i]) < 0 or float(beta[i]) > 100:
raise Exception('beta ', beta[i],
' is invalid, it must be within [0, 100].')
args.beta = [float(beta[0]), float(beta[1]), float(beta[2])]
print("[beta_{pd}, beta_{g2p}, beta_{ref}] is: ", args.beta)
if args.delta <= 0 or args.delta > 0.01:
raise Exception('delta ', args.delta, ' is invalid, it must be within'
'(0, 0.01).')
print("delta is: ", args.delta)
return args
def ReadArcStats(arc_stats_file_handle):
stats = defaultdict(lambda : defaultdict(dict))
stats_summed = defaultdict(float)
for line in arc_stats_file_handle.readlines():
splits = line.strip().split()
if (len(splits) == 0):
continue
if (len(splits) < 5):
raise Exception('Invalid format of line ' + line
+ ' in arc stats file.')
utt = splits[1]
start_frame = int(splits[2])
word = splits[0]
count = float(splits[3])
phones = splits[4:]
phones = ' '.join(phones)
stats[word][(utt, start_frame)][phones] = count
stats_summed[(word, phones)] += count
return stats, stats_summed
def ReadWordCounts(word_counts_file_handle):
counts = {}
for line in word_counts_file_handle.readlines():
splits = line.strip().split()
if len(splits) < 2:
raise Exception('Invalid format of line ' + line
+ ' in counts file.')
word = splits[0]
count = int(splits[1])
counts[word] = count
return counts
def ReadLexicon(args, lexicon_file_handle, counts):
# we're skipping any word not in counts (not seen in training data),
# because we're only learning prons for words that have acoustic examples.
lexicon = defaultdict(set)
for line in lexicon_file_handle.readlines():
splits = line.strip().split()
if len(splits) == 0:
continue
if len(splits) < 2:
raise Exception('Invalid format of line ' + line
+ ' in lexicon file.')
word = splits[0]
if word not in counts:
continue
phones = ' '.join(splits[1:])
lexicon[word].add(phones)
return lexicon
def FilterPhoneticDecodingLexicon(args, pd_lexicon):
# We want to remove all candidates which contain silence phones
silphones = set()
for line in args.silence_phones_file_handle:
silphones.add(line.strip())
rejected_candidates = set()
for word, prons in pd_lexicon.items():
for pron in prons:
for phone in pron.split():
if phone in silphones:
rejected_candidates.add((word, pron))
break
for word, pron in rejected_candidates:
pd_lexicon[word].remove(pron)
return pd_lexicon
# One iteration of Expectation-Maximization computation (Eq. 3-4 in the paper).
def OneEMIter(args, word, stats, prons, pron_probs, debug=False):
prob_acc = [0.0 for i in range(len(prons[word]))]
s = sum(pron_probs)
for i in range(len(pron_probs)):
pron_probs[i] = pron_probs[i] / s
log_like = 0.0
for (utt, start_frame) in stats[word]:
prob = []
soft_counts = []
for i in range(len(prons[word])):
phones = prons[word][i]
soft_count = stats[word][(utt, start_frame)].get(phones, 0)
if soft_count < args.delta:
soft_count = args.delta
soft_counts.append(soft_count)
prob = [i[0] * i[1] for i in zip(soft_counts, pron_probs)]
for i in range(len(prons[word])):
prob_acc[i] += prob[i] / sum(prob)
log_like += math.log(sum(prob))
pron_probs = [1.0 / float(len(stats[word])) * p for p in prob_acc]
log_like = 1.0 / float(len(stats[word])) * log_like
if debug:
print("Log_like of the word: ", log_like, "pron probs: ", pron_probs)
return pron_probs, log_like
def SelectPronsGreedy(args, stats, counts, ref_lexicon, g2p_lexicon, pd_lexicon, dianostic_info=False):
prons = defaultdict(list) # Put all possible prons from three source lexicons into this dictionary
src = {} # Source of each (word, pron) pair: 'P' = phonetic-decoding, 'G' = G2P, 'R' = reference
learned_lexicon = defaultdict(set) # Put all selected prons in this dictionary
for lexicon in ref_lexicon, g2p_lexicon, pd_lexicon:
for word in lexicon:
for pron in lexicon[word]:
prons[word].append(pron)
for word in prons:
for pron in prons[word]:
if word in pd_lexicon and pron in pd_lexicon[word]:
src[(word, pron)] = 'P'
if word in g2p_lexicon and pron in g2p_lexicon[word]:
src[(word, pron)] = 'G'
if word in ref_lexicon and pron in ref_lexicon[word]:
src[(word, pron)] = 'R'
for word in prons:
if word not in stats:
continue
n = len(prons[word])
pron_probs = [1/float(n) for i in range(n)]
if dianostic_info:
print("pronunciations of word '{}': {}".format(word, prons[word]))
active_indexes = set(range(len(prons[word])))
deleted_prons = [] # indexes of prons to be deleted
soft_counts_normalized = []
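# Greedy pruning loop: run EM over the currently active prons until the
# likelihood converges, then for each active pron re-run EM with that pron's
# probability forced to (near) zero and measure the likelihood loss; the pron
# with the most negative quality score (smoothed loss minus a source-dependent
# threshold) is removed, and the loop repeats until no candidate has a negative
# score or only one pron remains.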
while len(active_indexes) > 1:
log_like = 1.0
log_like_last = -1.0
num_iters = 0
while abs(log_like - log_like_last) > 1e-7:
num_iters += 1
log_like_last = log_like
pron_probs, log_like = OneEMIter(args, word, stats, prons, pron_probs, False)
if log_like_last == 1.0 and len(soft_counts_normalized) == 0: # the first iteration
soft_counts_normalized = pron_probs
if dianostic_info:
print("Avg.(over all egs) soft counts: {}".format(soft_counts_normalized))
if dianostic_info:
print("\n Log_like after {} iters of EM: {}, estimated pron_probs: {} \n".format(
num_iters, log_like, pron_probs))
candidates_to_delete = []
for i in active_indexes:
pron_probs_mod = [p for p in pron_probs]
pron_probs_mod[i] = 0.0
for j in range(len(pron_probs_mod)):
if j in active_indexes and j != i:
pron_probs_mod[j] += 0.01
pron_probs_mod = [s / sum(pron_probs_mod) for s in pron_probs_mod]
log_like2 = 1.0
log_like2_last = -1.0
num_iters2 = 0
# Running EM until convergence
while abs(log_like2 - log_like2_last) > 0.001 :
num_iters2 += 1
log_like2_last = log_like2
pron_probs_mod, log_like2 = OneEMIter(args, word, stats,
prons, pron_probs_mod, False)
loss_abs = log_like - log_like2 # absolute likelihood loss before normalization
# (supposed to be positive, but could be negative near zero because of numerical precision limit).
log_delta = math.log(args.delta)
thr = -log_delta
loss = loss_abs
source = src[(word, prons[word][i])]
if dianostic_info:
print("\n set the pron_prob of '{}' whose source is {}, to zero results in {}"
" loss in avg. log-likelihood; Num. iters until converging:{}. ".format(
prons[word][i], source, loss, num_iters2))
# Compute quality score q_b = loss_abs * M_w / (M_w + beta_s(b)) + alpha_s(b) * log_delta
# See Sec. 4.3 and Alg. 1 in the paper.
if source == 'P':
thr *= args.alpha[0]
loss *= float(len(stats[word])) / (float(len(stats[word])) + args.beta[0])
if source == 'G':
thr *= args.alpha[1]
loss *= float(len(stats[word])) / (float(len(stats[word])) + args.beta[1])
if source == 'R':
thr *= args.alpha[2]
loss *= float(len(stats[word])) / (float(len(stats[word])) + args.beta[2])
if loss - thr < 0: # loss - thr here is just q_b
if dianostic_info:
print("Smoothed log-like loss {} is smaller than threshold {} so that the quality"
"score {} is negative, adding the pron to the list of candidates to delete"
". ".format(loss, thr, loss-thr))
candidates_to_delete.append((loss-thr, i))
if len(candidates_to_delete) == 0:
break
candidates_to_delete_sorted = sorted(candidates_to_delete,
key=lambda candidates_to_delete: candidates_to_delete[0])
deleted_candidate = candidates_to_delete_sorted[0]
active_indexes.remove(deleted_candidate[1])
pron_probs[deleted_candidate[1]] = 0.0
for i in range(len(pron_probs)):
if i in active_indexes:
pron_probs[i] += 0.01
pron_probs = [s / sum(pron_probs) for s in pron_probs]
source = src[(word, prons[word][deleted_candidate[1]])]
pron = prons[word][deleted_candidate[1]]
soft_count = soft_counts_normalized[deleted_candidate[1]]
quality_score = deleted_candidate[0]
# This part of diagnostic info provides hints to the user on how to adjust the parameters.
if dianostic_info:
print("removed pron {}, from source {} with quality score {:.5f}".format(
pron, source, quality_score))
if (source == 'P' and soft_count > 0.7 and len(stats[word]) > 5):
print("WARNING: alpha_{{pd}} or beta_{{pd}} may be too large!"
" For the word '{}' whose count is {}, the candidate "
" pronunciation from phonetic decoding '{}' with normalized "
" soft count {} (out of 1) is rejected. It shouldn't have been"
" rejected if alpha_{{pd}} is smaller than {}".format(
word, len(stats[word]), pron, soft_count, -loss / log_delta),
file=sys.stderr)
if loss_abs > thr:
print(" or beta_{{pd}} is smaller than {}".format(
(loss_abs / thr - 1) * len(stats[word])), file=sys.stderr)
if (source == 'G' and soft_count > 0.7 and len(stats[word]) > 5):
print("WARNING: alpha_{{g2p}} or beta_{{g2p}} may be too large!"
" For the word '{}' whose count is {}, the candidate "
" pronunciation from G2P '{}' with normalized "
" soft count {} (out of 1) is rejected. It shouldn't have been"
" rejected if alpha_{{g2p}} is smaller than {} ".format(
word, len(stats[word]), pron, soft_count, -loss / log_delta),
file=sys.stderr)
if loss_abs > thr:
print(" or beta_{{g2p}} is smaller than {}.".format((
loss_abs / thr - 1) * len(stats[word])), file=sys.stderr)
deleted_prons.append(deleted_candidate[1])
for i in range(len(prons[word])):
if i not in deleted_prons:
learned_lexicon[word].add(prons[word][i])
return learned_lexicon
def WriteLearnedLexicon(learned_lexicon, file_handle):
for word, prons in learned_lexicon.items():
for pron in prons:
print('{0} {1}'.format(word, pron), file=file_handle)
file_handle.close()
def Main():
args = GetArgs()
# Read in three lexicon sources, word counts, and pron stats.
counts = ReadWordCounts(args.word_counts_file_handle)
ref_lexicon = ReadLexicon(args, args.ref_lexicon_handle, counts)
g2p_lexicon = ReadLexicon(args, args.g2p_lexicon_handle, counts)
pd_lexicon = ReadLexicon(args, args.pd_lexicon_handle, counts)
stats, stats_summed = ReadArcStats(args.arc_stats_file_handle)
pd_lexicon = FilterPhoneticDecodingLexicon(args, pd_lexicon)
# Select prons to construct the learned lexicon.
learned_lexicon = SelectPronsGreedy(args, stats, counts, ref_lexicon, g2p_lexicon, pd_lexicon)
# Write the learned lexicon to the output file.
WriteLearnedLexicon(learned_lexicon, args.learned_lexicon_handle)
if __name__ == "__main__":
Main()
|
import sys, time
from django.conf import settings
from django.db import connection, transaction, backend
from django.core import management
from django.dispatch import dispatcher
from django.test import signals
from django.template import Template
# The prefix to put on the default database name when creating
# the test database.
TEST_DATABASE_PREFIX = 'test_'
def instrumented_test_render(self, context):
"""An instrumented Template render method, providing a signal
that can be intercepted by the test system Client
"""
dispatcher.send(signal=signals.template_rendered, sender=self, template=self, context=context)
return self.nodelist.render(context)
def setup_test_environment():
"""Perform any global pre-test setup. This involves:
- Installing the instrumented test renderer
"""
Template.original_render = Template.render
Template.render = instrumented_test_render
def teardown_test_environment():
"""Perform any global post-test teardown. This involves:
- Restoring the original test renderer
"""
Template.render = Template.original_render
del Template.original_render
def _set_autocommit(connection):
"Make sure a connection is in autocommit mode."
if hasattr(connection.connection, "autocommit"):
connection.connection.autocommit(True)
elif hasattr(connection.connection, "set_isolation_level"):
connection.connection.set_isolation_level(0)
def create_test_db(verbosity=1, autoclobber=False):
if verbosity >= 1:
print "Creating test database..."
# If we're using SQLite, it's more convenient to test against an
# in-memory database.
if settings.DATABASE_ENGINE == "sqlite3":
TEST_DATABASE_NAME = ":memory:"
else:
if settings.TEST_DATABASE_NAME:
TEST_DATABASE_NAME = settings.TEST_DATABASE_NAME
else:
TEST_DATABASE_NAME = TEST_DATABASE_PREFIX + settings.DATABASE_NAME
# Create the test database and connect to it. We need to autocommit
# if the database supports it because PostgreSQL doesn't allow
# CREATE/DROP DATABASE statements within transactions.
cursor = connection.cursor()
_set_autocommit(connection)
try:
cursor.execute("CREATE DATABASE %s" % backend.quote_name(TEST_DATABASE_NAME))
except Exception, e:
sys.stderr.write("Got an error creating the test database: %s\n" % e)
if not autoclobber:
confirm = raw_input("It appears the test database, %s, already exists. Type 'yes' to delete it, or 'no' to cancel: " % TEST_DATABASE_NAME)
if autoclobber or confirm == 'yes':
try:
if verbosity >= 1:
print "Destroying old test database..."
cursor.execute("DROP DATABASE %s" % backend.quote_name(TEST_DATABASE_NAME))
if verbosity >= 1:
print "Creating test database..."
cursor.execute("CREATE DATABASE %s" % backend.quote_name(TEST_DATABASE_NAME))
except Exception, e:
sys.stderr.write("Got an error recreating the test database: %s\n" % e)
sys.exit(2)
else:
print "Tests cancelled."
sys.exit(1)
connection.close()
settings.DATABASE_NAME = TEST_DATABASE_NAME
management.syncdb(verbosity, interactive=False)
# Get a cursor (even though we don't need one yet). This has
# the side effect of initializing the test database.
cursor = connection.cursor()
def destroy_test_db(old_database_name, verbosity=1):
# Unless we're using SQLite, remove the test database to clean up after
# ourselves. Connect to the previous database (not the test database)
# to do so, because it's not allowed to delete a database while being
# connected to it.
if verbosity >= 1:
print "Destroying test database..."
connection.close()
TEST_DATABASE_NAME = settings.DATABASE_NAME
settings.DATABASE_NAME = old_database_name
if settings.DATABASE_ENGINE != "sqlite3":
cursor = connection.cursor()
_set_autocommit(connection)
time.sleep(1) # To avoid "database is being accessed by other users" errors.
cursor.execute("DROP DATABASE %s" % backend.quote_name(TEST_DATABASE_NAME))
connection.close()
|
from django.db import models
# Create your models here.
class Question(models.Model):
question_title = models.CharField(max_length=200)
question_text = models.TextField()
active = models.BooleanField(default=True)
def __str__(self):
return self.question_text
class Choice(models.Model):
question = models.ForeignKey(Question, on_delete=models.CASCADE, related_name='choices')
choice_text = models.CharField(max_length=200)
def __str__(self):
return self.choice_text
class Uni(models.Model):
uni_id = models.IntegerField(unique=True, primary_key=True)
uni_name = models.CharField(max_length=200)
def __str__(self):
return self.uni_name
class Vote(models.Model):
choice = models.ForeignKey(Choice, on_delete=models.CASCADE, related_name='votes')
uni = models.ForeignKey(Uni, on_delete=models.CASCADE)
|
import unittest
from typing import List
from cave_navigator import CaveNavigator
class TestCaveNavigator(unittest.TestCase):
def setUp(self) -> None:
pass
def test_cave_navigator_small_sample(self) -> None:
lines = file_read_helper('day-12/small_sample_input.txt')
cave_navigator = CaveNavigator(lines)
self.assertEqual(cave_navigator.count_paths(), 10)
def test_cave_navigator_medium_sample(self) -> None:
lines = file_read_helper('day-12/medium_sample_input.txt')
cave_navigator = CaveNavigator(lines)
self.assertEqual(cave_navigator.count_paths(), 19)
def test_cave_navigator_large_sample(self) -> None:
lines = file_read_helper('day-12/large_sample_input.txt')
cave_navigator = CaveNavigator(lines)
self.assertEqual(cave_navigator.count_paths(), 226)
def test_cave_navigator_puzzle(self) -> None:
lines = file_read_helper('day-12/puzzle_input.txt')
cave_navigator = CaveNavigator(lines)
self.assertEqual(cave_navigator.count_paths(), 3497)
def test_cave_navigator_enabled_extra_visit_small_sample(self) -> None:
lines = file_read_helper('day-12/small_sample_input.txt')
cave_navigator = CaveNavigator(lines)
self.assertEqual(cave_navigator.count_paths(allow_extra_visit=True), 36)
def test_cave_navigator_enabled_extra_visit_medium_sample(self) -> None:
lines = file_read_helper('day-12/medium_sample_input.txt')
cave_navigator = CaveNavigator(lines)
self.assertEqual(cave_navigator.count_paths(allow_extra_visit=True), 103)
def test_cave_navigator_enabled_extra_visit_large_sample(self) -> None:
lines = file_read_helper('day-12/large_sample_input.txt')
cave_navigator = CaveNavigator(lines)
self.assertEqual(cave_navigator.count_paths(allow_extra_visit=True), 3509)
def test_cave_navigator_enabled_extra_visit_puzzle(self) -> None:
lines = file_read_helper('day-12/puzzle_input.txt')
cave_navigator = CaveNavigator(lines)
self.assertEqual(cave_navigator.count_paths(allow_extra_visit=True), 93686)
def file_read_helper(filename: str) -> List[str]:
lines = []
with open(filename, 'r', encoding='UTF-8') as file:
for line in file:
lines.append(line.strip())
return lines
if __name__ == '__main__':
unittest.main()
|
# entry point for the input form to pass values back to this script
def setValues(tH0,tWM,tWV,tz,tmnue,tmnumu,tmnutau,tw,twp,tT0):
H0 = tH0
h = H0/100
WM = tWM
WV = tWV
z = tz
WR = 2.477E-5/(h*h) # does not include neutrinos, T0 = 2.72528
WK = 1-WM-WR-WV
mnue = tmnue
mnumu = tmnumu
mnutau = tmnutau
w = tw
wp = twp
T0 = tT0
compute(z, w, WM, WV)
# tangential comoving distance
def DCMT(WK,DCMR):
import math
ratio = 1.00
x = math.sqrt(abs(WK))*DCMR
# document.writeln("DCMR = " + DCMR + "<BR>")
# document.writeln("x = " + x + "<BR>")
if (x > 0.1):
if (WK > 0) : ratio = 0.5*(math.exp(x)-math.exp(-x))/x
else: ratio = math.sin(x)/x
# document.writeln("ratio = " + ratio + "<BR>")
y = ratio*DCMR
return y
y = x*x
# statement below fixed 13-Aug-03 to correct sign error in expansion
if (WK < 0): y = -y
ratio = 1 + y/6 + y*y/120
# document.writeln("ratio = " + ratio + "<BR>")
y= ratio*DCMR
return y
# comoving volume computation
def VCM(WK,DCMR):
import math
ratio = 1.00
x = math.sqrt(abs(WK))*DCMR
if (x > 0.1) :
if (WK > 0) : ratio = (0.125*(math.exp(2*x)-math.exp(-2*x))-x/2)/(x*x*x/3)
else: ratio =(x/2 - math.sin(2*x)/4)/(x*x*x/3)
y = ratio*DCMR*DCMR*DCMR/3
return y
y = x*x
# statement below fixed 13-Aug-03 to correct sign error in expansion
if (WK < 0): y = -y
ratio = 1 + y/5 + (2/105)*y*y
y = ratio*DCMR*DCMR*DCMR/3
return y
# function to give neutrino density over rest mass density
def nurho(mnurel,mnu):
import math
y = math.pow(1+math.pow(mnurel/mnu,1.842),1.0/1.842)
return y
# calculate the actual results
def compute(z,w,WM=0.27,WV=0.73):
i=0 # index
n=1000 # number of points in integrals
nda = 1 # number of digits in angular size distance
H0 = 71. # Hubble constant
#WM = 0.27 # Omega(matter)
#WV = 0.73 # Omega(vacuum) or lambda
WR = 0. # Omega(radiation)
WK = 0. # Omega curvature = 1-Omega(total)
Wnu = 0. # Omega from massive neutrinos
#z = 3.0 # redshift of the object
h = 0.71 # H0/100
mnue = 0.001 # mass of electron neutrino in eV
mnumu = 0.009 # mass of muon neutrino in eV
mnutau = 0.049 # mass of tau neutrino in eV
we = mnue/93. # Omega(nu(e))h^2
wmu = mnumu/93. # Omega(nu(mu))h^2
wtau = mnutau/93. # Omega(nu(tau))h^2
mnurel = 0.0005 # mass of neutrino that is just now relativistic in eV
T0 = 2.72528 # CMB temperature in K
c = 299792.458 # velocity of light in km/sec
Tyr = 977.8 # coefficient for converting 1/H into Gyr
DTT = 0.5 # time from z to now in units of 1/H0
DTT_Gyr = 0.0 # value of DTT in Gyr
age = 0.5 # age of Universe in units of 1/H0
age_Gyr = 0.0 # value of age in Gyr
zage = 0.1 # age of Universe at redshift z in units of 1/H0
zage_Gyr = 0.0 # value of zage in Gyr
DCMR = 0.0 # comoving radial distance in units of c/H0
DCMR_Mpc = 0.0
DCMR_Gyr = 0.0
DA = 0.0 # angular size distance
DA_Mpc = 0.0
DA_Gyr = 0.0
kpc_DA = 0.0
DL = 0.0 # luminosity distance
DL_Mpc = 0.0
DL_Gyr = 0.0 # DL in units of billions of light years
V_Gpc = 0.0
a = 1.0 # 1/(1+z), the scale factor of the Universe
az = 0.5 # 1/(1+z(object))
#w = -1. # equation of state, w = P/(rno*c^2)
wp = 0. # rate of change of equation of state, w(a) = w+2*wp*(1-a)
# following Linder, astro-ph/0402503
import math
h = H0/100.
WR = 2.477E-5*math.pow(T0/2.72528,4)/(h*h) # no neutrinos
# avoid dividing by zero neutrino mass
if (mnue < 0.00001): mnue = 0.00001
if (mnumu < 0.00001): mnumu = 0.00001
if (mnutau < 0.00001): mnutau = 0.00001
# rest mass omega*h^2 for the three neutrino types
we = (mnue/93.64)*math.pow(T0/2.72528,3)
wmu = (mnumu/93.90)*math.pow(T0/2.72528,3)
wtau = (mnutau/93.90)*math.pow(T0/2.72528,3)
# mass of nu that is just now relativistic
# evaluates at 3.151*kT with T = (4/11)^(1/3)*To and To=2.72528
# This is 6.13 K, and 1 eV is 11604.5 K
mnurel = 6.13*(T0/2.72528)/11604.5
Wnu = (we*nurho(mnurel,mnue)+wmu*nurho(mnurel,mnumu)+wtau*nurho(mnurel,mnutau))/(h*h)
WK = 1-WM-WR-WV
WM = WM-Wnu
az = 1.0/(1+1.0*z)
age = 0
# do integral over a=1/(1+z) from 0 to az in n steps, midpoint rule
for i in range(n): #(i = 0 i != n i++) {
a = az*(i+0.5)/n
# rho(DE) = a^{-3-3*w_o-6*w'}*exp(6*w'*(a-1))*rho_o(DE)
# based on w = w_o+w_a*(1-a) with w_a = 2*w': Linder astro-ph/0402503
rhoV = WV*math.pow(a,-3-3*w-6*wp)*math.exp(6*wp*(a-1))
# get neutrino density corrected for kT/mc^2 by using lower mass
# instead of higher T:
Wnu = (we*nurho(mnurel,mnue*a)+wmu*nurho(mnurel,mnumu*a)+wtau*nurho(mnurel,mnutau*a))/(h*h)
adot = math.sqrt(WK+((WM+Wnu)/a)+(WR/(a*a))+(rhoV*a*a))
age = age + 1/adot
zage = az*age/n
# correction for annihilations of particles not present now like e+/e-
# added 13-Aug-03 based on T_vs_t.f
lpz = math.log((1+1.0*z))/math.log(10.0)
dzage = 0
if (lpz > 7.500): dzage = 0.002 * (lpz - 7.500)
if (lpz > 8.000): dzage = 0.014 * (lpz - 8.000) + 0.001
if (lpz > 8.500): dzage = 0.040 * (lpz - 8.500) + 0.008
if (lpz > 9.000): dzage = 0.020 * (lpz - 9.000) + 0.028
if (lpz > 9.500): dzage = 0.019 * (lpz - 9.500) + 0.039
if (lpz > 10.000): dzage = 0.048
if (lpz > 10.775): dzage = 0.035 * (lpz - 10.775) + 0.048
if (lpz > 11.851): dzage = 0.069 * (lpz - 11.851) + 0.086
if (lpz > 12.258): dzage = 0.461 * (lpz - 12.258) + 0.114
if (lpz > 12.382): dzage = 0.024 * (lpz - 12.382) + 0.171
if (lpz > 13.055): dzage = 0.013 * (lpz - 13.055) + 0.188
if (lpz > 14.081): dzage = 0.013 * (lpz - 14.081) + 0.201
if (lpz > 15.107): dzage = 0.214
zage = zage*10.0**dzage
#
zage_Gyr = (Tyr/H0)*zage
DTT = 0.0
DCMR = 0.0
# do integral over a=1/(1+z) from az to 1 in n steps, midpoint rule
for i in range(n):
a = az+(1-az)*(i+0.5)/n
rhoV = WV*math.pow(a,-3-3*w-6*wp)*math.exp(6*wp*(a-1))
Wnu = (we*nurho(mnurel,mnue*a)+wmu*nurho(mnurel,mnumu*a)+wtau*nurho(mnurel,mnutau*a))/(h*h)
adot = math.sqrt(WK+((WM+Wnu)/a)+(WR/(a*a))+(rhoV*a*a))
DTT = DTT + 1/adot
DCMR = DCMR + 1/(a*adot)
#print az
DTT = (1-az)*DTT/n
DCMR = (1-az)*DCMR/n
age = DTT+zage
age_Gyr = age*(Tyr/H0)
DTT_Gyr = (Tyr/H0)*DTT
DCMR_Gyr = (Tyr/H0)*DCMR
DCMR_Mpc = (c/H0)*DCMR
DA = az*DCMT(WK,DCMR)
DA_Mpc = (c/H0)*DA
kpc_DA = DA_Mpc/206.264806
DA_Gyr = (Tyr/H0)*DA
DL = DA/(az*az)
DL_Mpc = (c/H0)*DL
DL_Gyr = (Tyr/H0)*DL
V_Gpc = 4*math.pi*math.pow(0.001*c/H0,3)*VCM(WK,DCMR)
#print 'z',z,'DA_Mpc',DA_Mpc
return DCMR
if __name__ == '__main__':
import pylab
cluster_z_low = 0.2
cluster_z_high = 0.6
for cluster_z in [0.2,0.3,0.55]: #,1.2]:
for w in [-1]: #.5,-1,-0.5,]:
d_cluster_low = compute(cluster_z_low,w)
d_cluster_high = compute(cluster_z_high,w)
d_cluster = compute(cluster_z,w)
refer = (compute(0.8,w) - d_cluster)/compute(0.8,w)
import scipy
ratios_save = []
zs = []
for z in scipy.arange(cluster_z,3.,0.1):
zs.append(z)
s = compute(z,w)
#ratio = (d_cluster_high/(1+cluster_z_high))/(d_cluster_low/(1+cluster_z_low))*(s - d_cluster_high)/(s - d_cluster_low)
ratio = (d_cluster_high/(1+cluster_z_high))/(d_cluster_low/(1+cluster_z_low))*(s - d_cluster_high)/(s - d_cluster_low)
#nprint ratio, s, d_cluster, z
#ratios.append(ratio)
ratios_save.append((compute(z,w) - d_cluster)/compute(z,w)/refer)
for w in [-1.5,-1,-0.5,]:
d_cluster_low = compute(cluster_z_low,w)
d_cluster_high = compute(cluster_z_high,w)
d_cluster = compute(cluster_z,w)
refer = (compute(0.8,w) - d_cluster)/compute(0.8,w)
import scipy
ratios = []
zs = []
i = 0
for z in scipy.arange(cluster_z,3.,0.1):
zs.append(z)
s = compute(z,w)
#ratio = (d_cluster_high/(1+cluster_z_high))/(d_cluster_low/(1+cluster_z_low))*(s - d_cluster_high)/(s - d_cluster_low)
ratio = (d_cluster_high/(1+cluster_z_high))/(d_cluster_low/(1+cluster_z_low))*(s - d_cluster_high)/(s - d_cluster_low)
#print ratio, s, d_cluster, z
#ratios.append(ratio)
ratios.append((compute(z,w) - d_cluster)/compute(z,w)/refer/ratios_save[i])
i += 1
pylab.plot(scipy.array(zs), scipy.array(ratios))
pylab.savefig('shearratio.pdf')
pylab.show()
def compute_cube():
import scipy
dict = {}
for w in [-1]: #scipy.arange(-2,2,0.1):
for WM in [0.3]: #scipy.arange(0,1,0.1#):
WV = 1 - WM
for z in scipy.arange(0,2.5,0.01):
d = compute(z,w,WM,WV)
dict['%.2f' % z + '_' + '%.2f' % w + '_' + '%.2f' % WM] = d #str(z) + '_' + str(w) + '_' + str(WM)] = d
print d, z, w, WM, WV
print dict.keys()
import pickle
f = open('DA.pickle','w')
m = pickle.Pickler(f)
pickle.dump(dict,m)
f.close()
def dist_ratio(zs,cluster_z=0.55,w=-1.,omega_m=0.27,omega_lambda=0.73):
import pylab
#cluster_z = 0.55
ratios = []
for z in zs:
d_cluster = compute(cluster_z,w,omega_m,omega_lambda)
ratios.append((compute(z,w) - d_cluster)/compute(z,w,omega_m,omega_lambda))
import scipy
return scipy.array(ratios)
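# Example usage (a sketch; the redshift grid below is illustrative, while the
# keyword values are just the defaults of dist_ratio above):
#   zs = scipy.arange(0.6, 3.0, 0.1)
#   ratios = dist_ratio(zs, cluster_z=0.55, w=-1., omega_m=0.27, omega_lambda=0.73)
#   # ratios[i] is (D(z_i) - D(cluster_z)) / D(z_i), a lensing-efficiency-like
#   # ratio of the comoving distances returned by compute().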
|
import logging
import re
import sys
from framework.mongo import database
from website.app import init_app
from framework.transactions.context import TokuTransaction
from scripts import utils as script_utils
logger = logging.getLogger(__name__)
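# get_targets(): collect guids whose referent is an osfstorageguidfile, map each
# such file's path (stripped of slashes) back to its guid, and return that
# mapping together with the trashedfilenode documents sharing those ids.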
def get_targets():
guids = [
x['_id']
for x in database['guid'].find(
{'referent': 'osfstorageguidfile'},
{'_id': True}
)
]
paths = {
x['path'].strip('/'): x['_id']
for x in database['osfstorageguidfile'].find({
'_id': {'$in': guids},
'path': {'$not': re.compile('.*{{.*')}
}, {'path': True})
}
return paths, database['trashedfilenode'].find({'_id': {'$in': list(paths.keys())}})
def migrate():
paths, targets = get_targets()
for trashed in targets:
logger.info('Migrating {} => {}'.format(paths[trashed['_id']], trashed['_id']))
database['guid'].update(
{'_id': paths[trashed['_id']]},
{'$set': {
'referent': (trashed['_id'], 'trashedfilenode')
}}
)
def main():
dry_run = '--dry' in sys.argv
if not dry_run:
script_utils.add_file_logger(logger, __file__)
init_app(set_backends=True, routes=False)
with TokuTransaction():
migrate()
if dry_run:
raise RuntimeError('Dry Run -- Transaction rolled back')
if __name__ == '__main__':
main()
|
#=========================================================================
# RegIncr
#=========================================================================
# This is a simple model for a registered incrementer. An eight-bit value
# is read from the input port, registered, incremented by one, and
# finally written to the output port.
from pymtl import *
class RegIncr( Model ):
# Constructor
def __init__( s ):
# Port-based interface
s.in_ = InPort ( Bits(8) )
s.out = OutPort ( Bits(8) )
# Concurrent block modeling register
s.reg_out = Wire( Bits(8) )
@s.tick
def block1():
if s.reset:
s.reg_out.next = 0
else:
s.reg_out.next = s.in_
# Concurrent block modeling incrementer
@s.combinational
def block2():
s.out.value = s.reg_out + 1
# Line Tracing
def line_trace( s ):
return "{} ({}) {}".format( s.in_, s.reg_out, s.out )
|
def rasterize_mesh_from_barycentric_coordinate_images(
mesh, bcoords_image, tri_indices_image
):
r"""
Renders an image of a `menpo.shape.TexturedTriMesh` or
`menpo.shape.ColouredTriMesh` from a barycentric coordinate image pair.
Note that the texture is rendered without any lighting model - think of
this as a piecewise affine warp of the mesh's texture into the image (
with z-buffering). As there is no lighting model, only meshes with
colour/texture can be used with this method (a single color for the whole
mesh would render flat with no shading).
Parameters
----------
mesh : `menpo.shape.TexturedTriMesh` or `menpo.shape.ColouredTriMesh`
The 3D mesh whose texture will be rendered to the image.
bcoords_image : `menpo.image.MaskedImage`
The per-triangle barycentric coordinates for what should be rendered
into each pixel. See :map:`rasterize_barycentric_coordinate_images`.
tri_indices_image : `menpo.image.MaskedImage`
The triangle index identifying the triangle that is visible at a pixel
after z-buffering. See :map:`rasterize_barycentric_coordinate_images`.
Returns
-------
`menpo.image.MaskedImage`
A rasterized image of the mesh.
"""
# Sample the mesh texture space to find the colors-per pixel
colours = mesh.sample_texture_with_barycentric_coordinates(
bcoords_image.as_vector(keep_channels=True).T, tri_indices_image.as_vector()
)
# Rebuild the image using the usual from_vector machinery
return tri_indices_image.from_vector(colours.T, n_channels=mesh.n_channels)
def rasterize_shape_image_from_barycentric_coordinate_images(
mesh, bcoords_image, tri_indices_image
):
r"""
Renders an XYZ shape image of a `menpo.shape.TexturedTriMesh` or
`menpo.shape.ColouredTriMesh` from a barycentric coordinate image pair.
Parameters
----------
mesh : `menpo.shape.TexturedTriMesh` or `menpo.shape.ColouredTriMesh`
The 3D mesh whose texture will be rendered to the image.
bcoords_image : `menpo.image.MaskedImage`
The per-triangle barycentric coordinates for what should be rendered
into each pixel. See :map:`rasterize_barycentric_coordinate_images`.
tri_indices_image : `menpo.image.MaskedImage`
The triangle index identifying the triangle that is visible at a pixel
after z-buffering. See :map:`rasterize_barycentric_coordinate_images`.
Returns
-------
`menpo.image.MaskedImage`
A rasterized shape image of the mesh.
"""
# Sample the mesh texture space to find the colors-per pixel
shape_per_pixel = mesh.project_barycentric_coordinates(
bcoords_image.as_vector(keep_channels=True).T, tri_indices_image.as_vector()
)
# Rebuild the image using the usual from_vector machinery
return tri_indices_image.from_vector(
shape_per_pixel.points.T, n_channels=mesh.n_channels
)
def rasterize_mesh(mesh_in_img, image_shape):
from .cpu import rasterize_barycentric_coordinate_images
bcs = rasterize_barycentric_coordinate_images(mesh_in_img, image_shape)
return rasterize_mesh_from_barycentric_coordinate_images(mesh_in_img, *bcs)
|
# coding: utf-8
from __future__ import unicode_literals, division, absolute_import, print_function
__version__ = '0.10.2'
__version_info__ = (0, 10, 2)
|
# -------------------------------------------------------------
# generic_display.py - Implements an interface for all the display classes
# August 2018 - Andrei Diaconu
# -------------------------------------------------------------
__all__ = ("GenericDisplay",)
from typing import NamedTuple
class GenericDisplay:
"""
An interface for display classes to implement
"""
class DisplayOptions(NamedTuple):
"""
Any options that the display might have. To be overridden in its
implementation.
"""
pass
def __init__(self, data, *args):
"""
Initialises the display module
:param data: a derivative of a `data_io.Data` object
:param args: extra args
"""
self.data = data
self.data_options = data.data_options
def show(self):
"""
Method that actually displays the data
"""
pass
|
'''
Given two sorted (ascending) arrays nums1 and nums2 of sizes m and n, find and return the median of the two sorted arrays.
An O(log(m+n)) algorithm is required.
'''
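# A minimal sketch of one way to meet the O(log(m+n)) bound (binary search over
# the partition of the shorter array); assumptions: plain Python lists as input,
# at least one array non-empty, and the function name is mine. This is an
# illustration, not a reference answer.
def find_median_sorted_arrays(nums1, nums2):
    if len(nums1) > len(nums2):
        nums1, nums2 = nums2, nums1      # binary-search the shorter array
    m, n = len(nums1), len(nums2)
    lo, hi, half = 0, m, (m + n + 1) // 2
    while lo <= hi:
        i = (lo + hi) // 2               # elements taken from nums1's left part
        j = half - i                     # elements taken from nums2's left part
        left1 = nums1[i - 1] if i > 0 else float('-inf')
        right1 = nums1[i] if i < m else float('inf')
        left2 = nums2[j - 1] if j > 0 else float('-inf')
        right2 = nums2[j] if j < n else float('inf')
        if left1 <= right2 and left2 <= right1:          # valid partition found
            if (m + n) % 2:
                return float(max(left1, left2))
            return (max(left1, left2) + min(right1, right2)) / 2.0
        if left1 > right2:
            hi = i - 1
        else:
            lo = i + 1
    raise ValueError("input arrays must be sorted")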
|
import os
import torch
from collections import OrderedDict
import argparse
import numpy as np
eps_bn = 1e-5 #default epsilon for bn
mean_rgb = {
"": [0.0, 0.0, 0.0],
"pascal": [103.939, 116.779, 123.68],
"cityscapes": [0.0, 0.0, 0.0],
"railsem19": [0.0, 0.0, 0.0],
"vistas": [80.5423, 91.3162, 81.4312],
"pascal_bgr": [123.68, 116.779, 103.939],
"vistas_bgr": [81.4312, 91.3162, 80.5423]}
def transform_layer(m0_state, key0, normalization_factor = 1.0, norm_mean = [0.0,0.0,0.0], apply_bgr_flip = True):
orig_device = m0_state[key0].get_device()
w0 = m0_state[key0].cpu().numpy().astype(np.float64)
if normalization_factor != 1.0:
w0 = w0 * normalization_factor
if apply_bgr_flip:
if len(w0.shape) == 4 and w0.shape[1] == 3 and w0.shape[0] != 3:
w0 = np.copy(w0[:,::-1,:,:])
else:
print("Warning: unknown position of rgb channel dimension!")
norm_fact = None
for c in range(3):
if norm_mean[c] == 0.0:
continue
if len(w0.shape) == 4 and w0.shape[1] == 3 and w0.shape[0] != 3:
#TODO: find batch norm nodes (where bias is pushed into batch norm)
w_tmean = np.sum(w0[:,c,:,:] * - norm_mean[c], axis = (1,2)) #assume convolution operation
if norm_fact is None:
norm_fact = w_tmean
else:
norm_fact += w_tmean
else:
print("Warning: unknown position of rgb channel dimension!")
if norm_fact is not None:
key0_b = key0.replace('.weight','.bias')
if key0 == key0_b or key0_b not in m0_state:
print("Warning: cannot detect type of input layer "+ key0)
else:
w0_b = m0_state[key0_b].cpu().numpy().astype(np.float64)
m0_state[key0_b] = torch.tensor((w0_b - norm_fact).astype(np.float32), device = orig_device)
m0_state[key0] = torch.tensor(w0.astype(np.float32), device = orig_device)
def find_diffs_bn(state0, stateTempl):
to_bn = {}
from_bn = {}
bn_vars = ['weight', 'bias', 'running_mean', 'running_var', 'num_batches_tracked']
for idx0, k in enumerate(stateTempl.keys()):
if k in state0:
continue
k_split = k.split('.')
if len(k_split) > 2 and k_split[-2] == '1' and k_split[-1] in bn_vars: #check if this is a bn node
to_bn_name = k[:k.rfind('.')][:-2]+'.0'
if to_bn_name+'.weight' in state0:
if not to_bn_name in to_bn:
to_bn[to_bn_name] = (idx0, '.'.join(k_split[:-1])+'.bias' in stateTempl)
continue
if k.endswith('.0.bias'):
from_bn_name = k[:k.rfind('.')][:-2]+'.1'
if from_bn_name+'.running_mean' in state0:
if not from_bn_name in from_bn:
from_bn[from_bn_name] = (idx0, None)
continue
print("Warning: template's key "+ k+" not found in loaded model (and not bn)")
for idx0, k in enumerate(state0.keys()):
if k in stateTempl:
continue
node_name = k[:k.rfind('.')]
if node_name in to_bn:
continue
if node_name in from_bn:
continue
print("Warning: loaded model's key "+ k +" not found in template (and not bn)")
return to_bn, from_bn
def transform_from_bn(m0_state, key_from_bn):
k0 = key_from_bn[:-2]+'.0.weight'
k0bias = key_from_bn[:-2]+'.0.bias' #this entry should currently not exist!
if not key_from_bn.endswith('.1') or not k0 in m0_state or \
not key_from_bn+'.running_var' in m0_state or k0bias in m0_state:
print("Warning: Skipping unknown batch entry "+k)
return [],{}
orig_device = m0_state[k0].get_device()
#bn: y = (x-running_mean)*gamma/sqrt(running_var+eps) + beta
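#Folding the bn into the preceding conv: with s = gamma/sqrt(running_var+eps),
#y = s*(W*x - running_mean) + beta, so the conv weights become W*s (applied per
#output channel) and the appended bias becomes beta - running_mean*s; the conv
#itself is assumed to have no bias, which is checked at the top of this function.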
w1_var = m0_state[key_from_bn+'.running_var'].cpu().numpy().astype(np.float64)
w1_var = 1.0/np.sqrt(w1_var+eps_bn)
if key_from_bn+'.weight' in m0_state:
w1_var = w1_var * m0_state[key_from_bn+'.weight'].cpu().numpy().astype(np.float64)
w0_bias = -m0_state[key_from_bn+'.running_mean'].cpu().numpy().astype(np.float64) * w1_var
if key_from_bn+'.bias' in m0_state:
w0_bias += m0_state[key_from_bn+'.bias'].cpu().numpy().astype(np.float64)
w0 = m0_state[k0].cpu().numpy().astype(np.float64)
#apply batch norm weight accross output dim of previous node
w0r = w0.reshape((w0.shape[0],-1))
w0new = w0r*w1_var.reshape((w1_var.shape[0],1))
w0new = w0new.reshape(w0.shape)
m0_state[k0] = torch.tensor(np.copy(w0new).astype(np.float32), device = orig_device)
remove_nodes = [key_from_bn+'.weight',key_from_bn+'.running_mean',
key_from_bn+'.running_var',key_from_bn+'.num_batches_tracked', key_from_bn+'.bias']
append_nodes = {}
append_nodes[k0] = (k0bias, torch.tensor(np.copy(w0_bias).astype(np.float32), device = orig_device)) # this bias term is added after the weights term
return remove_nodes, append_nodes
def transform_to_bn(m0_state, key_to_bn, ref_is_affine):
k0w = key_to_bn+'.weight'
k1 = key_to_bn[:-2]+'.1'
k1w = k1 + '.weight'
k1bias = k1 +'.bias'
k1runmean = k1 + '.running_mean'
k1runvar = k1 + '.running_var'
k1numbtracked = k1 + '.num_batches_tracked'
if not key_to_bn.endswith('.0') or not k0w in m0_state or \
k1+'.weight' in m0_state or k1+'.running_var' in m0_state or k1bias in m0_state:
print("Warning: Cannot convert entry " + key_to_bn + " to bn")
return [],{}
append_nodes = {}
orig_device = m0_state[k0w].get_device()
#bn: y = (x-running_mean)*gamma/sqrt(running_var+eps) + beta
inp_dim = m0_state[k0w].shape[0]
np.zeros((inp_dim,), dtype = np.float32)
if ref_is_affine:
append_nodes[k0w] = (k1w, torch.tensor(np.ones((inp_dim,), dtype = np.float32), device = orig_device))
append_nodes[k1w] = (k1bias, torch.tensor(np.zeros((inp_dim,), dtype = np.float32), device = orig_device))
else:
k1bias = k0w # directly start with running_var
if key_to_bn+'.bias' in m0_state:
b0 = m0_state[key_to_bn+'.bias'].cpu().numpy().astype(np.float64)
append_nodes[k1bias] = (k1runmean, torch.tensor((b0*-1.0).astype(np.float32), device = orig_device)) #store -bias as running_mean so the bn re-adds the original bias; the other bn weights stay identity
else:
append_nodes[k1bias] = (k1runmean, torch.tensor(np.zeros((inp_dim,), dtype = np.float32), device = orig_device)) # this bias term is added after the weights term
append_nodes[k1runmean] = (k1runvar, torch.tensor(np.ones((inp_dim,), dtype = np.float32) - eps_bn, device = orig_device)) # this bias term is added after the weights term
append_nodes[k1runvar] = (k1numbtracked, torch.tensor(np.zeros((inp_dim,), dtype = np.float32), device = orig_device)) # this bias term is added after the weights term
remove_nodes = [key_to_bn+'.bias']
return remove_nodes, append_nodes
def convert(args):
m0 = torch.load(args.model_path)
m0_state = m0["model_state"]
norm_mean = [0.0,0.0,0.0]
versions = [mean_rgb[v.strip()] for v in args.change_version.split(';')]
if len(versions) == 2:
norm_mean = [versions[1][c] - versions[0][c] for c in range(3)]
if args.flip_rgb:
norm_mean = norm_mean[::-1]
normalization_factor = 1.0
if args.img_norm is not None:
if args.img_norm:
normalization_factor = 255.0
else:
normalization_factor = 1.0/255.0
inp_layers = [l.strip() for l in args.inp_layers.split(';')]
if len(inp_layers) == 0 or len(inp_layers[0]) == 0:
inp_layers = [list(m0_state.keys())[0]]
if inp_layers[0] == "module.convbnrelu1_1.cbr_unit.0.weight":
inp_layers.append("module.convbnrelu1_sub1.cbr_unit.0.weight")
trg_path = args.out_path
if len(trg_path) == 0:
trg_path = args.model_path.replace('.pth','').replace('.pkl','')+'_transf.pkl'
num_templ = 0
if len(args.target_template) > 0:
#use template model file to identify differences resp. batch norm nodes
m_trg_templ = torch.load(args.target_template)
m_trg_templ_state = m_trg_templ["model_state"]
to_bn, from_bn = find_diffs_bn(m0_state, m_trg_templ_state)
remove_nodes = []
append_nodes = {}
for k, _ in from_bn.items():
remove_nodes0, append_nodes0 = transform_from_bn(m0_state, k)
remove_nodes += remove_nodes0
append_nodes.update(append_nodes0)
for k, (_, ref_is_affine) in to_bn.items():
remove_nodes0, append_nodes0 = transform_to_bn(m0_state, k, ref_is_affine)
remove_nodes += remove_nodes0
append_nodes.update(append_nodes0)
m1_state = OrderedDict()
for k in m0_state:
if k in remove_nodes:
num_templ += 1
continue
m1_state[k] = m0_state[k]
k_app = k
while k_app in append_nodes:
key_next, node0 = append_nodes.pop(k_app)
k_app = key_next
m1_state[key_next] = node0
num_templ += 1
if len(append_nodes) > 0:
kk = list(append_nodes.keys())
print("Warning: Could not append %i nodes." % len(append_nodes), kk[0])
m0["model_state"] = m1_state
m0_state = m1_state
print("Model transformer applies these changes: normalization_shift, normalization_factor, flip_rgb", norm_mean, normalization_factor, args.flip_rgb)
print("to these input layers: ", inp_layers)
if num_templ > 0:
print("Changend %i nodes due to differences in template " % num_templ)
for l in inp_layers:
if not l in m0_state:
print("Warning: skipping unknown key "+l)
continue
transform_layer(m0_state, l, normalization_factor = normalization_factor, norm_mean = norm_mean, apply_bgr_flip = args.flip_rgb)
torch.save(m0, trg_path)
def main_convert(arg0):
parser = argparse.ArgumentParser(description="Program to remove image pre-processor steps by applying them to model wheights directly.\nWARNING: this currently does not work for batch-norm models!\nParams:")
parser.add_argument(
"--model_path",
nargs="?",
type=str,
default="frrnB_cityscapes_best_model_miou63.pkl",
help="Path to the saved model",
)
parser.add_argument(
"--inp_layers", nargs="?", type=str, default="", help="Names of all input layers, default: use auto-detection"
)
parser.add_argument(
"--change_version",
nargs="?",
type=str,
default="",
help="Change image mean normalization, command: <source_version>;<target_version>, e.g. cityscapes;pascal",
)
parser.add_argument(
"--img_norm",
dest="img_norm",
action="store_true",
help="Change image mean scaling (from [0;255] to [0;1])",
)
parser.add_argument(
"--no-img_norm",
dest="img_norm",
action="store_false",
help="Change image mean scaling (from [0;1] to [0;255])",
)
parser.add_argument(
"--flip_rgb",
dest="flip_rgb",
action="store_true",
help="Flip input channels (rgb<->bgr)",
)
parser.add_argument(
"--out_path", nargs="?", type=str, default="", help="Path for saving transformed model, default: inp + _transf.pkl"
)
parser.add_argument(
"--target_template",
nargs="?",
type=str,
default="",
help="Use target model file to identify conversions between batch normalization nodes",
)
parser.set_defaults(img_norm=None, flip_rgb=False)
args = parser.parse_args(arg0)
return convert(args)
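# Example invocation (a sketch; the model path is the argparse default above and
# the version names come from the mean_rgb table, so adjust both to your setup):
#   main_convert(['--model_path', 'frrnB_cityscapes_best_model_miou63.pkl',
#                 '--change_version', 'cityscapes;pascal', '--flip_rgb'])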
if __name__ == "__main__":
sys.exit(main_convert(sys.argv[1:]))
|
import json
import os
from datetime import datetime
from datetime import timedelta
import airflow
import urllib3
from airflow import DAG
from airflow.models import Variable
from airflow.operators.dummy import DummyOperator
from airflow.operators.python import BranchPythonOperator
from airflow.operators.python import PythonOperator
from airflow.operators.trigger_dagrun import TriggerDagRunOperator
from airflow.utils.task_group import TaskGroup
from airflow.utils.trigger_rule import TriggerRule
from hooks.db.airflow_metastore_helper import AirflowMetaStoreHelper
from hooks.db.redis_helper import RedisHelper
from hooks.hdfs.hdfs_helper import HdfsHelper
from operators.create_partitions import CreatePartitions
from operators.generate_statistics_by_partition import GenerateStatistics
from operators.hdfs_concat_files import HdfsConcatFiles
from operators.hdfs_prepare_concat import HdfsPrepareConcat
from operators.oracle_get_results import OracleGetResults
from transfers.oracle_blob_to_hdfs import OracleBlobToHdfsTransfer
from transfers.oracle_to_redis import OracleToRedisTransfer
from utils.config_helper import get_list_docs
from utils.config_helper import read_data_config
from utils.dag_helper import check_len_list_processed_dates
from utils.dag_helper import check_next_dag
from utils.dag_helper import clear_environment
from utils.dag_helper import crash_dag
from utils.time_handler import end_time
from utils.time_handler import start_time
from validators.compare_data_oracle_impala import CompareDataOracleImpala
from validators.prepare_reprocessing_inconsistency_data import PrepareReprocessingInconsistencyData
urllib3.disable_warnings()
# configs
here = os.path.abspath(os.path.dirname(__file__))
step = os.path.splitext(os.path.basename(__file__))[0]
with open(''.join(here + '/../configs/dag_config.json'), 'r') as f:
dag_config = json.load(f)
path_libs = dag_config['paths']['path_libs']
path_ojdbc = os.path.join(path_libs + '/' + dag_config['libs']['ojdbc'])
path_avro_tools = os.path.join(here + '/../libs/' + dag_config['libs']['avro_tools'])
path_spark_avro = os.path.join(path_libs + '/' + dag_config['libs']['spark_avro'])
path_native_lib = dag_config['paths']['path_native_lib']
def create_dag(
dag_name: str,
agg_by: str,
doc_type: str,
cache_blob: str,
path_avro_schema: str,
path_local_avro_schemas: str,
executor_cores: str,
executor_memory: str,
executor_instances: str,
driver_memory: str,
col_type_pk: str,
extra_cols: str,
max_registry_by_file: str,
oracle_conn_id: str,
table_ctrl: str,
table_ctrl_col_control_var: str,
table_ctrl_col_fk: str,
table_ctrl_col_dt_ref: str,
table_ctrl_col_dt_created: str,
oracle_conn_blob: str,
table_blob: str,
table_blob_col_pk: str,
table_blob_col_blob: str
) -> airflow.models.dag.DAG:
# -----------------
# DAG
# -----------------
args = {
'owner': 'job',
'run_as_user': 'job',
'start_date': datetime(2021, 8, 17),
'do_xcom_push': False,
'depends_on_past': True,
'retries': 10,
'retry_delay': timedelta(seconds=60),
'dag_name': dag_name
}
with DAG(dag_id=f'{step}_{dag_name}',
description=f'Import data from {dag_name}',
schedule_interval='00 19 * * *',
catchup=False,
default_args=args) as dag:
dag.doc_md = __doc__
dag.doc_md = """"""
layer = 'raw'
env = Variable.get('env', default_var='dev')
control_var = f"{int(Variable.get(f'{dag_name}_control_var', default_var='000000000000000')):015d}"
last_control_var = Variable.get(f'{dag_name}_last_control_var',
default_var='000000000000000')
current_dag_name = dag_name + '_' + control_var
total_pg = int(Variable.get(f'{current_dag_name}_total_pg', default_var=1))
list_all_dates = eval(Variable.get(f'{dag_name}_list_all_dates', default_var='[]'))
list_current_dates = eval(
Variable.get(f'{current_dag_name}_current_dates', default_var='[]'))
list_dags = eval(Variable.get(f'{dag_name}_list_dags', default_var='[]'))
total_rows = Variable.get('total_rows', default_var='100000')
items_by_query = 1000
sql_id = f'''
SELECT
{table_ctrl_col_fk} id,
{table_ctrl_col_control_var} control_var,
to_char({table_ctrl_col_dt_ref}, 'DD-MM-YYYY') dt_ref
FROM {table_ctrl}
WHERE
{table_ctrl_col_control_var} > :control_var
AND TO_DATE(to_char({table_ctrl_col_dt_created}, 'DD-MM-YYYY'), 'DD-MM-YYYY')
< TO_DATE(to_char(trunc(sysdate), 'DD-MM-YYYY'), 'DD-MM-YYYY')
ORDER BY {table_ctrl_col_control_var} ASC
FETCH FIRST :total_rows ROWS ONLY'''
dict_bind_sql_get_data = {'control_var': f'{control_var}',
'total_rows': f'{total_rows}'}
sql_count_id = f'''
SELECT COUNT({table_ctrl_col_fk})
FROM {table_ctrl}
WHERE
{table_ctrl_col_control_var} > :control_var
AND TO_DATE(to_char({table_ctrl_col_dt_created}, 'DD-MM-YYYY'), 'DD-MM-YYYY')
< TO_DATE(to_char(trunc(sysdate), 'DD-MM-YYYY'), 'DD-MM-YYYY')'''
dict_bind_sql_count_id = {'control_var': f'{control_var}'}
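# sql_id pages through control-table rows that were created before today and
# whose control_var is beyond the last processed value (capped at total_rows);
# sql_count_id counts how many such rows remain, which drives the
# check_if_contains_data_in_oracle branch task below.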
# -----------------
# TASKS
# -----------------
task_start = PythonOperator(
task_id='start',
python_callable=start_time,
depends_on_past=False,
op_kwargs={'dag_name': dag_name,
'execution_date': '{{ ts }}'}
)
task_oracle_execute_count = OracleGetResults(
task_id='oracle_execute_count',
current_dag_name=current_dag_name,
oracle_conn_id=oracle_conn_id,
sql_count_id=sql_count_id,
dict_bind=dict_bind_sql_count_id
)
task_check_if_contains_data_in_oracle = BranchPythonOperator(
task_id='check_if_contains_data_in_oracle',
python_callable=AirflowMetaStoreHelper().check_if_contains_data_in_oracle,
op_kwargs={'control_var': control_var,
'last_control_var': last_control_var,
'current_dag_name': current_dag_name,
'redis_conn_id': cache_blob,
'redis_key': f'{dag_name}_original',
'true_case': 'get_id',
'false_case': 'check_len_list_processed_dates'}
)
task_get_id = OracleToRedisTransfer(
task_id='get_id',
oracle_conn_id=oracle_conn_id,
redis_conn_id=cache_blob,
sql=sql_id,
dict_bind=dict_bind_sql_get_data,
name_redis_key=f'{dag_name}_original'
)
task_fill_data_gap = PythonOperator(
task_id='fill_data_gap',
python_callable=RedisHelper(cache_blob).fill_data_gaps,
op_kwargs={'current_dag_name': current_dag_name,
'redis_conn_id': cache_blob,
'redis_key': f'{dag_name}_original'}
)
task_get_dag_name = PythonOperator(
task_id='get_dag_name',
python_callable=AirflowMetaStoreHelper().get_dag_name,
op_kwargs={'current_dag_name': current_dag_name,
'name_list_dags': f'{dag_name}_list_dags',
'list_dags': list_dags}
)
task_get_date = PythonOperator(
task_id='get_date',
python_callable=RedisHelper(cache_blob).get_date,
op_kwargs={'dag_name': dag_name,
'current_dag_name': current_dag_name,
'list_columns': "['id', 'control_var', 'date']",
'redis_key': current_dag_name}
)
task_split_id_by_date = PythonOperator(
task_id='split_id_by_date',
python_callable=RedisHelper(cache_blob).split_id_by_date,
op_kwargs={'current_dag_name': current_dag_name,
'list_current_dates': list_current_dates,
'redis_key': current_dag_name}
)
task_generate_pagination = PythonOperator(
task_id='generate_pagination',
python_callable=RedisHelper(cache_blob).generate_pagination,
op_kwargs={'current_dag_name': current_dag_name,
'items_by_query': items_by_query,
'list_current_dates': list_current_dates,
'redis_key': current_dag_name}
)
task_generate_sql_by_date = PythonOperator(
task_id='generate_sql_by_date',
python_callable=RedisHelper(cache_blob).generate_sql_by_date,
op_kwargs={'current_dag_name': current_dag_name,
'list_current_dates': list_current_dates,
'oracle_conn': oracle_conn_blob,
'table_ctrl': table_ctrl,
'table_ctrl_col_fk': table_ctrl_col_fk,
'table_blob': table_blob,
'table_blob_col_pk': table_blob_col_pk,
'table_blob_col_blob': table_blob_col_blob,
'items_by_query': items_by_query,
'total_pg': total_pg,
'extra_cols': extra_cols,
'redis_key': current_dag_name}
)
task_extract_decompress_load = OracleBlobToHdfsTransfer(
task_id=f'extract_decompress_load',
retries=20,
dag_name=dag_name,
current_dag_name=current_dag_name,
oracle_conn_id=oracle_conn_id,
query_id=sql_id,
table_ctrl_col_fk=table_ctrl_col_fk,
extra_cols=extra_cols,
oracle_conn_blob=oracle_conn_blob,
table_blob_col_pk=table_blob_col_pk,
table_blob_col_blob=table_blob_col_blob,
path_avro_schema=path_avro_schema,
path_local_avro_schemas=f'{path_local_avro_schemas}/{layer}/{dag_name}.avsc',
total_pg=total_pg,
layer=layer,
env=env,
step=step,
executor_cores=executor_cores,
executor_memory=executor_memory,
executor_instances=executor_instances,
driver_memory=driver_memory,
path_ojdbc=path_ojdbc,
path_spark_avro=path_spark_avro,
path_native_lib=path_native_lib,
col_type_pk=col_type_pk,
compress_type='snappy',
hdfs_conn_id='webhdfs',
oracle_driver='oracle.jdbc.driver.OracleDriver',
list_current_dates=list_current_dates
)
task_update_control_var = PythonOperator(
task_id='update_control_var',
python_callable=AirflowMetaStoreHelper().update_control_var,
trigger_rule=TriggerRule.ALL_SUCCESS,
depends_on_past=True,
op_kwargs={'control_var': control_var,
'dag_name': dag_name,
'current_dag_name': current_dag_name,
'redis_conn_id': cache_blob,
'last_control_var': last_control_var,
'list_dags': list_dags,
'total_pg': total_pg,
'list_current_dates': list_current_dates,
'list_all_dates': list_all_dates,
'redis_key': current_dag_name}
)
task_clear_environment = PythonOperator(
task_id='clear_environment',
python_callable=clear_environment,
trigger_rule=TriggerRule.ALL_SUCCESS,
op_kwargs={'control_var': control_var,
'dag_name': dag_name,
'redis_conn_id': cache_blob,
'airflow_conn_id': 'airflow_db',
'last_control_var': last_control_var,
'list_dags': list_dags,
'redis_key': current_dag_name}
)
task_check_len_list_processed_dates = BranchPythonOperator(
task_id='check_len_list_processed_dates',
trigger_rule=TriggerRule.ALL_SUCCESS,
python_callable=check_len_list_processed_dates,
op_kwargs={'dag_name': dag_name,
'list_all_dates': list_all_dates,
'true_case': 'prepare_execution',
'false_case': 'waiting_execution'}
)
task_prepare_execution = DummyOperator(
task_id='prepare_execution'
)
with TaskGroup(group_id='group_hdfs_concat_file') as group_hdfs_concat_file:
task_hdfs_prepare_concat = PythonOperator(
task_id='hdfs_prepare_concat',
trigger_rule=TriggerRule.ALL_SUCCESS,
python_callable=HdfsPrepareConcat('webhdfs').execute,
op_kwargs={'dag_name': dag_name,
'current_dag_name': current_dag_name,
'hdfs_path': f'/data/{env}/{layer}/{dag_name}',
'agg_by': agg_by,
'layer': layer,
'env': env,
'list_all_dates': list_all_dates,
'path_avro_tools': path_avro_tools}
)
# TODO: refactor -> create a task
list_all_dates = AirflowMetaStoreHelper().set_granularity(list_all_dates=list_all_dates,
agg_by=agg_by)
for date in list_all_dates:
task_concat_file = HdfsConcatFiles(
task_id=f'hdfs_concat_file-{date}',
retries=100,
dag_name=dag_name,
date=date,
layer=layer,
env=env,
col_name_control_var=table_ctrl_col_control_var,
path_avro_schema=path_avro_schema,
hdfs_conn_id='webhdfs',
executor_cores=executor_cores,
executor_memory=executor_memory,
driver_memory=driver_memory,
path_ojdbc=path_ojdbc,
path_spark_avro=path_spark_avro,
path_native_lib=path_native_lib,
format_data='avro',
compress_type='snappy',
max_registry_by_avro=max_registry_by_file
)
task_hdfs_prepare_concat >> task_concat_file
task_create_partitions = PythonOperator(
task_id='create_partitions',
trigger_rule=TriggerRule.ALL_SUCCESS,
python_callable=CreatePartitions().execute,
op_kwargs={'dag_name': dag_name,
'current_dag_name': current_dag_name,
'list_all_dates': list_all_dates,
'hive_conn_id': 'hive',
'impala_conn_id': 'impala',
'agg_by': agg_by,
'layer': layer,
'env': env}
)
task_save_execution_state_hdfs = PythonOperator(
task_id='save_execution_state_hdfs',
python_callable=HdfsHelper('webhdfs').save_execution_state_hdfs,
op_kwargs={'dag_name': dag_name,
'layer': layer,
'control_var': control_var}
)
with TaskGroup(group_id='group_generate_statistics') as group_generate_statistics:
# TODO: refactor -> create a task
list_all_dates = AirflowMetaStoreHelper().set_granularity(list_all_dates=list_all_dates,
agg_by=agg_by)
for date in list_all_dates:
PythonOperator(
task_id=f'generate_statistics-{date}',
retries=50,
python_callable=GenerateStatistics().execute,
op_kwargs={'dag_name': dag_name,
'date': date,
'layer': layer,
'impala_conn_id': 'impala',
'hive_conn_id': 'hive'}
)
with TaskGroup(group_id='group_check_data_quality') as group_check_data_quality:
# TODO: refactor -> create a task
list_all_dates = AirflowMetaStoreHelper().set_granularity(list_all_dates=list_all_dates,
agg_by=agg_by)
for date in list_all_dates:
CompareDataOracleImpala(
task_id=f'compare_oracle_impala_{date}',
retries=100,
dag_name=dag_name,
last_control_var=last_control_var,
layer=layer,
date=date,
table_ctrl=table_ctrl,
dt_ref=table_ctrl_col_dt_ref,
agg_by=agg_by,
oracle_conn_id=oracle_conn_id,
hive_conn='impala',
table_ctrl_col_fk=table_ctrl_col_fk,
table_ctrl_col_dt_created=table_ctrl_col_dt_created
)
task_check_if_contains_inconsistency = BranchPythonOperator(
task_id='check_if_contains_inconsistency',
trigger_rule=TriggerRule.ALL_SUCCESS,
wait_for_downstream=True,
python_callable=AirflowMetaStoreHelper('airflow_db').check_if_contains_inconsistency,
op_kwargs={'dag_name': dag_name,
'last_control_var': last_control_var,
'layer': layer,
'true_case': 'prepare_reprocessing_inconsistency_data',
'false_case': 'check_next_dag',
'redis_conn_id': cache_blob,
'redis_key': f'{dag_name}_inconsistency_date'}
)
task_prepare_reprocessing_inconsistency_data = PrepareReprocessingInconsistencyData(
task_id='prepare_reprocessing_inconsistency_data',
trigger_rule=TriggerRule.ALL_SUCCESS,
dag_name=dag_name,
current_dag_name=current_dag_name,
layer=layer,
last_control_var=last_control_var,
list_all_dates=list_all_dates,
table_ctrl=table_ctrl,
table_ctrl_col_fk=table_ctrl_col_fk,
table_ctrl_col_control_var=table_ctrl_col_control_var,
table_ctrl_col_dt_ref=table_ctrl_col_dt_ref,
table_ctrl_col_dt_created=table_ctrl_col_dt_created,
hive_conn_id='impala',
hdfs_conn_id='webhdfs',
airflow_conn_id='airflow_db',
oracle_conn_id=oracle_conn_id
)
task_crash_dag = PythonOperator(
task_id='crash_dag',
trigger_rule=TriggerRule.ALL_SUCCESS,
python_callable=crash_dag,
)
task_check_next_dag = BranchPythonOperator(
task_id='check_next_dag',
trigger_rule=TriggerRule.ALL_SUCCESS,
python_callable=check_next_dag,
op_kwargs={'dag_name': dag_name,
'doc_type': doc_type,
'true_case': f'trigger_pre_process_{dag_name}',
'false_case': f'trigger_parser_{dag_name}'}
)
task_trigger_pre_process = TriggerDagRunOperator(
task_id=f'trigger_pre_process_{dag_name}',
trigger_dag_id=f"pre_process_{dag_name}"
)
task_trigger_parser = TriggerDagRunOperator(
task_id=f'trigger_parser_{dag_name}',
trigger_dag_id=f"parser_{dag_name}"
)
task_trigger_import_file = TriggerDagRunOperator(
task_id=f'trigger_import_file_{dag_name}',
trigger_dag_id=dag.dag_id
)
task_waiting_execution = DummyOperator(
trigger_rule=TriggerRule.ALL_DONE,
task_id='waiting_execution'
)
task_end = PythonOperator(
task_id='end',
python_callable=end_time,
op_kwargs={'current_dag_name': current_dag_name,
'dag_name': dag_name,
'last_control_var_name': f'{dag_name}_last_control_var',
'list_dates': f'{current_dag_name}_list_dates',
'postgres_conn_id': 'airflow_db'}
)
# -----------------
# GRAPH
# -----------------
# task_check_if_contains_data_in_oracle: true
task_start >> task_oracle_execute_count >> task_check_if_contains_data_in_oracle >> task_get_id >> task_fill_data_gap >> [
task_get_date,
task_get_dag_name] >> task_split_id_by_date >> task_generate_pagination >> task_generate_sql_by_date >> task_extract_decompress_load >> task_update_control_var >> [
task_clear_environment, task_trigger_import_file] >> task_waiting_execution >> task_end
# task_check_if_contains_data_in_oracle: false
# task_check_len_list_processed_dates: true
task_start >> task_oracle_execute_count >> task_check_if_contains_data_in_oracle >> task_check_len_list_processed_dates >> task_prepare_execution >> [
group_hdfs_concat_file, task_save_execution_state_hdfs] >> task_create_partitions >> [
group_check_data_quality, group_generate_statistics] >> task_check_if_contains_inconsistency
task_start >> task_oracle_execute_count >> task_check_if_contains_data_in_oracle >> task_check_len_list_processed_dates >> task_prepare_execution >> [
group_hdfs_concat_file,
task_save_execution_state_hdfs] >> task_create_partitions >> task_check_if_contains_inconsistency >> task_prepare_reprocessing_inconsistency_data >> task_crash_dag
# task_check_next_dag: true
task_check_if_contains_inconsistency >> task_check_next_dag >> task_trigger_pre_process >> task_waiting_execution >> task_end
# task_check_next_dag: false
task_check_if_contains_inconsistency >> task_check_next_dag >> task_trigger_parser >> task_waiting_execution >> task_end
# task_check_if_contains_data_in_oracle: false
# task_check_len_list_processed_dates: false
task_start >> task_oracle_execute_count >> task_check_if_contains_data_in_oracle >> task_check_len_list_processed_dates >> task_waiting_execution >> task_end
return dag
# -----------------
# DAG GENERATOR
# -----------------
for doc in get_list_docs(path=''.join(here + '/../configs/data/')):
config = read_data_config(data_name=doc)
if config[doc]['doc_type'] != 'table':
globals()[doc] = create_dag(
dag_name=config[doc]['name'],
agg_by=config[doc]['agg_by'],
doc_type=config[doc]['doc_type'],
path_avro_schema=dag_config['paths']['path_hdfs']['path_avro_schemas'],
path_local_avro_schemas=dag_config['paths']['path_local_avro_schemas'],
executor_cores=config[doc]['executor_cores'],
executor_memory=config[doc]['executor_memory'],
executor_instances=config[doc]['executor_instances'],
driver_memory=config[doc]['driver_memory'],
cache_blob=config[doc]['cache_blob'],
max_registry_by_file=config[doc]['max_registry_by_avro'],
col_type_pk=config[doc]['hdfs_data_schema']['raw']['cols'][0]['type'],
extra_cols=config[doc]['source_ctrl']['cols']['extra_cols'],
oracle_conn_id=config[doc]['source_ctrl']['oracle_conn'],
table_ctrl=config[doc]['source_ctrl']['table_name'],
table_ctrl_col_fk=config[doc]['source_ctrl']['cols']['fk'],
table_ctrl_col_control_var=config[doc]['source_ctrl']['cols']['control_var'],
table_ctrl_col_dt_ref=config[doc]['source_ctrl']['cols']['dt_ref'],
table_ctrl_col_dt_created=config[doc]['source_ctrl']['cols']['dt_created'],
oracle_conn_blob=config[doc]['source_blob']['oracle_conn'],
table_blob=config[doc]['source_blob']['table_name'],
table_blob_col_pk=config[doc]['source_blob']['cols']['pk'],
table_blob_col_blob=config[doc]['source_blob']['cols']['blob']
)
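# For reference, a hypothetical sketch of the per-document config shape that create_dag()
# expects, inferred from the keys read in the loop above. Every name and value below is
# illustrative only and is not used by the DAGs generated above.
_example_doc_config = {
    'example_doc': {
        'name': 'example_doc',
        'doc_type': 'blob',
        'agg_by': 'month',
        'executor_cores': 2,
        'executor_memory': '4g',
        'executor_instances': 2,
        'driver_memory': '2g',
        'cache_blob': 'redis_default',
        'max_registry_by_avro': 100000,
        'hdfs_data_schema': {'raw': {'cols': [{'name': 'id', 'type': 'string'}]}},
        'source_ctrl': {
            'oracle_conn': 'oracle_default',
            'table_name': 'CTRL_TABLE',
            'cols': {
                'fk': 'DOC_ID',
                'control_var': 'CONTROL_VAR',
                'dt_ref': 'DT_REF',
                'dt_created': 'DT_CREATED',
                'extra_cols': [],
            },
        },
        'source_blob': {
            'oracle_conn': 'oracle_default',
            'table_name': 'BLOB_TABLE',
            'cols': {'pk': 'DOC_ID', 'blob': 'DOC_CONTENT'},
        },
    },
}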
|
# Read n from stdin and print the nth Fibonacci number via a naive recursive lambda
# (exponential time; fine for small n).
n = int(input())
fib = lambda n: 0 if n == 0 else 1 if n == 1 else fib(n-1) + fib(n-2)
print(fib(n))
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT License.
from msccl.language import *
def allreduce_allpairs(size):
# Each rank sends the nth chunk to the nth rank into scratch space
for r1 in range(size):
for r2 in range(size):
if r1 != r2:
index = r2 * size
c = chunk(r1, Buffer.input, index, size=size)
c.copy(r2, 'scratch', sendtb=r2, recvtb=r1)
# Each rank performs a local reduction on the nth chunk
# Utilize 8 threadblocks for this reduction for better parallelism
for r in range(size):
for index in range(0, size * (size-1)):
c = chunk(r, Buffer.input, r*size + (index % size))
c.reduce(chunk(r, 'scratch', index), sendtb=(index % size))
# Each rank sends the fully reduced nth chunk to all other gpus
for r1 in range(size):
for r2 in range(size):
if r1 != r2:
index = r1 * size
c = chunk(r1, Buffer.input, index, size)
c.copy(r2, Buffer.input, index, sendtb=r2, recvtb=r1)
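# A minimal pure-Python sketch (hypothetical, not part of the MSCCL program above) that mirrors
# the same three phases -- scatter to scratch, local reduce, allgather -- on plain lists, to make
# the data movement of the all-pairs algorithm explicit.
def reference_allpairs_allreduce(inputs):
    size = len(inputs)  # inputs[r] holds rank r's buffer of size*size chunk values
    # Phase 1: rank r1 sends its r2-th block of `size` chunks into rank r2's scratch space.
    scratch = [
        [inputs[r1][r2 * size + i] for r1 in range(size) if r1 != r2 for i in range(size)]
        for r2 in range(size)
    ]
    # Phase 2: each rank reduces the received blocks into its own r-th block.
    for r in range(size):
        for index in range(size * (size - 1)):
            inputs[r][r * size + (index % size)] += scratch[r][index]
    # Phase 3: each rank broadcasts its fully reduced r-th block to every other rank.
    for r1 in range(size):
        for r2 in range(size):
            if r1 != r2:
                for i in range(size):
                    inputs[r2][r1 * size + i] = inputs[r1][r1 * size + i]
    return inputs
# Example: reference_allpairs_allreduce([[1] * 9, [2] * 9, [3] * 9]) leaves every rank
# with nine 6s, i.e. the elementwise sum across ranks.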
|
import pytest
from django.urls import reverse
from core.logic.serialization import b64json
from organizations.models import UserOrganization
from test_fixtures.scenarios.basic import users # noqa
@pytest.mark.django_db
class TestSlicerAPI:
def test_primary_dimension_required(self, flexible_slicer_test_data, admin_client):
url = reverse('flexible-slicer')
resp = admin_client.get(url)
assert resp.status_code == 400
assert 'error' in resp.json()
assert resp.json()['error']['code'] == 'E104'
def test_group_by_required(self, flexible_slicer_test_data, admin_client):
url = reverse('flexible-slicer')
resp = admin_client.get(url, {'primary_dimension': 'platform'})
assert resp.status_code == 400
assert 'error' in resp.json()
assert resp.json()['error']['code'] == 'E106'
def test_user_organization_filtering_no_access(self, flexible_slicer_test_data, users, client):
"""
Test that organizations in reporting API are properly filtered to only contain those
accessible by current user.
"""
user = users['user1']
assert not user.is_superuser, 'user must be unprivileged'
assert not user.is_from_master_organization, 'user must be unprivileged'
client.force_login(user)
resp = client.get(
reverse('flexible-slicer'),
{'primary_dimension': 'organization', 'groups': b64json(['metric'])},
)
assert resp.status_code == 200
data = resp.json()
assert data['count'] == 0
assert len(data['results']) == 0
def test_user_organization_filtering(self, flexible_slicer_test_data, client, users):
"""
Test that organizations in reporting API are properly filtered to only contain those
accessible by current user.
"""
organization = flexible_slicer_test_data['organizations'][1]
user = users['user1']
UserOrganization.objects.create(user=user, organization=organization)
assert not user.is_superuser, 'user must be unprivileged'
assert not user.is_from_master_organization, 'user must be unprivileged'
client.force_login(user)
resp = client.get(
reverse('flexible-slicer'),
{'primary_dimension': 'organization', 'groups': b64json(['metric'])},
)
assert resp.status_code == 200
data = resp.json()
assert data['count'] == 1
assert len(data['results']) == 1
assert data['results'][0]['pk'] == organization.pk
def test_parts_api(self, flexible_slicer_test_data, admin_client):
"""
Tests that the /parts/ endpoint for getting possible parts after splitting works
"""
resp = admin_client.get(
reverse('flexible-slicer-split-parts'),
{
'primary_dimension': 'organization',
'groups': b64json(['metric']),
'split_by': b64json(['platform']),
},
)
assert resp.status_code == 200
data = resp.json()
assert data['count'] == 3
assert len(data['values']) == 3
def test_parts_api_with_filter(self, flexible_slicer_test_data, admin_client):
"""
Tests that the /parts/ endpoint respects filters when listing possible parts after splitting
"""
pls = flexible_slicer_test_data['platforms']
resp = admin_client.get(
reverse('flexible-slicer-split-parts'),
{
'primary_dimension': 'organization',
'groups': b64json(['metric']),
'split_by': b64json(['platform']),
'filters': b64json({'platform': [p.pk for p in pls[:2]]}),
},
)
assert resp.status_code == 200
data = resp.json()
assert data['count'] == 2
assert len(data['values']) == 2
def test_get_data_with_parts_no_part(self, flexible_slicer_test_data, admin_client):
"""
Tests that when getting data and `split_by` is active, we need to provide the `part` arg
"""
pls = flexible_slicer_test_data['platforms']
resp = admin_client.get(
reverse('flexible-slicer'),
{
'primary_dimension': 'organization',
'groups': b64json(['metric']),
'split_by': b64json(['platform']),
'filters': b64json({'platform': [p.pk for p in pls[:2]]}),
},
)
assert resp.status_code == 400
def test_get_data_with_parts_part_given(self, flexible_slicer_test_data, admin_client):
"""
Tests that when `split_by` is active and the `part` arg is provided, the data is returned
"""
pls = flexible_slicer_test_data['platforms']
resp = admin_client.get(
reverse('flexible-slicer'),
{
'primary_dimension': 'organization',
'groups': b64json(['metric']),
'split_by': b64json(['platform']),
'filters': b64json({'platform': [p.pk for p in pls[:2]]}),
'part': b64json([pls[0].pk]),
},
)
assert resp.status_code == 200
|
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
"""
The Model Zoo.
This file contains a list of all the models in the model zoo, the path to
load them, agents & tasks associated (e.g. they were trained using) and a
description. Using the path you should be able to download and use the model
automatically, e.g.:
.. code-block::
python examples/interactive.py --model-file
"zoo:wikipedia_20161221/tfidf_retriever/drqa_docs"
There are a number of guidelines you should follow in the zoo:
- You should choose the best directory name as possible. An input of
``zoo:PROJECTNAME/MODELNAME/FILENAME`` will attempt to use a build script from
parlai/zoo/PROJECTNAME/MODELNAME.py.
- You should include all of the following fields:
* title: the name of the entry:
* id: corresponds to PROJECTNAME
* description: describe the entry in reasonable detail. It should be at least
a couple sentences.
* example: an example command to chat with or evaluate the model
* result: the expected output from running the model. You are strongly encouraged
to make a nightly test which verifies this result.
* external_website: if applicable, an external website related to the zoo to
link to.
* project: if applicable, a link to the project folder. You must have either
external_website or project.
* example2 and result2 (optional): additional examples to run.
- As much as possible, you should try to include two examples: one to generate
some key metrics (e.g. from a paper) and one to actually chat with the model
using interactive.py. Both should strongly attempt to minimize mandatory
command line flags.
"""
model_list = [
{
"title": "DrQA SQuAD model",
"id": "drqa",
"path": "zoo:drqa/squad/model",
"agent": "drqa",
"task": "squad",
"description": "DrQA Reader trained on SQuAD",
"external_website": "https://github.com/facebookresearch/DrQA",
"example": (
"python -m parlai.scripts.eval_model -mf zoo:drqa/squad/model -t squad "
"-dt test"
),
"result": (
# TODO: this differs slightly from the actual results as of 2019-07-23
"{'exs': 10570, 'accuracy': 0.6886, 'f1': 0.7821, 'hits@1': 0.689, 'hits@5': 0.689, 'hits@10': 0.689, 'hits@100': 0.689, 'bleu': 0.1364, 'train_loss': 0}" # noqa: E501
),
},
{
"title": "Wikipedia Retriever (used for open SQuAD)",
"id": "wikipedia_20161221",
"path": "zoo:wikipedia_20161221/tfidf_retriever/drqa_docs",
"agent": "tfidf_retriever",
"external_website": "https://github.com/facebookresearch/DrQA",
"task": "wikipedia:full",
"example": (
"python -m parlai.scripts.interactive --model tfidf_retriever "
"-mf zoo:wikipedia_20161221/tfidf_retriever/drqa_docs"
),
"result": (
"""
Enter Your Message: Yann LeCun
[candidate_scores]: [507.05804682 390.18244433 279.24033928 269.60377042 214.00140589]
[SparseTfidfRetrieverAgent]:
Deep learning (also known as deep structured learning, hierarchical learning or deep machine learning) is a branch of machine learning based on a set of algorithms that attempt to model high level abstractions in data. In a simple case, you could have two sets of neurons: ones that receive an input signal and ones that send an output signal. When the input layer receives an input it passes on a modified version of the input to the next layer. In a deep network, there are many layers between the input and output (and the layers are not made of neurons but it can help to think of it that way), allowing the algorithm to use multiple processing layers, composed of multiple linear and non-linear transformations.
Deep learning is part of a broader family of machine learning methods based on ...
to commonsense reasoning which operates on concepts in terms of production rules of the grammar, and is a basic goal of both human language acquisition and AI. (See also Grammar induction.)
""" # noqa: E501
),
"description": (
"Retrieval over Wikipedia dump, used for DrQA on the open squad "
"dataset. This is the dump from the original paper, used for "
"replicating results."
),
},
{
"title": "Wikipedia Retriever (used for Wizard of Wikipedia)",
"id": "wikipedia_full",
"path": "zoo:wikipedia_full/tfidf_retriever/model",
"agent": "tfidf_retriever",
"task": "wikipedia:full",
"project": "https://github.com/facebookresearch/ParlAI/tree/master/projects/wizard_of_wikipedia",
"description": (
"Retrieval over Wikipedia dump, used for DrQA on the open squad " "dataset."
),
"example": (
"python -m parlai.scripts.interactive --model tfidf_retriever -mf "
"zoo:wikipedia_full/tfidf_retriever/model"
),
"result": (
"""
Enter Your Message: Yann LeCun
[candidate_scores]: [454.74038503 353.88863708 307.31353203 280.4501096 269.89960432]
[SparseTfidfRetrieverAgent]:
Yann LeCun (; born 1960) is a computer scientist with contributions in machine learning, computer vision, mobile robotics and computational neuroscience. He is well known for his work on optical character recognition and computer vision using convolutional neural networks (CNN), and is a founding father of convolutional nets. He is also one of the main creators of the DjVu image compression technology (together with Léon Bottou and Patrick Haffner). He co-developed the Lush programming language with Léon Bottou.
Yann LeCun was born near Paris, France, in 1960. He received a Diplôme d'Ingénieur from the Ecole Superieure d'Ingénieur en Electrotechnique et Electronique (ESIEE), Paris in 1983, and a PhD in Computer Science from Université Pierre et Marie Curie in 1987 during which he ...
of Science and Technology in Saudi Arabia because he was considered a terrorist in the country in view of his atheism.
In 2018 Yann LeCun picked a fight with a robot to support Facebook AI goals.
""" # noqa: E501
),
},
{
"title": "Wizard of Wikipedia (End to end Generator)",
"id": "wizard_of_wikipedia",
"path": "zoo:wizard_of_wikipedia/end2end_generator/model",
"description": ("End2End Generative model for Wizard of Wikipedia"),
"task": "wizard_of_wikipedia:generator",
"project": "https://github.com/facebookresearch/ParlAI/tree/master/projects/wizard_of_wikipedia",
"example": (
"python examples/display_model.py -t wizard_of_wikipedia:generator "
"-mf zoo:wizard_of_wikipedia/end2end_generator/model -n 1 "
"--display-ignore-fields knowledge_parsed"
),
"result": (
"""
[chosen_topic]: Gardening
[knowledge]: no_passages_used __knowledge__ no_passages_used
Gardening __knowledge__ Gardening is the practice of growing and cultivating plants as part of horticulture.
Gardening __knowledge__ In gardens, ornamental plants are often grown for their flowers, foliage, or overall appearance; useful plants, such as root vegetables, leaf vegetables, fruits, and herbs, are grown for consumption, for use as dyes, or for medicinal or cosmetic use.
Gardening __knowledge__ Gardening is considered by many people to be a relaxing activity.
Gardening __knowledge__ Gardening ranges in scale from fruit orchards, to long boulevard plantings with one or more different types of shrubs, trees, and herbaceous plants, to residential yards including lawns and foundation plantings, to plants in large or small containers ...
there had been several other notable gardening magazines in circulation, including the "Gardeners' Chronicle" and "Gardens Illustrated", but these were tailored more for the professional gardener.
[title]: Gardening
[checked_sentence]: Gardening is considered by many people to be a relaxing activity.
[eval_labels_choice]: I live on a farm, we garden all year long, it is very relaxing.
[checked_sentence_parsed]: Gardening __knowledge__ Gardening is considered by many people to be a relaxing activity.
[WizTeacher]: Gardening
I like Gardening, even when I've only been doing it for a short time.
[eval_labels: I live on a farm, we garden all year long, it is very relaxing.]
[TorchAgent]: i love gardening , it is considered a relaxing activity .
""" # noqa: E501
),
},
{
"title": "Wizard of Wikipedia (Full Dialogue Retrieval Model)",
"id": "wizard_of_wikipedia",
"path": "zoo:wizard_of_wikipedia/full_dialogue_retrieval_model/model",
"description": ("Full Dialogue Retrieval Model for Wizard of Wikipedia"),
"project": "https://github.com/facebookresearch/ParlAI/tree/master/projects/wizard_of_wikipedia",
"task": "wizard_of_wikipedia",
"example2": "examples/interactive.py -m projects:wizard_of_wikipedia:interactive_retrieval -t wizard_of_wikipedia",
"result2": (
"[ Your chosen topic is: Teapot ]\n"
"Enter Your Message: do you like tea?\n"
"[WizardRetrievalInteractiveAgent]: Yes! I only use teapots that have a little air hole in the lid. That prevents the spout from dripping or splashing when the tea is poured. Most teapots have this though.\n"
"Enter Your Message: what about kettles?\n"
"[WizardRetrievalInteractiveAgent]: I would think you could use them to heat any type of liquid! I use my teapots with a tea cosy. It's a thermal cover that helps keep the tea hot.\n"
"Enter Your Message: do you like earl grey?\n"
"[WizardRetrievalInteractiveAgent]: I think I'll try some Lipton, I love their green tea!"
),
"example": (
"python examples/display_model.py -t wizard_of_wikipedia "
"-mf zoo:wizard_of_wikipedia/full_dialogue_retrieval_model/model "
"-m projects:wizard_of_wikipedia:wizard_transformer_ranker "
"--n-heads 6 --ffn-size 1200 --embeddings-scale False "
"--delimiter ' __SOC__ ' --n-positions 1000 --legacy True "
),
"result": (
"""
[chosen_topic]: Gardening
[knowledge]: Gardening Gardening is the practice of growing and cultivating plants as part of horticulture.
Gardening In gardens, ornamental plants are often grown for their flowers, foliage, or overall appearance; useful plants, such as root vegetables, leaf vegetables, fruits, and herbs, are grown for consumption, for use as dyes, or for medicinal or cosmetic use.
Gardening Gardening is considered by many people to be a relaxing activity.
Gardening Gardening ranges in scale from fruit orchards, to long boulevard plantings with one or more different types of shrubs, trees, and herbaceous plants, to residential yards including lawns and foundation plantings, to plants in large or small containers grown inside or outside.
Gardening Gardening may be very specialized, with only one type of plant grown, ...
there had been several other notable gardening magazines in circulation, including the "Gardeners' Chronicle" and "Gardens Illustrated", but these were tailored more for the professional gardener.
[title]: Gardening
[checked_sentence]: Gardening is considered by many people to be a relaxing activity.
[eval_labels_choice]: I live on a farm, we garden all year long, it is very relaxing.
[wizard_of_wikipedia]: Gardening
I like Gardening, even when I've only been doing it for a short time.
[label_candidates: OK what's the history?|Right, thats cool. I had no idea they still did the DVD thing, What is Netflix's highest rated show? do you know? |I will definitely check his first album out as he sounds interesting.|I don't know a whole lot about it. I was raised Catholic but don't practice anything now.|Well , this was a good conversation. |...and 95 more]
[eval_labels: I live on a farm, we garden all year long, it is very relaxing.]
[TorchAgent]: I live on a farm, we garden all year long, it is very relaxing.
""" # noqa: E501
),
},
{
"title": "LIGHT BERT-Biranker Dialogue model",
"id": "light",
"path": "zoo:light/biranker_dialogue/model",
"agent": "bert_ranker/bi_encoder_ranker",
"project": "https://github.com/facebookresearch/ParlAI/tree/master/projects/light",
"task": "light_dialog",
"description": ("LIGHT Dialogue task, replicating the numbers from the paper."),
"example": (
"python examples/eval_model.py -t light_dialog "
"-mf zoo:light/biranker_dialogue/model"
),
"result": "{'exs': 6623, 'accuracy': 0.7586, 'f1': 0.7802, 'hits@1': 0.759, 'hits@5': 0.965," # noqa: E501
"'hits@10': 0.994, 'hits@100': 1.0, 'bleu': 0.7255, 'lr': 5e-05, 'total_train_updates': 15050," # noqa: E501
"'examples': 6623, 'loss': 5307.0, 'mean_loss': 0.8013, 'mean_rank': 1.599, 'train_accuracy': 0}", # noqa: E501
},
{
"title": "Controllable Dialogue ConvAI2 model",
"id": "controllable_dialogue",
"path": "zoo:controllable_dialogue/convai2_finetuned_baseline",
"agent": "projects.controllable_dialogue.controllable_seq2seq.controllable_seq2seq:ControllableSeq2seqAgent", # noqa: E501
"task": "convai2",
"project": "https://github.com/facebookresearch/ParlAI/tree/master/projects/controllable_dialogue",
"example": (
"python -m parlai.scripts.eval_model --model "
"projects.controllable_dialogue.controllable_seq2seq.controllable_seq2seq:"
"ControllableSeq2seqAgent --task "
"projects.controllable_dialogue.tasks.agents "
"-mf zoo:controllable_dialogue/convai2_finetuned_baseline"
),
"result": (
"{'exs': 7801, 'accuracy': 0.0006409, 'f1': 0.1702, 'bleu': 0.005205, "
"'token_acc': 0.3949, 'loss': 3.129, 'ppl': 22.86}"
),
"description": ("Seq2Seq model with control trained on ConvAI2"),
},
{
"title": "TransResNet (ResNet 152) Personality-Captions model",
"id": "personality_captions",
"path": "zoo:personality_captions/transresnet",
"agent": "projects.personality_captions.transresnet.transresnet:TransresnetAgent", # noqa: E501
"task": "personality_captions",
"project": "https://github.com/facebookresearch/ParlAI/tree/master/projects/personality_captions",
"description": (
"Transresnet Model pretrained on the Personality-Captions task"
),
"example": (
"python examples/eval_model.py -t personality_captions "
"-mf zoo:personality_captions/transresnet/model --num-test-labels 5 -dt test"
),
"result": (
"{'exs': 10000, 'accuracy': 0.5113, 'f1': 0.5951, 'hits@1': 0.511, "
"'hits@5': 0.816, 'hits@10': 0.903, 'hits@100': 0.998, 'bleu': 0.4999, "
"'hits@1/100': 1.0, 'loss': -0.002, 'med_rank': 1.0}"
),
},
{
"title": "Poly-Encoder Transformer Reddit Pretrained Model",
"id": "pretrained_transformers",
"path": "zoo:pretrained_transformers/poly_model_huge_reddit",
"agent": "transformer/polyencoder",
"task": "pretrained_transformers",
"project": "https://github.com/facebookresearch/ParlAI/tree/master/projects/polyencoder/",
"description": (
"Poly-Encoder pretrained on Reddit. Use this model as an ``--init-model`` for a poly-encoder "
"when fine-tuning on another task. For more details on how to train, see the project page."
),
"example": (
"python -u examples/train_model.py "
"--init-model zoo:pretrained_transformers/poly_model_huge_reddit/model "
"-t convai2 "
"--model transformer/polyencoder --batchsize 256 --eval-batchsize 10 "
"--warmup_updates 100 --lr-scheduler-patience 0 --lr-scheduler-decay 0.4 "
"-lr 5e-05 --data-parallel True --history-size 20 --label-truncate 72 "
"--text-truncate 360 --num-epochs 8.0 --max_train_time 200000 -veps 0.5 "
"-vme 8000 --validation-metric accuracy --validation-metric-mode max "
"--save-after-valid True --log_every_n_secs 20 --candidates batch --fp16 True "
"--dict-tokenizer bpe --dict-lower True --optimizer adamax --output-scaling 0.06 "
"--variant xlm --reduction-type mean --share-encoders False "
"--learn-positional-embeddings True --n-layers 12 --n-heads 12 --ffn-size 3072 "
"--attention-dropout 0.1 --relu-dropout 0.0 --dropout 0.1 --n-positions 1024 "
"--embedding-size 768 --activation gelu --embeddings-scale False --n-segments 2 "
"--learn-embeddings True --polyencoder-type codes --poly-n-codes 64 "
"--poly-attention-type basic --dict-endtoken __start__ "
"--model-file <YOUR MODEL FILE>"
),
"result": (
"(subject to some variance, you may see the following as a result of validation of the model)\n"
"{'exs': 7801, 'accuracy': 0.8942 ...}"
),
},
{
"title": "Poly-Encoder Transformer Wikipedia/Toronto Books Pretrained Model",
"id": "pretrained_transformers",
"path": "zoo:pretrained_transformers/poly_model_huge_wikito",
"agent": "transformer/polyencoder",
"task": "pretrained_transformers",
"project": "https://github.com/facebookresearch/ParlAI/tree/master/projects/polyencoder/",
"description": (
"Poly-Encoder pretrained on Wikipedia/Toronto Books. Use this model as an ``--init-model`` for a poly-encoder "
"when fine-tuning on another task. For more details on how to train, see the project page."
),
"example": (
"python -u examples/train_model.py "
"--init-model zoo:pretrained_transformers/poly_model_huge_wikito/model "
"-t convai2 "
"--model transformer/polyencoder --batchsize 256 --eval-batchsize 10 "
"--warmup_updates 100 --lr-scheduler-patience 0 --lr-scheduler-decay 0.4 "
"-lr 5e-05 --data-parallel True --history-size 20 --label-truncate 72 "
"--text-truncate 360 --num-epochs 8.0 --max_train_time 200000 -veps 0.5 "
"-vme 8000 --validation-metric accuracy --validation-metric-mode max "
"--save-after-valid True --log_every_n_secs 20 --candidates batch --fp16 True "
"--dict-tokenizer bpe --dict-lower True --optimizer adamax --output-scaling 0.06 "
"--variant xlm --reduction-type mean --share-encoders False "
"--learn-positional-embeddings True --n-layers 12 --n-heads 12 --ffn-size 3072 "
"--attention-dropout 0.1 --relu-dropout 0.0 --dropout 0.1 --n-positions 1024 "
"--embedding-size 768 --activation gelu --embeddings-scale False --n-segments 2 "
"--learn-embeddings True --polyencoder-type codes --poly-n-codes 64 "
"--poly-attention-type basic --dict-endtoken __start__ "
"--model-file <YOUR MODEL FILE>"
),
"result": (
"(subject to some variance, you may see the following as a result of validation of the model)\n"
"{'exs': 7801, 'accuracy': 0.861 ...}"
),
},
{
"title": "Bi-Encoder Transformer Reddit Pretrained Model",
"id": "pretrained_transformers",
"path": "zoo:pretrained_transformers/poly_model_huge_reddit",
"agent": "transformer/biencoder",
"task": "pretrained_transformers",
"project": "https://github.com/facebookresearch/ParlAI/tree/master/projects/polyencoder/",
"description": (
"Bi-Encoder pretrained on Reddit. Use this model as an ``--init-model`` for a bi-encoder "
"when fine-tuning on another task. For more details on how to train, see the project page."
),
"example": (
"python -u examples/train_model.py "
"--init-model zoo:pretrained_transformers/bi_model_huge_reddit/model "
"--batchsize 512 -t convai2 "
"--model transformer/biencoder --eval-batchsize 6 "
"--warmup_updates 100 --lr-scheduler-patience 0 "
"--lr-scheduler-decay 0.4 -lr 5e-05 --data-parallel True "
"--history-size 20 --label-truncate 72 --text-truncate 360 "
"--num-epochs 10.0 --max_train_time 200000 -veps 0.5 -vme 8000 "
"--validation-metric accuracy --validation-metric-mode max "
"--save-after-valid True --log_every_n_secs 20 --candidates batch "
"--dict-tokenizer bpe --dict-lower True --optimizer adamax "
"--output-scaling 0.06 "
"--variant xlm --reduction-type mean --share-encoders False "
"--learn-positional-embeddings True --n-layers 12 --n-heads 12 "
"--ffn-size 3072 --attention-dropout 0.1 --relu-dropout 0.0 --dropout 0.1 "
"--n-positions 1024 --embedding-size 768 --activation gelu "
"--embeddings-scale False --n-segments 2 --learn-embeddings True "
"--share-word-embeddings False --dict-endtoken __start__ --fp16 True "
"--model-file <YOUR MODEL FILE>"
),
"result": (
"(subject to some variance, you may see the following as a result of validation of the model)\n"
"{'exs': 7801, 'accuracy': 0.8686 ...}"
),
},
{
"title": "Bi-Encoder Transformer Wikipedia/Toronto Books Pretrained Model",
"id": "pretrained_transformers",
"path": "zoo:pretrained_transformers/bi_model_huge_wikito",
"agent": "transformer/biencoder",
"task": "pretrained_transformers",
"project": "https://github.com/facebookresearch/ParlAI/tree/master/projects/polyencoder/",
"description": (
"Bi-Encoder pretrained on Wikipedia/Toronto Books. Use this model as an ``--init-model`` for a poly-encoder "
"when fine-tuning on another task. For more details on how to train, see the project page."
),
"example": (
"python -u examples/train_model.py "
"--init-model zoo:pretrained_transformers/bi_model_huge_wikito/model "
"--batchsize 512 -t convai2 "
"--model transformer/biencoder --eval-batchsize 6 "
"--warmup_updates 100 --lr-scheduler-patience 0 "
"--lr-scheduler-decay 0.4 -lr 5e-05 --data-parallel True "
"--history-size 20 --label-truncate 72 --text-truncate 360 "
"--num-epochs 10.0 --max_train_time 200000 -veps 0.5 -vme 8000 "
"--validation-metric accuracy --validation-metric-mode max "
"--save-after-valid True --log_every_n_secs 20 --candidates batch "
"--dict-tokenizer bpe --dict-lower True --optimizer adamax "
"--output-scaling 0.06 "
"--variant xlm --reduction-type mean --share-encoders False "
"--learn-positional-embeddings True --n-layers 12 --n-heads 12 "
"--ffn-size 3072 --attention-dropout 0.1 --relu-dropout 0.0 --dropout 0.1 "
"--n-positions 1024 --embedding-size 768 --activation gelu "
"--embeddings-scale False --n-segments 2 --learn-embeddings True "
"--share-word-embeddings False --dict-endtoken __start__ --fp16 True "
"--model-file <YOUR MODEL FILE>"
),
"result": (
"(subject to some variance, you may see the following as a result of validation of the model)\n"
"{'exs': 7801, 'accuracy': 0.846 ...}"
),
},
{
"title": "Cross-Encoder Transformer Reddit Pretrained Model",
"id": "pretrained_transformers",
"path": "zoo:pretrained_transformers/cross_model_huge_reddit",
"agent": "transformer/crossencoder",
"task": "pretrained_transformers",
"project": "https://github.com/facebookresearch/ParlAI/tree/master/projects/polyencoder/",
"description": (
"Cross-Encoder pretrained on Reddit. Use this model as an ``--init-model`` for a cross-encoder "
"when fine-tuning on another task. For more details on how to train, see the project page."
),
"example": (
"python -u examples/train_model.py "
"--init-model zoo:pretrained_transformers/cross_model_huge_reddit/model "
"-t convai2 "
"--model transformer/crossencoder --batchsize 16 --eval-batchsize 10 "
"--warmup_updates 1000 --lr-scheduler-patience 0 --lr-scheduler-decay 0.4 "
"-lr 5e-05 --data-parallel True --history-size 20 --label-truncate 72 "
"--text-truncate 360 --num-epochs 12.0 --max_train_time 200000 -veps 0.5 "
"-vme 2500 --validation-metric accuracy --validation-metric-mode max "
"--save-after-valid True --log_every_n_secs 20 --candidates inline --fp16 True "
"--dict-tokenizer bpe --dict-lower True --optimizer adamax --output-scaling 0.06 "
"--variant xlm --reduction-type first --share-encoders False "
"--learn-positional-embeddings True --n-layers 12 --n-heads 12 --ffn-size 3072 "
"--attention-dropout 0.1 --relu-dropout 0.0 --dropout 0.1 --n-positions 1024 "
"--embedding-size 768 --activation gelu --embeddings-scale False --n-segments 2 "
"--learn-embeddings True --dict-endtoken __start__ "
"--model-file <YOUR MODEL FILE>"
),
"result": (
"(subject to some variance, you may see the following as a result of validation of the model)\n"
"{'exs': 7801, 'accuracy': 0.903 ...}"
),
},
{
"title": "Cross-Encoder Transformer Wikipedia/Toronto Books Pretrained Model",
"id": "pretrained_transformers",
"path": "zoo:pretrained_transformers/cross_model_huge_wikito",
"agent": "transformer/crossencoder",
"task": "pretrained_transformers",
"project": "https://github.com/facebookresearch/ParlAI/tree/master/projects/polyencoder/",
"description": (
"Cross-Encoder pretrained on Wikipedia/Toronto Books. Use this model as an ``--init-model`` for a poly-encoder "
"when fine-tuning on another task. For more details on how to train, see the project page."
),
"example": (
"python -u examples/train_model.py "
"--init-model zoo:pretrained_transformers/cross_model_huge_wikito/model "
"-t convai2 "
"--model transformer/crossencoder --batchsize 16 --eval-batchsize 10 "
"--warmup_updates 1000 --lr-scheduler-patience 0 --lr-scheduler-decay 0.4 "
"-lr 5e-05 --data-parallel True --history-size 20 --label-truncate 72 "
"--text-truncate 360 --num-epochs 12.0 --max_train_time 200000 -veps 0.5 "
"-vme 2500 --validation-metric accuracy --validation-metric-mode max "
"--save-after-valid True --log_every_n_secs 20 --candidates inline --fp16 True "
"--dict-tokenizer bpe --dict-lower True --optimizer adamax --output-scaling 0.06 "
"--variant xlm --reduction-type first --share-encoders False "
"--learn-positional-embeddings True --n-layers 12 --n-heads 12 --ffn-size 3072 "
"--attention-dropout 0.1 --relu-dropout 0.0 --dropout 0.1 --n-positions 1024 "
"--embedding-size 768 --activation gelu --embeddings-scale False --n-segments 2 "
"--learn-embeddings True --dict-endtoken __start__ "
"--model-file <YOUR MODEL FILE>"
),
"result": (
"(subject to some variance, you may see the following as a result of validation of the model)\n"
"{'exs': 7801, 'accuracy': 0.873 ...}"
),
},
{
"title": "Poly-Encoder Transformer ConvAI2 Model",
"id": "pretrained_transformers",
"path": "zoo:pretrained_transformers/model_poly",
"agent": "transformer/polyencoder",
"task": "convai2",
"project": "https://github.com/facebookresearch/ParlAI/tree/master/projects/polyencoder/",
"description": (
"Polyencoder pretrained on Reddit and fine-tuned on ConvAI2 scoring 89+ hits @ 1/20. See the pretrained_transformers directory for a list of other available pretrained transformers"
),
"example": (
"python examples/interactive.py -mf "
"zoo:pretrained_transformers/model_poly/model -t convai2"
),
"result": (
"hi how are you doing ?\n"
"[Polyencoder]: i am alright . i am back from the library .\n"
"Enter Your Message: oh, what do you do for a living?\n"
"[Polyencoder]: i work at the museum downtown . i love it there .\n"
"Enter Your Message: what is your favorite drink?\n"
"[Polyencoder]: i am more of a tea guy . i get my tea from china .\n"
),
"example2": (
"python examples/eval_model.py -mf zoo:pretrained_transformers/model_poly/model -t convai2 --eval-candidates inline"
),
"result2": (
"[ Finished evaluating tasks ['convai2'] using datatype valid ]\n"
"{'exs': 7801, 'accuracy': 0.8942, 'f1': 0.9065, 'hits@1': 0.894, 'hits@5': 0.99, 'hits@10': 0.997, 'hits@100': 1.0, 'bleu': 0.8941, 'lr': 5e-09, 'total_train_updates': 0, 'examples': 7801, 'loss': 3004.0, 'mean_loss': 0.385, 'mean_rank': 1.234, 'mrr': 0.9359}"
),
},
{
"title": "Bi-Encoder Transformer ConvAI2 Model",
"id": "pretrained_transformers",
"path": "zoo:pretrained_transformers/model_bi",
"agent": "transformer/biencoder",
"task": "convai2",
"project": "https://github.com/facebookresearch/ParlAI/tree/master/projects/polyencoder/",
"description": (
"Bi-encoder pretrained on Reddit and fine-tuned on ConvAI2 scoring ~87 hits @ 1/20."
),
"example": (
"python examples/interactive.py -mf "
"zoo:pretrained_transformers/model_bi/model -t convai2"
),
"result": (
"hi how are you doing ?\n"
"[Biencoder]: my mother is from russia .\n"
"Enter Your Message: oh cool, whereabouts ?\n"
"[Biencoder]: no , she passed away when i was 18 . thinking about russian recipes she taught me ,\n"
"Enter Your Message: what do you cook?\n"
"[Biencoder]: like meat mostly , me and my dogs love them , do you like dogs ?\n"
),
"example2": (
"python examples/eval_model.py -mf zoo:pretrained_transformers/model_bi/model -t convai2 --eval-candidates inline"
),
"result2": (
"[ Finished evaluating tasks ['convai2'] using datatype valid ]\n"
"{'exs': 7801, 'accuracy': 0.8686, 'f1': 0.8833, 'hits@1': 0.869, 'hits@5': 0.987, 'hits@10': 0.996, 'hits@100': 1.0, 'bleu': 0.8685, 'lr': 5e-09, 'total_train_updates': 0, 'examples': 7801, 'loss': 28.77, 'mean_loss': 0.003688, 'mean_rank': 1.301, 'mrr': 0.9197}"
),
},
{
"title": "TransResNet (ResNet152) Image-Chat model",
"id": "image_chat",
"path": "zoo:image_chat/transresnet_multimodal",
"agent": "projects.image_chat.transresnet_multimodal.transresnet_multimodal:TransresnetMultimodalAgent", # noqa: E501
"task": "image_chat",
"project": "https://github.com/facebookresearch/ParlAI/tree/master/projects/image_chat",
"description": (
"Transresnet Multimodal Model pretrained on the Image-Chat task"
),
"example": (
"python examples/eval_model.py -t image_chat "
"-mf zoo:image_chat/transresnet_multimodal/model -dt test"
),
"result": "{'exs': 29991, 'accuracy': 0.4032, 'f1': 0.4432, 'hits@1': 0.403, 'hits@5': 0.672, 'hits@10': 0.779, 'hits@100': 1.0, 'bleu': 0.3923," # noqa: E501
"'first_round': {'hits@1/100': 0.3392, 'loss': -0.002001, 'med_rank': 3.0},"
"'second_round': {'hits@1/100': 0.4558, 'loss': -0.002001, 'med_rank': 2.0},"
"'third_round+': {'hits@1/100': 0.4147, 'loss': -0.002001, 'med_rank': 2.0}}" # noqa: E501
"'hits@10': 0.903, 'hits@100': 0.998, 'bleu': 0.4999, 'hits@1/100': 1.0, 'loss': -0.002, 'med_rank': 1.0}", # noqa: E501
},
{
"title": "Self-feeding Chatbot",
"id": "self_feeding",
"path": "zoo:self_feeding/model",
"agent": "projects.self_feeding.self_feeding_agent:SelfFeedingAgent",
"task": "self_feeding:all:train",
"project": "https://github.com/facebookresearch/ParlAI/tree/master/projects/self_feeding",
"description": (
"The self-feeding chatbot of Hancock, et al., 2019 "
"(https://arxiv.org/abs/1901.05415). This model learns from is mistakes "
"when actually talking with users. This particular model corresponds to "
"the model with 131k human-human chats + 60k human-bot chats + 60k "
"feedback chats."
),
"example": (
"python projects/self_feeding/interactive.py --model-file "
"zoo:self_feeding/hh131k_hb60k_fb60k_st1k/model --no-cuda true"
),
"result": (
"Enter Your Message: hi, my name is stephen. what's yours?\n"
"[SelfFeeding]: hi there greg . do you have pets ? i've 2 cats named "
"milo and fio .\n"
"Enter Your Message: sadly, i have no pets. my landlord isn't a fan.\n"
"[SelfFeeding]: sorry to hear that . i always had bad allergies when i "
"liven on my farm in kansas ."
),
"example2": (
"python examples/eval_model.py -mf "
"zoo:self_feeding/hh131k_hb60k_fb60k_st1k/model -t self_feeding:all"
),
"result2": (
"[ Finished evaluating tasks ['self_feeding:all'] using datatype valid ]\n"
"{'exs': 3500, 'dia_rank': 4.654, 'dia_acc': 0.3525, 'fee_rank': 1.0, "
"'fee_acc': 1.0, 'fee_exs': 1000, 'sat_re': 0.4607, 'sat_f1': 0.5605, "
"'sat_acc': 0.724}"
),
},
{
"title": "Transformer Classifier Single-turn Dialogue Safety Model",
"id": "dialogue_safety",
"path": "zoo:dialogue_safety/single_turn/model",
"agent": "transformer/classifier",
"task": "dialogue_safety:adversarial,dialogue_safety:standard",
"project": "https://github.com/facebookresearch/ParlAI/tree/master/projects/dialogue_safety",
"description": (
"Classifier trained on both the standard and adversarial safety tasks in addition to Wikipedia Toxic Comments."
),
"example": (
"python examples/eval_model.py -t dialogue_safety:adversarial "
"--round 3 -dt test -mf zoo:dialogue_safety/single_turn/model -bs 40"
),
"result": (
"{'exs': 3000, 'accuracy': 0.9627, 'f1': 0.9627, 'bleu': 9.627e-10, 'lr': 5e-09, 'total_train_updates': 0, 'examples': 3000, 'mean_loss': 0.005441, 'class___notok___recall': 0.7833, 'class___notok___prec': 0.8333, 'class___notok___f1': 0.8076, 'class___ok___recall': 0.9826, 'class___ok___prec': 0.9761, 'class___ok___f1': 0.9793, 'weighted_f1': 0.9621}"
),
},
{
"title": "BERT Classifier Multi-turn Dialogue Safety Model",
"id": "dialogue_safety",
"path": "zoo:dialogue_safety/multi_turn/model",
"agent": "bert_classifier",
"task": "dialogue_safety:multiturn",
"project": "https://github.com/facebookresearch/ParlAI/tree/master/projects/dialogue_safety",
"description": (
"Classifier trained on the multi-turn adversarial safety task in addition to both the single-turn standard and adversarial safety tasks and Wikipedia Toxic Comments."
),
"example": (
"python examples/eval_model.py -t dialogue_safety:multiturn -dt test -mf zoo:dialogue_safety/multi_turn/model --split-lines True -bs 40"
),
"result": (
"{'exs': 3000, 'accuracy': 0.9317, 'f1': 0.9317, 'bleu': 9.317e-10, 'lr': 5e-09, 'total_train_updates': 0, 'examples': 3000, 'mean_loss': 0.008921, 'class___notok___recall': 0.7067, 'class___notok___prec': 0.6444, 'class___notok___f1': 0.6741, 'class___ok___recall': 0.9567, 'class___ok___prec': 0.9671, 'class___ok___f1': 0.9618, 'weighted_f1': 0.9331}"
),
},
{
"title": "Integration Test Models",
"id": "unittest",
"path": "zoo:unittest/transformer_ranker/model",
"task": "integration_tests",
"description": (
"Model files used to check backwards compatibility and code coverage of important standard models."
),
"example": (
"python examples/eval_model.py -mf zoo:unittest/transformer_generator2/model -t integration_tests:multiturn_candidate -m transformer/generator"
),
"external_website": '',
"result": (
"""{'exs': 400, 'accuracy': 1.0, 'f1': 1.0, 'bleu-4': 0.2503, 'lr': 0.001, 'total_train_updates': 5000, 'gpu_mem_percent': 9.37e-05, 'loss': 0.0262, 'token_acc': 1.0, 'nll_loss': 7.935e-05, 'ppl': 1.0}"""
),
},
{
"title": "ImageSeq2Seq DodecaDialogue All Tasks MT Model",
"id": "dodecadialogue",
"path": "zoo:dodecadialogue/all_tasks_mt/model",
"agent": "image_seq2seq",
"task": "#Dodeca",
"project": "https://github.com/facebookresearch/ParlAI/tree/master/projects/dodecadialogue/",
"description": ("Image Seq2Seq model trained on all DodecaDialogue tasks"),
"example": (
"python examples/interactive.py -mf zoo:dodecadialogue/all_tasks_mt/model "
"--inference beam --beam-size 3 --beam-min-length 10 --beam-block-ngram 3 --beam-context-block-ngram 3"
),
"result": (
"Enter Your Message: hi how are you?\n"
"[ImageSeq2seq]: i ' m doing well . how are you ?\n"
"Enter Your Message: not much, what do you like to do?\n"
"[ImageSeq2seq]: i like to go to the park and play with my friends ."
),
"example2": (
"python examples/eval_model.py -mf zoo:dodecadialogue/all_tasks_mt/model -t \"#Dodeca\""
"--prepend-personality True --prepend-gold-knowledge True --image-mode no_image_model"
),
"result2": (
"[ Finished evaluating tasks ['#Dodeca'] using datatype valid ]\n"
" exs gpu_mem loss lr ppl token_acc total_train_updates tpb\n"
" WizTeacher 3939 2.161 8.678 .5325\n"
" all 91526 .3371 2.807 9.375e-07 18.23 .4352 470274 2237\n"
" convai2 7801 2.421 11.26 .4721\n"
" cornell_movie 13905 3.088 21.93 .4172\n"
" dailydialog 8069 2.47 11.82 .4745\n"
" empathetic_dialogues 5738 2.414 11.18 .4505\n"
" igc 486 2.619 13.73 .4718\n"
" image_chat:Generation 15000 3.195 24.42 .3724\n"
" light_dialog 6623 2.944 19 .3918\n"
" twitter 10405 3.61 36.98 .3656\n"
" ubuntu 19560 3.148 23.3 .4035"
),
},
{
"title": "ImageSeq2Seq DodecaDialogue ConvAI2 FT Model",
"id": "dodecadialogue",
"path": "zoo:dodecadialogue/convai2_ft/model",
"agent": "image_seq2seq",
"task": "convai2",
"project": "https://github.com/facebookresearch/ParlAI/tree/master/projects/dodecadialogue/",
"description": (
"Image Seq2Seq model trained on all DodecaDialogue tasks and fine-tuned on Convai2"
),
"example": (
"python examples/interactive.py -mf zoo:dodecadialogue/convai2_ft/model -t convai2 "
"--inference beam --beam-size 3 --beam-min-length 10 --beam-block-ngram 3 --beam-context-block-ngram 3"
),
"result": (
"[context]: your persona: i currently work for ibm in chicago.\n"
"your persona: i'm not a basketball player though.\n"
"your persona: i am almost 7 feet tall.\n"
"your persona: i'd like to retire to hawaii in the next 10 years.\n"
"Enter Your Message: hi how's it going\n"
"[ImageSeq2seq]: i ' m doing well . how are you ?\n"
"Enter Your Message: i'm well, i am really tall\n"
"[ImageSeq2seq]: that ' s cool . i like simple jokes ."
),
"example2": (
"python examples/eval_model.py -mf zoo:dodecadialogue/convai2_ft/model -t convai2"
),
"result2": (
"[ Finished evaluating tasks ['convai2'] using datatype valid ]\n"
" exs gpu_mem loss lr ppl token_acc total_train_updates tpb\n"
" 7801 .2993 2.415 7.5e-06 11.19 .4741 15815 845.8"
),
},
{
"title": "ImageSeq2Seq DodecaDialogue Cornell Movie FT Model",
"id": "dodecadialogue",
"path": "zoo:dodecadialogue/cornell_movie_ft/model",
"agent": "image_seq2seq",
"task": "cornell_movie",
"project": "https://github.com/facebookresearch/ParlAI/tree/master/projects/dodecadialogue/",
"description": (
"Image Seq2Seq model trained on all DodecaDialogue tasks and fine-tuned on the Cornell Movie task"
),
"example": (
"python examples/interactive.py -mf zoo:dodecadialogue/cornell_movie_ft/model "
"--inference beam --beam-size 10 --beam-min-length 20 --beam-block-ngram 3 --beam-context-block-ngram 3"
),
"result": (
"Enter Your Message: hi how's it going?\n"
"[ImageSeq2seq]: oh , it ' s great . i ' m having a great time . how are you doing ?\n"
"Enter Your Message: i'm doing well, what do you like to do?\n"
"[ImageSeq2seq]: i like to go to the movies . what about you ? do you have any hobbies ?"
),
"example2": (
"python examples/eval_model.py -mf zoo:dodecadialogue/cornell_movie_ft/model -t cornell_movie"
),
"result2": (
"[ Finished evaluating tasks ['cornell_movie'] using datatype valid ]\n"
" exs gpu_mem loss lr ppl token_acc total_train_updates tpb\n"
" 13905 .07094 2.967 2.5e-06 19.43 .4290 29496 15.76"
),
},
{
"title": "ImageSeq2Seq DodecaDialogue DailyDialog FT Model",
"id": "dodecadialogue",
"path": "zoo:dodecadialogue/daily_dialog_ft/model",
"agent": "image_seq2seq",
"task": "dailydialog",
"project": "https://github.com/facebookresearch/ParlAI/tree/master/projects/dodecadialogue/",
"description": (
"Image Seq2Seq model trained on all DodecaDialogue tasks and fine-tuned on the DailyDialog task"
),
"example": (
"python examples/interactive.py -mf zoo:dodecadialogue/daily_dialog_ft/model "
"--inference beam --beam-size 5 --beam-min-length 10 --beam-block-ngram 3 --beam-context-block-ngram 3"
),
"result": (
"Enter Your Message: hi how's it going\n"
"[ImageSeq2seq]: i ' m doing well . how about you ?\n"
"Enter Your Message: not much, what do you like to do?\n"
"[ImageSeq2seq]: i like to go to the beach and play volleyball ."
),
"example2": (
"python examples/eval_model.py -mf zoo:dodecadialogue/daily_dialog_ft/model -t dailydialog"
),
"result2": (
"[ Finished evaluating tasks ['dailydialog'] using datatype valid ]\n"
" exs gpu_mem loss lr ppl token_acc total_train_updates tpb\n"
" 8069 .06787 2.326 7.5e-06 10.24 .5093 150959 15.67"
),
},
{
"title": "ImageSeq2Seq DodecaDialogue ELI5 FT Model",
"id": "dodecadialogue",
"path": "zoo:dodecadialogue/eli5_ft/model",
"agent": "image_seq2seq",
"project": "https://github.com/facebookresearch/ParlAI/tree/master/projects/dodecadialogue/",
"task": "TBD",
"description": (
"Image Seq2Seq model trained on all DodecaDialogue tasks and fine-tuned on the ELI5 task"
),
"example": (
"python examples/interactive.py -mf zoo:dodecadialogue/eli5_ft/model "
"--inference beam --beam-size 10 --beam-min-length 200 --beam-block-ngram 3 --beam-context-block-ngram 3"
),
"result": (
"Enter Your Message: Hi, can you tell me about quantum physics?\n"
"[ImageSeq2seq]: yes , i can . quantum physics is the study of how particles "
"interact with each other , and how they interact with other particles . "
"it ' s important to note that quantum mechanics is n ' t the same as "
"classical physics . classical physics is a study of the properties of "
"particles , and what they do . in classical physics , there are two "
"types of particles : quarks and neutrinos . quarks are made up of quarks , "
"neutrinos , and electrons . neutrinos are made of protons , neutrons , "
"electrons , and neutrons . they ' re all the same thing , but they all "
"have the same properties . so , if you ' re interested in quantum physics , "
"you might want to check out / r / askscience . there ' s a subreddit "
"for that sort of thing . edit : i ' m not sure what you mean by "
"\" quantum physics \" , but i ' ll let you know if you want to know more . "
"edit 2 : thanks for the gold !"
),
},
{
"title": "ImageSeq2Seq DodecaDialogue Empathetic Dialogue FT Model",
"id": "dodecadialogue",
"path": "zoo:dodecadialogue/empathetic_dialogues_ft/model",
"agent": "image_seq2seq",
"task": "empathetic_dialogues",
"project": "https://github.com/facebookresearch/ParlAI/tree/master/projects/dodecadialogue/",
"description": (
"Image Seq2Seq model trained on all DodecaDialogue tasks and fine-tuned on the Empathetic Dialogue task"
),
"example": (
"python examples/interactive.py -mf zoo:dodecadialogue/empathetic_dialogues_ft/model "
"--inference beam --beam-size 5 --beam-min-length 10 --beam-block-ngram 3 --beam-context-block-ngram 3"
),
"result": (
"Enter Your Message: hi, how's it going?\n"
"[ImageSeq2seq]: i ' m doing well . how are you ?\n"
"Enter Your Message: i'm fine, feeling a little sad\n"
"[ImageSeq2seq]: that ' s too bad . what ' s going on ?"
),
"example2": (
"python examples/eval_model.py -mf zoo:dodecadialogue/empathetic_dialogues_ft/model -t empathetic_dialogues"
),
"result2": (
"[ Finished evaluating tasks ['empathetic_dialogues'] using datatype valid ]\n"
" exs gpu_mem loss lr ppl token_acc total_train_updates tpb\n"
" 5738 .3278 2.405 7.5e-06 11.08 .4517 20107 1914"
),
},
{
"title": "ImageSeq2Seq DodecaDialogue Image Grounded Conversations FT Model",
"id": "dodecadialogue",
"path": "zoo:dodecadialogue/igc_ft/model",
"agent": "image_seq2seq",
"task": "igc",
"project": "https://github.com/facebookresearch/ParlAI/tree/master/projects/dodecadialogue/",
"description": (
"Image Seq2Seq model trained on all DodecaDialogue tasks and fine-tuned on the Image Grounded Conversations task"
),
"example": (
"python examples/eval_model.py -mf zoo:dodecadialogue/igc_ft/model -t igc:responseOnly"
),
"result": (
"[ Finished evaluating tasks ['igc:responseOnly'] using datatype valid ]\n"
" exs gpu_mem loss lr ppl token_acc total_train_updates tpb\n"
" 162 .0726 2.832 1e-06 16.98 .4405 10215 9.852"
),
},
{
"title": "ImageSeq2Seq DodecaDialogue Image Chat FT Model",
"id": "dodecadialogue",
"path": "zoo:dodecadialogue/image_chat_ft/model",
"agent": "image_seq2seq",
"task": "image_chat",
"project": "https://github.com/facebookresearch/ParlAI/tree/master/projects/dodecadialogue/",
"description": (
"Image Seq2Seq model trained on all DodecaDialogue tasks and fine-tuned on the Image Chat task"
),
"example": (
"python examples/eval_model.py -mf zoo:dodecadialogue/image_chat_ft/model -t image_chat:generation "
"--image-mode no_image_model"
),
"result": (
"[ Finished evaluating tasks ['image_chat:generation'] using datatype valid ]\n"
" exs gpu_mem loss lr ppl token_acc total_train_updates tpb\n"
" 15000 .2231 4.353 3.125e-07 77.73 .2905 321001 1653"
),
},
{
"title": "ImageSeq2Seq DodecaDialogue LIGHT Dialogue FT Model",
"id": "dodecadialogue",
"path": "zoo:dodecadialogue/light_dialog_ft/model",
"agent": "image_seq2seq",
"task": "light_dialog",
"project": "https://github.com/facebookresearch/ParlAI/tree/master/projects/dodecadialogue/",
"description": (
"Image Seq2Seq model trained on all DodecaDialogue tasks and fine-tuned on the LIGHT Dialogue task"
),
"example": (
"python examples/interactive.py -mf zoo:dodecadialogue/light_dialog_ft/model "
"--inference beam --beam-size 5 --beam-min-length 20 --beam-block-ngram 3 --beam-context-block-ngram 3"
),
"result": (
"Enter Your Message: hi how's it going?\n"
"[ImageSeq2seq]: i ' m doing well . how about you ? what ' s going on in the world today ?\n"
"Enter Your Message: not much, wish it had some more epic battles!\n"
"[ImageSeq2seq]: me too . it ' s been so long since i ' ve seen a battle like this . do you have a favorite battle ?"
),
"example2": (
"python examples/eval_model.py -mf zoo:dodecadialogue/light_dialog_ft/model -t light_dialog"
),
"result2": (
"[ Finished evaluating tasks ['light_dialog'] using datatype valid ]\n"
" exs gpu_mem loss lr ppl token_acc total_train_updates tpb\n"
" 6623 .07002 2.927 7.5e-06 18.66 .3927 38068 20.81"
),
},
{
"title": "ImageSeq2Seq DodecaDialogue pushshift.io Reddit FT Model",
"id": "dodecadialogue",
"path": "zoo:dodecadialogue/reddit_ft/model",
"agent": "image_seq2seq",
"project": "https://github.com/facebookresearch/ParlAI/tree/master/projects/dodecadialogue/",
"task": "TBD",
"description": (
"Image Seq2Seq model trained on all DodecaDialogue tasks and fine-tuned on the pushshift.io Reddit task"
),
"example": (
"python examples/interactive.py -mf zoo:dodecadialogue/reddit_ft/model "
"--inference beam --beam-size 5 --beam-min-length 20 --beam-block-ngram 3 --beam-context-block-ngram 3"
),
"result": (
"Enter Your Message: hi how's it going?\n"
"[ImageSeq2seq]: hi , i ' m doing pretty well . how are you ? : ) and yourself ? : d\n"
"Enter Your Message: just hanging in there, you up to anything fun?\n"
"[ImageSeq2seq]: not really . i just got home from work . i ' ll be back in a few hours ."
),
},
{
"title": "ImageSeq2Seq DodecaDialogue Twitter FT Model",
"id": "dodecadialogue",
"path": "zoo:dodecadialogue/twitter_ft/model",
"agent": "image_seq2seq",
"task": "twitter",
"project": "https://github.com/facebookresearch/ParlAI/tree/master/projects/dodecadialogue/",
"description": (
"Image Seq2Seq model trained on all DodecaDialogue tasks and fine-tuned on the Twitter task"
),
"example": (
"python examples/interactive.py -mf zoo:dodecadialogue/twitter_ft/model "
"--inference beam --beam-size 10 --beam-min-length 20 --beam-block-ngram 3 --beam-context-block-ngram 3"
),
"result": (
"Enter Your Message: hi how's it going?\n"
"[ImageSeq2seq]: it ' s going well ! how are you ? @ smiling_face_with_heart - eyes @\n"
"Enter Your Message: im doing well, what do you like to do\n"
"[ImageSeq2seq]: hi ! i ' m doing well ! i like to read , watch movies , play video games , and listen to music . how about you ?"
),
"example2": (
"python examples/eval_model.py -mf zoo:dodecadialogue/twitter_ft/model -t twitter"
),
"result2": (
"[ Finished evaluating tasks ['twitter'] using datatype valid ]\n"
" exs gpu_mem loss lr ppl token_acc total_train_updates tpb\n"
" 10405 .3807 3.396 7.5e-06 29.83 .3883 524029 2395"
),
},
{
"title": "ImageSeq2Seq DodecaDialogue Ubuntu V2 FT Model",
"id": "dodecadialogue",
"path": "zoo:dodecadialogue/ubuntu_ft/model",
"agent": "image_seq2seq",
"task": "ubuntu",
"project": "https://github.com/facebookresearch/ParlAI/tree/master/projects/dodecadialogue/",
"description": (
"Image Seq2Seq model trained on all DodecaDialogue tasks and fine-tuned on the Ubuntu V2 task"
),
"example": (
"python examples/interactive.py -mf zoo:dodecadialogue/ubuntu_ft/model "
"--inference beam --beam-size 2 --beam-min-length 10 --beam-block-ngram 3 --beam-context-block-ngram 3"
),
"result": (
"Enter Your Message: hi how's it going?\n"
"[ImageSeq2seq]: i ' m fine . . . you ? .\n"
"Enter Your Message: doing ok, what do you like to do?\n"
"[ImageSeq2seq]: i like to read , write , and read ."
),
"example2": (
"python examples/eval_model.py -mf zoo:dodecadialogue/ubuntu_ft/model -t ubuntu"
),
"result2": (
"[ Finished evaluating tasks ['ubuntu'] using datatype valid ]\n"
" exs gpu_mem loss lr ppl token_acc total_train_updates tpb\n"
" 19560 .3833 2.844 2.5e-05 17.18 .4389 188076 3130"
),
},
{
"title": "ImageSeq2Seq DodecaDialogue Wizard of Wikipedia FT Model",
"id": "dodecadialogue",
"path": "zoo:dodecadialogue/wizard_of_wikipedia_ft/model",
"agent": "image_seq2seq",
"task": "wizard_of_wikipedia:Generator",
"project": "https://github.com/facebookresearch/ParlAI/tree/master/projects/dodecadialogue/",
"description": (
"Image Seq2Seq model trained on all DodecaDialogue tasks and fine-tuned on the Wizard of Wikipedia task"
),
"example": (
"python examples/interactive.py -mf zoo:dodecadialogue/wizard_of_wikipedia_ft/model "
"--inference beam --beam-size 10 --beam-min-length 10 --beam-block-ngram 3 --beam-context-block-ngram 3 -t wizard_of_wikipedia"
),
"result": (
"Please choose one of the following topics by typing A, B, C, ..., etc. :\n"
"\n"
"A: Denver Art Museum\n"
"B: Joke\n"
"C: Sport utility vehicle\n"
"D: Chocolate brownie\n"
"E: [NO TOPIC]\n"
"\n"
"Enter Your Message: D\n"
"[ Your chosen topic is: Chocolate brownie ]\n"
"/private/home/kshuster/ParlAI/parlai/core/torch_ranker_agent.py:670: UserWarning: [ Executing eval mode with provided inline set of candidates ]\n"
" ''.format(mode)\n"
"[ Using chosen sentence from Wikpedia ]: A chocolate brownie (commonly referred to as simply brownie) is a square, baked, chocolate dessert.\n"
"[ImageSeq2seq]: i love chocolate brownies . they ' re my favorite .\n"
"Enter Your Message: me too! do you ever make them?\n"
"[ Using chosen sentence from Wikpedia ]: Brownies are typically eaten by hand, often accompanied by milk, served warm with ice cream (a la mode), topped with whipped cream, or sprinkled with powdered sugar and fudge.\n"
"[ImageSeq2seq]: i don ' t make them , but i would love to ."
),
"example2": (
"python examples/eval_model.py -mf zoo:dodecadialogue/wizard_of_wikipedia_ft/model -t wizard_of_wikipedia:Generator --prepend-gold-knowledge true"
),
"result2": (
"[ Finished evaluating tasks ['wizard_of_wikipedia:Generator'] using datatype valid ]\n"
" exs gpu_mem loss lr ppl token_acc total_train_updates tpb\n"
" 3939 .3823 2.144 7.5e-06 8.532 .5348 22908 2852"
),
},
{
"title": "ImageSeq2Seq DodecaDialogue Base Model",
"id": "dodecadialogue",
"path": "zoo:dodecadialogue/base_model/model",
"agent": "image_seq2seq",
"project": "https://github.com/facebookresearch/ParlAI/tree/master/projects/dodecadialogue/",
"task": "#Dodeca",
"description": (
"Image Seq2Seq base model, from which all DodecaDialogue models were trained"
),
"example": (
"python examples/train_model.py -t \"#Dodeca\" --prepend-gold-knowledge true --prepend-personality true -mf /tmp/dodeca_model --init-model zoo:dodecadialogue/base_model/model --dict-file zoo:dodecadialogue/dict/dodeca.dict --model image_seq2seq --dict-tokenizer bpe --dict-lower true -bs 32 -eps 0.5 -esz 512 --ffn-size 2048 --fp16 false --n-heads 16 --n-layers 8 --n-positions 512 --text-truncate 512 --label-truncate 128 --variant xlm -lr 7e-6 --lr-scheduler reduceonplateau --optimizer adamax --dropout 0.1 --validation-every-n-secs 3600 --validation-metric ppl --validation-metric-mode min --validation-patience 10 --activation gelu --embeddings-scale true --learn-positional-embeddings true --betas 0.9,0.999 --warmup-updates 2000 --gradient-clip 0.1"
),
"result": ("A trained model (logs omitted)"),
},
{
"title": "BlendedSkillTalk: BlendedSkillTalk single-task model",
"id": "blended_skill_talk",
"path": "zoo:blended_skill_talk/bst_single_task/model",
"agent": "transformer/polyencoder",
"task": "blended_skill_talk",
"project": 'https://github.com/facebookresearch/ParlAI/tree/master/projects/bst',
"description": "Pretrained polyencoder retrieval model fine-tuned on the BlendedSkillTalk dialogue task.",
"example": "python examples/interactive.py -mf zoo:blended_skill_talk/bst_single_task/model -t blended_skill_talk",
"result": 'Results vary.',
"example2": "python examples/eval_model.py -mf zoo:blended_skill_talk/bst_single_task/model -t blended_skill_talk -dt test",
"result2": """09:51:57 | Finished evaluating tasks ['blended_skill_talk'] using datatype test
accuracy bleu-4 exs f1 gpu_mem hits@1 hits@10 hits@100 hits@5 loss mrr rank tpb
.7920 .7785 5482 .8124 .0370 .7920 .9788 1 .9542 .8251 .8636 1.866 19.76
""",
},
{
"title": "BlendedSkillTalk: ConvAI2 single-task model",
"id": "blended_skill_talk",
"path": "zoo:blended_skill_talk/convai2_single_task/model",
"agent": "transformer/polyencoder",
"task": "blended_skill_talk",
"project": 'https://github.com/facebookresearch/ParlAI/tree/master/projects/bst',
"description": "Pretrained polyencoder retrieval model fine-tuned on the ConvAI2 dialogue task.",
"example": "python examples/eval_model.py -mf zoo:blended_skill_talk/convai2_single_task/model -t blended_skill_talk -dt test",
"result": """10:23:53 | Finished evaluating tasks ['blended_skill_talk'] using datatype test
accuracy bleu-4 exs f1 gpu_mem hits@1 hits@10 hits@100 hits@5 loss mrr rank tpb
.7678 .7553 5482 .7902 .07928 .7678 .9728 1 .9414 .9337 .8451 2.04 19.76
""",
},
{
"title": "BlendedSkillTalk: EmpatheticDialogues single-task model",
"id": "blended_skill_talk",
"path": "zoo:blended_skill_talk/ed_single_task/model",
"agent": "transformer/polyencoder",
"task": "blended_skill_talk",
"project": 'https://github.com/facebookresearch/ParlAI/tree/master/projects/bst',
"description": "Pretrained polyencoder retrieval model fine-tuned on the EmpatheticDialogues dialogue task.",
"example": "python examples/eval_model.py -mf zoo:blended_skill_talk/ed_single_task/model -t blended_skill_talk -dt test",
"result": """10:16:47 | Finished evaluating tasks ['blended_skill_talk'] using datatype test
accuracy bleu-4 exs f1 gpu_mem hits@1 hits@10 hits@100 hits@5 loss mrr rank tpb
.6895 .6774 5482 .7219 .07928 .6895 .9509 1 .9051 1.242 .7849 2.79 19.76
""",
},
{
"title": "BlendedSkillTalk: Wizard of Wikipedia single-task model",
"id": "blended_skill_talk",
"path": "zoo:blended_skill_talk/wizard_single_task/model",
"agent": "transformer/polyencoder",
"task": "blended_skill_talk",
"project": 'https://github.com/facebookresearch/ParlAI/tree/master/projects/bst',
"description": "Pretrained polyencoder retrieval model fine-tuned on the Wizard of Wikipedia dialogue task.",
"example": "python examples/eval_model.py -mf zoo:blended_skill_talk/wizard_single_task/model -t blended_skill_talk -dt test",
"result": """10:34:46 | Finished evaluating tasks ['blended_skill_talk'] using datatype test
accuracy bleu-4 exs f1 gpu_mem hits@1 hits@10 hits@100 hits@5 loss mrr rank tpb
.6742 .6616 5482 .7059 .07928 .6742 .9445 1 .8902 1.321 .7706 2.962 19.76
""",
},
{
"title": "BlendedSkillTalk: MT Single-Skills model",
"id": "blended_skill_talk",
"path": "zoo:blended_skill_talk/multi_task/model",
"agent": "transformer/polyencoder",
"task": "blended_skill_talk",
"project": 'https://github.com/facebookresearch/ParlAI/tree/master/projects/bst',
"description": "Pretrained polyencoder retrieval model fine-tuned on the ConvAI2, EmpatheticDialogues, and Wizard of Wikipedia dialogue tasks.",
"example": "python examples/eval_model.py -mf zoo:blended_skill_talk/multi_task/model -t blended_skill_talk -dt test",
"result": """10:23:35 | Finished evaluating tasks ['blended_skill_talk'] using datatype test
accuracy bleu-4 exs f1 gpu_mem hits@1 hits@10 hits@100 hits@5 loss mrr rank tpb
.8010 .7872 5482 .8204 .07928 .8010 .9779 1 .9564 .8154 .8697 1.908 19.76
""",
},
{
"title": "BlendedSkillTalk: MT Single-Skills model fine-tuned on BST",
"id": "blended_skill_talk",
"path": "zoo:blended_skill_talk/multi_task_bst_tuned/model",
"agent": "transformer/polyencoder",
"task": "blended_skill_talk",
"project": 'https://github.com/facebookresearch/ParlAI/tree/master/projects/bst',
"description": "Pretrained polyencoder retrieval model fine-tuned on the ConvAI2, EmpatheticDialogues, and Wizard of Wikipedia dialogue tasks, and then further fine-tuned on the BlendedSkillTalk dialogue task.",
"example": "python examples/eval_model.py -mf zoo:blended_skill_talk/multi_task_bst_tuned/model -t blended_skill_talk -dt test",
"result": """10:36:01 | Finished evaluating tasks ['blended_skill_talk'] using datatype test
accuracy bleu-4 exs f1 gpu_mem hits@1 hits@10 hits@100 hits@5 loss mrr rank tpb
.8378 .8230 5482 .8543 .07928 .8378 .9872 1 .9704 .5897 .8963 1.604 19.76
""",
},
{
"title": "Tutorial Transformer Generator",
"id": "tutorial_transformer_generator",
"path": "zoo:tutorial_transformer_generator/model",
"task": "pushshift.io",
"description": (
"Small (87M paramter) generative transformer, pretrained on pushshift.io Reddit."
),
"example": (
"python -m parlai.scripts.interactive -mf zoo:tutorial_transformer_generator/model"
),
"external_website": '',
"result": (
"Enter Your Message: hi, how are you today?\n"
"[TransformerGenerator]: i ' m doing well , how about you ?\n"
"Enter Your Message: I'm giving a tutorial on chatbots!\n"
"[TransformerGenerator]: that ' s awesome ! what ' s it about ?\n"
"Enter Your Message: bots just like you\n"
"[TransformerGenerator]: i ' ll be sure to check it out !"
),
},
{
"title": "Blender 90M",
"id": "blender",
"path": "zoo:blender/blender_90M/model",
"agent": "transformer/generator",
"task": "blended_skill_talk",
"project": "https://github.com/facebookresearch/ParlAI/tree/master/projects/blender",
"description": (
"90< parameter generative model finetuned on blended_skill_talk tasks."
),
"example": (
"python parlai/scripts/safe_interactive.py -mf zoo:blender/blender_90M/model -t blended_skill_talk"
),
"result": (
"Enter Your Message: Hi what's up?\n"
"[TransformerGenerator]: hello , how are you ? i just got back from working at a law firm , how about you ?"
),
},
{
"title": "Blender 2.7B",
"id": "blender",
"path": "zoo:blender/blender_3B/model",
"agent": "transformer/generator",
"task": "blended_skill_talk",
"project": "https://github.com/facebookresearch/ParlAI/tree/master/projects/blender",
"description": (
"2.7B parameter generative model finetuned on blended_skill_talk tasks."
),
"example": (
"python parlai/scripts/safe_interactive.py -mf zoo:blender/blender_3B/model -t blended_skill_talk"
),
"result": (
"Enter Your Message: Hi how are you?\n"
"[TransformerGenerator]: I'm doing well. How are you doing? What do you like to do in your spare time?"
),
},
{
"title": "Blender 9.4B",
"id": "blender",
"path": "zoo:blender/blender_9B/model",
"agent": "transformer/generator",
"task": "blended_skill_talk",
"project": "https://github.com/facebookresearch/ParlAI/tree/master/projects/blender",
"description": (
"9.4B parameter generative model finetuned on blended_skill_talk tasks."
),
"example": (
"python parlai/scripts/safe_interactive.py -mf zoo:blender/blender_9B/model -t blended_skill_talk"
),
"result": (
"Enter Your Message: Hi!\n"
"[TransformerGenerator]: What do you do for a living? I'm a student at Miami University."
),
},
{
"title": "Reddit 2.7B",
"id": "blender",
"path": "zoo:blender/reddit_3B/model",
"agent": "transformer/generator",
"task": "pushshift.io",
"project": "https://github.com/facebookresearch/ParlAI/tree/master/projects/blender",
"description": (
"2.7B parameter generative model finetuned on blended_skill_talk tasks."
),
"example": (
"python examples/train_model.py -t blended_skill_talk,wizard_of_wikipedia,convai2:normalized,empathetic_dialogues --multitask-weights 1,3,3,3 -veps 0.25 --attention-dropout 0.0 --batchsize 128 --model transformer/generator --embedding-size 2560 --ffn-size 10240 --variant prelayernorm --n-heads 32 --n-positions 128 --n-encoder-layers 2 --n-decoder-layers 24 --history-add-global-end-token end --delimiter ' ' --dict-tokenizer bytelevelbpe --dropout 0.1 --fp16 True --init-model zoo:blender/reddit_3B/model --dict-file zoo:blender/reddit_3B/model.dict --label-truncate 128 --log_every_n_secs 10 -lr 7e-06 --lr-scheduler reduceonplateau --lr-scheduler-patience 3 --optimizer adam --relu-dropout 0.0 --activation gelu --model-parallel true --save-after-valid True --text-truncate 128 --truncate 128 --warmup_updates 100 --fp16-impl mem_efficient --update-freq 2 --gradient-clip 0.1 --skip-generation True -vp 10 -vmt ppl -vmm min --model-file /tmp/test_train_27B"
),
"result": ("Results vary."),
},
{
"title": "Reddit 9.4B",
"id": "blender",
"path": "zoo:blender/reddit_9B/model",
"agent": "transformer/generator",
"task": "pushshift.io",
"project": "https://github.com/facebookresearch/ParlAI/tree/master/projects/blender",
"description": (
"9.4B parameter generative model finetuned on blended_skill_talk tasks."
),
"example": (
"python examples/train_model.py -t blended_skill_talk,wizard_of_wikipedia,convai2:normalized,empathetic_dialogues --multitask-weights 1,3,3,3 -veps 0.25 --attention-dropout 0.0 --batchsize 8 --eval-batchsize 64 --model transformer/generator --embedding-size 4096 --ffn-size 16384 --variant prelayernorm --n-heads 32 --n-positions 128 --n-encoder-layers 4 --n-decoder-layers 32 --history-add-global-end-token end --dict-tokenizer bytelevelbpe --dropout 0.1 --fp16 True --init-model zoo:blender/reddit_9B/model --dict-file zoo:blender/reddit_9B/model.dict --label-truncate 128 -lr 3e-06 -dynb full --lr-scheduler cosine --max-lr-steps 9000 --lr-scheduler-patience 3 --optimizer adam --relu-dropout 0.0 --activation gelu --model-parallel true --save-after-valid False --text-truncate 128 --truncate 128 --warmup_updates 1000 --fp16-impl mem_efficient --update-freq 4 --log-every-n-secs 30 --gradient-clip 0.1 --skip-generation True -vp 10 --max-train-time 84600 -vmt ppl -vmm min --model-file /tmp/test_train_94B"
),
"result": ("Results vary."),
},
]
|
import copy
from pgdrive.constants import TerminationState
import logging
import os
import sys
from panda3d.bullet import BulletBodyNode
def import_pygame():
os.environ['PYGAME_HIDE_SUPPORT_PROMPT'] = "hide"
import pygame
return pygame
def setup_logger(debug=False):
logging.basicConfig(
level=logging.DEBUG if debug else logging.WARNING,
format='%(asctime)s - %(filename)s[line:%(lineno)d] - %(levelname)s: %(message)s'
)
def recursive_equal(data1, data2, need_assert=False):
from pgdrive.utils.config import Config
if isinstance(data1, Config):
data1 = data1.get_dict()
if isinstance(data2, Config):
data2 = data2.get_dict()
if isinstance(data1, dict):
is_ins = isinstance(data2, dict)
key_right = set(data1.keys()) == set(data2.keys())
if need_assert:
assert is_ins and key_right, (data1.keys(), data2.keys())
if not (is_ins and key_right):
return False
ret = []
for k in data1:
ret.append(recursive_equal(data1[k], data2[k]))
return all(ret)
elif isinstance(data1, list):
len_right = len(data1) == len(data2)
is_ins = isinstance(data2, list)
if need_assert:
assert len_right and is_ins, (len(data1), len(data2), data1, data2)
if not (is_ins and len_right):
return False
ret = []
for i in range(len(data1)):
ret.append(recursive_equal(data1[i], data2[i]))
return all(ret)
else:
ret = data1 == data2
if need_assert:
assert ret, (type(data1), type(data2), data1, data2)
return ret
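# Illustrative usage sketch (not part of the original module): recursive_equal
# compares nested dict/list structures element-wise. The sample data below is
# hypothetical; the helper is only defined, never called at import time.
def _recursive_equal_example():
    assert recursive_equal({"a": [1, 2], "b": 3}, {"a": [1, 2], "b": 3})
    assert not recursive_equal({"a": [1, 2]}, {"a": [1, 3]})   # element mismatch
    assert not recursive_equal({"a": 1}, {"b": 1})             # key sets differ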
def is_mac():
return sys.platform == "darwin"
def is_win():
return sys.platform == "win32"
def concat_step_infos(step_info_list):
"""We only conduct simply shallow update here!"""
old_dict = dict()
for new_dict in step_info_list:
old_dict = merge_dicts(old_dict, new_dict, allow_new_keys=True, without_copy=True)
return old_dict
# The following two functions are copied from ray/tune/utils/util.py; raise_error and pgconfig support is added by us!
def merge_dicts(old_dict, new_dict, allow_new_keys=False, without_copy=False):
"""
Args:
old_dict (dict, Config): Dict 1.
new_dict (dict, Config): Dict 2.
        allow_new_keys (bool): Whether keys that only exist in new_dict are allowed;
            if False, an unknown key raises an error.
        without_copy (bool): If True, merge into old_dict in place instead of into a deep copy.
    Returns:
        dict: A new dict with new_dict deep-merged into old_dict (old_dict itself when without_copy is True).
"""
old_dict = old_dict or dict()
new_dict = new_dict or dict()
if without_copy:
merged = old_dict
else:
merged = copy.deepcopy(old_dict)
_deep_update(
merged, new_dict, new_keys_allowed=allow_new_keys, allow_new_subkey_list=[], raise_error=not allow_new_keys
)
return merged
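# Illustrative usage sketch (not part of the original module): merge_dicts
# deep-merges new_dict into a copy of old_dict; with allow_new_keys=False an
# unknown key raises. The sample dicts below are hypothetical.
def _merge_dicts_example():
    base = {"vehicle": {"speed": 10, "color": "red"}, "seed": 0}
    patch = {"vehicle": {"speed": 20}}
    merged = merge_dicts(base, patch)
    assert merged["vehicle"] == {"speed": 20, "color": "red"}  # nested override, other keys kept
    assert base["vehicle"]["speed"] == 10                      # original untouched (deep copy)
    return merged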
def _deep_update(
original,
new_dict,
new_keys_allowed=False,
allow_new_subkey_list=None,
override_all_if_type_changes=None,
raise_error=True
):
allow_new_subkey_list = allow_new_subkey_list or []
override_all_if_type_changes = override_all_if_type_changes or []
for k, value in new_dict.items():
if k not in original and not new_keys_allowed:
if raise_error:
raise Exception("Unknown config parameter `{}` ".format(k))
else:
continue
        # Both original value and new one are dicts.
if isinstance(original.get(k), dict) and isinstance(value, dict):
            # Check old type vs new one. If different, override entire value.
if k in override_all_if_type_changes and \
"type" in value and "type" in original[k] and \
value["type"] != original[k]["type"]:
original[k] = value
# Allowed key -> ok to add new subkeys.
elif k in allow_new_subkey_list:
_deep_update(original[k], value, True, raise_error=raise_error)
# Non-allowed key.
else:
_deep_update(original[k], value, new_keys_allowed, raise_error=raise_error)
# Original value not a dict OR new value not a dict:
# Override entire value.
else:
original[k] = value
return original
def deprecation_warning(old, new, error=False) -> None:
"""Warns (via the `logger` object) or throws a deprecation warning/error.
Args:
old (str): A description of the "thing" that is to be deprecated.
new (Optional[str]): A description of the new "thing" that replaces it.
error (Optional[Union[bool, Exception]]): Whether or which exception to
throw. If True, throw ValueError. If False, just warn.
If Exception, throw that Exception.
"""
msg = "`{}` has been deprecated.{}".format(old, (" Use `{}` instead.".format(new) if new else ""))
if error is True:
raise ValueError(msg)
elif error and issubclass(error, Exception):
raise error(msg)
else:
logger = logging.getLogger(__name__)
logger.warning("DeprecationWarning: " + msg + " This will raise an error in the future!")
def get_object_from_node(node: BulletBodyNode):
"""
    Use this API to get the Python object from a Bullet RayCast/SweepTest/CollisionCallback result
"""
if node.getPythonTag(node.getName()) is None:
return None
from pgdrive.engine.engine_utils import get_object
ret = node.getPythonTag(node.getName()).base_object_name
if isinstance(ret, str):
return get_object(ret)[ret]
else:
return ret
def auto_termination(vehicle, should_done):
return {TerminationState.MAX_STEP: True if should_done else False}
|
import numpy as np
import sys
sys.path.append('../')
from interpolate import get_gradient, _bin_and_index
three_colors = ['#ffffff', '#000000', '#ff0000']
two_colors = ['#ffffff', '#000000']
equal = np.testing.assert_array_equal
close_enough = np.testing.assert_allclose
def test_bin_lower():
value = 0.3
size = 2
params = (value, size)
expected_answer = 0
equal(expected_answer, _bin_and_index(*params))
def test_bin_higher():
value = 0.9
size = 2
params = (value, size)
expected_answer = 1
equal(expected_answer, _bin_and_index(*params))
## test_<number of colors>_<value intensity between 0 and 1>
def test_3_half():
value = 0.5
params = (three_colors, value)
expected_answer = np.array([0, 0, 0])
    close_enough(expected_answer, get_gradient(*params), atol=1)
def test_3_quarter():
value = 0.25
params = (three_colors, value)
expected_answer = np.array([127.5, 127.5, 127.5])
    close_enough(expected_answer, get_gradient(*params), atol=1)
def test_3_3quarter():
value = 0.75
params = (three_colors, value)
expected_answer = np.array([127.5, 0, 0])
    close_enough(expected_answer, get_gradient(*params), atol=1)
def test_2_half():
value = 0.5
params = (two_colors, value)
expected_answer = np.array([127.5, 127.5, 127.5])
    close_enough(expected_answer, get_gradient(*params), atol=1)
def test_2_quarter():
value = 0.25
params = (two_colors, value)
expected_answer = np.array([191.25,191.25,191.25])
    close_enough(expected_answer, get_gradient(*params), atol=1)
def test_2_3quarter():
value = 0.75
params = (two_colors, value)
expected_answer = np.array([63.75,63.75,63.75])
    close_enough(expected_answer, get_gradient(*params), atol=1)
|
# Generated by Django 2.0.4 on 2018-04-03 19:50
import datetime
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('recipes', '0002_auto_20180403_1907'),
]
operations = [
migrations.AddField(
model_name='recipe',
name='pub_date',
field=models.DateField(default=datetime.date.today, verbose_name='Date Created'),
),
]
|
'''
This module contains all functions relating to feature engineering
'''
import datetime as dt
import re
import platform
import pandas as pd
import numpy as np
if platform.system() == "Darwin":
import matplotlib as plt
plt.use('TkAgg')
else:
import matplotlib.pyplot as plt
import seaborn as sns
from .structdata import get_cat_feats, get_num_feats, get_date_cols
from dateutil.parser import parse
def drop_missing(data=None, percent=99):
'''
    Drops columns with at least [percent] percent of missing data.
Parameters:
-------------------------
data: Pandas DataFrame or Series.
percent: float, Default 99
Percentage of missing values to be in a column before it is eligible for removal.
Returns:
------------------
Pandas DataFrame or Series.
'''
if data is None:
raise ValueError("data: Expecting a DataFrame/ numpy2d array, got 'None'")
missing_percent = (data.isna().sum() / data.shape[0]) * 100
cols_2_drop = missing_percent[missing_percent.values >= percent].index
print("Dropped {}".format(list(cols_2_drop)))
#Drop missing values
df = data.drop(cols_2_drop, axis=1)
return df
def drop_redundant(data):
'''
    Removes features that have the same value in every cell. Also drops a feature if NaN is its only other unique value.
Parameters:
-----------------------------
data: DataFrame or named series.
Returns:
DataFrame or named series.
'''
if data is None:
raise ValueError("data: Expecting a DataFrame/ numpy2d array, got 'None'")
#get columns
cols_2_drop = _nan_in_class(data)
print("Dropped {}".format(cols_2_drop))
df = data.drop(cols_2_drop, axis=1)
return df
def fill_missing_cats(data=None, cat_features=None, missing_encoding=None, missing_col=False):
'''
Fill missing values using the mode of the categorical features.
Parameters:
------------------------
    data: DataFrame or named Series.
Data set to perform operation on.
cat_features: List, Series, Array.
categorical features to perform operation on. If not provided, we automatically infer the categoricals from the dataset.
missing_encoding: List, Series, Array.
Values used in place of missing. Popular formats are [-1, -999, -99, '', ' ']
    missing_col: bool, Default False
Creates a new column to capture the missing values. 1 if missing and 0 otherwise. This can sometimes help a machine learning model.
'''
if data is None:
raise ValueError("data: Expecting a DataFrame/ numpy2d array, got 'None'")
if cat_features is None:
cat_features = get_cat_feats(data)
df = data.copy()
#change all possible missing values to NaN
if missing_encoding is None:
missing_encoding = ['', ' ', -99, -999]
df.replace(missing_encoding, np.NaN, inplace=True)
for feat in cat_features:
if missing_col:
df[feat + '_missing_value'] = (df[feat].isna()).astype('int64')
most_freq = df[feat].mode()[0]
df[feat] = df[feat].replace(np.NaN, most_freq)
return df
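# Illustrative usage sketch (not part of the original module): fill missing
# categorical values with the per-column mode. The toy frame is hypothetical and
# cat_features is passed explicitly, so the example does not rely on automatic
# categorical inference.
def _fill_missing_cats_example():
    df = pd.DataFrame({"city": ["Lagos", np.nan, "Lagos", "Abuja"]})
    filled = fill_missing_cats(df, cat_features=["city"], missing_col=True)
    assert filled["city"].isna().sum() == 0           # NaN replaced by the mode ("Lagos")
    assert "city_missing_value" in filled.columns     # indicator column added
    return filled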
def fill_missing_num(data=None, num_features=None, method='mean', missing_col=False):
'''
    Fill missing values in numerical columns with the specified [method] value.
Parameters:
------------------------------
    data: DataFrame or named Series.
        The data set to fill.
    num_features: list.
        List of numerical columns to fill. If not provided, they are inferred from the data.
method: str, Default 'mean'.
method to use in calculating fill value.
    missing_col: bool, Default False
Creates a new column to capture the missing values. 1 if missing and 0 otherwise. This can sometimes help a machine learning model.
'''
if data is None:
raise ValueError("data: Expecting a DataFrame/ numpy2d array, got 'None'")
if num_features is None:
num_features = get_num_feats(data)
#get numerical features with missing values
temp_df = data[num_features].isna().sum()
features = list(temp_df[num_features][temp_df[num_features] > 0].index)
df = data.copy()
for feat in features:
if missing_col:
df[feat + '_missing_value'] = (df[feat].isna()).astype('int64')
        if method == 'mean':
mean = df[feat].mean()
df[feat].fillna(mean, inplace=True)
        elif method == 'median':
median = df[feat].median()
df[feat].fillna(median, inplace=True)
        elif method == 'mode':
mode = df[feat].mode()[0]
df[feat].fillna(mode, inplace=True)
else:
raise ValueError("method: must specify a fill method, one of [mean, mode or median]'")
return df
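# Illustrative usage sketch (not part of the original module): fill numeric NaNs
# with the column mean. The toy frame is hypothetical and num_features is passed
# explicitly, so the example does not rely on automatic dtype inference.
def _fill_missing_num_example():
    df = pd.DataFrame({"age": [10.0, np.nan, 30.0]})
    filled = fill_missing_num(df, num_features=["age"], method="mean")
    assert filled["age"].isna().sum() == 0   # NaN replaced by the mean (20.0)
    return filled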
def merge_groupby(data=None, cat_features=None, statistics=None, col_to_merge=None):
'''
Performs a groupby on the specified categorical features and merges
the result to the original dataframe.
Parameter:
-----------------------
data: DataFrame
Data set to perform operation on.
cat_features: list, series, 1D-array
categorical features to groupby.
    statistics: list, series, 1D-array, Default ['mean', 'count']
aggregates to perform on grouped data.
col_to_merge: str
The column to merge on the dataset. Must be present in the data set.
Returns:
Dataframe.
'''
if data is None:
raise ValueError("data: Expecting a DataFrame/ numpy2d array, got 'None'")
if statistics is None:
statistics = ['mean', 'count']
if cat_features is None:
        cat_features = get_cat_feats(data)
if col_to_merge is None:
raise ValueError("col_to_merge: Expecting a string [column to merge on], got 'None'")
df = data.copy()
for cat in cat_features:
temp = df.groupby([cat]).agg(statistics)[col_to_merge]
#rename columns
temp = temp.rename(columns={'mean': cat + '_' + col_to_merge + '_mean', 'count': cat + '_' + col_to_merge + "_count"})
#merge the data sets
df = df.merge(temp, how='left', on=cat)
return df
def get_qcut(data=None, col=None, q=None, duplicates='drop', return_type='float64'):
'''
Cuts a series into bins using the pandas qcut function
and returns the resulting bins as a series for merging.
Parameter:
-------------
data: DataFrame, named Series
Data set to perform operation on.
col: str
column to cut/binnarize.
q: integer or array of quantiles
Number of quantiles. 10 for deciles, 4 for quartiles, etc. Alternately array of quantiles, e.g. [0, .25, .5, .75, 1.] for quartiles.
duplicates: Default 'drop',
If bin edges are not unique drop non-uniques.
return_type: dtype, Default (float64)
Dtype of series to return. One of [float64, str, int64]
Returns:
--------
Series, 1D-Array
'''
temp_df = pd.qcut(data[col], q=q, duplicates=duplicates).to_frame().astype('str')
#retrieve only the qcut categories
df = temp_df[col].str.split(',').apply(lambda x: x[0][1:]).astype(return_type)
return df
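# Illustrative usage sketch (not part of the original module): get_qcut returns, for
# each row, the left edge of the quantile bin it falls into, as a numeric series that
# can be merged back onto the frame. The toy frame below is hypothetical.
def _get_qcut_example():
    df = pd.DataFrame({"income": [10, 20, 30, 40, 50, 60, 70, 80]})
    bins = get_qcut(df, col="income", q=4)   # quartile left edges, dtype float64
    assert len(bins) == len(df)
    return bins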
def create_balanced_data(data=None, target=None, categories=None, class_sizes=None, replacement=False ):
'''
Creates a balanced data set from an imbalanced one. Used in a classification task.
Parameter:
----------------------------
    data: DataFrame or named Series.
        The imbalanced dataset.
    target: str
        Name of the target column.
    categories: list
        Unique categories in the target column. If not set, we infer the unique categories from the column.
    class_sizes: list
        Size of each specified class. Must be in the same order as the categories parameter.
    replacement: bool, Default False.
        Sample with or without replacement.
'''
if data is None:
raise ValueError("data: Expecting a DataFrame/ numpy2d array, got 'None'")
if target is None:
raise ValueError("target: Expecting a String got 'None'")
if categories is None:
categories = list(data[target].unique())
if class_sizes is None:
#set size for each class to same value
temp_val = int(data.shape[0] / len(data[target].unique()))
class_sizes = [temp_val for _ in list(data[target].unique())]
df = data.copy()
data_category = []
data_class_indx = []
    #get data corresponding to each of the categories
for cat in categories:
data_category.append(df[df[target] == cat])
#sample and get the index corresponding to each category
for class_size, cat in zip(class_sizes, data_category):
data_class_indx.append(cat.sample(class_size, replace=True).index)
#concat data together
new_data = pd.concat([df.loc[indx] for indx in data_class_indx], ignore_index=True).sample(sum(class_sizes)).reset_index(drop=True)
if not replacement:
for indx in data_class_indx:
df.drop(indx, inplace=True)
return new_data
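# Illustrative usage sketch (not part of the original module): resample an
# imbalanced binary target into equally sized classes. The toy frame is hypothetical.
def _create_balanced_data_example():
    df = pd.DataFrame({"feat": range(10),
                       "label": [0, 0, 0, 0, 0, 0, 0, 0, 1, 1]})
    balanced = create_balanced_data(df, target="label")
    assert len(balanced) == 10                  # total size preserved by default
    assert (balanced["label"] == 1).sum() == 5  # 5 rows sampled per class
    return balanced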
def to_date(data):
'''
Automatically convert all date time columns to pandas Datetime format
'''
date_cols = get_date_cols(data)
for col in date_cols:
data[col] = pd.to_datetime(data[col])
return data
def haversine_distance(lat1, long1, lat2, long2):
'''
Calculates the Haversine distance between two location with latitude and longitude.
The haversine distance is the great-circle distance between two points on a sphere given their longitudes and latitudes.
Parameter:
---------------------------
lat1: scalar,float
Start point latitude of the location.
lat2: scalar,float
End point latitude of the location.
long1: scalar,float
Start point longitude of the location.
long2: scalar,float
End point longitude of the location.
Returns:
        Series: The Haversine distance between (lat1, long1) and (lat2, long2)
'''
lat1, long1, lat2, long2 = map(np.radians, (lat1, long1, lat2, long2))
AVG_EARTH_RADIUS = 6371 # in km
lat = lat2 - lat1
lng = long2 - long1
distance = np.sin(lat * 0.5) ** 2 + np.cos(lat1) * np.cos(lat2) * np.sin(lng * 0.5) ** 2
harvesine_distance = 2 * AVG_EARTH_RADIUS * np.arcsin(np.sqrt(distance))
harvesine_distance_df = pd.Series(harvesine_distance)
return harvesine_distance_df
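# Illustrative usage sketch (not part of the original module): great-circle distance
# in kilometres between two coordinate pairs given in degrees. The (approximate)
# Lagos/Abuja coordinates below are only illustrative; inputs may also be aligned
# Series or arrays.
def _haversine_distance_example():
    d = haversine_distance(6.5244, 3.3792, 9.0765, 7.3986)
    assert float(d.iloc[0]) > 0    # a positive distance of a few hundred km
    return d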
def manhattan_distance(lat1, long1, lat2, long2):
'''
Calculates the Manhattan distance between two points.
It is the sum of horizontal and vertical distance between any two points given their latitudes and longitudes.
Parameter:
-------------------
lat1: scalar,float
Start point latitude of the location.
lat2: scalar,float
End point latitude of the location.
long1: scalar,float
Start point longitude of the location.
long2: scalar,float
End point longitude of the location.
Returns: Series
        The Manhattan distance between (lat1, long1) and (lat2, long2)
'''
a = np.abs(lat2 -lat1)
b = np.abs(long1 - long2)
manhattan_distance = a + b
manhattan_distance_df = pd.Series(manhattan_distance)
return manhattan_distance_df
def bearing(lat1, long1, lat2, long2):
'''
Calculates the Bearing between two points.
    The bearing is the compass direction to travel from a starting point; this implementation returns the angle in degrees in the range -180 to 180 (the output of arctan2).
Parameter:
-------------------------
lat1: scalar,float
Start point latitude of the location.
lat2: scalar,float
End point latitude of the location.
long1: scalar,float
Start point longitude of the location.
long2: scalar,float
End point longitude of the location.
Returns: Series
        The Bearing between (lat1, long1) and (lat2, long2)
'''
AVG_EARTH_RADIUS = 6371
long_delta = np.radians(long2 - long1)
lat1, long1, lat2, long2 = map(np.radians, (lat1, long1, lat2, long2))
y = np.sin(long_delta) * np.cos(lat2)
x = np.cos(lat1) * np.sin(lat2) - np.sin(lat1) * np.cos(lat2) * np.cos(long_delta)
bearing = np.degrees(np.arctan2(y, x))
bearing_df = pd.Series(bearing)
return bearing_df
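# Illustrative usage sketch (not part of the original module): compass bearing for
# the same hypothetical coordinate pair as above. The implementation returns
# np.degrees(np.arctan2(...)), i.e. values in the range -180 to 180.
def _bearing_example():
    b = bearing(6.5244, 3.3792, 9.0765, 7.3986)
    assert -180.0 <= float(b.iloc[0]) <= 180.0
    return b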
def get_location_center(point1, point2):
'''
Calculates the center between two points.
Parameter:
---------------------------
    point1: list, series, scalar
        First point (e.g. latitude or longitude values).
    point2: list, series, scalar
        Second point (e.g. latitude or longitude values).
Returns: Series
The center between point1 and point2
'''
center = (point1 + point2) / 2
center_df = pd.Series(center)
return center_df
def log_transform(data, columns, plot=True, figsize=(12,6)):
'''
    Log-transforms the specified columns so their distributions are closer to Gaussian.
Parameter:
-----------------------------------------
data: DataFrame, Series.
Data to Log transform.
columns: List, Series
Columns to be transformed to normality using log transformation
plot: bool, default True
Plots a before and after log transformation plot
Returns:
Log-transformed dataframe
'''
if data is None:
raise ValueError("data: Expecting a DataFrame/ numpy2d array, got 'None'")
if columns is None:
raise ValueError("columns: Expecting at least a column in the list of columns but got 'None'")
df = data.copy()
for col in columns:
df[col] = np.log1p(df[col])
if plot:
for col in columns:
_ = plt.figure(figsize = figsize)
plt.subplot(1, 2, 1)
sns.distplot(data[col], color="m", label="Skewness : %.2f" % (df[col].skew()))
plt.title('Distribution of ' + col + " before Log transformation")
plt.legend(loc='best')
plt.subplot(1, 2, 2)
sns.distplot(df[col], color="m", label="Skewness : %.2f" % (df[col].skew()))
plt.title('Distribution of ' + col + " after Log transformation")
plt.legend(loc='best')
plt.tight_layout(2)
plt.show()
return df
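# Illustrative usage sketch (not part of the original module): log1p-transform a
# skewed column without plotting. The toy frame below is hypothetical.
def _log_transform_example():
    df = pd.DataFrame({"price": [1.0, 10.0, 100.0, 1000.0]})
    out = log_transform(df, columns=["price"], plot=False)
    assert np.allclose(out["price"], np.log1p(df["price"]))   # original frame is not modified
    return out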
def convert_dtype(df):
'''
Convert datatype of a feature to its original datatype.
    If the datatype of a feature is represented as a string while the underlying values are integers, floats,
    or even datetimes, convert_dtype() iterates over the feature(s) in a pandas dataframe and converts them to the appropriate datatype.
Parameter:
---------------------------
df: DataFrame, Series
Dataset to convert data type
Returns:
-----------------
DataFrame or Series.
Example:
data = {'Name':['Tom', 'nick', 'jack'],
'Age':['20', '21', '19'],
'Date of Birth': ['1999-11-17','20 Sept 1998','Wed Sep 19 14:55:02 2000']}
df = pd.DataFrame(data)
df.info()
>>>
<class 'pandas.core.frame.DataFrame'>
RangeIndex: 3 entries, 0 to 2
Data columns (total 3 columns):
Name 3 non-null object
Age 3 non-null object
Date of Birth 3 non-null object
dtypes: object(3)
memory usage: 76.0+ bytes
conv = convert_dtype(df)
conv.info()
>>>
<class 'pandas.core.frame.DataFrame'>
RangeIndex: 3 entries, 0 to 2
Data columns (total 3 columns):
Name 3 non-null object
Age 3 non-null int32
Date of Birth 3 non-null datetime64[ns]
dtypes: datetime64[ns](1), int32(1), object(1)
memory usage: 88.0+ bytes
'''
if df.isnull().any().any() == True:
raise ValueError("DataFrame contain missing values")
else:
i = 0
changed_dtype = []
#Function to handle datetime dtype
def is_date(string, fuzzy=False):
try:
parse(string, fuzzy=fuzzy)
return True
except ValueError:
return False
while i <= (df.shape[1])-1:
val = df.iloc[:,i]
if str(val.dtypes) =='object':
                val = val.apply(lambda x: re.sub(r"^\s+|\s+$", "", x, flags=re.UNICODE)) #Strip leading/trailing whitespace
try:
if str(val.dtypes) =='object':
if val.min().isdigit() == True: #Check if the string is an integer dtype
int_v = val.astype(int)
changed_dtype.append(int_v)
elif val.min().replace('.', '', 1).isdigit() == True: #Check if the string is a float type
float_v = val.astype(float)
changed_dtype.append(float_v)
elif is_date(val.min(),fuzzy=False) == True: #Check if the string is a datetime dtype
dtime = pd.to_datetime(val)
changed_dtype.append(dtime)
else:
changed_dtype.append(val) #This indicate the dtype is a string
else:
changed_dtype.append(val) #This could count for symbols in a feature
except ValueError:
raise ValueError("DataFrame columns contain one or more DataType")
except:
raise Exception()
i = i+1
        data_f = pd.concat(changed_dtype, axis=1)
return data_f
def bin_age(data, feature, bins, labels, fill_missing = None, drop_original = False):
'''
Categorize age data into separate bins
Parameter:
-----------------------------------------
    data: DataFrame, Series.
        Data containing the feature(s) to be binned.
    feature: List, Series
        Columns to be binned.
    bins: List, numpy.ndarray
        Specifies the bin edges. Bins must have one more element than labels.
    labels: List, Series
        Names assigned to the various categories.
    fill_missing (default = None): str
        mean : fill missing values with the feature average.
        mode : fill missing values with the most occurring value in the feature.
        median : fill missing values with the middle point of the feature.
    drop_original: bool
        Drops the original feature after binning.
Returns:
Returns a binned dataframe.
'''
df = data.copy()
for col in feature:
if fill_missing == None:
if df[col].isnull().any():
raise ValueError("data: Mising Value found in table")
else:
df[col + '_binned'] = pd.cut(x=df[col], bins= bins, labels=labels)
elif fill_missing == 'mean':
df[col].fillna(int(df[col].mean()), inplace = True)
df[col + '_binned'] = pd.cut(x=df[col], bins=bins, labels=labels)
elif fill_missing == 'mode':
df[col].fillna(int(df[col].mode()), inplace = True)
df[col + '_binned'] = pd.cut(x=df[col], bins=bins, labels=labels)
elif fill_missing == 'median':
df[col].fillna(int(df[col].median()), inplace = True)
df[col + '_binned'] = pd.cut(x=df[col], bins=bins, labels=labels)
if drop_original == True:
df.drop(columns = col, inplace = True)
return df
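# Illustrative usage sketch (not part of the original module): bucket an age column
# into labelled bins, filling missing ages with the mean first. The toy frame and
# bin edges below are hypothetical.
def _bin_age_example():
    df = pd.DataFrame({"age": [5, 17, 34, None, 70]})
    out = bin_age(df, feature=["age"], bins=[0, 18, 65, 120],
                  labels=["child", "adult", "senior"], fill_missing="mean")
    assert "age_binned" in out.columns
    return out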
def _nan_in_class(data):
cols = []
for col in data.columns:
if len(data[col].unique()) == 1:
cols.append(col)
if len(data[col].unique()) == 2:
if np.nan in list(data[col].unique()):
cols.append(col)
return cols
|
# coding: utf-8
from __future__ import absolute_import
from datetime import date, datetime # noqa: F401
from typing import List, Dict # noqa: F401
from tapi_server.models.base_model_ import Model
from tapi_server.models.tapi_common_capacity import TapiCommonCapacity # noqa: F401,E501
from tapi_server import util
class TapiCommonCapacityPac(Model):
"""NOTE: This class is auto generated by OpenAPI Generator (https://openapi-generator.tech).
Do not edit the class manually.
"""
def __init__(self, available_capacity=None, total_potential_capacity=None): # noqa: E501
"""TapiCommonCapacityPac - a model defined in OpenAPI
:param available_capacity: The available_capacity of this TapiCommonCapacityPac. # noqa: E501
:type available_capacity: TapiCommonCapacity
:param total_potential_capacity: The total_potential_capacity of this TapiCommonCapacityPac. # noqa: E501
:type total_potential_capacity: TapiCommonCapacity
"""
self.openapi_types = {
'available_capacity': TapiCommonCapacity,
'total_potential_capacity': TapiCommonCapacity
}
self.attribute_map = {
'available_capacity': 'available-capacity',
'total_potential_capacity': 'total-potential-capacity'
}
self._available_capacity = available_capacity
self._total_potential_capacity = total_potential_capacity
@classmethod
def from_dict(cls, dikt) -> 'TapiCommonCapacityPac':
"""Returns the dict as a model
:param dikt: A dict.
:type: dict
:return: The tapi.common.CapacityPac of this TapiCommonCapacityPac. # noqa: E501
:rtype: TapiCommonCapacityPac
"""
return util.deserialize_model(dikt, cls)
@property
def available_capacity(self):
"""Gets the available_capacity of this TapiCommonCapacityPac.
:return: The available_capacity of this TapiCommonCapacityPac.
:rtype: TapiCommonCapacity
"""
return self._available_capacity
@available_capacity.setter
def available_capacity(self, available_capacity):
"""Sets the available_capacity of this TapiCommonCapacityPac.
:param available_capacity: The available_capacity of this TapiCommonCapacityPac.
:type available_capacity: TapiCommonCapacity
"""
self._available_capacity = available_capacity
@property
def total_potential_capacity(self):
"""Gets the total_potential_capacity of this TapiCommonCapacityPac.
:return: The total_potential_capacity of this TapiCommonCapacityPac.
:rtype: TapiCommonCapacity
"""
return self._total_potential_capacity
@total_potential_capacity.setter
def total_potential_capacity(self, total_potential_capacity):
"""Sets the total_potential_capacity of this TapiCommonCapacityPac.
:param total_potential_capacity: The total_potential_capacity of this TapiCommonCapacityPac.
:type total_potential_capacity: TapiCommonCapacity
"""
self._total_potential_capacity = total_potential_capacity
|
import unittest
class TestIngest(unittest.TestCase):
    def test_placeholder(self):
        self.assertTrue(True)
if __name__ == '__main__':
unittest.main() |
import logging
from Pegasus.db.admin.admin_loader import DBAdminError
from Pegasus.db.admin.versions.base_version import BaseVersion
DB_VERSION = 13
log = logging.getLogger(__name__)
class Version(BaseVersion):
def __init__(self, connection):
super().__init__(connection)
def update(self, force=False):
"""."""
log.debug("Updating to version %s" % DB_VERSION)
try:
self.db.execute("ALTER TABLE invocation ADD COLUMN maxrss INTEGER")
self.db.execute("ALTER TABLE invocation ADD COLUMN avg_cpu NUMERIC(16, 6)")
except Exception as e:
if "uplicate column name" not in str(
e
) and "no such table: invocation" not in str(e):
self.db.rollback()
raise DBAdminError(e)
def downgrade(self, force=False):
"""."""
log.debug("Downgrading from version %s" % DB_VERSION)
# no downgrade is necessary
|
""" Tests for asset pull """
import logging
import mock
import os
import sys
if sys.version_info.major < 3:
ConnectionRefusedError = Exception
else:
from urllib.error import HTTPError
import pytest
from refgenconf.const import *
from refgenconf.exceptions import *
from refgenconf.refgenconf import _download_url_progress
from refgenconf import RefGenConf
from .conftest import remove_asset_and_file
__author__ = "Vince Reuter"
__email__ = "[email protected]"
DOWNLOAD_FUNCTION = "refgenconf.refgenconf.{}".format(_download_url_progress.__name__)
@pytest.mark.parametrize(
["genome", "asset", "tag"], [("rCRSd", "fasta", "default"), ("rCRSd", "fasta", "default")])
def test_no_unpack(rgc, genome, asset, tag):
""" Tarballs must be unpacked. """
with pytest.raises(NotImplementedError):
rgc.pull_asset(genome, asset, tag, unpack=False)
@pytest.mark.parametrize(["gname", "aname"], [("human_repeats", 1), ("mouse_chrM2x", None)])
def test_pull_asset_illegal_asset_name(rgc, gname, aname):
""" TypeError occurs if asset argument is not iterable. """
with pytest.raises(TypeError):
rgc.pull_asset(gname, aname)
@pytest.mark.parametrize(["gname", "aname", "tname"],
[("human_repeats", "bowtie2_index", "default"), ("mouse_chrM2x", "bwa_index", "default")])
def test_negative_response_to_large_download_prompt(rgc, gname, aname, tname):
""" Test responsiveness to user abortion of pull request. """
with mock.patch("refgenconf.refgenconf._is_large_archive", return_value=True), \
mock.patch("refgenconf.refgenconf.query_yes_no", return_value=False):
gat, archive_dict, server_url = rgc.pull_asset(gname, aname, tname)
assert gat == [gname, aname, tname]
@pytest.mark.parametrize(["gname", "aname", "tname"],
[("human_repeats", "bowtie2_index", "default"), ("mouse_chrM2x", "bwa_index", "default")])
def test_download_interruption(my_rgc, gname, aname, tname, caplog):
""" Download interruption provides appropriate warning message and halts. """
import signal
print("filepath: " + my_rgc._file_path)
def kill_download(*args, **kwargs):
os.kill(os.getpid(), signal.SIGINT)
with mock.patch(DOWNLOAD_FUNCTION, side_effect=kill_download), \
mock.patch("refgenconf.refgenconf.query_yes_no", return_value=True), \
caplog.at_level(logging.WARNING), \
pytest.raises(SystemExit):
my_rgc.pull_asset(gname, aname, tname)
records = caplog.records
assert 1 == len(records)
r = records[0]
assert "WARNING" == r.levelname
assert "The download was interrupted" in r.msg
@pytest.mark.parametrize(["gname", "aname", "tname"], [("human_repeats", "fasta", "default"), ("mouse_chrM2x", "fasta", "default")])
def test_pull_asset(my_rgc, gname, aname, tname):
with mock.patch("refgenconf.refgenconf.query_yes_no", return_value=True):
print("\nPulling; genome: {}, asset: {}, tag: {}\n".format(gname, aname, tname))
my_rgc.pull_asset(gname, aname, tname)
@pytest.mark.parametrize(["gname", "aname", "tname"],
[("rCRSd", "bowtie2_index", "default"), ("mouse_chrM2x", "bwa_index", "default")])
def test_parent_asset_mismatch(my_rgc, gname, aname, tname):
""" Test that an exception is raised when remote and local parent checksums do not match on pull"""
with mock.patch("refgenconf.refgenconf.query_yes_no", return_value=True):
my_rgc.pull_asset(gname, "fasta", tname)
my_rgc.make_writable()
my_rgc.write()
ori = my_rgc[CFG_GENOMES_KEY][gname][CFG_ASSETS_KEY]["fasta"][CFG_ASSET_TAGS_KEY][tname][CFG_ASSET_CHECKSUM_KEY]
my_rgc[CFG_GENOMES_KEY][gname][CFG_ASSETS_KEY]["fasta"][CFG_ASSET_TAGS_KEY][tname][CFG_ASSET_CHECKSUM_KEY] = "wrong"
with mock.patch("refgenconf.refgenconf.query_yes_no", return_value=True):
with pytest.raises(RemoteDigestMismatchError):
my_rgc.pull_asset(gname, aname, tname)
with my_rgc as r:
r[CFG_GENOMES_KEY][gname][CFG_ASSETS_KEY]["fasta"][CFG_ASSET_TAGS_KEY][tname][CFG_ASSET_CHECKSUM_KEY] = ori
my_rgc.make_readonly()
@pytest.mark.parametrize(["gname", "aname", "tname"], [("rCRSd", "bowtie2_index", "default"),
("mouse_chrM2x", "bwa_index", "default")])
def test_pull_asset_updates_genome_config(cfg_file, gname, aname, tname):
"""
Test that the object that was identical prior to the asset pull differs afterwards
and the pulled asset metadata has been written to the config file
"""
ori_rgc = RefGenConf(filepath=cfg_file, writable=False)
rgc = RefGenConf(filepath=cfg_file, writable=False)
remove_asset_and_file(rgc, gname, aname, tname)
remove_asset_and_file(ori_rgc, gname, aname, tname)
# ori_rgc.remove_assets(gname, aname, tname)
assert ori_rgc.to_dict() == rgc.to_dict()
with mock.patch("refgenconf.refgenconf.query_yes_no", return_value=True):
print("\nPulling; genome: {}, asset: {}, tag: {}\n".format(gname, aname, tname))
rgc.pull_asset(gname, aname, tname)
assert not ori_rgc.to_dict() == rgc.to_dict()
post_rgc = RefGenConf(filepath=cfg_file, writable=False)
assert isinstance(post_rgc.get_asset(gname, aname, tname), str)
@pytest.mark.parametrize(["gname", "aname", "tname", "state"],
[("human_repeats", "fasta", "default", True),
("mouse_chrM2x", "fasta", "default", False)])
def test_pull_asset_works_with_nonwritable_and_writable_rgc(cfg_file, gname, aname, tname, state):
rgc = RefGenConf(filepath=cfg_file, writable=state)
remove_asset_and_file(rgc, gname, aname, tname)
with mock.patch("refgenconf.refgenconf.query_yes_no", return_value=True):
print("\nPulling; genome: {}, asset: {}, tag: {}\n".format(gname, aname, tname))
rgc.pull_asset(gname, aname, tname)
|
from __future__ import absolute_import
from collections import namedtuple
Observations = namedtuple('Observations', 'x y')
|
import unittest
from katas.kyu_7.bouncy_numbers import is_bouncy
class IsBouncyTestCase(unittest.TestCase):
def test_true_1(self):
self.assertTrue(is_bouncy(101))
def test_true_2(self):
self.assertTrue(is_bouncy(120))
def test_true_3(self):
self.assertTrue(is_bouncy(2351))
def test_false_1(self):
self.assertFalse(is_bouncy(0))
def test_false_2(self):
self.assertFalse(is_bouncy(99))
def test_false_3(self):
self.assertFalse(is_bouncy(122))
def test_false_4(self):
self.assertFalse(is_bouncy(221))
def test_false_5(self):
self.assertFalse(is_bouncy(1235))
def test_false_6(self):
self.assertFalse(is_bouncy(5321))
|
# -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>
#
##############################################################################
from openerp.osv import fields, osv
from openerp.tools import config
class document_ftp_configuration(osv.osv_memory):
_name='document.ftp.configuration'
_description = 'Auto Directory Configuration'
_inherit = 'res.config'
_rec_name = 'host'
_columns = {
'host': fields.char('Address', size=64,
help="Server address or IP and port to which users should connect to for DMS access",
required=True),
}
_defaults = {
'host': config.get('ftp_server_host', 'localhost') + ':' + config.get('ftp_server_port', '8021'),
}
def execute(self, cr, uid, ids, context=None):
conf = self.browse(cr, uid, ids[0], context=context)
data_pool = self.pool.get('ir.model.data')
# Update the action for FTP browse.
aid = data_pool._get_id(cr, uid, 'document_ftp', 'action_document_browse')
aid = data_pool.browse(cr, uid, aid, context=context).res_id
self.pool.get('ir.actions.act_url').write(cr, uid, [aid],
{'url': 'ftp://'+(conf.host or 'localhost:8021')+'/' + cr.dbname+'/'})
document_ftp_configuration()
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
|
"""
Run the test cases for the excercise as unittests.
"""
import unittest
from solution import solve
class Test(unittest.TestCase):
def test_case_1(self):
n = 4
k = 1
ar = [3, 10, 2, 9]
b = 12
answer = 5
self.assertEqual(answer, solve(n, k, b, ar))
def test_case_2(self):
n = 4
k = 1
ar = [3, 10, 2, 9]
b = 7
answer = "Bon Appetit"
self.assertEqual(answer, solve(n, k, b, ar))
if __name__ == "__main__":
unittest.main()
|
import board
import busio
import adafruit_bd3491fs
i2c = busio.I2C(board.SCL, board.SDA)
bd3491fs = adafruit_bd3491fs.BD3491FS(i2c)
bd3491fs.active_input = adafruit_bd3491fs.Input.A
bd3491fs.input_gain = adafruit_bd3491fs.Level.LEVEL_20DB
bd3491fs.channel_1_attenuation = 0
bd3491fs.channel_2_attenuation = 0
|
from django.urls import path
from .views import chapter_form, ChapterFormView
app_name = 'warhammer'
urlpatterns = [
path('', chapter_form, name='chapter_form'),
]
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# @FileName :__init__.py.py
# @Author :Lowell
# @Time :2022/3/30 08:59
import importlib
import os
import time
import warnings
from pathlib import Path
from django.conf import global_settings
from django.core.exceptions import ImproperlyConfigured
from django.utils.deprecation import RemovedInDjango50Warning
from django.utils.functional import LazyObject, empty
ENVIRONMENT_VARIABLE = "DJANGO_SETTINGS_MODULE"
USE_DEPRECATED_PYTZ_DEPRECATED_MSG = (
"The USE_DEPRECATED_PYTZ setting, and support for pytz timezones is "
"deprecated in favor of the stdlib zoneinfo module. Please update your "
"code to use zoneinfo and remove the USE_DEPRECATED_PYTZ setting."
)
USE_L10N_DEPRECATED_MSG = (
"The USE_L10N setting is deprecated. Starting with Django 5.0, localized "
"formatting of data will always be enabled. For example Django will "
"display numbers and dates using the format of the current locale."
)
# The CSRF_COOKIE_MASKED transitional setting will be removed in Django 5.0
CSRF_COOKIE_MASKED_DEPRECATED_MSG = (
"The CSRF_COOKIE_MASKED transitional setting is deprecated. Support for "
"it will be removed in Django 5.0."
)
class LazySettings(LazyObject):
"""
    Django uses the settings module pointed to by DJANGO_SETTINGS_MODULE.
"""
def _setup(self, name=None):
"""
        Load the settings module pointed to by the DJANGO_SETTINGS_MODULE environment variable.
"""
settings_module = os.environ.get(ENVIRONMENT_VARIABLE)
if not settings_module:
desc = ("setting %s" % name) if name else "settings"
raise ImproperlyConfigured(
"Requested %s, but settings are not configured. "
"You must either define the environment variable %s "
"or call settings.configure() before accessing settings."
% (desc, ENVIRONMENT_VARIABLE)
)
self._wrapped = Settings(settings_module)
def __repr__(self):
if self._wrapped is empty:
return "<LazySettings [Unevaluated]>"
return '<LazySettings "%(settings_module)s">' % {
"settings_module": self._wrapped.SETTINGS_MODULE,
}
def __getattr__(self, name):
"""返回setting里的值, 并缓存到self.__dict__中"""
if (_wrapped := self._wrapped) is empty:
self._setup()
_wrapped = self._wrapped
val = getattr(_wrapped, name)
        # For performance reasons, do this here so the modified value is cached.
if name in {"MEDIA_URL", "STATIC_URL"} and val is not None:
val = self._add_script_prefix(val)
elif name == "SECRET_KEY" and not val:
raise ImproperlyConfigured("The SECRET_KEY setting must not be empty.")
self.__dict__[name] = val
return val
def __setattr__(self, name, value):
"""
        Update the entire settings configuration, or update a single setting variable.
"""
if name == "_wrapped":
self.__dict__.clear()
else:
self.__dict__.pop(name, None)
super().__setattr__(name, value)
def __delattr__(self, name):
"""删除某个配置"""
super().__delattr__(name)
self.__dict__.pop(name, None)
@property
def configured(self):
"""判断如果settings已经配置过了"""
return self._wrapped is not empty
@staticmethod
def _add_script_prefix(value):
"""
        Add the script name prefix to relative paths.
"""
        # Don't apply the prefix to absolute paths.
if value.startswith(("http://", "https://", "/")):
return value
from django.urls import get_script_prefix
return "%s%s" % (get_script_prefix(), value)
class Settings:
def __init__(self, settings_module):
        # Load the default global settings.
for setting in dir(global_settings):
if setting.isupper():
                # Load all upper-case variables.
setattr(self, setting, getattr(global_settings, setting))
self.SETTINGS_MODULE = settings_module
mod = importlib.import_module(self.SETTINGS_MODULE)
tuple_settings = (
"ALLOWED_HOSTS",
"INSTALLED_APPS",
"TEMPLATE_DIRS",
"LOCALE_PATHS",
"SECRET_KEY_FALLBACKS",
)
self._explicit_settings = set()
        # Load the user-defined settings.
for setting in dir(mod):
if setting.isupper():
setting_value = getattr(mod, setting)
if setting in tuple_settings and not isinstance(
setting_value, (list, tuple)
):
raise ImproperlyConfigured(
"The %s setting must be a list or a tuple." % setting
)
setattr(self, setting, setting_value)
self._explicit_settings.add(setting)
if self.USE_TZ is False and not self.is_overridden("USE_TZ"):
warnings.warn(
"The default value of USE_TZ will change from False to True "
"in Django 5.0. Set USE_TZ to False in your project settings "
"if you want to keep the current default behavior.",
category=RemovedInDjango50Warning,
)
if self.is_overridden("USE_DEPRECATED_PYTZ"):
warnings.warn(USE_DEPRECATED_PYTZ_DEPRECATED_MSG, RemovedInDjango50Warning)
if self.is_overridden("CSRF_COOKIE_MASKED"):
warnings.warn(CSRF_COOKIE_MASKED_DEPRECATED_MSG, RemovedInDjango50Warning)
if hasattr(time, "tzset") and self.TIME_ZONE:
            # Validate the system time zone if possible; otherwise do nothing.
zoneinfo_root = Path("usr/share/zoneinfo")
zone_info_file = zoneinfo_root.joinpath(*self.TIME_ZONE.split("/"))
if zoneinfo_root.exists() and not zone_info_file.exists():
raise ValueError("Incorrect timezone setting: %s" % self.TIME_ZONE)
            # Set the time zone as an environment variable (#2315).
os.environ["TZ"] = self.TIME_ZONE
time.tzset()
if self.is_overridden("USE_L10N"):
warnings.warn(USE_L10N_DEPRECATED_MSG, RemovedInDjango50Warning)
def is_overridden(self, setting):
return setting in self._explicit_settings
def __repr__(self):
return '<%(cls)s "%(settings_module)s">' % {
"cls": self.__class__.__name__,
"settings_module": self.SETTINGS_MODULE,
}
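# Minimal usage sketch (illustrative only; assumes DJANGO_SETTINGS_MODULE points
# at an importable settings module):
#
#   os.environ.setdefault("DJANGO_SETTINGS_MODULE", "myproject.settings")
#   from django.conf import settings
#   print(settings.DEBUG)  # first access triggers _setup() and caches the value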
settings = LazySettings() |
#-*- coding: utf-8 -*
#
# Copyright 2011 shuotao.me
# Copyright 2012 msx.com
# by [email protected]
# 2011-10-9
#
# Sputnik Database Object
# Provides an interface for *relational* database operations and wraps cache usage
# SQL statement assembly
# Object-relational mapping (ORM); only MySQL is supported
#
# ToDoList:
# Support force index(key) 2012-3-17
# Support [x:x] slicing (limit) when calling find()
# Let find() accept fields directly as keyword arguments, e.g. find(id=x, name=x)
#
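#
# Minimal usage sketch (illustrative only; assumes a model class defined with the
# Field/SpuDBObject API below and a configured SpuDB connection):
#
#   class User(SpuDBObject):
#       _table_ = 'user'
#       id = Field(int, None, auto_inc=True)
#       name = Field(str, '')
#
#   u = User.scheme()
#   user = User.find_one(u.name == 'foo')
#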
import re
import time
import datetime
import copy
from inspect import *
import SpuException
import SpuUtil as util
from sputnik import global_assert_config
from SpuLogging import *
from SpuPythonObject import *
from SpuJson import *
from SpuSQLRollback import *
from SpuDateTime import SpuDateTime
from SpuException import NotImplInterface
from SpuDB import SpuDB, DBDuplicateEntry, SpuDBManager, default_server
from SpuHook import SpuHook as Hook
from SpuDBObjectProfile import SDBProfile
from SpuDebug import *
_logging = SpuLogging(module_name = 'SpuDBObject', app_log=False)
default_rollbackQueue = None
def init_spudb(spudb):
"""
    Kept for backward compatibility with older versions.
"""
SpuDBManager.add_spudb(spudb)
class SpuDBObjectConfig(object):
"""
{
'sql_optimize_debug' : True,
'sql_optimize_in_subquery' : True,
'sql_optimize_notin_subquery' : True,
'sql_optimize_count' : True
}
"""
config = None
@classmethod
def set_config(cls, config):
cls.config = config
@classmethod
def get_config(cls, c, d = None):
if not cls.config:
return d
return cls.config.get(c, d)
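# Illustrative configuration (sketch only; keys taken from the docstring above):
#
#   SpuDBObjectConfig.set_config({
#       'sql_optimize_in_subquery': True,
#       'sql_optimize_count': True,
#   })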
def get_table_name(table, table_as=''):
tablename = ''
if type(table) == str:
tablename = table
if table_as:
tablename += " as %s " % table_as
elif isinstance(table, (SpuDBObject, SpuTableScheme)):
tablename = table._table
table_as_t = table.get_table_as()
if table_as_t:
tablename += " as %s " % table_as_t
elif table_as:
tablename += " as %s " % table_as
elif hasattr(table, '_table_'):
tablename = table._table_
if table_as:
tablename += " as %s " % table_as
else:
assert None, "Unknow Table Type: %s Name: %s" % (type(table._table), table._table)
return tablename
def is_unique(field):
# 'field'
if type(field) == str:
        if field == 'id':
return True
return False
# table.field
elif isinstance(field, Field):
return field.auto_inc or field.unique
# Alias('field', 'alias')
elif isinstance(field, Alias):
if field._field:
            return field._field.auto_inc or field._field.unique
else:
if field.name == 'id':
return True
return False
def get_field_name(field):
# 'field'
if type(field) == str:
fieldname = field
# table.field
elif isinstance(field, Field) or hasattr(field, 'get_field_name'):
fieldname = field.get_field_name()
# Alias('field', 'alias')
elif isinstance(field, Alias):
fieldname = field.alias
else:
assert None, "Unknow Field Type: %s" % str(field)
return fieldname
def get_field_original_name(field):
# 'field'
if type(field) == str:
fieldname = field
# table.field
elif isinstance(field, Field):
fieldname = field.get_field_name()
# Alias('field', 'alias')
elif isinstance(field, Alias):
fieldname = field.name
else:
assert None, "Unknow Field Type: %s" % str(field)
return fieldname
def get_where_cond(where_cond):
if type(where_cond) == str:
return where_cond
where = where_cond.sql()
where_cond.clear_obj()
return where
def get_join_cond(join_cond):
if type(join_cond) == str:
return join_cond
cond = join_cond.sql(check_stack = False)
join_cond.remove_nodes_head()
return cond
def sql_join(sql_nodes):
try:
return ''.join(sql_nodes)
except Exception as m:
_logging.spu_error("Sql Join Faild Msg:%s Node:%s" % (m, sql_nodes))
return ''
def db_execsql(spudb, cache, sql, remove_cache=True):
"""
remove all sql cache on process cache
"""
r = spudb.execsql(sql)
if cache and remove_cache:
cache.remove_all(cache.ProcessCache, cache_event=False)
return r
def db_query(spudb, cache, sql, cache_expire=1):
"""
cache duplicate sql query on session, so only use process cache
no send local and global cache event
cache_expire: default 1 second
"""
if cache:
value = cache.get_value(sql, cache.ProcessCache)
if value != None:
return value
r = spudb.query(sql)
if cache:
cache.set_value(sql, r, cache_expire,
cache.ProcessCache, cache_event=False)
return r
class FieldDefaultValue(object):
def __init__(self, fields):
self._fields = fields
self._default = {}
self._init_default_table()
def _type_default(self, _type):
if _type == str or _type in Field.string_types:
return ""
elif _type == int or _type in Field.integer_types:
return 0
elif _type == float or _type in Field.float_types:
return 0.0
        elif _type == datetime.datetime or _type == Field.datetime:
return '0-0-0 0:0:0'
return None
def _add_default_value(self, field):
if isinstance(field, Field):
self._default[field.name] = self._type_default(field.type)
elif isinstance(field, Alias):
self._default[field.alias] = self._type_default(field.type)
def _init_default_table(self):
if type(self._fields) in (list, tuple):
for field in self._fields:
self._add_default_value(field)
else:
self._add_default_value(self._fields)
def get_default_value(self, field, default = None):
return self._default.get(field, default)
class UnknowCond(SpuException.SpuException):
def __init__(self):
pass
def __str__(self):
        return 'Unknown condition'
    def __repr__(self):
        return 'Unknown condition'
class CondNode(object):
_sql = (1, '%s')
_eq = (11, '%s = %s')
_ne = (12, '%s != %s')
_lt = (13, '%s < %s')
_gt = (14, '%s > %s')
_le = (15, '%s <= %s')
_ge = (16, '%s >= %s')
_in = (17, '%s in (%s)')
_not_in = (18, '%s not in (%s)')
_like = (19, '(%s like %s)')
_and = (30, '(%s and %s)')
_or = (31, '(%s or %s)')
def __init__(self):
self._nodes = []
self._objs = []
# optimized
self._close_query = False
def _add_obj(self, obj):
if type(obj) == list or type(obj) == tuple:
self._objs += obj
elif isinstance(obj, CondNode):
self._objs.append(obj)
def clear_obj(self):
for obj in self._objs:
obj._nodes = []
obj._objs = []
self._nodes = []
self._objs = []
def _get_lvalue(self, x):
if isinstance(x, SubQuery):
s = x.sql()
elif isinstance(x, SqlNode):
s = x.sqlnode_sql()
else:
s = get_field_name(x)
return s
def _get_rvalue(self, y):
if isinstance(y, SubQuery):
s = y.sql()
elif isinstance(y, SqlNode):
s = y.sqlnode_sql()
elif isinstance(y, FieldName):
s = get_field_name(y.field)
elif isinstance(y, Field):
s = self._escape(y.value)
else:
s = self._escape(y)
return s
def _escape(self, v):
t = type(v)
if t == str or t == unicode:
s = Field.escape(v)
elif isinstance(v, datetime.datetime):
s = SpuDateTime.datetime2str(v)
else:
s = v
return s
def __eq__(self, y):
"""x == y"""
self._add_obj(y)
x = self._get_lvalue(self)
s = self._get_rvalue(y)
node = (CondNode._eq, x, s)
self._nodes.append(node)
return self
def __ne__(self, y):
"""x != y"""
self._add_obj(y)
x = self._get_lvalue(self)
s = self._get_rvalue(y)
node = (CondNode._ne, x, s)
self._nodes.append(node)
return self
def __lt__(self, y):
"""x < y"""
x = self._get_lvalue(self)
s = self._get_rvalue(y)
node = (CondNode._lt, x, s)
self._nodes.append(node)
return self
def __gt__(self, y):
"""x > y"""
self._add_obj(y)
x = self._get_lvalue(self)
s = self._get_rvalue(y)
node = (CondNode._gt, x, s)
self._nodes.append(node)
return self
def __le__(self, y):
"""x <= y"""
self._add_obj(y)
x = self._get_lvalue(self)
s = self._get_rvalue(y)
node = (CondNode._le, x, s)
self._nodes.append(node)
return self
def __ge__(self, y):
"""x >= y"""
self._add_obj(y)
x = self._get_lvalue(self)
s = self._get_rvalue(y)
node = (CondNode._ge, x, s)
self._nodes.append(node)
return self
def _double_node(self, y, opcode):
self._add_obj(y)
if id(self) == id(y):
x = self.pop_node()
y = self.pop_node()
else:
x = self.pop_node()
y = y.pop_node()
node = (opcode, x, y)
self._nodes.append(node)
def ignore_none_node(self, y):
if isinstance(self, SqlNoneNode) and \
len(self._nodes) == 0:
if not isinstance(y, SqlNoneNode):
self._add_obj(y)
self._nodes.append(y.pop_node())
return True
return False
def __and__(self, y):
""" x and y"""
# ignore SqlNoneNode
if self.ignore_none_node(y):
return self
if not isinstance(y, SqlNoneNode):
self._double_node(y, Field._and)
return self
def __or__(self, y):
""" x or y"""
# ignore SqlNoneNode
if self.ignore_none_node(y):
return self
if not isinstance(y, SqlNoneNode):
self._double_node(y, Field._or)
return self
def _gen_sql(self, node):
opcode = node[0][0]
template = node[0][1]
ldata = node[1]
rdata = node[2]
if opcode < Field._eq[0]:
return template % ldata
if opcode < Field._and[0]:
return template % (ldata, rdata)
lc = self._gen_sql(ldata)
rc = self._gen_sql(rdata)
return template % (lc, rc)
def pop_node(self):
assert len(self._nodes) > 0, 'Node Stack is Empty, Please Check Cond , a Complete Bracket'
return self._nodes.pop()
def node_count(self):
return len(self._nodes)
def remove_nodes_head(self):
self._nodes.pop(0)
def sql(self, check_stack = True):
if isinstance(self, SqlNoneNode) and len(self._nodes) == 0:
return ''
if check_stack and len(self._nodes) != 1:
_logging.spu_error("[SqlGenError]Node Stack Len: %s Stack: %s" % (len(self._nodes), self._nodes))
self._nodes = []
assert 0
s = self._gen_sql(self._nodes[0])
if self._close_query:
_logging.set_class_func('CondNode', 'sql')
_logging.flowpath_db('Optimized Close Query: (%s)' % s)
return None
return s
def cond_sql(self):
sql = self.sql()
self.clear_obj()
return sql
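# Illustrative condition building (sketch only; assumes a table scheme `t` with
# `id` and `name` Field attributes; the exact SQL text depends on escaping):
#
#   cond = (t.id > 10) & (t.name == 'foo')
#   cond.cond_sql()   # -> roughly "(t.id > 10 and t.name = 'foo')"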
class Field(CondNode):
""" Field Class """
# Field Type
unknow = 'UNKNOW'
none = 'NONE'
tinyint = 'TINYINT'
smallint = 'SMALLINT'
mediumint = 'MEDIUMINT'
int = 'INT'
integer = 'INTEGER'
bigint = 'BIGINT'
float = 'FLOAT'
double = 'DOUBLE'
numeric = 'NUMERIC'
date = 'DATE'
datetime = 'DATETIME'
timestamp = 'TIMESTAMP'
time = 'TIME'
year = 'YEAR'
char = 'CHAR'
varchar = 'VARCHAR'
tinyblob = 'TINYBLOB'
tinytext = 'TINYTEXT'
blob = 'BLOB'
text = 'TEXT'
mediumblob = 'MEDIUMBLOB'
mediumtext = 'MEDIUMTEXT'
longblob = 'LONGBLOB'
longtext = 'LONGTEXT'
enum = 'ENUM'
set = 'SET'
type_list = (tinyint, smallint, mediumint, int, integer, bigint, float, double,
numeric, date, datetime, timestamp, time, year, char, varchar,
tinyblob, tinytext, blob, text, mediumblob, mediumtext, longblob,
longtext, enum, set)
number_types = (tinyint, smallint, mediumint, int, integer, bigint, float, double,
numeric)
integer_types = (tinyint, smallint, mediumint, int, integer, bigint)
float_types = (float, double, numeric)
datetime_types = (date, datetime, timestamp, time, year)
string_types = (char, varchar, tinyblob, tinytext, blob, text, mediumblob,
mediumtext, longblob, longtext)
@classmethod
def field_type_list(cls):
return cls.type_list
@classmethod
def escape(cls, v):
spudb = SpuDBManager.get_spudb()
return spudb.escape(v)
def __init__(self, _type, value,
_len=0, auto_inc=False,
unique=False, primarykey=False):
CondNode.__init__(self)
self.value = value
self.len = _len
self.auto_inc = auto_inc
self.unique = unique
self.primarykey = primarykey
self.writed = False
self.name = None
self.table = None
self.table_as = ''
self.__get_field_type(_type)
def __str__(self):
return "<FieldName:%s Type:%s Table:%s As:%s Value:%s Len:%s AutoInc:%s" \
" Unique:%s>" % (
self.name,
self.type,
self.table,
self.table_as,
self.value,
self.len,
self.auto_inc,
self.unique)
def __repr__(self):
return self.__str__()
def __get_field_type(self, _type):
if _type in (str, unicode):
if self.len == 0:
t = self.text
else:
t = self.varchar
elif _type is int:
t = self.int
elif _type in (datetime, SpuDateTime):
t = self.datetime
elif _type is float:
t = self.float
elif _type is bool:
t = self.tinyint
elif _type is long:
t = self.bigint
elif type(_type) is str:
t = _type
elif _type is type(None):
t = self.none
else:
            assert 0, 'Unknown Field Type: %s' % _type
self.type = t
def clone(self):
return copy.deepcopy(self)
def set_field_name(self, name):
self.name = name
def set_table(self, table):
self.table = table
def set_table_as(self, table_as):
self.table_as = table_as
def get_table_as(self):
return self.table_as
def get_table_name(self):
return self.table
def get_table_name_sql(self):
table_name = self.table
table_as = self.get_table_as()
if table_as:
return "%s as %s" % (self.table, table_as)
return self.table
def get_field_name(self):
if self.table_as:
fieldname = "%s.%s" % (self.table_as, self.name)
elif self.table:
fieldname = "%s.%s" % (self.table, self.name)
else:
fieldname = self.name
return fieldname
def set_writed(self):
self.writed = True
def no_writed(self):
self.writed = False
class FieldName(object):
def __init__(self, field):
self.field = field
FN = FieldName
class FieldLink(object):
def __init__(self):
self._join = []
def addlink(self, ffield, tfield, left = True):
"""
ffield: join table
"""
table = ffield.get_table_name_sql()
join = Join(table, tfield == FN(ffield))
if left:
join.left()
self._join.append(join)
def sql(self):
joinsql = []
for join in self._join:
joinsql.append(join.sql())
# The same foreign key join multiple tables
for join in self._join:
join.cond_clear_obj()
return sql_join(joinsql)
class FieldView(object):
CacheCondKey = 1
def __init__(self, field_view):
self._field_view = []
self._cache_condkey = None
self._cache_condkey_field_origname = None
self.parse_fieldview(field_view)
def get_field_view(self):
return self._field_view
def get_cache_condkey(self):
return self._cache_condkey
def get_cache_condkey_field_origname(self):
return self._cache_condkey_field_origname
def parse_condkey(self, field):
if type(field) == tuple:
if field[1] == self.CacheCondKey:
assert not self._cache_condkey, 'Already Cache Cond Key: %s' % field[0]
self._cache_condkey = get_field_name(field[0])
self._cache_condkey_field_origname = get_field_original_name(field[0])
else:
                assert 0, 'Unknown FieldView Type'
field = field[0]
return field
def parse_fieldview(self, field_view):
field_view_new = []
cache_condkey = None
origname = None
if type(field_view) != list:
self.parse_condkey(field_view)
self._field_view = field_view
return
for field in field_view:
_field = self.parse_condkey(field)
field_view_new.append(_field)
self._field_view = field_view_new
class Select(object):
def __init__(self, table):
self._table = table
self.reset()
def reset(self):
if hasattr(self, '_pageinfo'):
self._lastone_pageinfo = self._pageinfo
else:
self._lastone_pageinfo = None
self._pageinfo = None
self._sort = None
self._groupby = None
self._join = None
self._subquery = None
self._table_list = None
self._limit = None
self._distinct = False
self._table_as = ''
def get_table_name(self):
return self._table
def set_table_name(self, table_name):
self._table = table_name
def get_pageinfo(self):
return self._pageinfo
def set_lastone_pageinfo(self, pageinfo):
self._lastone_pageinfo = pageinfo
def get_lastone_pageinfo(self):
return self._lastone_pageinfo
def pageinfo(self, pageinfo):
self.set_lastone_pageinfo(pageinfo)
self._pageinfo = pageinfo
return self
def limit(self, row_count, row_start = None):
if row_start is None:
self._limit = " limit %s" % row_count
else:
self._limit = " limit %s,%s" % (row_start, row_count)
return self
def distinct(self):
self._distinct = True
return self
def sort(self, sort):
self._sort = sort
return self
def groupby(self, groupby):
self._groupby = groupby
return self
def join(self, join):
if not self._join:
self._join = []
if hasattr(join, "sql"):
join = join.sql()
self._join.append(join)
return self
def fieldlink(self, fl):
return self.join(fl)
def sub_table(self, table, table_as=''):
if not self._table_list:
self._table_list = []
self._table_list.append((table, table_as))
return self
def table_as(self, _table_as):
self._table_as = _table_as
def get_table_as(self):
return self._table_as
def clean_table_as(self):
self.table_as('')
def subquery(self, subquery):
self._subquery = subquery
return self
def where_cond(self, where_cond):
return get_where_cond(where_cond)
def get_field_def(self, f):
# process string
if type(f) == str:
fl = f
# process field
elif isinstance(f, Field):
fl = f.get_field_name()
# process alias
elif isinstance(f, Alias):
fl = "%s as %s" % (f.name, f.alias)
# process function
elif isinstance(f, Function):
fl = f.sql()
else:
assert None, "Unknow Field Type: %s Field:%s" % (type(f), f)
return fl
def select(self,
where_cond,
table_list = None,
join_query = None,
sub_query = None,
union_query = None,
groupby_cond = None,
orderby_cond = None,
count_cond = None,
limit_cond = None,
distinct = False,
fields = []):
sql = []
sql.append("select ")
if distinct:
sql.append("distinct ")
if fields:
if type(fields) == list:
for f in fields:
fl = self.get_field_def(f)
sql.append(fl)
sql.append(", ")
sql.pop()
else:
fl = self.get_field_def(fields)
sql.append(fl)
else:
sql.append("*")
sql.append(" from ")
sql.append(self._table)
if self._table_as:
sql.append(" as %s " % self._table_as)
if table_list:
sql.append(', ')
sql.append(table_list)
if join_query:
sql.append(join_query)
if where_cond:
sql.append(" where %s" % where_cond)
if groupby_cond:
sql.append(groupby_cond)
if orderby_cond:
sql.append(orderby_cond)
if limit_cond:
sql.append(limit_cond)
return sql_join(sql)
def sql(self, where, fields=[], real=False):
where_cond = None
if where:
            # a where clause was given but where_cond() returned None, so the query was optimized away
where_cond = self.where_cond(where)
if where_cond == None:
return None
table_list = None
if self._table_list:
tables = []
for (table, table_as) in self._table_list:
tables.append(get_table_name(table, table_as))
table_list = ','.join(tables)
join_query = None
if self._join:
joins = []
for join in self._join:
if type(join) == str:
joins.append(join)
else:
joins.append(join.sql())
join_query = sql_join(joins)
orderby_cond = None
if self._sort:
if type(self._sort) == str:
orderby_cond = " " + self._sort
else:
orderby_cond = self._sort.sql()
limit_cond = None
if self._pageinfo:
if type(self._pageinfo) == str:
limit_cond = " " + self._pageinfo
else:
self._pageinfo.set_db_info(self._spudb, self._table)
self._pageinfo.eval_total_pagenumber(where,
table_list,
join_query,
where_cond,
real_eval = real)
limit_cond = self._pageinfo.sql()
elif self._limit:
limit_cond = self._limit
groupby_cond = None
if self._groupby:
if type(self._groupby) == str:
groupby_cond = " " + self._groupby
else:
groupby_cond = self._groupby.sql()
sql = self.select(
table_list = table_list,
where_cond = where_cond,
groupby_cond = groupby_cond,
orderby_cond = orderby_cond,
limit_cond = limit_cond,
join_query = join_query,
distinct = self._distinct,
fields = fields)
self.reset()
return sql
class ObjectValue(object):
def __init__(self, obj, obj_dict):
self._obj = obj
self._obj_dict = obj_dict
def __getattr__(self, name):
try:
name = self._obj._c_name(name)
value = self._obj_dict[name]
return value
except KeyError:
raise AttributeError(name)
return None
class SpuTableScheme(object):
def __init__(self, db_cls, table_as=''):
self._table_ = db_cls._table_
self._table = db_cls._table_
self._table_as = table_as
self._field_names_and_types = []
self._field_and_type_dict = {}
self._primarykey = None
obj = db_cls(None, None, False)
scheme = obj.make_table_fields()
for key in scheme.keys():
field = scheme[key]
field.set_table_as(table_as)
self.__dict__[key] = field
self._field_names_and_types.append((key, field.type))
self._field_and_type_dict[key] = field.type
obj.map_db_field(update=True)
self._primarykey = obj.get_primarykey()
self._auto_inc = obj.get_autoinc()
del scheme
del obj
def __str__(self):
s = "<%s Table Scheme:\n" % self._table_
for (key, _) in self._field_names_and_types:
s += " %s : %s\n" % (key, str(self.__dict__[key]))
s += '>'
return s
def __repr__(self):
return self.__str__()
def set_table_name(self, table_name):
self._table = table_name
self._table_ = table_name
for (key, _) in self._field_names_and_types:
self.__dict__[key].set_table(table_name)
def get_autoinc(self):
return self._auto_inc
def get_primarykey(self):
return self._primarykey
def table_as(self, _table_as):
self._table_as = _table_as
for (field_name, _) in self._field_names_and_types:
field = self.__dict__[field_name]
field.set_table_as(_table_as)
def get_table_as(self):
return self._table_as
def clean_table_as(self):
self.table_as('')
def field_names(self):
return [field_name for (field_name, _) in self._field_names_and_types]
def field_names_and_types(self):
return self._field_names_and_types
def field_and_type_dict(self):
return self._field_and_type_dict
def is_field(self, field_name):
for (_field_name, _) in self._field_names_and_types:
if _field_name == field_name:
return True
return False
class SpuDBObject(Select, SpuPythonObject):
"""
    Inherits from Select first so that pageinfo from SpuPythonObject does not
    override the pageinfo defined in Select.
    Cache usage: if a spucache object is passed when constructing an SpuDBObject,
    caching is enabled by default and can be disabled with nouse_cache(). With
    caching enabled, data goes through the cache whenever cache_key is not None.
"""
short_obj = None
@classmethod
def short(cls):
from SpuDBObjectShort import SpuDBObjectShort
if not cls.short_obj:
cls.short_obj = SpuDBObjectShort(cls)
return cls.short_obj
@classmethod
def scheme(cls, table_as='', nocache=False):
if nocache:
return SpuTableScheme(cls, table_as)
if not hasattr(cls, '_table_fields'):
scheme = SpuTableScheme(cls, table_as)
cls._table_fields = scheme
return scheme
else:
return cls._table_fields
@classmethod
def table(cls, table_as='', nocache=False):
"""
compatible old version
"""
return cls.scheme(table_as=table_as, nocache=nocache)
@classmethod
def new_table(cls, table_as=''):
scheme = SpuTableScheme(cls, table_as)
cls._table_fields = scheme
return scheme
@classmethod
def object(cls, from_dict=None, server=default_server,
use_cache=True):
from SpuFactory import create_object
obj = create_object(cls, server=server, use_cache=use_cache)
obj.map_db_field()
if from_dict:
obj.from_dict(from_dict)
return obj
@classmethod
def objectlist(cls, server=default_server, use_cache=True):
from SpuFactory import create_listobject
objlist = create_listobject(SpuDBObjectList, cls, server=server,
use_cache=use_cache)
return objlist
@classmethod
def find_one(cls,
cond,
fields=[],
new_field=False,
cache_key=None,
real=False,
fieldlink=None,
join=None,
table=None,
table_as=None,
server=default_server,
use_cache=True,
contain_type=False):
"""
table : (table, table_as)
"""
from SpuFactory import create_object
obj = create_object(cls, server=server, use_cache=use_cache)
if fieldlink:
obj.fieldlink(fieldlink)
if join:
obj.join(join)
if table:
obj.table(table)
if table_as:
obj.table_as(table_as)
if not obj.find(cond, fields, new_field, cache_key, real,
contain_type=contain_type):
return None
return obj
@classmethod
def find_list(cls,
find_cond=None,
fields=None,
cache_key=None,
real=False,
new=True,
fieldlink=None,
join=None,
table=None,
table_as=None,
sort=None,
groupby=None,
pageinfo=None,
limit=None,
server=default_server,
contain_type=False):
"""
table : (table, table_as)
"""
from SpuFactory import create_listobject
objlist = create_listobject(SpuDBObjectList, cls, server=server)
if fieldlink:
objlist.fieldlink(fieldlink)
if join:
objlist.join(join)
if table:
objlist.table(table)
if table_as:
objlist.table_as(table_as)
if sort:
objlist.sort(sort)
if groupby:
objlist.groupby(groupby)
if pageinfo:
objlist.pageinfo(pageinfo)
if limit:
objlist.limit(limit)
if not objlist.find(find_cond,
fields,
cache_key,
real,
new,
contain_type=contain_type):
return None
return objlist
def __init__(self,
dbobject_or_table,
spudb,
spucache = None,
debug = False,
filter_null = True,
rollbackQueue = None):
SpuPythonObject.__init__(self)
if type(dbobject_or_table) is str:
table = dbobject_or_table
# spudbobject instance or spudbobject subclass
elif isinstance(dbobject_or_table, SpuDBObject) or (isclass(dbobject_or_table) and issubclass(dbobject_or_table, SpuDBObject)):
table = dbobject_or_table._table_
else:
assert 0, "SpuDBObject table type failed"
Select.__init__(self, table)
self._filter_null = filter_null
self._value = None
self._spudb = spudb
self._spucache = spucache
self._use_cache = spucache != None
self._table = table
self._debug = debug
self._field_attrs = {}
self._fields = []
self._auto_inc = None
self._primarykey = None
self._default_value = None
self._field_view = None
self._new_field = False
self._rollback_queue = rollbackQueue if rollbackQueue else default_rollbackQueue
def _cache(self, cache_key):
return self._use_cache and cache_key
def _is_field(self, value):
if not hasattr(value, '__class__'):
return False
return value.__class__ == Field
def _is_autoinc_field(self, value):
if not self._is_field(value):
return False
return value.auto_inc
def _is_primarykey_field(self, value):
if not self._is_field(value):
return False
return value.primarykey
def _is_field_attr_name(self, name):
if name[0:13] == "__field_attr_" and name[-2:] == '__':
return True
return False
def _get_field_attr_name(self, name):
return "__field_attr_" + name + "__"
def _get_field_name(self, field_name):
if field_name[0:13] == "__field_attr_":
return field_name[13:-2]
return None
def _c_name(self, name):
field_attrs = self.__dict__.get('_field_attrs', None)
if not field_attrs:
return name
if field_attrs.get(name, None):
return self._get_field_attr_name(name)
return name
def _get_db_fields(self):
fields = []
for key in self.__dict__.keys():
value = self.__dict__[key]
if self._is_field(value):
if not self._is_field_attr_name(key):
key = self._set_field_attr(key, value)
fields.append(key)
if self._is_autoinc_field(value):
self._auto_inc = key
if self._is_primarykey_field(value):
self._primarykey = key
if not self._value:
self._value = ObjectValue(self, self.__dict__)
return fields
def _get_fields(self):
field_names = self.db_field()
fields = []
for name in field_names:
value = self._field(name)
fields.append(value)
return fields
def _field(self, name):
"""
get Field class object
return None if name not field"""
value = self.__dict__.get(name, None)
if not value or not self._is_field(value):
return None
return value
def _field_value(self, name):
field = self._field(name)
if not field:
return None
return field.value
def _set_field_value(self, name, value, new=False):
name = self._c_name(name)
if not self.__dict__.has_key(name):
if not new and global_assert_config.get('model_field_define_assert', True):
assert None, "%s Module Not Default %s Field" % (self.__class__.__name__, name)
self.add_field(name, value)
name = self._c_name(name)
field_value = self.__dict__[name]
assert self._is_field(field_value), "%s Not Is Field Type" % name
field_value.value = value
def _set_field_attr(self, name, value):
if self.__dict__.get(name, None):
del self.__dict__[name]
value.set_field_name(name)
value.set_table(self._table)
field_name = self._get_field_attr_name(name)
self.__dict__[field_name] = value
self._field_attrs[name] = True
return field_name
def _set_field_default_value_and_type(self):
scheme = self.scheme()
field_types = scheme.field_and_type_dict()
for (field_name, field_type) in field_types.items():
default_value = self.get(field_name, '')
self._set_field_value(field_name,
(field_types.get(field_name, Field.unknow),
default_value))
def _python_object(self):
field_names = self.db_field()
fields = {}
for name in field_names:
value = self._field(name)
            # skip fields that were never set
if value.value == None and self._filter_null:
continue
self.setup_field_filter(value.value)
if self.is_python_object(value.value):
pvalue = value.value.python_object()
elif isinstance(value.value, datetime.datetime):
pvalue = SpuDateTime.datetime2str(value.value)
else:
pvalue = value.value
field_name = self._get_field_name(name)
fields[field_name] = pvalue
self.process_field_filter(fields)
return fields
def __setattr__(self, name, value):
return self.set_db_field_value(name, value)
def __setitem__(self, name, value):
return self.set_db_field_value(name, value)
def __getattr__(self, name):
return self.get_db_field_value(name)
def __getitem__(self, name):
return self.get_db_field_value(name)
def __str__(self):
table = get_table_name(self)
if not table:
            table = 'Unknown'
field_dict = self.python_object()
string = json_string(field_dict, format = True)
return "<SpuDBObject\n DBServer:%s\n Table:%s Detail:%s>" % (
self._spudb,
table,
util.to_string(string))
def __repr__(self):
return "<SpuDBObject>"
def __add__(self, obj):
new_obj = SpuDBObject(self._table, self._spudb, self._spucache, self._debug)
new_obj.append(self)
new_obj.append(obj)
return new_obj
def __iadd__(self, obj):
self.append(obj)
return self
def set_db_field_value(self, name, value):
name = self._c_name(name)
# new attribute
if not self.__dict__.has_key(name):
self.__dict__[name] = value
else:
            field_value = self.__dict__[name]
            # write field
            if self._is_field(field_value) and not self._is_field(value):
                field_value.value = value
                field_value.set_writed()
else:
self.__dict__[name] = value
def get(self, name, default):
name = self._c_name(name)
value = self.__dict__.get(name, default)
if default is value:
return default
if self._is_field(value):
return value.value
else:
return value
def get_db_field_value(self, name):
try:
name = self._c_name(name)
value = self.__dict__[name]
if self._is_field(value):
return value.value
else:
return value
except KeyError:
raise AttributeError(name)
return None
def clone(self):
object = SpuDBObject(self._table,
self._spudb,
self._spucache,
self._debug,
self._filter_null)
return object
@property
def values(self):
return self._value
def append(self, obj):
field_names = obj.db_field()
for name in field_names:
value = obj._field(name)
if value.value == None:
continue
name = obj._get_field_name(name)
self.add_field(name, value)
return self
def make_table_fields(self):
table_fields = {}
for key in self.__dict__.keys():
value = self.__dict__[key]
if self._is_field(value):
value.set_field_name(key)
value.set_table(self._table)
table_fields[key] = value
return table_fields
def use_cache(self):
self._use_cache = True
def nouse_cache(self):
self._use_cache = False
def get_field_view_object(self):
return self._field_view
def field_view(self, field_view):
self._field_view = FieldView(field_view)
def get_field_view(self):
if self._field_view:
return self._field_view.get_field_view()
return None
def set_field_default(self, default):
self._default_value = default
def add_field(self, name, value):
if isinstance(value, Field):
field = value
else:
field = Field(type(value), value)
field_name = self._set_field_attr(name, field)
self._fields.append(field_name)
def clear_all_field(self):
fields = self._get_fields()
for field in fields:
field.value = None
def map_db_field(self, update=False):
if not self._fields or update:
self._fields = self._get_db_fields()
def from_db(self, row, new=False, contain_type=False):
# add new field into _fields
# _fields is not empty, db_field not execute
# so, execute map_db_field init _fields
if new:
self.map_db_field()
field_types = None
if contain_type:
scheme = self.scheme()
field_types = scheme.field_and_type_dict()
for f in row.keys():
field = f
value = row[f]
if value == None:
value = self._default_value.get_default_value(field)
if contain_type:
value = (field_types.get(field, Field.unknow), value)
self._set_field_value(field, value, new)
def db_field(self):
"""
return original field name
"""
self.map_db_field()
return self._fields
def object_field_names(self):
"""
return current object all field, contain call add_field
"""
obj_fields = self.make_table_fields()
fields = []
for key in obj_fields.keys():
fields.append(self._get_field_name(key))
return fields
def db_writed_field_and_value(self):
fields = self.db_field()
writed_fields = []
for field in fields:
value = self._field(field)
if value and value.writed:
writed_fields.append((field, value))
return writed_fields
def execsql(self, sql):
"""exec raw sql, update, insert, delete"""
return db_execsql(self._spudb, self._spucache, sql)
def query(self, sql):
"""query database by raw sql"""
return db_query(self._spudb, self._spucache, sql)
def get_autoinc(self):
"""
        return the auto_inc field name
"""
if self._auto_inc:
return self._get_field_name(self._auto_inc)
return None
def get_primarykey(self):
"""
        return the primarykey field name
"""
if self._primarykey:
return self._get_field_name(self._primarykey)
return None
def count(self, cond):
"""return count if find result > 0, other return None
"""
field = Alias('*', 'count')
if self.find(cond, fields=FuncCount(field), new_field=True):
return self['count']
return None
def from_dict(self, _dict):
for key, value in _dict.items():
self[key] = value
def sub_table(self, table, table_as=''):
self._new_field = True
return super(SpuDBObject, self).sub_table(table, table_as=table_as)
def find(self, cond, fields=[], new_field=False,
cache_key=None, real=False, contain_type=False):
""" return True if find result > 0, other return False"""
# first use class object define
if self._new_field:
new_field = self._new_field
self._new_field = False
if fields:
self.field_view(fields)
fields = self.get_field_view()
self._default_value = FieldDefaultValue(fields)
sql = self.sql(where=cond, fields=fields, real=real)
if sql == None:
return False
r = self.query(sql)
if not r:
if contain_type:
self._set_field_default_value_and_type()
return False
r = r[0]
        # if find() returned False, accessing an object field yields its default value rather than None
self.clear_all_field()
self.from_db(r, new=new_field, contain_type=contain_type)
return True
def gen_insert_sql(self, autoinc=True, ignore=False):
fields = self.db_field()
if not fields:
return (None, None)
sql = []
values = []
reset = []
ignore_sql = ' ignore ' if ignore else ''
sql.append("insert %s into " % ignore_sql)
sql.append(self._table)
sql_fields = []
for f in fields:
field = self._field(f)
if autoinc and self._is_autoinc_field(field):
continue
reset.append(field)
value = self._field_value(f)
sql_fields.append(self._get_field_name(f))
values.append(value)
sql.append("(%s) " % ','.join(sql_fields))
sql.append("values (%s)" % ','.join(self._spudb.escape(values)))
sql = sql_join(sql)
return (sql, reset)
def insert(self, autoinc=True, rollback=False, ignore=False):
"""
return value:
-1 no fields
-2 Duplicate entry
other number last id
"""
(sql, reset) = self.gen_insert_sql(autoinc, ignore=ignore)
if sql is None:
return -1
try:
lastid = self.execsql(sql)
except DBDuplicateEntry:
return -2
if rollback and self._rollback_queue:
self._rollback_queue.add_rollback_point(SpuInsertRollbackPoint(self, lastid))
if self._auto_inc:
idvalue = self._field(self._auto_inc)
assert idvalue, "No Auto Inc Field"
idvalue.value = lastid
# reset
for v in reset:
v.no_writed()
sql = None
values = None
reset = None
return lastid
def _get_default_cond(self, cond):
if type(cond) == str and cond == "":
if self._primarykey:
default_key = self._primarykey
else:
default_key = self._auto_inc
assert default_key, "Not Setting primarykey or auto_inc, Please Check " \
"%s Define" % self
cond_value = self._field(default_key)
assert cond_value, ("default_key: %s Value Is None" %
self._get_field_name(default_key))
cond = "%s = %s" % (self._get_field_name(default_key), cond_value.value)
cond = get_where_cond(cond)
return cond
def gen_update_sql(self, cond):
cond = self._get_default_cond(cond)
fields = self.db_writed_field_and_value()
if not fields:
return (None, None)
sql = []
sql.append("update ")
sql.append(self._table)
sql.append(" set ")
for f in fields:
sql.append("%s = %s" % ((self._get_field_name(f[0]), self._spudb.escape(f[1].value))))
sql.append(", ")
sql.pop()
sql.append(" where %s" % cond)
sql = sql_join(sql)
return (sql, fields)
def update(self, cond = "", rollback = False):
""" update self(auto_inc field) if not cond"""
if rollback and self._rollback_queue:
self._rollback_queue.add_rollback_point(SpuUpdateRollbackPoint(self, cond))
(sql, reset) = self.gen_update_sql(cond)
if not sql:
return
self.execsql(sql)
# reset
for n, v in reset:
v.no_writed()
sql = None
def gen_delete_sql(self, cond):
cond = self._get_default_cond(cond)
sql = []
sql.append("delete from ")
sql.append(self._table)
sql.append(" where %s" % cond)
sql = sql_join(sql)
return sql
def delete(self, cond = "", rollback = False):
"""delete self(auto_inc field) if not cond"""
if rollback and self._rollback_queue:
self._rollback_queue.add_rollback_point(SpuDeleteRollbackPoint(self, cond))
sql = self.gen_delete_sql(cond)
self.execsql(sql)
self.clear_all_field()
sql = None
def set_db(self, spudb):
self._spudb = spudb
def db(self):
return self._spudb
def set_cache(self, spucache):
self._spucache = spucache
def cache(self):
return self._spucache
class Function(object):
def __init__(self, field, alias = None):
self._field = field
self._field_attrs = None
self._field_name = get_field_original_name(field)
if isinstance(field, Alias):
assert not alias, 'Function field alias is duplicate'
self._field_alias = field.alias
if alias:
self._field_alias = alias
def field_name(self):
return self._field_name
def _sql(self):
raise NotImplInterface(self.__class__, '_sql')
def sql(self):
s = self._sql()
if hasattr(self, '_field_alias') and self._field_alias:
s = "%s as %s" % (s, self._field_alias)
return s
class FuncCount(Function):
def _sql(self):
return " count(%s)" % self.field_name()
class FuncSum(Function):
def _sql(self):
return " sum(%s)" % self.field_name()
class FuncMax(Function):
def _sql(self):
return " max(%s)" % self.field_name()
class FuncMin(Function):
def _sql(self):
return " min(%s)" % self.field_name()
class Alias(CondNode):
def __init__(self, name, alias):
CondNode.__init__(self)
self._field = None
if isinstance(name, Field):
self._field = name
self.name = name.get_field_name()
self.type = name.type
else:
self.name = name
self.type = None
self.alias = alias
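# Illustrative aggregate usage (sketch only):
#
#   FuncCount(Alias('*', 'count')).sql()   # -> " count(*) as count"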
class SqlNode(CondNode):
def __init__(self, sql):
CondNode.__init__(self)
self._sql = sql
node = (CondNode._sql, sql, None)
self._nodes.append(node)
def sqlnode_sql(self):
return self._sql
RawSql = SqlNode
class SqlNoneNode(CondNode):
def __init__(self):
CondNode.__init__(self)
SqlNone = SqlNoneNode
class PageInfo(object):
def __init__(self, pagenumber, pagecount = 10, debug = None, countcache = None):
self.current_pagenumber = pagenumber
self.pagenumber = pagenumber
self.pagecount = pagecount
self._debug = debug
self.total_pagenumber = None
self.total_record = None
self.page_start = None
self._spudb = None
self._table = None
self._sql_table_list = None
self._sql_join = None
self._sql_cond = None
self._count_cache = countcache
def set_current_pagenumber(self, pagenumber):
self.current_pagenumber = pagenumber
def count_cache(self, count_cache):
self._count_cache = count_cache
def set_db_info(self, spudb, table):
self._spudb = spudb
self._table = table
def fixed_pagetotal(self, total):
self.total_pagenumber = total
def get_total_pagenumber(self):
self.total_pagenumber = self.total_record / self.pagecount + (1 if self.total_record % self.pagecount else 0)
return self.total_pagenumber
def count_sql(self):
sql = "select count(*) from %s" % self._table
if self._sql_table_list:
sql += self._sql_table_list
if self._sql_join:
sql += self._sql_join
if self._sql_cond:
sql += " where %s" % self._sql_cond
return sql
def get_record_count(self, where):
_logging.set_class_func('PageInfo', 'get_record_count')
optimize_count = SpuDBObjectConfig.get_config('sql_optimize_count', True)
sql = self.count_sql()
if optimize_count:
c = SpuSqlOptimize.optimize_count(where)
if c != None:
_logging.flowpath_db('Optimized Count Query: (%s) result: %s' % (sql, c))
return c
count = None
if self._count_cache:
count = self._count_cache.get_count(sql)
if count == None:
_logging.flowpath_db('Count Cache Miss')
r = self._spudb.query(sql)
if r:
count = r[0].values()[0]
self._count_cache.set_count(sql, count)
else:
r = self._spudb.query(sql)
if r:
count = r[0].values()[0]
return count
def eval_total_pagenumber(self, where = None,
table_list = None,
join = None,
cond = None,
real_eval = False):
if self.total_pagenumber != None and not real_eval:
return self.total_pagenumber
assert self._spudb, "spudb is None, use only pageinfo on SpuDBObjectList. please check whether the object is SpuDBObjectList"
self._sql_table_list = table_list
self._sql_join = join
self._sql_cond = cond
count = self.get_record_count(where)
if not count:
self.total_pagenumber = 0
self.total_record = 0
self.page_start = 0
return
total_record = count
self.total_record = total_record
return self.get_total_pagenumber()
def eval_page_start(self):
page = self.pagenumber
if page <= 0:
page = 1
page -= 1
self.page_start = page * self.pagecount
return self.page_start
def sql(self):
start = self.eval_page_start()
count = self.pagecount
return ' limit %s,%s' % (start, count)
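# Illustrative pagination (sketch only; assumes an SpuDBObjectList `objlist`):
#
#   page = PageInfo(pagenumber=2, pagecount=20)
#   objlist.pageinfo(page).find(cond)   # appends " limit 20,20" to the query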
class SubQuery(CondNode, Select):
def __init__(self, table):
CondNode.__init__(self)
Select.__init__(self, table)
self._sql = ''
if type(table) != str:
if hasattr(table, '_table'):
table = table._table
elif hasattr(table, '_table_'):
table = table._table_
else:
assert None, "Error Table"
self._table = table
self._where = None
self._fields = None
def find(self, where=None, fields=None):
if where:
self._where = get_where_cond(where)
if fields:
self._fields = fields
return self
def subquery(self, subquery):
assert None, 'SubQuery not to contain SubQuery'
def sql(self):
return '(' + Select.sql(self, self._where,
fields = self._fields) + ')'
class Join(object):
"""in cond, Field Type is get value, FN(FieldName) is get FieldName """
def __init__(self, table, cond, table_as=''):
self._table = table
self._cond = cond
self._type = 0
self._table_as = table_as
def _get_join_type(self):
if self._type == 1:
return 'left'
elif self._type == 2:
return 'right'
elif self._type == 3:
return 'full'
else:
return ''
def left(self):
self._type = 1
return self
def right(self):
self._type = 2
return self
def full(self):
self._type = 3
return self
def sql(self):
s = [' ', self._get_join_type(), ' join (']
s.append(get_table_name(self._table, self._table_as))
s.append(')')
s.append(' on (')
s.append(get_join_cond(self._cond))
s.append(')')
return sql_join(s)
def cond_clear_obj(self):
self._cond.clear_obj()
class Table(object):
def __init__(self, table, table_as):
self._table = table
self._table_as = table_as
def sql(self):
tablename = get_table_name(self._table, self._table_as)
return ", %s" % tablename
class FieldCondList(object):
def add_field(self, expression, fields):
cond = [expression]
if type(fields) == list:
for f in fields:
cond.append(get_field_name(f))
cond.append(',')
cond.pop()
else:
cond.append(get_field_name(fields))
return sql_join(cond)
class Sort(FieldCondList):
desc = 'desc'
asc = 'asc'
def __init__(self, fields):
self._fields = fields
def field_and_type(self, ft):
field = ft[0]
sort_type = ft[1]
if isinstance(field, Field) or hasattr(field, 'get_field_name'):
field = field.get_field_name()
elif isinstance(field, Alias):
field = field.alias
elif isinstance(field, Function):
field = field.sql()
else:
assert type(field) == str, "Unknow Field Type: %s" % str(field)
return "%s %s" % (field, sort_type)
def sql(self):
cond = [' order by ']
# ('field', desc)
if type(self._fields) == tuple:
cond.append(self.field_and_type(self._fields))
# ['field', table.field, Alias, ('field', desc)]
elif type(self._fields) == list:
for f in self._fields:
if type(f) == tuple:
cond.append(self.field_and_type(f))
else:
cond.append(get_field_name(self._fields))
cond.append(',')
cond.pop()
else:
cond.append(get_field_name(self._fields))
sql = sql_join(cond)
return sql
class Like(CondNode):
def __init__(self, field, pattern):
"""field like 'pattern'"""
CondNode.__init__(self)
fieldname = get_field_name(field)
node = (CondNode._like, fieldname, self._escape(pattern))
self._nodes.append(node)
class FuzzyLike(CondNode):
def __init__(self, field, pattern):
"""field like '%pattern%'"""
CondNode.__init__(self)
fieldname = get_field_name(field)
pattern = "%%%s%%" % pattern
node = (CondNode._like, fieldname, self._escape(pattern))
self._nodes.append(node)
class In(CondNode):
def __init__(self, field, set, optimize_subquery = None):
"""in (field, 2, 'value')"""
self._field = field
self._set = set
self._optimize = False
CondNode.__init__(self)
if optimize_subquery == None:
optimize_subquery = SpuDBObjectConfig.get_config('sql_optimize_in_subquery', True)
fieldname = get_field_name(field)
if isinstance(set, SubQuery):
if optimize_subquery:
self._set = SpuSqlOptimize.optimize_in_subquery(self, set)
self._optimize = True
if self._set:
s = ','.join(self._set)
else:
s = None
else:
s = set.sql()
else:
s = self.value_list(set)
node = (CondNode._in, fieldname, s)
self._nodes.append(node)
def value_list(self, set):
vlist = []
for value in set:
if type(value) == str:
vlist.append(Field.escape(value))
elif isinstance(value, Field):
vlist.append(str(value.value))
else:
vlist.append(str(value))
vlist.append(', ')
if vlist:
vlist.pop()
return sql_join(vlist)
class NotIn(In):
def __init__(self, field, set):
"""not in (field, 2, 'value')"""
CondNode.__init__(self)
fieldname = get_field_name(field)
if isinstance(set, SubQuery):
s = set.sql()
else:
s = self.value_list(set)
node = (CondNode._not_in, fieldname, s)
self._nodes.append(node)
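# Illustrative condition helpers (sketch only; assumes a scheme `t`; the exact
# text depends on escaping):
#
#   In(t.id, [1, 2, 3]).sql()        # -> roughly "t.id in (1, 2, 3)"
#   FuzzyLike(t.name, 'foo').sql()   # -> roughly "(t.name like '%foo%')"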
class GroupBy(FieldCondList):
def __init__(self, fields):
self._fields = fields
def sql(self):
return self.add_field(' group by ', self._fields)
class SpuDBObjectList(SpuPythonObjectList, Select):
def __init__(self, dbobject_or_table, spudb, spucache = None, debug = False, filter_null = True):
SpuPythonObjectList.__init__(self)
table = None
DBObject = None
if type(dbobject_or_table) is str:
table = dbobject_or_table
DBObject = None
# spudbobject instance or spudbobject subclass
elif isinstance(dbobject_or_table, SpuDBObject) or (isclass(dbobject_or_table) and issubclass(dbobject_or_table, SpuDBObject)):
table = dbobject_or_table._table_
DBObject = dbobject_or_table
else:
assert 0, "SpuDBObjectList table type failed"
Select.__init__(self, table)
self._filter_null = filter_null
self._table = table
self._spudb = spudb
self._spucache = spucache
self._use_cache = spucache != None
self._debug = debug
self._objlist = []
self._field_view = None
self._dbobject = DBObject
def _cache(self, cache_key):
return self._use_cache and cache_key
def __iter__(self):
return self._objlist.__iter__()
def __str__(self):
string = json_string(self._objlist, format = True)
return "<SpuDBObjectList Table:%s Detail:%s>" % (self._table, string)
def __repr__(self):
objlist = self._objlist if hasattr(self, '_objlist') else []
string = json_string(objlist, format = True)
return "<SpuDBObjectList Table:%s Detail:%s>" % (self._table, string)
def __add__(self, objlist):
        new_obj = SpuDBObjectList(self._table, self._spudb, self._spucache, self._debug)
for obj in self._objlist:
new_obj.append(obj)
if isinstance(objlist, (list, SpuDBObjectList)):
for obj in objlist:
new_obj.append(obj)
else:
new_obj.append(objlist)
return new_obj
def __iadd__(self, objlist):
if isinstance(objlist, (list, SpuDBObjectList)):
for obj in objlist:
self.append(obj)
else:
self.append(objlist)
return self
def __len__(self):
return len(self._objlist)
def __getslice__(self, index1, index2):
        new_obj = SpuDBObjectList(self._table, self._spudb, self._spucache, self._debug)
objlist = self._objlist[index1:index2]
for obj in objlist:
new_obj.append(obj)
return new_obj
def __delitem__(self, index):
if type(index) != int:
raise TypeError('index type not int')
del self._objlist[index]
def __getitem__(self, index):
return self._objlist[index]
def remove(self, value):
self._objlist.remove(value)
def get(self, key, default):
return default
def clone(self):
object_list = SpuDBObjectList(self._table,
self._spudb,
self._spucache,
self._debug,
self._filter_null)
return object_list
def append(self, obj):
self._objlist.append(obj)
def insert(self, idx, obj):
self._objlist.insert(idx, obj)
def clear(self):
self._objlist = []
def get_field_view_object(self):
return self._field_view
def field_view(self, field_view):
self._field_view = FieldView(field_view)
def get_field_view(self):
if self._field_view:
return self._field_view.get_field_view()
return None
def get_pythonobject_list(self):
return self._objlist
def _find(self,
find_cond,
fields,
cache_key,
real,
new,
contain_type):
"""
sort_type: 'desc' or 'asc', default asc
return result total
"""
if new:
self._objlist = []
field_default = FieldDefaultValue(fields)
sql = self.sql(find_cond, fields, real = real)
if sql == None:
return 0
r = db_query(self._spudb, self._spucache, sql)
if not r:
return 0
for item in r:
# no use cache
if self._dbobject:
obj = self._dbobject(self._spudb, self._spucache, self._debug)
else:
obj = SpuDBObject(self._table, self._spudb, None, self._debug,
filter_null = self._filter_null)
obj.clear_all_field()
obj.map_db_field()
obj.set_field_default(field_default)
obj.from_db(item, new=True, contain_type=contain_type)
self._objlist.append(obj)
return len(r)
def find(self,
find_cond=None,
fields=None,
cache_key=None,
real=False,
new=True,
contain_type=False):
"""
return result count
"""
if fields:
self.field_view(fields)
fields = self.get_field_view()
r = self._find(find_cond, fields, cache_key,
real, new, contain_type=contain_type)
return r
def update(self, field_value_dict, cond=""):
"""
batch SpuDBObject update
"""
for dbobj in self._objlist:
if isinstance(dbobj, SpuDBObject):
for field in field_value_dict:
dbobj.set_db_field_value(field, field_value_dict[field])
dbobj.update(cond=cond)
class SpuSqlOptimize(object):
@classmethod
def optimize_in_subquery(cls, object, subquery):
"""return [x,x,x] return None where Not Optimize"""
spudb = SpuDBManager.get_spudb()
sql = subquery.sql()
r = spudb.query(sql)
if not r:
object._close_query = True
return None
set = []
for record in r:
            assert len(record.keys()) == 1, 'Optimize error: record does not have exactly one key'
set.append(str(record.items()[0][1]))
return set
@classmethod
def optimize_count(cls, where):
"""return count number, return None where Not Optimize"""
# count cond is in and
# set is python list and field is primary
if isinstance(where, In) and type(where._set) == list:
if is_unique(where._field):
return len(where._set)
return None
|
from ..utils import Object
class UpdateTermsOfService(Object):
"""
New terms of service must be accepted by the user. If the terms of service are declined, then the deleteAccount method should be called with the reason "Decline ToS update"
Attributes:
ID (:obj:`str`): ``UpdateTermsOfService``
Args:
terms_of_service_id (:obj:`str`):
Identifier of the terms of service
terms_of_service (:class:`telegram.api.types.termsOfService`):
The new terms of service
Returns:
Update
Raises:
:class:`telegram.Error`
"""
ID = "updateTermsOfService"
def __init__(self, terms_of_service_id, terms_of_service, **kwargs):
self.terms_of_service_id = terms_of_service_id # str
self.terms_of_service = terms_of_service # TermsOfService
@staticmethod
def read(q: dict, *args) -> "UpdateTermsOfService":
terms_of_service_id = q.get('terms_of_service_id')
terms_of_service = Object.read(q.get('terms_of_service'))
return UpdateTermsOfService(terms_of_service_id, terms_of_service)
|
import PIL
from PIL import ImageFont
from PIL import Image
from PIL import ImageDraw
# font = ImageFont.truetype("Arial-Bold.ttf",14)
font = ImageFont.truetype("Arial.ttf",14)
img=Image.new("RGBA", (500,250),(255,255,255))
draw = ImageDraw.Draw(img)
draw.text((0, 0),"This is a test",(0,0,0),font=font)
draw = ImageDraw.Draw(img)
img.save("a_test.png")
|
class Solution:
def isAnagram(self, s: str, t: str) -> bool:
# return sorted(t) == sorted(s)
# return Counter(t) == Counter(s)
if len(s) != len(t):
return False
a, b = {}, {}
for i in range(len(s)):
a[s[i]] = a.get(s[i], 0) + 1
b[t[i]] = b.get(t[i], 0) + 1
return a == b
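# Example (illustrative): Solution().isAnagram("anagram", "nagaram") -> True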
# use hash maps to store letters and their frequencies |
# MIT License
#
# (C) Copyright [2020] Hewlett Packard Enterprise Development LP
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
# OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
# ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
# OTHER DEALINGS IN THE SOFTWARE.
""" Nested Dict class """
from collections.abc import Mapping
from copy import deepcopy
def deepupdate(self, other, shallow=False):
"""Recursivley updates `self` with items from `other`.
By default, values from `other` will be deepcopy-ed into `self`. If
`shallow=True`, values will simply be assigned, resulting in a "shallow"
copy.
"""
# pylint: disable=invalid-name
for k, v in other.items():
# Cases: (self.get(k), v) is
# * (Mapping, Mapping) -> deepupdate(self[k], v)
# * (Mapping, not Mapping) -> self[k] = v
# * (not Mapping, Mapping) -> self[k] = v
# * (not Mapping, not Mapping) -> self[k] = v
self_k = self.get(k)
if isinstance(self_k, Mapping) and isinstance(v, Mapping):
deepupdate(self_k, v)
else:
self[k] = v if shallow else deepcopy(v)
return self
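# Example (illustrative):
#
#   base = {"a": {"x": 1}, "b": 2}
#   deepupdate(base, {"a": {"y": 3}})
#   # base is now {"a": {"x": 1, "y": 3}, "b": 2}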
class NestedDict(dict):
"""dict object that allows for period separated gets:
a_config.get('some.key', default) ==
a_config.get('some', {}).get('key', default)
given:
a_config == {"some": {"key": "some value"}}"""
def __repr__(self):
dictrepr = dict.__repr__(self)
return f"{type(self).__name__}({dictrepr})"
@classmethod
def to_yaml(cls, representer, node):
""" How to represent for yaml lib """
return representer.represent_dict(dict(node))
def set_deep(self, key, value, update=False):
""" Deep set a value. \n
Ex: `d.set_deep('a.b.c', 'foo')` is the same as: \n
`d.setdefault('a', {}).setdefault('b', {})['c'] = 'foo'`
"""
setter = self
keys = key.split('.')
last = keys.pop()
# pylint: disable=invalid-name
for k in keys:
setter = setter.setdefault(k, {})
if update and last in setter:
deepupdate(setter[last], value)
else:
setter[last] = deepcopy(value)
def get(self, key, default=None):
""" Deep get a value. \n
        Ex: `d.get('a.b.c', 'bar')` is the same as: \n
`d.get('a', {}).get('b', {}).get('c', 'bar')`
"""
keys = key.split('.')
found = {}
# pylint: disable=invalid-name
for k, v in self.items():
found[k] = v
for k in keys:
if not isinstance(found, dict):
return default
found = found.get(k)
if found is None:
return default
return found
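# Example (illustrative):
#
#   nd = NestedDict({"some": {"key": "some value"}})
#   nd.get("some.key")            # -> "some value"
#   nd.set_deep("a.b.c", "foo")   # nd now also contains {"a": {"b": {"c": "foo"}}}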
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import sys
import os
import subprocess
import linecache
import click
# Number of problems present in problems.txt
TOTAL_PROBLEMS = 202
def get_filename(problem):
"""Returns filename in the form `001.py`"""
return '{:03d}.py'.format(problem)
def get_solution(problem):
"""Returns the solution to a given problem"""
solutionsFile = os.path.join(os.path.dirname(__file__), 'solutions.txt')
line = linecache.getline(solutionsFile, problem)
return line.split(".", 1)[1][1:-1] # strip space following "." and newline
def verify_answer(problem):
filename = get_filename(problem)
if not os.path.isfile(filename):
click.secho('Error: "{}" not found.'.format(filename), fg='red')
sys.exit(1)
click.echo('Checking "{}" against solution: '.format(filename), nl=False)
try:
cmd = 'python {}'.format(filename)
output = subprocess.check_output(cmd, shell=True)
except subprocess.CalledProcessError:
click.secho('Error calling "{}".'.format(filename), fg='red')
sys.exit(1)
# Strip newline from end of output if output is not a lone newline.
# This behaviour is favourable to stripping all whitespace with strip()
    # as stripping all newlines from the output may inhibit debugging done by
    # the user (if they were to use multiple print statements in their code
    # while in the process of attempting to solve the problem).
try:
if output[-1] == '\n':
output = output[:-1]
except IndexError:
output = "[no output]"
isCorrect = output == get_solution(problem)
click.secho(output, bold=True, fg=('green' if isCorrect else 'red'))
return isCorrect
def get_problem(problem):
problemsFile = os.path.join(os.path.dirname(__file__), 'problems.txt')
problemLines = []
with open(problemsFile, 'r') as file:
isProblemText = False
sequentialBreaks = 0
for line in file:
if line == 'Problem {}\n'.format(problem):
isProblemText = True
if isProblemText:
if line == '\n':
sequentialBreaks += 1
else:
sequentialBreaks = 0
if sequentialBreaks >= 2:
break
else:
problemLines.append(line[:-1])
return '\n'.join(problemLines[3:])
def generate_file(problem, default=True):
click.confirm("Generate file for problem #{}?".format(problem), default=default, abort=True)
problemText = get_problem(problem)
filename = get_filename(problem)
if os.path.isfile(filename):
click.secho('"{}" already exists. Overwrite?'.format(filename), fg='red', nl=False)
click.confirm('', abort=True)
problemHeader = 'Project Euler Problem #{}\n'.format(problem)
problemHeader += '=' * len(problemHeader) + '\n\n'
with open(filename, 'w') as file:
file.write('"""\n')
file.write(problemHeader)
file.write(problemText)
file.write('"""\n\n\n')
click.echo('Successfully created "{}".'.format(filename))
def generate_first_problem():
click.echo("No Project Euler files found in the current directory.")
generate_file(1)
sys.exit()
def view_solution(problem):
click.confirm("View answer to problem #{}?".format(problem), abort=True)
click.echo("The answer to problem #{} is ".format(problem), nl=False)
click.secho(get_solution(problem), bold=True, nl=False)
click.echo(".")
def preview_problem(problem):
click.secho("Project Euler Problem #{}".format(problem), bold=True)
click.echo(get_problem(problem)[:-1]) # strip trailing newline
def determine_largest_problem():
    for problem in reversed(range(1, TOTAL_PROBLEMS + 1)):
if os.path.isfile(get_filename(problem)):
return problem
else:
return False
help = {
'cheat': "View the answer to a problem.",
'generate': "Generates Python file for a problem.",
'skip': "Generates Python file for the next problem.",
'preview': "Prints the text of a problem.",
'verify': "Verifies the solution to a problem.",
}
@click.command(name='EulerPy')
@click.argument('problem', default=0, type=click.IntRange(0, TOTAL_PROBLEMS))
@click.option('--cheat', '-c', 'option', flag_value='cheat', help=help['cheat'])
@click.option('--generate', '-g', 'option', flag_value='generate', help=help['generate'])
@click.option('--skip', '-s', 'option', flag_value='skip', help=help['skip'])
@click.option('--preview', '-p', 'option', flag_value='preview', help=help['preview'])
@click.option('--verify', '-v', 'option', flag_value='verify', help=help['verify'])
def main(option, problem):
"""Python tool to streamline Project Euler."""
# No option given
if option is None:
if problem == 0:
problem = determine_largest_problem()
# No Project Euler files in current directory
if not problem:
generate_first_problem()
# If correct answer was given, generate next problem file
if verify_answer(problem):
generate_file(problem + 1)
else:
if os.path.isfile(get_filename(problem)):
verify_answer(problem)
else:
generate_file(problem)
else:
# Handle options that ignore a problem argument
if option == 'skip':
problem = determine_largest_problem()
click.echo("Current problem is problem #{}.".format(problem))
generate_file(problem + 1, default=False)
# Handle other options
else:
if problem == 0:
problem = determine_largest_problem()
if not problem:
if option == 'preview':
problem = 1
else:
generate_first_problem()
funcs = {
'cheat': view_solution,
'generate': generate_file,
'preview': preview_problem,
'verify': verify_answer,
}
# Execute function
funcs[option](problem)
sys.exit()
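# Hypothetical command-line usage, assuming this module is installed as an
# `euler` console script (the actual entry-point name may differ):
#   $ euler           # verify the most recent problem file, or create 001.py
#   $ euler 5 -p      # preview the text of problem 5
#   $ euler -s        # skip ahead and generate the next problem file
#   $ euler 7 -c      # view the answer to problem 7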
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Wed Mar 25 21:55:18 2020
@author: admangli
"""
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
dataset = pd.read_csv('Mall_Customers.csv')
print(dataset.head())
X = dataset.iloc[:, 3:].values
#%%
# Use the elbow method (WCSS vs. number of clusters) to pick k
from sklearn.cluster import KMeans
WCSS = []
for i in range(1, 11):
clusterer = KMeans(n_clusters = i, init = 'k-means++', max_iter = 300, n_init = 100, verbose = 1)
clusterer.fit(X)
WCSS.append(clusterer.inertia_)
#%%
plt.plot(range(1, 11), WCSS)
plt.title('WCSS Elbow curve')
plt.xlabel('Number of clusters')
plt.ylabel('WCSS / Inertia')
plt.show()
#%%
# Apply K means to whole dataset to see the clusters
clusterer = KMeans(n_clusters = 5)
y_kmeans = clusterer.fit_predict(X)
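#%%
# Optional sanity check (not part of the original analysis): the silhouette
# score complements the elbow curve; values closer to 1 indicate
# better-separated clusters.
from sklearn.metrics import silhouette_score
print('Silhouette score for k = 5:', silhouette_score(X, y_kmeans))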
#%% Visualizing the clusters
plt.scatter(X[y_kmeans == 0, 0], X[y_kmeans == 0, 1], s = 100, color = 'red', label = 'High Rollers')
plt.scatter(X[y_kmeans == 1, 0], X[y_kmeans == 1, 1], s = 100, color = 'blue', label = 'Standard')
plt.scatter(X[y_kmeans == 2, 0], X[y_kmeans == 2, 1], s = 100, color = 'green', label = 'Careless')
plt.scatter(X[y_kmeans == 3, 0], X[y_kmeans == 3, 1], s = 100, color = 'yellow', label = 'Careful')
plt.scatter(X[y_kmeans == 4, 0], X[y_kmeans == 4, 1], s = 100, color = 'black', label = 'Sensible')
plt.scatter(clusterer.cluster_centers_[:, 0], clusterer.cluster_centers_[:, 1], s = 200, color = 'magenta', label = 'Centroids')
plt.title('Clusters generated')
plt.xlabel('Annual Income')
plt.ylabel('Spending Score')
plt.legend()
plt.show() |
from seperation import seperate
# Builds the arrays A and C: reads the file and splits it into lines,
# sending each line to seperation, which returns it as an array X.
# Depending on the current line (e.g. line 1) we set C = X;
# for array A we send every subsequent line to seperation and, when it
# comes back, assign A[row] = X, building up A row by row.
def find_arrays_A_C(filename,number_of_rows,number_of_variables,error_counter):
#filename="LP02.LTX"
counter_of_line=0
    #number_of_variables=4  # passed as an argument from the main program
    #number_of_rows=4  # passed as an argument from the main program
    C=[0]*number_of_variables  # create array C
    A=[[0]*number_of_variables for _ in range(number_of_rows)]  # create array A with independent rows
    X=[0]*number_of_variables  # temporary row used to fill A
with open(filename, "r") as f:
data = f.readlines()
for line in data:
words = line.split()
counter_of_line=counter_of_line+1
            counter_of_sentece=0  # reset the column index each time we move to a new line, since we start from the beginning
if('end' in words):
break
if(counter_of_line==1):
if("max" in words):
words.remove("max")
elif("min" in words):
words.remove("min")
#print(words)
error_counter,C=seperate(words,number_of_variables,counter_of_line,error_counter)
#print(C)
#print(seperate(words))
if(counter_of_line==2):
#print(words)
if('s.t.' in words):
words.remove('s.t.')
elif('st' in words):
words.remove('st')
elif('subject' in words):
words.remove('subject')
error_counter,X=seperate(words,number_of_variables,counter_of_line,error_counter)
#print(X)
A[counter_of_line-2]=X
#print(A)
#print(words,'apod')
if(counter_of_line >= 3 ):
error_counter,X=seperate(words,number_of_variables,counter_of_line,error_counter)
j=0
#print(X)
#print('counter of line=',counter_of_line)
try:
A[counter_of_line-2]=X
                except IndexError:
                    # more constraint rows in the file than number_of_rows; ignore the extras
                    pass
"""
for i in X:
A[counter_of_line-1][j]=i
j=j+1 """
    return error_counter, C, A
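# Illustrative call (values mirror the commented-out defaults above; the
# 'seperation' module must be importable):
#   error_counter, C, A = find_arrays_A_C("LP02.LTX", number_of_rows=4,
#                                         number_of_variables=4, error_counter=0)
#   # C holds the coefficients parsed from the first (max/min) line,
#   # A holds one row of constraint coefficients per subsequent line.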
|
from abc import ABC, abstractmethod
from typing import Optional
import base64
import hashlib
import re
import requests
from . import exceptions
from .parse.html import find_scripts
class RouterInterface(ABC):
@abstractmethod
def page(self, name: str, params: Optional[dict] = None) -> str:
pass
class RouterSession(RouterInterface):
def __init__(self, host: str, username: str, password: str, *, auto_reauth: bool = True, auth_retries: int = 3, timeout: Optional[int] = None):
self.host = host
self.auto_reauth = bool(auto_reauth)
self.auth_retries = max(auth_retries, 0)
self.timeout = timeout
password_hash = hashlib.md5(password.encode()).hexdigest()
basic_raw = f"{username}:{password_hash}".encode()
basic_token = base64.b64encode(basic_raw).decode("utf-8")
self.session = requests.Session()
self.session.cookies["Authorization"] = f"Basic {basic_token}"
self.refresh_token()
def is_session_valid(self) -> bool:
url = self.page_url("Index")
resp = self._get(url)
reauth = self._is_reauth_doc(resp.text)
return not reauth
def refresh_token(self):
attempts = self.auth_retries+1
for retry in range(attempts):
resp = self._get(f"{self.base_url()}/userRpm/LoginRpm.htm?Save=Save")
            match = re.search(rf"{re.escape(self.host)}/(\w+)/", resp.text)
if match:
self.token = match.group(1)
if self.is_session_valid():
return
raise exceptions.AuthError(f"Failed to get auth token with specified username and password after {attempts} attempts")
def base_url(self) -> str:
return f"http://{self.host}"
def page_url(self, name: str) -> str:
return f"{self.base_url()}/{self.token}/userRpm/{name}.htm"
def page(self, name: str, params: Optional[dict] = None) -> str:
retry = False
while True:
doc = self._page_load_attempt(name, params)
if not self._is_reauth_doc(doc):
break
if retry or not self.auto_reauth:
raise exceptions.PageLoadError(f"Failed to load page {name}. Firmware of the router may not support this feature")
retry = True
self.refresh_token()
return doc
def _page_load_attempt(self, name: str, params: Optional[dict] = None) -> str:
url = self.page_url(name)
referer = self.page_url("MenuRpm")
resp = self._get(url, params=params, headers={"Referer": referer})
if resp.status_code != requests.codes.OK:
raise exceptions.PageLoadError(f"HTTP code {resp.status_code}")
return resp.text
def _get(self, *args, **kwargs):
if "timeout" not in kwargs:
kwargs["timeout"] = self.timeout
try:
return self.session.get(*args, **kwargs)
except requests.RequestException as e:
raise exceptions.NetworkError(e)
REAUTH_SUBSTR = 'cookie="Authorization=;path=/"'
@classmethod
def _is_reauth_doc(cls, doc) -> bool:
first_script = find_scripts(doc)[0]
return cls.REAUTH_SUBSTR in first_script
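# Illustrative usage (host and credentials below are placeholders):
#   router = RouterSession("192.168.0.1", "admin", "secret", timeout=5)
#   index_html = router.page("Index")
# The session re-authenticates automatically when the router invalidates the
# token, unless auto_reauth=False was passed.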
|
from django.apps import apps
from django.conf import settings
from django.test import TestCase, override_settings
from django.utils import translation
from localized_fields.fields import LocalizedField
from localized_fields.value import LocalizedValue
from .fake_model import get_fake_model
@override_settings(LOCALIZED_FIELDS_EXPERIMENTAL=True)
class LocalizedLookupsTestCase(TestCase):
"""Tests whether localized lookups properly work with."""
TestModel1 = None
@classmethod
def setUpClass(cls):
"""Creates the test model in the database."""
super(LocalizedLookupsTestCase, cls).setUpClass()
# reload app as setting has changed
config = apps.get_app_config("localized_fields")
config.ready()
cls.TestModel = get_fake_model({"text": LocalizedField()})
def test_localized_lookup(self):
"""Tests whether localized lookup properly works."""
self.TestModel.objects.create(
text=LocalizedValue(dict(en="text_en", ro="text_ro", nl="text_nl"))
)
# assert that it properly lookups the currently active language
for lang_code, _ in settings.LANGUAGES:
translation.activate(lang_code)
assert self.TestModel.objects.filter(
text="text_" + lang_code
).exists()
# ensure that the default language is used in case no
# language is active at all
translation.deactivate_all()
assert self.TestModel.objects.filter(text="text_en").exists()
# ensure that hstore lookups still work
assert self.TestModel.objects.filter(text__ro="text_ro").exists()
class LocalizedRefLookupsTestCase(TestCase):
"""Tests whether ref lookups properly work with."""
TestModel1 = None
@classmethod
def setUpClass(cls):
"""Creates the test model in the database."""
super(LocalizedRefLookupsTestCase, cls).setUpClass()
cls.TestModel = get_fake_model({"text": LocalizedField()})
cls.TestModel.objects.create(
text=LocalizedValue(dict(en="text_en", ro="text_ro", nl="text_nl"))
)
def test_active_ref_lookup(self):
"""Tests whether active_ref lookup properly works."""
# assert that it properly lookups the currently active language
for lang_code, _ in settings.LANGUAGES:
translation.activate(lang_code)
assert self.TestModel.objects.filter(
text__active_ref=f"text_{lang_code}"
).exists()
# ensure that the default language is used in case no
# language is active at all
translation.deactivate_all()
assert self.TestModel.objects.filter(
text__active_ref="text_en"
).exists()
def test_translated_ref_lookup(self):
"""Tests whether translated_ref lookup properly works."""
# assert that it properly lookups the currently active language
for lang_code, _ in settings.LANGUAGES:
translation.activate(lang_code)
assert self.TestModel.objects.filter(
text__translated_ref=f"text_{lang_code}"
).exists()
# ensure that the default language is used in case no
# language is active at all
translation.deactivate_all()
assert self.TestModel.objects.filter(
text__translated_ref="text_en"
).exists()
fallbacks = {"cs": ["ru", "ro"], "pl": ["nl", "ro"]}
with override_settings(LOCALIZED_FIELDS_FALLBACKS=fallbacks):
with translation.override("cs"):
assert self.TestModel.objects.filter(
text__translated_ref="text_ro"
).exists()
with translation.override("pl"):
assert self.TestModel.objects.filter(
text__translated_ref="text_nl"
).exists()
# ensure that the default language is used in case no fallback is set
with translation.override("ru"):
assert self.TestModel.objects.filter(
text__translated_ref="text_en"
).exists()
|
from typing import List, Optional, Union
import numpy as np
from .common import BIO
class BIOSmoothing:
def __init__(
self,
b_smooth: float = 0.0,
i_smooth: float = 0.0,
o_smooth: float = 0.0,
weight: float = 1.0
):
self.smooth = [b_smooth, i_smooth, o_smooth]
self.weight = weight
def apply_sequence(self, sequence: List[str]):
bio_tags = np.zeros([len(sequence), 3], np.float32)
for i, tag in enumerate(sequence):
bio_tags[i] = self.apply_tag(tag)
return bio_tags
def apply_tag(self, tag: str):
j = BIO.index(tag)
ret = np.zeros([3], np.float32)
if self.smooth[j] >= 0.0:
# Smooth
ret[j] = 1.0 - self.smooth[j]
for j_ in set(range(3)) - {j}:
ret[j_] = self.smooth[j] / 2
else:
# Marginalize
ret[:] = 1.0
return ret * self.weight
def __repr__(self):
ret = f'<W={self.weight:.2f}'
for j, tag in enumerate(BIO):
if self.smooth[j] != 0.0:
if self.smooth[j] < 0:
ret += f' [marginalize {tag}]'
else:
ret += f' [smooth {tag} by {self.smooth[j]:.2f}]'
return ret + '>'
def clone(self):
return BIOSmoothing(*self.smooth, self.weight)
def apply_bio_smoothing(
config: Optional[Union[BIOSmoothing, List[BIOSmoothing]]],
bio_seq: List[str]
) -> np.ndarray:
if config is None:
config = BIOSmoothing()
if isinstance(config, BIOSmoothing):
return config.apply_sequence(bio_seq)
else:
assert len(bio_seq) == len(config)
return np.stack([cfg.apply_tag(tag) for cfg, tag in zip(config, bio_seq)])
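# Illustrative usage (values are arbitrary):
#   smoothing = BIOSmoothing(b_smooth=0.1, i_smooth=0.1)
#   soft_targets = apply_bio_smoothing(smoothing, ["B", "I", "O", "O"])
#   # soft_targets has shape (4, 3): one soft (B, I, O) distribution per tag,
#   # scaled by the configured weight. A negative smoothing value marginalizes
#   # the corresponding tag (all-ones row).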
|
# -*- coding: utf-8 -*-
from api.base.settings.defaults import API_BASE
from api.citations.utils import display_absolute_url
from nose.tools import * # flake8: noqa
from osf_tests.factories import AuthUserFactory, PreprintFactory
from tests.base import ApiTestCase
class PreprintCitationsMixin(object):
def setUp(self):
super(PreprintCitationsMixin, self).setUp()
self.admin_contributor = AuthUserFactory()
self.published_preprint = PreprintFactory(creator=self.admin_contributor)
self.unpublished_preprint = PreprintFactory(creator=self.admin_contributor, is_published=False)
def test_unauthenticated_can_view_published_preprint_citations(self):
res = self.app.get(self.published_preprint_url)
assert_equal(res.status_code, 200)
def test_unauthenticated_cannot_view_unpublished_preprint_citations(self):
res = self.app.get(self.unpublished_preprint_url, expect_errors=True)
assert_equal(res.status_code, 401)
def test_preprint_citations_are_read_only(self):
post_res = self.app.post_json_api(self.published_preprint_url, {}, auth=self.admin_contributor.auth, expect_errors=True)
assert_equal(post_res.status_code, 405)
put_res = self.app.put_json_api(self.published_preprint_url, {}, auth=self.admin_contributor.auth, expect_errors=True)
assert_equal(put_res.status_code, 405)
delete_res = self.app.delete_json_api(self.published_preprint_url, auth=self.admin_contributor.auth, expect_errors=True)
assert_equal(delete_res.status_code, 405)
class TestPreprintCitations(PreprintCitationsMixin, ApiTestCase):
def setUp(self):
super(TestPreprintCitations, self).setUp()
self.published_preprint_url = '/{}preprints/{}/citation/'.format(API_BASE, self.published_preprint._id)
self.unpublished_preprint_url = '/{}preprints/{}/citation/'.format(API_BASE, self.unpublished_preprint._id)
def test_citation_publisher_is_preprint_provider(self):
res = self.app.get(self.published_preprint_url)
assert_equal(res.status_code, 200)
assert_equal(res.json['data']['attributes']['publisher'], self.published_preprint.provider.name)
def test_citation_url_is_preprint_url_not_project(self):
res = self.app.get(self.published_preprint_url)
assert_equal(res.status_code, 200)
assert_equal(res.json['data']['links']['self'], display_absolute_url(self.published_preprint))
class TestPreprintCitationsStyle(PreprintCitationsMixin, ApiTestCase):
def setUp(self):
super(TestPreprintCitationsStyle, self).setUp()
self.published_preprint_url = '/{}preprints/{}/citation/apa/'.format(API_BASE, self.published_preprint._id)
self.unpublished_preprint_url = '/{}preprints/{}/citation/apa/'.format(API_BASE, self.unpublished_preprint._id)
|
#!/usr/bin/env python
import sys
from cvangysel import argparse_utils, \
logging_utils, multiprocessing_utils, pyndri_utils, trec_utils, rank_utils
import sesh
from sesh import domain, scorers, sesh_pb2
import google.protobuf as pb
from google.protobuf import text_format  # pb.text_format is not loaded implicitly
import argparse
import codecs
import collections
import io
import logging
import multiprocessing
import numpy as np
import os
import operator
import pyndri
import scipy
class DocumentCandidateGenerator(object):
def __init__(self, args, index, track_edition, **kwargs):
self.args = args
self.index = index
assert args.qrel
self.qrels = []
for qrel_path in args.qrel:
with open(qrel_path, 'r') as f_qrel:
self.qrels.append(sesh.parse_qrel(f_qrel, track_edition))
class LemurDocumentCandidateGenerator(DocumentCandidateGenerator):
def __init__(self, args, index, configuration, **kwargs):
super(LemurDocumentCandidateGenerator, self).__init__(
args, index, **kwargs)
assert configuration.top_candidate_limit
self.top_candidate_limit = configuration.top_candidate_limit
def generate(self, session):
indri_query = ' '.join(session.queries[-1])
logging.debug('Querying Indri for "%s" (%s).',
indri_query, session)
if not indri_query:
return []
return [
int_document_id
for int_document_id, _ in self.index.query(
indri_query,
results_requested=self.top_candidate_limit)]
class QrelDocumentCandidateGenerator(DocumentCandidateGenerator):
def __init__(self, args, index, session_id_to_topic_id, track_edition,
per_topic=True, **kwargs):
super(QrelDocumentCandidateGenerator, self).__init__(
args, index, track_edition)
self.session_id_to_topic_id = session_id_to_topic_id
self.per_topic = per_topic
self.candidate_documents_per_topic = collections.defaultdict(set)
for qrel in self.qrels:
for topic_id, document_ids_and_relevances in qrel.items():
ext_document_ids_for_topic = list(
document_id for document_id in
document_ids_and_relevances)
int_document_ids_for_topic = set(
int_document_id for _, int_document_id in
self.index.document_ids(ext_document_ids_for_topic))
self.candidate_documents_per_topic[topic_id] |= \
int_document_ids_for_topic
def generate(self, session):
assert session.session_id in self.session_id_to_topic_id
if self.per_topic:
return self.candidate_documents_per_topic[
str(self.session_id_to_topic_id[session.session_id])]
else:
return dict(
candidate_document
for topic_id, candidate_documents in
self.candidate_documents_per_topic.items()
for candidate_document in candidate_documents).items()
class ListDocumentCandidateGenerator(DocumentCandidateGenerator):
def __init__(self, args, index, configuration, **kwargs):
super(ListDocumentCandidateGenerator, self).__init__(
args, index, **kwargs)
assert configuration.document_list
with open(configuration.document_list, 'r') as f_document_list:
ext_document_ids = [line.strip() for line in f_document_list
if line.strip()]
self.internal_document_ids = list(map(
operator.itemgetter(1), index.document_ids(ext_document_ids)))
def generate(self, session):
return self.internal_document_ids
DOCUMENT_CANDIDATE_GENERATORS = {
sesh_pb2.ScoreSessionsConfig.LEMUR: LemurDocumentCandidateGenerator,
sesh_pb2.ScoreSessionsConfig.QREL: QrelDocumentCandidateGenerator,
sesh_pb2.ScoreSessionsConfig.DOCUMENT_LIST: ListDocumentCandidateGenerator,
}
def score_session_initializer(
_result_queue,
_args,
_configuration,
_out_base,
_background_prob_dist,
_candidate_generator,
_scorer_impls,
_index,
_dictionary,
_anchor_texts):
score_session_worker.result_queue = _result_queue
score_session_worker.args = _args
score_session_worker.configuration = _configuration
score_session_worker.out_base = _out_base
score_session_worker.background_prob_dist = _background_prob_dist
score_session_worker.candidate_generator = _candidate_generator
score_session_worker.scorer_impls = _scorer_impls
score_session_worker.index = _index
score_session_worker.dictionary = _dictionary
score_session_worker.anchor_texts = _anchor_texts
logging.info('Initialized worker.')
def score_session_worker_(session):
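    """Scores a single session with every configured scorer and pushes the
    per-scorer rankings onto the shared result queue."""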
logger = logging.getLogger(str(session))
logger.setLevel(logging.DEBUG)
logger.propagate = False
log_formatter = logging_utils.get_formatter()
# Set the output stream handler to the same loglevel.
stderr_handler = logging.StreamHandler()
stderr_handler.setLevel(logging.getLogger().level)
stderr_handler.setFormatter(log_formatter)
logger.addHandler(stderr_handler)
logger.info('Scoring %s.', session)
if not session.queries:
logger.warning('Skipping %s due to no queries.', session)
return
session_id = session.session_id
logger.info('Generating set of candidate documents for %s.',
repr(session))
# Generate candidate documents.
candidate_internal_document_ids = score_session_worker.\
candidate_generator.generate(session)
word_frequency_index = pyndri_utils.create_word_frequency_index(
score_session_worker.index,
candidate_internal_document_ids,
background_prob_dist=score_session_worker.background_prob_dist)
candidate_doc_ids = set(iter(word_frequency_index))
assert(all(isinstance(document_id, str)
for document_id in candidate_doc_ids))
logger.info('Scoring %d documents for %s.',
len(candidate_doc_ids), session)
ndcg_per_scorer_per_qrel = collections.defaultdict(dict)
for scorer_name, scorer in score_session_worker.scorer_impls.items():
logger.info('Scoring %s using %s.', session, scorer_name)
f_scorer_debug_out = None
if score_session_worker.out_base:
debug_path = os.path.join(
score_session_worker.out_base,
'{scorer_name}_{session_id}'.format(
scorer_name=scorer_name,
session_id=session.session_id))
logger.info('Writing debug information for scorer %s to %s.',
scorer_name, debug_path)
f_scorer_debug_out = open(debug_path, 'w')
if f_scorer_debug_out is not None:
handler = logging.StreamHandler(f_scorer_debug_out)
else:
handler = logging.NullHandler()
handler.setLevel(logging.DEBUG)
handler.setFormatter(log_formatter)
logger.addHandler(handler)
full_ranking = dict(scorer.score_documents_for_session(
session, candidate_doc_ids,
score_session_worker.index, score_session_worker.dictionary,
score_session_worker.anchor_texts,
word_frequency_index,
logger,
f_scorer_debug_out))
assert(all(isinstance(document_id, str)
for document_id in full_ranking))
logger.removeHandler(handler)
if f_scorer_debug_out:
f_scorer_debug_out.close()
if (not score_session_worker.
configuration.retain_non_candidate_documents):
filtered_ranking = {
document_id: score
for document_id, score in full_ranking.items()
if document_id in candidate_doc_ids}
num_dropped_documents = len(full_ranking) - len(filtered_ranking)
else:
filtered_ranking = full_ranking
num_dropped_documents = 0
if num_dropped_documents > 0:
            logger.warning('Dropped %d documents produced by the %s method '
                           'because they are not in the candidate set.',
                           num_dropped_documents, scorer_name)
if scorer.DO_NOT_USE_qrels:
ranks = rank_utils.generate_ranks(
np.array(
[filtered_ranking[doc_id]
for doc_id in candidate_doc_ids
if doc_id in filtered_ranking]),
axis=0)
for qrel_idx, qrel in enumerate(scorer.DO_NOT_USE_qrels):
qrel_lookup = dict(qrel[session.session_id])
relevances = np.array(
[qrel_lookup.get(doc_id, 0)
for doc_id in candidate_doc_ids
if doc_id in filtered_ranking])
try:
# NDCG computation is biased towards the candidate set.
#
# Only use this value for tracking on-going progress,
# not as a final ranking quality measure.
ndcg = rank_utils.compute_ndcg(ranks, relevances)
except RuntimeError as e:
logging.error(e)
ndcg = np.nan
ndcg_per_scorer_per_qrel[qrel_idx][scorer_name] = ndcg
score_session_worker.result_queue.put(
(scorer_name, session_id, filtered_ranking))
for qrel, results in ndcg_per_scorer_per_qrel.items():
logger.info('Within-candidate set NDCG for %s: %s',
qrel, ' '.join(
'{} ({})'.format(scorer_name, ndcg)
for scorer_name, ndcg in results.items()))
logger.debug('Finished scoring %s.', session)
return len(score_session_worker.scorer_impls)
score_session_worker = \
multiprocessing_utils.WorkerFunction(score_session_worker_)
def binary_search_file(f, needle, key=lambda candidate: candidate):
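    """Binary-searches a text file whose lines are sorted by `key` and returns
    the list of (stripped) lines for which `key(line) == needle`."""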
position = [0]
f.seek(position[0], 2)
length = f.tell()
def goto_beginning_of_line():
while position[0] >= 0:
f.seek(position[0])
if f.read(1) == '\n':
break
position[0] -= 1
# Make sure we do not fall off the file.
if position[0] < 0:
f.seek(0)
low = 0
high = length
while low < high:
mid = (low + high) // 2
position[0] = mid
# Crawl back to beginning of line.
goto_beginning_of_line()
# Read current line.
candidate = f.readline().strip()
# Figure out which part of the file to search.
if key(candidate) < needle:
low = mid + 1
else:
high = mid
position[0] = low
# Crawl back to beginning of line.
goto_beginning_of_line()
result = []
while True:
candidate = f.readline().strip()
if not candidate or not key(candidate) == needle:
break
result.append(candidate)
return result
def load_anchor_texts(f_harvested_links, urls, encoding='utf8'):
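    """Collects anchor texts for the given URLs from the harvested-links file.
    Each line is expected to hold three tab-separated fields, the first being
    the target URL and the third the anchor text, sorted by target URL so the
    file can be binary searched. Returns a dict mapping URL -> anchor texts."""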
urls = sorted(urls, reverse=True)
anchor_texts = collections.defaultdict(list)
for url in urls:
matches = binary_search_file(
f_harvested_links, url,
key=lambda candidate: candidate.split('\t')[0])
for match in matches:
splitted_match = match.split('\t', 3)
if len(splitted_match) != 3:
logging.error('Unable to parse line while loading '
'anchor texts: %s', match)
continue
_, _, anchor_text = splitted_match
anchor_texts[url].append(anchor_text)
return anchor_texts
def main():
parser = argparse.ArgumentParser()
parser.add_argument('--loglevel', type=str, default='INFO')
parser.add_argument('index', type=argparse_utils.existing_file_path)
parser.add_argument('session_file', type=argparse_utils.existing_file_path)
parser.add_argument('--num_workers', type=int, default=1)
parser.add_argument('--harvested_links_file',
type=argparse_utils.existing_file_path,
default=None)
parser.add_argument('--qrel',
type=argparse_utils.existing_file_path,
nargs='*')
parser.add_argument('--configuration', type=str, nargs='+')
parser.add_argument('--top_sessions',
type=argparse_utils.positive_int,
default=None)
parser.add_argument('--out_base',
type=argparse_utils.nonexisting_file_path,
required=True)
args = parser.parse_args()
try:
logging_utils.configure_logging(args)
except IOError:
return -1
logging_utils.log_module_info(np, scipy)
configuration = sesh_pb2.ScoreSessionsConfig()
pb.text_format.Merge(' '.join(args.configuration), configuration)
if not configuration.modifier:
configuration.modifier.add() # Create an empty modifier.
elif len(configuration.modifier) > 1:
modifier_identifiers = [
modifier.identifier for modifier in configuration.modifier]
assert all(modifier_identifiers), \
'All session modifiers should have an identifier.'
assert len(modifier_identifiers) == len(set(modifier_identifiers)), \
'All session modifier identifiers should be unique: {}.'.format(
modifier_identifiers)
logging.info('Configuration: %s', configuration)
logging.info('Loading index.')
index = pyndri.Index(args.index)
num_documents = index.document_count()
logging.debug('Index contains %d documents.', num_documents)
logging.info('Loading dictionary.')
dictionary = pyndri.extract_dictionary(index)
logging.info('Loading background corpus.')
background_prob_dist = pyndri_utils.extract_background_prob_dist(index)
for modifier in configuration.modifier:
out_base = os.path.join(args.out_base, modifier.identifier)
assert not os.path.exists(out_base)
os.makedirs(out_base)
logging.info('Loading sessions using %s and outputting to %s.',
modifier or 'no modifier', out_base)
with codecs.open(args.session_file, 'r', 'utf8') as f_xml:
track_edition, _, sessions, session_id_to_topic_id = \
domain.construct_sessions(
f_xml, args.top_sessions, dictionary)
logging.info('Discovered %d sessions.', len(sessions))
sessions = domain.alter_sessions(sessions, modifier)
documents = domain.get_document_set(sessions.values())
logging.info('Retained %d sessions (%d SERP documents) '
'after filtering.',
len(sessions), len(documents))
# Load QRels for debugging and oracle runs.
qrels_per_session = []
for qrel_path in args.qrel:
with open(qrel_path, 'r') as f_qrel:
qrels_per_session.append(sesh.parse_qrel(f_qrel, None))
scorer_impls = {}
for scorer_desc in configuration.scorer:
assert scorer_desc.type in scorers.SESSION_SCORERS
identifier = scorer_desc.identifier or scorer_desc.type
assert identifier not in scorer_impls
scorer = scorers.create_scorer(scorer_desc, qrels_per_session)
logging.info('Scoring using %s.', repr(scorer))
scorer_impls[identifier] = scorer
anchor_texts = None
if args.harvested_links_file is not None:
urls = set(document.url for document in documents)
logging.info('Loading anchor texts for session SERPs (%d URLs).',
len(urls))
with codecs.open(args.harvested_links_file, 'r', 'latin1') \
as f_harvested_links:
anchor_texts = load_anchor_texts(f_harvested_links, urls)
logging.info('Discovered anchor texts for %d URLs (%d total).',
len(anchor_texts), len(urls))
else:
logging.info('No anchor texts loaded.')
# The following will hold all the rankings.
document_assessments_per_session_per_scorer = collections.defaultdict(
lambda: collections.defaultdict(
lambda: collections.defaultdict(float)))
assert configuration.candidate_generator in \
DOCUMENT_CANDIDATE_GENERATORS
# Document candidate generation.
candidate_generator = DOCUMENT_CANDIDATE_GENERATORS[
configuration.candidate_generator](**locals())
logging.info('Using %s for document candidate generation.',
candidate_generator)
result_queue = multiprocessing.Queue()
initargs = [
result_queue,
args,
configuration,
out_base,
background_prob_dist,
candidate_generator,
scorer_impls,
index,
dictionary,
anchor_texts]
pool = multiprocessing.Pool(
args.num_workers,
initializer=score_session_initializer,
initargs=initargs)
worker_result = pool.map_async(
score_session_worker,
sessions.values())
# We will not submit any more tasks to the pool.
pool.close()
it = multiprocessing_utils.QueueIterator(
pool, worker_result, result_queue)
while True:
try:
result = next(it)
except StopIteration:
break
scorer_name, session_id, ranking = result
document_assessments_per_session_per_scorer[
scorer_name][session_id] = ranking
for scorer_name in document_assessments_per_session_per_scorer:
            # Switch object assessments to lists.
            for topic_id, object_assessments in \
document_assessments_per_session_per_scorer[
scorer_name].items():
document_assessments_per_session_per_scorer[
scorer_name][topic_id] = [
(score, document_id)
                        for document_id, score in object_assessments.items()]
# Write the runs.
for scorer_name in document_assessments_per_session_per_scorer:
run_out_path = os.path.join(
out_base, '{0}.run'.format(scorer_name))
with io.open(run_out_path, 'w', encoding='utf8') as f_run_out:
trec_utils.write_run(
scorer_name,
document_assessments_per_session_per_scorer[scorer_name],
f_run_out)
if __name__ == "__main__":
sys.exit(main())
|