repo_name (string, lengths 6-112) | path (string, lengths 4-204) | copies (string, lengths 1-3) | size (string, lengths 4-6) | content (string, lengths 714-810k) | license (string, 15 classes)
---|---|---|---|---|---|
cbdavide/Calidad-CAR | src/modelling/calidad_carV2.py | 1 | 40254 |
# -*- coding: utf-8 -*-
__author__ = 'Efraín Domínguez Calle, PhD - Wilfredo Marimón Bolivar, PhD'
__copyright__ = "Copyright 2017, Mathmodelling"
__credits__ = ["Efraín Domínguez Calle"]
__license__ = "Uso Libre"
__version__ = "1.0"
__maintainer__ = "Efraín Antonio Domínguez Calle"
__email__ = '[email protected], [email protected]'
__status__ = "En desarrollo"
import numpy as np
import matplotlib
import matplotlib.pyplot as plt
import xlrd, xlwt
from util import join, used_vars
BOLD_FONT_XLWT = xlwt.Style.easyxf('font: bold on;')
def read_sheet(workbook, name):
# Reading water depth sheet
sheet = workbook.sheet_by_name(name)
# Number of written Rows in sheet
r = sheet.nrows
# Number of written Columns in sheet
c = sheet.ncols
answ = np.zeros([r - 1, c])
# Reading each cell of the requested sheet (skipping the header row)
for i in xrange(1, r):
for j in xrange(c):
answ[i - 1, j] = float(sheet.cell_value(i, j))
return answ
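# Example usage (sketch; the workbook path is illustrative and the sheet name 'WD'
# is one of the defaults used by read_config_file below):
#   wb = xlrd.open_workbook('config.xlsx')
#   wd = read_sheet(wb, 'WD')  # -> numpy array of shape (nrows - 1, ncols)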
def plot(ax, name, data):
ax.tick_params(labelsize=6)
ax.yaxis.get_offset_text().set_fontsize(6)
ax.plot(data[0], data[1])
ax.set_title(name, fontsize=8)
def save_plot(plt, title, xlabel, ylabel, data, path):
plt.plot(data[0], data[1])
plt.title(title)
plt.xlabel(xlabel)
plt.ylabel(ylabel)
plt.savefig("%s.png" % join(path, title), dpi=300)
plt.clf()
def save_sheet(book, name, data):
sheet = book.add_sheet(name)
for i in xrange(1, len(data) + 1):
sheet.write(i, 0, i - 1, BOLD_FONT_XLWT)
for i in xrange(1, len(data[0]) + 1):
sheet.write(0, i, i - 1, BOLD_FONT_XLWT)
for i in xrange(1, len(data) + 1):
for j in xrange(1, len(data[0]) + 1):
sheet.write(i, j, data[i - 1, j - 1])
def read_config_file(config_file, sheet_name_wd='WD', sheet_name_sl='SL', sheet_name_wv='WV', sheet_name_bc='BC',
sheet_name_ic='IC', sheet_name_ST='ST', sheet_name_SOD='SOD', sheet_name_SDBO='SDBO', sheet_name_SNH3='SNH4',
sheet_name_SNO2='SNO2', sheet_name_SNO3='SNO3', sheet_name_STDS='STDS', sheet_name_SGyA='SGyA',
sheet_name_SDQO='SDQO', sheet_name_SPdis='SPdis', sheet_name_SPorg='SPorg', sheet_name_SEC='SEC',
sheet_name_STC='STC', sheet_name_STSS='STSS', sheet_name_SSS='SSS', sheet_name_SpH='SpH',
sheet_name_SALK='SALK', sheet_name_Caudales='Caudales'):
"""
Reads an Excel configuration file with initial and boundary conditions and also time series for sinks and sources
:param config_file: The path to the Excel file
:param sheet_name_wd: The name of Excel sheet containing water depth for each cross section in the water channel
:param sheet_name_sl: The name of Excel sheet containing bed slope for each cross section in the water channel
:param sheet_name_wv: The name of Excel sheet containing water velocity for each cross section in the water channel
:param sheet_name_bc: The name of Excel sheet with boundary condition information by default is 'BC'
:param sheet_name_ic: The name of Excel sheet with initial condition information by default is 'IC'
:param sheet_name_ST ... sheet_name_Caudales: Names of the sheets holding the sink/source time series for each determinand and the flow series (defaults as in the signature)
:return: Arrays with water depth, slope and velocity per cross section, boundary and initial conditions, the flow-weighted sink/source series for each determinand and the flows (Caudales)
"""
# Open the Excel workbook containing the configuration data
wb = xlrd.open_workbook(config_file)
# Reading water depth sheet
wd = read_sheet(wb, sheet_name_wd)
# Reading water bed slope
sl = read_sheet(wb, sheet_name_sl)
# Reading water velocities sheet
wv = read_sheet(wb, sheet_name_wv)
# Reading boundary conditions sheet
bc = read_sheet(wb, sheet_name_bc)
# Reading initial conditions sheet
ic = read_sheet(wb, sheet_name_ic)
# Reading sinks and sources sheet
ST = read_sheet(wb, sheet_name_ST)
# Reading sinks and sources sheet
SOD = read_sheet(wb, sheet_name_SOD)
# Reading sinks and sources sheet
SDBO = read_sheet(wb, sheet_name_SDBO)
# Reading sinks and sources sheet
SNO2 = read_sheet(wb, sheet_name_SNO2)
# Reading sinks and sources sheet
SNO3 = read_sheet(wb, sheet_name_SNO3)
# Reading sinks and sources sheet
SNH3 = read_sheet(wb, sheet_name_SNH3)
# Reading sinks and sources sheet
STDS = read_sheet(wb, sheet_name_STDS)
# Reading sinks and sources sheet
SGyA = read_sheet(wb, sheet_name_SGyA)
# Reading sinks and sources sheet
SDQO = read_sheet(wb, sheet_name_SDQO)
# Reading sinks and sources sheet
SPorg = read_sheet(wb, sheet_name_SPorg)
# Reading sinks and sources sheet
SPdis = read_sheet(wb, sheet_name_SPdis)
# Reading sinks and sources sheet
SEC = read_sheet(wb, sheet_name_SEC)
# Reading sinks and sources sheet
STC = read_sheet(wb, sheet_name_STC)
# Reading sinks and sources sheet
STSS = read_sheet(wb, sheet_name_STSS)
# Reading sinks and sources sheet
SSS = read_sheet(wb, sheet_name_SSS)
# Reading sinks and sources sheet
SpH = read_sheet(wb, sheet_name_SpH)
# Reading sinks and sources sheet
SALK = read_sheet(wb, sheet_name_SALK)
# Read the flow series (Caudales); the first row must not contain zeros
# That check is performed when the file is loaded
Caudales = read_sheet(wb, sheet_name_Caudales)
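# The source series below are mixed into the channel as a flow-weighted dilution:
# S_mixed = S * Q_source / (Q_reference + Q_source), where Q_reference is the first
# row of Caudales. Temperature (ST) is only converted to Kelvin, and pH is first
# converted to hydrogen-ion concentration 10**(-pH) before the same weighting is applied.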
SOD = SOD[0:, 1:] * Caudales[0:, 1:] / (Caudales[0, 1:] + Caudales[0:, 1:])
SDBO = SDBO[0:, 1:] * Caudales[0:, 1:] / (Caudales[0, 1:] + Caudales[0:, 1:])
SNH3 = SNH3[0:, 1:] * Caudales[0:, 1:] / (Caudales[0, 1:] + Caudales[0:, 1:])
SNO2 = SNO2[0:, 1:] * Caudales[0:, 1:] / (Caudales[0, 1:] + Caudales[0:, 1:])
SNO3 = SNO3[0:, 1:] * Caudales[0:, 1:] / (Caudales[0, 1:] + Caudales[0:, 1:])
SDQO = SDQO[0:, 1:] * Caudales[0:, 1:] / (Caudales[0, 1:] + Caudales[0:, 1:])
STDS = STDS[0:, 1:] * Caudales[0:, 1:] / (Caudales[0, 1:] + Caudales[0:, 1:])
SGyA = SGyA[0:, 1:] * Caudales[0:, 1:] / (Caudales[0, 1:] + Caudales[0:, 1:])
SEC = SEC[0:, 1:] * Caudales[0:, 1:] / (Caudales[0, 1:] + Caudales[0:, 1:])
STC = STC[0:, 1:] * Caudales[0:, 1:] / (Caudales[0, 1:] + Caudales[0:, 1:])
SPorg = SPorg[0:, 1:] * Caudales[0:, 1:] / (Caudales[0, 1:] + Caudales[0:, 1:])
SPdis = SPdis[0:, 1:] * Caudales[0:, 1:] / (Caudales[0, 1:] + Caudales[0:, 1:])
STSS = STSS[0:, 1:] * Caudales[0:, 1:] / (Caudales[0, 1:] + Caudales[0:, 1:])
SSS = SSS[0:, 1:] * Caudales[0:, 1:] / (Caudales[0, 1:] + Caudales[0:, 1:])
ST = (ST[0:, 1:] + 273)
SpH = (10 ** (-1 * (SpH[0:, 1:]))) * Caudales[0:, 1:] / (Caudales[0, 1:] + Caudales[0:, 1:])
SALK = SALK[0:, 1:] * Caudales[0:, 1:] / (Caudales[0, 1:] + Caudales[0:, 1:])
return wd, sl, wv, bc, ic, ST, SOD, SDBO, SNH3, SNO2, SNO3, STDS, SGyA, SDQO, SPdis, SPorg, SEC, STC, STSS, SSS, SpH, SALK, Caudales
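# Example usage (sketch; 'config.xlsx' is an illustrative file name and the unpacking
# follows the return order above; note that run() below unpacks SPorg/SPdis in the
# opposite order to this return, which looks like an accidental swap):
#   (wd, sl, wv, bc, ic, ST, SOD, SDBO, SNH3, SNO2, SNO3, STDS, SGyA, SDQO, SPdis,
#    SPorg, SEC, STC, STSS, SSS, SpH, SALK, Caudales) = read_config_file('config.xlsx')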
def calidad_explicito(D, dx, ci_T, ci_OD, ci_DBO, ci_NH3, ci_NO2, ci_NO3, ci_DQO, ci_TDS, ci_EC, ci_TC, ci_GyA, ci_Porg, ci_Pdis, ci_TSS,
ci_SS, ci_pH, ci_ALK, v, d, S_T, S_OD, S_DBO, S_NH3, S_NO2, S_NO3, S_DQO, S_TDS, S_EC, S_TC, S_GyA, S_Porg, S_Pdis, S_TSS, S_SS,
S_pH, S_ALK, Caudales, variables):
"""
Esta función modela la transición de la concentración del momento t al momento t + dt para todos los
nodos espaciales de la corriente superficial
:param ci: matrix (bidimensional) de concentración inicial en el canal y su respectiva distancia x la concentración
va en g/m3, la distancia en metros
:param v: vector de velocidad promedio del agua en m/s, tiene las velocidades promedio para cada sección
y para cada momento de tiempo
:param d: vector de coeficiente de difusión
:return: c, dt: la concentración del contaminante en todos los nodos x para el momento de tiempo t + dt y el valor
de dt que cumple la condición de estabilidad de Courant o CFL
"""
c_T = ci_T
c_OD = ci_OD
c_DBO = ci_DBO
c_NH3 = ci_NH3
c_NO2 = ci_NO2
c_NO3 = ci_NO3
c_DQO = ci_DQO
c_TDS = ci_TDS
c_EC = ci_EC
c_TC = ci_TC
c_GyA = ci_GyA
c_Porg = ci_Porg
c_Pdis = ci_Pdis
c_TSS = ci_TSS
c_SS = ci_SS
c_pH = ci_pH
c_ALK = ci_ALK
maxv = abs(np.max(v))
maxd = abs(np.max(d))
pe = maxv * dx / maxd
if (np.abs(pe) >= 3) or (maxd == 0):
dt = dx / maxv
d = d * 0
#print "Se desconectó la Difusión, el número de peclet es: %s. Courant es igual a: %s. El paso en el " \
#"tiempo es de: %s segundos" % (round(pe, 2), str(maxv * (dt / dx)), dt)
elif (np.min(np.abs(pe)) >= 0.1) or (maxd == 0):
dt = 1 / (2 * maxd / (dx ** 2) + (maxv / dx))
#print "Se se tienen en cuenta difusion y adveccion, el número de peclet es: %s. Courant es igual a: %s. El paso en el tiempo es de: %s segundos" % (
#round(pe, 2), str(maxv * (dt / dx)), dt)
else:
dt = (dx * dx) / (2 * maxd)
#print 'Advection and diffusion are computed; the Peclet number is: %s. Courant equals: %s. The time ' \
#'step is %s seconds' % (round(pe, 2), str(maxv * (dt / dx)), dt)
# tfactor is a multiplier on the number of time nodes used to go from t to t + dt, tfactor >= 1;
# it is recommended to increase it in steps of powers of ten {10, 100, 1000, 10000, ...}
# ## ADD TFACTOR
tfactor = variables['tfactor']
# Save the initial dt as dtini
dtini = dt
# Adjust dt according to tfactor; dt becomes tfactor times smaller
dt = dt / tfactor
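# Worked example of the stability branch above (illustrative numbers, not from the spreadsheet):
# with maxv = 0.5 m/s, dx = 100 m and maxd = 2.0 m2/s, pe = 0.5 * 100 / 2.0 = 25 >= 3,
# so diffusion is disconnected and dt = dx / maxv = 200 s; with tfactor = 10 the internal
# step becomes dt = 20 s and the loop below runs int(dtini / dt) = 10 sub-steps.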
# CONSTANTS
ki = np.where(v > 0, 0, 1)
kr = np.where(v < 0, 0, 1)
# ADDITIONAL VARIABLES
k = variables['k']
den = variables['den']
Cp = variables['Cp']
Dt = k/(den*Cp)
As1 = variables['As1']
Jsn = variables['Jsn']
sbc = variables['sbc']
Tair = variables['Tair']
Aair = variables['Aair']
eair = variables['eair']
RL = variables['RL']
Uw = variables['Uw']
es = variables['es']
Kw = variables['Kw']
K1 = variables['K1']
K2 = variables['K2']
Vv = variables['Vv']
As = variables['As']
CO2S = variables['CO2S']
Wrp = variables['Wrp']
FrH = variables['FrH']
Diff = variables['Diff']
Da = variables['Da']
ko2 = variables['ko2']
cs = variables['cs']
knh3 = variables['knh3']
ksnh3 = variables['ksnh3']
alfa_nh3 = variables['alfa_nh3']
kdbo = variables['kdbo']
ks = variables['ks']
alfa_no2 = variables['alfa_no2']
ksod = variables['ksod']
knt = variables['knt']
NT = variables['NT']
kno2 = variables['kno2']
kno3 = variables['kno3']
kDQO = variables['kDQO']
kTDS = variables['kTDS']
A = variables['A']
alfa_1 = variables['alfa_1']
miu = variables['miu']
F = variables['F']
kTC = variables['kTC']
teta_TC = variables['teta_TC']
kEC = variables['kEC']
teta_EC = variables['teta_EC']
Jdbw = variables['Jdbw']
qtex = variables['qtex']
kN = variables['kN']
kH = variables['kH']
kOH = variables['kOH']
fdw = variables['fdw']
kf = variables['kf']
kb = variables['kb']
kv = variables['kv']
Cg = variables['Cg']
Henry = variables['Henry']
R = variables['R']
T = variables['T']
alfa_2 = variables['alfa_2']
resp = variables['resp']
kPorg = variables['kPorg']
kPsed = variables['kPsed']
sigma2 = variables['sigma2']
Ws = variables['Ws']
Rs = variables['Rs']
Rp = variables['Rp']
teta_DBO = variables['teta_DBO']
teta_NH3 = variables['teta_NH3']
teta_NO2 = variables['teta_NO2']
teta_DQO = variables['teta_DQO']
teta_NT = variables['teta_NT']
teta_NO3 = variables['teta_NO3']
# Create output variables
cout_T = np.zeros(len(c_T))
cout_OD = np.zeros(len(c_OD))
cout_DBO = np.zeros(len(c_OD))
cout_NH3 = np.zeros(len(c_OD))
cout_NO2 = np.zeros(len(c_OD))
cout_NO3 = np.zeros(len(c_OD))
cout_DQO = np.zeros(len(c_OD))
cout_TDS = np.zeros(len(c_OD))
cout_EC = np.zeros(len(c_OD))
cout_TC = np.zeros(len(c_OD))
cout_GyA = np.zeros(len(c_OD))
cout_Porg = np.zeros(len(c_OD))
cout_Pdis = np.zeros(len(c_OD))
cout_TSS = np.zeros(len(c_OD))
cout_SS = np.zeros(len(c_OD))
cout_pH = np.zeros(len(c_OD))
cout_ALK = np.zeros(len(c_OD))
cout_T = c_T
cout_OD = c_OD
cout_DBO = c_DBO
cout_NH3 = c_NH3
cout_NO2 = c_NO2
cout_NO3 = c_NO3
cout_DQO = c_DQO
cout_TDS = c_TDS
cout_EC = c_EC
cout_TC = c_TC
cout_GyA = c_GyA
cout_Porg = c_Porg
cout_Pdis = c_Pdis
cout_TSS = c_TSS
cout_SS = c_SS
cout_pH = c_pH
cout_ALK = c_ALK
# range(int(dtini / dt)) gives the number of temporal sub-steps needed to reach t + dt stably
for i in range(int(dtini / dt)):
caudales = Caudales[0:, 1:] / (Caudales[0, 1:] + Caudales[0:, 1:])
adv_T = -((ki[2:] * v[2:] * c_T[2:] - ki[1:-1] * v[1:-1] * c_T[1:-1]) * (dt / dx) +
(kr[1:-1] * v[1:-1] * c_T[1:-1] - kr[0:-2] * v[0:-2] * c_T[0:-2]) * (dt / dx))
reac_T = (Jsn + sbc*((Tair + 273)**4)*(Aair + 0.031*((eair)**0.5))*(1-RL) - 0.97*sbc*((c_T[0:-2])**4) -
0.47*(19 + (0.95*(Uw**2)))*((c_T[0:-2]) - Tair - 273.15) - (19 + (0.95*(Uw**2)))*(es - eair))*D/(den*Cp*As1)
cout_T[1:-1] = c_T[1:-1] + adv_T + reac_T + ((S_T[1:-1] - c_T[1:-1])*caudales[1:-1, 1])
adv_OD = -((ki[2:] * v[2:] * c_OD[2:] - ki[1:-1] * v[1:-1] * c_OD[1:-1]) * (dt / dx) +
(kr[1:-1] * v[1:-1] * c_OD[1:-1] - kr[0:-2] * v[0:-2] * c_OD[0:-2]) * (dt / dx))
dif_OD = 0.5 * (d[2:] * c_OD[2:] - 2 * d[1:-1] * c_OD[1:-1] + d[0:-2] * c_OD[0:-2]) * (dt / dx ** 2)
p = (c_OD[0:-2]) / ((c_OD[0:-2]) + ks)
reac_OD = (Da + ko2 * (cs - c_OD[0:-2]) - kdbo * c_DBO[0:-2] * p * (teta_DBO ** (c_T[0:-2] - 293.15)) - alfa_nh3 * knh3 *
c_NH3[0:-2] * p * (teta_NH3 ** (c_T[0:-2] - 293.15)) - alfa_no2 * kno2 * c_NO2[0:-2] * p * (
teta_NO2 ** (c_T[0:-2] - 293.15)) - ksod / D) * dt
cout_OD[1:-1] = c_OD[1:-1] + adv_OD + dif_OD + reac_OD + ((S_OD[1:-1] - c_OD[1:-1])*caudales[1:-1, 1])
adv_DBO = -((ki[2:] * v[2:] * c_DBO[2:] - ki[1:-1] * v[1:-1] * c_DBO[1:-1]) * (dt / dx) +
(kr[1:-1] * v[1:-1] * c_DBO[1:-1] - kr[0:-2] * v[0:-2] * c_DBO[0:-2]) * (dt / dx))
dif_DBO = 0.5 * (d[2:] * c_DBO[2:] - 2 * d[1:-1] * c_DBO[1:-1] + d[0:-2] * c_DBO[0:-2]) * (dt / dx ** 2)
reac_DBO = (-kdbo * c_DBO[0:-2] * p * (teta_DBO ** (c_T[0:-2] - 293.15))) * dt
cout_DBO[1:-1] = c_DBO[1:-1] + adv_DBO + dif_DBO + reac_DBO + ((S_DBO[1:-1] - c_DBO[1:-1])*caudales[1:-1, 1])
adv_NH3 = -((ki[2:] * v[2:] * c_NH3[2:] - ki[1:-1] * v[1:-1] * c_NH3[1:-1]) * (dt / dx) +
(kr[1:-1] * v[1:-1] * c_NH3[1:-1] - kr[0:-2] * v[0:-2] * c_NH3[0:-2]) * (dt / dx))
dif_NH3 = 0.5 * (d[2:] * c_NH3[2:] - 2 * d[1:-1] * c_NH3[1:-1] + d[0:-2] * c_NH3[0:-2]) * (dt / dx ** 2)
reac_NH3 = (knt * NT * (teta_NT ** (c_T[0:-2] - 293.15)) - knh3 * c_NH3[0:-2] * p * (
teta_NH3 ** (c_T[0:-2] - 293.15)) + ksnh3 / D - F * alfa_1 * miu * A) * dt
cout_NH3[1:-1] = c_NH3[1:-1] + adv_NH3 + dif_NH3 + reac_NH3 + ((S_NH3[1:-1] - c_NH3[1:-1])*caudales[1:-1, 1])
adv_NO2 = -((ki[2:] * v[2:] * c_NO2[2:] - ki[1:-1] * v[1:-1] * c_NO2[1:-1]) * (dt / dx) +
(kr[1:-1] * v[1:-1] * c_NO2[1:-1] - kr[0:-2] * v[0:-2] * c_NO2[0:-2]) * (dt / dx))
dif_NO2 = 0.5 * (d[2:] * c_NO2[2:] - 2 * d[1:-1] * c_NO2[1:-1] + d[0:-2] * c_NO2[0:-2]) * (dt / dx ** 2)
reac_NO2 = (knh3 * c_NH3[0:-2] * p * (teta_NH3 ** (c_T[0:-2] - 293.15)) - kno2 * c_NO2[0:-2] * p * (
teta_NO2 ** (c_T[0:-2] - 293.15)) + kno3 * c_NO3[0:-2] * (teta_NO3 ** (c_T[0:-2] - 293.15))) * dt
cout_NO2[1:-1] = c_NO2[1:-1] + adv_NO2 + dif_NO2 + reac_NO2 + ((S_NO2[1:-1] - c_NO2[1:-1])*caudales[1:-1, 1])
adv_NO3 = -((ki[2:] * v[2:] * c_NO3[2:] - ki[1:-1] * v[1:-1] * c_NO3[1:-1]) * (dt / dx) +
(kr[1:-1] * v[1:-1] * c_NO3[1:-1] - kr[0:-2] * v[0:-2] * c_NO3[0:-2]) * (dt / dx))
dif_NO3 = 0.5 * (d[2:] * c_NO3[2:] - 2 * d[1:-1] * c_NO3[1:-1] + d[0:-2] * c_NO3[0:-2]) * (dt / dx ** 2)
reac_NO3 = (kno2 * c_NO2[0:-2] * p * (teta_NO2 ** (c_T[0:-2] - 293.15)) - kno3 * c_NO3[0:-2] * (
teta_NO3 ** (c_T[0:-2] - 293.15)) - (1 - F) * alfa_1 * miu * A) * dt
cout_NO3[1:-1] = c_NO3[1:-1] + adv_NO3 + dif_NO3 + reac_NO3 + ((S_NO3[1:-1] - c_NO3[1:-1])*caudales[1:-1, 1])
adv_DQO = -((ki[2:] * v[2:] * c_DQO[2:] - ki[1:-1] * v[1:-1] * c_DQO[1:-1]) * (dt / dx) +
(kr[1:-1] * v[1:-1] * c_DQO[1:-1] - kr[0:-2] * v[0:-2] * c_DQO[0:-2]) * (dt / dx))
dif_DQO = 0.5 * (d[2:] * c_DQO[2:] - 2 * d[1:-1] * c_DQO[1:-1] + d[0:-2] * c_DQO[0:-2]) * (dt / dx ** 2)
reac_DQO = (-kDQO * c_DQO[0:-2] * p * (teta_DQO ** (c_T[0:-2] - 293.15))) * dt
cout_DQO[1:-1] = c_DQO[1:-1] + adv_DQO + dif_DQO + reac_DQO + ((S_DQO[1:-1] - c_DQO[1:-1])*caudales[1:-1, 1])
adv_TDS = -((ki[2:] * v[2:] * c_TDS[2:] - ki[1:-1] * v[1:-1] * c_TDS[1:-1]) * (dt / dx) +
(kr[1:-1] * v[1:-1] * c_TDS[1:-1] - kr[0:-2] * v[0:-2] * c_TDS[0:-2]) * (dt / dx))
dif_TDS = 0.5 * (d[2:] * c_TDS[2:] - 2 * d[1:-1] * c_TDS[1:-1] + d[0:-2] * c_TDS[0:-2]) * (dt / dx ** 2)
reac_TDS = (-kTDS * c_TDS[0:-2]) * dt
cout_TDS[1:-1] = c_TDS[1:-1] + adv_TDS + dif_TDS + reac_TDS + ((S_TDS[1:-1] - c_TDS[1:-1])*caudales[1:-1, 1])
adv_EC = -((ki[2:] * v[2:] * c_EC[2:] - ki[1:-1] * v[1:-1] * c_EC[1:-1]) * (dt / dx) +
(kr[1:-1] * v[1:-1] * c_EC[1:-1] - kr[0:-2] * v[0:-2] * c_EC[0:-2]) * (dt / dx))
dif_EC = 0.5 * (d[2:] * c_EC[2:] - 2 * d[1:-1] * c_EC[1:-1] + d[0:-2] * c_EC[0:-2]) * (dt / dx ** 2)
reac_EC = (-kEC * c_EC[0:-2] * (teta_EC ** (c_T[0:-2] - 293.15))) * dt
cout_EC[1:-1] = c_EC[1:-1] + adv_EC + dif_EC + reac_EC + ((S_EC[1:-1] - c_EC[1:-1])*caudales[1:-1, 1])
adv_TC = -((ki[2:] * v[2:] * c_TC[2:] - ki[1:-1] * v[1:-1] * c_TC[1:-1]) * (dt / dx) +
(kr[1:-1] * v[1:-1] * c_TC[1:-1] - kr[0:-2] * v[0:-2] * c_TC[0:-2]) * (dt / dx))
dif_TC = 0.5 * (d[2:] * c_TC[2:] - 2 * d[1:-1] * c_TC[1:-1] + d[0:-2] * c_TC[0:-2]) * (dt / dx ** 2)
reac_TC = (-kTC * c_TC[0:-2] * (teta_TC ** (c_T[0:-2] - 293.15))) * dt
cout_TC[1:-1] = c_TC[1:-1] + adv_TC + dif_TC + reac_TC + ((S_TC[1:-1] - c_TC[1:-1])*caudales[1:-1, 1])
adv_GyA = -((ki[2:] * v[2:] * c_GyA[2:] - ki[1:-1] * v[1:-1] * c_GyA[1:-1]) * (dt / dx) +
(kr[1:-1] * v[1:-1] * c_GyA[1:-1] - kr[0:-2] * v[0:-2] * c_GyA[0:-2]) * (dt / dx))
dif_GyA = 0.5 * (d[2:] * c_GyA[2:] - 2 * d[1:-1] * c_GyA[1:-1] + d[0:-2] * c_GyA[0:-2]) * (dt / dx ** 2)
reac_GyA = (Jdbw / D + qtex / D - (kN + kH * c_pH[0:-2] - kOH * (Kw / c_pH[0:-2])) * fdw * c_GyA[0:-2] - kf * c_GyA[0:-2] - kb * c_GyA[
0:-2] - kv * ((Cg / (Henry / (R * c_T[0:-2]))) - fdw * c_GyA[0:-2])) * dt/(10*tfactor)
cout_GyA[1:-1] = c_GyA[1:-1] + adv_GyA + dif_GyA + reac_GyA + ((S_GyA[1:-1] - c_GyA[1:-1])*caudales[1:-1, 1])
adv_Porg = -((ki[2:] * v[2:] * c_Porg[2:] - ki[1:-1] * v[1:-1] * c_Porg[1:-1]) * (dt / dx) +
(kr[1:-1] * v[1:-1] * c_Porg[1:-1] - kr[0:-2] * v[0:-2] * c_Porg[0:-2]) * (dt / dx))
dif_Porg = 0.5 * (d[2:] * c_Porg[2:] - 2 * d[1:-1] * c_Porg[1:-1] + d[0:-2] * c_Porg[0:-2]) * (dt / dx ** 2)
reac_Porg = (alfa_2 * resp * A - kPorg * c_Porg[0:-2] - kPsed * c_Porg[0:-2]) * dt
cout_Porg[1:-1] = c_Porg[1:-1] + adv_Porg + dif_Porg + reac_Porg + ((S_Porg[1:-1] - c_Porg[1:-1])*caudales[1:-1, 1])
adv_Pdis = -((ki[2:] * v[2:] * c_Pdis[2:] - ki[1:-1] * v[1:-1] * c_Pdis[1:-1]) * (dt / dx) +
(kr[1:-1] * v[1:-1] * c_Pdis[1:-1] - kr[0:-2] * v[0:-2] * c_Pdis[0:-2]) * (dt / dx))
dif_Pdis = 0.5 * (d[2:] * c_Pdis[2:] - 2 * d[1:-1] * c_Pdis[1:-1] + d[0:-2] * c_Pdis[0:-2]) * (dt / dx ** 2)
reac_Pdis = (kPorg * c_Porg[1:-1] + kPsed / D - sigma2 * miu * A) * dt
cout_Pdis[1:-1] = c_Pdis[1:-1] + adv_Pdis + dif_Pdis + reac_Pdis + ((S_Pdis[1:-1] - c_Pdis[1:-1])*caudales[1:-1, 1])
adv_TSS = -((ki[2:] * v[2:] * c_TSS[2:] - ki[1:-1] * v[1:-1] * c_TSS[1:-1]) * (dt / dx) +
(kr[1:-1] * v[1:-1] * c_TSS[1:-1] - kr[0:-2] * v[0:-2] * c_TSS[0:-2]) * (dt / dx))
dif_TSS = 0.5 * (d[2:] * c_TSS[2:] - 2 * d[1:-1] * c_TSS[1:-1] + d[0:-2] * c_TSS[0:-2]) * (dt / dx ** 2)
reac_TSS = qtex * (-Ws * c_TSS[0:-2] / D + Rs / D + Rp / D) * dt
cout_TSS[1:-1] = c_TSS[1:-1] + adv_TSS + dif_TSS + reac_TSS + ((S_TSS[1:-1] - c_TSS[1:-1])*caudales[1:-1, 1])
adv_SS = -((ki[2:] * v[2:] * c_SS[2:] - ki[1:-1] * v[1:-1] * c_SS[1:-1]) * (dt / dx) +
(kr[1:-1] * v[1:-1] * c_SS[1:-1] - kr[0:-2] * v[0:-2] * c_SS[0:-2]) * (dt / dx))
dif_SS = 0.5 * (d[2:] * c_SS[2:] - 2 * d[1:-1] * c_SS[1:-1] + d[0:-2] * c_SS[0:-2]) * (dt / dx ** 2)
reac_SS = qtex * (-Ws * c_SS[0:-2] / D + Rs / D + Rp / D) * dt
cout_SS[1:-1] = c_SS[1:-1] + adv_SS + dif_SS + reac_SS + ((S_SS[1:-1] - c_SS[1:-1])*caudales[1:-1, 1])
adv_ALK = -((ki[2:] * v[2:] * c_ALK[2:] - ki[1:-1] * v[1:-1] * c_ALK[1:-1]) * (dt / dx) + (
kr[1:-1] * v[1:-1] * c_ALK[1:-1] - kr[0:-2] * v[0:-2] * c_ALK[0:-2]) * (dt / dx))
dif_ALK = 0.5 * (d[2:] * c_ALK[2:] - 2 * d[1:-1] * c_ALK[1:-1] + d[0:-2] * c_ALK[0:-2]) * (dt / dx ** 2)
reac_ALK = Wrp + Vv * As * (CO2S - ((c_pH[0:-2]) * (c_pH[0:-2]) / (((c_pH[0:-2]) * (c_pH[0:-2])) + K1 * (c_pH[0:-2]) + K1 * K2)) * c_ALK[0:-2])
cout_ALK[1:-1] = c_ALK[1:-1] + adv_ALK + dif_ALK + reac_ALK + ((S_ALK[1:-1])*caudales[1:-1, 1])
adv_pH = -((ki[2:] * v[2:] * c_pH[2:] - ki[1:-1] * v[1:-1] * c_pH[1:-1]) * (dt / dx) +
(kr[1:-1] * v[1:-1] * c_pH[1:-1] - kr[0:-2] * v[0:-2] * c_pH[0:-2]) * (dt / dx))
dif_pH = 0.5 * (d[2:] * c_pH[2:] - 2 * d[1:-1] * c_pH[1:-1] + d[0:-2] * c_pH[0:-2]) * (dt / dx ** 2)
reac_pH = ((Kw / (FrH * (c_ALK[0:-2])) ** 0.5))
cout_pH[1:-1] = c_pH[1:-1] + adv_pH + dif_pH + reac_pH + ((S_pH[1:-1] - c_pH[1:-1])*caudales[1:-1, 1])
c_T = cout_T
c_OD = cout_OD
c_DBO = cout_DBO
c_NH3 = cout_NH3
c_NO2 = cout_NO2
c_NO3 = cout_NO3
c_DQO = cout_DQO
c_TDS = cout_TDS
c_EC = cout_EC
c_TC = cout_TC
c_GyA = cout_GyA
c_Porg = cout_Porg
c_Pdis = cout_Pdis
c_TSS = cout_TSS
c_SS = cout_SS
c_pH = cout_pH
c_ALK = cout_ALK
cout_T[-1] = cout_T[-2]
cout_OD[-1] = cout_OD[-2]
cout_DBO[-1] = cout_DBO[-2]
cout_NH3[-1] = cout_NH3[-2]
cout_TSS[-1] = cout_TSS[-2] # was a duplicate of the cout_OD line above; TSS is the only determinand missing from this block
cout_NO2[-1] = cout_NO2[-2]
cout_NO3[-1] = cout_NO3[-2]
cout_DQO[-1] = cout_DQO[-2]
cout_TDS[-1] = cout_TDS[-2]
cout_EC[-1] = cout_EC[-2]
cout_TC[-1] = cout_TC[-2]
cout_GyA[-1] = cout_GyA[-2]
cout_Porg[-1] = cout_Porg[-2]
cout_Pdis[-1] = cout_Pdis[-2]
cout_SS[-1] = cout_SS[-2]
cout_pH[-1] = cout_pH[-2]
cout_ALK[-1] = cout_ALK[-2]
return c_T, c_OD, c_DBO, c_NH3, c_NO2, c_NO3, c_DQO, c_TDS, c_EC, c_TC, c_GyA, c_Porg, c_Pdis, c_TSS, c_SS, c_pH, c_ALK, dt
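# A minimal sketch of the core explicit upwind advection-diffusion update used in
# calidad_explicito above, reduced to a single scalar c on a uniform grid (the function
# name is illustrative; reaction and source terms and the upstream boundary are omitted,
# and the module-level numpy import is reused):
def _advect_diffuse_step(c, v, d, dx, dt):
    """Advance c by one explicit time step: upwind advection plus central diffusion."""
    ki = np.where(v > 0, 0, 1)  # 1 where v <= 0: use the forward (downstream) difference
    kr = np.where(v < 0, 0, 1)  # 1 where v >= 0: use the backward (upstream) difference
    out = c.copy()
    adv = -((ki[2:] * v[2:] * c[2:] - ki[1:-1] * v[1:-1] * c[1:-1]) * (dt / dx) +
            (kr[1:-1] * v[1:-1] * c[1:-1] - kr[0:-2] * v[0:-2] * c[0:-2]) * (dt / dx))
    dif = 0.5 * (d[2:] * c[2:] - 2 * d[1:-1] * c[1:-1] + d[0:-2] * c[0:-2]) * (dt / dx ** 2)
    out[1:-1] = c[1:-1] + adv + dif
    out[-1] = out[-2]  # transmissive downstream boundary, as in the boundary copies above
    return out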
def run(arhivo_entrada, tiempo, directorio_salida, variables, show, export):
# Number of time steps to run
nt = tiempo
ct = (np.arange(1, nt))
# Reading input data from Excel file
# xls_config = "Rio_Los_Ranchos_prueba_00.xlsx"
hmed, slope, vel, b_c, i_c, ST, SOD, SDBO, SNH3, SNO2, SNO3, STDS, SGyA, SDQO, SPorg, SPdis, SEC, STC, STSS, SSS, SpH, SALK, Caudales = read_config_file(arhivo_entrada)
# Spatial discretization
dx = hmed[1, 0] - hmed[0, 0]
# water velocity at each monitoring point
va = vel[:, 1]
# diffusion coefficient at each monitoring point
Diff = variables['Diff']
cd = np.zeros(len(va)) + Diff
v = np.zeros(len(va)) + np.mean(vel)
D = np.mean(hmed)
# Boundary conditions
# TEMPERATURE
b_c_T = b_c[:, 14]
b_c_T = b_c_T + 273.15
# OXIGENO DISUELTO
b_c_OD = b_c[:, 1]
# DBO
b_c_DBO = b_c[:, 2]
# Amonio
b_c_NH3 = b_c[:, 3]
# Nitritos
b_c_NO2 = b_c[:, 4]
# Nitratos
b_c_NO3 = b_c[:, 5]
# DQO
b_c_DQO = b_c[:, 9]
# TDS
b_c_TDS = b_c[:, 6]
kcondt = 1.92
# EC
b_c_EC = b_c[:, 12]
# TC
b_c_TC = b_c[:, 13]
# GyA
b_c_GyA = b_c[:, 7]
# P organico
b_c_Porg = b_c[:, 10]
# P disuelto
b_c_Pdis = b_c[:, 11]
# Solidos suspendidos
b_c_TSS = b_c[:, 15]
# Solidos sedimentables
b_c_SS = b_c[:, 16]
# pH
b_c_pH = b_c[:, 17]
b_c_pH = 10 ** (-1 * (b_c_pH))
# Alkalinidad
b_c_ALK = b_c[:, 18]
# Initial conditions
# TEMPERATURE
i_c_T = i_c[:, 14]
i_c_T = i_c_T + 273.15
# OXIGENO DISUELTO
i_c_OD = i_c[:, 1]
# DBO
i_c_DBO = i_c[:, 2]
# Amonio
i_c_NH3 = i_c[:, 3]
# Nitritos
i_c_NO2 = i_c[:, 4]
# Nitratos
i_c_NO3 = i_c[:, 5]
# DQO
i_c_DQO = i_c[:, 9]
# TDS
i_c_TDS = i_c[:, 6]
# EC
i_c_EC = i_c[:, 12]
# TC
i_c_TC = i_c[:, 13]
# GyA
i_c_GyA = i_c[:, 7]
# P organico
i_c_Porg = i_c[:, 10]
# P disuelto
i_c_Pdis = i_c[:, 11]
# Solidos suspendidos
i_c_TSS = i_c[:, 15]
# Solidos sedimentables
i_c_SS = i_c[:, 16]
# pH
i_c_pH = i_c[:, 17]
i_c_pH = 10 ** (-1 * (i_c_pH))
# Alkalinidad
i_c_ALK = i_c[:, 18]
mconT = np.empty((nt, np.size(i_c_T, axis=0)))
mconT[0, :] = i_c_T
mconOD = np.empty((nt, np.size(i_c_OD, axis=0)))
mconOD[0, :] = i_c_OD
mconDBO = np.empty((nt, np.size(i_c_DBO, axis=0)))
mconDBO[0, :] = i_c_DBO
mconNH3 = np.empty((nt, np.size(i_c_NH3, axis=0)))
mconNH3[0, :] = i_c_NH3
mconNO2 = np.empty((nt, np.size(i_c_NO2, axis=0)))
mconNO2[0, :] = i_c_NO2
mconNO3 = np.empty((nt, np.size(i_c_NO3, axis=0)))
mconNO3[0, :] = i_c_NO3
mconDQO = np.empty((nt, np.size(i_c_DQO, axis=0)))
mconDQO[0, :] = i_c_DQO
mconTDS = np.empty((nt, np.size(i_c_TDS, axis=0)))
mconTDS[0, :] = i_c_TDS
mconEC = np.empty((nt, np.size(i_c_EC, axis=0)))
mconEC[0, :] = i_c_EC
mconTC = np.empty((nt, np.size(i_c_TC, axis=0)))
mconTC[0, :] = i_c_TC
mconGyA = np.empty((nt, np.size(i_c_GyA, axis=0)))
mconGyA[0, :] = i_c_GyA
mconPorg = np.empty((nt, np.size(i_c_Porg, axis=0)))
mconPorg[0, :] = i_c_Porg
mconPdis = np.empty((nt, np.size(i_c_Pdis, axis=0)))
mconPdis[0, :] = i_c_Pdis
mconTSS = np.empty((nt, np.size(i_c_TSS, axis=0)))
mconTSS[0, :] = i_c_TSS
mconSS = np.empty((nt, np.size(i_c_SS, axis=0)))
mconSS[0, :] = i_c_SS
mconpH = np.empty((nt, np.size(i_c_pH, axis=0)))
mconpH[0, :] = i_c_pH
mconALK = np.empty((nt, np.size(i_c_ALK, axis=0)))
mconALK[0, :] = i_c_ALK
ST = ST[:, 1:]
SOD = SOD[:, 1:]
SDBO = SDBO[:, 1:]
SNH3 = SNH3[:, 1:]
SNO2 = SNO2[:, 1:]
SNO3 = SNO3[:, 1:]
STDS = STDS[:, 1:]
SGyA = SGyA[:, 1:]
SDQO = SDQO[:, 1:]
SPorg = SPorg[:, 1:]
SPdis = SPdis[:, 1:]
SEC = SEC[:, 1:]
STC = STC[:, 1:]
STSS = STSS[:, 1:]
SSS = SSS[:, 1:]
SpH = SpH[:, 1:]
SALK = SALK[:, 1:]
for i in range(1, nt):
muestra = int(i / 3600)
i_c_T[0] = b_c_T[muestra]
i_c_OD[0] = b_c_OD[muestra]
i_c_DBO[0] = b_c_DBO[muestra]
i_c_NH3[0] = b_c_NH3[muestra]
i_c_NO2[0] = b_c_NO2[muestra]
i_c_NO3[0] = b_c_NO3[muestra]
i_c_DQO[0] = b_c_DQO[muestra]
i_c_TDS[0] = b_c_TDS[muestra]
i_c_EC[0] = b_c_EC[muestra]
i_c_TC[0] = b_c_TC[muestra]
i_c_GyA[0] = b_c_GyA[muestra]
i_c_Porg[0] = b_c_Porg[muestra]
i_c_Pdis[0] = b_c_Pdis[muestra]
i_c_TSS[0] = b_c_TSS[muestra]
i_c_SS[0] = b_c_SS[muestra]
i_c_pH[0] = b_c_pH[muestra]
i_c_ALK[0] = b_c_ALK[muestra]
S_T = ST[:, muestra]
S_OD = SOD[:, muestra]
S_DBO = SDBO[:, muestra]
S_NH3 = SNH3[:, muestra]
S_NO2 = SNO2[:, muestra]
S_NO3 = SNO3[:, muestra]
S_TDS = STDS[:, muestra]
S_GyA = SGyA[:, muestra]
S_DQO = SDQO[:, muestra]
S_Porg = SPorg[:, muestra]
S_Pdis = SPdis[:, muestra]
S_EC = SEC[:, muestra]
S_TC = STC[:, muestra]
S_TSS = STSS[:, muestra]
S_SS = SSS[:, muestra]
S_pH = SpH[:, muestra]
S_ALK = SALK[:, muestra]
# Concentration evolution to t + dt
T, OD, DBO, NH3, NO2, NO3, DQO, TDS, EC, TC, GyA, Porg, Pdis, TSS, SS, pH, ALK, paso_t = calidad_explicito(D, dx, i_c_T,
i_c_OD, i_c_DBO, i_c_NH3, i_c_NO2, i_c_NO3, i_c_DQO, i_c_TDS, i_c_EC, i_c_TC, i_c_GyA,i_c_Porg, i_c_Pdis, i_c_TSS, i_c_SS,
i_c_pH, i_c_ALK, v, cd, S_T, S_OD, S_DBO, S_NH3, S_NO2, S_NO3, S_DQO, S_TDS, S_EC, S_TC, S_GyA, S_Porg, S_Pdis, S_TSS, S_SS,
S_pH, S_ALK, Caudales, variables)
# Store the concentrations at time t + dt
mconT[i, :] = T
mconOD[i, :] = OD
mconDBO[i, :] = DBO
mconNH3[i, :] = NH3
mconNO2[i, :] = NO2
mconNO3[i, :] = NO3
mconDQO[i, :] = DQO
mconTDS[i, :] = TDS
mconEC[i, :] = EC
mconTC[i, :] = TC
mconGyA[i, :] = GyA
mconPorg[i, :] = Porg
mconPdis[i, :] = Pdis
mconTSS[i, :] = TSS
mconSS[i, :] = SS
mconpH[i, :] = pH
mconALK[i, :] = ALK
# Update the initial condition for the next time step
i_c_T = T
i_c_OD = OD
i_c_DBO = DBO
i_c_NH3 = NH3
i_c_NO2 = NO2
i_c_NO3 = NO3
i_c_DQO = DQO
i_c_TDS = TDS
i_c_EC = EC
i_c_TC = TC
i_c_GyA = GyA
i_c_Porg = Porg
i_c_Pdis = Pdis
i_c_TSS = TSS
i_c_SS = SS
i_c_pH = pH
i_c_ALK = ALK
paso_de_tiempo = paso_t
mconConduct = kcondt * mconTDS
mconT = mconT - 273.15
mconpH = (np.log10(mconpH))*(-1)
pH = (np.log10(pH))*(-1)
print "Guardando datos de salida..."
book = xlwt.Workbook()
save_sheet(book, 'T', mconT[0::3600, :])
save_sheet(book, 'OD', mconOD[0::3600, :])
save_sheet(book, 'DBO', mconDBO[0::3600, :])
save_sheet(book, 'NH3', mconNH3[0::3600, :])
save_sheet(book, 'NO2', mconNO2[0::3600, :])
save_sheet(book, 'NO3', mconNO3[0::3600, :])
save_sheet(book, 'DQO', mconDQO[0::3600, :])
save_sheet(book, 'TDS', mconTDS[0::3600, :])
save_sheet(book, 'EC', mconEC[0::3600, :])
save_sheet(book, 'TC', mconTC[0::3600, :])
save_sheet(book, 'GyA', mconGyA[0::3600, :])
save_sheet(book, 'Conduct', mconConduct[0::3600, :])
save_sheet(book, 'Porg', mconPorg[0::3600, :])
save_sheet(book, 'Pdis', mconPdis[0::3600, :])
save_sheet(book, 'TSS', mconTSS[0::3600, :])
save_sheet(book, 'SS', mconSS[0::3600, :])
save_sheet(book, 'pH', mconpH[0::3600, :])
save_sheet(book, 'ALK', mconALK[0::3600, :])
used_vars(book, variables)
book.save(join(directorio_salida, "Resultados.xls"))
if show:
print u"Creando Graficas"
# Time-series plots
xlabel = 'Tiempo(s)'
ylabel = 'Concentracion (mg/L)'
x_data = ct[1::3600]
fig, ax = plt.subplots(5, 3, sharex=True)
fig.add_subplot("111", frameon=False)
fig.canvas.set_window_title('Graficas de Tiempo.')
plot(ax[0,0], 'Evalucion T en punto final', [x_data, mconT[1::3600, -1]])
plot(ax[0,1], 'Evalucion OD en punto final', [x_data, mconOD[1::3600, -1]])
plot(ax[0,2], 'Evalucion DBO en punto final', [x_data, mconDBO[1::3600, -1]])
plot(ax[1,0], 'Evalucion NH3 en punto final', [x_data, mconNH3[1::3600, -1]])
plot(ax[1,1], 'Evalucion NO2 en punto final', [x_data, mconNO2[1::3600, -1]])
plot(ax[1,2], 'Evalucion NO3 en punto final', [x_data, mconNO3[1::3600, -1]])
plot(ax[2,0], 'Evalucion DQO en punto final', [x_data, mconDQO[1::3600, -1]])
plot(ax[2,1], 'Evalucion TDS en punto final', [x_data, mconTDS[1::3600, -1]])
plot(ax[2,2], 'Evalucion EC en punto final', [x_data, mconEC[1::3600, -1]])
plot(ax[3,0], 'Evalucion TC en punto final', [x_data, mconTC[1::3600, -1]])
plot(ax[3,1], 'Evalucion Grasas y Aceites en punto final', [x_data, mconGyA[1::3600, -1]])
plot(ax[3,2], 'Evalucion P org en punto final', [x_data, mconPorg[1::3600, -1]])
plot(ax[4,0], 'Evalucion P disuelto en punto final', [x_data, mconPdis[1::3600, -1]])
plot(ax[4,1], 'Evalucion del pH en punto final', [x_data, mconpH[1::3600, -1]])
plot(ax[4,2], 'Evalucion Alcanilidad en punto final', [x_data, mconALK[1::3600, -1]])
figManager = plt.get_current_fig_manager()
figManager.window.showMaximized()
plt.subplots_adjust(hspace=0.5)
plt.grid(False)
plt.tick_params(labelcolor='none', top='off', bottom='off', left='off', right='off')
plt.xlabel(xlabel, fontsize=15)
plt.ylabel(ylabel, fontsize=15)
fig.show()
# Spatial plots
xlabel = 'Distancia(m)'
ylabel = 'Concentracion (mg/L)'
c_x = hmed[:, 0]
fig2, ax2 = plt.subplots(5, 3, sharex=True)
fig2.add_subplot(111, frameon=False)
fig2.canvas.set_window_title('Graficas de espacio.')
plot(ax2[0,0], 'Evalucion T en el espacio', [c_x, T])
plot(ax2[0,1], 'Evalucion OD en el espacio', [c_x, OD])
plot(ax2[0,2], 'Evalucion DBO en el espacio', [c_x, DBO])
plot(ax2[1,0], 'Evalucion NH3 en el espacio', [c_x, NH3])
plot(ax2[1,1], 'Evalucion NO2 en el espacio', [c_x, NO2])
plot(ax2[1,2], 'Evalucion NO3 en el espacio', [c_x, NO3])
plot(ax2[2,0], 'Evalucion DQO en el espacio', [c_x, DQO])
plot(ax2[2,1], 'Evalucion TDS en el espacio', [c_x, TDS])
plot(ax2[2,2], 'Evalucion EC en el espacio', [c_x, EC])
plot(ax2[3,0], 'Evalucion TC en el espacio', [c_x, TC])
plot(ax2[3,1], 'Evalucion Grasas y Aceites en el espacio', [c_x, GyA])
plot(ax2[3,2], 'Evalucion P organico en el espacio', [c_x, Porg])
plot(ax2[4,0], 'Evalucion P disuelto en el espacio', [c_x, Pdis])
plot(ax2[4,1], 'Evalucion pH en espacio', [c_x, pH])
plot(ax2[4,2], 'Evalucion Alcanilidad en espacio', [c_x, ALK])
figManager = plt.get_current_fig_manager()
figManager.window.showMaximized()
plt.subplots_adjust(hspace=0.5)
plt.grid(False)
plt.tick_params(labelcolor='none', top='off', bottom='off', left='off', right='off')
plt.ticklabel_format(style='sci', axis='y', scilimits=(0,0), useOffset=True)
plt.xlabel(xlabel, fontsize=15)
plt.ylabel(ylabel, fontsize=15)
fig2.show()
if export:
print u"Guardando Graficas..."
xlabel = 'Tiempo(s)'
ylabel = 'Concentracion (mg/L)'
x_data = ct[1::3600]
plt.figure('tmp')
# Time plots
save_plot(plt, 'Evalucion T en punto final en tiempo', xlabel, ylabel, [x_data, mconT[1::3600, -1]], directorio_salida)
save_plot(plt, 'Evalucion OD en punto final en tiempo', xlabel, ylabel, [x_data, mconOD[1::3600, -1]], directorio_salida)
save_plot(plt, 'Evalucion DBO en punto final en tiempo', xlabel, ylabel, [x_data, mconDBO[1::3600, -1]], directorio_salida)
save_plot(plt, 'Evalucion NH3 en punto final en tiempo', xlabel, ylabel, [x_data, mconNH3[1::3600, -1]], directorio_salida)
save_plot(plt, 'Evalucion NO2 en punto final en tiempo', xlabel, ylabel, [x_data, mconNO2[1::3600, -1]], directorio_salida)
save_plot(plt, 'Evalucion NO3 en punto final en tiempo', xlabel, ylabel, [x_data, mconNO3[1::3600, -1]], directorio_salida)
save_plot(plt, 'Evalucion DQO en punto final en tiempo', xlabel, ylabel, [x_data, mconDQO[1::3600, -1]], directorio_salida)
save_plot(plt, 'Evalucion TDS en punto final en tiempo', xlabel, ylabel, [x_data, mconTDS[1::3600, -1]], directorio_salida)
save_plot(plt, 'Evalucion EC en punto final en tiempo', xlabel, ylabel, [x_data, mconEC[1::3600, -1]], directorio_salida)
save_plot(plt, 'Evalucion TC en punto final en tiempo', xlabel, ylabel, [x_data, mconTC[1::3600, -1]], directorio_salida)
save_plot(plt, 'Evalucion Grasas y Aceites en punto final en tiempo', xlabel, ylabel, [x_data, mconGyA[1::3600, -1]], directorio_salida)
save_plot(plt, 'Evalucion P org en punto final en tiempo', xlabel, ylabel, [x_data, mconPorg[1::3600, -1]], directorio_salida)
save_plot(plt, 'Evalucion P disuelto en punto final en tiempo', xlabel, ylabel, [x_data, mconPdis[1::3600, -1]], directorio_salida)
save_plot(plt, 'Evalucion del pH en punto final', xlabel, 'pH', [x_data, mconpH[1::3600, -1]], directorio_salida)
save_plot(plt, 'Evalucion Alcanilidad en punto final', xlabel, 'CaCO3/L', [x_data, mconALK[1::3600, -1]], directorio_salida)
# Spatial plots
xlabel = 'Distancia(m)'
ylabel = 'Concentracion (mg/L)'
c_x = hmed[:, 0]
save_plot(plt, 'Evalucion T en el espacio',xlabel, ylabel, [c_x, T], directorio_salida)
save_plot(plt, 'Evalucion OD en el espacio',xlabel, ylabel, [c_x, OD], directorio_salida)
save_plot(plt, 'Evalucion DBO en el espacio',xlabel, ylabel, [c_x, DBO], directorio_salida)
save_plot(plt, 'Evalucion NH3 en el espacio',xlabel, ylabel, [c_x, NH3], directorio_salida)
save_plot(plt, 'Evalucion NO2 en el espacio',xlabel, ylabel, [c_x, NO2], directorio_salida)
save_plot(plt, 'Evalucion NO3 en el espacio',xlabel, ylabel, [c_x, NO3], directorio_salida)
save_plot(plt, 'Evalucion DQO en el espacio',xlabel, ylabel, [c_x, DQO], directorio_salida)
save_plot(plt, 'Evalucion TDS en el espacio',xlabel, ylabel, [c_x, TDS], directorio_salida)
save_plot(plt, 'Evalucion EC en el espacio',xlabel, ylabel, [c_x, EC], directorio_salida)
save_plot(plt, 'Evalucion TC en el espacio',xlabel, ylabel, [c_x, TC], directorio_salida)
save_plot(plt, 'Evalucion Grasas y Aceites en el espacio',xlabel, ylabel, [c_x, GyA], directorio_salida)
save_plot(plt, 'Evalucion P organico en el espacio',xlabel, ylabel, [c_x, Porg], directorio_salida)
save_plot(plt, 'Evalucion P disuelto en el espacio',xlabel, ylabel, [c_x, Pdis], directorio_salida)
save_plot(plt, 'Evalucion del pH en espacio',xlabel, 'pH', [c_x, pH], directorio_salida)
save_plot(plt, 'Evalucion Alcanilidad en espacio',xlabel, 'CaCO3', [c_x, ALK], directorio_salida)
print "El proceso ha finalizado."
if __name__ == '__main__':
pass
# book = xlrd.open_workbook("sample3.xlsx")
# print read_sheet(book, 'WD')
| gpl-3.0 |
linsalrob/EdwardsLab | covid19/nCoV-Viz.py | 1 | 24082 | #!/usr/bin/python
# Download WHO geographic distribution of COVID-19 cases worldwide
# Source : European Centre for Disease Prevention and Control
# Plot cases and deaths for selected countries
# The downloaded spreadsheet is stored locally in Covid-19.csv
# To use cached local spreadsheet, use "-l" option
# Intermediate data for cases/deaths and also for each country are stored in relevant .csv files
# All plots can be aligned to :
# First date of detection or death, in that country (default)
# First date of detection in China, 2019-12-31 (-n)
# Data can be plotted as daily values (default) cumulative values (-c)
# Countries to plot and line colours are specified in the appropriate tables at the top of this file
# Dependencies : pandas, matplotlib, numpy, google-auth-httplib2, beautifulsoup4, xlrd
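# Example invocations (illustrative, based on the options defined in __main__ below):
#   python nCoV-Viz.py                      # download latest data, plot the default country list
#   python nCoV-Viz.py -l -c                # use the cached Covid-19.csv and plot cumulative values
#   python nCoV-Viz.py -s Germany -f -q     # single country, save plots to .png without displaying them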
import argparse
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import os
import urllib.request
from bs4 import BeautifulSoup
topLevelPage = "https://www.ecdc.europa.eu/en/publications-data/download-todays-data-geographic-distribution-covid-19-cases-worldwide"
localFileName = "Covid-19.csv"
countries = ["China","Germany","Italy","United_Kingdom","United_States_of_America", "Australia", "Sweden", "Brazil"]
colours = ["red", "black", "green","blue", "orange", "pink", "grey", "violet"]
# countries = ["China","Germany","Italy","United_Kingdom","United_States_of_America", "Australia"]
# colours = ["red", "black", "green","blue", "orange", "pink"]
country_single = ["United_Kingdom"] # Default value, can be overwritten
# Extract cases and deaths and align day 0 to first date of detection or death
def extractCountries(covidData, country, dates, noAlignFlag):
print("Country: " + country)
countryData = pd.DataFrame(index = dates) # Create dataframe for country data
# Extract the data for the required country
# We need to copy it to the new countryData array so that dates are pre-pended back to 2019-12-31
countryData_tmp = covidData[covidData["countriesAndTerritories"].str.match(country)]
countryData_tmp = countryData_tmp.iloc[::-1] # Invert table - top to bottom
countryData[list(countryData_tmp.columns.values)] = countryData_tmp[list(countryData_tmp.columns.values)]
countryData=countryData.fillna(0) # Replace NaN with 0
# countryFileName = country + '.csv'
# # countryData.to_csv (countryFileName, index = False, header=True)
# countryData.to_csv (countryFileName, index = True, header=True)
# Fill columns : countriesAndTerritories geoId countryterritoryCode popData2019
countryData['countriesAndTerritories'] = countryData['countriesAndTerritories'].iloc[-1]
countryData['geoId'] = countryData['geoId'].iloc[-1]
countryData['countryterritoryCode'] = countryData['countryterritoryCode'].iloc[-1]
countryData['popData2019'] = countryData['popData2019'].iloc[-1]
countryData=countryData.fillna(0) # Replace NaN with 0
# Create cumulative cases column and cumulative deaths column - Rename column titles
countryDataCS = countryData.cumsum(axis = 0)
countryDataCS = countryDataCS.rename(columns={"cases": "casesCumulative", "deaths": "deathsCumulative"})
countryData['casesCumulative'] = countryDataCS['casesCumulative'] # Copy cumulative columns to countryData
countryData['deathsCumulative'] = countryDataCS['deathsCumulative']
countryData['casesMA'] = countryData['cases'].rolling(7).mean() # Calculate moving averages
countryData['deathsMA'] = countryData['deaths'].rolling(7).mean()
countryData['casesCumulativeMA'] = countryData['casesCumulative'].rolling(7).mean()
countryData['deathsCumulativeMA'] = countryData['deathsCumulative'].rolling(7).mean()
# Calculate fatality rates and clip to 100%
countryData['fatalityPercentage'] = countryData['deaths'] * 100./countryData['cases']
countryData['fatalityPercentage'] = countryData['fatalityPercentage'].where(countryData['fatalityPercentage'] <= 100., 100.)
countryData.loc[countryData.cases == 0, "fatalityPercentage"] = 0 # When cases == 0 set percentage to 0
countryData['fatalityPercentageCumulative'] = countryData['deathsCumulative'] * 100./countryData['casesCumulative']
countryData['fatalityPercentageCumulative'] = countryData['fatalityPercentageCumulative'].where(countryData['fatalityPercentageCumulative'] <= 100., 100.)
countryData.loc[countryData.casesCumulative == 0, "fatalityPercentageCumulative"] = 0 # When cumulative cases == 0 set percentage to 0
countryData['fatalityPercentageMA'] = countryData['fatalityPercentage'].rolling(7).mean()
countryData['fatalityPercentageCumulativeMA'] = countryData['fatalityPercentageCumulative'].rolling(7).mean()
outputFileName = country + ".csv"
countryData.to_csv(outputFileName, index=True)
latestCases=countryData['cases'].iloc[-1] # Print latest cases and deaths count
latestDeaths=countryData['deaths'].iloc[-1]
print('Latest cases : ' + str(latestCases))
print('Latest deaths : ' + str(latestDeaths))
print("Latest fatality rate : %.2f %%" % ((latestDeaths*100.)/latestCases))
dc = countryData.index[countryData['cases'] != 0].tolist() # Print first date of cases
print("First Case : " + str(dc[0]).replace(' 00:00:00',''))
dd = countryData.index[countryData['deaths'] != 0].tolist() # Print first date of deaths
print("First Death : " + str(dd[0]).replace(' 00:00:00',''))
totalCases=countryData['casesCumulative'].iloc[-1]
totalDeaths=countryData['deathsCumulative'].iloc[-1]
fatalityRate=totalDeaths*100./totalCases
# population=int(countryData['popData2019'].iloc[0])
population=countryData['popData2019'].iloc[0]
print("Total number of cases : " + str(totalCases))
print("Total number of deaths : " + str(totalDeaths))
print("Total number of cases (Per 1 million pop.) : %.2f" % (totalCases / (population/1000000.)))
print("Total number of deaths (Per 1 million pop.): %.2f" % (totalDeaths / (population/1000000.)))
print("Overall Fatality rate : %.2f %%" % (fatalityRate))
print("Population (2019) : %.2f (Million)" % (population / 1000000.))
print('')
# If we are not aligning first case or death then just return the data
if noAlignFlag == True:
return country, population, countryData, countryData;
# Align to first case or death by removing leading zeros and resetting index
# Get names of indexes for which column casesCumulative has value 0
else:
indexNames = countryData[ countryData['casesCumulative'] == 0 ].index # Remove leading zeros from cases
extractedCases = countryData.drop(indexNames)
extractedCases = extractedCases.reset_index()
indexNames = countryData[ countryData['deathsCumulative'] == 0 ].index # Remove leading zeros from deaths
extractedDeaths = countryData.drop(indexNames)
extractedDeaths = extractedDeaths.reset_index()
return country, population, extractedCases, extractedDeaths;
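# Example call (sketch, mirroring the loop in main() below):
#   name, population, cases_df, deaths_df = extractCountries(covidData, "Germany", dates, noAlignFlag=False)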
# main
def main(useCachedFileFlag, cumulativeResultsFlag, noAlignFlag, noPlotFlag, fileSavePlotFlag, popNormalizeFlag):
global countries
cachedFilePresentFlag = False
try:
f = open(localFileName)
cachedFilePresentFlag = True
f.close()
except IOError:
cachedFilePresentFlag = False
# If cached file not present or we have requested to refresh then get the file
if (cachedFilePresentFlag == False) or (useCachedFileFlag == False):
cachedFilePresentFlag = False
resp = urllib.request.urlopen(topLevelPage)
soup = BeautifulSoup(resp, "html.parser", from_encoding=resp.info().get_param('charset'))
for link in soup.find_all('a', href=True):
# print(link['href'])
if ("xlsx" in link['href']): # If data in .xlsx format then retrieve and store as local .csv format
xlsxfileurl = link['href']
try:
xlsx_tmp = pd.read_excel(xlsxfileurl, index_col=0)
xlsx_tmp.to_csv(localFileName, index=True)
cachedFilePresentFlag = True
print("Cached spreadheet updated (xlsx)")
except:
cachedFilePresentFlag = False
print("Spreadheet failed to download (xlsx)")
break
if (cachedFilePresentFlag == False): # If data NOT in .xlsx format then retrieve and store .csv format
for link in soup.find_all(class_="btn btn-primary", href=True):
if ("csv" in link['href']):
csvfileurl = link['href']
try:
urllib.request.urlretrieve(csvfileurl, localFileName)
cachedFilePresentFlag = True
print("Cached spreadheet updated (csv)")
except:
cachedFilePresentFlag = False
print("Spreadheet failed to download (csv)")
break
if (cachedFilePresentFlag == False):
print("No spreadsheet found at the URL, use \"-l\" to use local cached file")
exit(0)
numberOfCountries = len(countries)
extractedCountry = {} # Create empty dictionaries to store result data frames for each country
extractedPopulation = {}
extractedCases = {}
extractedDeaths = {}
if (cachedFilePresentFlag == True):
covidData = pd.read_csv(localFileName, index_col=0, encoding="utf-8-sig")
# Spreadsheet columns :
# dateRep day month year cases deaths countriesAndTerritories geoId countryterritoryCode popData2019
covidData=covidData.fillna(0) # Replace NaN with 0
clen = 0 # For longest sequency
dlen = 0
# Extract Chinese dates to create index - this allows for countries that do not have full data supplied
dates_tmp = covidData[covidData["countriesAndTerritories"].str.match("China")]
dates_tmp = dates_tmp.iloc[::-1] # Invert table - top to bottom
dates_tmp=dates_tmp.reset_index()
dates=list(dates_tmp['dateRep'])
countryIndex = 0
for country in countries:
# Extract the data for each country
# Data can be aligned on 2019-12-31 (first date in the China series) or on the first instance in that country
extractedCountry[countryIndex], extractedPopulation[countryIndex], extractedCases[countryIndex], extractedDeaths[countryIndex] = \
extractCountries(covidData, country, dates, noAlignFlag)
# print(extractedCases)
# print(extractedDeaths)
if (popNormalizeFlag == True):
extractedCases[countryIndex]['cases'] = extractedCases[countryIndex]['cases'] * (1000000.0 / extractedPopulation[countryIndex])
extractedCases[countryIndex]['deaths'] = extractedCases[countryIndex]['deaths'] * (1000000.0 / extractedPopulation[countryIndex])
extractedCases[countryIndex]['casesCumulative'] = extractedCases[countryIndex]['casesCumulative'] * (1000000.0 / extractedPopulation[countryIndex])
extractedCases[countryIndex]['deathsCumulative'] = extractedCases[countryIndex]['deathsCumulative'] * (1000000.0 / extractedPopulation[countryIndex])
extractedCases[countryIndex]['casesMA'] = extractedCases[countryIndex]['casesMA'] * (1000000.0 / extractedPopulation[countryIndex])
extractedCases[countryIndex]['deathsMA'] = extractedCases[countryIndex]['deathsMA'] * (1000000.0 / extractedPopulation[countryIndex])
extractedDeaths[countryIndex]['casesCumulativeMA'] = extractedDeaths[countryIndex]['casesCumulativeMA'] * (1000000.0 / extractedPopulation[countryIndex])
extractedDeaths[countryIndex]['deathsCumulativeMA'] = extractedDeaths[countryIndex]['deathsCumulativeMA'] * (1000000.0 / extractedPopulation[countryIndex])
clen = np.maximum(clen, extractedCases[countryIndex].shape[0])
dlen = np.maximum(dlen, extractedDeaths[countryIndex].shape[0])
countryIndex = countryIndex+1
lastDate = str(covidData.first_valid_index()) # Get last date in combinedCases
lastDate = lastDate.replace(' 00:00:00','')
if len(countries) == 1: # Single country - Cases And Deaths
# Select daily or cumulative results
if (cumulativeResultsFlag == True):
casesType = 'casesCumulative'
deathsType = 'deathsCumulative'
casesMAType = 'casesCumulativeMA'
deathsMAType = 'deathsCumulativeMA'
percentageType = 'fatalityPercentageCumulative'
percentageMAType = 'fatalityPercentageCumulativeMA'
else:
casesType = 'cases'
deathsType = 'deaths'
casesMAType = 'casesMA'
deathsMAType = 'deathsMA'
percentageType = 'fatalityPercentage'
percentageMAType = 'fatalityPercentageMA'
# Plot titles
titleStr='Covid-19 '
if (cumulativeResultsFlag == True):
titleStr=titleStr + ' Cumulative Cases And Deaths: '
else:
titleStr=titleStr + ' Daily Cases And Deaths: '
ax = plt.gca() # Create plot - get current axis
ax.autoscale(enable=True, tight=True)
# Plot daily cases and deaths AND moving average
extractedCases[0].plot(kind='line', y=casesType, label='Cases', color='dodgerblue',ax=ax)
extractedCases[0].plot(kind='line', y=casesMAType, label='Cases - 7 Day Moving Average', color='blue',ax=ax)
extractedDeaths[0].plot(kind='line',y=deathsType, label='Deaths',color='lime',ax=ax)
extractedDeaths[0].plot(kind='line',y=deathsMAType, label='Deaths - 7 Day Moving Average',color='seagreen',ax=ax)
# Plot daily mortality rate
ax2 = plt.gca().twinx()
extractedDeaths[0].plot(kind='line',y=percentageType,label='Mortality Rate (%)',color='orange',linewidth=.75,ax=ax2)
extractedDeaths[0].plot(kind='line',y=percentageMAType,label='Mortality Rate Moving Average (%)',color='red',linewidth=.75,ax=ax2)
ax2.set_ylabel('Mortality Rate (%)', color='red')
ax2.tick_params(axis='y', labelcolor='red')
ax2.set_ylim(0, 40)
ax2.get_legend().remove()
plt.title(extractedCountry[0] + '\n' + titleStr + str(lastDate) + "\nSource: European Centre for Disease Prevention and Control")
ax.set_ylim(bottom=0.0) # Don't plot numbers < zero
if (fileSavePlotFlag == True):
titleStr = titleStr.split(':', 1)[0]
plt.savefig(titleStr.replace(" ", "")+extractedCountry[0]+'.png')
if noPlotFlag == False: # Plot the data
plt.show()
else:
plt.close()
else: # Multiple countries
# Select daily or cumulative results
if (cumulativeResultsFlag == True):
casesType = 'casesCumulative'
deathsType = 'deathsCumulative'
percentageType = 'fatalityPercentageCumulative'
else:
casesType = 'casesMA'
deathsType = 'deathsMA'
percentageType = 'fatalityPercentage'
# Plot titles
titleStr='Covid-19 '
if (noAlignFlag == False):
titleStr=titleStr + 'Aligned '
if (popNormalizeFlag == True):
titleStr=titleStr + 'Pop Noramlized '
if (cumulativeResultsFlag == True):
titleStr=titleStr + 'Cumulative Cases: '
else:
titleStr=titleStr + 'Daily Cases (7 day moving average): '
ax = plt.gca() # Create plot - get current axis
countryIndex = 0
for country in countries:
extractedCases[countryIndex].plot(kind='line',y=casesType,title=titleStr + str(lastDate) + "\nSource: European Centre for Disease Prevention and Control",label=extractedCountry[countryIndex],color=colours[countryIndex],ax=ax)
countryIndex = countryIndex+1
ax.set_ylim(bottom=0.0) # Don't plot numbers < zero
if (fileSavePlotFlag == True):
titleStr = titleStr.split(':', 1)[0]
plt.savefig(titleStr.replace(" ", "")+'.png')
if noPlotFlag == False: # Plot the data
plt.show()
else:
plt.close()
# Plot titles
titleStr='Covid-19 '
if (noAlignFlag == False):
titleStr=titleStr + 'Aligned '
if (popNormalizeFlag == True):
titleStr=titleStr + 'Pop Noramlized '
if (cumulativeResultsFlag == True):
titleStr=titleStr + 'Cumulative Deaths: '
else:
titleStr=titleStr + 'Daily Deaths (7 day moving average): '
ax = plt.gca() # Create plot - get current axis
countryIndex = 0
for country in countries:
extractedDeaths[countryIndex].plot(kind='line',y=deathsType,title=titleStr + str(lastDate) + "\nSource: European Centre for Disease Prevention and Control",label=extractedCountry[countryIndex],color=colours[countryIndex],ax=ax)
countryIndex = countryIndex+1
ax.set_ylim(bottom=0.0) # Don't plot numbers < zero
if (fileSavePlotFlag == True):
titleStr = titleStr.split(':', 1)[0]
plt.savefig(titleStr.replace(" ", "")+'.png')
if noPlotFlag == False: # Plot the data
plt.show()
else:
plt.close()
# Plot titles
titleStr='Covid-19 '
if (noAlignFlag == False):
titleStr=titleStr + 'Aligned '
if (popNormalizeFlag == True):
titleStr=titleStr + 'Pop Noramlized '
if (cumulativeResultsFlag == True):
titleStr=titleStr + 'Cumulative Fatality Percentage: '
else:
titleStr=titleStr + 'Daily Fatality Percentage (7 day moving average): '
ax = plt.gca() # Create plot - get current axis
countryIndex = 0
for country in countries:
# extractedDeaths[countryIndex].plot(kind='line',y=percentageType,title=titleStr + str(lastDate) + "\nSource: European Centre for Disease Prevention and Control",label=extractedCountry[countryIndex],color=colours[countryIndex],ax=ax)
extractedDeaths[countryIndex].plot(kind='line',y=percentageType,title=titleStr + str(lastDate) + "\nSource: European Centre for Disease Prevention and Control",label=extractedCountry[countryIndex],color=colours[countryIndex],ax=ax)
countryIndex = countryIndex+1
ax.set_ylim(bottom=0.0) # Don't plot numbers < zero
if (fileSavePlotFlag == True):
titleStr = titleStr.split(':', 1)[0]
plt.savefig(titleStr.replace(" ", "")+'.png')
if noPlotFlag == False: # Plot the data
plt.show()
else:
plt.close()
else:
print("Cached spreadsheet file not found on computer")
exit()
if __name__ == '__main__':
useCachedFileFlag = False
cumulativeResultsFlag = False
noAlignFlag = False
noPlotFlag = False
fileSavePlotFlag = False
popNormalizeFlag = False
if ((len(countries) > 1) and (len(countries) != len(colours))):
print("The number of colours must equal the number of countries")
exit()
parser = argparse.ArgumentParser(description='Covid-19 Visualizer')
parser.add_argument("-c", "--cumulative", action="store_true", help="Display cumulative results")
parser.add_argument("-l", "--local", action="store_true", help="Use local cached Covid-19.csv")
parser.add_argument("-n", "--noalign", action="store_true", help="Do not align first instance dates - all graphs start 2019-12-31")
parser.add_argument("-f", "--file", action="store_true", help="Save plot to file")
parser.add_argument("-p", "--population", action="store_true", help="Use population to normalize data to cases per 1 Million")
parser.add_argument("-q", "--quiet", action="store_true", help="Quiet - Do not plot graphs")
parser.add_argument("-s", "--single", nargs='?', const=1, help="Process a single country - Specify the countriesAndTerritories string used in the spreadsheet")
parser.add_argument("-m", "--ma", action="store_true", help="Plot Moving Average (only availeble for single courntry plot)")
args = parser.parse_args()
if (args.cumulative):
cumulativeResultsFlag = True
print("Cumulative Results = True")
if (args.local):
useCachedFileFlag = True
print("Use cached file = True")
if (args.noalign):
noAlignFlag = True
print("Do not align first instance date = True")
if (args.file):
fileSavePlotFlag = True
print("Save plot graphs to file = True")
if (args.population):
popNormalizeFlag = True
print("Normalize to population = True")
if (args.quiet):
noPlotFlag = True
print("Do not plot graphs = True")
if (args.single): # Process single country - if no country specified use default country at top of file
if (args.single != 1):
country_single[0] = args.single
countries = country_single # Overwrite the countries array
noAlignFlag = True
print("Process single country: " + str(countries))
print("Do not align first instance date = True")
main(useCachedFileFlag, cumulativeResultsFlag, noAlignFlag, noPlotFlag, fileSavePlotFlag, popNormalizeFlag)
| mit |
DailyActie/Surrogate-Model | 01-codes/scikit-learn-master/sklearn/cluster/tests/test_mean_shift.py | 1 | 3652 | """
Testing for mean shift clustering methods
"""
import warnings
import numpy as np
from sklearn.cluster import MeanShift
from sklearn.cluster import estimate_bandwidth
from sklearn.cluster import get_bin_seeds
from sklearn.cluster import mean_shift
from sklearn.datasets.samples_generator import make_blobs
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_false
from sklearn.utils.testing import assert_raise_message
from sklearn.utils.testing import assert_true
n_clusters = 3
centers = np.array([[1, 1], [-1, -1], [1, -1]]) + 10
X, _ = make_blobs(n_samples=300, n_features=2, centers=centers,
cluster_std=0.4, shuffle=True, random_state=11)
def test_estimate_bandwidth():
# Test estimate_bandwidth
bandwidth = estimate_bandwidth(X, n_samples=200)
assert_true(0.9 <= bandwidth <= 1.5)
def test_mean_shift():
# Test MeanShift algorithm
bandwidth = 1.2
ms = MeanShift(bandwidth=bandwidth)
labels = ms.fit(X).labels_
labels_unique = np.unique(labels)
n_clusters_ = len(labels_unique)
assert_equal(n_clusters_, n_clusters)
cluster_centers, labels = mean_shift(X, bandwidth=bandwidth)
labels_unique = np.unique(labels)
n_clusters_ = len(labels_unique)
assert_equal(n_clusters_, n_clusters)
def test_parallel():
ms1 = MeanShift(n_jobs=2)
ms1.fit(X)
ms2 = MeanShift()
ms2.fit(X)
assert_array_equal(ms1.cluster_centers_, ms2.cluster_centers_)
assert_array_equal(ms1.labels_, ms2.labels_)
def test_meanshift_predict():
# Test MeanShift.predict
ms = MeanShift(bandwidth=1.2)
labels = ms.fit_predict(X)
labels2 = ms.predict(X)
assert_array_equal(labels, labels2)
def test_meanshift_all_orphans():
# init away from the data, crash with a sensible warning
ms = MeanShift(bandwidth=0.1, seeds=[[-9, -9], [-10, -10]])
msg = "No point was within bandwidth=0.1"
assert_raise_message(ValueError, msg, ms.fit, X, )
def test_unfitted():
# Non-regression: before fit, there should be not fitted attributes.
ms = MeanShift()
assert_false(hasattr(ms, "cluster_centers_"))
assert_false(hasattr(ms, "labels_"))
def test_bin_seeds():
# Test the bin seeding technique which can be used in the mean shift
# algorithm
# Data is just 6 points in the plane
X = np.array([[1., 1.], [1.4, 1.4], [1.8, 1.2],
[2., 1.], [2.1, 1.1], [0., 0.]])
# With a bin coarseness of 1.0 and min_bin_freq of 1, 3 bins should be
# found
ground_truth = set([(1., 1.), (2., 1.), (0., 0.)])
test_bins = get_bin_seeds(X, 1, 1)
test_result = set([tuple(p) for p in test_bins])
assert_true(len(ground_truth.symmetric_difference(test_result)) == 0)
# With a bin coarseness of 1.0 and min_bin_freq of 2, 2 bins should be
# found
ground_truth = set([(1., 1.), (2., 1.)])
test_bins = get_bin_seeds(X, 1, 2)
test_result = set([tuple(p) for p in test_bins])
assert_true(len(ground_truth.symmetric_difference(test_result)) == 0)
# With a bin size of 0.01 and min_bin_freq of 1, 6 bins should be found
# we bail and use the whole data here.
with warnings.catch_warnings(record=True):
test_bins = get_bin_seeds(X, 0.01, 1)
assert_array_equal(test_bins, X)
# tight clusters around [0, 0] and [1, 1], only get two bins
X, _ = make_blobs(n_samples=100, n_features=2, centers=[[0, 0], [1, 1]],
cluster_std=0.1, random_state=0)
test_bins = get_bin_seeds(X, 1)
assert_array_equal(test_bins, [[0, 0], [1, 1]])
| mit |
CVML/scikit-learn | sklearn/utils/random.py | 234 | 10510 | # Author: Hamzeh Alsalhi <[email protected]>
#
# License: BSD 3 clause
from __future__ import division
import numpy as np
import scipy.sparse as sp
import operator
import array
from sklearn.utils import check_random_state
from sklearn.utils.fixes import astype
from ._random import sample_without_replacement
__all__ = ['sample_without_replacement', 'choice']
# This is a backport of np.random.choice from numpy 1.7
# The function can be removed when we bump the requirements to >=1.7
def choice(a, size=None, replace=True, p=None, random_state=None):
"""
choice(a, size=None, replace=True, p=None)
Generates a random sample from a given 1-D array
.. versionadded:: 1.7.0
Parameters
-----------
a : 1-D array-like or int
If an ndarray, a random sample is generated from its elements.
If an int, the random sample is generated as if a was np.arange(n)
size : int or tuple of ints, optional
Output shape. Default is None, in which case a single value is
returned.
replace : boolean, optional
Whether the sample is with or without replacement.
p : 1-D array-like, optional
The probabilities associated with each entry in a.
        If not given the sample assumes a uniform distribution over all
entries in a.
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
Returns
--------
samples : 1-D ndarray, shape (size,)
The generated random samples
Raises
-------
ValueError
If a is an int and less than zero, if a or p are not 1-dimensional,
if a is an array-like of size 0, if p is not a vector of
probabilities, if a and p have different lengths, or if
replace=False and the sample size is greater than the population
size
See Also
---------
randint, shuffle, permutation
Examples
---------
Generate a uniform random sample from np.arange(5) of size 3:
>>> np.random.choice(5, 3) # doctest: +SKIP
array([0, 3, 4])
>>> #This is equivalent to np.random.randint(0,5,3)
Generate a non-uniform random sample from np.arange(5) of size 3:
>>> np.random.choice(5, 3, p=[0.1, 0, 0.3, 0.6, 0]) # doctest: +SKIP
array([3, 3, 0])
Generate a uniform random sample from np.arange(5) of size 3 without
replacement:
>>> np.random.choice(5, 3, replace=False) # doctest: +SKIP
array([3,1,0])
>>> #This is equivalent to np.random.shuffle(np.arange(5))[:3]
Generate a non-uniform random sample from np.arange(5) of size
3 without replacement:
>>> np.random.choice(5, 3, replace=False, p=[0.1, 0, 0.3, 0.6, 0])
... # doctest: +SKIP
array([2, 3, 0])
Any of the above can be repeated with an arbitrary array-like
instead of just integers. For instance:
>>> aa_milne_arr = ['pooh', 'rabbit', 'piglet', 'Christopher']
>>> np.random.choice(aa_milne_arr, 5, p=[0.5, 0.1, 0.1, 0.3])
... # doctest: +SKIP
array(['pooh', 'pooh', 'pooh', 'Christopher', 'piglet'],
dtype='|S11')
"""
random_state = check_random_state(random_state)
# Format and Verify input
a = np.array(a, copy=False)
if a.ndim == 0:
try:
# __index__ must return an integer by python rules.
pop_size = operator.index(a.item())
except TypeError:
raise ValueError("a must be 1-dimensional or an integer")
if pop_size <= 0:
raise ValueError("a must be greater than 0")
elif a.ndim != 1:
raise ValueError("a must be 1-dimensional")
else:
pop_size = a.shape[0]
    if pop_size == 0:
raise ValueError("a must be non-empty")
    if p is not None:
p = np.array(p, dtype=np.double, ndmin=1, copy=False)
if p.ndim != 1:
raise ValueError("p must be 1-dimensional")
if p.size != pop_size:
raise ValueError("a and p must have same size")
if np.any(p < 0):
raise ValueError("probabilities are not non-negative")
if not np.allclose(p.sum(), 1):
raise ValueError("probabilities do not sum to 1")
shape = size
if shape is not None:
size = np.prod(shape, dtype=np.intp)
else:
size = 1
# Actual sampling
if replace:
        if p is not None:
cdf = p.cumsum()
cdf /= cdf[-1]
uniform_samples = random_state.random_sample(shape)
idx = cdf.searchsorted(uniform_samples, side='right')
# searchsorted returns a scalar
idx = np.array(idx, copy=False)
else:
idx = random_state.randint(0, pop_size, size=shape)
else:
if size > pop_size:
raise ValueError("Cannot take a larger sample than "
"population when 'replace=False'")
        if p is not None:
if np.sum(p > 0) < size:
raise ValueError("Fewer non-zero entries in p than size")
n_uniq = 0
p = p.copy()
found = np.zeros(shape, dtype=np.int)
flat_found = found.ravel()
while n_uniq < size:
x = random_state.rand(size - n_uniq)
if n_uniq > 0:
p[flat_found[0:n_uniq]] = 0
cdf = np.cumsum(p)
cdf /= cdf[-1]
new = cdf.searchsorted(x, side='right')
_, unique_indices = np.unique(new, return_index=True)
unique_indices.sort()
new = new.take(unique_indices)
flat_found[n_uniq:n_uniq + new.size] = new
n_uniq += new.size
idx = found
else:
idx = random_state.permutation(pop_size)[:size]
if shape is not None:
idx.shape = shape
if shape is None and isinstance(idx, np.ndarray):
# In most cases a scalar will have been made an array
idx = idx.item(0)
# Use samples as indices for a if a is array-like
if a.ndim == 0:
return idx
if shape is not None and idx.ndim == 0:
# If size == () then the user requested a 0-d array as opposed to
# a scalar object when size is None. However a[idx] is always a
# scalar and not an array. So this makes sure the result is an
# array, taking into account that np.array(item) may not work
# for object arrays.
res = np.empty((), dtype=a.dtype)
res[()] = a[idx]
return res
return a[idx]
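# Illustrative sketch, not part of the original module: exercises the
# weighted replace=False branch of `choice` above, which repeatedly zeroes
# out already-drawn entries of p and redraws from the renormalized CDF.
# The probability values are arbitrary and chosen only for demonstration.
def _demo_weighted_choice_without_replacement():
    p = [0.1, 0.0, 0.3, 0.6, 0.0]
    return choice(5, size=3, replace=False, p=p, random_state=0)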
def random_choice_csc(n_samples, classes, class_probability=None,
random_state=None):
"""Generate a sparse random matrix given column class distributions
Parameters
----------
n_samples : int,
Number of samples to draw in each column.
classes : list of size n_outputs of arrays of size (n_classes,)
List of classes for each column.
class_probability : list of size n_outputs of arrays of size (n_classes,)
Optional (default=None). Class distribution of each column. If None the
uniform distribution is assumed.
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
Returns
-------
random_matrix : sparse csc matrix of size (n_samples, n_outputs)
"""
data = array.array('i')
indices = array.array('i')
indptr = array.array('i', [0])
for j in range(len(classes)):
classes[j] = np.asarray(classes[j])
if classes[j].dtype.kind != 'i':
raise ValueError("class dtype %s is not supported" %
classes[j].dtype)
classes[j] = astype(classes[j], np.int64, copy=False)
# use uniform distribution if no class_probability is given
if class_probability is None:
class_prob_j = np.empty(shape=classes[j].shape[0])
class_prob_j.fill(1 / classes[j].shape[0])
else:
class_prob_j = np.asarray(class_probability[j])
if np.sum(class_prob_j) != 1.0:
raise ValueError("Probability array at index {0} does not sum to "
"one".format(j))
if class_prob_j.shape[0] != classes[j].shape[0]:
raise ValueError("classes[{0}] (length {1}) and "
"class_probability[{0}] (length {2}) have "
"different length.".format(j,
classes[j].shape[0],
class_prob_j.shape[0]))
# If 0 is not present in the classes insert it with a probability 0.0
if 0 not in classes[j]:
classes[j] = np.insert(classes[j], 0, 0)
class_prob_j = np.insert(class_prob_j, 0, 0.0)
# If there are nonzero classes choose randomly using class_probability
rng = check_random_state(random_state)
if classes[j].shape[0] > 1:
p_nonzero = 1 - class_prob_j[classes[j] == 0]
nnz = int(n_samples * p_nonzero)
ind_sample = sample_without_replacement(n_population=n_samples,
n_samples=nnz,
random_state=random_state)
indices.extend(ind_sample)
            # Normalize probabilities for the nonzero elements
classes_j_nonzero = classes[j] != 0
class_probability_nz = class_prob_j[classes_j_nonzero]
class_probability_nz_norm = (class_probability_nz /
np.sum(class_probability_nz))
classes_ind = np.searchsorted(class_probability_nz_norm.cumsum(),
rng.rand(nnz))
data.extend(classes[j][classes_j_nonzero][classes_ind])
indptr.append(len(indices))
return sp.csc_matrix((data, indices, indptr),
(n_samples, len(classes)),
dtype=int)
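# Illustrative sketch, not part of the original module: builds a small sparse
# label matrix with random_choice_csc. The class values and per-column
# probabilities are arbitrary demonstration values (each column must sum to one).
def _demo_random_choice_csc():
    classes = [np.array([0, 1, 2]), np.array([0, 3])]
    probabilities = [np.array([0.5, 0.25, 0.25]), np.array([0.75, 0.25])]
    return random_choice_csc(10, classes, probabilities, random_state=0)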
| bsd-3-clause |
bgris/ODL_bgris | lib/python3.5/site-packages/matplotlib/finance.py | 10 | 42914 | """
A collection of functions for collecting, analyzing and plotting
financial data.
This module is deprecated in 2.0 and has been moved to a module called
`mpl_finance`.
"""
from __future__ import (absolute_import, division, print_function,
unicode_literals)
import six
from six.moves import xrange, zip
import contextlib
import os
import warnings
from six.moves.urllib.request import urlopen
import datetime
import numpy as np
from matplotlib import colors as mcolors, verbose, get_cachedir
from matplotlib.dates import date2num
from matplotlib.cbook import iterable, mkdirs, warn_deprecated
from matplotlib.collections import LineCollection, PolyCollection
from matplotlib.lines import Line2D, TICKLEFT, TICKRIGHT
from matplotlib.patches import Rectangle
from matplotlib.transforms import Affine2D
warn_deprecated(
since=2.0,
message=("The finance module has been deprecated in mpl 2.0 and will "
"be removed in mpl 2.2. Please use the module mpl_finance "
"instead."))
if six.PY3:
import hashlib
def md5(x):
return hashlib.md5(x.encode())
else:
from hashlib import md5
cachedir = get_cachedir()
# cachedir will be None if there is no writable directory.
if cachedir is not None:
cachedir = os.path.join(cachedir, 'finance.cache')
else:
# Should only happen in a restricted environment (such as Google App
# Engine). Deal with this gracefully by not caching finance data.
cachedir = None
stock_dt_ohlc = np.dtype([
(str('date'), object),
(str('year'), np.int16),
(str('month'), np.int8),
(str('day'), np.int8),
(str('d'), np.float), # mpl datenum
(str('open'), np.float),
(str('high'), np.float),
(str('low'), np.float),
(str('close'), np.float),
(str('volume'), np.float),
(str('aclose'), np.float)])
stock_dt_ochl = np.dtype(
[(str('date'), object),
(str('year'), np.int16),
(str('month'), np.int8),
(str('day'), np.int8),
(str('d'), np.float), # mpl datenum
(str('open'), np.float),
(str('close'), np.float),
(str('high'), np.float),
(str('low'), np.float),
(str('volume'), np.float),
(str('aclose'), np.float)])
def parse_yahoo_historical_ochl(fh, adjusted=True, asobject=False):
"""Parse the historical data in file handle fh from yahoo finance.
Parameters
----------
adjusted : bool
If True (default) replace open, close, high, low prices with
their adjusted values. The adjustment is by a scale factor, S =
adjusted_close/close. Adjusted prices are actual prices
multiplied by S.
Volume is not adjusted as it is already backward split adjusted
by Yahoo. If you want to compute dollars traded, multiply volume
by the adjusted close, regardless of whether you choose adjusted
= True|False.
asobject : bool or None
If False (default for compatibility with earlier versions)
return a list of tuples containing
d, open, close, high, low, volume
If None (preferred alternative to False), return
a 2-D ndarray corresponding to the list of tuples.
Otherwise return a numpy recarray with
date, year, month, day, d, open, close, high, low,
volume, adjusted_close
        where d is a floating point representation of date,
as returned by date2num, and date is a python standard
library datetime.date instance.
The name of this kwarg is a historical artifact. Formerly,
True returned a cbook Bunch
holding 1-D ndarrays. The behavior of a numpy recarray is
very similar to the Bunch.
"""
return _parse_yahoo_historical(fh, adjusted=adjusted, asobject=asobject,
ochl=True)
def parse_yahoo_historical_ohlc(fh, adjusted=True, asobject=False):
"""Parse the historical data in file handle fh from yahoo finance.
Parameters
----------
adjusted : bool
If True (default) replace open, high, low, close prices with
their adjusted values. The adjustment is by a scale factor, S =
adjusted_close/close. Adjusted prices are actual prices
multiplied by S.
Volume is not adjusted as it is already backward split adjusted
by Yahoo. If you want to compute dollars traded, multiply volume
by the adjusted close, regardless of whether you choose adjusted
= True|False.
asobject : bool or None
If False (default for compatibility with earlier versions)
return a list of tuples containing
d, open, high, low, close, volume
If None (preferred alternative to False), return
a 2-D ndarray corresponding to the list of tuples.
Otherwise return a numpy recarray with
date, year, month, day, d, open, high, low, close,
volume, adjusted_close
        where d is a floating point representation of date,
as returned by date2num, and date is a python standard
library datetime.date instance.
The name of this kwarg is a historical artifact. Formerly,
True returned a cbook Bunch
holding 1-D ndarrays. The behavior of a numpy recarray is
very similar to the Bunch.
"""
return _parse_yahoo_historical(fh, adjusted=adjusted, asobject=asobject,
ochl=False)
def _parse_yahoo_historical(fh, adjusted=True, asobject=False,
ochl=True):
"""Parse the historical data in file handle fh from yahoo finance.
Parameters
----------
adjusted : bool
If True (default) replace open, high, low, close prices with
their adjusted values. The adjustment is by a scale factor, S =
adjusted_close/close. Adjusted prices are actual prices
multiplied by S.
Volume is not adjusted as it is already backward split adjusted
by Yahoo. If you want to compute dollars traded, multiply volume
by the adjusted close, regardless of whether you choose adjusted
= True|False.
asobject : bool or None
If False (default for compatibility with earlier versions)
return a list of tuples containing
d, open, high, low, close, volume
or
d, open, close, high, low, volume
depending on `ochl`
If None (preferred alternative to False), return
a 2-D ndarray corresponding to the list of tuples.
Otherwise return a numpy recarray with
date, year, month, day, d, open, high, low, close,
volume, adjusted_close
        where d is a floating point representation of date,
as returned by date2num, and date is a python standard
library datetime.date instance.
The name of this kwarg is a historical artifact. Formerly,
True returned a cbook Bunch
holding 1-D ndarrays. The behavior of a numpy recarray is
very similar to the Bunch.
ochl : bool
Selects between ochl and ohlc ordering.
Defaults to True to preserve original functionality.
"""
if ochl:
stock_dt = stock_dt_ochl
else:
stock_dt = stock_dt_ohlc
results = []
# datefmt = '%Y-%m-%d'
fh.readline() # discard heading
for line in fh:
vals = line.split(',')
if len(vals) != 7:
continue # add warning?
datestr = vals[0]
#dt = datetime.date(*time.strptime(datestr, datefmt)[:3])
# Using strptime doubles the runtime. With the present
# format, we don't need it.
dt = datetime.date(*[int(val) for val in datestr.split('-')])
dnum = date2num(dt)
open, high, low, close = [float(val) for val in vals[1:5]]
volume = float(vals[5])
aclose = float(vals[6])
if ochl:
results.append((dt, dt.year, dt.month, dt.day,
dnum, open, close, high, low, volume, aclose))
else:
results.append((dt, dt.year, dt.month, dt.day,
dnum, open, high, low, close, volume, aclose))
results.reverse()
d = np.array(results, dtype=stock_dt)
if adjusted:
scale = d['aclose'] / d['close']
scale[np.isinf(scale)] = np.nan
d['open'] *= scale
d['high'] *= scale
d['low'] *= scale
d['close'] *= scale
if not asobject:
# 2-D sequence; formerly list of tuples, now ndarray
ret = np.zeros((len(d), 6), dtype=np.float)
ret[:, 0] = d['d']
if ochl:
ret[:, 1] = d['open']
ret[:, 2] = d['close']
ret[:, 3] = d['high']
ret[:, 4] = d['low']
else:
ret[:, 1] = d['open']
ret[:, 2] = d['high']
ret[:, 3] = d['low']
ret[:, 4] = d['close']
ret[:, 5] = d['volume']
if asobject is None:
return ret
return [tuple(row) for row in ret]
return d.view(np.recarray) # Close enough to former Bunch return
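# Illustrative sketch, not part of the original module: parses a tiny
# in-memory CSV laid out like the Yahoo download format expected above
# (Date,Open,High,Low,Close,Volume,Adj Close). The rows are made-up values.
def _demo_parse_yahoo_historical():
    import io
    csv = (u"Date,Open,High,Low,Close,Volume,Adj Close\n"
           u"2000-01-04,102.0,105.0,101.0,104.0,1200.0,104.0\n"
           u"2000-01-03,100.0,103.0,99.0,102.0,1000.0,102.0\n")
    # asobject=None returns a 2-D ndarray of (d, open, high, low, close, volume)
    return _parse_yahoo_historical(io.StringIO(csv), asobject=None, ochl=False)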
def fetch_historical_yahoo(ticker, date1, date2, cachename=None,
dividends=False):
"""
Fetch historical data for ticker between date1 and date2. date1 and
date2 are date or datetime instances, or (year, month, day) sequences.
Parameters
----------
ticker : str
ticker
date1 : sequence of form (year, month, day), `datetime`, or `date`
start date
date2 : sequence of form (year, month, day), `datetime`, or `date`
end date
cachename : str
cachename is the name of the local file cache. If None, will
        default to the md5 hash of the url (which incorporates the ticker
and date range)
dividends : bool
set dividends=True to return dividends instead of price data. With
this option set, parse functions will not work
Returns
-------
file_handle : file handle
a file handle is returned
Examples
--------
>>> fh = fetch_historical_yahoo('^GSPC', (2000, 1, 1), (2001, 12, 31))
"""
ticker = ticker.upper()
if iterable(date1):
d1 = (date1[1] - 1, date1[2], date1[0])
else:
d1 = (date1.month - 1, date1.day, date1.year)
if iterable(date2):
d2 = (date2[1] - 1, date2[2], date2[0])
else:
d2 = (date2.month - 1, date2.day, date2.year)
if dividends:
g = 'v'
verbose.report('Retrieving dividends instead of prices')
else:
g = 'd'
urlFmt = ('http://ichart.yahoo.com/table.csv?a=%d&b=%d&' +
'c=%d&d=%d&e=%d&f=%d&s=%s&y=0&g=%s&ignore=.csv')
url = urlFmt % (d1[0], d1[1], d1[2],
d2[0], d2[1], d2[2], ticker, g)
# Cache the finance data if cachename is supplied, or there is a writable
# cache directory.
if cachename is None and cachedir is not None:
cachename = os.path.join(cachedir, md5(url).hexdigest())
if cachename is not None:
if os.path.exists(cachename):
fh = open(cachename)
verbose.report('Using cachefile %s for '
'%s' % (cachename, ticker))
else:
mkdirs(os.path.abspath(os.path.dirname(cachename)))
with contextlib.closing(urlopen(url)) as urlfh:
with open(cachename, 'wb') as fh:
fh.write(urlfh.read())
verbose.report('Saved %s data to cache file '
'%s' % (ticker, cachename))
fh = open(cachename, 'r')
return fh
else:
return urlopen(url)
def quotes_historical_yahoo_ochl(ticker, date1, date2, asobject=False,
adjusted=True, cachename=None):
""" Get historical data for ticker between date1 and date2.
See :func:`parse_yahoo_historical` for explanation of output formats
and the *asobject* and *adjusted* kwargs.
Parameters
----------
ticker : str
stock ticker
date1 : sequence of form (year, month, day), `datetime`, or `date`
start date
date2 : sequence of form (year, month, day), `datetime`, or `date`
end date
cachename : str or `None`
is the name of the local file cache. If None, will
        default to the md5 hash of the url (which incorporates the ticker
and date range)
Examples
--------
>>> sp = f.quotes_historical_yahoo_ochl('^GSPC', d1, d2,
asobject=True, adjusted=True)
>>> returns = (sp.open[1:] - sp.open[:-1])/sp.open[1:]
>>> [n,bins,patches] = hist(returns, 100)
>>> mu = mean(returns)
>>> sigma = std(returns)
>>> x = normpdf(bins, mu, sigma)
>>> plot(bins, x, color='red', lw=2)
"""
return _quotes_historical_yahoo(ticker, date1, date2, asobject=asobject,
adjusted=adjusted, cachename=cachename,
ochl=True)
def quotes_historical_yahoo_ohlc(ticker, date1, date2, asobject=False,
adjusted=True, cachename=None):
""" Get historical data for ticker between date1 and date2.
See :func:`parse_yahoo_historical` for explanation of output formats
and the *asobject* and *adjusted* kwargs.
Parameters
----------
ticker : str
stock ticker
date1 : sequence of form (year, month, day), `datetime`, or `date`
start date
date2 : sequence of form (year, month, day), `datetime`, or `date`
end date
cachename : str or `None`
is the name of the local file cache. If None, will
        default to the md5 hash of the url (which incorporates the ticker
and date range)
Examples
--------
>>> sp = f.quotes_historical_yahoo_ohlc('^GSPC', d1, d2,
asobject=True, adjusted=True)
>>> returns = (sp.open[1:] - sp.open[:-1])/sp.open[1:]
>>> [n,bins,patches] = hist(returns, 100)
>>> mu = mean(returns)
>>> sigma = std(returns)
>>> x = normpdf(bins, mu, sigma)
>>> plot(bins, x, color='red', lw=2)
"""
return _quotes_historical_yahoo(ticker, date1, date2, asobject=asobject,
adjusted=adjusted, cachename=cachename,
ochl=False)
def _quotes_historical_yahoo(ticker, date1, date2, asobject=False,
adjusted=True, cachename=None,
ochl=True):
""" Get historical data for ticker between date1 and date2.
See :func:`parse_yahoo_historical` for explanation of output formats
and the *asobject* and *adjusted* kwargs.
Parameters
----------
ticker : str
stock ticker
date1 : sequence of form (year, month, day), `datetime`, or `date`
start date
date2 : sequence of form (year, month, day), `datetime`, or `date`
end date
cachename : str or `None`
is the name of the local file cache. If None, will
        default to the md5 hash of the url (which incorporates the ticker
and date range)
ochl: bool
temporary argument to select between ochl and ohlc ordering
Examples
--------
>>> sp = f.quotes_historical_yahoo('^GSPC', d1, d2,
asobject=True, adjusted=True)
>>> returns = (sp.open[1:] - sp.open[:-1])/sp.open[1:]
>>> [n,bins,patches] = hist(returns, 100)
>>> mu = mean(returns)
>>> sigma = std(returns)
>>> x = normpdf(bins, mu, sigma)
>>> plot(bins, x, color='red', lw=2)
"""
# Maybe enable a warning later as part of a slow transition
# to using None instead of False.
#if asobject is False:
# warnings.warn("Recommend changing to asobject=None")
fh = fetch_historical_yahoo(ticker, date1, date2, cachename)
try:
ret = _parse_yahoo_historical(fh, asobject=asobject,
adjusted=adjusted, ochl=ochl)
if len(ret) == 0:
return None
except IOError as exc:
warnings.warn('fh failure\n%s' % (exc.strerror[1]))
return None
return ret
def plot_day_summary_oclh(ax, quotes, ticksize=3,
colorup='k', colordown='r',
):
"""Plots day summary
Represent the time, open, close, high, low as a vertical line
ranging from low to high. The left tick is the open and the right
tick is the close.
Parameters
----------
ax : `Axes`
an `Axes` instance to plot to
quotes : sequence of (time, open, close, high, low, ...) sequences
data to plot. time must be in float date format - see date2num
ticksize : int
open/close tick marker in points
colorup : color
the color of the lines where close >= open
colordown : color
the color of the lines where close < open
Returns
-------
lines : list
list of tuples of the lines added (one tuple per quote)
"""
return _plot_day_summary(ax, quotes, ticksize=ticksize,
colorup=colorup, colordown=colordown,
ochl=True)
def plot_day_summary_ohlc(ax, quotes, ticksize=3,
colorup='k', colordown='r',
):
"""Plots day summary
Represent the time, open, high, low, close as a vertical line
ranging from low to high. The left tick is the open and the right
tick is the close.
Parameters
----------
ax : `Axes`
an `Axes` instance to plot to
quotes : sequence of (time, open, high, low, close, ...) sequences
data to plot. time must be in float date format - see date2num
ticksize : int
open/close tick marker in points
colorup : color
the color of the lines where close >= open
colordown : color
the color of the lines where close < open
Returns
-------
lines : list
list of tuples of the lines added (one tuple per quote)
"""
return _plot_day_summary(ax, quotes, ticksize=ticksize,
colorup=colorup, colordown=colordown,
ochl=False)
def _plot_day_summary(ax, quotes, ticksize=3,
colorup='k', colordown='r',
ochl=True
):
"""Plots day summary
Represent the time, open, high, low, close as a vertical line
ranging from low to high. The left tick is the open and the right
tick is the close.
Parameters
----------
ax : `Axes`
an `Axes` instance to plot to
quotes : sequence of quote sequences
data to plot. time must be in float date format - see date2num
(time, open, high, low, close, ...) vs
(time, open, close, high, low, ...)
set by `ochl`
ticksize : int
open/close tick marker in points
colorup : color
the color of the lines where close >= open
colordown : color
the color of the lines where close < open
ochl: bool
argument to select between ochl and ohlc ordering of quotes
Returns
-------
lines : list
list of tuples of the lines added (one tuple per quote)
"""
# unfortunately this has a different return type than plot_day_summary2_*
lines = []
for q in quotes:
if ochl:
t, open, close, high, low = q[:5]
else:
t, open, high, low, close = q[:5]
if close >= open:
color = colorup
else:
color = colordown
vline = Line2D(xdata=(t, t), ydata=(low, high),
color=color,
antialiased=False, # no need to antialias vert lines
)
oline = Line2D(xdata=(t, t), ydata=(open, open),
color=color,
antialiased=False,
marker=TICKLEFT,
markersize=ticksize,
)
cline = Line2D(xdata=(t, t), ydata=(close, close),
color=color,
antialiased=False,
markersize=ticksize,
marker=TICKRIGHT)
lines.extend((vline, oline, cline))
ax.add_line(vline)
ax.add_line(oline)
ax.add_line(cline)
ax.autoscale_view()
return lines
def candlestick_ochl(ax, quotes, width=0.2, colorup='k', colordown='r',
alpha=1.0):
"""
Plot the time, open, close, high, low as a vertical line ranging
from low to high. Use a rectangular bar to represent the
open-close span. If close >= open, use colorup to color the bar,
otherwise use colordown
Parameters
----------
ax : `Axes`
an Axes instance to plot to
quotes : sequence of (time, open, close, high, low, ...) sequences
As long as the first 5 elements are these values,
the record can be as long as you want (e.g., it may store volume).
time must be in float days format - see date2num
width : float
fraction of a day for the rectangle width
colorup : color
the color of the rectangle where close >= open
colordown : color
the color of the rectangle where close < open
alpha : float
the rectangle alpha level
Returns
-------
ret : tuple
returns (lines, patches) where lines is a list of lines
added and patches is a list of the rectangle patches added
"""
return _candlestick(ax, quotes, width=width, colorup=colorup,
colordown=colordown,
alpha=alpha, ochl=True)
def candlestick_ohlc(ax, quotes, width=0.2, colorup='k', colordown='r',
alpha=1.0):
"""
Plot the time, open, high, low, close as a vertical line ranging
from low to high. Use a rectangular bar to represent the
open-close span. If close >= open, use colorup to color the bar,
otherwise use colordown
Parameters
----------
ax : `Axes`
an Axes instance to plot to
quotes : sequence of (time, open, high, low, close, ...) sequences
As long as the first 5 elements are these values,
the record can be as long as you want (e.g., it may store volume).
time must be in float days format - see date2num
width : float
fraction of a day for the rectangle width
colorup : color
the color of the rectangle where close >= open
colordown : color
the color of the rectangle where close < open
alpha : float
the rectangle alpha level
Returns
-------
ret : tuple
returns (lines, patches) where lines is a list of lines
added and patches is a list of the rectangle patches added
"""
return _candlestick(ax, quotes, width=width, colorup=colorup,
colordown=colordown,
alpha=alpha, ochl=False)
def _candlestick(ax, quotes, width=0.2, colorup='k', colordown='r',
alpha=1.0, ochl=True):
"""
Plot the time, open, high, low, close as a vertical line ranging
from low to high. Use a rectangular bar to represent the
open-close span. If close >= open, use colorup to color the bar,
otherwise use colordown
Parameters
----------
ax : `Axes`
an Axes instance to plot to
quotes : sequence of quote sequences
data to plot. time must be in float date format - see date2num
(time, open, high, low, close, ...) vs
(time, open, close, high, low, ...)
set by `ochl`
width : float
fraction of a day for the rectangle width
colorup : color
the color of the rectangle where close >= open
colordown : color
the color of the rectangle where close < open
alpha : float
the rectangle alpha level
ochl: bool
argument to select between ochl and ohlc ordering of quotes
Returns
-------
ret : tuple
returns (lines, patches) where lines is a list of lines
added and patches is a list of the rectangle patches added
"""
OFFSET = width / 2.0
lines = []
patches = []
for q in quotes:
if ochl:
t, open, close, high, low = q[:5]
else:
t, open, high, low, close = q[:5]
if close >= open:
color = colorup
lower = open
height = close - open
else:
color = colordown
lower = close
height = open - close
vline = Line2D(
xdata=(t, t), ydata=(low, high),
color=color,
linewidth=0.5,
antialiased=True,
)
rect = Rectangle(
xy=(t - OFFSET, lower),
width=width,
height=height,
facecolor=color,
edgecolor=color,
)
rect.set_alpha(alpha)
lines.append(vline)
patches.append(rect)
ax.add_line(vline)
ax.add_patch(rect)
ax.autoscale_view()
return lines, patches
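# Illustrative sketch, not part of the original module: draws a few
# candlesticks from hand-made (t, open, high, low, close, volume) quotes.
# The dates, prices and volumes are arbitrary demonstration values.
def _demo_candlestick_ohlc():
    import matplotlib.pyplot as plt
    quotes = [
        (date2num(datetime.date(2000, 1, 3)), 100., 103., 99., 102., 1000.),
        (date2num(datetime.date(2000, 1, 4)), 102., 105., 101., 104., 1200.),
        (date2num(datetime.date(2000, 1, 5)), 104., 104.5, 100., 101., 900.),
    ]
    fig, ax = plt.subplots()
    lines, patches = candlestick_ohlc(ax, quotes, width=0.4)
    return fig, lines, patches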
def _check_input(opens, closes, highs, lows, miss=-1):
"""Checks that *opens*, *highs*, *lows* and *closes* have the same length.
NOTE: this code assumes if any value open, high, low, close is
missing (*-1*) they all are missing
Parameters
----------
ax : `Axes`
an Axes instance to plot to
opens : sequence
sequence of opening values
highs : sequence
sequence of high values
lows : sequence
sequence of low values
closes : sequence
sequence of closing values
miss : int
identifier of the missing data
Raises
------
ValueError
if the input sequences don't have the same length
"""
def _missing(sequence, miss=-1):
"""Returns the index in *sequence* of the missing data, identified by
*miss*
Parameters
----------
sequence :
sequence to evaluate
miss :
identifier of the missing data
Returns
-------
where_miss: numpy.ndarray
indices of the missing data
"""
return np.where(np.array(sequence) == miss)[0]
same_length = len(opens) == len(highs) == len(lows) == len(closes)
_missopens = _missing(opens)
same_missing = ((_missopens == _missing(highs)).all() and
(_missopens == _missing(lows)).all() and
(_missopens == _missing(closes)).all())
if not (same_length and same_missing):
msg = ("*opens*, *highs*, *lows* and *closes* must have the same"
" length. NOTE: this code assumes if any value open, high,"
" low, close is missing (*-1*) they all must be missing.")
raise ValueError(msg)
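# Illustrative sketch, not part of the original module: shows the -1
# missing-data convention that _check_input enforces; the sentinel must
# appear at the same positions in all four series. The values are made up.
def _demo_check_input_missing_convention():
    opens = [100., -1, 104.]
    highs = [103., -1, 104.5]
    lows = [99., -1, 100.]
    closes = [102., -1, 101.]
    _check_input(opens, highs, lows, closes)  # passes: -1 aligned across series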
def plot_day_summary2_ochl(ax, opens, closes, highs, lows, ticksize=4,
colorup='k', colordown='r',
):
"""Represent the time, open, close, high, low, as a vertical line
ranging from low to high. The left tick is the open and the right
tick is the close.
Parameters
----------
ax : `Axes`
an Axes instance to plot to
opens : sequence
sequence of opening values
closes : sequence
sequence of closing values
highs : sequence
sequence of high values
lows : sequence
sequence of low values
ticksize : int
size of open and close ticks in points
colorup : color
the color of the lines where close >= open
colordown : color
the color of the lines where close < open
Returns
-------
ret : list
a list of lines added to the axes
"""
return plot_day_summary2_ohlc(ax, opens, highs, lows, closes, ticksize,
colorup, colordown)
def plot_day_summary2_ohlc(ax, opens, highs, lows, closes, ticksize=4,
colorup='k', colordown='r',
):
"""Represent the time, open, high, low, close as a vertical line
ranging from low to high. The left tick is the open and the right
tick is the close.
*opens*, *highs*, *lows* and *closes* must have the same length.
NOTE: this code assumes if any value open, high, low, close is
missing (*-1*) they all are missing
Parameters
----------
ax : `Axes`
an Axes instance to plot to
opens : sequence
sequence of opening values
highs : sequence
sequence of high values
lows : sequence
sequence of low values
closes : sequence
sequence of closing values
ticksize : int
size of open and close ticks in points
colorup : color
the color of the lines where close >= open
colordown : color
the color of the lines where close < open
Returns
-------
ret : list
a list of lines added to the axes
"""
_check_input(opens, highs, lows, closes)
rangeSegments = [((i, low), (i, high)) for i, low, high in
zip(xrange(len(lows)), lows, highs) if low != -1]
# the ticks will be from ticksize to 0 in points at the origin and
# we'll translate these to the i, close location
openSegments = [((-ticksize, 0), (0, 0))]
# the ticks will be from 0 to ticksize in points at the origin and
# we'll translate these to the i, close location
closeSegments = [((0, 0), (ticksize, 0))]
offsetsOpen = [(i, open) for i, open in
zip(xrange(len(opens)), opens) if open != -1]
offsetsClose = [(i, close) for i, close in
zip(xrange(len(closes)), closes) if close != -1]
scale = ax.figure.dpi * (1.0 / 72.0)
tickTransform = Affine2D().scale(scale, 0.0)
colorup = mcolors.to_rgba(colorup)
colordown = mcolors.to_rgba(colordown)
colord = {True: colorup, False: colordown}
colors = [colord[open < close] for open, close in
zip(opens, closes) if open != -1 and close != -1]
useAA = 0, # use tuple here
lw = 1, # and here
rangeCollection = LineCollection(rangeSegments,
colors=colors,
linewidths=lw,
antialiaseds=useAA,
)
openCollection = LineCollection(openSegments,
colors=colors,
antialiaseds=useAA,
linewidths=lw,
offsets=offsetsOpen,
transOffset=ax.transData,
)
openCollection.set_transform(tickTransform)
closeCollection = LineCollection(closeSegments,
colors=colors,
antialiaseds=useAA,
linewidths=lw,
offsets=offsetsClose,
transOffset=ax.transData,
)
closeCollection.set_transform(tickTransform)
minpy, maxx = (0, len(rangeSegments))
miny = min([low for low in lows if low != -1])
maxy = max([high for high in highs if high != -1])
corners = (minpy, miny), (maxx, maxy)
ax.update_datalim(corners)
ax.autoscale_view()
# add these last
ax.add_collection(rangeCollection)
ax.add_collection(openCollection)
ax.add_collection(closeCollection)
return rangeCollection, openCollection, closeCollection
def candlestick2_ochl(ax, opens, closes, highs, lows, width=4,
colorup='k', colordown='r',
alpha=0.75,
):
"""Represent the open, close as a bar line and high low range as a
vertical line.
Preserves the original argument order.
Parameters
----------
ax : `Axes`
an Axes instance to plot to
opens : sequence
sequence of opening values
closes : sequence
sequence of closing values
highs : sequence
sequence of high values
lows : sequence
sequence of low values
    width : float
        the width of the open-close bars
colorup : color
the color of the lines where close >= open
colordown : color
the color of the lines where close < open
alpha : float
bar transparency
Returns
-------
ret : tuple
(lineCollection, barCollection)
"""
candlestick2_ohlc(ax, opens, highs, lows, closes, width=width,
colorup=colorup, colordown=colordown,
alpha=alpha)
def candlestick2_ohlc(ax, opens, highs, lows, closes, width=4,
colorup='k', colordown='r',
alpha=0.75,
):
"""Represent the open, close as a bar line and high low range as a
vertical line.
NOTE: this code assumes if any value open, low, high, close is
missing they all are missing
Parameters
----------
ax : `Axes`
an Axes instance to plot to
opens : sequence
sequence of opening values
highs : sequence
sequence of high values
lows : sequence
sequence of low values
closes : sequence
sequence of closing values
    width : float
        the width of the open-close bars
colorup : color
the color of the lines where close >= open
colordown : color
the color of the lines where close < open
alpha : float
bar transparency
Returns
-------
ret : tuple
(lineCollection, barCollection)
"""
_check_input(opens, highs, lows, closes)
delta = width / 2.
barVerts = [((i - delta, open),
(i - delta, close),
(i + delta, close),
(i + delta, open))
for i, open, close in zip(xrange(len(opens)), opens, closes)
if open != -1 and close != -1]
rangeSegments = [((i, low), (i, high))
for i, low, high in zip(xrange(len(lows)), lows, highs)
if low != -1]
colorup = mcolors.to_rgba(colorup, alpha)
colordown = mcolors.to_rgba(colordown, alpha)
colord = {True: colorup, False: colordown}
colors = [colord[open < close]
for open, close in zip(opens, closes)
if open != -1 and close != -1]
useAA = 0, # use tuple here
lw = 0.5, # and here
rangeCollection = LineCollection(rangeSegments,
colors=((0, 0, 0, 1), ),
linewidths=lw,
antialiaseds=useAA,
)
barCollection = PolyCollection(barVerts,
facecolors=colors,
edgecolors=((0, 0, 0, 1), ),
antialiaseds=useAA,
linewidths=lw,
)
minx, maxx = 0, len(rangeSegments)
miny = min([low for low in lows if low != -1])
maxy = max([high for high in highs if high != -1])
corners = (minx, miny), (maxx, maxy)
ax.update_datalim(corners)
ax.autoscale_view()
# add these last
ax.add_collection(rangeCollection)
ax.add_collection(barCollection)
return rangeCollection, barCollection
def volume_overlay(ax, opens, closes, volumes,
colorup='k', colordown='r',
width=4, alpha=1.0):
"""Add a volume overlay to the current axes. The opens and closes
are used to determine the color of the bar. -1 is missing. If a
value is missing on one it must be missing on all
Parameters
----------
ax : `Axes`
an Axes instance to plot to
opens : sequence
a sequence of opens
closes : sequence
a sequence of closes
volumes : sequence
a sequence of volumes
width : int
the bar width in points
colorup : color
the color of the lines where close >= open
colordown : color
the color of the lines where close < open
alpha : float
bar transparency
Returns
-------
ret : `barCollection`
        The `barCollection` added to the axes
"""
colorup = mcolors.to_rgba(colorup, alpha)
colordown = mcolors.to_rgba(colordown, alpha)
colord = {True: colorup, False: colordown}
colors = [colord[open < close]
for open, close in zip(opens, closes)
if open != -1 and close != -1]
delta = width / 2.
bars = [((i - delta, 0), (i - delta, v), (i + delta, v), (i + delta, 0))
for i, v in enumerate(volumes)
if v != -1]
barCollection = PolyCollection(bars,
facecolors=colors,
edgecolors=((0, 0, 0, 1), ),
antialiaseds=(0,),
linewidths=(0.5,),
)
ax.add_collection(barCollection)
corners = (0, 0), (len(bars), max(volumes))
ax.update_datalim(corners)
ax.autoscale_view()
# add these last
return barCollection
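# Illustrative sketch, not part of the original module: overlays volume bars
# coloured by the open/close relation on a fresh Axes. The prices and volumes
# are arbitrary demonstration values.
def _demo_volume_overlay():
    import matplotlib.pyplot as plt
    opens = [100., 102., 104.]
    closes = [102., 104., 101.]
    volumes = [1000., 1200., 900.]
    fig, ax = plt.subplots()
    bars = volume_overlay(ax, opens, closes, volumes, width=0.8, alpha=0.6)
    return fig, bars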
def volume_overlay2(ax, closes, volumes,
colorup='k', colordown='r',
width=4, alpha=1.0):
"""
Add a volume overlay to the current axes. The closes are used to
determine the color of the bar. -1 is missing. If a value is
missing on one it must be missing on all
nb: first point is not displayed - it is used only for choosing the
right color
Parameters
----------
ax : `Axes`
an Axes instance to plot to
closes : sequence
a sequence of closes
volumes : sequence
a sequence of volumes
width : int
the bar width in points
colorup : color
the color of the lines where close >= open
colordown : color
the color of the lines where close < open
alpha : float
bar transparency
Returns
-------
ret : `barCollection`
        The `barCollection` added to the axes
"""
return volume_overlay(ax, closes[:-1], closes[1:], volumes[1:],
colorup, colordown, width, alpha)
def volume_overlay3(ax, quotes,
colorup='k', colordown='r',
width=4, alpha=1.0):
"""Add a volume overlay to the current axes. quotes is a list of (d,
open, high, low, close, volume) and close-open is used to
determine the color of the bar
Parameters
----------
ax : `Axes`
an Axes instance to plot to
quotes : sequence of (time, open, high, low, close, ...) sequences
data to plot. time must be in float date format - see date2num
width : int
the bar width in points
colorup : color
the color of the lines where close1 >= close0
colordown : color
the color of the lines where close1 < close0
alpha : float
bar transparency
Returns
-------
ret : `barCollection`
        The `barCollection` added to the axes
"""
colorup = mcolors.to_rgba(colorup, alpha)
colordown = mcolors.to_rgba(colordown, alpha)
colord = {True: colorup, False: colordown}
dates, opens, highs, lows, closes, volumes = list(zip(*quotes))
colors = [colord[close1 >= close0]
for close0, close1 in zip(closes[:-1], closes[1:])
if close0 != -1 and close1 != -1]
colors.insert(0, colord[closes[0] >= opens[0]])
right = width / 2.0
left = -width / 2.0
bars = [((left, 0), (left, volume), (right, volume), (right, 0))
for d, open, high, low, close, volume in quotes]
sx = ax.figure.dpi * (1.0 / 72.0) # scale for points
sy = ax.bbox.height / ax.viewLim.height
barTransform = Affine2D().scale(sx, sy)
dates = [d for d, open, high, low, close, volume in quotes]
offsetsBars = [(d, 0) for d in dates]
useAA = 0, # use tuple here
lw = 0.5, # and here
barCollection = PolyCollection(bars,
facecolors=colors,
edgecolors=((0, 0, 0, 1),),
antialiaseds=useAA,
linewidths=lw,
offsets=offsetsBars,
transOffset=ax.transData,
)
barCollection.set_transform(barTransform)
minpy, maxx = (min(dates), max(dates))
miny = 0
maxy = max([volume for d, open, high, low, close, volume in quotes])
corners = (minpy, miny), (maxx, maxy)
ax.update_datalim(corners)
#print 'datalim', ax.dataLim.bounds
#print 'viewlim', ax.viewLim.bounds
ax.add_collection(barCollection)
ax.autoscale_view()
return barCollection
def index_bar(ax, vals,
              facecolor='b', edgecolor='k',
width=4, alpha=1.0, ):
"""Add a bar collection graph with height vals (-1 is missing).
Parameters
----------
ax : `Axes`
an Axes instance to plot to
vals : sequence
a sequence of values
facecolor : color
the color of the bar face
edgecolor : color
the color of the bar edges
width : int
the bar width in points
alpha : float
bar transparency
Returns
-------
ret : `barCollection`
        The `barCollection` added to the axes
"""
facecolors = (mcolors.to_rgba(facecolor, alpha),)
edgecolors = (mcolors.to_rgba(edgecolor, alpha),)
right = width / 2.0
left = -width / 2.0
bars = [((left, 0), (left, v), (right, v), (right, 0))
for v in vals if v != -1]
sx = ax.figure.dpi * (1.0 / 72.0) # scale for points
sy = ax.bbox.height / ax.viewLim.height
barTransform = Affine2D().scale(sx, sy)
offsetsBars = [(i, 0) for i, v in enumerate(vals) if v != -1]
barCollection = PolyCollection(bars,
facecolors=facecolors,
edgecolors=edgecolors,
antialiaseds=(0,),
linewidths=(0.5,),
offsets=offsetsBars,
transOffset=ax.transData,
)
barCollection.set_transform(barTransform)
minpy, maxx = (0, len(offsetsBars))
miny = 0
maxy = max([v for v in vals if v != -1])
corners = (minpy, miny), (maxx, maxy)
ax.update_datalim(corners)
ax.autoscale_view()
# add these last
ax.add_collection(barCollection)
return barCollection
| gpl-3.0 |
jmargeta/scikit-learn | benchmarks/bench_random_projections.py | 4 | 8894 | """
===========================
Random projection benchmark
===========================
Benchmarks for random projections.
"""
from __future__ import division
from __future__ import print_function
import gc
import sys
import optparse
from datetime import datetime
import collections
import numpy as np
import scipy.sparse as sp
from sklearn import clone
from sklearn.externals.six.moves import xrange
from sklearn.random_projection import (SparseRandomProjection,
GaussianRandomProjection,
johnson_lindenstrauss_min_dim)
def type_auto_or_float(val):
if val == "auto":
return "auto"
else:
return float(val)
def type_auto_or_int(val):
if val == "auto":
return "auto"
else:
return int(val)
def compute_time(t_start, delta):
mu_second = 0.0 + 10 ** 6 # number of microseconds in a second
return delta.seconds + delta.microseconds / mu_second
def bench_scikit_transformer(X, transformer):
gc.collect()
    clf = clone(transformer)
# start time
t_start = datetime.now()
clf.fit(X)
delta = (datetime.now() - t_start)
# stop time
time_to_fit = compute_time(t_start, delta)
# start time
t_start = datetime.now()
clf.transform(X)
delta = (datetime.now() - t_start)
# stop time
time_to_transform = compute_time(t_start, delta)
return time_to_fit, time_to_transform
# Make some random data with uniformly located non zero entries with
# Gaussian distributed values
def make_sparse_random_data(n_samples, n_features, n_nonzeros,
random_state=None):
rng = np.random.RandomState(random_state)
data_coo = sp.coo_matrix(
(rng.randn(n_nonzeros),
(rng.randint(n_samples, size=n_nonzeros),
rng.randint(n_features, size=n_nonzeros))),
shape=(n_samples, n_features))
return data_coo.toarray(), data_coo.tocsr()
def print_row(clf_type, time_fit, time_transform):
print("%s | %s | %s" % (clf_type.ljust(30),
("%.4fs" % time_fit).center(12),
("%.4fs" % time_transform).center(12)))
if __name__ == "__main__":
###########################################################################
# Option parser
###########################################################################
op = optparse.OptionParser()
op.add_option("--n-times",
dest="n_times", default=5, type=int,
help="Bench results are average over n_times experiments")
op.add_option("--n-features",
dest="n_features", default=10 ** 4, type=int,
help="Number of features in the benchmarks")
op.add_option("--n-components",
dest="n_components", default="auto",
help="Size of the random subspace."
"('auto' or int > 0)")
op.add_option("--ratio-nonzeros",
dest="ratio_nonzeros", default=10 ** -3, type=float,
help="Number of features in the benchmarks")
op.add_option("--n-samples",
dest="n_samples", default=500, type=int,
help="Number of samples in the benchmarks")
op.add_option("--random-seed",
dest="random_seed", default=13, type=int,
help="Seed used by the random number generators.")
op.add_option("--density",
dest="density", default=1 / 3,
help="Density used by the sparse random projection."
"('auto' or float (0.0, 1.0]")
op.add_option("--eps",
dest="eps", default=0.5, type=float,
help="See the documentation of the underlying transformers.")
op.add_option("--transformers",
dest="selected_transformers",
default='GaussianRandomProjection,SparseRandomProjection',
type=str,
help="Comma-separated list of transformer to benchmark. "
"Default: %default. Available: "
"GaussianRandomProjection,SparseRandomProjection")
op.add_option("--dense",
dest="dense",
default=False,
action="store_true",
help="Set input space as a dense matrix.")
(opts, args) = op.parse_args()
if len(args) > 0:
op.error("this script takes no arguments.")
sys.exit(1)
opts.n_components = type_auto_or_int(opts.n_components)
opts.density = type_auto_or_float(opts.density)
selected_transformers = opts.selected_transformers.split(',')
###########################################################################
# Generate dataset
###########################################################################
n_nonzeros = int(opts.ratio_nonzeros * opts.n_features)
    print('Dataset statistics')
print("===========================")
print('n_samples \t= %s' % opts.n_samples)
print('n_features \t= %s' % opts.n_features)
if opts.n_components == "auto":
print('n_components \t= %s (auto)' %
johnson_lindenstrauss_min_dim(n_samples=opts.n_samples,
eps=opts.eps))
else:
print('n_components \t= %s' % opts.n_components)
print('n_elements \t= %s' % (opts.n_features * opts.n_samples))
print('n_nonzeros \t= %s per feature' % n_nonzeros)
print('ratio_nonzeros \t= %s' % opts.ratio_nonzeros)
print('')
###########################################################################
# Set transformer input
###########################################################################
transformers = {}
###########################################################################
# Set GaussianRandomProjection input
gaussian_matrix_params = {
"n_components": opts.n_components,
"random_state": opts.random_seed
}
transformers["GaussianRandomProjection"] = \
GaussianRandomProjection(**gaussian_matrix_params)
###########################################################################
# Set SparseRandomProjection input
sparse_matrix_params = {
"n_components": opts.n_components,
"random_state": opts.random_seed,
"density": opts.density,
"eps": opts.eps,
}
transformers["SparseRandomProjection"] = \
SparseRandomProjection(**sparse_matrix_params)
###########################################################################
# Perform benchmark
###########################################################################
time_fit = collections.defaultdict(list)
time_transform = collections.defaultdict(list)
print('Benchmarks')
print("===========================")
print("Generate dataset benchmarks... ", end="")
X_dense, X_sparse = make_sparse_random_data(opts.n_samples,
opts.n_features,
n_nonzeros,
random_state=opts.random_seed)
X = X_dense if opts.dense else X_sparse
print("done")
for name in selected_transformers:
print("Perform benchmarks for %s..." % name)
for iteration in xrange(opts.n_times):
print("\titer %s..." % iteration, end="")
            time_to_fit, time_to_transform = bench_scikit_transformer(
                X, transformers[name])
time_fit[name].append(time_to_fit)
time_transform[name].append(time_to_transform)
print("done")
print("")
###########################################################################
# Print results
###########################################################################
print("Script arguments")
print("===========================")
arguments = vars(opts)
print("%s \t | %s " % ("Arguments".ljust(16),
"Value".center(12),))
print(25 * "-" + ("|" + "-" * 14) * 1)
for key, value in arguments.items():
print("%s \t | %s " % (str(key).ljust(16),
str(value).strip().center(12)))
print("")
print("Transformer performance:")
print("===========================")
print("Results are averaged over %s repetition(s)." % opts.n_times)
print("")
print("%s | %s | %s" % ("Transformer".ljust(30),
"fit".center(12),
"transform".center(12)))
print(31 * "-" + ("|" + "-" * 14) * 2)
for name in sorted(selected_transformers):
print_row(name,
np.mean(time_fit[name]),
np.mean(time_transform[name]))
print("")
print("")
| bsd-3-clause |
JohanComparat/pySU | galaxy/python/ModelSpectraStacks.py | 1 | 22503 | """
.. moduleauthor:: Johan Comparat <johan.comparat__at__gmail.com>
General purpose:
................
The class ModelSpectraStacks is dedicated to modelling and extracting information from stacks of spectra.
*Imports*::
import matplotlib
matplotlib.use('pdf')
import matplotlib.pyplot as p
import os
import astropy.cosmology as co
cosmo=co.FlatLambdaCDM(H0=70,Om0=0.3)
import astropy.units as u
import astropy.io.fits as fits
import numpy as n
from scipy.optimize import curve_fit
from scipy.interpolate import interp1d
from scipy.stats import scoreatpercentile
import astropy.io.fits as fits
from lineListAir import *
import LineFittingLibrary as lineFit
"""
import matplotlib
matplotlib.use('pdf')
import matplotlib.pyplot as p
import os
from os.path import join
import astropy.cosmology as co
cosmo=co.Planck13 #co.FlatLambdaCDM(H0=70,Om0=0.3)
import astropy.units as u
import astropy.io.fits as fits
import numpy as n
from scipy.optimize import curve_fit
from scipy.interpolate import interp1d
from scipy.stats import scoreatpercentile
import astropy.io.fits as fits
from lineListVac import *
allLinesList = n.array([ [Ne3,Ne3_3869,"Ne3_3869","left"], [Ne3,Ne3_3968,"Ne3_3968","left"], [O3,O3_4363,"O3_4363","right"], [O3,O3_4960,"O3_4960","left"], [O3,O3_5007,"O3_5007","right"], [H1,H1_3970,"H1_3970","right"], [H1,H1_4102,"H1_4102","right"], [H1,H1_4341,"H1_4341","right"], [H1,H1_4862,"H1_4862","left"]])
# other lines that are optional
# [N2,N2_6549,"N2_6549","left"], [N2,N2_6585,"N2_6585","right"] , [H1,H1_6564,"H1_6564","left"]
# , [S2,S2_6718,"S2_6718","left"], [S2,S2_6732,"S2_6732","right"], [Ar3,Ar3_7137,"Ar3_7137","left"], [H1,H1_1216,"H1_1216","right"]
doubletList = n.array([[O2_3727,"O2_3727",O2_3729,"O2_3729",O2_mean]])
# import the fitting routines
import LineFittingLibrary as lineFit
#O2a=3727.092
#O2b=3729.875
#O2=(O2a+O2b)/2.
#Hg=4102.892
#Hd=4341.684
#Hb=4862.683
#O3a=4960.295
#O3b=5008.240
#Ha=6564.61
fnu = lambda mAB : 10**(-(mAB+48.6)/2.5) # erg/cm2/s/Hz
flambda= lambda mAB, ll : 10**10 * c*1000 * fnu(mAB) / ll**2. # erg/cm2/s/A
kla=lambda ll :2.659 *(-2.156+1.509/ll-0.198/ll**2+0.011/ll**3 ) + 4.05
klb=lambda ll :2.659 *(-1.857+1.040/ll)+4.05
def kl(ll):
"""Calzetti extinction law"""
if ll>6300:
return klb(ll)
if ll<=6300:
return kla(ll)
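# Illustrative sketch, not part of the original module: evaluates kl() on
# either side of the 6300 threshold to show how it switches between the two
# Calzetti-law branches kla and klb defined above. The wavelength values are
# arbitrary and follow whatever unit convention kla and klb expect.
def example_kl_branches():
	return kl(5000.), kl(6300.), kl(6600.)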
class ModelSpectraStacks:
"""
This class fits the emission lines on the continuum-subtracted stack.
:param stack_file: fits file generated with a LF in a luminosity bin.
:param cosmo: cosmology class from astropy
:param firefly_min_wavelength: minimum wavelength considered by firefly (default : 1000)
	:param firefly_max_wavelength: maximum wavelength considered by firefly (default : 7500)
:param dV: default value that hold the place (default : -9999.99)
	:param N_spectra_limitFraction: If the stack was made with N spectra, N_spectra_limitFraction selects the points that were computed using more than N_spectra_limitFraction * N spectra. (default : 0.8)
"""
def __init__(self, stack_file, model_file, mode="MILES", cosmo=cosmo, firefly_min_wavelength= 1000., firefly_max_wavelength=7500., dV=-9999.99, N_spectra_limitFraction=0.8, tutorial = False, eboss_stack = False):
self.stack_file = stack_file
self.stack_file_base = os.path.basename(stack_file)[:-5]
self.lineName = self.stack_file_base[:7]
self.stack_model_file = model_file
self.mode = mode
self.tutorial = tutorial
self.eboss_stack = eboss_stack
# retrieves the firefly model for the stack: stack_model_file
"""
if self.mode=="MILES":
self.stack_model_file = join( os.environ['SPECTRASTACKS_DIR'], "fits", self.lineName, self.stack_file_base + "-SPM-MILES.fits")
if self.mode=="STELIB":
self.stack_model_file = join( os.environ['SPECTRASTACKS_DIR'], "fits", self.lineName, self.stack_file_base + "-SPM-STELIB.fits")
"""
if self.tutorial :
self.stack_model_file = join( os.environ['DATA_DIR'], "ELG-composite", self.stack_file_base + "-SPM-MILES.fits")
if self.mode=="EBOSS": #eboss_stack :
self.stack_model_file = join(os.environ['EBOSS_TARGET'],"elg", "tests", "stacks", "fits", self.stack_file_base[:-6]+ "-SPM-MILES.fits")
self.redshift = 0.85
else :
self.redshift = float(self.stack_file_base.split('-')[2].split('_')[0][1:])
self.cosmo = cosmo
self.firefly_max_wavelength = firefly_max_wavelength
self.firefly_min_wavelength = firefly_min_wavelength
self.dV = dV
self.side = ''
self.N_spectra_limitFraction = N_spectra_limitFraction
# define self.sphereCM, find redshift ...
sphere=4*n.pi*( self.cosmo.luminosity_distance(self.redshift) )**2.
self.sphereCM=sphere.to(u.cm**2)
self.hdus = fits.open(self.stack_file)
self.hdR = self.hdus[0].header
self.hdu1 = self.hdus[1] # .data
print "Loads the data."
#print self.hdu1.data.dtype
if self.tutorial :
wlA, flA, flErrA = self.hdu1.data['WAVE'][0], self.hdu1.data['FLUXMEDIAN'][0]*10**(-17), self.hdu1.data['FLUXMEDIAN_ERR'][0]*10**(-17)
self.selection = (flA>0)
self.wl,self.fl,self.flErr = wlA[self.selection], flA[self.selection], flErrA[self.selection]
self.stack=interp1d(self.wl,self.fl)
self.stackErr=interp1d(self.wl,self.flErr)
# loads model :
hdus = fits.open(self.stack_model_file)
self.hdu2 = hdus[1] # .data
self.wlModel,self.flModel = self.hdu2.data['wavelength'], self.hdu2.data['firefly_model']*10**(-17)
self.model=interp1d(n.hstack((self.wlModel,[n.max(self.wlModel)+10,11000])), n.hstack(( self.flModel, [n.median(self.flModel[:-20]),n.median(self.flModel[:-20])] )) )
# wavelength range common to the stack and the model :
self.wlLineSpectrum = n.arange(n.max([self.stack.x.min(),self.model.x.min()]), n.min([self.stack.x.max(),self.model.x.max()]), 0.5)[2:-1]
self.flLineSpectrum=n.array([self.stack(xx)-self.model(xx) for xx in self.wlLineSpectrum])
self.fl_frac_LineSpectrum=n.array([self.stack(xx)/self.model(xx) for xx in self.wlLineSpectrum])
self.flErrLineSpectrum=self.stackErr(self.wlLineSpectrum)
elif eboss_stack :
print self.hdu1.data.dtype
wlA,flA,flErrA = self.hdu1.data['wavelength'], self.hdu1.data['meanWeightedStack']*10**(-17), self.hdu1.data['jackknifStackErrors'] * 10**(-17)
self.selection = (flA>0)
self.wl,self.fl,self.flErr = wlA[self.selection], flA[self.selection], flErrA[self.selection]
self.stack=interp1d(self.wl,self.fl)
self.stackErr=interp1d(self.wl,self.flErr)
# loads model :
hdus = fits.open(self.stack_model_file)
self.hdu2 = hdus[1] # .data
self.wlModel,self.flModel = self.hdu2.data['wavelength'], self.hdu2.data['firefly_model']*10**(-17)
self.model=interp1d(n.hstack((self.wlModel,[n.max(self.wlModel)+10,11000])), n.hstack(( self.flModel, [n.median(self.flModel[:-20]),n.median(self.flModel[:-20])] )) )
# wavelength range common to the stack and the model :
self.wlLineSpectrum = n.arange(n.max([self.stack.x.min(),self.model.x.min()]), n.min([self.stack.x.max(),self.model.x.max()]), 0.5)[2:-1]
self.flLineSpectrum=n.array([self.stack(xx)-self.model(xx) for xx in self.wlLineSpectrum])
self.fl_frac_LineSpectrum=n.array([self.stack(xx)/self.model(xx) for xx in self.wlLineSpectrum])
self.flErrLineSpectrum=self.stackErr(self.wlLineSpectrum)
else:
wlA,flA,flErrA = self.hdu1.data['wavelength'], self.hdu1.data['meanWeightedStack'], self.hdu1.data['jackknifStackErrors']
self.selection = (flA>0) & (self.hdu1.data['NspectraPerPixel'] > float( self.stack_file.split('_')[-5]) * self.N_spectra_limitFraction )
self.wl,self.fl,self.flErr = wlA[self.selection], flA[self.selection], flErrA[self.selection]
self.stack=interp1d(self.wl,self.fl)
self.stackErr=interp1d(self.wl,self.flErr)
# loads model :
hdus = fits.open(self.stack_model_file)
self.hdu2 = hdus[1] # .data
self.wlModel,self.flModel = self.hdu2.data['wavelength'], self.hdu2.data['firefly_model']*10**(-17)
self.model=interp1d(n.hstack((self.wlModel,[n.max(self.wlModel)+10,11000])), n.hstack(( self.flModel, [n.median(self.flModel[:-20]),n.median(self.flModel[:-20])] )) )
# wavelength range common to the stack and the model :
self.wlLineSpectrum = n.arange(n.max([self.stack.x.min(),self.model.x.min()]), n.min([self.stack.x.max(),self.model.x.max()]), 0.5)[2:-1]
self.flLineSpectrum=n.array([self.stack(xx)-self.model(xx) for xx in self.wlLineSpectrum])
self.fl_frac_LineSpectrum=n.array([self.stack(xx)/self.model(xx) for xx in self.wlLineSpectrum])
self.flErrLineSpectrum=self.stackErr(self.wlLineSpectrum)
def interpolate_stack(self):
"""
Divides the measured stack into the parts that overlap with the model and the parts that do not.
"""
self.stack=interp1d(self.wl,self.fl)
self.stackErr=interp1d(self.wl,self.flErr)
# bluer than model
self.stBlue = (self.wl<=self.firefly_min_wavelength)
# optical
self.stOpt = (self.wl<self.firefly_max_wavelength)& (self.wl> self.firefly_min_wavelength)
# redder than model
self.stRed = (self.wl>=self.firefly_max_wavelength)
if len(self.wl)<50 :
print "no data, skips spectrum"
return 0.
if len(self.wl[self.stBlue])>0:
self.contBlue=n.median(self.fl[self.stBlue])
self.side='blue'
if len(self.wl[self.stRed])>0:
self.contRed=n.median(self.fl[self.stRed])
self.side='red'
if len(self.wl[self.stRed])>0 and len(self.wl[self.stBlue])>0:
self.contRed=n.median(self.fl[self.stRed])
self.contBlue=n.median(self.fl[self.stBlue])
self.side='both'
if len(self.wl[self.stRed])==0 and len(self.wl[self.stBlue])==0:
self.side='none'
def interpolate_model(self):
"""
Interpolates the model to an array with the same coverage as the stack.
"""
# overlap region with stack
print "interpolate model"
self.mdOK =(self.wlModel>n.min(self.wl))&(self.wlModel<n.max(self.wl))
mdBlue=(self.wlModel<=n.min(self.wl)) # bluer part than data
mdRed=(self.wlModel>=n.max(self.wl)) # redder part than data
okRed=(self.wlModel>4650)&(self.wlModel<self.firefly_max_wavelength)
# Correction model => stack
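# CORRection below is the ratio of the rectangle-rule integrals of the
# stacked flux and of the model flux over the overlapping wavelength
# range; multiplying by it rescales the model to the flux level of the
# stack before the model is extended beyond its native coverage.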
CORRection=n.sum((self.wl[self.stOpt][1:]-self.wl[self.stOpt][:-1])* self.fl[self.stOpt][1:]) / n.sum((self.wlModel[ self.mdOK ][1:]-self.wlModel[ self.mdOK ][:-1])* self.flModel [ self.mdOK ][1:])
print "Correction", CORRection
if self.side=='red':
self.model=interp1d(n.hstack((self.wlModel[ self.mdOK ],n.arange(self.wlModel[ self.mdOK ].max()+0.5, self.stack.x.max(), 0.5))), n.hstack(( self.flModel [ self.mdOK ]*CORRection, n.ones_like(n.arange( self.wlModel[ self.mdOK ].max() + 0.5, self.stack.x.max(), 0.5))*self.contRed )) )
elif self.side=='blue':
self.model=interp1d(n.hstack((n.arange(self.stack.x.min(),self.wlModel[ self.mdOK ].min()-1., 0.5),self.wlModel[ self.mdOK ])),n.hstack(( n.ones_like(n.arange(self.stack.x.min() ,self.wlModel[ self.mdOK ].min() -1.,0.5))* self.contBlue, self.flModel [ self.mdOK ]*CORRection )) )
elif self.side=='both':
x1=n.hstack((n.arange(self.stack.x.min(),self.wlModel[ self.mdOK ].min()-1., 0.5), self.wlModel[ self.mdOK ]))
y1=n.hstack(( n.ones_like(n.arange(self.stack.x.min(),self.wlModel[ self.mdOK ].min()- 1.,0.5))*self.contBlue, self.flModel [ self.mdOK ]*CORRection ))
x2=n.hstack((x1,n.arange(self.wlModel[ self.mdOK ].max()+0.5,self.stack.x.max(),0.5)))
y2=n.hstack((y1,n.ones_like(n.arange(self.wlModel[ self.mdOK ].max()+0.5, self.stack.x.max(), 0.5))*self.contRed ))
self.model=interp1d(x2,y2)
elif self.side=='none':
self.model=interp1d(self.wlModel[ self.mdOK ], self.flModel [ self.mdOK ])
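# Whichever branch was taken, self.model now covers (up to edge effects)
# the wavelength range probed by the stack: the overlap region carries the
# rescaled firefly model, and sides without model coverage are padded with
# the constant continuum levels (self.contBlue / self.contRed) measured in
# interpolate_stack.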
def subtract_continuum_model(self):
"""
Creates the continuum-subtracted spectrum: the 'line' spectrum.
"""
self.interpolate_stack()
self.interpolate_model()
# wavelength range common to the stack and the model :
self.wlLineSpectrum = n.arange(n.max([self.stack.x.min(),self.model.x.min()]), n.min([self.stack.x.max(),self.model.x.max()]), 0.5)[2:-1]
print "range probed", self.wlLineSpectrum[0], self.wlLineSpectrum[-1], len( self.wlLineSpectrum)
self.flLineSpectrum=n.array([self.stack(xx)-self.model(xx) for xx in self.wlLineSpectrum])
self.flErrLineSpectrum=self.stackErr(self.wlLineSpectrum)
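# A sketch of the intended call order, inferred from the attributes each
# method sets (it is not enforced anywhere in this class): build the
# continuum-subtracted spectrum with subtract_continuum_model(), then run
# fit_lines_to_lineSpectrum() and fit_lines_to_fullSpectrum() to create
# self.lineSpec_tb_hdu and self.fullSpec_tb_hdu, and finally call
# save_spectrum(), which writes both tables to the output FITS file.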
def fit_lines_to_lineSpectrum(self):
"""
Fits the emission lines on the line spectrum.
"""
# interpolates the mean spectra.
print "fits to the line spectrum"
lfit = lineFit.LineFittingLibrary()
#self.subtract_continuum_model()
data,h=[],[]
print O2_3727
dat_mean,mI,hI=lfit.fit_Line_OIIdoublet_position(self.wlLineSpectrum, self.flLineSpectrum, self.flErrLineSpectrum, a0= O2_3727 , lineName="O2_3728", p0_sigma=7,model="gaussian",fitWidth = 20.,DLC=10.)
print hI, dat_mean
d_out=[]
for kk in range(10):
fluxRR = interp1d(self.wl, self.hdu1.data['jackknifeSpectra'].T[kk][self.selection])
flLineSpectrumRR=n.array([fluxRR(xx)-self.model(xx) for xx in self.wlLineSpectrum])
d1,mI,hI=lfit.fit_Line_OIIdoublet_position(self.wlLineSpectrum, flLineSpectrumRR, self.flErrLineSpectrum, a0= O2_3727 , lineName="O2_3728", p0_sigma=7,model="gaussian",fitWidth = 20.,DLC=10.)
d_out.append(d1)
d_out = n.array(d_out)
#print "jk out", d_out
err_out = n.std(d_out,axis=0)
#print "before", err_out, dat_mean
# assign error values :
dat_mean[3] = err_out[3-1]
dat_mean[5] = err_out[5-1]
dat_mean[7] = err_out[7-1]
#print "after", dat_mean
data.append(dat_mean)
h.append(hI)
for li in allLinesList :
# measure line properties from the mean weighted stack
print li[2]
dat_mean,mI,hI=lfit.fit_Line_position_C0noise(self.wlLineSpectrum, self.flLineSpectrum, self.flErrLineSpectrum, li[1], lineName=li[2], continuumSide=li[3], model="gaussian", p0_sigma=7,fitWidth = 15.,DLC=10.)
print hI, dat_mean
# measure its dispersion using the stacks
d_out=[]
for kk in range(len(self.hdu1.data['jackknifeSpectra'].T)):
fluxRR = interp1d(self.wl, self.hdu1.data['jackknifeSpectra'].T[kk][self.selection])
flLineSpectrumRR=n.array([fluxRR(xx)-self.model(xx) for xx in self.wlLineSpectrum])
d1,mI,hI=lfit.fit_Line_position_C0noise(self.wlLineSpectrum, flLineSpectrumRR, self.flErrLineSpectrum, li[1], lineName=li[2], continuumSide=li[3], model="gaussian", p0_sigma=7,fitWidth = 15.,DLC=10.)
d_out.append(d1)
d_out = n.array(d_out)
err_out = n.std(d_out,axis=0)
# assign error values :
dat_mean[2] = err_out[2-1]
dat_mean[4] = err_out[4-1]
dat_mean[6] = err_out[6-1]
data.append(dat_mean)
h.append(hI)
heading="".join(h)
out=n.hstack((data))
#print "out", out
out[n.isnan(out)]=n.ones_like(out[n.isnan(out)])*self.dV
#output = n.array([ out ])
#print "----------------", output.T[0], output.T[1], output
colNames = heading.split()
#print colNames
col0 = fits.Column(name=colNames[0],format='D', array= n.array([out.T[0]]))
col1 = fits.Column(name=colNames[1],format='D', array= n.array([out.T[1]]))
self.lineSpec_cols = fits.ColDefs([col0, col1])
#print self.lineSpec_cols
#print colNames
for ll in range(2,len(colNames),1):
#self.hdR["HIERARCH "+colNames[ll]+"_nc"] = out.T[ll]
self.lineSpec_cols += fits.Column(name=colNames[ll], format='D', array= n.array([out.T[ll]]) )
#print self.lineSpec_cols
self.lineSpec_tb_hdu = fits.BinTableHDU.from_columns(self.lineSpec_cols)
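# Note on the error estimate used above: every quantity is re-fitted on
# each jackknife realisation of the stack and the scatter (n.std) of those
# re-fits is written into the corresponding error entries of dat_mean,
# replacing the values returned by the fit on the mean stack.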
def fit_lines_to_fullSpectrum(self):
"""
Fits the emission lines on the full spectrum.
"""
# interpolates the mean spectra.
print "fits to full spectrum"
lfit = lineFit.LineFittingLibrary()
data,h=[],[]
print O2_3727
dat_mean,mI,hI=lfit.fit_Line_OIIdoublet_position(self.wl, self.fl, self.flErr, a0= O2_3727 , lineName="O2_3728", p0_sigma=7,model="gaussian",fitWidth = 20.,DLC=10.)
print hI, dat_mean
d_out=[]
for kk in range(10):
d1,mI,hI=lfit.fit_Line_OIIdoublet_position(self.wl, self.hdu1.data['jackknifeSpectra'].T[kk][self.selection], self.flErr , a0= O2_3727 , lineName="O2_3728", p0_sigma=7,model="gaussian",fitWidth = 20.,DLC=10.)
d_out.append(d1)
d_out = n.array(d_out)
#print "jk out", d_out
err_out = n.std(d_out,axis=0)
#print "before", err_out, dat_mean
# assign error values :
dat_mean[3] = err_out[3-1]
dat_mean[5] = err_out[5-1]
dat_mean[7] = err_out[7-1]
#print "after", dat_mean
data.append(dat_mean)
h.append(hI)
for li in allLinesList :
print li[2]
# measure line properties from the mean weighted stack
dat_mean,mI,hI=lfit.fit_Line_position_C0noise(self.wl, self.fl, self.flErr, li[1], lineName=li[2], continuumSide=li[3], model="gaussian", p0_sigma=7,fitWidth = 15.,DLC=10.)
print hI, dat_mean
# measure its dispersion using the stacks
d_out=[]
for kk in range(len(self.hdu1.data['jackknifeSpectra'].T)):
d1,mI,hI=lfit.fit_Line_position_C0noise(self.wl, self.hdu1.data['jackknifeSpectra'].T[kk][self.selection], self.flErr, li[1], lineName=li[2], continuumSide=li[3], model="gaussian", p0_sigma=7,fitWidth = 15.,DLC=10.)
d_out.append(d1)
d_out = n.array(d_out)
err_out = n.std(d_out,axis=0)
# assign error values :
dat_mean[2] = err_out[2-1]
dat_mean[4] = err_out[4-1]
dat_mean[6] = err_out[6-1]
data.append(dat_mean)
#print li[2], dat_mean
h.append(hI)
heading="".join(h)
out=n.hstack((data))
out[n.isnan(out)]=n.ones_like(out[n.isnan(out)])*self.dV
#output = n.array([ out ])
#print "----------------", output.T[0], output.T[1], output
colNames = heading.split()
#print colNames
col0 = fits.Column(name=colNames[0],format='D', array= n.array([out.T[0]]))
col1 = fits.Column(name=colNames[1],format='D', array= n.array([out.T[1]]))
self.fullSpec_cols = fits.ColDefs([col0, col1])
#print colNames
for ll in range(2,len(colNames),1):
#self.hdR["HIERARCH "+colNames[ll]+"_nc"] = out.T[ll]
self.fullSpec_cols += fits.Column(name=colNames[ll], format='D', array= n.array([out.T[ll]]) )
self.fullSpec_tb_hdu = fits.BinTableHDU.from_columns(self.fullSpec_cols)
def fit_lines_to_lineSpectrum_tutorial(self):
"""
Fits the emission lines on the line spectrum.
"""
# interpolates the mean spectra.
print "fits to the line spectrum"
lfit = lineFit.LineFittingLibrary()
#self.subtract_continuum_model()
data,h=[],[]
print O2_3727
dat_mean,mI,hI=lfit.fit_Line_OIIdoublet_position(self.wlLineSpectrum, self.flLineSpectrum, self.flErrLineSpectrum, a0= O2_3727 , lineName="O2_3728", p0_sigma=7,model="gaussian",fitWidth = 20.,DLC=10.)
data.append(dat_mean)
h.append(hI)
for li in allLinesList :
# measure line properties from the mean weighted stack
print li[2]
dat_mean,mI,hI=lfit.fit_Line_position_C0noise(self.wlLineSpectrum, self.flLineSpectrum, self.flErrLineSpectrum, li[1], lineName=li[2], continuumSide=li[3], model="gaussian", p0_sigma=7,fitWidth = 15.,DLC=10.)
data.append(dat_mean)
h.append(hI)
heading="".join(h)
out=n.hstack((data))
#print "out", out
out[n.isnan(out)]=n.ones_like(out[n.isnan(out)])*self.dV
#output = n.array([ out ])
#print "----------------", output.T[0], output.T[1], output
colNames = heading.split()
#print colNames
col0 = fits.Column(name=colNames[0],format='D', array= n.array([out.T[0]]))
col1 = fits.Column(name=colNames[1],format='D', array= n.array([out.T[1]]))
self.lineSpec_cols = fits.ColDefs([col0, col1])
#print self.lineSpec_cols
#print colNames
for ll in range(2,len(colNames),1):
#self.hdR["HIERARCH "+colNames[ll]+"_nc"] = out.T[ll]
self.lineSpec_cols += fits.Column(name=colNames[ll], format='D', array= n.array([out.T[ll]]) )
#print self.lineSpec_cols
self.lineSpec_tb_hdu = fits.BinTableHDU.from_columns(self.lineSpec_cols)
def fit_lines_to_fullSpectrum_tutorial(self):
"""
Fits the emission lines on the full spectrum.
"""
# interpolates the mean spectra.
print "fits to full spectrum"
lfit = lineFit.LineFittingLibrary()
data,h=[],[]
print O2_3727
dat_mean,mI,hI=lfit.fit_Line_OIIdoublet_position(self.wl, self.fl, self.flErr, a0= O2_3727 , lineName="O2_3728", p0_sigma=7,model="gaussian",fitWidth = 20.,DLC=10.)
print hI, dat_mean
data.append(dat_mean)
h.append(hI)
for li in allLinesList :
print li[2]
# measure line properties from the mean weighted stack
dat_mean,mI,hI=lfit.fit_Line_position_C0noise(self.wl, self.fl, self.flErr, li[1], lineName=li[2], continuumSide=li[3], model="gaussian", p0_sigma=7,fitWidth = 15.,DLC=10.)
print hI, dat_mean
# measure its dispersion using the stacks
data.append(dat_mean)
#print li[2], dat_mean
h.append(hI)
heading="".join(h)
out=n.hstack((data))
out[n.isnan(out)]=n.ones_like(out[n.isnan(out)])*self.dV
#output = n.array([ out ])
#print "----------------", output.T[0], output.T[1], output
colNames = heading.split()
#print colNames
col0 = fits.Column(name=colNames[0],format='D', array= n.array([out.T[0]]))
col1 = fits.Column(name=colNames[1],format='D', array= n.array([out.T[1]]))
self.fullSpec_cols = fits.ColDefs([col0, col1])
#print colNames
for ll in range(2,len(colNames),1):
#self.hdR["HIERARCH "+colNames[ll]+"_nc"] = out.T[ll]
self.fullSpec_cols += fits.Column(name=colNames[ll], format='D', array= n.array([out.T[ll]]) )
self.fullSpec_tb_hdu = fits.BinTableHDU.from_columns(self.fullSpec_cols)
def save_spectrum(self):
"""
Saves the stack spectrum, the model and derived quantities in a single FITS file with separate HDUs.
"""
wavelength = fits.Column(name="wavelength",format="D", unit="Angstrom", array= self.wlLineSpectrum)
flux = fits.Column(name="flux",format="D", unit="Angstrom", array= self.flLineSpectrum)
fluxErr = fits.Column(name="fluxErr",format="D", unit="Angstrom", array= self.flErrLineSpectrum)
# new columns
cols = fits.ColDefs([wavelength, flux, fluxErr])
lineSptbhdu = fits.BinTableHDU.from_columns(cols)
# previous file
prihdu = fits.PrimaryHDU(header=self.hdR)
thdulist = fits.HDUList([prihdu, self.hdu1, self.hdu2, lineSptbhdu, self.lineSpec_tb_hdu, self.fullSpec_tb_hdu])
outPutFileName = self.stack_model_file
outFile = n.core.defchararray.replace(outPutFileName, "fits", "model").item()
if self.tutorial:
outFile = join( os.environ['DATA_DIR'], "ELG-composite", self.stack_file_base[:-5]+".model" )
if self.eboss_stack:
#outFile = join(os.environ['DATA_DIR'],"ELG-composite", "stacks", "model", self.stack_file_base[:-6] + ".model.fits")
outFile = join(os.environ['EBOSS_TARGET'],"elg", "tests", "stacks", "model", self.stack_file_base[:-6] + ".model")
if os.path.isfile(outFile):
os.remove(outFile)
thdulist.writeto(outFile)
| cc0-1.0 |
wasade/qiime | tests/test_stats.py | 1 | 101464 | #!/usr/bin/env python
from __future__ import division
__author__ = "Michael Dwan"
__copyright__ = "Copyright 2012, The QIIME project"
__credits__ = ["Jai Ram Rideout", "Michael Dwan", "Logan Knecht",
"Damien Coy", "Levi McCracken", "Andrew Cochran",
"Will Van Treuren"]
__license__ = "GPL"
__version__ = "1.8.0-dev"
__maintainer__ = "Jai Ram Rideout"
__email__ = "[email protected]"
"""Test suite for classes, methods and functions of the stats module."""
from shutil import rmtree
from os.path import exists, join
from string import digits
from tempfile import mkdtemp
from StringIO import StringIO
from unittest import TestCase, main
from warnings import filterwarnings
from itertools import izip
from types import StringType, ListType, FloatType, TupleType
from skbio.util import remove_files
from numpy.testing import assert_almost_equal, assert_allclose
from numpy import (array, asarray, roll, median, nan, arange, matrix,
concatenate, nan, ndarray, number, ones,
reshape, testing, tril, var, log, fill_diagonal)
from numpy.random import permutation, shuffle, seed
from biom import Table, load_table
from qiime.stats import (all_pairs_t_test, _perform_pairwise_tests,
CorrelationStats,
DistanceMatrixStats, MantelCorrelogram,
PartialMantel, quantile, _quantile,
paired_difference_analyses,
G_2_by_2, g_fit, t_paired, t_one_sample,
t_two_sample, mc_t_two_sample,
_permute_observations,
correlation_t, ZeroExpectedError, fisher,
safe_sum_p_log_p, permute_2d,
pearson, spearman, ANOVA_one_way, mw_t,
mw_boot, is_symmetric_and_hollow,
tail, fdr_correction,
benjamini_hochberg_step_down,
bonferroni_correction, fisher_z_transform,
fisher_population_correlation,
inverse_fisher_z_transform,
z_transform_pval, kruskal_wallis, kendall,
kendall_pval, assign_correlation_pval,
cscore, williams_correction, t_one_observation,
normprob, tprob, fprob, chi2prob)
from qiime.parse import parse_mapping_file_to_dict
from skbio.stats.distance import (DissimilarityMatrix, DistanceMatrix)
from qiime.util import MetadataMap, get_qiime_temp_dir
class TestHelper(TestCase):
"""Helper class that instantiates some commonly-used objects.
This class should be subclassed by any test classes that want to use its
members.
"""
def compare_multiple_level_array(self, observed, expected):
""" Compare multiple level arrays.
It expects observed and expected arrays, where each element is an
array of elements.
"""
if isinstance(observed, (TupleType, ListType)):
for obs, exp in izip(observed, expected):
self.compare_multiple_level_array(obs, exp)
elif observed is not None and isinstance(observed, (number, ndarray, FloatType)):
assert_almost_equal(observed, expected, decimal=5)
else:
self.assertEqual(observed, expected)
def setUp(self):
"""Define some useful test objects."""
# The unweighted unifrac distance matrix from the overview tutorial.
self.overview_dm_str = ["\tPC.354\tPC.355\tPC.356\tPC.481\tPC.593\
\tPC.607\tPC.634\tPC.635\tPC.636",
"PC.354\t0.0\t0.595483768391\t0.618074717633\
\t0.582763100909\t0.566949022108\
\t0.714717232268\t0.772001731764\
\t0.690237118413\t0.740681707488",
"PC.355\t0.595483768391\t0.0\t0.581427669668\
\t0.613726772383\t0.65945132763\
\t0.745176523638\t0.733836123821\
\t0.720305073505\t0.680785600439",
"PC.356\t0.618074717633\t0.581427669668\t0.0\
\t0.672149021573\t0.699416863323\
\t0.71405573754\t0.759178215168\
\t0.689701276341\t0.725100672826",
"PC.481\t0.582763100909\t0.613726772383\
\t0.672149021573\t0.0\t0.64756120797\
\t0.666018240373\t0.66532968784\
\t0.650464714994\t0.632524644216",
"PC.593\t0.566949022108\t0.65945132763\
\t0.699416863323\t0.64756120797\t0.0\
\t0.703720200713\t0.748240937349\
\t0.73416971958\t0.727154987937",
"PC.607\t0.714717232268\t0.745176523638\
\t0.71405573754\t0.666018240373\
\t0.703720200713\t0.0\t0.707316869557\
\t0.636288883818\t0.699880573956",
"PC.634\t0.772001731764\t0.733836123821\
\t0.759178215168\t0.66532968784\
\t0.748240937349\t0.707316869557\t0.0\
\t0.565875193399\t0.560605525642",
"PC.635\t0.690237118413\t0.720305073505\
\t0.689701276341\t0.650464714994\
\t0.73416971958\t0.636288883818\
\t0.565875193399\t0.0\t0.575788039321",
"PC.636\t0.740681707488\t0.680785600439\
\t0.725100672826\t0.632524644216\
\t0.727154987937\t0.699880573956\
\t0.560605525642\t0.575788039321\t0.0"]
self.overview_dm = DistanceMatrix.read(\
StringIO('\n'.join(self.overview_dm_str)))
# The overview tutorial's metadata mapping file.
self.overview_map_str = ["#SampleID\tBarcodeSequence\tTreatment\tDOB",
"PC.354\tAGCACGAGCCTA\tControl\t20061218",
"PC.355\tAACTCGTCGATG\tControl\t20061218",
"PC.356\tACAGACCACTCA\tControl\t20061126",
"PC.481\tACCAGCGACTAG\tControl\t20070314",
"PC.593\tAGCAGCACTTGT\tControl\t20071210",
"PC.607\tAACTGTGCGTAC\tFast\t20071112",
"PC.634\tACAGAGTCGGCT\tFast\t20080116",
"PC.635\tACCGCAGAGTCA\tFast\t20080116",
"PC.636\tACGGTGAGTGTC\tFast\t20080116"]
self.overview_map = MetadataMap.parseMetadataMap(self.overview_map_str)
self.test_map_str = [
"#SampleID\tBarcodeSequence\tFoo\tBar\tDescription",
"PC.354\tAGCACGAGCCTA\tfoo\ta\t354",
"PC.355\tAACTCGTCGATG\tfoo\ta\t355",
"PC.356\tACAGACCACTCA\tbar\ta\t356",
"PC.481\tACCAGCGACTAG\tfoo\ta\t481",
"PC.593\tAGCAGCACTTGT\tbar\ta\t593",
"PC.607\tAACTGTGCGTAC\tbar\ta\t607",
"PC.634\tACAGAGTCGGCT\tbar\ta\t634",
"PC.635\tACCGCAGAGTCA\tfoo\ta\t635",
"PC.636\tACGGTGAGTGTC\tbar\ta\t636"]
self.test_map = MetadataMap.parseMetadataMap(self.test_map_str)
# A 1x1 dm.
self.single_ele_dm = DistanceMatrix([[0]], ['s1'])
# How many times to test a p-value.
self.p_val_tests = 10
def assertCorrectPValue(self, exp_min, exp_max, fn, num_perms=None,
p_val_key='p_value'):
"""Tests that the stochastic p-value falls in the specified range.
Performs the test self.p_val_tests times and fails if the observed
p-value never falls into the specified range. Each
p-value is also tested that it falls in the range 0.0 to 1.0.
This method assumes that fn is callable, and will pass num_perms to fn
if num_perms is provided. p_val_key specifies the key that will be used
to retrieve the p-value from the results dict that is returned by fn.
"""
found_match = False
for i in range(self.p_val_tests):
if num_perms is not None:
obs = fn(num_perms)
else:
obs = fn()
p_val = obs[p_val_key]
self.assertTrue(0.0 <= p_val < 1.0)
if p_val >= exp_min and p_val <= exp_max:
found_match = True
break
self.assertTrue(found_match)
class NonRandomShuffler(object):
"""Helper class for testing p-values that are calculated by permutations.
Since p-values rely on randomness, it may be useful to use a non-random
function (such as that provided by this class) to generate permutations
so that p-values can be accurately tested.
This code is heavily based on Andrew Cochran's original version.
"""
def __init__(self):
"""Default constructor initializes the number of calls to zero."""
self.num_calls = 0
def permutation(self, x):
"""Non-random permutation function to test p-test code.
Returns the 'permuted' version of x.
Arguments:
x - the array to be 'permuted'
"""
x = array(x)
x = roll(x, self.num_calls)
self.num_calls += 1
return x
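# For example, permutation([1, 2, 3]) returns [1, 2, 3] on the first call,
# [3, 1, 2] on the second and [2, 3, 1] on the third: numpy's roll shifts
# the elements by one more position on every call, giving a deterministic
# sequence of 'permutations'.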
class StatsTests(TestHelper):
"""Tests for top-level functions in the stats module."""
def setUp(self):
"""Set up data that will be used by the tests."""
self.value_for_seed = 20
# Single comp.
self.labels1 = ['foo', 'bar']
self.dists1 = [[1, 2, 3], [7, 8]]
# Multiple comps.
self.labels2 = ['foo', 'bar', 'baz']
self.dists2 = [[1, 2, 3], [7, 8], [9, 10, 11]]
# Too few obs.
self.labels3 = ['foo', 'bar', 'baz']
self.dists3 = [[1], [7], [9, 10, 11]]
def remove_nums(self, text):
"""Removes all digits from the given string.
Returns the string with all digits removed. Useful for testing strings
for equality in unit tests where you don't care about numeric values,
or if some values are random.
This code was taken from http://bytes.com/topic/python/answers/
850562-finding-all-numbers-string-replacing
Arguments:
text - the string to remove digits from
"""
return text.translate(None, digits)
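# For example, remove_nums("t statistic 1.645 (p=0.05)") returns
# "t statistic . (p=.)", so numeric values never affect the comparison.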
def test_all_pairs_t_test(self):
"""Test performing Monte Carlo tests on valid dataset."""
# We aren't testing the numeric values here, as they've already been
# tested in the functions that compute them. We are interested in the
# format of the returned string.
exp = """# The tests of significance were performed using a two-sided Student's two-sample t-test.
# Alternative hypothesis: Group 1 mean != Group 2 mean
# The nonparametric p-values were calculated using 999 Monte Carlo permutations.
# The nonparametric p-values contain the correct number of significant digits.
# Entries marked with "N/A" could not be calculated because at least one of the groups
# of distances was empty, both groups each contained only a single distance, or
# the test could not be performed (e.g. no variance in groups with the same mean).
Group 1 Group 2 t statistic Parametric p-value Parametric p-value (Bonferroni-corrected) Nonparametric p-value Nonparametric p-value (Bonferroni-corrected)
foo bar -6.6 0.00708047956412 0.0212414386924 0.095 0.285
foo baz -9.79795897113 0.000608184944463 0.00182455483339 0.101 0.303
bar baz -3.0 0.0576688856224 0.173006656867 0.217 0.651
"""
obs = all_pairs_t_test(self.labels2, self.dists2)
self.assertEqual(self.remove_nums(obs), self.remove_nums(exp))
def test_all_pairs_t_test_no_perms(self):
"""Test performing Monte Carlo tests on valid dataset with no perms."""
exp = """# The tests of significance were performed using a two-sided Student's two-sample t-test.
# Alternative hypothesis: Group 1 mean != Group 2 mean
# Entries marked with "N/A" could not be calculated because at least one of the groups
# of distances was empty, both groups each contained only a single distance, or
# the test could not be performed (e.g. no variance in groups with the same mean).
Group 1 Group 2 t statistic Parametric p-value Parametric p-value (Bonferroni-corrected) Nonparametric p-value Nonparametric p-value (Bonferroni-corrected)
foo bar -6.6 0.00708047956412 0.0212414386924 N/A N/A
foo baz -9.79795897113 0.000608184944463 0.00182455483339 N/A N/A
bar baz -3.0 0.0576688856224 0.173006656867 N/A N/A
"""
obs = all_pairs_t_test(self.labels2, self.dists2,
num_permutations=0)
self.assertEqual(self.remove_nums(obs), self.remove_nums(exp))
def test_all_pairs_t_test_few_perms(self):
"""Test performing Monte Carlo tests on dataset with a few perms."""
exp = """# The tests of significance were performed using a one-sided (low) Student's two-sample t-test.
# Alternative hypothesis: Group 1 mean < Group 2 mean
# The nonparametric p-values were calculated using 5 Monte Carlo permutations.
# The nonparametric p-values contain the correct number of significant digits.
# Entries marked with "N/A" could not be calculated because at least one of the groups
# of distances was empty, both groups each contained only a single distance, or
# the test could not be performed (e.g. no variance in groups with the same mean).
Group 1 Group 2 t statistic Parametric p-value Parametric p-value (Bonferroni-corrected) Nonparametric p-value Nonparametric p-value (Bonferroni-corrected)
foo bar -6.6 0.00354023978206 0.0106207193462 Too few iters to compute p-value (num_iters=5) Too few iters to compute p-value (num_iters=5)
foo baz -9.79795897113 0.000304092472232 0.000912277416695 Too few iters to compute p-value (num_iters=5) Too few iters to compute p-value (num_iters=5)
bar baz -3.0 0.0288344428112 0.0865033284337 Too few iters to compute p-value (num_iters=5) Too few iters to compute p-value (num_iters=5)
"""
obs = all_pairs_t_test(self.labels2, self.dists2,
num_permutations=5, tail_type='low')
self.assertEqual(self.remove_nums(obs), self.remove_nums(exp))
def test_all_pairs_t_test_invalid_tests(self):
"""Test performing Monte Carlo tests with some invalid tests."""
exp = """# The tests of significance were performed using a one-sided (high) Student's two-sample t-test.
# Alternative hypothesis: Group 1 mean > Group 2 mean
# The nonparametric p-values were calculated using 20 Monte Carlo permutations.
# The nonparametric p-values contain the correct number of significant digits.
# Entries marked with "N/A" could not be calculated because at least one of the groups
# of distances was empty, both groups each contained only a single distance, or
# the test could not be performed (e.g. no variance in groups with the same mean).
Group 1 Group 2 t statistic Parametric p-value Parametric p-value (Bonferroni-corrected) Nonparametric p-value Nonparametric p-value (Bonferroni-corrected)
foo bar N/A N/A N/A N/A N/A
"""
obs = all_pairs_t_test(['foo', 'bar'], [[], [1, 2, 4]],
'high', 20)
self.assertEqual(self.remove_nums(obs), self.remove_nums(exp))
def test_all_pairs_t_test_invalid_input(self):
"""Test performing Monte Carlo tests on invalid input."""
# Number of labels and distance groups do not match.
self.assertRaises(ValueError, all_pairs_t_test,
['foo', 'bar'], [[1, 2, 3], [4, 5, 6], [7, 8]])
# Invalid tail type.
self.assertRaises(ValueError, all_pairs_t_test,
['foo', 'bar'], [[1, 2, 3], [4, 5, 6]], 'foo')
# Invalid number of permutations.
self.assertRaises(ValueError, all_pairs_t_test,
['foo', 'bar'], [[1, 2, 3], [4, 5, 6]], num_permutations=-1)
def test_perform_pairwise_tests_single_comp(self):
"""Test on valid dataset w/ 1 comp."""
# Verified with R's t.test function.
exp = [['foo', 'bar', -6.5999999999999996, 0.0070804795641244006,
0.0070804795641244006, 0.100000000001, 0.10000000000001]]
seed(self.value_for_seed)
obs = _perform_pairwise_tests(self.labels1, self.dists1, 'two-sided',
999)
self.compare_multiple_level_array(obs, exp)
def test_perform_pairwise_tests_multi_comp(self):
"""Test on valid dataset w/ multiple comps."""
# Verified with R's t.test function.
exp = [['foo', 'bar', -6.5999999999999996, 0.0070804795641244006,
0.021241438692373202, nan, nan], ['foo', 'baz',
-9.7979589711327115, 0.00060818494446333643, 0.0018245548333900093,
nan, nan], ['bar', 'baz', -3.0, 0.05766888562243732,
0.17300665686731195, nan, nan]]
obs = _perform_pairwise_tests(self.labels2, self.dists2, 'two-sided',
0)
self.compare_multiple_level_array(obs, exp)
def test_perform_pairwise_tests_too_few_obs(self):
"""Test on dataset w/ too few observations."""
exp = [['foo', 'bar', nan, nan, nan, nan, nan],
['foo', 'baz', -7.794228634059948, 0.008032650971672552,
0.016065301943345104, nan, nan],
['bar', 'baz', -2.598076211353316, 0.060844967173160069,
0.12168993434632014, nan, nan]]
obs = _perform_pairwise_tests(self.labels3, self.dists3, 'low', 0)
self.compare_multiple_level_array(obs, exp)
exp = [['foo', 'bar', nan, nan, nan, nan, nan]]
obs = _perform_pairwise_tests(['foo', 'bar'], [[], [1, 2, 4]], 'high',
20)
self.compare_multiple_level_array(obs, exp)
class DistanceMatrixStatsTests(TestHelper):
"""Tests for the DistanceMatrixStats class."""
def setUp(self):
"""Define some dm stats instances that will be used by the tests."""
super(DistanceMatrixStatsTests, self).setUp()
self.empty_dms = DistanceMatrixStats([])
self.single_dms = DistanceMatrixStats([self.overview_dm])
self.double_dms = DistanceMatrixStats(
[self.overview_dm, self.single_ele_dm])
# For testing the requirement that two distance matrices are set.
self.two_dms = DistanceMatrixStats(
[self.overview_dm, self.single_ele_dm], 2)
# For testing the requirement that the distance matrices meet the
# minimum size requirements.
self.size_dms = DistanceMatrixStats(
[self.overview_dm, self.overview_dm], 2, 4)
def test_DistanceMatrices_getter(self):
"""Test getter for distmats."""
self.assertEqual(self.empty_dms.DistanceMatrices, [])
self.assertEqual(self.single_dms.DistanceMatrices, [self.overview_dm])
self.assertEqual(self.double_dms.DistanceMatrices,
[self.overview_dm, self.single_ele_dm])
def test_DistanceMatrices_setter(self):
"""Test setter for dms on valid input data."""
self.empty_dms.DistanceMatrices = []
self.assertEqual(self.empty_dms.DistanceMatrices, [])
self.empty_dms.DistanceMatrices = [self.overview_dm]
self.assertEqual(self.empty_dms.DistanceMatrices, [self.overview_dm])
self.empty_dms.DistanceMatrices = [self.overview_dm, self.overview_dm]
self.assertEqual(self.empty_dms.DistanceMatrices,
[self.overview_dm, self.overview_dm])
def test_DistanceMatrices_setter_invalid(self):
"""Test setter for dms on invalid input data."""
# Allows testing of non-callable property setter that raises errors.
# Idea was obtained from http://stackoverflow.com/a/3073049
self.assertRaises(TypeError, setattr, self.empty_dms,
'DistanceMatrices', None)
self.assertRaises(TypeError, setattr, self.empty_dms,
'DistanceMatrices', 10)
self.assertRaises(TypeError, setattr, self.empty_dms,
'DistanceMatrices', 20.0)
self.assertRaises(TypeError, setattr, self.empty_dms,
'DistanceMatrices', "foo")
self.assertRaises(TypeError, setattr, self.empty_dms,
'DistanceMatrices', {})
self.assertRaises(TypeError, setattr, self.empty_dms,
'DistanceMatrices', self.overview_dm)
self.assertRaises(TypeError, setattr, self.empty_dms,
'DistanceMatrices', [1])
self.assertRaises(TypeError, setattr, self.empty_dms,
'DistanceMatrices',
[DissimilarityMatrix(
array([[0, 2], [3, 0]]), ['foo', 'bar']),
DissimilarityMatrix(
array([[0, 2], [3.5, 0]]), ['foo', 'bar'])])
# Test constructor as well.
self.assertRaises(TypeError, DistanceMatrixStats, None)
self.assertRaises(TypeError, DistanceMatrixStats, 10)
self.assertRaises(TypeError, DistanceMatrixStats, 20.0)
self.assertRaises(TypeError, DistanceMatrixStats, "foo")
self.assertRaises(TypeError, DistanceMatrixStats, {})
self.assertRaises(TypeError, DistanceMatrixStats, self.overview_dm)
self.assertRaises(TypeError, DistanceMatrixStats, [1])
self.assertRaises(TypeError, DistanceMatrixStats,
[DissimilarityMatrix(
array([[0, 2], [3, 0]]), ['foo', 'bar']),
DissimilarityMatrix(
array([[0, 2], [3.5, 0]]), ['foo', 'bar'])])
def test_DistanceMatrices_setter_wrong_number(self):
"""Test setting an invalid number of distance matrices."""
self.assertRaises(ValueError, setattr, self.two_dms,
'DistanceMatrices', [self.overview_dm])
self.assertRaises(ValueError, setattr, self.two_dms,
'DistanceMatrices', [self.overview_dm, self.overview_dm,
self.overview_dm])
def test_DistanceMatrices_setter_too_small(self):
"""Test setting distance matrices that are too small."""
self.assertRaises(ValueError, setattr, self.size_dms,
'DistanceMatrices', [self.single_ele_dm, self.single_ele_dm])
def test_call(self):
"""Test __call__() returns an empty result set."""
self.assertEqual(self.single_dms(), {})
self.assertEqual(self.single_dms(10), {})
self.assertEqual(self.single_dms(0), {})
def test_call_bad_perms(self):
"""Test __call__() fails upon receiving invalid number of perms."""
self.assertRaises(ValueError, self.single_dms, -1)
class CorrelationStatsTests(TestHelper):
"""Tests for the CorrelationStats class."""
def setUp(self):
"""Set up correlation stats instances for use in tests."""
super(CorrelationStatsTests, self).setUp()
self.cs = CorrelationStats([self.overview_dm, self.overview_dm])
def test_DistanceMatrices_setter(self):
"""Test setting valid distance matrices."""
dms = [self.overview_dm, self.overview_dm]
self.cs.DistanceMatrices = dms
self.assertEqual(self.cs.DistanceMatrices, dms)
dms = [self.overview_dm, self.overview_dm, self.overview_dm]
self.cs.DistanceMatrices = dms
self.assertEqual(self.cs.DistanceMatrices, dms)
def test_DistanceMatrices_setter_mismatched_labels(self):
"""Test setting dms with mismatching sample ID labels."""
mismatch = DistanceMatrix(array([[0]]), ['s2'])
self.assertRaises(ValueError, setattr, self.cs, 'DistanceMatrices',
[self.single_ele_dm, mismatch])
# Also test that constructor raises this error.
self.assertRaises(ValueError, CorrelationStats, [self.single_ele_dm,
mismatch])
def test_DistanceMatrices_setter_wrong_dims(self):
"""Test setting dms with mismatching dimensions."""
self.assertRaises(ValueError, setattr, self.cs, 'DistanceMatrices',
[self.overview_dm, self.single_ele_dm])
# Also test that constructor raises this error.
self.assertRaises(ValueError, CorrelationStats, [self.overview_dm,
self.single_ele_dm])
def test_DistanceMatrices_setter_too_few(self):
"""Test setting dms with not enough of them."""
self.assertRaises(ValueError, setattr, self.cs, 'DistanceMatrices', [])
# Also test that constructor raises this error.
self.assertRaises(ValueError, CorrelationStats, [])
def test_call(self):
"""Test __call__() returns an empty result set."""
self.assertEqual(self.cs(), {})
class MantelCorrelogramTests(TestHelper):
"""Tests for the MantelCorrelogram class."""
def setUp(self):
"""Set up mantel correlogram instances for use in tests."""
super(MantelCorrelogramTests, self).setUp()
# Mantel correlogram test using the overview tutorial's unifrac dm as
# both inputs.
self.mc = MantelCorrelogram(self.overview_dm, self.overview_dm)
# Smallest test case: 3x3 matrices.
ids = ['s1', 's2', 's3']
dm1 = DistanceMatrix(array([[0, 1, 2], [1, 0, 3], [2, 3, 0]]), ids)
dm2 = DistanceMatrix(array([[0, 2, 5], [2, 0, 8], [5, 8, 0]]), ids)
self.small_mc = MantelCorrelogram(dm1, dm2)
# For testing variable-sized bins.
self.small_mc_var_bins = MantelCorrelogram(dm1, dm2,
variable_size_distance_classes=True)
def test_Alpha_getter(self):
"""Test retrieving the value of alpha."""
self.assertEqual(self.mc.Alpha, 0.05)
def test_Alpha_setter(self):
"""Test setting the value of alpha."""
self.mc.Alpha = 0.01
self.assertEqual(self.mc.Alpha, 0.01)
def test_Alpha_setter_invalid(self):
"""Test setting the value of alpha with an invalid value."""
self.assertRaises(ValueError, setattr, self.mc, 'Alpha', -5)
self.assertRaises(ValueError, setattr, self.mc, 'Alpha', 2)
def test_DistanceMatrices_setter(self):
"""Test setting a valid number of distance matrices."""
dms = [self.overview_dm, self.overview_dm]
self.mc.DistanceMatrices = dms
self.assertEqual(self.mc.DistanceMatrices, dms)
def test_DistanceMatrices_setter_wrong_number(self):
"""Test setting an invalid number of distance matrices."""
self.assertRaises(ValueError, setattr, self.mc, 'DistanceMatrices',
[self.overview_dm])
self.assertRaises(ValueError, setattr, self.mc, 'DistanceMatrices',
[self.overview_dm, self.overview_dm, self.overview_dm])
def test_DistanceMatrices_setter_too_small(self):
"""Test setting distance matrices that are too small."""
self.assertRaises(ValueError, setattr, self.mc, 'DistanceMatrices',
[self.single_ele_dm, self.single_ele_dm])
def test_call(self):
"""Test running a Mantel correlogram analysis on valid input."""
# A lot of the returned numbers are based on random permutations and
# thus cannot be tested for exact values. We'll test what we can
# exactly, and then test for "sane" values for the "random" values. The
# matplotlib Figure object cannot be easily tested either, so we'll try
# our best to make sure it appears sane.
obs = self.mc()
exp_method_name = 'Mantel Correlogram'
self.assertEqual(obs['method_name'], exp_method_name)
exp_class_index = [0.5757052546507142, 0.60590471266814283,
0.63610417068557146, 0.66630362870299997, 0.69650308672042849,
0.72670254473785723, 0.75690200275528574]
assert_almost_equal(obs['class_index'], exp_class_index)
exp_num_dist = [12, 6, 8, 10, 12, 16, 8]
self.assertEqual(obs['num_dist'], exp_num_dist)
exp_mantel_r = [0.73244729118260765, 0.31157641757444593,
0.17627427296718071, None, None, None, None]
self.compare_multiple_level_array(obs['mantel_r'], exp_mantel_r)
# Test matplotlib Figure for a sane state.
obs_fig = obs['correlogram_plot']
obs_ax = obs_fig.get_axes()[0]
self.assertEqual(obs_ax.get_title(), "Mantel Correlogram")
self.assertEqual(obs_ax.get_xlabel(), "Distance class index")
self.assertEqual(obs_ax.get_ylabel(), "Mantel correlation statistic")
assert_almost_equal(obs_ax.get_xticks(), [0.57, 0.58, 0.59, 0.6,
0.61, 0.62, 0.63, 0.64, 0.65])
assert_almost_equal(obs_ax.get_yticks(), [0.1, 0.2, 0.3, 0.4, 0.5,
0.6, 0.7, 0.8, 0.9])
# Test p-values and corrected p-values.
found_match = False
for i in range(self.p_val_tests):
obs = self.mc()
p_vals = obs['mantel_p']
corr_p_vals = obs['mantel_p_corr']
self.assertEqual(len(p_vals), 7)
self.assertEqual(p_vals[3:], [None, None, None, None])
self.assertTrue(0.0 <= p_vals[0] <= 1.0)
self.assertTrue(0.0 <= p_vals[1] <= 1.0)
self.assertTrue(0.0 <= p_vals[2] <= 1.0)
self.compare_multiple_level_array(corr_p_vals,
[p_val * 3 if p_val is not None else None for p_val in p_vals])
if (p_vals[0] >= 0 and p_vals[0] <= 0.01 and p_vals[1] > 0.01 and
p_vals[1] <= 0.1 and p_vals[2] > 0.1 and p_vals[2] <= 0.5):
found_match = True
break
self.assertTrue(found_match)
def test_call_small(self):
"""Test running a Mantel correlogram analysis on the smallest input."""
# The expected output was verified with vegan's mantel correlogram
# function.
obs = self.small_mc()
exp_method_name = 'Mantel Correlogram'
self.assertEqual(obs['method_name'], exp_method_name)
exp_class_index = [3.0, 5.0, 7.0]
assert_almost_equal(obs['class_index'], exp_class_index)
exp_num_dist = [2, 2, 2]
self.assertEqual(obs['num_dist'], exp_num_dist)
exp_mantel_r = [0.86602540378443871, None, None]
self.compare_multiple_level_array(obs['mantel_r'], exp_mantel_r)
# Test matplotlib Figure for a sane state.
obs_fig = obs['correlogram_plot']
obs_ax = obs_fig.get_axes()[0]
self.assertEqual(obs_ax.get_title(), "Mantel Correlogram")
self.assertEqual(obs_ax.get_xlabel(), "Distance class index")
self.assertEqual(obs_ax.get_ylabel(), "Mantel correlation statistic")
assert_almost_equal(obs_ax.get_xticks(), [2.85, 2.9, 2.95, 3., 3.05,
3.1, 3.15, 3.2])
assert_almost_equal(obs_ax.get_yticks(), [0.82, 0.83, 0.84, 0.85,
0.86, 0.87, 0.88, 0.89, 0.9, 0.91])
# Test p-values and corrected p-values.
found_match = False
for i in range(self.p_val_tests):
obs = self.small_mc()
p_vals = obs['mantel_p']
corr_p_vals = obs['mantel_p_corr']
self.assertEqual(len(p_vals), 3)
self.assertEqual(p_vals[1:], [None, None])
self.assertTrue(0.0 <= p_vals[0] <= 1.0)
self.compare_multiple_level_array(corr_p_vals, p_vals)
if p_vals[0] >= 0 and p_vals[0] <= 0.5:
found_match = True
break
self.assertTrue(found_match)
def test_find_distance_classes(self):
"""Test finding the distance classes a matrix's elements are in."""
exp = (array([[-1, 0, 1], [0, -1, 2], [1, 2, -1]]),
[3.0, 5.0, 7.0])
obs = self.small_mc._find_distance_classes(
self.small_mc.DistanceMatrices[1], 3)
self.compare_multiple_level_array(obs, exp)
exp = (array([[-1, 1, 2, 0, 0, 5, 7, 4, 6],
[1, -1, 0, 2, 3, 6, 6, 6, 4],
[2, 0, -1, 4, 5, 5, 7, 4, 6],
[0, 2, 4, -1, 3, 3, 3, 3, 2],
[0, 3, 5, 3, -1, 5, 7, 6, 6],
[5, 6, 5, 3, 5, -1, 5, 2, 5],
[7, 6, 7, 3, 7, 5, -1, 0, 0],
[4, 6, 4, 3, 6, 2, 0, -1, 0],
[6, 4, 6, 2, 6, 5, 0, 0, -1]]),
[0.57381779, 0.60024231, 0.62666684, 0.65309137, 0.67951589,
0.70594042, 0.73236494, 0.75878947])
obs = self.mc._find_distance_classes(
self.mc.DistanceMatrices[1], 8)
self.compare_multiple_level_array(obs, exp)
def test_find_distance_classes_variable_size_bins(self):
"""Test finding distance classes with variable-size bins."""
# Single distance class.
exp = (array([[-1, 0, 0], [0, -1, 0], [0, 0, -1]]), [5.0])
obs = self.small_mc_var_bins._find_distance_classes(
self.small_mc_var_bins.DistanceMatrices[1], 1)
self.compare_multiple_level_array(obs, exp)
# Multiple distance classes (even #).
exp = (array([[-1, 0, 0], [0, -1, 1], [0, 1, -1]]), [3.5, 6.5])
obs = self.small_mc_var_bins._find_distance_classes(
self.small_mc_var_bins.DistanceMatrices[1], 2)
self.compare_multiple_level_array(obs, exp)
# Multiple distance classes (odd #).
exp = (array([[-1, 0, 1], [0, -1, 2], [1, 2, -1]]),
[2.0, 3.5, 6.5])
obs = self.small_mc_var_bins._find_distance_classes(
self.small_mc_var_bins.DistanceMatrices[1], 3)
self.compare_multiple_level_array(obs, exp)
# More classes than distances.
exp = (array([[-1, 0, 1], [0, -1, 2], [1, 2, -1]]),
[2.0, 3.5, 6.5, 8])
obs = self.small_mc_var_bins._find_distance_classes(
self.small_mc_var_bins.DistanceMatrices[1], 4)
self.compare_multiple_level_array(obs, exp)
def test_find_distance_classes_invalid_num_classes(self):
"""Test finding the distance classes for a bad number of classes."""
self.assertRaises(ValueError, self.mc._find_distance_classes,
self.mc.DistanceMatrices[1], 0)
self.assertRaises(ValueError, self.mc._find_distance_classes,
self.mc.DistanceMatrices[1], -1)
def test_find_row_col_indices(self):
"""Test finds the row and col based on a flattened-list index."""
obs = self.mc._find_row_col_indices(0)
self.assertEqual(obs, (1, 0))
obs = self.mc._find_row_col_indices(1)
self.assertEqual(obs, (2, 0))
obs = self.mc._find_row_col_indices(2)
self.assertEqual(obs, (2, 1))
obs = self.mc._find_row_col_indices(3)
self.assertEqual(obs, (3, 0))
obs = self.mc._find_row_col_indices(4)
self.assertEqual(obs, (3, 1))
obs = self.mc._find_row_col_indices(5)
self.assertEqual(obs, (3, 2))
obs = self.mc._find_row_col_indices(6)
self.assertEqual(obs, (4, 0))
self.assertRaises(IndexError, self.mc._find_row_col_indices, -1)
def test_find_break_points(self):
"""Test finding equal-spaced breakpoints in a range."""
exp = [-2.2204460492503131e-16, 1.0, 2.0, 3.0, 4.0, 5.0]
obs = self.mc._find_break_points(0, 5, 5)
assert_almost_equal(obs, exp)
exp = [-2.0, -1.66666666667, -1.33333333333, -1.0]
obs = self.mc._find_break_points(-2, -1, 3)
assert_almost_equal(obs, exp)
exp = [-1.0, -0.5, 0.0, 0.5, 1.0]
obs = self.mc._find_break_points(-1, 1, 4)
assert_almost_equal(obs, exp)
exp = [-1.0, 1.0]
obs = self.mc._find_break_points(-1, 1, 1)
assert_almost_equal(obs, exp)
def test_find_break_points_invalid_range(self):
"""Test finding breakpoints on an invalid range."""
self.assertRaises(ValueError, self.mc._find_break_points, 1, 0, 5)
self.assertRaises(ValueError, self.mc._find_break_points, 1, 1, 5)
def test_find_break_points_invalid_num_classes(self):
"""Test finding breakpoints with an invalid number of classes."""
self.assertRaises(ValueError, self.mc._find_break_points, 0, 1, 0)
self.assertRaises(ValueError, self.mc._find_break_points, 0, 1, -1)
def test_correct_p_values(self):
"""Test p-value correction for a small list of p-values."""
exp = [0.003, 0.006, 0.003]
obs = self.mc._correct_p_values([0.001, 0.002, 0.001])
assert_almost_equal(obs, exp)
def test_correct_p_values_all_None(self):
"""Test p-value correction for all None p-values."""
exp = [None, None]
obs = self.mc._correct_p_values([None, None])
self.assertEqual(obs, exp)
def test_correct_p_values_all_nan(self):
"""Test p-value correction for all NaN p-values."""
exp = [nan, nan]
obs = self.mc._correct_p_values([nan, nan])
self.assertEqual(obs, exp)
def test_correct_p_values_mixed(self):
"""p-value correction for mixture of None/NaN and valid p-values."""
exp = [None, 0.008, 0.01, nan]
obs = self.mc._correct_p_values([None, 0.004, 0.005, nan])
self.assertEqual(obs, exp)
def test_correct_p_values_no_change(self):
"""Test p-value correction where none is needed."""
exp = [None, 0.008]
obs = self.mc._correct_p_values([None, 0.008])
self.assertEqual(obs, exp)
exp = [0.007]
obs = self.mc._correct_p_values([0.007])
assert_almost_equal(obs, exp)
def test_correct_p_values_large_correction(self):
"""Test p-value correction that exceeds 1.0."""
exp = [1, None, 0.03, 0.03]
obs = self.mc._correct_p_values([0.5, None, 0.01, 0.01])
self.compare_multiple_level_array(obs, exp)
def test_correct_p_values_empty(self):
"""Test p-value correction on empty list."""
exp = []
obs = self.mc._correct_p_values([])
assert_almost_equal(obs, exp)
def test_generate_correlogram(self):
"""Test creating a correlogram plot."""
obs_fig = self.mc._generate_correlogram([0, 1, 2], [-0.9, 0, 0.9],
[0.001, 0.1, 0.9])
obs_ax = obs_fig.get_axes()[0]
self.assertEqual(obs_ax.get_title(), "Mantel Correlogram")
self.assertEqual(obs_ax.get_xlabel(), "Distance class index")
self.assertEqual(obs_ax.get_ylabel(), "Mantel correlation statistic")
assert_almost_equal(obs_ax.get_xticks(), [0., 0.5, 1., 1.5, 2.])
assert_almost_equal(obs_ax.get_yticks(), [-1., -0.5, 0., 0.5, 1.])
def test_generate_correlogram_empty(self):
"""Test creating a correlogram plot with no data."""
obs_fig = self.mc._generate_correlogram([], [], [])
obs_ax = obs_fig.get_axes()[0]
self.assertEqual(obs_ax.get_title(), "Mantel Correlogram")
self.assertEqual(obs_ax.get_xlabel(), "Distance class index")
self.assertEqual(obs_ax.get_ylabel(), "Mantel correlation statistic")
class PartialMantelTests(TestHelper):
"""Tests for the PartialMantel class."""
def setUp(self):
"""Set up PartialMantel instances for use in tests."""
super(PartialMantelTests, self).setUp()
# Test partial Mantel using the unifrac dm from the overview tutorial
# as all three inputs (should be a small value).
self.pm = PartialMantel(self.overview_dm, self.overview_dm,
self.overview_dm)
# Just a small matrix that is easy to edit and observe.
smpl_ids = ['s1', 's2', 's3']
self.small_pm = PartialMantel(
DistanceMatrix(array([[0, 1, 4], [1, 0, 3], [4, 3, 0]]), smpl_ids),
DistanceMatrix(array([[0, 2, 5], [2, 0, 8], [5, 8, 0]]), smpl_ids),
DistanceMatrix(array([[0, 9, 10], [9, 0, 2], [10, 2, 0]]),
smpl_ids))
self.small_pm_diff = PartialMantel(
DistanceMatrix(array([[0, 1, 4], [1, 0, 3], [4, 3, 0]]), smpl_ids),
DistanceMatrix(array([[0, 20, 51], [20, 0, 888], [51, 888, 0]]),
smpl_ids),
DistanceMatrix(array([[0, 9, 10], [9, 0, 2], [10, 2, 0]]),
smpl_ids))
smpl_ids = ['s1', 's2', 's3', 's4', 's5']
self.small_pm_diff2 = PartialMantel(
DistanceMatrix(array([[0, 1, 2, 3, 1.4],
[1, 0, 1.5, 1.6, 1.7],
[2, 1.5, 0, 0.8, 1.9],
[3, 1.6, 0.8, 0, 1.0],
[1.4, 1.7, 1.9, 1.0, 0]]), smpl_ids),
DistanceMatrix(array([[0, 1, 2, 3, 4.1],
[1, 0, 5, 6, 7],
[2, 5, 0, 8, 9],
[3, 6, 8, 0, 10],
[4.1, 7, 9, 10, 0]]), smpl_ids),
DistanceMatrix(array([[0, 1, 2, 3, 4],
[1, 0, 5, 6, 7],
[2, 5, 0, 8, 9.1],
[3, 6, 8, 0, 10],
[4, 7, 9.1, 10, 0]]), smpl_ids))
def test_DistanceMatrices_setter(self):
"""Test setting matrices using a valid number of distance matrices."""
dms = [self.overview_dm, self.overview_dm, self.overview_dm]
self.pm.DistanceMatrices = dms
self.assertEqual(self.pm.DistanceMatrices, dms)
def test_DistanceMatrices_setter_wrong_number(self):
"""Test setting an invalid number of distance matrices."""
self.assertRaises(ValueError, setattr, self.pm,
'DistanceMatrices', [self.overview_dm])
self.assertRaises(ValueError, setattr, self.pm,
'DistanceMatrices', [self.overview_dm, self.overview_dm])
def test_DistanceMatrices_setter_too_small(self):
"""Test setting distance matrices that are too small."""
self.assertRaises(ValueError, setattr, self.pm, 'DistanceMatrices',
[self.single_ele_dm, self.single_ele_dm, self.single_ele_dm])
def test_call_small(self):
"""Test the running of partial Mantel analysis on small input."""
obs = self.small_pm()
exp_method_name = 'Partial Mantel'
self.assertEqual(obs['method_name'], exp_method_name)
exp_mantel_r = 0.99999999999999944
assert_almost_equal(obs['mantel_r'], exp_mantel_r)
# We're not testing that this p-value falls between a certain range
# because this test has poor stability across platforms/numpy
# configurations. Just make sure the p-value is between 0 and 1.
self.assertTrue(0.0 <= obs['mantel_p'] <= 1.0)
obs = self.small_pm_diff()
exp_method_name = 'Partial Mantel'
self.assertEqual(obs['method_name'], exp_method_name)
exp_mantel_r = 0.99999999999999734
assert_almost_equal(obs['mantel_r'], exp_mantel_r)
self.assertCorrectPValue(0.25, 0.4, self.small_pm_diff,
p_val_key='mantel_p')
obs = self.small_pm_diff2()
exp_method_name = 'Partial Mantel'
self.assertEqual(obs['method_name'], exp_method_name)
exp_mantel_r = -0.350624881409
assert_almost_equal(obs['mantel_r'], exp_mantel_r)
self.assertCorrectPValue(0.8, 1.0, self.small_pm_diff2,
p_val_key='mantel_p')
class TopLevelTests(TestHelper):
def setUp(self):
pass
def test_quantile(self):
"""checks for correct quantile statistic values"""
# shuffle the data to be sure it is getting sorted
sample_data = array(range(1, 11))
shuffle(sample_data)
# regular cases
expected_output = [1.9, 2.8, 3.25, 5.5, 7.75, 7.93]
list_of_quantiles = [0.1, 0.2, 0.25, 0.5, 0.75, 0.77]
output = quantile(sample_data, list_of_quantiles)
assert_almost_equal(expected_output, output)
sample_data = array([42, 32, 24, 57, 15, 34, 83, 24, 60, 67, 55, 17,
83, 17, 80, 65, 14, 34, 39, 53])
list_of_quantiles = [0.5]
output = quantile(sample_data, list_of_quantiles)
assert_almost_equal(output, median(sample_data))
# quantiles must be between [0, 1]
with self.assertRaises(AssertionError):
output = quantile(sample_data, [0.1, 0.2, -0.1, 2, 0.3, 0.5])
# quantiles must be a list or a numpy array
with self.assertRaises(AssertionError):
output = quantile(sample_data, 1)
# the data must be a list or a numpy array
with self.assertRaises(AssertionError):
output = quantile(1, [0])
def test__quantile(self):
"""checks for correct quantiles according to R. type 7 algorithm"""
# regular cases
sample_data = array(range(25, 42))
assert_almost_equal(_quantile(sample_data, 0.5), median(sample_data))
# sorted data is assumed for this function
sample_data = sorted(
array([0.17483293, 0.99891939, 0.81377467, 0.8137437,
0.51990174, 0.35521497, 0.98751461]))
assert_almost_equal(_quantile(sample_data, 0.10), 0.283062154)
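# Worked example of the type 7 rule being checked: for the seven sorted
# values above and a probability of 0.10, h = (7 - 1) * 0.10 = 0.6, so
# the quantile interpolates between the two smallest points:
# 0.17483293 + 0.6 * (0.35521497 - 0.17483293) = 0.283062154.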
class PairedDifferenceTests(TestHelper):
def setUp(self):
self.personal_ids_to_state_values1 = \
{'firmicutes-abundance':
{'subject1': [0.45, 0.55],
'subject2': [0.11, 0.52]},
'bacteroidetes-abundance':
{'subject1': [0.28, 0.21],
'subject2': [0.11, 0.01]}
}
self.personal_ids_to_state_values2 = \
{'firmicutes-abundance':
{'subject1': [0.45, 0.55],
'subject2': [0.11, None]},
'bacteroidetes-abundance':
{'subject1': [0.28, 0.21],
'subject2': [0.11, 0.01]}
}
self.files_to_remove = []
self.dirs_to_remove = []
tmp_dir = get_qiime_temp_dir()
self.test_out = mkdtemp(dir=tmp_dir,
prefix='qiime_paired_diff_tests_',
suffix='')
self.dirs_to_remove.append(self.test_out)
def tearDown(self):
remove_files(self.files_to_remove)
# remove directories last, so we don't get errors
# trying to remove files which may be in the directories
for d in self.dirs_to_remove:
if exists(d):
rmtree(d)
def test_paired_difference_analyses(self):
"""paired_difference_analyses functions as expected
"""
actual = paired_difference_analyses(
self.personal_ids_to_state_values1,
['firmicutes-abundance',
'bacteroidetes-abundance'],
['Pre', 'Post'],
output_dir=self.test_out,
ymin=0.0,
ymax=1.0)
self.assertTrue(exists(join(self.test_out,
'paired_difference_comparisons.txt')))
self.assertTrue(
exists(join(self.test_out, 'firmicutes-abundance.pdf')))
self.assertTrue(
exists(join(self.test_out, 'bacteroidetes-abundance.pdf')))
# five output paths returned
self.assertEqual(len(actual[0]), 5)
# expected t values returned: the firmicutes t statistic should be
# smaller than 2 in absolute value and the bacteroidetes one larger
self.assertLess(abs(actual[1]['firmicutes-abundance'][4]), 2)
self.assertLess(2, abs(actual[1]['bacteroidetes-abundance'][4]))
def test_paired_difference_analyses_biom_output(self):
"""paired_difference_analyses generates correct biom tables
"""
actual = paired_difference_analyses(
self.personal_ids_to_state_values1,
['firmicutes-abundance',
'bacteroidetes-abundance'],
['Pre', 'Post'],
output_dir=self.test_out,
ymin=0.0,
ymax=1.0)
biom_table_fp = join(self.test_out, 'differences.biom')
self.assertTrue(exists(biom_table_fp))
sids_fp = join(self.test_out, 'differences_sids.txt')
self.assertTrue(exists(sids_fp))
table = load_table(biom_table_fp)
self.assertItemsEqual(table.ids(), ['subject1', 'subject2'])
self.assertItemsEqual(table.ids(axis='observation'),
['firmicutes-abundance', 'bacteroidetes-abundance'])
assert_almost_equal(table
[(table.index('firmicutes-abundance',
axis='observation'),
table.index('subject1', axis='sample'))],
0.1, 2)
assert_almost_equal(table
[(table.index('bacteroidetes-abundance',
axis='observation'),
table.index('subject1', axis='sample'))],
-0.07, 2)
assert_almost_equal(table
[(table.index('firmicutes-abundance',
axis='observation'),
table.index('subject2', axis='sample'))],
0.41, 2)
assert_almost_equal(table
[(table.index('bacteroidetes-abundance',
axis='observation'),
table.index('subject2', axis='sample'))],
-0.10, 2)
with open(sids_fp) as sids_file:
md, _ = parse_mapping_file_to_dict(sids_file)
self.assertEqual(set(md.keys()), set(('subject1', 'subject2')))
s1_data_actual = md['subject1']
s1_data_expected = {'Pre-firmicutes-abundance': 0.45,
'Post-firmicutes-abundance': 0.55,
'Pre-bacteroidetes-abundance': 0.28,
'Post-bacteroidetes-abundance': 0.21}
s2_data_actual = md['subject2']
s2_data_expected = {'Pre-firmicutes-abundance': 0.11,
'Post-firmicutes-abundance': 0.52,
'Pre-bacteroidetes-abundance': 0.11,
'Post-bacteroidetes-abundance': 0.01}
# missing data should raise ValueError
self.assertRaises(ValueError, paired_difference_analyses,
self.personal_ids_to_state_values2,
['firmicutes-abundance',
'bacteroidetes-abundance'],
['Pre', 'Post'],
output_dir=self.test_out,
ymin=0.0,
ymax=1.0)
def test_paired_difference_analyses_wo_ymin_ymax(self):
"""paired_difference_analyses functions as expected w/o ymin/ymax
"""
# runs successfully with ymin/ymax left as None
actual = paired_difference_analyses(
self.personal_ids_to_state_values1,
['firmicutes-abundance',
'bacteroidetes-abundance'],
['Pre', 'Post'],
output_dir=self.test_out,
ymin=None,
ymax=None)
self.assertTrue(exists(join(self.test_out,
'paired_difference_comparisons.txt')))
self.assertTrue(
exists(join(self.test_out, 'firmicutes-abundance.pdf')))
self.assertTrue(
exists(join(self.test_out, 'bacteroidetes-abundance.pdf')))
# five output paths returned
self.assertEqual(len(actual[0]), 5)
# expected t values returned: the firmicutes t statistic should be
# positive and the bacteroidetes t statistic negative
self.assertLess(0, actual[1]['firmicutes-abundance'][4])
self.assertLess(actual[1]['bacteroidetes-abundance'][4], 0)
def test_paired_difference_analyses_analysis_cat_subset(self):
"""paired_difference_analyses fns w a subset of analysis categories
"""
actual = paired_difference_analyses(
self.personal_ids_to_state_values1,
['firmicutes-abundance'],
['Pre', 'Post'],
output_dir=self.test_out,
ymin=0.0,
ymax=1.0)
self.assertTrue(exists(join(self.test_out,
'paired_difference_comparisons.txt')))
self.assertTrue(
exists(join(self.test_out, 'firmicutes-abundance.pdf')))
self.assertFalse(
exists(join(self.test_out, 'bacteroidetes-abundance.pdf')))
# four output paths returned
self.assertEqual(len(actual[0]), 4)
# expected t values returned
assert_almost_equal(actual[1]['firmicutes-abundance'][4], 1.645, 3)
class TestsHelper(TestCase):
"""Class with utility methods useful for other tests."""
# How many times a p-value should be tested to fall in a given range
# before failing the test.
p_val_tests = 20
def assertCorrectPValue(self, exp_min, exp_max, fn, args=None,
kwargs=None, p_val_idx=0):
"""Tests that the stochastic p-value falls in the specified range.
Performs the test self.p_val_tests times and fails if the observed
p-value never falls into the specified range. Each
p-value is also tested that it falls in the range 0.0 to 1.0.
This method assumes that fn is callable, and will unpack and pass args
and kwargs to fn if they are provided. It also assumes that fn returns
a single value (the p-value to be tested) or a tuple of results (any
length greater than or equal to 1), with the p-value at position
p_val_idx.
This is primarily used for testing the Mantel and correlation_test
functions.
"""
found_match = False
for i in range(self.p_val_tests):
if args is not None and kwargs is not None:
obs = fn(*args, **kwargs)
elif args is not None:
obs = fn(*args)
elif kwargs is not None:
obs = fn(**kwargs)
else:
obs = fn()
try:
p_val = float(obs)
except TypeError:
p_val = obs[p_val_idx]
self.assertTrue(0.0 <= p_val <= 1.0)
if p_val >= exp_min and p_val <= exp_max:
found_match = True
break
self.assertTrue(found_match)
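        # For example, the mc_t_two_sample tests below call
        #     self.assertCorrectPValue(0.8, 0.9, mc_t_two_sample, [I, II],
        #                              p_val_idx=3)
        # so the stochastic p-value only has to land in [0.8, 0.9] once in
        # self.p_val_tests attempts for the test to pass.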
class TestsTests(TestCase):
"""Tests miscellaneous functions."""
def test_tail(self):
"""tail should return x/2 if test is true; 1-(x/2) otherwise"""
assert_allclose(tail(0.25, 'a' == 'a'), 0.25 / 2)
assert_allclose(tail(0.25, 'a' != 'a'), 1 - (0.25 / 2))
def test_fisher(self):
"""fisher results should match p 795 Sokal and Rohlf"""
assert_allclose(fisher([0.073, 0.086, 0.10, 0.080, 0.060]),
0.0045957946540917905, atol=10e-7)
def test_permute_2d(self):
"""permute_2d permutes rows and cols of a matrix."""
a = reshape(arange(9), (3, 3))
assert_allclose(permute_2d(a, [0, 1, 2]), a)
assert_allclose(permute_2d(a, [2, 1, 0]),
array([[8, 7, 6], [5, 4, 3],
[2, 1, 0]]))
assert_allclose(permute_2d(a, [1, 2, 0]),
array([[4, 5, 3], [7, 8, 6],
[1, 2, 0]]))
class GTests(TestCase):
"""Tests implementation of the G tests for fit and independence."""
def test_G_2_by_2_2tailed_equal(self):
"""G_2_by_2 should return 0 if all cell counts are equal"""
assert_allclose(0, G_2_by_2(1, 1, 1, 1, False, False)[0])
assert_allclose(0, G_2_by_2(100, 100, 100, 100, False,
False)[0])
assert_allclose(0, G_2_by_2(100, 100, 100, 100, True,
False)[0])
def test_G_2_by_2_bad_data(self):
"""G_2_by_2 should raise ValueError if any counts are negative"""
self.assertRaises(ValueError, G_2_by_2, 1, -1, 1, 1)
def test_G_2_by_2_2tailed_examples(self):
"""G_2_by_2 values should match examples in Sokal & Rohlf"""
# example from p 731, Sokal and Rohlf (1995)
# without correction
assert_allclose(G_2_by_2(12, 22, 16, 50, False, False)[0],
1.33249, 0.0001)
assert_allclose(G_2_by_2(12, 22, 16, 50, False, False)[1],
0.24836, 0.0001)
# with correction
assert_allclose(G_2_by_2(12, 22, 16, 50, True, False)[0],
1.30277, 0.0001)
assert_allclose(G_2_by_2(12, 22, 16, 50, True, False)[1],
0.25371, 0.0001)
def test_G_2_by_2_1tailed_examples(self):
"""G_2_by_2 values should match values from codon_binding program"""
# first up...the famous arginine case
assert_allclose(G_2_by_2(36, 16, 38, 106), (29.111609, 0),
atol=10e-7)
# then some other miscellaneous positive and negative values
assert_allclose(
G_2_by_2(0, 52, 12, 132), (-7.259930, 0.996474), atol=10e-7)
assert_allclose(
G_2_by_2(5, 47, 14, 130), (-0.000481, 0.508751), atol=10e-7)
assert_allclose(
G_2_by_2(5, 47, 36, 108), (-6.065167, 0.993106), atol=10e-7)
def test_g_fit(self):
"""Test G fit is correct with and without Williams correction."""
# test with williams correction
data = [array(i) for i in [63, 31, 28, 12, 39, 16, 40, 12]]
exp_G = 69.030858949133162 / 1.00622406639
exp_p = 2.8277381487281706e-12
obs_G, obs_p = g_fit(data, williams=True)
assert_allclose(obs_G, exp_G)
assert_allclose(obs_p, exp_p, atol=1e-7)
# test with hand computed example and williams correction
data = [array([75, 65, 48]), array([200]), array([10, 250, 13,
85])]
exp_G = 85.90859811005285 / 1.0018930430667
exp_p = 2.4012235241479195e-19
obs_G, obs_p = g_fit(data, williams=True)
assert_allclose(obs_G, exp_G)
assert_allclose(obs_p, exp_p, atol=1e-7)
# test without williams correction on another hand computed example
data = [array([10, 12, 15, 7]), array([15, 12, 17, 18]),
array([6, 9, 13])]
exp_G = 1.6610421781232
exp_p = 0.43582212499949591
obs_G, obs_p = g_fit(data, williams=False)
assert_allclose(obs_G, exp_G)
assert_allclose(obs_p, exp_p, atol=1e-7)
def test_williams_correction(self):
"""Test that the Williams correction is correctly computed."""
n = 100
a = 10
G = 10.5783
exp = 10.387855973813421
assert_allclose(williams_correction(n, a, G), exp,
rtol=1e-5)
# test with an example from Sokal and Rohlf pg 699
n = 241
a = 8
G = 8.82396
exp = 8.76938
assert_allclose(williams_correction(n, a, G), exp,
rtol=1e-5)
def test_safe_sum_p_log_p(self):
"""safe_sum_p_log_p should ignore zero elements, not raise error"""
m = array([2, 4, 0, 8])
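        # with base-2 logs the zero entry is skipped, so the expected sum is
        # 2*log2(2) + 4*log2(4) + 8*log2(8) = 2*1 + 4*2 + 8*3 = 34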
self.assertEqual(safe_sum_p_log_p(m, 2), 2 * 1 + 4 * 2 + 8 * 3)
class StatTests(TestsHelper):
"""Tests that the t and z tests are implemented correctly"""
def setUp(self):
super(StatTests, self).setUp()
self.x = [7.33, 7.49, 7.27, 7.93, 7.56, 7.81, 7.46, 6.94, 7.49, 7.44,
7.95, 7.47, 7.04, 7.10, 7.64]
self.y = [7.53, 7.70, 7.46, 8.21, 7.81, 8.01, 7.72, 7.13, 7.68, 7.66,
8.11, 7.66, 7.20, 7.25, 7.79]
def test_t_paired_2tailed(self):
"""t_paired should match values from Sokal & Rohlf p 353"""
x, y = self.x, self.y
# check value of t and the probability for 2-tailed
assert_allclose(t_paired(y, x)[0], 19.7203, 1e-4)
assert_allclose(t_paired(y, x)[1], 1.301439e-11, 1e-4)
def test_t_paired_no_variance(self):
"""t_paired should return None if lists are invariant"""
x = [1, 1, 1]
y = [0, 0, 0]
assert_allclose(t_paired(x, x), (nan, nan))
assert_allclose(t_paired(x, y), (nan, nan))
def test_t_paired_1tailed(self):
"""t_paired should match pre-calculated 1-tailed values"""
x, y = self.x, self.y
# check probability for 1-tailed low and high
assert_allclose(
t_paired(y, x, "low")[1], 1 - (1.301439e-11 / 2), 1e-4)
assert_allclose(
t_paired(x, y, "high")[1], 1 - (1.301439e-11 / 2), 1e-4)
assert_allclose(
t_paired(y, x, "high")[1], 1.301439e-11 / 2, 1e-4)
assert_allclose(
t_paired(x, y, "low")[1], 1.301439e-11 / 2, 1e-4)
def test_t_paired_specific_difference(self):
"""t_paired should allow a specific difference to be passed"""
x, y = self.x, self.y
# difference is 0.2, so test should be non-significant if 0.2 passed
self.assertFalse(t_paired(y, x, exp_diff=0.2)[0] > 1e-10)
# same, except that reversing list order reverses sign of difference
self.assertFalse(t_paired(x, y, exp_diff=-0.2)[0] > 1e-10)
# check that there's no significant difference from the true mean
assert_allclose(
t_paired(y, x, exp_diff=0.2)[1], 1, 1e-4)
def test_t_paired_bad_data(self):
"""t_paired should raise ValueError on lists of different lengths"""
self.assertRaises(ValueError, t_paired, self.y, [1, 2, 3])
def test_t_two_sample(self):
"""t_two_sample should match example on p.225 of Sokal and Rohlf"""
I = array([7.2, 7.1, 9.1, 7.2, 7.3, 7.2, 7.5])
II = array([8.8, 7.5, 7.7, 7.6, 7.4, 6.7, 7.2])
assert_allclose(t_two_sample(I, II, 'two-sided'),
(-0.1184, 0.45385 * 2),
atol=10e-3)
def test_t_two_sample_no_variance(self):
"""t_two_sample should properly handle lists that are invariant"""
        # By default should return (nan, nan) to mimic R's t.test.
x = array([1, 1., 1])
y = array([0, 0, 0.0])
self.assertEqual(t_two_sample(x, x), (nan, nan))
self.assertEqual(t_two_sample(x, y), (nan, nan))
# Should still receive (nan, nan) if the lists have no variance and
# have the same single value.
self.assertEqual(t_two_sample(x, x), (nan, nan))
self.assertEqual(t_two_sample(x, [1, 1]), (nan, nan))
def test_t_one_sample(self):
"""t_one_sample results should match those from R"""
x = array(range(-5, 5))
y = array(range(-1, 10))
assert_allclose(t_one_sample(x), (-0.5222, 0.6141), atol=10e-3)
assert_allclose(t_one_sample(y), (4, 0.002518), atol=10e-3)
# do some one-tailed tests as well
assert_allclose(t_one_sample(y, tails='low'), (4, 0.9987), atol=10e-3)
assert_allclose(
t_one_sample(y, tails='high'), (4, 0.001259), atol=10e-3)
def test_t_two_sample_switch(self):
"""t_two_sample should call t_one_observation if 1 item in sample."""
sample = array([4.02, 3.88, 3.34, 3.87, 3.18])
x = array([3.02])
assert_allclose(t_two_sample(x, sample), (-1.5637254, 0.1929248))
assert_allclose(t_two_sample(sample, x), (-1.5637254, 0.1929248))
# can't do the test if both samples have single item
assert_allclose(t_two_sample(x, x), (nan, nan))
# Test special case if t=0.
assert_allclose(t_two_sample([2], [1, 2, 3]), (0.0, 1.0))
assert_allclose(t_two_sample([1, 2, 3], [2]), (0.0, 1.0))
def test_t_one_observation(self):
"""t_one_observation should match p. 228 of Sokal and Rohlf"""
sample = array([4.02, 3.88, 3.34, 3.87, 3.18])
x = 3.02
# note that this differs after the 3rd decimal place from what's in
# the book, because Sokal and Rohlf round their intermediate steps...
assert_allclose(t_one_observation(x, sample), (-1.5637254, 0.1929248))
def test_t_one_observation_no_variance(self):
"""t_one_observation should correctly handle an invariant list."""
sample = array([1.0, 1.0, 1.0])
assert_allclose(t_one_observation(1, sample), (nan, nan))
assert_allclose(t_one_observation(2, sample, exp_diff=3), (nan, nan))
assert_allclose(t_one_observation(2, sample, tails='low'), (nan, nan))
def test_mc_t_two_sample(self):
"""Test gives correct results with valid input data."""
# Verified against R's t.test() and Deducer::perm.t.test().
# With numpy array as input.
exp = (-0.11858541225631833, 0.90756579317867436)
I = array([7.2, 7.1, 9.1, 7.2, 7.3, 7.2, 7.5])
II = array([8.8, 7.5, 7.7, 7.6, 7.4, 6.7, 7.2])
obs = mc_t_two_sample(I, II)
assert_allclose(obs[:2], exp)
self.assertEqual(len(obs[2]), 999)
self.assertCorrectPValue(0.8, 0.9, mc_t_two_sample, [I, II],
p_val_idx=3)
# With python list as input.
exp = (-0.11858541225631833, 0.90756579317867436)
I = [7.2, 7.1, 9.1, 7.2, 7.3, 7.2, 7.5]
II = [8.8, 7.5, 7.7, 7.6, 7.4, 6.7, 7.2]
obs = mc_t_two_sample(I, II)
assert_allclose(obs[:2], exp)
self.assertEqual(len(obs[2]), 999)
self.assertCorrectPValue(0.8, 0.9, mc_t_two_sample, [I, II],
p_val_idx=3)
exp = (-0.11858541225631833, 0.45378289658933718)
obs = mc_t_two_sample(I, II, tails='low')
assert_allclose(obs[:2], exp)
self.assertEqual(len(obs[2]), 999)
self.assertCorrectPValue(0.4, 0.47, mc_t_two_sample, [I, II],
{'tails': 'low'}, p_val_idx=3)
exp = (-0.11858541225631833, 0.54621710341066287)
obs = mc_t_two_sample(I, II, tails='high', permutations=99)
assert_allclose(obs[:2], exp)
self.assertEqual(len(obs[2]), 99)
self.assertCorrectPValue(0.4, 0.62, mc_t_two_sample, [I, II],
{'tails': 'high', 'permutations': 99},
p_val_idx=3)
exp = (-2.8855783649036986, 0.99315596652421401)
obs = mc_t_two_sample(I, II, tails='high',
permutations=99, exp_diff=1)
assert_allclose(obs[:2], exp)
self.assertEqual(len(obs[2]), 99)
self.assertCorrectPValue(0.55, 0.99, mc_t_two_sample, [I, II],
{'tails': 'high', 'permutations': 99,
'exp_diff': 1}, p_val_idx=3)
def test_mc_t_two_sample_unbalanced_obs(self):
"""Test gives correct results with unequal number of obs per sample."""
# Verified against R's t.test() and Deducer::perm.t.test().
exp = (-0.10302479888889175, 0.91979753020527177)
I = array([7.2, 7.1, 9.1, 7.2, 7.3, 7.2])
II = array([8.8, 7.5, 7.7, 7.6, 7.4, 6.7, 7.2])
obs = mc_t_two_sample(I, II)
assert_allclose(obs[:2], exp)
self.assertEqual(len(obs[2]), 999)
self.assertCorrectPValue(0.8, 0.9, mc_t_two_sample, [I, II],
p_val_idx=3)
def test_mc_t_two_sample_single_obs_sample(self):
"""Test works correctly with one sample having a single observation."""
sample = array([4.02, 3.88, 3.34, 3.87, 3.18])
x = array([3.02])
exp = (-1.5637254, 0.1929248)
obs = mc_t_two_sample(x, sample)
assert_allclose(obs[:2], exp, atol=1e-6)
assert_allclose(len(obs[2]), 999)
self.assertTrue(0.0 <= obs[3] <= 1.0)
# Test the case where we can have no variance in the permuted lists.
x = array([1, 1, 2])
y = array([1])
exp = (-0.5, 0.666666666667)
obs = mc_t_two_sample(x, y)
assert_allclose(obs[:2], exp)
assert_allclose(len(obs[2]), 999)
self.assertTrue(0.0 <= obs[3] <= 1.0)
def test_mc_t_two_sample_no_perms(self):
"""Test gives empty permutation results if no perms are given."""
exp = (-0.11858541225631833, 0.90756579317867436, [], nan)
I = array([7.2, 7.1, 9.1, 7.2, 7.3, 7.2, 7.5])
II = array([8.8, 7.5, 7.7, 7.6, 7.4, 6.7, 7.2])
obs = mc_t_two_sample(I, II, permutations=0)
assert_allclose(obs[0], exp[0])
assert_allclose(obs[1], exp[1])
self.assertEqual(obs[2], exp[2])
assert_allclose(obs[3], exp[3])
def test_mc_t_two_sample_no_mc(self):
"""Test no MC stats if initial t-test is bad."""
x = array([1, 1, 1])
y = array([0, 0, 0])
self.assertEqual(mc_t_two_sample(x, y), (nan, nan, [], nan))
def test_mc_t_two_sample_no_variance(self):
"""Test input with no variance. Should match Deducer::perm.t.test."""
x = array([1, 1, 1])
y = array([2, 2, 2])
exp = (nan, nan)
obs = mc_t_two_sample(x, y, permutations=1000)
self.assertEqual(obs[:2], exp)
def test_mc_t_two_sample_no_permuted_variance(self):
"""Test with chance of getting no variance with some perms."""
# Verified against R's t.test() and Deducer::perm.t.test().
x = array([1, 1, 2])
y = array([2, 2, 1])
exp = (-0.70710678118654791, 0.51851851851851838)
obs = mc_t_two_sample(x, y, permutations=1000)
assert_allclose(obs[:2], exp)
self.assertEqual(len(obs[2]), 1000)
self.assertCorrectPValue(0.90, 1.0, mc_t_two_sample, [x, y],
{'permutations': 1000}, p_val_idx=3)
def test_mc_t_two_sample_invalid_input(self):
"""Test fails on various invalid input."""
# self.assertRaises(ValueError, mc_t_two_sample, [1, 2, 3],
# [4., 5., 4.], tails='foo')
# self.assertRaises(ValueError, mc_t_two_sample, [1, 2, 3],
# [4., 5., 4.], permutations=-1)
self.assertRaises(ValueError, mc_t_two_sample, [1], [4.])
self.assertRaises(ValueError, mc_t_two_sample, [1, 2], [])
def test_permute_observations(self):
"""Test works correctly on small input dataset."""
I = [10, 20., 1]
II = [2, 4, 5, 7]
obs = _permute_observations(I, II, 1)
self.assertEqual(len(obs[0]), 1)
self.assertEqual(len(obs[1]), 1)
self.assertEqual(len(obs[0][0]), len(I))
self.assertEqual(len(obs[1][0]), len(II))
assert_allclose(sorted(concatenate((obs[0][0],
obs[1][0]))),
sorted(I + II))
def test_tail(self):
"""tail should return prob/2 if test is true, or 1-(prob/2) if false
"""
assert_allclose(tail(0.25, True), 0.125)
assert_allclose(tail(0.25, False), 0.875)
assert_allclose(tail(1, True), 0.5)
assert_allclose(tail(1, False), 0.5)
assert_allclose(tail(0, True), 0)
assert_allclose(tail(0, False), 1)
class CorrelationTests(TestsHelper):
"""Tests of correlation coefficients and Mantel test."""
def setUp(self):
"""Sets up variables used in the tests."""
super(CorrelationTests, self).setUp()
# For testing spearman and correlation_test using method='spearman'.
# Taken from the Spearman wikipedia article. Also used for testing
# Pearson (verified with R).
self.data1 = [106, 86, 100, 101, 99, 103, 97, 113, 112, 110]
self.data2 = [7, 0, 27, 50, 28, 29, 20, 12, 6, 17]
# For testing spearman.
self.a = [1, 2, 4, 3, 1, 6, 7, 8, 10, 4]
self.b = [2, 10, 20, 1, 3, 7, 5, 11, 6, 13]
self.c = [7, 1, 20, 13, 3, 57, 5, 121, 2, 9]
self.r = (1.7, 10, 20, 1.7, 3, 7, 5, 11, 6.5, 13)
self.x = (1, 2, 4, 3, 1, 6, 7, 8, 10, 4, 100, 2, 3, 77)
# Ranked copies for testing spearman.
self.b_ranked = [2, 7, 10, 1, 3, 6, 4, 8, 5, 9]
self.c_ranked = [5, 1, 8, 7, 3, 9, 4, 10, 2, 6]
# silence the warnings that will tests for correlation_test
filterwarnings('ignore', category=RuntimeWarning)
def test_is_symmetric_and_hollow(self):
"""Should correctly test for symmetry and hollowness of dist mats."""
self.assertTrue(is_symmetric_and_hollow(array([[0, 1], [1, 0]])))
self.assertTrue(is_symmetric_and_hollow(matrix([[0, 1], [1, 0]])))
self.assertTrue(is_symmetric_and_hollow(matrix([[0.0, 0],
[0.0, 0]])))
self.assertTrue(not is_symmetric_and_hollow(
array([[0.001, 1], [1, 0]])))
self.assertTrue(not is_symmetric_and_hollow(
array([[0, 1.1], [1, 0]])))
self.assertTrue(not is_symmetric_and_hollow(
array([[0.5, 1.1], [1, 0]])))
def test_pearson(self):
"""Test pearson correlation method on valid data."""
# This test output was verified by R.
assert_allclose(pearson([1, 2], [1, 2]), 1.0)
assert_allclose(pearson([1, 2, 3], [1, 2, 3]), 1.0)
assert_allclose(pearson([1, 2, 3], [1, 2, 4]), 0.9819805)
def test_pearson_invalid_input(self):
"""Test running pearson on bad input."""
self.assertRaises(ValueError, pearson, [1.4, 2.5], [5.6, 8.8, 9.0])
self.assertRaises(ValueError, pearson, [1.4], [5.6])
def test_spearman(self):
"""Test the spearman function with valid input."""
# One vector has no ties.
exp = 0.3719581
obs = spearman(self.a, self.b)
assert_allclose(obs, exp)
# Both vectors have no ties.
exp = 0.2969697
obs = spearman(self.b, self.c)
assert_allclose(obs, exp)
# Both vectors have ties.
exp = 0.388381
obs = spearman(self.a, self.r)
assert_allclose(obs, exp)
exp = -0.17575757575757578
obs = spearman(self.data1, self.data2)
assert_allclose(obs, exp)
def test_spearman_no_variation(self):
"""Test the spearman function with a vector having no variation."""
exp = nan
obs = spearman([1, 1, 1], [1, 2, 3])
assert_allclose(obs, exp)
def test_spearman_ranked(self):
"""Test the spearman function with a vector that is already ranked."""
exp = 0.2969697
obs = spearman(self.b_ranked, self.c_ranked)
assert_allclose(obs, exp)
def test_spearman_one_obs(self):
"""Test running spearman on a single observation."""
self.assertRaises(ValueError, spearman, [1.0], [5.0])
def test_spearman_invalid_input(self):
"""Test the spearman function with invalid input."""
self.assertRaises(ValueError, spearman, [], [])
self.assertRaises(ValueError, spearman, self.a, [])
def test_correlation_test_pearson(self):
"""Test correlation_t using pearson on valid input."""
# These results were verified with R.
# Test with non-default confidence level and permutations.
obs = correlation_t(self.data1, self.data2, method='pearson',
confidence_level=0.90, permutations=990)
assert_allclose(obs[:2], (-0.03760147,
0.91786297277172868), atol=10e-7)
self.assertEqual(len(obs[2]), 990)
for r in obs[2]:
self.assertTrue(r >= -1.0 and r <= 1.0)
self.assertCorrectPValue(0.9, 0.93, correlation_t,
(self.data1, self.data2),
{'method': 'pearson',
'confidence_level': 0.90,
'permutations': 990},
p_val_idx=3)
assert_allclose(obs[4], (-0.5779077, 0.5256224))
# Test with non-default tail type.
obs = correlation_t(self.data1, self.data2, method='pearson',
confidence_level=0.90, permutations=990,
tails='low')
assert_allclose(obs[:2], (-0.03760147,
0.45893148638586434), atol=10e-7)
self.assertEqual(len(obs[2]), 990)
for r in obs[2]:
self.assertTrue(r >= -1.0 and r <= 1.0)
self.assertCorrectPValue(0.41, 0.46, correlation_t,
(self.data1, self.data2),
{'method': 'pearson',
'confidence_level': 0.90,
'permutations': 990,
'tails': 'low'},
p_val_idx=3)
assert_allclose(obs[4], (-0.5779077, 0.5256224))
def test_correlation_test_spearman(self):
"""Test correlation_t using spearman on valid input."""
# This example taken from Wikipedia page:
# http://en.wikipedia.org/wiki/Spearman's_rank_correlation_coefficient
obs = correlation_t(self.data1, self.data2, method='spearman',
tails='high')
assert_allclose(obs[:2], (-0.17575757575757578,
0.686405827612))
self.assertEqual(len(obs[2]), 999)
for rho in obs[2]:
self.assertTrue(rho >= -1.0 and rho <= 1.0)
self.assertCorrectPValue(0.67, 0.7, correlation_t,
(self.data1, self.data2),
{'method': 'spearman',
'tails': 'high'},
p_val_idx=3)
assert_allclose(obs[4], (-0.7251388558041697,
0.51034422964834503))
# The p-value is off because the example uses a one-tailed test, while
# we use a two-tailed test. Someone confirms the answer that we get
# here for a two-tailed test:
# http://stats.stackexchange.com/questions/22816/calculating-p-value-
# for-spearmans-rank-correlation-coefficient-example-on-wikip
obs = correlation_t(self.data1, self.data2, method='spearman',
tails='two-sided')
assert_allclose(obs[:2], (-0.17575757575757578,
0.62718834477648433))
self.assertEqual(len(obs[2]), 999)
for rho in obs[2]:
self.assertTrue(rho >= -1.0 and rho <= 1.0)
self.assertCorrectPValue(0.60, 0.64, correlation_t,
(self.data1, self.data2),
{'method': 'spearman', 'tails': 'two-sided'},
p_val_idx=3)
assert_allclose(obs[4], (-0.7251388558041697,
0.51034422964834503))
def test_correlation_test_invalid_input(self):
"""Test correlation_t using invalid input."""
self.assertRaises(ValueError, correlation_t, self.data1, self.data2,
method='foo')
# self.assertRaises(ValueError, correlation_t, self.data1, self.data2,
# tails='foo')
self.assertRaises(ValueError, correlation_t, self.data1, self.data2,
permutations=-1)
self.assertRaises(ValueError, correlation_t, self.data1, self.data2,
confidence_level=-1)
self.assertRaises(ValueError, correlation_t, self.data1, self.data2,
confidence_level=1.1)
self.assertRaises(ValueError, correlation_t, self.data1, self.data2,
confidence_level=0)
self.assertRaises(ValueError, correlation_t, self.data1, self.data2,
confidence_level=0.0)
self.assertRaises(ValueError, correlation_t, self.data1, self.data2,
confidence_level=1)
self.assertRaises(ValueError, correlation_t, self.data1, self.data2,
confidence_level=1.0)
def test_correlation_test_no_permutations(self):
"""Test correlation_t with no permutations."""
# These results were verified with R.
exp = (-0.2581988897471611, 0.7418011102528389, [], None,
(-0.97687328610475876, 0.93488023560400879))
obs = correlation_t([1, 2, 3, 4], [1, 2, 1, 1], permutations=0)
assert_allclose(obs[0], exp[0])
assert_allclose(obs[1], exp[1])
assert_allclose(obs[2], exp[2])
self.assertEqual(obs[3], exp[3])
assert_allclose(obs[4], exp[4])
def test_correlation_test_perfect_correlation(self):
"""Test correlation_t with perfectly-correlated input vectors."""
# These results were verified with R.
obs = correlation_t([1, 2, 3, 4], [1, 2, 3, 4])
assert_allclose(obs[:2], (1.0, 0.0))
self.assertEqual(len(obs[2]), 999)
for r in obs[2]:
self.assertTrue(r >= -1.0 and r <= 1.0)
self.assertCorrectPValue(0.06, 0.09, correlation_t,
([1, 2, 3, 4], [1, 2, 3, 4]),
p_val_idx=3)
assert_allclose(obs[4], (0.99999999999998879, 1.0))
def test_correlation_test_small_obs(self):
"""Test correlation_t with a small number of observations."""
# These results were verified with R.
obs = correlation_t([1, 2, 3], [1, 2, 3])
assert_allclose(obs[:2], (1.0, 0))
self.assertEqual(len(obs[2]), 999)
for r in obs[2]:
self.assertTrue(r >= -1.0 and r <= 1.0)
self.assertCorrectPValue(0.3, 0.4, correlation_t,
([1, 2, 3], [1, 2, 3]),
p_val_idx=3)
self.assertEqual(obs[4], (None, None))
obs = correlation_t([1, 2, 3], [1, 2, 3], method='spearman')
assert_allclose(obs[:2], (1.0, 0))
self.assertEqual(len(obs[2]), 999)
for r in obs[2]:
self.assertTrue(r >= -1.0 and r <= 1.0)
self.assertCorrectPValue(0.3, 0.4, correlation_t,
([1, 2, 3], [1, 2, 3]),
{'method': 'spearman'}, p_val_idx=3)
self.assertEqual(obs[4], (None, None))
def test_mw_test(self):
"""mann-whitney test results should match Sokal & Rohlf"""
# using Sokal and Rolhf and R wilcox.test
# x <- c(104, 109, 112, 114, 116, 118, 118, 119, 121, 123, 125, 126,
# 126, 128, 128, 128)
# y <- c(100, 105, 107, 107, 108, 111, 116, 120, 121, 123)
# wilcox.test(x,y)
# W = 123.5, p-value = 0.0232
x = [104, 109, 112, 114, 116, 118, 118, 119, 121, 123, 125, 126, 126,
128, 128, 128]
y = [100, 105, 107, 107, 108, 111, 116, 120, 121, 123]
u, p = mw_t(x, y, continuity=True, two_sided=True)
        # a return of 123.5 would also be okay; there is a consensus to use
        # the smaller U statistic, but the probability calculated from either
        # is the same
self.assertTrue(u == 36.5 or u == 123.5)
assert_allclose(p, .0232, rtol=1e-3)
def test_mw_boot(self):
"""excercising the Monte-carlo variant of mann-whitney"""
x = [104, 109, 112, 114, 116, 118, 118, 119, 121, 123, 125, 126, 126,
128, 128, 128]
y = [100, 105, 107, 107, 108, 111, 116, 120, 121, 123]
u, p = mw_boot(x, y, 10)
self.assertTrue(u == 36.5 or u == 123.5)
self.assertTrue(0 <= p <= 0.5)
def test_kendall(self):
"""tests new kendall tau implamentation, returns tau, prob"""
# test from pg. 594 Sokal and Rohlf, Box 15.7
v1 = [8.7, 8.5, 9.4, 10, 6.3, 7.8, 11.9, 6.5, 6.6, 10.6, 10.2, 7.2,
8.6, 11.1, 11.6]
v2 = [5.95, 5.65, 6.00, 5.70, 4.70, 5.53, 6.40, 4.18, 6.15, 5.93, 5.70,
5.68, 6.13, 6.30, 6.03]
obs_tau = kendall(v1, v2)
obs_prob = kendall_pval(obs_tau, len(v1))
exp_tau = 0.49761335152811925
exp_prob = 0.0097188572446995618
assert_allclose(obs_tau, exp_tau)
assert_allclose(obs_prob, exp_prob)
# random vectors checked against scipy. v1 has 33 ties, v2 32
v1 = array(
[1.2, 9.7, 8.8, 1.7, 8.6, 9.9, 6.8, 7.3, 5.5, 5.4, 8.3,
3.6, 7.5, 2., 9.3, 5.1, 8.4, 0.3, 8.2, 2.4, 9.8, 8.5,
2.1, 6., 1.8, 3.7, 1.4, 4.6, 7.6, 5.2, 0.9, 5.2, 4.7,
2.9, 5., 6.9, 1.3, 6.7, 5.2, 2.4, 6.9, 2., 7.4, 0.4,
8.2, 9.5, 2.9, 5.7, 2.4, 8.8, 1.6, 3.5, 5.1, 3.6, 3.3,
7.5, 0.9, 9.3, 5.4, 6.9, 9.3, 2.3, 1.9, 8.1, 3.2, 4.2,
8.7, 3., 9.8, 5.3, 6.2, 4.8, 9., 2.8, 5.5, 8.4, 4.1,
5.6, 5.4, 6.9, 3.8, 2.7, 0.3, 3.9, 8.2, 6.6, 1.9, 3.9,
2., 4.4, 0.8, 6.5, 4.8, 1.5, 9.9, 9.1, 9.9, 6.2, 2.9,
2.])
v2 = array([6.6, 8.6, 3.9, 6.1, 0.9, 8.4, 10., 3.3, 0.4,
3.9, 7.6, 8.2, 8.6, 3., 6.9, 0.6, 8.4, 8.1,
6.3, 0.5, 5.2, 6.4, 8., 9.9, 1.2, 6.7, 8.4,
2.7, 8.4, 4.1, 4.6, 5.1, 5.2, 5.3, 2.2, 2.2,
4.3, 7.1, 1.4, 6.6, 7.6, 4.5, 7.8, 3.5, 7.1,
0.6, 4.6, 3.2, 2.2, 0.2, 3.9, 5.9, 7.7, 8.8,
1.3, 5.1, 5.6, 8.3, 8.8, 1.7, 5.2, 6.9, 1.3,
1.4, 4.9, 9.4, 2.3, 3.7, 9.1, 3.4, 1.6, 4.1,
9.7, 2.8, 9.9, 0.5, 2., 2.7, 3.3, 2.4, 3.6,
7.9, 6.5, 7., 4.2, 1.8, 1.6, 1.9, 5.5, 0.,
1.4, 2.2, 7.2, 8.2, 1.1, 2.5, 5.3, 0.2, 9., 0.2])
exp_tau, exp_prob = (0.024867511238807951, 0.71392573687923555)
obs_tau = kendall(v1, v2)
obs_prob = kendall_pval(obs_tau, len(v1))
assert_allclose(obs_tau, exp_tau)
assert_allclose(obs_prob, exp_prob)
class TestDistMatrixPermutationTest(TestCase):
"""Tests of distance_matrix_permutation_test"""
def setUp(self):
"""sets up variables for testing"""
self.matrix = array(
[[1, 2, 3, 4], [5, 6, 7, 8], [9, 10, 11, 12], [13, 14, 15, 16]])
self.cells = [(0, 1), (1, 3)]
self.cells2 = [(0, 2), (2, 3)]
def test_ANOVA_one_way(self):
"""ANOVA one way returns same values as ANOVA on a stats package
"""
g1 = array([10.0, 11.0, 10.0, 5.0, 6.0])
g2 = array([1.0, 2.0, 3.0, 4.0, 1.0, 2.0])
g3 = array([6.0, 7.0, 5.0, 6.0, 7.0])
i = [g1, g2, g3]
F, pval = ANOVA_one_way(i)
assert_allclose(F, 18.565450643776831)
assert_allclose(pval, 0.00015486238993089464)
def test_kruskal_wallis(self):
"""Test kruskal_wallis on Sokal & Rohlf Box 13.6 dataset"""
d_control = [75, 67, 70, 75, 65, 71, 67, 67, 76, 68]
d_2_gluc = [57, 58, 60, 59, 62, 60, 60, 57, 59, 61]
d_2_fruc = [58, 61, 56, 58, 57, 56, 61, 60, 57, 58]
d_1_1 = [58, 59, 58, 61, 57, 56, 58, 57, 57, 59]
d_2_sucr = [62, 66, 65, 63, 64, 62, 65, 65, 62, 67]
data = [d_control, d_2_gluc, d_2_fruc, d_1_1, d_2_sucr]
kw_stat, pval = kruskal_wallis(data)
assert_allclose(kw_stat, 38.436807439)
assert_allclose(pval, 9.105424085598766e-08)
# test using a random data set against scipy
x_0 = array([0, 0, 0, 31, 12, 0, 25, 26, 775, 13])
x_1 = array([14, 15, 0, 15, 12, 13])
x_2 = array([0, 0, 0, 55, 92, 11, 11, 11, 555])
# kruskal(x_0, x_1, x_2) = (0.10761259465923653, 0.94761564440615031)
exp = (0.10761259465923653, 0.94761564440615031)
obs = kruskal_wallis([x_0, x_1, x_2])
assert_allclose(obs, exp)
class PvalueTests(TestCase):
'''Test that the methods for handling Pvalues return the results we expect.
Note: eps is being set lower on some of these because Sokal and Rohlf
provide only ~5 sig figs and our integrals diverge by that much or more.
'''
def setUp(self):
'''Nothing needed for all tests.'''
pass
def test_fdr_correction(self):
"""Test that the fdr_correction works as anticipated."""
pvals = array([.1, .7, .5, .3, .9])
exp = array([.5, .7 * 5 / 4., .5 * 5 / 3., .3 * 5 / 2., .9])
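        # Benjamini-Hochberg FDR scaling: each p-value is multiplied by
        # n / rank, where rank is its position in the ascending sort and
        # n = 5 here, e.g. 0.1 * 5/1 = 0.5 and 0.7 * 5/4 = 0.875.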
obs = fdr_correction(pvals)
assert_allclose(obs, exp)
def test_benjamini_hochberg_step_down(self):
"""Test that the BH step down procedure behaves as it does in R."""
# r values
# q = c(0.64771481, 0.93517796, 0.7169902 , 0.18223457, 0.26918556,
# 0.1450153 , 0.22448242, 0.74723508, 0.89061034, 0.74007906)
# p.adjust(q, method='BH')
# [1] 0.9340439 0.9351780 0.9340439 0.6729639 0.6729639 0.6729639
# 0.6729639
# [8] 0.9340439 0.9351780 0.9340439
pvals = array([0.64771481, 0.93517796, 0.7169902, 0.18223457,
0.26918556, 0.1450153, 0.22448242, 0.74723508,
0.89061034, 0.74007906])
exp = array([0.9340439, 0.9351780, 0.9340439, 0.6729639, 0.6729639,
0.6729639, 0.6729639, 0.9340439, 0.9351780, 0.9340439])
obs = benjamini_hochberg_step_down(pvals)
assert_allclose(obs, exp)
# example 2
pvals = array([1.32305426, 1.9345059, 0.87129877, 1.89957702,
1.85712616, 0.68757988, 0.41248969, 0.20751712,
1.97658599, 1.06209437])
exp = array([1., 1., 1., 1., 1., 1., 1., 1., 1., 1.])
obs = benjamini_hochberg_step_down(pvals)
assert_allclose(obs, exp)
def test_bonferroni_correction(self):
"""Test that Bonferroni correction behaves correctly."""
pvals = array([.1, .7, .5, .3, .9])
exp = pvals * 5.
obs = bonferroni_correction(pvals)
assert_allclose(obs, exp)
def test_fisher_z_transform(self):
'''Test Fisher Z transform is correct.'''
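        # Fisher's z transform is z = 0.5 * ln((1 + r) / (1 - r)), which is
        # undefined (nan) for |r| >= 1.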
r = .657
exp = .5 * log(1.657 / .343)
obs = fisher_z_transform(r)
assert_allclose(exp, obs)
r = 1
obs = fisher_z_transform(r)
assert_allclose(obs, nan)
r = -1
obs = fisher_z_transform(r)
assert_allclose(obs, nan)
r = -5.6
obs = fisher_z_transform(r)
assert_allclose(obs, nan)
# from sokal and rohlf pg 575
r = .972
obs = fisher_z_transform(r)
exp = 2.12730
assert_allclose(exp, obs, rtol=1e-4)
def test_z_transform_pval(self):
'''Test that pval associated with Fisher Z is correct.'''
r = .6
n = 100
obs = z_transform_pval(r, n)
exp = 3.4353390341723208e-09
assert_allclose(exp, obs)
r = .5
n = 3
obs = z_transform_pval(r, n)
assert_allclose(obs, nan)
def test_inverse_fisher_z_transform(self):
'''Test that Fisher's Z transform is computed correctly.'''
z = .65
exp = 0.5716699660851171
obs = inverse_fisher_z_transform(z)
assert_allclose(exp, obs)
def test_fisher_population_correlation(self):
'''Test that the population rho and homogeneity coeff are correct.'''
# note: the error tolerances are lower than they would normally be
        # because Sokal and Rohlf don't give many significant figures
# example from Sokal and Rohlf Biometry pg. 580 - 582
rs = array([.29, .7, .58, .56, .55, .67, .65, .61, .64, .56])
ns = array([100, 46, 28, 74, 33, 27, 52, 26, 20, 17])
zbar = .615268
X2 = 15.26352
pop_r = .547825
hval = chi2prob(X2, len(ns) - 1)
obs_p_rho, obs_hval = fisher_population_correlation(rs, ns)
assert_allclose(obs_p_rho, pop_r, rtol=1e-5)
assert_allclose(obs_hval, hval, rtol=1e-5)
# test with nans
rs = array(
[.29, .7, nan, .58, .56, .55, .67, .65, .61, .64, .56])
ns = array([100, 46, 400, 28, 74, 33, 27, 52, 26, 20, 17])
obs_p_rho, obs_hval = fisher_population_correlation(rs, ns)
assert_allclose(obs_p_rho, pop_r, rtol=1e-5)
assert_allclose(obs_hval, hval, rtol=1e-5)
# test with short vectors
rs = [.6, .5, .4, .6, .7]
ns = [10, 12, 42, 11, 3]
obs_p_rho, obs_hval = fisher_population_correlation(rs, ns)
assert_allclose(obs_p_rho, nan)
assert_allclose(obs_hval, nan)
# test with data with rs >1
rs = [.6, .5, .4, 1.4]
ns = [10, 50, 100, 10]
self.assertRaises(ValueError, fisher_population_correlation, rs, ns)
def test_assign_correlation_pval(self):
'''Test that correlation pvalues are assigned correctly with each meth.
'''
# test with parametric t distribution, use example from Sokal and Rohlf
# Biometry pg 576.
r = .86519
n = 12
ts = 5.45618 # only 5 sig figs in sokal and rohlf
exp = tprob(ts, n - 2, tails='two-sided')
obs = assign_correlation_pval(r, n, 'parametric_t_distribution')
assert_allclose(exp, obs, rtol=1e-5)
# test with too few samples
n = 3
self.assertRaises(ValueError, assign_correlation_pval, r, n,
'parametric_t_distribution')
# test with fisher_z_transform
r = .29
n = 100
z = 0.29856626366017841 # .2981 in biometry
exp = z_transform_pval(z, n)
obs = assign_correlation_pval(r, n, 'fisher_z_transform')
assert_allclose(exp, obs, rtol=1e-5)
r = .61
n = 26
z = 0.70892135942740819 # .7089 in biometry
exp = z_transform_pval(z, n)
obs = assign_correlation_pval(r, n, 'fisher_z_transform')
assert_allclose(exp, obs, rtol=1e-5)
        # show that we can specify the other options; as long as
        # 'bootstrapped' is not selected, the extra arguments are fine
v1 = array([10, 11, 12])
v2 = array([10, 14, 15])
obs = assign_correlation_pval(r, n, 'fisher_z_transform',
permutations=1000, perm_test_fn=pearson,
v1=v1, v2=v2)
assert_allclose(exp, obs)
# test with bootstrapping, seed for reproducibility.
seed(0)
v1 = array([54, 71, 60, 54, 42, 64, 43, 89, 96, 38])
v2 = array([79, 52, 56, 92, 7, 8, 2, 83, 77, 87])
# c = corrcoef(v1,v2)[0][1]
exp = .357
obs = assign_correlation_pval(0.33112494, 20000, 'bootstrapped',
permutations=1000, perm_test_fn=pearson,
v1=v1, v2=v2)
assert_allclose(exp, obs)
# make sure it throws an error
self.assertRaises(ValueError, assign_correlation_pval, 7, 20000,
'bootstrapped', perm_test_fn=pearson, v1=None, v2=v2)
# test that it does properly with kendall
exp = kendall_pval(r, n)
obs = assign_correlation_pval(r, n, 'kendall')
assert_allclose(exp, obs)
def test_cscore(self):
'''Test cscore is calculated correctly.'''
# test using example from Stone and Roberts pg 75
v1 = array([1, 0, 0, 0, 1, 1, 0, 1, 0, 1])
v2 = array([1, 1, 1, 0, 1, 0, 1, 1, 1, 0])
obs = cscore(v1, v2)
exp = 8
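        # checkerboard score C = (r1 - S) * (r2 - S): here v1 occupies 5
        # sites, v2 occupies 7, and they share S = 3 sites, so C = 2 * 4 = 8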
self.assertEqual(obs, exp)
# test using examples verified in ecosim
v1 = array([4, 6, 12, 13, 14, 0, 0, 0, 14, 11, 9, 6, 0, 1, 1, 0, 0,
4])
v2 = array([4, 0, 0, 113, 1, 2, 20, 0, 1, 0, 19, 16, 0, 13, 6, 0, 5,
4])
# from R
# library(vegan)
# library(bipartite)
# m = matrix(c(4,6,12,13,14,0,0,0,14,11,9,6,0,1,1,0,0,4,4,0,0,113,1,2,
# 20,0,1,0,19,16,0,13,6,0,5,4), 18,2)
# C.score(m, normalise=FALSE)
exp = 9
obs = cscore(v1, v2)
self.assertEqual(obs, exp)
class DistributionTests(TestCase):
    '''Test that the distributions from scipy are performing as we expect.'''
def setUp(self):
'''Nothing needed for all tests.'''
pass
def test_normal_probability_distribution(self):
'''Test that the normal probability distribution performs correctly.'''
# test against R
# library('stats')
# pnorm(4.5, mean = 0, sd=1, lower.tail=TRUE)
# 0.9999966
p = normprob(4.5, direction='low', mean=0, std=1)
assert_allclose(p, 0.9999966)
        # pnorm(-14.5, mean = -5, sd=20, lower.tail=TRUE)
# 0.3173935
p = normprob(-14.5, direction='two-sided', mean=-5, std=20)
assert_allclose(p, 0.3173935*2)
# > pnorm(4.5, mean = 0, sd=1, lower.tail=FALSE)
# [1] 3.397673e-06
p = normprob(4.5, direction='high', mean=0, std=1)
assert_allclose(p, 3.397673e-06)
p = normprob(4.5, direction='two-sided', mean=0, std=1)
assert_allclose(p, 3.397673e-06*2)
# test that a ValueError is correctly raised
self.assertRaises(ValueError, normprob, 4.5, direction='dne')
def test_chi2_probability_distribution(self):
'''Test that chi2 probability distribution performs correctly.'''
# test against R
# library('stats')
# pchisq(13.4, 4, lower.tail=TRUE)
# 0.990522
p = chi2prob(13.4, 4, direction='low')
assert_allclose(p, 0.990522)
# > pchisq(13.4, 4, lower.tail=FALSE)
# [1] 0.009478022
p = chi2prob(13.4, 4, direction='high')
assert_allclose(p, 0.009478022)
# test when we have a negative chi2 stat
p = chi2prob(-10, 5, direction='high')
assert_allclose(p, nan)
# test another value
# > pchisq(45, 35)
# [1] 0.8800662
p = chi2prob(45, 35, direction='low')
assert_allclose(p, 0.8800662)
# test that a ValueError is correctly raised
self.assertRaises(ValueError, chi2prob, 4.5, 3, direction='dne')
def test_t_probability_distribution(self):
'''Test that the t probability distribution performs correctly.'''
# test against R
# library('stats')
# pt(2.5, 10, lower.tail=TRUE)
# 0.9842766
t = tprob(2.5, 10, tails='low')
assert_allclose(t, 0.9842766, atol=1e-7)
# pt(2.5, 10, lower.tail=FALSE)
# 0.01572342
t = tprob(2.5, 10, tails='high')
assert_allclose(t, 0.01572342, atol=1e-7)
# both tails
t = tprob(2.5, 10, tails='two-sided')
assert_allclose(t, 2*0.01572342, atol=1e-7)
# > pt(-6.7,2)
# [1] 0.01077945
t = tprob(-6.7, 2, tails='two-sided')
assert_allclose(t, 2*0.01077945, atol=1e-7)
# test that a ValueError is correctly raised
self.assertRaises(ValueError, tprob, 4.5, 3, tails='dne')
def test_f_probability_distribution(self):
'''Test that the f probability distribution performs correctly.'''
# test against R
# library('stats')
# pf(4.5, 3, 5)
# 0.9305489
p = fprob(4.5, 3, 5, direction='low')
assert_allclose(p, 0.9305489, atol=1e-7)
p = fprob(4.5, 3, 5, direction='high')
assert_allclose(p, 1 - 0.9305489, atol=1e-7)
# pf(33.5, 2, 5)
# 0.9987292
p = fprob(33.5, 2, 5, direction='low')
assert_allclose(p, 0.9987292, atol=1e-7)
# test when we have a negative f stat
p = fprob(-10, 5, 6, direction='high')
assert_allclose(p, nan)
# test that a ValueError is correctly raised
self.assertRaises(ValueError, fprob, 4.5, 3, 5, direction='dne')
if __name__ == "__main__":
main()
| gpl-2.0 |
gully/Starfish | attic/old_code.py | 2 | 13197 |
print("Hello")
def downsample(w_m, f_m, w_TRES):
'''Given a model wavelength and flux (w_m, f_m) and the instrument wavelength (w_TRES), downsample the model to
exactly match the TRES wavelength bins. '''
spec_interp = interp1d(w_m, f_m, kind="linear")
@np.vectorize
def avg_bin(bin0, bin1):
mdl_ind = (w_m > bin0) & (w_m < bin1)
wave = np.empty((np.sum(mdl_ind) + 2,))
flux = np.empty((np.sum(mdl_ind) + 2,))
wave[0] = bin0
wave[-1] = bin1
flux[0] = spec_interp(bin0)
flux[-1] = spec_interp(bin1)
wave[1:-1] = w_m[mdl_ind]
flux[1:-1] = f_m[mdl_ind]
return trapz(flux, wave) / (bin1 - bin0)
#Determine the bin edges
edges = np.empty((len(w_TRES) + 1,))
difs = np.diff(w_TRES) / 2.
edges[1:-1] = w_TRES[:-1] + difs
edges[0] = w_TRES[0] - difs[0]
edges[-1] = w_TRES[-1] + difs[-1]
b0s = edges[:-1]
b1s = edges[1:]
samp = avg_bin(b0s, b1s)
return (samp)
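#Illustrative usage sketch (an assumption, not taken from the original
#module): with a finely sampled model spectrum and a coarser instrument grid
#that lies inside it,
#   w_m = np.linspace(5000., 5100., 10000)
#   f_m = np.ones_like(w_m)
#   w_TRES = np.linspace(5010., 5090., 200)
#   f_inst = downsample(w_m, f_m, w_TRES)
#each instrument pixel receives the trapezoid-averaged model flux over its
#wavelength bin.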
def downsample2(w_m, f_m, w_TRES):
'''Given a model wavelength and flux (w_m, f_m) and the instrument wavelength (w_TRES), downsample the model to
exactly match the TRES wavelength bins. Try this without calling the interpolation routine.'''
@np.vectorize
def avg_bin(bin0, bin1):
mdl_ind = (w_m > bin0) & (w_m < bin1)
length = np.sum(mdl_ind) + 2
wave = np.empty((length,))
flux = np.empty((length,))
wave[0] = bin0
wave[-1] = bin1
wave[1:-1] = w_m[mdl_ind]
flux[1:-1] = f_m[mdl_ind]
flux[0] = flux[1]
flux[-1] = flux[-2]
return trapz(flux, wave) / (bin1 - bin0)
#Determine the bin edges
edges = np.empty((len(w_TRES) + 1,))
difs = np.diff(w_TRES) / 2.
edges[1:-1] = w_TRES[:-1] + difs
edges[0] = w_TRES[0] - difs[0]
edges[-1] = w_TRES[-1] + difs[-1]
b0s = edges[:-1]
b1s = edges[1:]
return avg_bin(b0s, b1s)
def downsample3(w_m, f_m, w_TRES):
'''Given a model wavelength and flux (w_m, f_m) and the instrument wavelength (w_TRES), downsample the model to
exactly match the TRES wavelength bins. Try this only by averaging.'''
#More time could be saved by splitting up the original array into averageable chunks.
@np.vectorize
def avg_bin(bin0, bin1):
return np.average(f_m[(w_m > bin0) & (w_m < bin1)])
#Determine the bin edges
edges = np.empty((len(w_TRES) + 1,))
difs = np.diff(w_TRES) / 2.
edges[1:-1] = w_TRES[:-1] + difs
edges[0] = w_TRES[0] - difs[0]
edges[-1] = w_TRES[-1] + difs[-1]
b0s = edges[:-1]
b1s = edges[1:]
return avg_bin(b0s, b1s)
def downsample4(w_m, f_m, w_TRES):
out_flux = np.zeros_like(w_TRES)
len_mod = len(w_m)
#Determine the bin edges
len_TRES = len(w_TRES)
edges = np.empty((len_TRES + 1,))
difs = np.diff(w_TRES) / 2.
edges[1:-1] = w_TRES[:-1] + difs
edges[0] = w_TRES[0] - difs[0]
edges[-1] = w_TRES[-1] + difs[-1]
i_start = np.argwhere((w_m > edges[0]))[0][0] #return the first starting index for the model wavelength array
edges_i = 1
for i in range(len(w_m)):
if w_m[i] > edges[edges_i]:
i_finish = i - 1
out_flux[edges_i - 1] = np.mean(f_m[i_start:i_finish])
edges_i += 1
i_start = i_finish
if edges_i > len_TRES:
break
return out_flux
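#The four downsample variants trade accuracy for speed: downsample and
#downsample2 trapezoid-average the model over each instrument bin (with and
#without interpolating the bin-edge fluxes), downsample3 takes a plain mean
#per bin, and downsample4 does the same in a single pass over the model grid.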
#Keep out here so memory keeps getting overwritten
fluxes = np.empty((4, len(wave_grid)))
def flux_interpolator_mini(temp, logg):
'''Load flux in a memory-nice manner. lnprob will already check that we are within temp = 2300 - 12000 and logg =
0.0 - 6.0, so we do not need to check that here.'''
#Determine T plus and minus
#If the previous check by lnprob was correct, these should always have elements
#Determine logg plus and minus
i_Tm = np.argwhere(temp >= T_points)[-1][0]
Tm = T_points[i_Tm]
i_Tp = np.argwhere(temp < T_points)[0][0]
Tp = T_points[i_Tp]
i_lm = np.argwhere(logg >= logg_points)[-1][0]
lm = logg_points[i_lm]
i_lp = np.argwhere(logg < logg_points)[0][0]
lp = logg_points[i_lp]
indexes = [(i_Tm, i_lm), (i_Tm, i_lp), (i_Tp, i_lm), (i_Tp, i_lp)]
points = np.array([(Tm, lm), (Tm, lp), (Tp, lm), (Tp, lp)])
for i in range(4):
#Load spectra for these points
#print(indexes[i])
fluxes[i] = LIB[indexes[i]]
if np.isnan(fluxes).any():
#If outside the defined grid (demarcated in the hdf5 object by nan's) just return 0s
return zero_flux
#Interpolate spectra with LinearNDInterpolator
flux_intp = LinearNDInterpolator(points, fluxes)
new_flux = flux_intp(temp, logg)
return new_flux
def flux_interpolator():
#points = np.loadtxt("param_grid_GWOri.txt")
points = np.loadtxt("param_grid_interp_test.txt")
#TODO: make this dynamic, specify param_grid dynamically too
len_w = 716665
fluxes = np.empty((len(points), len_w))
for i in range(len(points)):
fluxes[i] = load_flux(points[i][0], points[i][1])
#flux_intp = NearestNDInterpolator(points, fluxes)
flux_intp = LinearNDInterpolator(points, fluxes, fill_value=1.)
del fluxes
print("Loaded flux_interpolator")
return flux_intp
#Originally from PHOENIX_tools
def create_grid_parallel_Z0(ncores):
'''create an hdf5 file of the PHOENIX grid. Go through each T point, if the corresponding logg exists,
write it. If not, write nan.'''
f = h5py.File("LIB_2kms.hdf5", "w")
shape = (len(T_points), len(logg_points), len(wave_grid_coarse))
dset = f.create_dataset("LIB", shape, dtype="f")
# A thread pool of P processes
pool = mp.Pool(ncores)
param_combos = []
var_combos = []
for t, temp in enumerate(T_points):
for l, logg in enumerate(logg_points):
param_combos.append([t, l])
var_combos.append([temp, logg])
spec_gen = list(pool.map(process_spectrum_Z0, var_combos))
for i in range(len(param_combos)):
t, l = param_combos[i]
dset[t, l, :] = spec_gen[i]
f.close()
def process_spectrum_Z0(pars):
temp, logg = pars
try:
f = load_flux_full(temp, logg, True)[ind]
flux = resample_and_convolve(f,wave_grid_fine,wave_grid_coarse)
print("Finished %s, %s" % (temp, logg))
except OSError:
print("%s, %s does not exist!" % (temp, logg))
flux = np.nan
return flux
def load_flux_full_Z0(temp, logg, norm=False):
rname = "HiResFITS/PHOENIX-ACES-AGSS-COND-2011/Z-0.0/lte{temp:0>5.0f}-{logg:.2f}-0.0" \
".PHOENIX-ACES-AGSS-COND-2011-HiRes.fits".format(
temp=temp, logg=logg)
flux_file = pf.open(rname)
f = flux_file[0].data
L = flux_file[0].header['PHXLUM'] #W
if norm:
f = f * (L_sun / L)
print("Normalized luminosity to 1 L_sun")
flux_file.close()
print("Loaded " + rname)
return f
def flux_interpolator():
points = ascii.read("param_grid.txt")
T_list = points["T"].data
logg_list = points["logg"].data
fluxes = np.empty((len(T_list), len(w)))
for i in range(len(T_list)):
fluxes[i] = load_flux_npy(T_list[i], logg_list[i])
flux_intp = NearestNDInterpolator(np.array([T_list, logg_list]).T, fluxes)
return flux_intp
def flux_interpolator_np():
points = np.loadtxt("param_grid.txt")
print(points)
#T_list = points["T"].data
#logg_list = points["logg"].data
len_w = 716665
fluxes = np.empty((len(points), len_w))
for i in range(len(points)):
fluxes[i] = load_flux_npy(points[i][0], points[i][1])
flux_intp = NearestNDInterpolator(points, fluxes)
return flux_intp
def flux_interpolator_hdf5():
#load hdf5 file of PHOENIX grid
fhdf5 = h5py.File(LIB, 'r')
LIB = fhdf5['LIB']
index_combos = []
var_combos = []
for ti in range(len(T_points)):
for li in range(len(logg_points)):
for zi in range(len(Z_points)):
index_combos.append([T_arg[ti], logg_arg[li], Z_arg[zi]])
var_combos.append([T_points[ti], logg_points[li], Z_points[zi]])
#print(param_combos)
num_spec = len(index_combos)
points = np.array(var_combos)
fluxes = np.empty((num_spec, len(wave_grid)))
for i in range(num_spec):
t, l, z = index_combos[i]
fluxes[i] = LIB[t, l, z][ind]
flux_intp = LinearNDInterpolator(points, fluxes, fill_value=1.)
fhdf5.close()
del fluxes
gc.collect()
return flux_intp
import numpy as np
from scipy.interpolate import interp1d, InterpolatedUnivariateSpline, griddata
import matplotlib.pyplot as plt
import model as m
from scipy.special import hyp0f1, struve, j1
import PHOENIX_tools as pt
c_kms = 2.99792458e5 #km s^-1
f_full = pt.load_flux_full(5900, 3.5, True)
w_full = pt.w_full
#Truncate to Dave's order
#ind = (w_full > 5122) & (w_full < 5218)
ind = (w_full > 3000) & (w_full < 12000.6)
w_full = w_full[ind]
f_full = f_full[ind]
def calc_lam_grid(v=1., start=3700., end=10000):
'''Returns a grid evenly spaced in velocity'''
size = 600000 #this number just has to be bigger than the final array
lam_grid = np.zeros((size,))
i = 0
lam_grid[i] = start
vel = np.sqrt((c_kms + v) / (c_kms - v))
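    #consecutive grid points differ by the relativistic Doppler factor
    #sqrt((c + v)/(c - v)) ~ 1 + v/c, so the grid is uniform in velocity
    #(log-uniform in wavelength)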
while (lam_grid[i] < end) and (i < size - 1):
lam_new = lam_grid[i] * vel
i += 1
lam_grid[i] = lam_new
return lam_grid[np.nonzero(lam_grid)][:-1]
#grid = calc_lam_grid(2.,start=3050.,end=11232.) #chosen to correspond to min U filter and max z filter
#wave_grid = calc_lam_grid(0.35, start=3050., end=11232.) #this spacing encapsulates the maximal velocity resolution
# of the PHOENIX grid, and corresponds to Delta lambda = 0.006 Ang at 5000 Ang.
#np.save('wave_grid_0.35kms.npy',wave_grid)
#Truncate wave_grid to Dave's order
wave_grid = np.load('wave_grid_0.35kms.npy')[:-1]
wave_grid = wave_grid[(wave_grid > 5165) & (wave_grid < 5190)]
np.save('wave_grid_trunc.npy', wave_grid)
@np.vectorize
def gauss_taper(s, sigma=2.89):
'''This is the FT of a gaussian w/ this sigma. Sigma in km/s'''
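    #the Fourier transform of a Gaussian of standard deviation sigma (km/s)
    #is proportional to exp(-2 * pi**2 * sigma**2 * s**2), with s in cycles
    #per km/s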
    return np.exp(-2 * np.pi ** 2 * sigma ** 2 * s ** 2)
def convolve_gauss(wl, fl, sigma=2.89, spacing=2.):
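    #convolution by FFT: multiplying the spectrum's transform by the Gaussian
    #taper in the frequency domain is equivalent to convolving the spectrum
    #with a Gaussian of width sigma in velocity space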
##Take FFT of f_grid
out = np.fft.fft(np.fft.fftshift(fl))
N = len(fl)
freqs = np.fft.fftfreq(N, d=spacing)
taper = gauss_taper(freqs, sigma)
tout = out * taper
blended = np.fft.fftshift(np.fft.ifft(tout))
return np.absolute(blended) #remove tiny complex component
def IUS(w, f, wl):
f = InterpolatedUnivariateSpline(w, f)
return f(wl)
def plot_interpolated():
f_grid = IUS(w_full, f_full, wave_grid)
np.save('f_grid.npy', f_grid)
print("Calculated flux_grid")
print("Length flux grid", len(f_grid))
f_grid6 = convolve_gauss(wave_grid, f_grid, spacing=0.35)
fig = plt.figure()
ax = fig.add_subplot(111)
ax.plot(wave_grid, f_grid)
#ax.plot(m.wls[0], IUS(wave_grid, f_grid6, m.wls[0]),"o")
plt.show()
@np.vectorize
def lanczos_kernel(x, a=2):
if np.abs(x) < a:
return np.sinc(np.pi * x) * np.sinc(np.pi * x / a)
else:
return 0.
def grid_interp():
return griddata(grid, blended, m.wls, method='linear')
def G(s, vL):
'''vL in km/s. Gray pg 475'''
ub = 2. * np.pi * vL * s
return j1(ub) / ub - 3 * np.cos(ub) / (2 * ub ** 2) + 3. * np.sin(ub) / (2 * ub ** 3)
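#G(s, vL) is the Fourier transform of a rotational broadening kernel (Gray,
#"The Observation and Analysis of Stellar Photospheres"), with vL the
#projected rotational velocity in km/s and s the frequency conjugate to
#velocity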
def plot_gray():
fig = plt.figure()
ax = fig.add_subplot(111)
ss = np.linspace(0.001, 2, num=200)
Gs1 = G(ss, 1.)
Gs2 = G(ss, 2.)
ax.plot(ss, Gs1)
ax.plot(ss, Gs2)
plt.show()
def main():
plot_interpolated()
pass
if __name__ == "__main__":
main()
#Old sinc interpolation routines that didn't work out
#Test sinc interpolation
#def func(x):
# return (x - 3)**2 + 2 * x
#
#xs = np.arange(-299,301,1)
#ys = xs
#
#def sinc_interpolate(x):
# ind = np.argwhere(x > xs )[-1][0]
# ind2 = ind + 1
# print("ind",ind)
# print(xs[ind])
# print(xs[ind2])
# frac = x - xs[ind]
# print(frac)
# spacing = 1
# pts_grid = np.arange(-299.5,300,1)
# sinc_pts = np.sinc(pts_grid)
# print(pts_grid,sinc_pts,trapz(sinc_pts))
# flux_pts = ys
# print("Interpolated value",np.sum(sinc_pts * flux_pts))
# print("Neighboring value", ys[ind], ys[ind2])
# return(sinc_pts,flux_pts)
#Now, do sinc interpolation to the TRES pixels on the blended spectrum
##Take TRES pixel, call that the center of sinc, then sample it at +/- the other pixels in the grid
#def sinc_interpolate(wl_TRES):
# ind = np.argwhere(wl_TRES > grid)[-1][0]
# ind2 = ind + 1
# print(grid[ind])
# print(grid[ind2])
# frac = (wl_TRES - grid[ind])/(grid[ind2] - grid[ind])
# print(frac)
# spacing = 2 #km/s
# veloc_grid = np.arange(-48.,51,spacing) - frac * spacing
# print(veloc_grid)
# #convert wl spacing to velocity spacing
# sinc_pts = 0.5 * np.sinc(0.5 * veloc_grid)
# print(sinc_pts,trapz(sinc_pts,veloc_grid))
# print("Interpolated flux",np.sum(sinc_pts * f_grid[ind - 25: ind + 25]))
# print("Neighboring flux", f_grid[ind], f_grid[ind2])
#sinc_interpolate(6610.02)
| bsd-3-clause |
juliandewit/kaggle_ndsb2017 | step1b_preprocess_make_train_cubes.py | 2 | 12051 | import settings
import helpers
import glob
import pandas
import ntpath
import numpy
import cv2
import os
CUBE_IMGTYPE_SRC = "_i"
def save_cube_img(target_path, cube_img, rows, cols):
assert rows * cols == cube_img.shape[0]
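    # the cube's z-slices are tiled into a rows x cols mosaic so the whole
    # 3D block can be stored and inspected as a single 2D PNG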
img_height = cube_img.shape[1]
img_width = cube_img.shape[1]
res_img = numpy.zeros((rows * img_height, cols * img_width), dtype=numpy.uint8)
for row in range(rows):
for col in range(cols):
target_y = row * img_height
target_x = col * img_width
res_img[target_y:target_y + img_height, target_x:target_x + img_width] = cube_img[row * cols + col]
cv2.imwrite(target_path, res_img)
def get_cube_from_img(img3d, center_x, center_y, center_z, block_size):
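    # extract a block_size^3 sub-volume centered on (center_x, center_y,
    # center_z), shifting the window back inside the volume at the borders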
start_x = max(center_x - block_size / 2, 0)
if start_x + block_size > img3d.shape[2]:
start_x = img3d.shape[2] - block_size
    start_y = max(center_y - block_size / 2, 0)
    if start_y + block_size > img3d.shape[1]:
        start_y = img3d.shape[1] - block_size
start_z = max(center_z - block_size / 2, 0)
if start_z + block_size > img3d.shape[0]:
start_z = img3d.shape[0] - block_size
start_z = int(start_z)
start_y = int(start_y)
start_x = int(start_x)
res = img3d[start_z:start_z + block_size, start_y:start_y + block_size, start_x:start_x + block_size]
return res
def make_pos_annotation_images():
src_dir = settings.LUNA_16_TRAIN_DIR2D2 + "metadata/"
dst_dir = settings.BASE_DIR_SSD + "luna16_train_cubes_pos/"
for file_path in glob.glob(dst_dir + "*.*"):
os.remove(file_path)
for patient_index, csv_file in enumerate(glob.glob(src_dir + "*_annos_pos.csv")):
patient_id = ntpath.basename(csv_file).replace("_annos_pos.csv", "")
# print(patient_id)
# if not "148229375703208214308676934766" in patient_id:
# continue
df_annos = pandas.read_csv(csv_file)
if len(df_annos) == 0:
continue
images = helpers.load_patient_images(patient_id, settings.LUNA_16_TRAIN_DIR2D2, "*" + CUBE_IMGTYPE_SRC + ".png")
for index, row in df_annos.iterrows():
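            # the CSV stores coordinates as fractions of the image dimensions,
            # so they are rescaled to voxel indices here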
coord_x = int(row["coord_x"] * images.shape[2])
coord_y = int(row["coord_y"] * images.shape[1])
coord_z = int(row["coord_z"] * images.shape[0])
diam_mm = int(row["diameter"] * images.shape[2])
anno_index = int(row["anno_index"])
cube_img = get_cube_from_img(images, coord_x, coord_y, coord_z, 64)
if cube_img.sum() < 5:
print(" ***** Skipping ", coord_x, coord_y, coord_z)
continue
if cube_img.mean() < 10:
print(" ***** Suspicious ", coord_x, coord_y, coord_z)
save_cube_img(dst_dir + patient_id + "_" + str(anno_index) + "_" + str(diam_mm) + "_1_" + "pos.png", cube_img, 8, 8)
helpers.print_tabbed([patient_index, patient_id, len(df_annos)], [5, 64, 8])
def make_annotation_images_lidc():
src_dir = settings.LUNA16_EXTRACTED_IMAGE_DIR + "_labels/"
dst_dir = settings.BASE_DIR_SSD + "generated_traindata/luna16_train_cubes_lidc/"
if not os.path.exists(dst_dir):
os.mkdir(dst_dir)
for file_path in glob.glob(dst_dir + "*.*"):
os.remove(file_path)
for patient_index, csv_file in enumerate(glob.glob(src_dir + "*_annos_pos_lidc.csv")):
patient_id = ntpath.basename(csv_file).replace("_annos_pos_lidc.csv", "")
df_annos = pandas.read_csv(csv_file)
if len(df_annos) == 0:
continue
images = helpers.load_patient_images(patient_id, settings.LUNA16_EXTRACTED_IMAGE_DIR, "*" + CUBE_IMGTYPE_SRC + ".png")
for index, row in df_annos.iterrows():
coord_x = int(row["coord_x"] * images.shape[2])
coord_y = int(row["coord_y"] * images.shape[1])
coord_z = int(row["coord_z"] * images.shape[0])
malscore = int(row["malscore"])
anno_index = row["anno_index"]
anno_index = str(anno_index).replace(" ", "xspacex").replace(".", "xpointx").replace("_", "xunderscorex")
cube_img = get_cube_from_img(images, coord_x, coord_y, coord_z, 64)
if cube_img.sum() < 5:
print(" ***** Skipping ", coord_x, coord_y, coord_z)
continue
if cube_img.mean() < 10:
print(" ***** Suspicious ", coord_x, coord_y, coord_z)
if cube_img.shape != (64, 64, 64):
print(" ***** incorrect shape !!! ", str(anno_index), " - ",(coord_x, coord_y, coord_z))
continue
save_cube_img(dst_dir + patient_id + "_" + str(anno_index) + "_" + str(malscore * malscore) + "_1_pos.png", cube_img, 8, 8)
helpers.print_tabbed([patient_index, patient_id, len(df_annos)], [5, 64, 8])
def make_pos_annotation_images_manual():
src_dir = "resources/luna16_manual_labels/"
dst_dir = settings.BASE_DIR_SSD + "generated_traindata/luna16_train_cubes_manual/"
if not os.path.exists(dst_dir):
os.mkdir(dst_dir)
for file_path in glob.glob(dst_dir + "*_manual.*"):
os.remove(file_path)
for patient_index, csv_file in enumerate(glob.glob(src_dir + "*.csv")):
patient_id = ntpath.basename(csv_file).replace(".csv", "")
if "1.3.6.1.4" not in patient_id:
continue
print(patient_id)
# if not "172845185165807139298420209778" in patient_id:
# continue
df_annos = pandas.read_csv(csv_file)
if len(df_annos) == 0:
continue
images = helpers.load_patient_images(patient_id, settings.LUNA16_EXTRACTED_IMAGE_DIR, "*" + CUBE_IMGTYPE_SRC + ".png")
for index, row in df_annos.iterrows():
coord_x = int(row["x"] * images.shape[2])
coord_y = int(row["y"] * images.shape[1])
coord_z = int(row["z"] * images.shape[0])
diameter = int(row["d"] * images.shape[2])
node_type = int(row["id"])
malscore = int(diameter)
malscore = min(25, malscore)
malscore = max(16, malscore)
anno_index = index
cube_img = get_cube_from_img(images, coord_x, coord_y, coord_z, 64)
if cube_img.sum() < 5:
print(" ***** Skipping ", coord_x, coord_y, coord_z)
continue
if cube_img.mean() < 10:
print(" ***** Suspicious ", coord_x, coord_y, coord_z)
if cube_img.shape != (64, 64, 64):
print(" ***** incorrect shape !!! ", str(anno_index), " - ",(coord_x, coord_y, coord_z))
continue
save_cube_img(dst_dir + patient_id + "_" + str(anno_index) + "_" + str(malscore) + "_1_" + ("pos" if node_type == 0 else "neg") + ".png", cube_img, 8, 8)
helpers.print_tabbed([patient_index, patient_id, len(df_annos)], [5, 64, 8])
def make_candidate_auto_images(candidate_types=[]):
dst_dir = settings.BASE_DIR_SSD + "generated_traindata/luna16_train_cubes_auto/"
if not os.path.exists(dst_dir):
os.mkdir(dst_dir)
for candidate_type in candidate_types:
for file_path in glob.glob(dst_dir + "*_" + candidate_type + ".png"):
os.remove(file_path)
for candidate_type in candidate_types:
if candidate_type == "falsepos":
src_dir = "resources/luna16_falsepos_labels/"
else:
src_dir = settings.LUNA16_EXTRACTED_IMAGE_DIR + "_labels/"
for index, csv_file in enumerate(glob.glob(src_dir + "*_candidates_" + candidate_type + ".csv")):
patient_id = ntpath.basename(csv_file).replace("_candidates_" + candidate_type + ".csv", "")
print(index, ",patient: ", patient_id, " type:", candidate_type)
# if not "148229375703208214308676934766" in patient_id:
# continue
df_annos = pandas.read_csv(csv_file)
if len(df_annos) == 0:
continue
images = helpers.load_patient_images(patient_id, settings.LUNA16_EXTRACTED_IMAGE_DIR, "*" + CUBE_IMGTYPE_SRC + ".png", exclude_wildcards=[])
row_no = 0
for index, row in df_annos.iterrows():
coord_x = int(row["coord_x"] * images.shape[2])
coord_y = int(row["coord_y"] * images.shape[1])
coord_z = int(row["coord_z"] * images.shape[0])
anno_index = int(row["anno_index"])
cube_img = get_cube_from_img(images, coord_x, coord_y, coord_z, 48)
if cube_img.sum() < 10:
print("Skipping ", coord_x, coord_y, coord_z)
continue
# print(cube_img.sum())
try:
save_cube_img(dst_dir + patient_id + "_" + str(anno_index) + "_0_" + candidate_type + ".png", cube_img, 6, 8)
except Exception as ex:
print(ex)
row_no += 1
max_item = 240 if candidate_type == "white" else 200
if candidate_type == "luna":
max_item = 500
if row_no > max_item:
break
def make_pos_annotation_images_manual_ndsb3():
src_dir = "resources/ndsb3_manual_labels/"
dst_dir = settings.BASE_DIR_SSD + "generated_traindata/ndsb3_train_cubes_manual/"
if not os.path.exists(dst_dir):
os.mkdir(dst_dir)
train_label_df = pandas.read_csv("resources/stage1_labels.csv")
train_label_df.set_index(["id"], inplace=True)
for file_path in glob.glob(dst_dir + "*.*"):
os.remove(file_path)
for patient_index, csv_file in enumerate(glob.glob(src_dir + "*.csv")):
patient_id = ntpath.basename(csv_file).replace(".csv", "")
if "1.3.6.1.4.1" in patient_id:
continue
cancer_label = train_label_df.loc[patient_id]["cancer"]
df_annos = pandas.read_csv(csv_file)
if len(df_annos) == 0:
continue
images = helpers.load_patient_images(patient_id, settings.NDSB3_EXTRACTED_IMAGE_DIR, "*" + CUBE_IMGTYPE_SRC + ".png")
anno_index = 0
for index, row in df_annos.iterrows():
pos_neg = "pos" if row["id"] == 0 else "neg"
coord_x = int(row["x"] * images.shape[2])
coord_y = int(row["y"] * images.shape[1])
coord_z = int(row["z"] * images.shape[0])
malscore = int(round(row["dmm"]))
anno_index += 1
cube_img = get_cube_from_img(images, coord_x, coord_y, coord_z, 64)
if cube_img.sum() < 5:
print(" ***** Skipping ", coord_x, coord_y, coord_z)
continue
if cube_img.mean() < 10:
print(" ***** Suspicious ", coord_x, coord_y, coord_z)
if cube_img.shape != (64, 64, 64):
print(" ***** incorrect shape !!! ", str(anno_index), " - ",(coord_x, coord_y, coord_z))
continue
print(patient_id)
assert malscore > 0 or pos_neg == "neg"
save_cube_img(dst_dir + "ndsb3manual_" + patient_id + "_" + str(anno_index) + "_" + pos_neg + "_" + str(cancer_label) + "_" + str(malscore) + "_1_pn.png", cube_img, 8, 8)
helpers.print_tabbed([patient_index, patient_id, len(df_annos)], [5, 64, 8])
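# Manually labelled NDSB3 cubes above use the naming pattern
#   ndsb3manual_<patient_id>_<anno_index>_<pos|neg>_<cancer_label>_<malscore>_1_pn.png
# so the cancer label and malignancy score are encoded in the file name.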
if __name__ == "__main__":
if not os.path.exists(settings.BASE_DIR_SSD + "generated_traindata/"):
os.mkdir(settings.BASE_DIR_SSD + "generated_traindata/")
if True:
make_annotation_images_lidc()
if True:
make_pos_annotation_images_manual()
# if False:
# make_pos_annotation_images() # not used anymore
if True:
make_candidate_auto_images(["falsepos", "luna", "edge"])
if True:
make_pos_annotation_images_manual_ndsb3() # for second model
| mit |
laurengulland/009yellow-beacon | v2_Tech_Review/gui/mvp2.0_controller.py | 1 | 9908 | """
For Software Architecture description:
see Yellow 2.009 Google Drive > Tech Review > Electronics Design > Queen Software > Queen Software Architecture
"""
import pandas as pd
import numpy as np
import time
import json
import serial
import pygame
import threading
from threading import Thread
from mvp2_0_view import GUI
from mvp2_0_model import Scouts
class ControllerState(object):
def __init__(self):
self.menu_active = False
self.selected_poi = 0
class Controller(object):
def __init__(self):
#create model object
#create gui object
#set up all the variables!
self.screen_location = [(0,0),(1,1)] #[(top left), (bottom right)]
self.something = 0
self.scout_queue = [] #store as list of bytearray objects
self.poi_queue = [] #store as list of bytearray objects
self.scouts = Scouts() #initialize model
self.next_poi_id = 0 #keep track of unused poi ids
self.state = ControllerState()
#Load constants from json file (UNTESTED)
json_data = json.load(open('mvp2.0_constants.json'))
self.screen_width,self.screen_height = json_data["screen_width"],json_data["screen_height"]
self.gui = GUI(self.screen_width,self.screen_height)
self.gui.render()
#initialize serial communication
self.port = serial.Serial('COM9') #MUST SELECT CORRECT PORT ON TABLET
        self.step_rate = .5  # minimum interval (seconds) between GUI refreshes in the main loop
#self.dtd = Data_to_Display()
self.last_time=time.time()
print('reaches end of initialization')
def action_map(self):
'''
        return the mapping from button id to handler for the current GUI state
'''
if self.gui.gui_state != 'Menu':
return {
0: self.zoom_in,
1: self.zoom_out,
2: self.do_nothing,
3: self.toggle_menu,
4: self.do_nothing,
5: self.do_nothing,
#(6,'up'): lambda: self.pan_vertical(True),
#(6, 'down'): lambda: self.pan_vertical(False),
#(6,'left'): lambda: self.pan_horizontal(False),
#(6,'right'): lambda: self.pan_horizontal(True),
#(7,'up'): self.do_nothing,
#(7,'down'): self.do_nothing,
#(7,'right'): self.do_nothing,
#(7,'left'): self.do_nothing
}
else:
return {
0: lambda: self.poi_scroll(False), #eventually be zoom in
1: lambda: self.poi_scroll(True), #eventually be zoom out
2: self.do_nothing,
3: self.toggle_menu,
4: self.select_poi,
5: self.do_nothing,
#(6,'up'): lambda: self.poi_scroll(False),
#(6, 'down'): lambda: self.poi_scroll(True),
#(6,'left'): self.do_nothing,
#(6,'right'): self.do_nothing,
#(7,'up'): self.do_nothing,
#(7,'down'): self.do_nothing,
#(7,'right'): self.do_nothing,
#(7,'left'): self.do_nothing
}
def do_nothing(self):
#print('does nothing')
pass
def pan_vertical(self,positive):
#print('pan vert')
pass
def pan_horizontal(self, positive):
#print('pan horiz')
pass
def toggle_menu(self):
#print('toggle menu')
self.gui.toggle_menu_state()
def select_poi(self):
#print('select poi')
pass
def poi_scroll(self, positive):
#print('poi scroll')
if positive:
self.gui.move_down()
else:
self.gui.move_up()
def zoom_in(self):
#print('zoom in')
pass
def zoom_out(self):
#print('zoom out')
pass
def construct_scout_displays(self,range=[(0,0),(1,1)]):
#figure this function out
#interact with model, via data_within_range()?
pass
def parse_inputs(self):
'''
        read one 83-byte frame from the serial port and dispatch it by packet
        type: 0x00 = GPS data, 0x01 = button press, 0x02 = transmit request
        '''
packet = self.port.read(83)
print(packet.hex())
length = packet[1]
packtype = packet[2]
content = bytearray(packet[3:2+length])
        if packtype == 0x00:
            self.parse_gps_inputs(content)
        elif packtype == 0x01:
            self.parse_button_presses(content)
        elif packtype == 0x02:
            self.transmit_data()
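    # Frame layout assumed throughout this class (derived from the parsing code
    # above and the packet builders below): every frame is 83 bytes, byte 0 is
    # the 0x7e start marker, byte 1 a length field, byte 2 the packet type
    # (incoming: 0x00 = GPS, 0x01 = buttons, 0x02 = transmit request; outgoing
    # frames use type 0x03), and the payload starts at byte 3.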
def parse_gps_inputs(self, content):
'''
parses payload of gps input
'''
trtime = int(time.time()) #time since epoch rounded to the second
poi = content[11]
is_poi = poi == 0x01
slat = self.get_signed_coord(content[0:5])
slon = self.get_signed_coord(content[5:10])
scout_id = content[10]
# add to model
self.scouts.add_data_point(scout_id, trtime,(slat,slon),False)
payload = self.get_scout_payload(content[0:11],trtime)
if payload is not None:
self.add_payload_to_scout_queue(payload)
# add to queue
if is_poi:
plat = self.get_signed_coord(content[12:17])
plon = self.get_signed_coord(content[17:22])
self.poi_queue.append(self.get_poi_packet(content[12:23]))
self.scouts.add_data_point(scout_id,trtime,(plat,plon),True)
#TODO: trigger view update?
def add_payload_to_scout_queue(self, payload):
'''
if there's room in the last bytearray, add it there
otherwise, create a new packet
'''
print('adding')
print(len(payload))
        if len(payload) != 15:
            raise ValueError("scout payload must be exactly 15 bytes, got %d" % len(payload))
if self.scout_queue == [] or self.scout_queue[-1][1]>75:
packet = bytearray(83)
packet[0] = 0x7e
packet[1] = 17
packet[2] = 0x03
packet[3] = 0x00
packet[4:19] = payload
self.scout_queue.append(packet)
else:
packet = self.scout_queue.pop()
packet[packet[1]+2:packet[1]+17] = payload
packet[1] = packet[1]+15
self.scout_queue.append(packet)
def get_poi_packet(self,content):
'''
build the complete poi package, sans description
'''
packet = bytearray(83)
packet[0] = 0x7e
packet[1] = 16
packet[2] = 0x03
packet[3] = 0x01
poi_hex = hex(self.next_poi_id)[2:]
self.next_poi_id +=1
while len(poi_hex)<4:
poi_hex = '0'+poi_hex
packet[4] = int(poi_hex[2:],16)
packet[5] = int(poi_hex[0:2],16)
packet[6:17] = content
return packet
def get_scout_payload(self,content,trtime):
'''
use info from content and transmission time to log scout info
'''
payload = bytearray(15)
payload[0:11] = content[0:11]
payload[11:15] = self.time_int_to_bytearray(trtime)
return payload
def time_int_to_bytearray(self,trtime):
'''
helper function to convert integer seconds from epoch to hex.
uses same least-significant-first convention as lat/long
'''
hexstring = hex(trtime)[2:]
return bytearray([
int(hexstring[6:8],16),
int(hexstring[4:6],16),
int(hexstring[2:4],16),
int(hexstring[0:2],16),
])
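    # Example (illustrative): trtime = 0x5A3C1F04 gives hex string "5a3c1f04",
    # which becomes bytearray([0x04, 0x1F, 0x3C, 0x5A]) -- least-significant
    # byte first. This assumes the timestamp fits in exactly four bytes
    # (8 hex digits), which holds for present-day Unix times.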
def get_signed_coord(self, coord_bytes):
'''
helper function to convert coordinate bytes to numbers
'''
hexstring = ''
for coord_byte in coord_bytes[0:-1]:
bytehex = str(hex(coord_byte))[2:]
if len(bytehex)==1:
bytehex = '0'+bytehex
hexstring = bytehex+hexstring
output = int(hexstring,16)
output = float(output)/(10**6)
if coord_bytes[-1]==0x02:
output = -output
return output
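    # Example (illustrative): bytearray([0x40, 0xE2, 0x01, 0x00, 0x02]) builds
    # hex string "0001e240" = 123456, scaled to 0.123456, then negated by the
    # trailing 0x02 sign byte -> -0.123456.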
def parse_button_presses(self, content):
'''
takes in content as a bytearray and calls the necessary function, which might be in controller or view
'''
if content[0]<6:
self.action_map()[content[0]]()
else:
x = content[2]
y = content[4]
if x>y:
if content[1] == 0x02:
self.action_map()[(content[0],'left')]()
else:
self.action_map()[(content[0],'right')]()
else:
if content[3]==0x02:
self.action_map()[(content[0],'down')]()
else:
self.action_map()[(content[0],'up')]()
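    # Note: the joystick branches above look up tuple keys such as (6, 'left'),
    # but those entries are currently commented out in action_map(), so a
    # joystick event would raise KeyError until they are re-enabled.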
def transmit_data(self):
'''
send packets from queues, if available.
sends scout packets first, then poi.
sends no-content packet if nothing to send.
'''
print('transmitting')
print(self.scout_queue)
if self.scout_queue == []:
if self.poi_queue == []:
print('transmission done')
packet = bytearray(83)
packet[0]=0x7e
packet[1]=1
packet[2]=0x03
else:
packet = self.poi_queue.pop(0)
else:
packet = self.scout_queue.pop(0)
self.port.write(packet)
def update_view(self):
#update the view with the current range
#calls view.render()?
pass
def pump_gui(self, pit):
while True:
pygame.event.get()
def run(self):
crashed = False
while not crashed:
# pit = Thread(target = self.parse_inputs)
# guit = Thread(target = lambda: self.pump_gui(pit))
# pit.start()
# guit.start()
# while pit.is_alive():
# pass
current_time = time.time()
self.parse_inputs()
print('time after parsing inputs:',current_time)
if current_time-self.last_time > self.step_rate:
print('actuated')
self.gui.map_data.update(self.scouts.data_display)
self.gui.render()
self.last_time = time.time()
for event in pygame.event.get():
if event.type == pygame.QUIT:
crashed = True
#LOOOOOOP:
#read inputs from serial, send to parse_inputs()
#trigger update for view if necessary? ==> this should be handled by serial parsing
# initiate transmissions? ==> also handled by parsing
self.port.close()
pygame.quit()
quit()
        #quit everything on shutdown
class Scout_Display(object):
def __init__(self,scout_id=0):
self.id = scout_id
self.current_position = []
self.positions = []
if __name__ == '__main__':
controller = Controller()
controller.run()
# buttons testing
# controller.parse_button_presses(bytearray([0x00]))
# controller.parse_button_presses(bytearray([0x01]))
# controller.parse_button_presses(bytearray([0x04]))
# controller.parse_button_presses(bytearray([0x06,0x01,0xaa,0x01,0x00]))
# controller.parse_button_presses(bytearray([0x06,0x01,0x00,0x01,0xaa]))
# controller.parse_button_presses(bytearray([0x06,0x02,0xaa,0x01,0x00]))
# controller.parse_button_presses(bytearray([0x06,0x01,0x00,0x02,0xaa]))
# controller.parse_button_presses(bytearray([0x03]))
# controller.parse_button_presses(bytearray([0x00]))
# controller.parse_button_presses(bytearray([0x01]))
# controller.parse_button_presses(bytearray([0x04]))
# controller.parse_button_presses(bytearray([0x06,0x01,0xaa,0x01,0x00]))
# controller.parse_button_presses(bytearray([0x06,0x01,0x00,0x01,0xaa]))
# controller.parse_button_presses(bytearray([0x06,0x02,0xaa,0x01,0x00]))
# controller.parse_button_presses(bytearray([0x06,0x01,0x00,0x02,0xaa]))
# controller.parse_button_presses(bytearray([0x03]))
| mit |
18padx08/PPTex | PPTexEnv_x86_64/lib/python2.7/site-packages/scipy/cluster/tests/test_hierarchy.py | 7 | 34863 | #! /usr/bin/env python
#
# Author: Damian Eads
# Date: April 17, 2008
#
# Copyright (C) 2008 Damian Eads
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided
# with the distribution.
#
# 3. The name of the author may not be used to endorse or promote
# products derived from this software without specific prior
# written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS
# OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE
# GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
# WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
from __future__ import division, print_function, absolute_import
import numpy as np
from numpy.testing import (TestCase, run_module_suite, dec, assert_raises,
assert_allclose, assert_equal, assert_)
from scipy.lib.six import xrange, u
import scipy.cluster.hierarchy
from scipy.cluster.hierarchy import (
linkage, from_mlab_linkage, to_mlab_linkage, num_obs_linkage, inconsistent,
cophenet, fclusterdata, fcluster, is_isomorphic, single, leaders,
correspond, is_monotonic, maxdists, maxinconsts, maxRstat,
is_valid_linkage, is_valid_im, to_tree, leaves_list, dendrogram)
from scipy.spatial.distance import pdist
import hierarchy_test_data
# Matplotlib is not a scipy dependency but is optionally used in dendrogram, so
# check if it's available
try:
# import matplotlib
import matplotlib
# and set the backend to be Agg (no gui)
matplotlib.use('Agg')
# before importing pyplot
import matplotlib.pyplot as plt
have_matplotlib = True
except:
have_matplotlib = False
class TestLinkage(object):
def test_linkage_empty_distance_matrix(self):
# Tests linkage(Y) where Y is a 0x4 linkage matrix. Exception expected.
y = np.zeros((0,))
assert_raises(ValueError, linkage, y)
################### linkage
def test_linkage_tdist(self):
for method in ['single', 'complete', 'average', 'weighted', u('single')]:
yield self.check_linkage_tdist, method
def check_linkage_tdist(self, method):
# Tests linkage(Y, method) on the tdist data set.
Z = linkage(hierarchy_test_data.ytdist, method)
expectedZ = getattr(hierarchy_test_data, 'linkage_ytdist_' + method)
assert_allclose(Z, expectedZ, atol=1e-10)
################### linkage on Q
def test_linkage_X(self):
for method in ['centroid', 'median', 'ward']:
yield self.check_linkage_q, method
def check_linkage_q(self, method):
# Tests linkage(Y, method) on the Q data set.
Z = linkage(hierarchy_test_data.X, method)
expectedZ = getattr(hierarchy_test_data, 'linkage_X_' + method)
assert_allclose(Z, expectedZ, atol=1e-06)
class TestInconsistent(object):
def test_inconsistent_tdist(self):
for depth in hierarchy_test_data.inconsistent_ytdist:
yield self.check_inconsistent_tdist, depth
def check_inconsistent_tdist(self, depth):
Z = hierarchy_test_data.linkage_ytdist_single
assert_allclose(inconsistent(Z, depth),
hierarchy_test_data.inconsistent_ytdist[depth])
class TestCopheneticDistance(object):
def test_linkage_cophenet_tdist_Z(self):
# Tests cophenet(Z) on tdist data set.
expectedM = np.array([268, 295, 255, 255, 295, 295, 268, 268, 295, 295,
295, 138, 219, 295, 295])
Z = hierarchy_test_data.linkage_ytdist_single
M = cophenet(Z)
assert_allclose(M, expectedM, atol=1e-10)
def test_linkage_cophenet_tdist_Z_Y(self):
# Tests cophenet(Z, Y) on tdist data set.
Z = hierarchy_test_data.linkage_ytdist_single
(c, M) = cophenet(Z, hierarchy_test_data.ytdist)
expectedM = np.array([268, 295, 255, 255, 295, 295, 268, 268, 295, 295,
295, 138, 219, 295, 295])
expectedc = 0.639931296433393415057366837573
assert_allclose(c, expectedc, atol=1e-10)
assert_allclose(M, expectedM, atol=1e-10)
class TestMLabLinkageConversion(object):
def test_mlab_linkage_conversion_empty(self):
# Tests from/to_mlab_linkage on empty linkage array.
X = np.asarray([])
assert_equal(from_mlab_linkage([]), X)
assert_equal(to_mlab_linkage([]), X)
def test_mlab_linkage_conversion_single_row(self):
# Tests from/to_mlab_linkage on linkage array with single row.
Z = np.asarray([[0., 1., 3., 2.]])
Zm = [[1, 2, 3]]
assert_equal(from_mlab_linkage(Zm), Z)
assert_equal(to_mlab_linkage(Z), Zm)
def test_mlab_linkage_conversion_multiple_rows(self):
# Tests from/to_mlab_linkage on linkage array with multiple rows.
Zm = np.asarray([[3, 6, 138], [4, 5, 219],
[1, 8, 255], [2, 9, 268], [7, 10, 295]])
Z = np.array([[2., 5., 138., 2.],
[3., 4., 219., 2.],
[0., 7., 255., 3.],
[1., 8., 268., 4.],
[6., 9., 295., 6.]],
dtype=np.double)
assert_equal(from_mlab_linkage(Zm), Z)
assert_equal(to_mlab_linkage(Z), Zm)
class TestFcluster(object):
def test_fclusterdata(self):
for t in hierarchy_test_data.fcluster_inconsistent:
yield self.check_fclusterdata, t, 'inconsistent'
for t in hierarchy_test_data.fcluster_distance:
yield self.check_fclusterdata, t, 'distance'
for t in hierarchy_test_data.fcluster_maxclust:
yield self.check_fclusterdata, t, 'maxclust'
def check_fclusterdata(self, t, criterion):
# Tests fclusterdata(X, criterion=criterion, t=t) on a random 3-cluster data set.
expectedT = getattr(hierarchy_test_data, 'fcluster_' + criterion)[t]
X = hierarchy_test_data.Q_X
T = fclusterdata(X, criterion=criterion, t=t)
assert_(is_isomorphic(T, expectedT))
def test_fcluster(self):
for t in hierarchy_test_data.fcluster_inconsistent:
yield self.check_fcluster, t, 'inconsistent'
for t in hierarchy_test_data.fcluster_distance:
yield self.check_fcluster, t, 'distance'
for t in hierarchy_test_data.fcluster_maxclust:
yield self.check_fcluster, t, 'maxclust'
def check_fcluster(self, t, criterion):
# Tests fcluster(Z, criterion=criterion, t=t) on a random 3-cluster data set.
expectedT = getattr(hierarchy_test_data, 'fcluster_' + criterion)[t]
Z = single(hierarchy_test_data.Q_X)
T = fcluster(Z, criterion=criterion, t=t)
assert_(is_isomorphic(T, expectedT))
def test_fcluster_monocrit(self):
for t in hierarchy_test_data.fcluster_distance:
yield self.check_fcluster_monocrit, t
for t in hierarchy_test_data.fcluster_maxclust:
yield self.check_fcluster_maxclust_monocrit, t
def check_fcluster_monocrit(self, t):
expectedT = hierarchy_test_data.fcluster_distance[t]
Z = single(hierarchy_test_data.Q_X)
T = fcluster(Z, t, criterion='monocrit', monocrit=maxdists(Z))
assert_(is_isomorphic(T, expectedT))
def check_fcluster_maxclust_monocrit(self, t):
expectedT = hierarchy_test_data.fcluster_maxclust[t]
Z = single(hierarchy_test_data.Q_X)
T = fcluster(Z, t, criterion='maxclust_monocrit', monocrit=maxdists(Z))
assert_(is_isomorphic(T, expectedT))
class TestLeaders(object):
def test_leaders_single(self):
# Tests leaders using a flat clustering generated by single linkage.
X = hierarchy_test_data.Q_X
Y = pdist(X)
Z = linkage(Y)
T = fcluster(Z, criterion='maxclust', t=3)
Lright = (np.array([53, 55, 56]), np.array([2, 3, 1]))
L = leaders(Z, T)
assert_equal(L, Lright)
class TestIsIsomorphic(object):
def test_is_isomorphic_1(self):
# Tests is_isomorphic on test case #1 (one flat cluster, different labellings)
a = [1, 1, 1]
b = [2, 2, 2]
assert_(is_isomorphic(a, b))
assert_(is_isomorphic(b, a))
def test_is_isomorphic_2(self):
# Tests is_isomorphic on test case #2 (two flat clusters, different labelings)
a = [1, 7, 1]
b = [2, 3, 2]
assert_(is_isomorphic(a, b))
assert_(is_isomorphic(b, a))
def test_is_isomorphic_3(self):
# Tests is_isomorphic on test case #3 (no flat clusters)
a = []
b = []
assert_(is_isomorphic(a, b))
def test_is_isomorphic_4A(self):
# Tests is_isomorphic on test case #4A (3 flat clusters, different labelings, isomorphic)
a = [1, 2, 3]
b = [1, 3, 2]
assert_(is_isomorphic(a, b))
assert_(is_isomorphic(b, a))
def test_is_isomorphic_4B(self):
# Tests is_isomorphic on test case #4B (3 flat clusters, different labelings, nonisomorphic)
a = [1, 2, 3, 3]
b = [1, 3, 2, 3]
assert_(is_isomorphic(a, b) == False)
assert_(is_isomorphic(b, a) == False)
def test_is_isomorphic_4C(self):
# Tests is_isomorphic on test case #4C (3 flat clusters, different labelings, isomorphic)
a = [7, 2, 3]
b = [6, 3, 2]
assert_(is_isomorphic(a, b))
assert_(is_isomorphic(b, a))
def test_is_isomorphic_5(self):
# Tests is_isomorphic on test case #5 (1000 observations, 2/3/5 random
# clusters, random permutation of the labeling).
for nc in [2, 3, 5]:
yield self.help_is_isomorphic_randperm, 1000, nc
def test_is_isomorphic_6(self):
# Tests is_isomorphic on test case #5A (1000 observations, 2/3/5 random
# clusters, random permutation of the labeling, slightly
# nonisomorphic.)
for nc in [2, 3, 5]:
yield self.help_is_isomorphic_randperm, 1000, nc, True, 5
def help_is_isomorphic_randperm(self, nobs, nclusters, noniso=False, nerrors=0):
for k in range(3):
a = np.int_(np.random.rand(nobs) * nclusters)
b = np.zeros(a.size, dtype=np.int_)
P = np.random.permutation(nclusters)
for i in xrange(0, a.shape[0]):
b[i] = P[a[i]]
if noniso:
Q = np.random.permutation(nobs)
b[Q[0:nerrors]] += 1
b[Q[0:nerrors]] %= nclusters
assert_(is_isomorphic(a, b) == (not noniso))
assert_(is_isomorphic(b, a) == (not noniso))
class TestIsValidLinkage(object):
def test_is_valid_linkage_various_size(self):
for nrow, ncol, valid in [(2, 5, False), (2, 3, False),
(1, 4, True), (2, 4, True)]:
yield self.check_is_valid_linkage_various_size, nrow, ncol, valid
def check_is_valid_linkage_various_size(self, nrow, ncol, valid):
# Tests is_valid_linkage(Z) with linkage matrics of various sizes
Z = np.asarray([[0, 1, 3.0, 2, 5],
[3, 2, 4.0, 3, 3]], dtype=np.double)
Z = Z[:nrow, :ncol]
assert_(is_valid_linkage(Z) == valid)
if not valid:
assert_raises(ValueError, is_valid_linkage, Z, throw=True)
def test_is_valid_linkage_int_type(self):
# Tests is_valid_linkage(Z) with integer type.
Z = np.asarray([[0, 1, 3.0, 2],
[3, 2, 4.0, 3]], dtype=np.int)
assert_(is_valid_linkage(Z) == False)
assert_raises(TypeError, is_valid_linkage, Z, throw=True)
def test_is_valid_linkage_empty(self):
# Tests is_valid_linkage(Z) with empty linkage.
Z = np.zeros((0, 4), dtype=np.double)
assert_(is_valid_linkage(Z) == False)
assert_raises(ValueError, is_valid_linkage, Z, throw=True)
def test_is_valid_linkage_4_and_up(self):
# Tests is_valid_linkage(Z) on linkage on observation sets between
# sizes 4 and 15 (step size 3).
for i in xrange(4, 15, 3):
y = np.random.rand(i*(i-1)//2)
Z = linkage(y)
assert_(is_valid_linkage(Z) == True)
def test_is_valid_linkage_4_and_up_neg_index_left(self):
# Tests is_valid_linkage(Z) on linkage on observation sets between
# sizes 4 and 15 (step size 3) with negative indices (left).
for i in xrange(4, 15, 3):
y = np.random.rand(i*(i-1)//2)
Z = linkage(y)
Z[i//2,0] = -2
assert_(is_valid_linkage(Z) == False)
assert_raises(ValueError, is_valid_linkage, Z, throw=True)
def test_is_valid_linkage_4_and_up_neg_index_right(self):
# Tests is_valid_linkage(Z) on linkage on observation sets between
# sizes 4 and 15 (step size 3) with negative indices (right).
for i in xrange(4, 15, 3):
y = np.random.rand(i*(i-1)//2)
Z = linkage(y)
Z[i//2,1] = -2
assert_(is_valid_linkage(Z) == False)
assert_raises(ValueError, is_valid_linkage, Z, throw=True)
def test_is_valid_linkage_4_and_up_neg_dist(self):
# Tests is_valid_linkage(Z) on linkage on observation sets between
# sizes 4 and 15 (step size 3) with negative distances.
for i in xrange(4, 15, 3):
y = np.random.rand(i*(i-1)//2)
Z = linkage(y)
Z[i//2,2] = -0.5
assert_(is_valid_linkage(Z) == False)
assert_raises(ValueError, is_valid_linkage, Z, throw=True)
def test_is_valid_linkage_4_and_up_neg_counts(self):
# Tests is_valid_linkage(Z) on linkage on observation sets between
# sizes 4 and 15 (step size 3) with negative counts.
for i in xrange(4, 15, 3):
y = np.random.rand(i*(i-1)//2)
Z = linkage(y)
Z[i//2,3] = -2
assert_(is_valid_linkage(Z) == False)
assert_raises(ValueError, is_valid_linkage, Z, throw=True)
class TestIsValidInconsistent(object):
def test_is_valid_im_int_type(self):
# Tests is_valid_im(R) with integer type.
R = np.asarray([[0, 1, 3.0, 2],
[3, 2, 4.0, 3]], dtype=np.int)
assert_(is_valid_im(R) == False)
assert_raises(TypeError, is_valid_im, R, throw=True)
def test_is_valid_im_various_size(self):
for nrow, ncol, valid in [(2, 5, False), (2, 3, False),
(1, 4, True), (2, 4, True)]:
yield self.check_is_valid_im_various_size, nrow, ncol, valid
def check_is_valid_im_various_size(self, nrow, ncol, valid):
# Tests is_valid_im(R) with linkage matrics of various sizes
R = np.asarray([[0, 1, 3.0, 2, 5],
[3, 2, 4.0, 3, 3]], dtype=np.double)
R = R[:nrow, :ncol]
assert_(is_valid_im(R) == valid)
if not valid:
assert_raises(ValueError, is_valid_im, R, throw=True)
def test_is_valid_im_empty(self):
# Tests is_valid_im(R) with empty inconsistency matrix.
R = np.zeros((0, 4), dtype=np.double)
assert_(is_valid_im(R) == False)
assert_raises(ValueError, is_valid_im, R, throw=True)
def test_is_valid_im_4_and_up(self):
# Tests is_valid_im(R) on im on observation sets between sizes 4 and 15
# (step size 3).
for i in xrange(4, 15, 3):
y = np.random.rand(i*(i-1)//2)
Z = linkage(y)
R = inconsistent(Z)
assert_(is_valid_im(R) == True)
def test_is_valid_im_4_and_up_neg_index_left(self):
# Tests is_valid_im(R) on im on observation sets between sizes 4 and 15
# (step size 3) with negative link height means.
for i in xrange(4, 15, 3):
y = np.random.rand(i*(i-1)//2)
Z = linkage(y)
R = inconsistent(Z)
R[i//2,0] = -2.0
assert_(is_valid_im(R) == False)
assert_raises(ValueError, is_valid_im, R, throw=True)
def test_is_valid_im_4_and_up_neg_index_right(self):
# Tests is_valid_im(R) on im on observation sets between sizes 4 and 15
# (step size 3) with negative link height standard deviations.
for i in xrange(4, 15, 3):
y = np.random.rand(i*(i-1)//2)
Z = linkage(y)
R = inconsistent(Z)
R[i//2,1] = -2.0
assert_(is_valid_im(R) == False)
assert_raises(ValueError, is_valid_im, R, throw=True)
def test_is_valid_im_4_and_up_neg_dist(self):
# Tests is_valid_im(R) on im on observation sets between sizes 4 and 15
# (step size 3) with negative link counts.
for i in xrange(4, 15, 3):
y = np.random.rand(i*(i-1)//2)
Z = linkage(y)
R = inconsistent(Z)
R[i//2,2] = -0.5
assert_(is_valid_im(R) == False)
assert_raises(ValueError, is_valid_im, R, throw=True)
class TestNumObsLinkage(TestCase):
def test_num_obs_linkage_empty(self):
# Tests num_obs_linkage(Z) with empty linkage.
Z = np.zeros((0, 4), dtype=np.double)
self.assertRaises(ValueError, num_obs_linkage, Z)
def test_num_obs_linkage_1x4(self):
# Tests num_obs_linkage(Z) on linkage over 2 observations.
Z = np.asarray([[0, 1, 3.0, 2]], dtype=np.double)
self.assertTrue(num_obs_linkage(Z) == 2)
def test_num_obs_linkage_2x4(self):
# Tests num_obs_linkage(Z) on linkage over 3 observations.
Z = np.asarray([[0, 1, 3.0, 2],
[3, 2, 4.0, 3]], dtype=np.double)
self.assertTrue(num_obs_linkage(Z) == 3)
def test_num_obs_linkage_4_and_up(self):
# Tests num_obs_linkage(Z) on linkage on observation sets between sizes
# 4 and 15 (step size 3).
for i in xrange(4, 15, 3):
y = np.random.rand(i*(i-1)//2)
Z = linkage(y)
self.assertTrue(num_obs_linkage(Z) == i)
class TestLeavesList(object):
def test_leaves_list_1x4(self):
# Tests leaves_list(Z) on a 1x4 linkage.
Z = np.asarray([[0, 1, 3.0, 2]], dtype=np.double)
to_tree(Z)
assert_equal(leaves_list(Z), [0, 1])
def test_leaves_list_2x4(self):
# Tests leaves_list(Z) on a 2x4 linkage.
Z = np.asarray([[0, 1, 3.0, 2],
[3, 2, 4.0, 3]], dtype=np.double)
to_tree(Z)
assert_equal(leaves_list(Z), [0, 1, 2])
def test_leaves_list_Q(self):
for method in ['single', 'complete', 'average', 'weighted', 'centroid',
'median', 'ward']:
yield self.check_leaves_list_Q, method
def check_leaves_list_Q(self, method):
# Tests leaves_list(Z) on the Q data set
X = hierarchy_test_data.Q_X
Z = linkage(X, method)
node = to_tree(Z)
assert_equal(node.pre_order(), leaves_list(Z))
def test_Q_subtree_pre_order(self):
# Tests that pre_order() works when called on sub-trees.
X = hierarchy_test_data.Q_X
Z = linkage(X, 'single')
node = to_tree(Z)
assert_equal(node.pre_order(), (node.get_left().pre_order()
+ node.get_right().pre_order()))
class TestCorrespond(TestCase):
def test_correspond_empty(self):
# Tests correspond(Z, y) with empty linkage and condensed distance matrix.
y = np.zeros((0,))
Z = np.zeros((0,4))
self.assertRaises(ValueError, correspond, Z, y)
def test_correspond_2_and_up(self):
# Tests correspond(Z, y) on linkage and CDMs over observation sets of
# different sizes.
for i in xrange(2, 4):
y = np.random.rand(i*(i-1)//2)
Z = linkage(y)
self.assertTrue(correspond(Z, y))
for i in xrange(4, 15, 3):
y = np.random.rand(i*(i-1)//2)
Z = linkage(y)
self.assertTrue(correspond(Z, y))
def test_correspond_4_and_up(self):
# Tests correspond(Z, y) on linkage and CDMs over observation sets of
        # different sizes. Correspondence should be false.
for (i, j) in (list(zip(list(range(2, 4)), list(range(3, 5)))) +
list(zip(list(range(3, 5)), list(range(2, 4))))):
y = np.random.rand(i*(i-1)//2)
y2 = np.random.rand(j*(j-1)//2)
Z = linkage(y)
Z2 = linkage(y2)
self.assertTrue(correspond(Z, y2) == False)
self.assertTrue(correspond(Z2, y) == False)
def test_correspond_4_and_up_2(self):
# Tests correspond(Z, y) on linkage and CDMs over observation sets of
        # different sizes. Correspondence should be false.
for (i, j) in (list(zip(list(range(2, 7)), list(range(16, 21)))) +
list(zip(list(range(2, 7)), list(range(16, 21))))):
y = np.random.rand(i*(i-1)//2)
y2 = np.random.rand(j*(j-1)//2)
Z = linkage(y)
Z2 = linkage(y2)
self.assertTrue(correspond(Z, y2) == False)
self.assertTrue(correspond(Z2, y) == False)
def test_num_obs_linkage_multi_matrix(self):
# Tests num_obs_linkage with observation matrices of multiple sizes.
for n in xrange(2, 10):
X = np.random.rand(n, 4)
Y = pdist(X)
Z = linkage(Y)
self.assertTrue(num_obs_linkage(Z) == n)
class TestIsMonotonic(TestCase):
def test_is_monotonic_empty(self):
# Tests is_monotonic(Z) on an empty linkage.
Z = np.zeros((0, 4))
self.assertRaises(ValueError, is_monotonic, Z)
def test_is_monotonic_1x4(self):
# Tests is_monotonic(Z) on 1x4 linkage. Expecting True.
Z = np.asarray([[0, 1, 0.3, 2]], dtype=np.double)
self.assertTrue(is_monotonic(Z) == True)
def test_is_monotonic_2x4_T(self):
# Tests is_monotonic(Z) on 2x4 linkage. Expecting True.
Z = np.asarray([[0, 1, 0.3, 2],
[2, 3, 0.4, 3]], dtype=np.double)
self.assertTrue(is_monotonic(Z) == True)
def test_is_monotonic_2x4_F(self):
# Tests is_monotonic(Z) on 2x4 linkage. Expecting False.
Z = np.asarray([[0, 1, 0.4, 2],
[2, 3, 0.3, 3]], dtype=np.double)
self.assertTrue(is_monotonic(Z) == False)
def test_is_monotonic_3x4_T(self):
# Tests is_monotonic(Z) on 3x4 linkage. Expecting True.
Z = np.asarray([[0, 1, 0.3, 2],
[2, 3, 0.4, 2],
[4, 5, 0.6, 4]], dtype=np.double)
self.assertTrue(is_monotonic(Z) == True)
def test_is_monotonic_3x4_F1(self):
# Tests is_monotonic(Z) on 3x4 linkage (case 1). Expecting False.
Z = np.asarray([[0, 1, 0.3, 2],
[2, 3, 0.2, 2],
[4, 5, 0.6, 4]], dtype=np.double)
self.assertTrue(is_monotonic(Z) == False)
def test_is_monotonic_3x4_F2(self):
# Tests is_monotonic(Z) on 3x4 linkage (case 2). Expecting False.
Z = np.asarray([[0, 1, 0.8, 2],
[2, 3, 0.4, 2],
[4, 5, 0.6, 4]], dtype=np.double)
self.assertTrue(is_monotonic(Z) == False)
def test_is_monotonic_3x4_F3(self):
# Tests is_monotonic(Z) on 3x4 linkage (case 3). Expecting False
Z = np.asarray([[0, 1, 0.3, 2],
[2, 3, 0.4, 2],
[4, 5, 0.2, 4]], dtype=np.double)
self.assertTrue(is_monotonic(Z) == False)
def test_is_monotonic_tdist_linkage1(self):
# Tests is_monotonic(Z) on clustering generated by single linkage on
# tdist data set. Expecting True.
Z = linkage(hierarchy_test_data.ytdist, 'single')
self.assertTrue(is_monotonic(Z) == True)
def test_is_monotonic_tdist_linkage2(self):
# Tests is_monotonic(Z) on clustering generated by single linkage on
# tdist data set. Perturbing. Expecting False.
Z = linkage(hierarchy_test_data.ytdist, 'single')
Z[2,2] = 0.0
self.assertTrue(is_monotonic(Z) == False)
def test_is_monotonic_Q_linkage(self):
# Tests is_monotonic(Z) on clustering generated by single linkage on
# Q data set. Expecting True.
X = hierarchy_test_data.Q_X
Z = linkage(X, 'single')
self.assertTrue(is_monotonic(Z) == True)
class TestMaxDists(object):
def test_maxdists_empty_linkage(self):
# Tests maxdists(Z) on empty linkage. Expecting exception.
Z = np.zeros((0, 4), dtype=np.double)
assert_raises(ValueError, maxdists, Z)
def test_maxdists_one_cluster_linkage(self):
# Tests maxdists(Z) on linkage with one cluster.
Z = np.asarray([[0, 1, 0.3, 4]], dtype=np.double)
MD = maxdists(Z)
expectedMD = calculate_maximum_distances(Z)
assert_allclose(MD, expectedMD, atol=1e-15)
def test_maxdists_Q_linkage(self):
for method in ['single', 'complete', 'ward', 'centroid', 'median']:
yield self.check_maxdists_Q_linkage, method
def check_maxdists_Q_linkage(self, method):
# Tests maxdists(Z) on the Q data set
X = hierarchy_test_data.Q_X
Z = linkage(X, method)
MD = maxdists(Z)
expectedMD = calculate_maximum_distances(Z)
assert_allclose(MD, expectedMD, atol=1e-15)
class TestMaxInconsts(object):
def test_maxinconsts_empty_linkage(self):
# Tests maxinconsts(Z, R) on empty linkage. Expecting exception.
Z = np.zeros((0, 4), dtype=np.double)
R = np.zeros((0, 4), dtype=np.double)
assert_raises(ValueError, maxinconsts, Z, R)
def test_maxinconsts_difrow_linkage(self):
# Tests maxinconsts(Z, R) on linkage and inconsistency matrices with
# different numbers of clusters. Expecting exception.
Z = np.asarray([[0, 1, 0.3, 4]], dtype=np.double)
R = np.random.rand(2, 4)
assert_raises(ValueError, maxinconsts, Z, R)
def test_maxinconsts_one_cluster_linkage(self):
# Tests maxinconsts(Z, R) on linkage with one cluster.
Z = np.asarray([[0, 1, 0.3, 4]], dtype=np.double)
R = np.asarray([[0, 0, 0, 0.3]], dtype=np.double)
MD = maxinconsts(Z, R)
expectedMD = calculate_maximum_inconsistencies(Z, R)
assert_allclose(MD, expectedMD, atol=1e-15)
def test_maxinconsts_Q_linkage(self):
for method in ['single', 'complete', 'ward', 'centroid', 'median']:
yield self.check_maxinconsts_Q_linkage, method
def check_maxinconsts_Q_linkage(self, method):
# Tests maxinconsts(Z, R) on the Q data set
X = hierarchy_test_data.Q_X
Z = linkage(X, method)
R = inconsistent(Z)
MD = maxinconsts(Z, R)
expectedMD = calculate_maximum_inconsistencies(Z, R)
assert_allclose(MD, expectedMD, atol=1e-15)
class TestMaxRStat(object):
def test_maxRstat_invalid_index(self):
for i in [3.3, -1, 4]:
yield self.check_maxRstat_invalid_index, i
def check_maxRstat_invalid_index(self, i):
# Tests maxRstat(Z, R, i). Expecting exception.
Z = np.asarray([[0, 1, 0.3, 4]], dtype=np.double)
R = np.asarray([[0, 0, 0, 0.3]], dtype=np.double)
if isinstance(i, int):
assert_raises(ValueError, maxRstat, Z, R, i)
else:
assert_raises(TypeError, maxRstat, Z, R, i)
def test_maxRstat_empty_linkage(self):
for i in range(4):
yield self.check_maxRstat_empty_linkage, i
def check_maxRstat_empty_linkage(self, i):
# Tests maxRstat(Z, R, i) on empty linkage. Expecting exception.
Z = np.zeros((0, 4), dtype=np.double)
R = np.zeros((0, 4), dtype=np.double)
assert_raises(ValueError, maxRstat, Z, R, i)
def test_maxRstat_difrow_linkage(self):
for i in range(4):
yield self.check_maxRstat_difrow_linkage, i
def check_maxRstat_difrow_linkage(self, i):
# Tests maxRstat(Z, R, i) on linkage and inconsistency matrices with
# different numbers of clusters. Expecting exception.
Z = np.asarray([[0, 1, 0.3, 4]], dtype=np.double)
R = np.random.rand(2, 4)
assert_raises(ValueError, maxRstat, Z, R, i)
def test_maxRstat_one_cluster_linkage(self):
for i in range(4):
yield self.check_maxRstat_one_cluster_linkage, i
def check_maxRstat_one_cluster_linkage(self, i):
# Tests maxRstat(Z, R, i) on linkage with one cluster.
Z = np.asarray([[0, 1, 0.3, 4]], dtype=np.double)
R = np.asarray([[0, 0, 0, 0.3]], dtype=np.double)
MD = maxRstat(Z, R, 1)
expectedMD = calculate_maximum_inconsistencies(Z, R, 1)
assert_allclose(MD, expectedMD, atol=1e-15)
def test_maxRstat_Q_linkage(self):
for method in ['single', 'complete', 'ward', 'centroid', 'median']:
for i in range(4):
yield self.check_maxRstat_Q_linkage, method, i
def check_maxRstat_Q_linkage(self, method, i):
# Tests maxRstat(Z, R, i) on the Q data set
X = hierarchy_test_data.Q_X
Z = linkage(X, method)
R = inconsistent(Z)
MD = maxRstat(Z, R, 1)
expectedMD = calculate_maximum_inconsistencies(Z, R, 1)
assert_allclose(MD, expectedMD, atol=1e-15)
class TestDendrogram(object):
def test_dendrogram_single_linkage_tdist(self):
# Tests dendrogram calculation on single linkage of the tdist data set.
Z = linkage(hierarchy_test_data.ytdist, 'single')
R = dendrogram(Z, no_plot=True)
leaves = R["leaves"]
assert_equal(leaves, [2, 5, 1, 0, 3, 4])
def test_valid_orientation(self):
Z = linkage(hierarchy_test_data.ytdist, 'single')
assert_raises(ValueError, dendrogram, Z, orientation="foo")
@dec.skipif(not have_matplotlib)
def test_dendrogram_plot(self):
for orientation in ['top', 'bottom', 'left', 'right']:
yield self.check_dendrogram_plot, orientation
def check_dendrogram_plot(self, orientation):
# Tests dendrogram plotting.
Z = linkage(hierarchy_test_data.ytdist, 'single')
expected = {'color_list': ['g', 'b', 'b', 'b', 'b'],
'dcoord': [[0.0, 138.0, 138.0, 0.0],
[0.0, 219.0, 219.0, 0.0],
[0.0, 255.0, 255.0, 219.0],
[0.0, 268.0, 268.0, 255.0],
[138.0, 295.0, 295.0, 268.0]],
'icoord': [[5.0, 5.0, 15.0, 15.0],
[45.0, 45.0, 55.0, 55.0],
[35.0, 35.0, 50.0, 50.0],
[25.0, 25.0, 42.5, 42.5],
[10.0, 10.0, 33.75, 33.75]],
'ivl': ['2', '5', '1', '0', '3', '4'],
'leaves': [2, 5, 1, 0, 3, 4]}
fig = plt.figure()
ax = fig.add_subplot(111)
# test that dendrogram accepts ax keyword
R1 = dendrogram(Z, ax=ax, orientation=orientation)
plt.close()
assert_equal(R1, expected)
# test plotting to gca (will import pylab)
R2 = dendrogram(Z, orientation=orientation)
plt.close()
assert_equal(R2, expected)
@dec.skipif(not have_matplotlib)
def test_dendrogram_truncate_mode(self):
Z = linkage(hierarchy_test_data.ytdist, 'single')
R = dendrogram(Z, 2, 'lastp', show_contracted=True)
plt.close()
assert_equal(R, {'color_list': ['b'],
'dcoord': [[0.0, 295.0, 295.0, 0.0]],
'icoord': [[5.0, 5.0, 15.0, 15.0]],
'ivl': ['(2)', '(4)'],
'leaves': [6, 9]})
R = dendrogram(Z, 2, 'mtica', show_contracted=True)
plt.close()
assert_equal(R, {'color_list': ['g', 'b', 'b', 'b'],
'dcoord': [[0.0, 138.0, 138.0, 0.0],
[0.0, 255.0, 255.0, 0.0],
[0.0, 268.0, 268.0, 255.0],
[138.0, 295.0, 295.0, 268.0]],
'icoord': [[5.0, 5.0, 15.0, 15.0],
[35.0, 35.0, 45.0, 45.0],
[25.0, 25.0, 40.0, 40.0],
[10.0, 10.0, 32.5, 32.5]],
'ivl': ['2', '5', '1', '0', '(2)'],
'leaves': [2, 5, 1, 0, 7]})
def calculate_maximum_distances(Z):
# Used for testing correctness of maxdists.
n = Z.shape[0] + 1
B = np.zeros((n-1,))
q = np.zeros((3,))
for i in xrange(0, n - 1):
q[:] = 0.0
left = Z[i, 0]
right = Z[i, 1]
if left >= n:
q[0] = B[int(left) - n]
if right >= n:
q[1] = B[int(right) - n]
q[2] = Z[i, 2]
B[i] = q.max()
return B
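# That is, B[i] = max(Z[i, 2], B[left child], B[right child]): the largest merge
# distance found anywhere within cluster i, which is what maxdists computes.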
def calculate_maximum_inconsistencies(Z, R, k=3):
# Used for testing correctness of maxinconsts.
n = Z.shape[0] + 1
B = np.zeros((n-1,))
q = np.zeros((3,))
for i in xrange(0, n - 1):
q[:] = 0.0
left = Z[i, 0]
right = Z[i, 1]
if left >= n:
q[0] = B[int(left) - n]
if right >= n:
q[1] = B[int(right) - n]
q[2] = R[i, k]
B[i] = q.max()
return B
def test_euclidean_linkage_value_error():
for method in scipy.cluster.hierarchy._cpy_euclid_methods:
assert_raises(ValueError,
linkage, [[1, 1], [1, 1]], method=method, metric='cityblock')
def test_2x2_linkage():
Z1 = linkage([1], method='single', metric='euclidean')
Z2 = linkage([[0, 1], [0, 0]], method='single', metric='euclidean')
assert_allclose(Z1, Z2)
if __name__ == "__main__":
run_module_suite()
| mit |
andyh616/mne-python | mne/dipole.py | 3 | 23401 | # Authors: Alexandre Gramfort <[email protected]>
# Eric Larson <[email protected]>
#
# License: Simplified BSD
import numpy as np
from scipy import linalg
from copy import deepcopy
import re
from .cov import read_cov, _get_whitener_data
from .io.constants import FIFF
from .io.pick import pick_types, channel_type
from .io.proj import make_projector, _has_eeg_average_ref_proj
from .bem import _fit_sphere
from .transforms import (_print_coord_trans, _coord_frame_name,
apply_trans, invert_transform)
from .forward._make_forward import (_get_mri_head_t, _setup_bem,
_prep_meg_channels, _prep_eeg_channels)
from .forward._compute_forward import (_compute_forwards_meeg,
_prep_field_computation)
from .externals.six import string_types
from .surface import (transform_surface_to, _normalize_vectors,
_get_ico_surface, _compute_nearest)
from .bem import _bem_find_surface, _bem_explain_surface
from .source_space import (_make_volume_source_space, SourceSpaces,
_points_outside_surface)
from .parallel import parallel_func
from .fixes import partial
from .utils import logger, verbose, _time_mask
class Dipole(object):
"""Dipole class
Used to store positions, orientations, amplitudes, times, goodness of fit
of dipoles, typically obtained with Neuromag/xfit, mne_dipole_fit
or certain inverse solvers.
Parameters
----------
times : array, shape (n_dipoles,)
The time instants at which each dipole was fitted (sec).
pos : array, shape (n_dipoles, 3)
The dipoles positions (m).
amplitude : array, shape (n_dipoles,)
The amplitude of the dipoles (nAm).
ori : array, shape (n_dipoles, 3)
The dipole orientations (normalized to unit length).
gof : array, shape (n_dipoles,)
The goodness of fit.
name : str | None
Name of the dipole.
"""
def __init__(self, times, pos, amplitude, ori, gof, name=None):
self.times = times
self.pos = pos
self.amplitude = amplitude
self.ori = ori
self.gof = gof
self.name = name
def __repr__(self):
s = "n_times : %s" % len(self.times)
s += ", tmin : %s" % np.min(self.times)
s += ", tmax : %s" % np.max(self.times)
return "<Dipole | %s>" % s
def save(self, fname):
"""Save dipole in a .dip file
Parameters
----------
fname : str
The name of the .dip file.
"""
fmt = " %7.1f %7.1f %8.2f %8.2f %8.2f %8.3f %8.3f %8.3f %8.3f %6.1f"
with open(fname, 'wb') as fid:
fid.write('# CoordinateSystem "Head"\n'.encode('utf-8'))
fid.write('# begin end X (mm) Y (mm) Z (mm)'
' Q(nAm) Qx(nAm) Qy(nAm) Qz(nAm) g/%\n'
.encode('utf-8'))
t = self.times[:, np.newaxis] * 1000.
gof = self.gof[:, np.newaxis]
amp = 1e9 * self.amplitude[:, np.newaxis]
out = np.concatenate((t, t, self.pos / 1e-3, amp,
self.ori * amp, gof), axis=-1)
np.savetxt(fid, out, fmt=fmt)
if self.name is not None:
fid.write(('## Name "%s dipoles" Style "Dipoles"'
% self.name).encode('utf-8'))
def crop(self, tmin=None, tmax=None):
"""Crop data to a given time interval
Parameters
----------
tmin : float | None
Start time of selection in seconds.
tmax : float | None
End time of selection in seconds.
"""
mask = _time_mask(self.times, tmin, tmax)
for attr in ('times', 'pos', 'gof', 'amplitude', 'ori'):
setattr(self, attr, getattr(self, attr)[mask])
def copy(self):
"""Copy the Dipoles object
Returns
-------
dip : instance of Dipole
The copied dipole instance.
"""
return deepcopy(self)
@verbose
def plot_locations(self, trans, subject, subjects_dir=None,
bgcolor=(1, 1, 1), opacity=0.3,
brain_color=(0.7, 0.7, 0.7), mesh_color=(1, 1, 0),
fig_name=None, fig_size=(600, 600), mode='cone',
scale_factor=0.1e-1, colors=None, verbose=None):
"""Plot dipole locations as arrows
Parameters
----------
trans : dict
The mri to head trans.
subject : str
The subject name corresponding to FreeSurfer environment
variable SUBJECT.
subjects_dir : None | str
The path to the freesurfer subjects reconstructions.
It corresponds to Freesurfer environment variable SUBJECTS_DIR.
The default is None.
bgcolor : tuple of length 3
Background color in 3D.
opacity : float in [0, 1]
Opacity of brain mesh.
brain_color : tuple of length 3
Brain color.
mesh_color : tuple of length 3
Mesh color.
fig_name : tuple of length 2
Mayavi figure name.
fig_size : tuple of length 2
Mayavi figure size.
mode : str
Should be ``'cone'`` or ``'sphere'`` to specify how the
dipoles should be shown.
scale_factor : float
The scaling applied to amplitudes for the plot.
        colors : list of colors | None
            Color to plot with each dipole. If None, default colors are used.
verbose : bool, str, int, or None
If not None, override default verbose level (see mne.verbose).
Returns
-------
fig : instance of mlab.Figure
The mayavi figure.
"""
from .viz import plot_dipole_locations
dipoles = []
for t in self.times:
dipoles.append(self.copy())
dipoles[-1].crop(t, t)
return plot_dipole_locations(
dipoles, trans, subject, subjects_dir, bgcolor, opacity,
brain_color, mesh_color, fig_name, fig_size, mode, scale_factor,
colors)
def plot_amplitudes(self, color='k', show=True):
"""Plot the dipole amplitudes as a function of time
Parameters
----------
        color : matplotlib color
Color to use for the trace.
show : bool
Show figure if True.
Returns
-------
fig : matplotlib.figure.Figure
The figure object containing the plot.
"""
from .viz import plot_dipole_amplitudes
return plot_dipole_amplitudes([self], [color], show)
def __getitem__(self, idx_slice):
"""Handle indexing"""
if isinstance(idx_slice, int): # make sure attributes stay 2d
idx_slice = [idx_slice]
selected_times = self.times[idx_slice].copy()
selected_pos = self.pos[idx_slice, :].copy()
selected_amplitude = self.amplitude[idx_slice].copy()
selected_ori = self.ori[idx_slice, :].copy()
selected_gof = self.gof[idx_slice].copy()
selected_name = self.name
new_dipole = Dipole(selected_times, selected_pos,
selected_amplitude, selected_ori,
selected_gof, selected_name)
return new_dipole
def __len__(self):
"""Handle len function"""
return self.pos.shape[0]
# #############################################################################
# IO
@verbose
def read_dipole(fname, verbose=None):
"""Read .dip file from Neuromag/xfit or MNE
Parameters
----------
fname : str
The name of the .dip file.
verbose : bool, str, int, or None
If not None, override default verbose level (see mne.verbose).
Returns
-------
dipole : instance of Dipole
The dipole.
"""
try:
data = np.loadtxt(fname, comments='%')
    except ValueError:
data = np.loadtxt(fname, comments='#') # handle 2 types of comments...
name = None
with open(fname, 'r') as fid:
for line in fid.readlines():
if line.startswith('##') or line.startswith('%%'):
m = re.search('Name "(.*) dipoles"', line)
if m:
name = m.group(1)
break
if data.ndim == 1:
data = data[None, :]
logger.info("%d dipole(s) found" % len(data))
times = data[:, 0] / 1000.
pos = 1e-3 * data[:, 2:5] # put data in meters
amplitude = data[:, 5]
norm = amplitude.copy()
amplitude /= 1e9
norm[norm == 0] = 1
ori = data[:, 6:9] / norm[:, np.newaxis]
gof = data[:, 9]
return Dipole(times, pos, amplitude, ori, gof, name)
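# Minimal usage sketch (illustrative; the .dip file name is a placeholder):
#
#     dip = read_dipole('my_dipoles.dip')
#     print(dip)                       # <Dipole | n_times : ...>
#     dip.crop(tmin=0.05, tmax=0.15)   # keep fits between 50 and 150 ms
#     dip.save('my_dipoles_cropped.dip')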
# #############################################################################
# Fitting
def _dipole_forwards(fwd_data, whitener, rr, n_jobs=1):
"""Compute the forward solution and do other nice stuff"""
B = _compute_forwards_meeg(rr, fwd_data, n_jobs, verbose=False)
B = np.concatenate(B, axis=1)
B_orig = B.copy()
# Apply projection and whiten (cov has projections already)
B = np.dot(B, whitener.T)
# column normalization doesn't affect our fitting, so skip for now
# S = np.sum(B * B, axis=1) # across channels
# scales = np.repeat(3. / np.sqrt(np.sum(np.reshape(S, (len(rr), 3)),
# axis=1)), 3)
# B *= scales[:, np.newaxis]
scales = np.ones(3)
return B, B_orig, scales
def _make_guesses(surf_or_rad, r0, grid, exclude, mindist, n_jobs):
"""Make a guess space inside a sphere or BEM surface"""
if isinstance(surf_or_rad, dict):
surf = surf_or_rad
logger.info('Guess surface (%s) is in %s coordinates'
% (_bem_explain_surface(surf['id']),
_coord_frame_name(surf['coord_frame'])))
else:
radius = surf_or_rad[0]
logger.info('Making a spherical guess space with radius %7.1f mm...'
% (1000 * radius))
surf = _get_ico_surface(3)
_normalize_vectors(surf['rr'])
surf['rr'] *= radius
surf['rr'] += r0
logger.info('Filtering (grid = %6.f mm)...' % (1000 * grid))
src = _make_volume_source_space(surf, grid, exclude, 1000 * mindist,
do_neighbors=False, n_jobs=n_jobs)
# simplify the result to make things easier later
src = dict(rr=src['rr'][src['vertno']], nn=src['nn'][src['vertno']],
nuse=src['nuse'], coord_frame=src['coord_frame'],
vertno=np.arange(src['nuse']))
return SourceSpaces([src])
def _fit_eval(rd, B, B2, fwd_svd=None, fwd_data=None, whitener=None,
constraint=None):
"""Calculate the residual sum of squares"""
if fwd_svd is None:
dist = constraint(rd)
if dist <= 0:
return 1. - 100 * dist
r1s = rd[np.newaxis, :]
fwd = _dipole_forwards(fwd_data, whitener, r1s)[0]
uu, sing, vv = linalg.svd(fwd, full_matrices=False)
else:
uu, sing, vv = fwd_svd
return 1 - _dipole_gof(uu, sing, vv, B, B2)[0]
def _dipole_gof(uu, sing, vv, B, B2):
"""Calculate the goodness of fit from the forward SVD"""
ncomp = 3 if sing[2] / sing[0] > 0.2 else 2
one = np.dot(vv[:ncomp], B)
Bm2 = np.sum(one ** 2)
return Bm2 / B2, one
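# In other words, the goodness of fit is the fraction of whitened data power
# explained by the dipole's forward-field subspace:
#     gof = ||V_k b||^2 / ||b||^2,
# where the rows of V_k are the first `ncomp` right-singular vectors of the
# whitened forward matrix and b is the whitened data vector.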
def _fit_Q(fwd_data, whitener, proj_op, B, B2, B_orig, rd):
"""Fit the dipole moment once the location is known"""
fwd, fwd_orig, scales = _dipole_forwards(fwd_data, whitener,
rd[np.newaxis, :])
uu, sing, vv = linalg.svd(fwd, full_matrices=False)
gof, one = _dipole_gof(uu, sing, vv, B, B2)
ncomp = len(one)
# Counteract the effect of column normalization
Q = scales[0] * np.sum(uu.T[:ncomp] * (one / sing[:ncomp])[:, np.newaxis],
axis=0)
# apply the projector to both elements
B_residual = np.dot(proj_op, B_orig) - np.dot(np.dot(Q, fwd_orig),
proj_op.T)
return Q, gof, B_residual
def _fit_dipoles(min_dist_to_inner_skull, data, times, rrs, guess_fwd_svd,
fwd_data, whitener, proj_op, n_jobs):
"""Fit a single dipole to the given whitened, projected data"""
from scipy.optimize import fmin_cobyla
parallel, p_fun, _ = parallel_func(_fit_dipole, n_jobs)
# parallel over time points
res = parallel(p_fun(min_dist_to_inner_skull, B, t, rrs, guess_fwd_svd,
fwd_data, whitener, proj_op, fmin_cobyla)
for B, t in zip(data.T, times))
pos = np.array([r[0] for r in res])
amp = np.array([r[1] for r in res])
ori = np.array([r[2] for r in res])
gof = np.array([r[3] for r in res]) * 100 # convert to percentage
residual = np.array([r[4] for r in res]).T
return pos, amp, ori, gof, residual
def _fit_dipole(min_dist_to_inner_skull, B_orig, t, rrs,
guess_fwd_svd, fwd_data, whitener, proj_op,
fmin_cobyla):
"""Fit a single bit of data"""
B = np.dot(whitener, B_orig)
surf = None
# make constraint function to keep the solver within the inner skull
if isinstance(fwd_data['inner_skull'], dict): # bem
surf = fwd_data['inner_skull']
def constraint(rd):
dist = _compute_nearest(surf['rr'], rd[np.newaxis, :],
return_dists=True)[1][0]
if _points_outside_surface(rd[np.newaxis, :], surf, 1)[0]:
dist *= -1.
# Once we know the dipole is below the inner skull,
# let's check if its distance to the inner skull is at least
# min_dist_to_inner_skull. This can be enforced by adding a
# constrain proportional to its distance.
dist -= min_dist_to_inner_skull
return dist
else: # sphere
R, r0 = fwd_data['inner_skull']
R_adj = R - 1e-3 # to be sure we don't hit the innermost surf
def constraint(rd):
return R_adj - np.sqrt(np.sum((rd - r0) ** 2))
# Find a good starting point (find_best_guess in C)
B2 = np.dot(B, B)
if B2 == 0:
logger.warning('Zero field found for time %s' % t)
        # also return a zero residual so callers unpacking five values succeed
        return np.zeros(3), 0, np.zeros(3), 0, np.zeros_like(B_orig)
x0 = rrs[np.argmin([_fit_eval(rrs[fi][np.newaxis, :], B, B2, fwd_svd)
for fi, fwd_svd in enumerate(guess_fwd_svd)])]
fun = partial(_fit_eval, B=B, B2=B2, fwd_data=fwd_data, whitener=whitener,
constraint=constraint)
# Tested minimizers:
# Simplex, BFGS, CG, COBYLA, L-BFGS-B, Powell, SLSQP, TNC
# Several were similar, but COBYLA won for having a handy constraint
# function we can use to ensure we stay inside the inner skull /
# smallest sphere
rd_final = fmin_cobyla(fun, x0, (constraint,), consargs=(),
rhobeg=5e-2, rhoend=1e-4, disp=False)
# Compute the dipole moment at the final point
Q, gof, residual = _fit_Q(fwd_data, whitener, proj_op, B, B2, B_orig,
rd_final)
amp = np.sqrt(np.dot(Q, Q))
norm = 1. if amp == 0. else amp
ori = Q / norm
msg = '---- Fitted : %7.1f ms' % (1000. * t)
if surf is not None:
dist_to_inner_skull = _compute_nearest(surf['rr'],
rd_final[np.newaxis, :],
return_dists=True)[1][0]
msg += (", distance to inner skull : %2.4f mm"
% (dist_to_inner_skull * 1000.))
logger.info(msg)
return rd_final, amp, ori, gof, residual
@verbose
def fit_dipole(evoked, cov, bem, trans=None, min_dist=5.,
n_jobs=1, verbose=None):
"""Fit a dipole
Parameters
----------
evoked : instance of Evoked
The dataset to fit.
cov : str | instance of Covariance
The noise covariance.
bem : str | dict
The BEM filename (str) or a loaded sphere model (dict).
trans : str | None
The head<->MRI transform filename. Must be provided unless BEM
is a sphere model.
min_dist : float
        Minimum distance (in millimeters) from the dipole to the inner skull.
Only used when using a BEM forward model. Must be positive.
n_jobs : int
Number of jobs to run in parallel (used in field computation
and fitting).
verbose : bool, str, int, or None
If not None, override default verbose level (see mne.verbose).
Returns
-------
dip : instance of Dipole
The dipole fits.
residual : ndarray, shape (n_meeg_channels, n_times)
The good M-EEG data channels with the fitted dipolar activity
removed.
See Also
--------
mne.beamformer.rap_music
Notes
-----
.. versionadded:: 0.9.0
"""
# This could eventually be adapted to work with other inputs, these
# are what is needed:
evoked = evoked.copy()
# Determine if a list of projectors has an average EEG ref
if "eeg" in evoked and not _has_eeg_average_ref_proj(evoked.info['projs']):
raise ValueError('EEG average reference is mandatory for dipole '
'fitting.')
if min_dist < 0:
raise ValueError('min_dist should be positive. Got %s' % min_dist)
data = evoked.data
info = evoked.info
times = evoked.times.copy()
comment = evoked.comment
# Convert the min_dist to meters
min_dist_to_inner_skull = min_dist / 1000.
# Figure out our inputs
neeg = len(pick_types(info, meg=False, eeg=True, exclude=[]))
if isinstance(bem, string_types):
logger.info('BEM : %s' % bem)
if trans is not None:
logger.info('MRI transform : %s' % trans)
mri_head_t, trans = _get_mri_head_t(trans)
else:
mri_head_t = {'from': FIFF.FIFFV_COORD_HEAD,
'to': FIFF.FIFFV_COORD_MRI, 'trans': np.eye(4)}
bem = _setup_bem(bem, bem, neeg, mri_head_t)
if not bem['is_sphere']:
if trans is None:
            raise ValueError('trans must not be None unless BEM is a '
                             'sphere model')
# Find the best-fitting sphere
inner_skull = _bem_find_surface(bem, 'inner_skull')
inner_skull = inner_skull.copy()
R, r0 = _fit_sphere(inner_skull['rr'], disp=False)
r0 = apply_trans(mri_head_t['trans'], r0[np.newaxis, :])[0]
logger.info('Grid origin : '
'%6.1f %6.1f %6.1f mm rad = %6.1f mm.'
% (1000 * r0[0], 1000 * r0[1], 1000 * r0[2], 1000 * R))
else:
r0 = bem['r0']
logger.info('Sphere model : origin at (% 7.2f % 7.2f % 7.2f) mm'
% (1000 * r0[0], 1000 * r0[1], 1000 * r0[2]))
if 'layers' in bem:
R = bem['layers'][0]['rad']
else:
R = np.inf
inner_skull = [R, r0]
r0_mri = apply_trans(invert_transform(mri_head_t)['trans'],
r0[np.newaxis, :])[0]
# Eventually these could be parameters, but they are just used for
# the initial grid anyway
guess_grid = 0.02 # MNE-C uses 0.01, but this is faster w/similar perf
guess_mindist = 0.005 # 0.01
guess_exclude = 0.02 # 0.02
accurate = False # can be made an option later (shouldn't make big diff)
logger.info('Guess grid : %6.1f mm' % (1000 * guess_grid,))
if guess_mindist > 0.0:
logger.info('Guess mindist : %6.1f mm' % (1000 * guess_mindist,))
if guess_exclude > 0:
logger.info('Guess exclude : %6.1f mm' % (1000 * guess_exclude,))
logger.info('Using %s MEG coil definitions.'
% ("accurate" if accurate else "standard"))
if isinstance(cov, string_types):
logger.info('Noise covariance : %s' % (cov,))
cov = read_cov(cov, verbose=False)
logger.info('')
_print_coord_trans(mri_head_t)
_print_coord_trans(info['dev_head_t'])
logger.info('%d bad channels total' % len(info['bads']))
# Forward model setup (setup_forward_model from setup.c)
ch_types = [channel_type(info, idx) for idx in range(info['nchan'])]
megcoils, compcoils, megnames, meg_info = [], [], [], None
eegels, eegnames = [], []
if 'grad' in ch_types or 'mag' in ch_types:
megcoils, compcoils, megnames, meg_info = \
_prep_meg_channels(info, accurate=accurate, verbose=verbose)
if 'eeg' in ch_types:
eegels, eegnames = _prep_eeg_channels(info, exclude='bads',
verbose=verbose)
# Ensure that MEG and/or EEG channels are present
if len(megcoils + eegels) == 0:
raise RuntimeError('No MEG or EEG channels found.')
# Whitener for the data
logger.info('Decomposing the sensor noise covariance matrix...')
picks = pick_types(info, meg=True, eeg=True, exclude='bads')
# In case we want to more closely match MNE-C for debugging:
# from .io.pick import pick_info
# from .cov import prepare_noise_cov
# info_nb = pick_info(info, picks)
# cov = prepare_noise_cov(cov, info_nb, info_nb['ch_names'], verbose=False)
# nzero = (cov['eig'] > 0)
# n_chan = len(info_nb['ch_names'])
# whitener = np.zeros((n_chan, n_chan), dtype=np.float)
# whitener[nzero, nzero] = 1.0 / np.sqrt(cov['eig'][nzero])
# whitener = np.dot(whitener, cov['eigvec'])
whitener = _get_whitener_data(info, cov, picks, verbose=False)
# Proceed to computing the fits (make_guess_data)
logger.info('\n---- Computing the forward solution for the guesses...')
src = _make_guesses(inner_skull, r0_mri,
guess_grid, guess_exclude, guess_mindist,
n_jobs=n_jobs)[0]
if isinstance(inner_skull, dict):
transform_surface_to(inner_skull, 'head', mri_head_t)
transform_surface_to(src, 'head', mri_head_t)
# C code computes guesses using a sphere model for speed, don't bother here
logger.info('Go through all guess source locations...')
fwd_data = dict(coils_list=[megcoils, eegels], infos=[meg_info, None],
ccoils_list=[compcoils, None], coil_types=['meg', 'eeg'],
inner_skull=inner_skull)
_prep_field_computation(src['rr'], bem, fwd_data, n_jobs, verbose=False)
guess_fwd = _dipole_forwards(fwd_data, whitener, src['rr'],
n_jobs=n_jobs)[0]
# decompose ahead of time
guess_fwd_svd = [linalg.svd(fwd, full_matrices=False)
for fwd in np.array_split(guess_fwd, len(src['rr']))]
logger.info('[done %d sources]' % src['nuse'])
# Do actual fits
data = data[picks]
ch_names = [info['ch_names'][p] for p in picks]
proj_op = make_projector(info['projs'], ch_names, info['bads'])[0]
out = _fit_dipoles(min_dist_to_inner_skull, data, times, src['rr'],
guess_fwd_svd, fwd_data,
whitener, proj_op, n_jobs)
dipoles = Dipole(times, out[0], out[1], out[2], out[3], comment)
residual = out[4]
logger.info('%d dipoles fitted' % len(dipoles.times))
return dipoles, residual
| bsd-3-clause |
vdrhtc/Measurement-automation | scripts/photon_wave_mixing/helpers.py | 1 | 3114 | from scipy.ndimage import gaussian_filter1d
from scipy.optimize import curve_fit
import scipy.fft as fp
import numpy as np
import pickle
import matplotlib.pyplot as plt
import lib.plotting as plt2
def parse_probe_qubit_sts(freqs, S21):
amps = np.abs(S21)
frequencies = freqs[gaussian_filter1d(amps, sigma=1).argmin(axis=-1)]
return frequencies
def parse_sps_sts(freqs, S21):
amps = np.abs(S21)
frequencies = freqs[gaussian_filter1d(amps, sigma=10).argmax(axis=-1)]
return frequencies
def qubit_fit_func(x, a, b, c):
return a * (x - b)**2 + c
def fit_probe_qubit_sts(filename, plot=True):
with open(filename, 'rb') as f:
data = pickle.load(f)
currents = data['bias, [A]']
freqs = data['Frequency [Hz]']
S21 = data['data']
frequencies = parse_probe_qubit_sts(freqs, S21)
popt, conv = curve_fit(qubit_fit_func, currents, frequencies,
p0=(-1e16, -2.5e-3, 5.15e9))
if plot:
xx, yy = np.meshgrid(currents, freqs)
plt2.plot_2D(xx, yy,
np.transpose(gaussian_filter1d(np.abs(S21), sigma=20)))
plt.figure()
plt.plot(currents, frequencies, 'o')
plt.plot(currents, qubit_fit_func(currents, *popt))
plt.margins(x=0)
plt.xlabel("Current, A")
plt.ylabel("Qubit if_freq, Hz")
plt.show()
return popt
def fit_sps_sts(filename, plot=True):
with open(filename, 'rb') as f:
data = pickle.load(f)
currents = data['bias, [A]']
freqs = data['Frequency [Hz]']
S21 = data['data']
frequencies = parse_sps_sts(freqs, S21)
popt, conv = curve_fit(qubit_fit_func, currents, frequencies,
p0=(-1e15, -5e-4, 5.15e9))
if plot:
xx, yy = np.meshgrid(currents, freqs)
plt2.plot_2D(xx, yy,
np.transpose(gaussian_filter1d(np.abs(S21), sigma=10)))
plt.figure()
plt.plot(currents, frequencies, 'o')
plt.plot(currents, qubit_fit_func(currents, *popt))
plt.margins(x=0)
plt.xlabel("Current, A")
plt.ylabel("Qubit if_freq, Hz")
plt.show()
return popt
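# Note: get_current below inverts qubit_fit_func (frequency = a*(current - b)**2 + c)
# for the current, keeping only the positive square-root branch:
# current = b + sqrt((frequency - c) / a).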
def get_current(frequency, a, b, c):
current = b + np.sqrt((frequency - c) / a)
return current
def remove_outliers():
pass
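# The zeroth FFT bin divided by N equals the arithmetic mean of the trace, so
# get_signal_amplitude returns the magnitude of the complex mean of the downconverted signal.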
def get_signal_amplitude(downconverted_trace):
N = len(downconverted_trace)
return np.abs(fp.fft(downconverted_trace)[0] / N)
def get_noise(downconverted_trace):
return np.std(downconverted_trace)
def measure_snr(devices_dict):
    # set the microwave source output state
    devices_dict['mw'].set_output_state("ON")
    # reset the AWG before reconfiguring it
    devices_dict['awg'].reset()
devices_dict['awg'].synchronize_channels(channelI, channelQ)
devices_dict['awg'].trigger_output_config(channel=channelI,
trig_length=100)
devices_dict['awg'].stop_AWG(channel=channelI)
devices_dict['iqawg'].set_parameters({"calibration": devices_dict['upconv_cal']})
devices_dict['iqawg'].output_IQ_waves_from_calibration(
amp_coeffs=(0.5, 0.5)) | gpl-3.0 |
Adai0808/scikit-learn | examples/linear_model/plot_bayesian_ridge.py | 248 | 2588 | """
=========================
Bayesian Ridge Regression
=========================
Computes a Bayesian Ridge Regression on a synthetic dataset.
See :ref:`bayesian_ridge_regression` for more information on the regressor.
Compared to the OLS (ordinary least squares) estimator, the coefficient
weights are slightly shifted toward zeros, which stabilises them.
As the prior on the weights is a Gaussian prior, the histogram of the
estimated weights is Gaussian.
The estimation of the model is done by iteratively maximizing the
marginal log-likelihood of the observations.
"""
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from scipy import stats
from sklearn.linear_model import BayesianRidge, LinearRegression
###############################################################################
# Generating simulated data with Gaussian weights
np.random.seed(0)
n_samples, n_features = 100, 100
X = np.random.randn(n_samples, n_features) # Create Gaussian data
# Create weights with a precision lambda_ of 4.
lambda_ = 4.
w = np.zeros(n_features)
# Only keep 10 weights of interest
relevant_features = np.random.randint(0, n_features, 10)
for i in relevant_features:
w[i] = stats.norm.rvs(loc=0, scale=1. / np.sqrt(lambda_))
# Create noise with a precision alpha of 50.
alpha_ = 50.
noise = stats.norm.rvs(loc=0, scale=1. / np.sqrt(alpha_), size=n_samples)
# Create the target
y = np.dot(X, w) + noise
###############################################################################
# Fit the Bayesian Ridge Regression and an OLS for comparison
clf = BayesianRidge(compute_score=True)
clf.fit(X, y)
ols = LinearRegression()
ols.fit(X, y)
###############################################################################
# Plot true weights, estimated weights and histogram of the weights
plt.figure(figsize=(6, 5))
plt.title("Weights of the model")
plt.plot(clf.coef_, 'b-', label="Bayesian Ridge estimate")
plt.plot(w, 'g-', label="Ground truth")
plt.plot(ols.coef_, 'r--', label="OLS estimate")
plt.xlabel("Features")
plt.ylabel("Values of the weights")
plt.legend(loc="best", prop=dict(size=12))
plt.figure(figsize=(6, 5))
plt.title("Histogram of the weights")
plt.hist(clf.coef_, bins=n_features, log=True)
plt.plot(clf.coef_[relevant_features], 5 * np.ones(len(relevant_features)),
'ro', label="Relevant features")
plt.ylabel("Features")
plt.xlabel("Values of the weights")
plt.legend(loc="lower left")
plt.figure(figsize=(6, 5))
plt.title("Marginal log-likelihood")
plt.plot(clf.scores_)
plt.ylabel("Score")
plt.xlabel("Iterations")
plt.show()
| bsd-3-clause |
swang8/Perl_scripts_misc | distruct_v2.py | 1 | 6573 | import numpy as np
import matplotlib.pyplot as plot
import colorsys
import getopt
import sys, pdb
def plot_admixture(admixture, population_indices, population_labels, title, ind_names):
N,K = admixture.shape
colors = [colorsys.hsv_to_rgb(h,0.9,0.7) for h in np.linspace(0,1,K+1)[:-1]]
text_color = 'k'
bg_color = 'w'
fontsize = 15
figure = plot.figure(figsize=(20,8))
xmin = 0.1
ymin = 0.2
height = 0.5
width = 0.8
indiv_width = width/N
subplot = figure.add_axes([xmin,ymin,width,height])
[spine.set_linewidth(0.001) for spine in subplot.spines.values()]
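    # Draw one stacked bar segment per cluster k; each segment sits on the cumulative
    # sum ('bottoms') of the admixture proportions of the clusters already drawn.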
for k in xrange(K):
if k:
bottoms = admixture[:,:k].sum(1)
else:
bottoms = np.zeros((N,),dtype=float)
lefts = np.arange(N)*indiv_width
subplot.bar(lefts, admixture[:,k], width=indiv_width, bottom=bottoms, facecolor=colors[k], edgecolor=colors[k], linewidth=0.4)
subplot.axis([0, N*indiv_width, 0, 1])
subplot.tick_params(axis='both', top=False, right=False, left=False, bottom=False)
xtick_labels = tuple(map(str,['']*N))
subplot.set_xticklabels(xtick_labels)
ytick_labels = tuple(map(str,['']*K))
subplot.set_yticklabels(ytick_labels)
position = subplot.get_position()
title_position = (0.5, 0.9)
figure.text(title_position[0], title_position[1], title, fontsize=fontsize, \
color='k', horizontalalignment='center', verticalalignment='center')
for p,popname in enumerate(population_labels):
indices = np.where(population_indices==p)[0]
if indices.size>0:
vline_pos = (indices.max()+1)*indiv_width
subplot.axvline(vline_pos, linestyle='-', linewidth=0.2, c='#888888')
label_position = (xmin+(2*indices.min()+indices.size)*0.5*indiv_width, ymin-0.01)
figure.text(label_position[0], label_position[1], popname, fontsize=10, color='k', \
horizontalalignment='right', verticalalignment='top', rotation=90)
# individual names
for i in range(N):
label_position = (xmin + i * indiv_width + indiv_width * 0.5, ymin + height + 0.01)
figure.text(label_position[0], label_position[1], ind_names[i], fontsize=3, color='k', \
horizontalalignment='right', verticalalignment='bottom', rotation=90)
return figure
def get_admixture_proportions(params):
# load admixture proportions
handle = open('%s.%d.meanQ'%(params['inputfile'],params['K']),'r')
admixture = np.array([line.strip().split() for line in handle]).astype('float')
handle.close()
N,K = admixture.shape
admixture = admixture/admixture.sum(1).reshape(N,1)
ind_names=[]
# get individual names
if params.has_key('ind'):
handle = open(params['ind'], 'r')
names = [line.strip() for line in handle]
handle.close()
ind_names = np.array(names)
# get population labels
if params.has_key('popfile'):
handle = open(params['popfile'],'r')
populations = [line.strip() for line in handle]
handle.close()
population_labels = list(set(populations))
# group populations by cluster similarity
population_cluster = [np.mean(admixture[[i for i,p in enumerate(populations) if p==label],:],0).argmax() \
for label in population_labels]
population_labels = [population_labels[j] for j in np.argsort(population_cluster)]
population_indices = np.array([population_labels.index(pop) for pop in populations])
# re-order samples in admixture matrix
order = np.argsort(population_indices)
population_indices = population_indices[order]
admixture = admixture[order,:]
ind_names = ind_names[order]
else:
print "file with population labels is not provided or does not exist .... \ncreating population labels based on inferred admixture proportions"
population_labels = ['population %d'%i for i in xrange(1,K+1)]
population_indices = np.argmax(admixture,1)
# re-order samples in admixture matrix
order = np.argsort(population_indices)
population_indices = population_indices[order]
admixture = admixture[order,:]
order = np.arange(N)
for k in xrange(K):
order[population_indices==k] = order[population_indices==k][np.argsort(admixture[population_indices==k,:][:,k])[::-1]]
admixture = admixture[order,:]
return admixture, population_indices, population_labels, ind_names
def parseopts(opts):
"""
parses the command-line flags and options passed to the script
"""
params = {}
for opt, arg in opts:
if opt in ["-K"]:
params['K'] = int(arg)
elif opt in ["--input"]:
params['inputfile'] = arg
elif opt in ["--output"]:
params['outputfile'] = arg
elif opt in ["--popfile"]:
params['popfile'] = arg
elif opt in ["--indfile"]:
params['ind'] = arg
elif opt in ["--title"]:
params['title'] = arg
return params
def usage():
"""
brief description of various flags and options for this script
"""
print "\nHere is how you can use this script\n"
print "Usage: python %s"%sys.argv[0]
print "\t -K <int> (number of populations)"
print "\t --input=<file> (/path/to/input/file; same as output flag passed to structure.py)"
print "\t --output=<file> (/path/to/output/file)"
print "\t --popfile=<file> (file with known categorical labels; optional)"
print "\t --title=<figure title> (a title for the figure; optional)"
if __name__=="__main__":
# parse command-line options
argv = sys.argv[1:]
smallflags = "K:"
bigflags = ["input=", "output=", "popfile=", "title=", "indfile="]
try:
opts, args = getopt.getopt(argv, smallflags, bigflags)
if not opts:
usage()
sys.exit(2)
except getopt.GetoptError:
print "Incorrect options passed"
usage()
sys.exit(2)
params = parseopts(opts)
# get the data to be plotted
admixture, population_indices, population_labels, individual_names = get_admixture_proportions(params)
if params.has_key('title'):
title = params['title']
else:
title = params['inputfile']
# plot the data
figure = plot_admixture(admixture, population_indices, population_labels, title, individual_names)
figure.savefig(params['outputfile'], dpi=300)
| mit |
olologin/scikit-learn | examples/manifold/plot_compare_methods.py | 39 | 4036 | """
=========================================
Comparison of Manifold Learning methods
=========================================
An illustration of dimensionality reduction on the S-curve dataset
with various manifold learning methods.
For a discussion and comparison of these algorithms, see the
:ref:`manifold module page <manifold>`
For a similar example, where the methods are applied to a
sphere dataset, see :ref:`example_manifold_plot_manifold_sphere.py`
Note that the purpose of the MDS is to find a low-dimensional
representation of the data (here 2D) in which the distances respect well
the distances in the original high-dimensional space; unlike other
manifold-learning algorithms, it does not seek an isotropic
representation of the data in the low-dimensional space.
"""
# Author: Jake Vanderplas -- <[email protected]>
print(__doc__)
from time import time
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
from matplotlib.ticker import NullFormatter
from sklearn import manifold, datasets
# Next line to silence pyflakes. This import is needed.
Axes3D
n_points = 1000
X, color = datasets.samples_generator.make_s_curve(n_points, random_state=0)
n_neighbors = 10
n_components = 2
fig = plt.figure(figsize=(15, 8))
plt.suptitle("Manifold Learning with %i points, %i neighbors"
% (1000, n_neighbors), fontsize=14)
try:
# compatibility matplotlib < 1.0
ax = fig.add_subplot(251, projection='3d')
ax.scatter(X[:, 0], X[:, 1], X[:, 2], c=color, cmap=plt.cm.Spectral)
ax.view_init(4, -72)
except:
ax = fig.add_subplot(251, projection='3d')
plt.scatter(X[:, 0], X[:, 2], c=color, cmap=plt.cm.Spectral)
methods = ['standard', 'ltsa', 'hessian', 'modified']
labels = ['LLE', 'LTSA', 'Hessian LLE', 'Modified LLE']
for i, method in enumerate(methods):
t0 = time()
Y = manifold.LocallyLinearEmbedding(n_neighbors, n_components,
eigen_solver='auto',
method=method).fit_transform(X)
t1 = time()
print("%s: %.2g sec" % (methods[i], t1 - t0))
ax = fig.add_subplot(252 + i)
plt.scatter(Y[:, 0], Y[:, 1], c=color, cmap=plt.cm.Spectral)
plt.title("%s (%.2g sec)" % (labels[i], t1 - t0))
ax.xaxis.set_major_formatter(NullFormatter())
ax.yaxis.set_major_formatter(NullFormatter())
plt.axis('tight')
t0 = time()
Y = manifold.Isomap(n_neighbors, n_components).fit_transform(X)
t1 = time()
print("Isomap: %.2g sec" % (t1 - t0))
ax = fig.add_subplot(257)
plt.scatter(Y[:, 0], Y[:, 1], c=color, cmap=plt.cm.Spectral)
plt.title("Isomap (%.2g sec)" % (t1 - t0))
ax.xaxis.set_major_formatter(NullFormatter())
ax.yaxis.set_major_formatter(NullFormatter())
plt.axis('tight')
t0 = time()
mds = manifold.MDS(n_components, max_iter=100, n_init=1)
Y = mds.fit_transform(X)
t1 = time()
print("MDS: %.2g sec" % (t1 - t0))
ax = fig.add_subplot(258)
plt.scatter(Y[:, 0], Y[:, 1], c=color, cmap=plt.cm.Spectral)
plt.title("MDS (%.2g sec)" % (t1 - t0))
ax.xaxis.set_major_formatter(NullFormatter())
ax.yaxis.set_major_formatter(NullFormatter())
plt.axis('tight')
t0 = time()
se = manifold.SpectralEmbedding(n_components=n_components,
n_neighbors=n_neighbors)
Y = se.fit_transform(X)
t1 = time()
print("SpectralEmbedding: %.2g sec" % (t1 - t0))
ax = fig.add_subplot(259)
plt.scatter(Y[:, 0], Y[:, 1], c=color, cmap=plt.cm.Spectral)
plt.title("SpectralEmbedding (%.2g sec)" % (t1 - t0))
ax.xaxis.set_major_formatter(NullFormatter())
ax.yaxis.set_major_formatter(NullFormatter())
plt.axis('tight')
t0 = time()
tsne = manifold.TSNE(n_components=n_components, init='pca', random_state=0)
Y = tsne.fit_transform(X)
t1 = time()
print("t-SNE: %.2g sec" % (t1 - t0))
ax = fig.add_subplot(2, 5, 10)
plt.scatter(Y[:, 0], Y[:, 1], c=color, cmap=plt.cm.Spectral)
plt.title("t-SNE (%.2g sec)" % (t1 - t0))
ax.xaxis.set_major_formatter(NullFormatter())
ax.yaxis.set_major_formatter(NullFormatter())
plt.axis('tight')
plt.show()
| bsd-3-clause |
jgillis/casadi | examples/python/vdp_indirect_muliple_shooting.py | 1 | 4572 | #
# This file is part of CasADi.
#
# CasADi -- A symbolic framework for dynamic optimization.
# Copyright (C) 2010 by Joel Andersson, Moritz Diehl, K.U.Leuven. All rights reserved.
#
# CasADi is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 3 of the License, or (at your option) any later version.
#
# CasADi is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with CasADi; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
#
#
from casadi import *
import numpy as NP
import matplotlib.pyplot as plt
# time
t = ssym("t")
# Declare variables (use simple, efficient DAG)
x0=ssym("x0"); x1=ssym("x1")
x = vertcat((x0,x1))
# Control
u = ssym("u")
# ODE right hand side
xdot = vertcat([(1 - x1*x1)*x0 - x1 + u, x0])
# Lagrangian function
L = x0*x0 + x1*x1 + u*u
# Costate
lam = ssym("lam",2)
# Hamiltonian function
H = inner_prod(lam,xdot) + L
Hfcn = SXFunction([x,lam,u,t],[H])
Hfcn.init()
# Costate equations
ldot = -Hfcn.grad(0,0)
## The control must minimize the Hamiltonian, which is:
print "Hamiltonian: ", H
# H is of a convex quadratic form in u: H = u*u + p*u + q; let's get the coefficient p
p = Hfcn.grad(2,0) # this gives us 2*u + p
p = substitute(p,u,0) # replace u with zero: gives us p
# H's unconstrained minimizer is: u = -p/2
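# (dH/du = 2*u + p, so the stationary point is u = -p/2; the positive quadratic coefficient makes it a minimum)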
u_opt = -p/2
# We must constrain u to the interval [-0.75, 1.0]; convexity of H ensures that the optimum is attained at the bound when u_opt is outside the interval
u_opt = min(u_opt,1.0)
u_opt = max(u_opt,-0.75)
print "optimal control: ", u_opt
# Augment f with lam_dot and substitute in the value for the optimal control
f = vertcat((xdot,ldot))
f = substitute(f,u,u_opt)
# Create the right hand side function
rhs_in = list(daeIn(x=vertcat((x,lam))))
rhs_in[DAE_T] = t
rhs = SXFunction(rhs_in,daeOut(ode=f))
# Augmented DAE state dimension
nX = 4
# End time
tf = 10.0
# Number of shooting nodes
num_nodes = 20
# Create an integrator (CVodes)
I = CVodesIntegrator(rhs)
I.setOption("abstol",1e-8) # abs. tolerance
I.setOption("reltol",1e-8) # rel. tolerance
I.setOption("t0",0.0)
I.setOption("tf",tf/num_nodes)
I.init()
# Variables in the root finding problem
NV = nX*(num_nodes+1)
V = msym("V",NV)
# Get the state at each shooting node
X = []
v_offset = 0
for k in range(num_nodes+1):
X.append(V[v_offset:v_offset+nX])
v_offset = v_offset+nX
# Formulate the root finding problem
G = []
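# Continuity (shooting-gap) constraints: the state integrated over segment k must match the
# state variable at node k+1; this is the defining condition of multiple shooting.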
for k in range(num_nodes):
XF,_,_,_ = I.call(integratorIn(x0=X[k]))
G.append(XF-X[k+1])
# Terminal constraints: lam = 0
G = MXFunction([V],[vertcat(G)])
# Dummy objective function (there are no degrees of freedom)
F = MXFunction([V],[0])
# Allocate NLP solver
solver = IpoptSolver(F,G)
# Initialize the NLP solver
solver.init()
# Set bounds and initial guess
solver.setInput([ 0, 1,-inf,-inf] + (num_nodes-1)*[-inf,-inf,-inf,-inf] + [-inf,-inf, 0, 0], "lbx")
solver.setInput([ 0, 1, inf, inf] + (num_nodes-1)*[ inf, inf, inf, inf] + [ inf, inf, 0, 0], "ubx")
solver.setInput([ 0, 0, 0, 0] + (num_nodes-1)*[ 0, 0, 0, 0] + [ 0, 0, 0, 0], "x0")
solver.setInput(num_nodes*[0,0,0,0], "lbg")
solver.setInput(num_nodes*[0,0,0,0], "ubg")
# Solve the problem
solver.solve()
# Time grid for visualization
tgrid = NP.linspace(0,tf,100)
# Output functions
output_fcn = SXFunction(rhs_in,[x0,x1,u_opt])
# Increase the end time for the integrator
I.setOption("tf",tf)
I.init()
# Simulator to get optimal state and control trajectories
simulator = Simulator(I, output_fcn, tgrid)
simulator.init()
# Pass initial conditions to the simulator
simulator.setInput(solver.output("x")[0:4],"x0")
# Simulate to get the trajectories
simulator.evaluate()
# Extract the simulated state and control trajectories
x_opt = simulator.output(0)
y_opt = simulator.output(1)
u_opt = simulator.output(2)
# Plot the results
plt.figure(1)
plt.clf()
plt.plot(tgrid,x_opt,'--')
plt.plot(tgrid,y_opt,'-')
plt.plot(tgrid,u_opt,'-.')
plt.title("Van der Pol optimization - indirect multiple shooting")
plt.xlabel('time')
plt.legend(['x trajectory','y trajectory','u trajectory'])
plt.grid()
plt.show()
| lgpl-3.0 |
anntzer/scipy | scipy/integrate/odepack.py | 21 | 10740 | # Author: Travis Oliphant
__all__ = ['odeint']
import numpy as np
from . import _odepack
from copy import copy
import warnings
class ODEintWarning(Warning):
pass
_msgs = {2: "Integration successful.",
1: "Nothing was done; the integration time was 0.",
-1: "Excess work done on this call (perhaps wrong Dfun type).",
-2: "Excess accuracy requested (tolerances too small).",
-3: "Illegal input detected (internal error).",
-4: "Repeated error test failures (internal error).",
-5: "Repeated convergence failures (perhaps bad Jacobian or tolerances).",
-6: "Error weight became zero during problem.",
-7: "Internal workspace insufficient to finish (internal error).",
-8: "Run terminated (internal error)."
}
def odeint(func, y0, t, args=(), Dfun=None, col_deriv=0, full_output=0,
ml=None, mu=None, rtol=None, atol=None, tcrit=None, h0=0.0,
hmax=0.0, hmin=0.0, ixpr=0, mxstep=0, mxhnil=0, mxordn=12,
mxords=5, printmessg=0, tfirst=False):
"""
Integrate a system of ordinary differential equations.
.. note:: For new code, use `scipy.integrate.solve_ivp` to solve a
differential equation.
Solve a system of ordinary differential equations using lsoda from the
FORTRAN library odepack.
Solves the initial value problem for stiff or non-stiff systems
of first order ode-s::
dy/dt = func(y, t, ...) [or func(t, y, ...)]
where y can be a vector.
.. note:: By default, the required order of the first two arguments of
`func` are in the opposite order of the arguments in the system
definition function used by the `scipy.integrate.ode` class and
the function `scipy.integrate.solve_ivp`. To use a function with
the signature ``func(t, y, ...)``, the argument `tfirst` must be
set to ``True``.
Parameters
----------
func : callable(y, t, ...) or callable(t, y, ...)
Computes the derivative of y at t.
If the signature is ``callable(t, y, ...)``, then the argument
`tfirst` must be set ``True``.
y0 : array
Initial condition on y (can be a vector).
t : array
A sequence of time points for which to solve for y. The initial
value point should be the first element of this sequence.
This sequence must be monotonically increasing or monotonically
decreasing; repeated values are allowed.
args : tuple, optional
Extra arguments to pass to function.
Dfun : callable(y, t, ...) or callable(t, y, ...)
Gradient (Jacobian) of `func`.
If the signature is ``callable(t, y, ...)``, then the argument
`tfirst` must be set ``True``.
col_deriv : bool, optional
True if `Dfun` defines derivatives down columns (faster),
otherwise `Dfun` should define derivatives across rows.
full_output : bool, optional
        If True, return a dictionary of optional outputs as the second output.
printmessg : bool, optional
Whether to print the convergence message
    tfirst : bool, optional
If True, the first two arguments of `func` (and `Dfun`, if given)
        must be ``t, y`` instead of the default ``y, t``.
.. versionadded:: 1.1.0
Returns
-------
y : array, shape (len(t), len(y0))
Array containing the value of y for each desired time in t,
with the initial value `y0` in the first row.
infodict : dict, only returned if full_output == True
Dictionary containing additional output information
======= ============================================================
key meaning
======= ============================================================
'hu' vector of step sizes successfully used for each time step
'tcur' vector with the value of t reached for each time step
(will always be at least as large as the input times)
'tolsf' vector of tolerance scale factors, greater than 1.0,
computed when a request for too much accuracy was detected
'tsw' value of t at the time of the last method switch
(given for each time step)
'nst' cumulative number of time steps
'nfe' cumulative number of function evaluations for each time step
'nje' cumulative number of jacobian evaluations for each time step
'nqu' a vector of method orders for each successful step
'imxer' index of the component of largest magnitude in the
weighted local error vector (e / ewt) on an error return, -1
otherwise
'lenrw' the length of the double work array required
'leniw' the length of integer work array required
'mused' a vector of method indicators for each successful time step:
1: adams (nonstiff), 2: bdf (stiff)
======= ============================================================
Other Parameters
----------------
ml, mu : int, optional
If either of these are not None or non-negative, then the
Jacobian is assumed to be banded. These give the number of
lower and upper non-zero diagonals in this banded matrix.
For the banded case, `Dfun` should return a matrix whose
rows contain the non-zero bands (starting with the lowest diagonal).
Thus, the return matrix `jac` from `Dfun` should have shape
``(ml + mu + 1, len(y0))`` when ``ml >=0`` or ``mu >=0``.
The data in `jac` must be stored such that ``jac[i - j + mu, j]``
holds the derivative of the `i`th equation with respect to the `j`th
state variable. If `col_deriv` is True, the transpose of this
`jac` must be returned.
rtol, atol : float, optional
The input parameters `rtol` and `atol` determine the error
control performed by the solver. The solver will control the
vector, e, of estimated local errors in y, according to an
inequality of the form ``max-norm of (e / ewt) <= 1``,
where ewt is a vector of positive error weights computed as
``ewt = rtol * abs(y) + atol``.
rtol and atol can be either vectors the same length as y or scalars.
Defaults to 1.49012e-8.
tcrit : ndarray, optional
Vector of critical points (e.g., singularities) where integration
care should be taken.
h0 : float, (0: solver-determined), optional
The step size to be attempted on the first step.
hmax : float, (0: solver-determined), optional
The maximum absolute step size allowed.
hmin : float, (0: solver-determined), optional
The minimum absolute step size allowed.
ixpr : bool, optional
Whether to generate extra printing at method switches.
mxstep : int, (0: solver-determined), optional
Maximum number of (internally defined) steps allowed for each
integration point in t.
mxhnil : int, (0: solver-determined), optional
Maximum number of messages printed.
mxordn : int, (0: solver-determined), optional
Maximum order to be allowed for the non-stiff (Adams) method.
mxords : int, (0: solver-determined), optional
Maximum order to be allowed for the stiff (BDF) method.
See Also
--------
solve_ivp : solve an initial value problem for a system of ODEs
ode : a more object-oriented integrator based on VODE
quad : for finding the area under a curve
Examples
--------
The second order differential equation for the angle `theta` of a
pendulum acted on by gravity with friction can be written::
theta''(t) + b*theta'(t) + c*sin(theta(t)) = 0
where `b` and `c` are positive constants, and a prime (') denotes a
derivative. To solve this equation with `odeint`, we must first convert
it to a system of first order equations. By defining the angular
velocity ``omega(t) = theta'(t)``, we obtain the system::
theta'(t) = omega(t)
omega'(t) = -b*omega(t) - c*sin(theta(t))
Let `y` be the vector [`theta`, `omega`]. We implement this system
in Python as:
>>> def pend(y, t, b, c):
... theta, omega = y
... dydt = [omega, -b*omega - c*np.sin(theta)]
... return dydt
...
We assume the constants are `b` = 0.25 and `c` = 5.0:
>>> b = 0.25
>>> c = 5.0
For initial conditions, we assume the pendulum is nearly vertical
with `theta(0)` = `pi` - 0.1, and is initially at rest, so
`omega(0)` = 0. Then the vector of initial conditions is
>>> y0 = [np.pi - 0.1, 0.0]
We will generate a solution at 101 evenly spaced samples in the interval
0 <= `t` <= 10. So our array of times is:
>>> t = np.linspace(0, 10, 101)
Call `odeint` to generate the solution. To pass the parameters
`b` and `c` to `pend`, we give them to `odeint` using the `args`
argument.
>>> from scipy.integrate import odeint
>>> sol = odeint(pend, y0, t, args=(b, c))
The solution is an array with shape (101, 2). The first column
is `theta(t)`, and the second is `omega(t)`. The following code
plots both components.
>>> import matplotlib.pyplot as plt
>>> plt.plot(t, sol[:, 0], 'b', label='theta(t)')
>>> plt.plot(t, sol[:, 1], 'g', label='omega(t)')
>>> plt.legend(loc='best')
>>> plt.xlabel('t')
>>> plt.grid()
>>> plt.show()
"""
if ml is None:
ml = -1 # changed to zero inside function call
if mu is None:
mu = -1 # changed to zero inside function call
dt = np.diff(t)
if not((dt >= 0).all() or (dt <= 0).all()):
raise ValueError("The values in t must be monotonically increasing "
"or monotonically decreasing; repeated values are "
"allowed.")
t = copy(t)
y0 = copy(y0)
output = _odepack.odeint(func, y0, t, args, Dfun, col_deriv, ml, mu,
full_output, rtol, atol, tcrit, h0, hmax, hmin,
ixpr, mxstep, mxhnil, mxordn, mxords,
int(bool(tfirst)))
if output[-1] < 0:
warning_msg = _msgs[output[-1]] + " Run with full_output = 1 to get quantitative information."
warnings.warn(warning_msg, ODEintWarning)
elif printmessg:
warning_msg = _msgs[output[-1]]
warnings.warn(warning_msg, ODEintWarning)
if full_output:
output[1]['message'] = _msgs[output[-1]]
output = output[:-1]
if len(output) == 1:
return output[0]
else:
return output
| bsd-3-clause |
babraham123/rpw | reader.py | 1 | 11194 | import subprocess, json, datetime, glob, os
import pandas as pd
import numpy as np
from AnxPy import Console
from AnxPy.environ import DW_PROD, DW_CTEST, DW_SAND
from anxapi import *
from link import lnk
from abc import ABCMeta, abstractmethod
def createReader(config):
if config['type'] == 'hdfs':
return HadoopFeedReader(config)
elif config['type'] == 'API':
return ApiReader(config)
elif config['type'] == 'database':
return DatabaseReader(config)
elif config['type'] == 'CSV':
return CsvReader(config)
else:
raise Exception('FeedReader not found')
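# Illustrative use of the factory (the config values below are hypothetical):
#     reader = createReader({"type": "CSV", "filename": "data.csv"})
#     df = reader.read()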
class FeedReader:
''' An abstract class for reading in AppNexus data from a variety of sources into a flat dataframe.
Implement:
r = createReader(config)
dataframe = r.read()
'''
__metaclass__ = ABCMeta
def __init__(self, config):
self.config = config
def __str__(self):
        return json.dumps(self.config, indent=2)
@abstractmethod
def read(self):
pass
def get_config(self):
return self.config
def set_config(self, config):
self.config = config
return True
class HadoopFeedReader(FeedReader):
'''
Read in data from Hadoop into a 2D table (dataframe)
example config::
{
"type":"hdfs",
"location":"/dv/domain_hourly_blocks/",
"filter":{
"days_ago":1
}
}
'''
def __init__(self, config):
self.config = config
def day_calc(self, n_days):
''' get date (n day ago from now, in UTC) '''
import datetime
d = datetime.datetime.utcnow() - datetime.timedelta(days=n_days)
return d.strftime("%Y/%m/%d")
def deleteTmp(self):
''' remove previous files from tmp/ if it exists '''
if os.path.exists('tmp/'):
filelist = glob.glob("tmp/*")
for f in filelist:
os.remove(f)
else:
os.makedirs('tmp/')
def getHDFS(self):
''' Connect to HDFS and get all of the compressed part files '''
# hdfs dfs -get /dv/domain_hourly_blocks/2014/04/16/part-r-00000.lzo | lzop -dc
if 'days_ago' in self.config['filter']:
daystr = self.day_calc(self.config['filter']['days_ago'])
else:
daystr = self.day_calc(1)
loc = self.config['location']
cmd1 = "hdfs dfs -get " + loc + daystr + "/* tmp/"
args1 = cmd1.split()
p1 = subprocess.Popen(args1, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
(stdout, stderr) = p1.communicate()
print "Hadoop dump:", stdout, stderr
print
def decompressLZO(self):
''' find all file parts, decompress .lzo files '''
with open("tmp/.pig_header", "r") as h:
headers = h.read()
headers = headers[:-1]
headers = headers.split(",")
data = []
filelist = glob.glob("tmp/*.lzo")
with open("error.log", "ab") as err:
err.write(self.day_calc( self.config['filter']['days_ago'] ) + ":")
for filepart in filelist:
cmd2 = "/usr/bin/lzop -dcf"
args2 = cmd2.split()
with open(filepart, "rb") as part:
p2 = subprocess.Popen(args2, stdin=part, stdout=subprocess.PIPE, stderr=err)
(stdout, stderr) = p2.communicate()
segment = stdout.split("\n")
for row in segment:
row = row[:-1]
row = row.split(",")
data.append(row)
del segment, stdout
print "headers: " + str(headers)
df = pd.DataFrame(data, columns=headers)
return df
def textHDFS(self):
''' Connect to HDFS and process all of the snappy part files '''
# hdfs dfs -ls /dv/domain_hourly_blocks/2014/06/05/
# hdfs dfs -text /dv/domain_hourly_blocks/2014/06/05/part-r-00000.snappy
if 'days_ago' in self.config['filter']:
daystr = self.day_calc(self.config['filter']['days_ago'])
else:
daystr = self.day_calc(1)
loc = self.config['location']
cmd1 = "hdfs dfs -ls " + loc + daystr + "/"
p1 = subprocess.Popen( cmd1.split(), stdout=subprocess.PIPE, stderr=subprocess.PIPE)
(stdout, stderr) = p1.communicate()
print stderr
# get snappy files
data_files = stdout.replace('\n',' ')
data_files = data_files.split()
data_files = [f for f in data_files if '.snappy' in f]
# sort by part number
data_files.sort()
# get headers
cmd2 = "hdfs dfs -text " + loc + daystr + "/.pig_header"
p2 = subprocess.Popen( cmd2.split(), stdout=subprocess.PIPE, stderr=subprocess.PIPE)
(headers, stderr) = p2.communicate()
print stderr
# create empty DataFrame
headers = headers.replace('\n','')
#headers = headers + ','
headers = headers.split(',')
data_df = pd.DataFrame(columns=headers)
# retreive data
for data_file in data_files:
cmd3 = "hdfs dfs -text " + data_file
            # forced to use a temp csv file in order to get nice unit conversion
try:
temp_file = open('temp.csv','w')
p3 = subprocess.Popen( cmd3.split(), stdout=temp_file, stderr=subprocess.PIPE)
(s, stderr) = p3.communicate()
print stderr
temp_file.close()
except Exception, e:
print "Skipping: " + data_file
print str(e)
# logger.exception
continue
# convert string to list of lists
#data_part = [row.split(',') for row in stdout.split('\n') if row]
# convert list of lists (with headers) to a list of dictionaries
#df = [ {data_part[0][i]:row[i] for i in range(len(row))} for row in data_part[1:] ]
#df = pd.DataFrame(df)
# Pretty harsh unit conversion, watch for columns with all NaN
#df = df.convert_objects(convert_numeric=True)
# read and append csv
df = pd.read_csv('temp.csv', index_col=False, names=headers, header=None)
data_df = data_df.append(df, ignore_index=True)
return data_df
def read(self):
# read in .snappy files
df = self.textHDFS()
return df
class ApiReader(FeedReader):
'''
Read in data from the API into a 2D table (dataframe)
example config::
{
"type":"API",
"environment":"dw-prod",
"service":"domain-list",
"filter":{
"ids":[123456],
"member_id":958
},
"field_name":"domains",
"field_type":"list",
}
'''
def __init__(self, config):
self.config = config
if self.config['environment'] in ['prod', 'dw-prod']:
self.base = 'dw-prod'
self.console = Console(DW_PROD)
elif self.config['environment'] in ['sand', 'dw-sand']:
self.base = 'dw-sand'
self.console = Console(DW_SAND)
elif self.config['environment'] in ['ctest', 'dw-ctest']:
self.base = 'dw-ctest'
self.console = Console(DW_CTEST)
elif self.config['environment'] in ['api-prod', 'api-sand', 'api-ctest']:
self.base = self.config['environment']
self.console = None
else:
raise Exception('Environment not found')
def querystr(self):
''' convert filter into query string params
'''
if 'api' in self.base:
qstr = '/'
else:
qstr = '?'
        for key, val in self.config['filter'].items():
qstr += str(key) + '=' + str(val) + '&'
qstr = self.config['service'] + qstr[:-1]
return qstr
def get_objects(self):
        ''' Retrieves the object json
'''
service = self.config['service']
if self.console:
            # convert dashes to camelcase
            service = ''.join([word[0].upper() + word[1:] for word in service.split('-')])
            # same as: service = self.console.DomainList
            service = getattr(self.console, service)
api_collection = service.get(filter=self.config['filter'])
api_objects = api_collection.get_all()
api_objects = [ api_object.data for api_object in api_objects ]
else:
response = anx_get(self.base, self.querystr())
if 'error' in response or 'error_code' in response:
raise Exception(json.dumps(response, indent=2))
else:
                response = anx_get_all(self.base, self.querystr())
if response['status'] != 'OK':
raise Exception('Unknown API error')
else:
if service in response:
api_objects = [response[service]]
elif (service+'s') in response:
api_objects = response[service+'s']
else:
raise Exception('Unknown API error')
return api_objects
def create_df(self, api_objects):
''' Creates df from field_name and object jsons
'''
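        # Illustrative example: an object {'id': 1, 'domains': ['a.com', 'b.com']} with
        # field_name 'domains' expands into one row per list item, each keeping the ids
        # named in save_list; scalar fields produce a single row per object.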
data = []
save_list = ['id', 'advertiser_id', 'publisher_id', 'member_id', 'line_item_id', 'insertion_order_id']
for api_object in api_objects:
row = {}
for save_item in save_list:
if save_item in api_object:
row[save_item] = api_object[save_item]
field = api_object[ self.config['field_name'] ]
if isinstance(field, (list, tuple)):
                for item in field:
                    item_row = dict(row)
                    item_row[self.config['field_name']] = item
                    data.append(item_row)
else:
row[ self.config['field_name'] ] = field
data.append(row)
return pd.DataFrame(data)
def read(self):
api_objects = self.get_objects()
df = self.create_df(api_objects)
return df
class DatabaseReader(FeedReader):
'''
Read in data from a database into a 2D table (dataframe), using Link
example config::
{
"type":"database",
"db":"vertica",
"query":"select * from agg_dw_advertiser_publisher_analytics_adjusted limit 30;"
}
'''
def __init__(self, config):
self.config = config
def read(self):
db = getattr(lnk.dbs, self.config['db'])
df = db.select_dataframe(self.config['query'])
return df
class CsvReader(FeedReader):
'''
Read in data from a csv file into a 2D table (dataframe), using a native pandas function
example config::
{
"type":"CSV",
"filename":"data.csv"
}
'''
def __init__(self, config):
self.config = config
def read(self):
        df = pd.read_csv(self.config['filename'], index_col=False)
return df
| mit |
jmhsi/justin_tinker | data_science/courses/learning_dl_packages/models/research/autoencoder/AutoencoderRunner.py | 3 | 1712 | from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
import sklearn.preprocessing as prep
import tensorflow as tf
from tensorflow.examples.tutorials.mnist import input_data
from autoencoder_models.Autoencoder import Autoencoder
mnist = input_data.read_data_sets('MNIST_data', one_hot=True)
def standard_scale(X_train, X_test):
preprocessor = prep.StandardScaler().fit(X_train)
X_train = preprocessor.transform(X_train)
X_test = preprocessor.transform(X_test)
return X_train, X_test
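# Mini-batches below are contiguous slices starting at a random offset rather than a
# shuffled sample; this is a cheap approximation of uniform sampling for this demo.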
def get_random_block_from_data(data, batch_size):
start_index = np.random.randint(0, len(data) - batch_size)
return data[start_index:(start_index + batch_size)]
X_train, X_test = standard_scale(mnist.train.images, mnist.test.images)
n_samples = int(mnist.train.num_examples)
training_epochs = 20
batch_size = 128
display_step = 1
autoencoder = Autoencoder(
n_input=784,
n_hidden=200,
transfer_function=tf.nn.softplus,
optimizer=tf.train.AdamOptimizer(learning_rate=0.001))
for epoch in range(training_epochs):
avg_cost = 0.
total_batch = int(n_samples / batch_size)
# Loop over all batches
for i in range(total_batch):
batch_xs = get_random_block_from_data(X_train, batch_size)
# Fit training using batch data
cost = autoencoder.partial_fit(batch_xs)
# Compute average loss
avg_cost += cost / n_samples * batch_size
# Display logs per epoch step
if epoch % display_step == 0:
print("Epoch:", '%d,' % (epoch + 1),
"Cost:", "{:.9f}".format(avg_cost))
print("Total cost: " + str(autoencoder.calc_total_cost(X_test)))
| apache-2.0 |
shenzebang/scikit-learn | examples/manifold/plot_compare_methods.py | 259 | 4031 | """
=========================================
Comparison of Manifold Learning methods
=========================================
An illustration of dimensionality reduction on the S-curve dataset
with various manifold learning methods.
For a discussion and comparison of these algorithms, see the
:ref:`manifold module page <manifold>`
For a similar example, where the methods are applied to a
sphere dataset, see :ref:`example_manifold_plot_manifold_sphere.py`
Note that the purpose of the MDS is to find a low-dimensional
representation of the data (here 2D) in which the distances respect well
the distances in the original high-dimensional space; unlike other
manifold-learning algorithms, it does not seek an isotropic
representation of the data in the low-dimensional space.
"""
# Author: Jake Vanderplas -- <[email protected]>
print(__doc__)
from time import time
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
from matplotlib.ticker import NullFormatter
from sklearn import manifold, datasets
# Next line to silence pyflakes. This import is needed.
Axes3D
n_points = 1000
X, color = datasets.samples_generator.make_s_curve(n_points, random_state=0)
n_neighbors = 10
n_components = 2
fig = plt.figure(figsize=(15, 8))
plt.suptitle("Manifold Learning with %i points, %i neighbors"
% (1000, n_neighbors), fontsize=14)
try:
# compatibility matplotlib < 1.0
ax = fig.add_subplot(251, projection='3d')
ax.scatter(X[:, 0], X[:, 1], X[:, 2], c=color, cmap=plt.cm.Spectral)
ax.view_init(4, -72)
except:
ax = fig.add_subplot(251, projection='3d')
plt.scatter(X[:, 0], X[:, 2], c=color, cmap=plt.cm.Spectral)
methods = ['standard', 'ltsa', 'hessian', 'modified']
labels = ['LLE', 'LTSA', 'Hessian LLE', 'Modified LLE']
for i, method in enumerate(methods):
t0 = time()
Y = manifold.LocallyLinearEmbedding(n_neighbors, n_components,
eigen_solver='auto',
method=method).fit_transform(X)
t1 = time()
print("%s: %.2g sec" % (methods[i], t1 - t0))
ax = fig.add_subplot(252 + i)
plt.scatter(Y[:, 0], Y[:, 1], c=color, cmap=plt.cm.Spectral)
plt.title("%s (%.2g sec)" % (labels[i], t1 - t0))
ax.xaxis.set_major_formatter(NullFormatter())
ax.yaxis.set_major_formatter(NullFormatter())
plt.axis('tight')
t0 = time()
Y = manifold.Isomap(n_neighbors, n_components).fit_transform(X)
t1 = time()
print("Isomap: %.2g sec" % (t1 - t0))
ax = fig.add_subplot(257)
plt.scatter(Y[:, 0], Y[:, 1], c=color, cmap=plt.cm.Spectral)
plt.title("Isomap (%.2g sec)" % (t1 - t0))
ax.xaxis.set_major_formatter(NullFormatter())
ax.yaxis.set_major_formatter(NullFormatter())
plt.axis('tight')
t0 = time()
mds = manifold.MDS(n_components, max_iter=100, n_init=1)
Y = mds.fit_transform(X)
t1 = time()
print("MDS: %.2g sec" % (t1 - t0))
ax = fig.add_subplot(258)
plt.scatter(Y[:, 0], Y[:, 1], c=color, cmap=plt.cm.Spectral)
plt.title("MDS (%.2g sec)" % (t1 - t0))
ax.xaxis.set_major_formatter(NullFormatter())
ax.yaxis.set_major_formatter(NullFormatter())
plt.axis('tight')
t0 = time()
se = manifold.SpectralEmbedding(n_components=n_components,
n_neighbors=n_neighbors)
Y = se.fit_transform(X)
t1 = time()
print("SpectralEmbedding: %.2g sec" % (t1 - t0))
ax = fig.add_subplot(259)
plt.scatter(Y[:, 0], Y[:, 1], c=color, cmap=plt.cm.Spectral)
plt.title("SpectralEmbedding (%.2g sec)" % (t1 - t0))
ax.xaxis.set_major_formatter(NullFormatter())
ax.yaxis.set_major_formatter(NullFormatter())
plt.axis('tight')
t0 = time()
tsne = manifold.TSNE(n_components=n_components, init='pca', random_state=0)
Y = tsne.fit_transform(X)
t1 = time()
print("t-SNE: %.2g sec" % (t1 - t0))
ax = fig.add_subplot(2, 5, 10)
plt.scatter(Y[:, 0], Y[:, 1], c=color, cmap=plt.cm.Spectral)
plt.title("t-SNE (%.2g sec)" % (t1 - t0))
ax.xaxis.set_major_formatter(NullFormatter())
ax.yaxis.set_major_formatter(NullFormatter())
plt.axis('tight')
plt.show()
| bsd-3-clause |
yaukwankiu/armor | tests/modifiedMexicanHatTest18.py | 1 | 4761 | # modifiedMexicanHatTest18.py
# two 3d charts in one
thisScript = 'modifiedMexicanHatTest18.py'
#outputFolder = '/media/TOSHIBA EXT/ARMOR/labLogs/2014-05-27-modifiedMexicanHatTest18_dual_3d_charts/'
outputFolder = '/media/TOSHIBA EXT/ARMOR/labLogs/2014-6-6/radar-radarPreprocessSigma16/'
choice = 16
diffExaggeration = 1
"""
1. load xyz1 for compref(radar)
2. load xyz2 for wrf
3. fix xyz2
4. charting 2 in one
"""
import shutil, time, os
import pickle
import numpy as np
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import axes3d
#from armor import defaultParameters as dp
timeString = str(int(time.time()))
###########################################################################
inputFolder='/media/TOSHIBA EXT/ARMOR/labLogs/2014-5-26-modifiedMexicanHatTest17_Numerical_Spectrum_for_Typhoon_Kong-Rey_RADAR/'
dataSource = "Numerical_Spectrum_for_Typhoon_Kong-Rey_RADAR"
i=121
xyz = pickle.load(open(inputFolder+'XYZ.pydump','r'))
X = xyz['X']
Y = xyz['Y']
Z = xyz['Z']
plt.close()
fig = plt.figure()
ax = fig.add_subplot(111, projection='3d')
ax.plot_wireframe(X, np.log2(Y), np.log2(Z), rstride=1, cstride=1) #key line
plt.title(dataSource+ " " + str(i) + "DBZ images\n"+\
"x-axis: response intensity(from 0 to 20)\n"+\
"y-axis: log_2(sigma)\n"+\
"z-axis: log_2(count)\n")
plt.xlabel('response intensity')
plt.ylabel('log2(sigma)')
fig.show()
xyz1 = xyz
dataSource1 = dataSource
i1=i
#################################################################################
if choice == 2:
i = 56
inputFolder = '/media/TOSHIBA EXT/ARMOR/labLogs/2014-5-28-modifiedMexicanHatTest17_Numerical_Spectrum_for_Kong-Rey_COMPREF-sigmaPreprocessing2/'
dataSource = 'Numerical_Spectrum_for_Kong-Rey_COMPREF-sigmaPreprocessing2'
elif choice ==4:
i = 68
inputFolder = "/media/TOSHIBA EXT/ARMOR/labLogs/2014-5-28-modifiedMexicanHatTest17_Numerical_Spectrum_for_Kong-Rey_COMPREF-sigmaPreprocessing4/"
dataSource = "Numerical_Spectrum_for_Kong-Rey_COMPREF-sigmaPreprocessing4"
elif choice ==10:
i = 123
inputFolder = "/media/TOSHIBA EXT/ARMOR/labLogs/2014-5-28-modifiedMexicanHatTest17_Numerical_Spectrum_for_Kong-Rey_COMPREF-sigmaPreprocessing10/"
dataSource = "Numerical_Spectrum_for_Kong-Rey_COMPREF-sigmaPreprocessing10"
elif choice ==16:
i = 75
inputFolder = "/media/TOSHIBA EXT/ARMOR/labLogs/2014-5-28-modifiedMexicanHatTest17_Numerical_Spectrum_for_Kong-Rey_COMPREF-sigmaPreprocessing16/"
dataSource = "Numerical_Spectrum_for_Kong-Rey_COMPREF-sigmaPreprocessing16"
elif choice == 'WRF' or choice=='wrf':
i = 399
inputFolder = '/media/TOSHIBA EXT/ARMOR/labLogs/2014-6-6-modifiedMexicanHatTest17_Numerical_Spectrum_for_Typhoon_Kong-Rey_WRF/'
dataSource = 'Numerical_Spectrum_for_Typhoon_Kong-Rey_WRF'
xyz = pickle.load(open(inputFolder+'XYZ.pydump','r'))
X = xyz['X']
Y = xyz['Y']
Z = xyz['Z']
plt.close()
fig = plt.figure()
ax = fig.add_subplot(111, projection='3d')
ax.plot_wireframe(X, np.log2(Y), np.log2(Z), rstride=1, cstride=1) #key line
plt.title(dataSource+ " " + str(i) + "DBZ images\n"+\
"x-axis: response intensity(from 0 to 20)\n"+\
"y-axis: log_2(sigma)\n"+\
"z-axis: log_2(count)\n")
plt.xlabel('response intensity')
plt.ylabel('log2(sigma)')
fig.show()
xyz2=xyz
dataSource2 = dataSource
i2=i
##############################################################################
xyz2['X'] +=2 #in log2 scale
xyz2['Z'] +=2
plt.close()
fig = plt.figure()
ax = fig.add_subplot(111, projection='3d')
X = xyz1['X']
Y = xyz1['Y']
Z = xyz1['Z'] *1./i1
Z1 = Z
ax.plot_surface(X, np.log2(Y), np.log2(Z), rstride=1, cstride=1, cmap='jet') #key line
X = xyz2['X']
Y = xyz2['Y']
Z = xyz2['Z'] *1./i2
Z2 = Z
ax.plot_surface(X, np.log2(Y), np.log2(Z), rstride=1, cstride=1, cmap='gray') #key line
ax.plot_wireframe(X, np.log2(Y), (np.log2(Z1)-np.log2(Z2))* diffExaggeration, rstride=1, cstride=1, colors="red") #key line
ax.plot_wireframe(X, np.log2(Y), np.zeros(X.shape), colors="green")
plt.title("Blue: Averaged "+dataSource1+ " " + str(i1) + "DBZ images\n"+\
"Gray: Averaged "+dataSource2+ " " + str(i2) + "DBZ images\n"+\
"Red wireframe: " + str(diffExaggeration) + "x Difference of Blue and Gray"
"y-axis: log_2(sigma)\n"+\
"z-axis: log_2(count)\n")
plt.xlabel('response intensity')
plt.ylabel('log2(sigma)')
for ang in range(270, 360+270, 10):
ax.azim = ang
fig.savefig(outputFolder+ dataSource1+"_" +dataSource2+ str(ax.azim) + '.png')
fig.show()
#####
shutil.copyfile('/media/TOSHIBA EXT/ARMOR/python/armor/tests/' +thisScript, outputFolder + timeString+ thisScript)
| cc0-1.0 |
rvraghav93/scikit-learn | examples/gaussian_process/plot_gpc_isoprobability.py | 64 | 3049 | #!/usr/bin/python
# -*- coding: utf-8 -*-
"""
=================================================================
Iso-probability lines for Gaussian Processes classification (GPC)
=================================================================
A two-dimensional classification example showing iso-probability lines for
the predicted probabilities.
"""
print(__doc__)
# Author: Vincent Dubourg <[email protected]>
# Adapted to GaussianProcessClassifier:
# Jan Hendrik Metzen <[email protected]>
# License: BSD 3 clause
import numpy as np
from matplotlib import pyplot as plt
from matplotlib import cm
from sklearn.gaussian_process import GaussianProcessClassifier
from sklearn.gaussian_process.kernels import DotProduct, ConstantKernel as C
# A few constants
lim = 8
def g(x):
"""The function to predict (classification will then consist in predicting
whether g(x) <= 0 or not)"""
return 5. - x[:, 1] - .5 * x[:, 0] ** 2.
# Design of experiments
X = np.array([[-4.61611719, -6.00099547],
[4.10469096, 5.32782448],
[0.00000000, -0.50000000],
[-6.17289014, -4.6984743],
[1.3109306, -6.93271427],
[-5.03823144, 3.10584743],
[-2.87600388, 6.74310541],
[5.21301203, 4.26386883]])
# Observations
y = np.array(g(X) > 0, dtype=int)
# Instantiate and fit Gaussian Process Model
kernel = C(0.1, (1e-5, np.inf)) * DotProduct(sigma_0=0.1) ** 2
gp = GaussianProcessClassifier(kernel=kernel)
gp.fit(X, y)
print("Learned kernel: %s " % gp.kernel_)
# Evaluate real function and the predicted probability
res = 50
x1, x2 = np.meshgrid(np.linspace(- lim, lim, res),
np.linspace(- lim, lim, res))
xx = np.vstack([x1.reshape(x1.size), x2.reshape(x2.size)]).T
y_true = g(xx)
y_prob = gp.predict_proba(xx)[:, 1]
y_true = y_true.reshape((res, res))
y_prob = y_prob.reshape((res, res))
# Plot the probabilistic classification iso-values
fig = plt.figure(1)
ax = fig.gca()
ax.axes.set_aspect('equal')
plt.xticks([])
plt.yticks([])
ax.set_xticklabels([])
ax.set_yticklabels([])
plt.xlabel('$x_1$')
plt.ylabel('$x_2$')
cax = plt.imshow(y_prob, cmap=cm.gray_r, alpha=0.8,
extent=(-lim, lim, -lim, lim))
norm = plt.matplotlib.colors.Normalize(vmin=0., vmax=0.9)
cb = plt.colorbar(cax, ticks=[0., 0.2, 0.4, 0.6, 0.8, 1.], norm=norm)
cb.set_label('${\\rm \mathbb{P}}\left[\widehat{G}(\mathbf{x}) \leq 0\\right]$')
plt.clim(0, 1)
plt.plot(X[y <= 0, 0], X[y <= 0, 1], 'r.', markersize=12)
plt.plot(X[y > 0, 0], X[y > 0, 1], 'b.', markersize=12)
cs = plt.contour(x1, x2, y_true, [0.], colors='k', linestyles='dashdot')
cs = plt.contour(x1, x2, y_prob, [0.666], colors='b',
linestyles='solid')
plt.clabel(cs, fontsize=11)
cs = plt.contour(x1, x2, y_prob, [0.5], colors='k',
linestyles='dashed')
plt.clabel(cs, fontsize=11)
cs = plt.contour(x1, x2, y_prob, [0.334], colors='r',
linestyles='solid')
plt.clabel(cs, fontsize=11)
plt.show()
| bsd-3-clause |
kaichogami/scikit-learn | sklearn/linear_model/tests/test_least_angle.py | 42 | 20925 | from nose.tools import assert_equal
import numpy as np
from scipy import linalg
from sklearn.model_selection import train_test_split
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_less
from sklearn.utils.testing import assert_greater
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import ignore_warnings
from sklearn.utils.testing import assert_no_warnings, assert_warns
from sklearn.utils.testing import TempMemmap
from sklearn.exceptions import ConvergenceWarning
from sklearn import linear_model, datasets
from sklearn.linear_model.least_angle import _lars_path_residues
diabetes = datasets.load_diabetes()
X, y = diabetes.data, diabetes.target
# TODO: use another dataset that has multiple drops
def test_simple():
# Principle of Lars is to keep covariances tied and decreasing
# also test verbose output
from sklearn.externals.six.moves import cStringIO as StringIO
import sys
old_stdout = sys.stdout
try:
sys.stdout = StringIO()
alphas_, active, coef_path_ = linear_model.lars_path(
diabetes.data, diabetes.target, method="lar", verbose=10)
sys.stdout = old_stdout
for (i, coef_) in enumerate(coef_path_.T):
res = y - np.dot(X, coef_)
cov = np.dot(X.T, res)
C = np.max(abs(cov))
eps = 1e-3
ocur = len(cov[C - eps < abs(cov)])
if i < X.shape[1]:
assert_true(ocur == i + 1)
else:
# no more than max_pred variables can go into the active set
assert_true(ocur == X.shape[1])
finally:
sys.stdout = old_stdout
def test_simple_precomputed():
# The same, with precomputed Gram matrix
G = np.dot(diabetes.data.T, diabetes.data)
alphas_, active, coef_path_ = linear_model.lars_path(
diabetes.data, diabetes.target, Gram=G, method="lar")
for i, coef_ in enumerate(coef_path_.T):
res = y - np.dot(X, coef_)
cov = np.dot(X.T, res)
C = np.max(abs(cov))
eps = 1e-3
ocur = len(cov[C - eps < abs(cov)])
if i < X.shape[1]:
assert_true(ocur == i + 1)
else:
# no more than max_pred variables can go into the active set
assert_true(ocur == X.shape[1])
def test_all_precomputed():
# Test that lars_path with precomputed Gram and Xy gives the right answer
X, y = diabetes.data, diabetes.target
G = np.dot(X.T, X)
Xy = np.dot(X.T, y)
for method in 'lar', 'lasso':
output = linear_model.lars_path(X, y, method=method)
output_pre = linear_model.lars_path(X, y, Gram=G, Xy=Xy, method=method)
for expected, got in zip(output, output_pre):
assert_array_almost_equal(expected, got)
def test_lars_lstsq():
# Test that Lars gives least square solution at the end
# of the path
X1 = 3 * diabetes.data # use un-normalized dataset
clf = linear_model.LassoLars(alpha=0.)
clf.fit(X1, y)
coef_lstsq = np.linalg.lstsq(X1, y)[0]
assert_array_almost_equal(clf.coef_, coef_lstsq)
def test_lasso_gives_lstsq_solution():
# Test that Lars Lasso gives least square solution at the end
# of the path
alphas_, active, coef_path_ = linear_model.lars_path(X, y, method="lasso")
coef_lstsq = np.linalg.lstsq(X, y)[0]
assert_array_almost_equal(coef_lstsq, coef_path_[:, -1])
def test_collinearity():
# Check that lars_path is robust to collinearity in input
X = np.array([[3., 3., 1.],
[2., 2., 0.],
[1., 1., 0]])
y = np.array([1., 0., 0])
f = ignore_warnings
_, _, coef_path_ = f(linear_model.lars_path)(X, y, alpha_min=0.01)
assert_true(not np.isnan(coef_path_).any())
residual = np.dot(X, coef_path_[:, -1]) - y
assert_less((residual ** 2).sum(), 1.) # just make sure it's bounded
n_samples = 10
X = np.random.rand(n_samples, 5)
y = np.zeros(n_samples)
_, _, coef_path_ = linear_model.lars_path(X, y, Gram='auto', copy_X=False,
copy_Gram=False, alpha_min=0.,
method='lasso', verbose=0,
max_iter=500)
assert_array_almost_equal(coef_path_, np.zeros_like(coef_path_))
def test_no_path():
# Test that the ``return_path=False`` option returns the correct output
alphas_, active_, coef_path_ = linear_model.lars_path(
diabetes.data, diabetes.target, method="lar")
alpha_, active, coef = linear_model.lars_path(
diabetes.data, diabetes.target, method="lar", return_path=False)
assert_array_almost_equal(coef, coef_path_[:, -1])
assert_true(alpha_ == alphas_[-1])
def test_no_path_precomputed():
# Test that the ``return_path=False`` option with Gram remains correct
G = np.dot(diabetes.data.T, diabetes.data)
alphas_, active_, coef_path_ = linear_model.lars_path(
diabetes.data, diabetes.target, method="lar", Gram=G)
alpha_, active, coef = linear_model.lars_path(
diabetes.data, diabetes.target, method="lar", Gram=G,
return_path=False)
assert_array_almost_equal(coef, coef_path_[:, -1])
assert_true(alpha_ == alphas_[-1])
def test_no_path_all_precomputed():
# Test that the ``return_path=False`` option with Gram and Xy remains
# correct
X, y = 3 * diabetes.data, diabetes.target
G = np.dot(X.T, X)
Xy = np.dot(X.T, y)
alphas_, active_, coef_path_ = linear_model.lars_path(
X, y, method="lasso", Gram=G, Xy=Xy, alpha_min=0.9)
print("---")
alpha_, active, coef = linear_model.lars_path(
X, y, method="lasso", Gram=G, Xy=Xy, alpha_min=0.9, return_path=False)
assert_array_almost_equal(coef, coef_path_[:, -1])
assert_true(alpha_ == alphas_[-1])
def test_singular_matrix():
# Test when input is a singular matrix
X1 = np.array([[1, 1.], [1., 1.]])
y1 = np.array([1, 1])
alphas, active, coef_path = linear_model.lars_path(X1, y1)
assert_array_almost_equal(coef_path.T, [[0, 0], [1, 0]])
def test_rank_deficient_design():
# consistency test that checks that LARS Lasso is handling rank
# deficient input data (with n_features < rank) in the same way
# as coordinate descent Lasso
y = [5, 0, 5]
for X in ([[5, 0],
[0, 5],
[10, 10]],
[[10, 10, 0],
[1e-32, 0, 0],
[0, 0, 1]],
):
# To be able to use the coefs to compute the objective function,
# we need to turn off normalization
lars = linear_model.LassoLars(.1, normalize=False)
coef_lars_ = lars.fit(X, y).coef_
obj_lars = (1. / (2. * 3.)
* linalg.norm(y - np.dot(X, coef_lars_)) ** 2
+ .1 * linalg.norm(coef_lars_, 1))
coord_descent = linear_model.Lasso(.1, tol=1e-6, normalize=False)
coef_cd_ = coord_descent.fit(X, y).coef_
obj_cd = ((1. / (2. * 3.)) * linalg.norm(y - np.dot(X, coef_cd_)) ** 2
+ .1 * linalg.norm(coef_cd_, 1))
assert_less(obj_lars, obj_cd * (1. + 1e-8))
def test_lasso_lars_vs_lasso_cd(verbose=False):
# Test that LassoLars and Lasso using coordinate descent give the
# same results.
X = 3 * diabetes.data
alphas, _, lasso_path = linear_model.lars_path(X, y, method='lasso')
lasso_cd = linear_model.Lasso(fit_intercept=False, tol=1e-8)
for c, a in zip(lasso_path.T, alphas):
if a == 0:
continue
lasso_cd.alpha = a
lasso_cd.fit(X, y)
error = linalg.norm(c - lasso_cd.coef_)
assert_less(error, 0.01)
# similar test, with the classifiers
for alpha in np.linspace(1e-2, 1 - 1e-2, 20):
clf1 = linear_model.LassoLars(alpha=alpha, normalize=False).fit(X, y)
clf2 = linear_model.Lasso(alpha=alpha, tol=1e-8,
normalize=False).fit(X, y)
err = linalg.norm(clf1.coef_ - clf2.coef_)
assert_less(err, 1e-3)
# same test, with normalized data
X = diabetes.data
alphas, _, lasso_path = linear_model.lars_path(X, y, method='lasso')
lasso_cd = linear_model.Lasso(fit_intercept=False, normalize=True,
tol=1e-8)
for c, a in zip(lasso_path.T, alphas):
if a == 0:
continue
lasso_cd.alpha = a
lasso_cd.fit(X, y)
error = linalg.norm(c - lasso_cd.coef_)
assert_less(error, 0.01)
def test_lasso_lars_vs_lasso_cd_early_stopping(verbose=False):
# Test that LassoLars and Lasso using coordinate descent give the
# same results when early stopping is used.
# (test : before, in the middle, and in the last part of the path)
alphas_min = [10, 0.9, 1e-4]
    for alpha_min in alphas_min:
        alphas, _, lasso_path = linear_model.lars_path(X, y, method='lasso',
                                                       alpha_min=alpha_min)
lasso_cd = linear_model.Lasso(fit_intercept=False, tol=1e-8)
lasso_cd.alpha = alphas[-1]
lasso_cd.fit(X, y)
error = linalg.norm(lasso_path[:, -1] - lasso_cd.coef_)
assert_less(error, 0.01)
alphas_min = [10, 0.9, 1e-4]
# same test, with normalization
    for alpha_min in alphas_min:
        alphas, _, lasso_path = linear_model.lars_path(X, y, method='lasso',
                                                       alpha_min=alpha_min)
lasso_cd = linear_model.Lasso(fit_intercept=True, normalize=True,
tol=1e-8)
lasso_cd.alpha = alphas[-1]
lasso_cd.fit(X, y)
error = linalg.norm(lasso_path[:, -1] - lasso_cd.coef_)
assert_less(error, 0.01)
def test_lasso_lars_path_length():
# Test that the path length of the LassoLars is right
lasso = linear_model.LassoLars()
lasso.fit(X, y)
lasso2 = linear_model.LassoLars(alpha=lasso.alphas_[2])
lasso2.fit(X, y)
assert_array_almost_equal(lasso.alphas_[:3], lasso2.alphas_)
# Also check that the sequence of alphas is always decreasing
assert_true(np.all(np.diff(lasso.alphas_) < 0))
def test_lasso_lars_vs_lasso_cd_ill_conditioned():
# Test lasso lars on a very ill-conditioned design, and check that
# it does not blow up, and stays somewhat close to a solution given
# by the coordinate descent solver
# Also test that lasso_path (using lars_path output style) gives
# the same result as lars_path and previous lasso output style
# under these conditions.
rng = np.random.RandomState(42)
# Generate data
n, m = 70, 100
k = 5
X = rng.randn(n, m)
w = np.zeros((m, 1))
i = np.arange(0, m)
rng.shuffle(i)
supp = i[:k]
w[supp] = np.sign(rng.randn(k, 1)) * (rng.rand(k, 1) + 1)
y = np.dot(X, w)
sigma = 0.2
y += sigma * rng.rand(*y.shape)
y = y.squeeze()
lars_alphas, _, lars_coef = linear_model.lars_path(X, y, method='lasso')
_, lasso_coef2, _ = linear_model.lasso_path(X, y,
alphas=lars_alphas,
tol=1e-6,
fit_intercept=False)
assert_array_almost_equal(lars_coef, lasso_coef2, decimal=1)
def test_lasso_lars_vs_lasso_cd_ill_conditioned2():
# Create an ill-conditioned situation in which the LARS has to go
# far in the path to converge, and check that LARS and coordinate
# descent give the same answers
# Note it used to be the case that Lars had to use the drop for good
# strategy for this but this is no longer the case with the
# equality_tolerance checks
X = [[1e20, 1e20, 0],
[-1e-32, 0, 0],
[1, 1, 1]]
y = [10, 10, 1]
alpha = .0001
def objective_function(coef):
return (1. / (2. * len(X)) * linalg.norm(y - np.dot(X, coef)) ** 2
+ alpha * linalg.norm(coef, 1))
lars = linear_model.LassoLars(alpha=alpha, normalize=False)
assert_warns(ConvergenceWarning, lars.fit, X, y)
lars_coef_ = lars.coef_
lars_obj = objective_function(lars_coef_)
coord_descent = linear_model.Lasso(alpha=alpha, tol=1e-4, normalize=False)
cd_coef_ = coord_descent.fit(X, y).coef_
cd_obj = objective_function(cd_coef_)
assert_less(lars_obj, cd_obj * (1. + 1e-8))
def test_lars_add_features():
# assure that at least some features get added if necessary
# test for 6d2b4c
# Hilbert matrix
n = 5
H = 1. / (np.arange(1, n + 1) + np.arange(n)[:, np.newaxis])
clf = linear_model.Lars(fit_intercept=False).fit(
H, np.arange(n))
assert_true(np.all(np.isfinite(clf.coef_)))
def test_lars_n_nonzero_coefs(verbose=False):
lars = linear_model.Lars(n_nonzero_coefs=6, verbose=verbose)
lars.fit(X, y)
assert_equal(len(lars.coef_.nonzero()[0]), 6)
# The path should be of length 6 + 1 in a Lars going down to 6
# non-zero coefs
assert_equal(len(lars.alphas_), 7)
@ignore_warnings
def test_multitarget():
# Assure that estimators receiving multidimensional y do the right thing
X = diabetes.data
Y = np.vstack([diabetes.target, diabetes.target ** 2]).T
n_targets = Y.shape[1]
for estimator in (linear_model.LassoLars(), linear_model.Lars()):
estimator.fit(X, Y)
Y_pred = estimator.predict(X)
Y_dec = assert_warns(DeprecationWarning, estimator.decision_function, X)
assert_array_almost_equal(Y_pred, Y_dec)
alphas, active, coef, path = (estimator.alphas_, estimator.active_,
estimator.coef_, estimator.coef_path_)
for k in range(n_targets):
estimator.fit(X, Y[:, k])
y_pred = estimator.predict(X)
assert_array_almost_equal(alphas[k], estimator.alphas_)
assert_array_almost_equal(active[k], estimator.active_)
assert_array_almost_equal(coef[k], estimator.coef_)
assert_array_almost_equal(path[k], estimator.coef_path_)
assert_array_almost_equal(Y_pred[:, k], y_pred)
def test_lars_cv():
# Test the LassoLarsCV object by checking that the optimal alpha
# increases as the number of samples increases.
# This property is not actually guaranteed in general and is just a
# property of the given dataset, with the given steps chosen.
old_alpha = 0
lars_cv = linear_model.LassoLarsCV()
for length in (400, 200, 100):
X = diabetes.data[:length]
y = diabetes.target[:length]
lars_cv.fit(X, y)
np.testing.assert_array_less(old_alpha, lars_cv.alpha_)
old_alpha = lars_cv.alpha_
def test_lasso_lars_ic():
# Test the LassoLarsIC object by checking that
# - some good features are selected.
# - alpha_bic > alpha_aic
# - n_nonzero_bic < n_nonzero_aic
lars_bic = linear_model.LassoLarsIC('bic')
lars_aic = linear_model.LassoLarsIC('aic')
rng = np.random.RandomState(42)
X = diabetes.data
y = diabetes.target
X = np.c_[X, rng.randn(X.shape[0], 4)] # add 4 bad features
lars_bic.fit(X, y)
lars_aic.fit(X, y)
nonzero_bic = np.where(lars_bic.coef_)[0]
nonzero_aic = np.where(lars_aic.coef_)[0]
assert_greater(lars_bic.alpha_, lars_aic.alpha_)
assert_less(len(nonzero_bic), len(nonzero_aic))
assert_less(np.max(nonzero_bic), diabetes.data.shape[1])
# test error on unknown IC
lars_broken = linear_model.LassoLarsIC('<unknown>')
assert_raises(ValueError, lars_broken.fit, X, y)
def test_no_warning_for_zero_mse():
# LassoLarsIC should not warn for log of zero MSE.
y = np.arange(10, dtype=float)
X = y.reshape(-1, 1)
lars = linear_model.LassoLarsIC(normalize=False)
assert_no_warnings(lars.fit, X, y)
assert_true(np.any(np.isinf(lars.criterion_)))
def test_lars_path_readonly_data():
# When using automated memory mapping on large input, the
# fold data is in read-only mode
# This is a non-regression test for:
# https://github.com/scikit-learn/scikit-learn/issues/4597
splitted_data = train_test_split(X, y, random_state=42)
with TempMemmap(splitted_data) as (X_train, X_test, y_train, y_test):
# The following should not fail despite copy=False
_lars_path_residues(X_train, y_train, X_test, y_test, copy=False)
def test_lars_path_positive_constraint():
# this is the main test for the positive parameter on the lars_path method
# the estimator classes just make use of this function
# we do the test on the diabetes dataset
# ensure that we get negative coefficients when positive=False
# and all positive when positive=True
# for method 'lar' (default) and lasso
for method in ['lar', 'lasso']:
alpha, active, coefs = \
linear_model.lars_path(diabetes['data'], diabetes['target'],
return_path=True, method=method,
positive=False)
assert_true(coefs.min() < 0)
alpha, active, coefs = \
linear_model.lars_path(diabetes['data'], diabetes['target'],
return_path=True, method=method,
positive=True)
assert_true(coefs.min() >= 0)
    # now we are going to test the positive option for all estimator classes
default_parameter = {'fit_intercept': False}
estimator_parameter_map = {'Lars': {'n_nonzero_coefs': 5},
'LassoLars': {'alpha': 0.1},
'LarsCV': {},
'LassoLarsCV': {},
'LassoLarsIC': {}}
def test_estimatorclasses_positive_constraint():
    # test that the positive option is passed through correctly for all
    # estimator classes, in this same function here
for estname in estimator_parameter_map:
params = default_parameter.copy()
params.update(estimator_parameter_map[estname])
estimator = getattr(linear_model, estname)(positive=False, **params)
estimator.fit(diabetes['data'], diabetes['target'])
assert_true(estimator.coef_.min() < 0)
estimator = getattr(linear_model, estname)(positive=True, **params)
estimator.fit(diabetes['data'], diabetes['target'])
assert_true(min(estimator.coef_) >= 0)
def test_lasso_lars_vs_lasso_cd_positive(verbose=False):
# Test that LassoLars and Lasso using coordinate descent give the
# same results when using the positive option
# This test is basically a copy of the above with additional positive
    # option. However, for the middle part (the comparison of coefficient
    # values for a range of alphas) we had to make some adaptations. See below.
# not normalized data
X = 3 * diabetes.data
alphas, _, lasso_path = linear_model.lars_path(X, y, method='lasso',
positive=True)
lasso_cd = linear_model.Lasso(fit_intercept=False, tol=1e-8, positive=True)
for c, a in zip(lasso_path.T, alphas):
if a == 0:
continue
lasso_cd.alpha = a
lasso_cd.fit(X, y)
error = linalg.norm(c - lasso_cd.coef_)
assert_less(error, 0.01)
# The range of alphas chosen for coefficient comparison here is restricted
# as compared with the above test without the positive option. This is due
# to the circumstance that the Lars-Lasso algorithm does not converge to
# the least-squares-solution for small alphas, see 'Least Angle Regression'
    # by Efron et al. 2004. The coefficients typically agree up to
# the smallest alpha reached by the Lars-Lasso algorithm and start to
# diverge thereafter. See
# https://gist.github.com/michigraber/7e7d7c75eca694c7a6ff
for alpha in np.linspace(6e-1, 1 - 1e-2, 20):
clf1 = linear_model.LassoLars(fit_intercept=False, alpha=alpha,
normalize=False, positive=True).fit(X, y)
clf2 = linear_model.Lasso(fit_intercept=False, alpha=alpha, tol=1e-8,
normalize=False, positive=True).fit(X, y)
err = linalg.norm(clf1.coef_ - clf2.coef_)
assert_less(err, 1e-3)
# normalized data
X = diabetes.data
alphas, _, lasso_path = linear_model.lars_path(X, y, method='lasso',
positive=True)
lasso_cd = linear_model.Lasso(fit_intercept=False, normalize=True,
tol=1e-8, positive=True)
for c, a in zip(lasso_path.T[:-1], alphas[:-1]): # don't include alpha=0
lasso_cd.alpha = a
lasso_cd.fit(X, y)
error = linalg.norm(c - lasso_cd.coef_)
assert_less(error, 0.01)
| bsd-3-clause |
shangwuhencc/scikit-learn | sklearn/utils/extmath.py | 70 | 21951 | """
Extended math utilities.
"""
# Authors: Gael Varoquaux
# Alexandre Gramfort
# Alexandre T. Passos
# Olivier Grisel
# Lars Buitinck
# Stefan van der Walt
# Kyle Kastner
# License: BSD 3 clause
from __future__ import division
from functools import partial
import warnings
import numpy as np
from scipy import linalg
from scipy.sparse import issparse
from . import check_random_state
from .fixes import np_version
from ._logistic_sigmoid import _log_logistic_sigmoid
from ..externals.six.moves import xrange
from .sparsefuncs_fast import csr_row_norms
from .validation import check_array, NonBLASDotWarning
def norm(x):
"""Compute the Euclidean or Frobenius norm of x.
Returns the Euclidean norm when x is a vector, the Frobenius norm when x
is a matrix (2-d array). More precise than sqrt(squared_norm(x)).
"""
x = np.asarray(x)
nrm2, = linalg.get_blas_funcs(['nrm2'], [x])
return nrm2(x)
# Newer NumPy has a ravel that needs less copying.
if np_version < (1, 7, 1):
_ravel = np.ravel
else:
_ravel = partial(np.ravel, order='K')
def squared_norm(x):
"""Squared Euclidean or Frobenius norm of x.
Returns the Euclidean norm when x is a vector, the Frobenius norm when x
is a matrix (2-d array). Faster than norm(x) ** 2.
"""
x = _ravel(x)
return np.dot(x, x)
def row_norms(X, squared=False):
"""Row-wise (squared) Euclidean norm of X.
Equivalent to np.sqrt((X * X).sum(axis=1)), but also supports CSR sparse
matrices and does not create an X.shape-sized temporary.
Performs no input validation.
"""
if issparse(X):
norms = csr_row_norms(X)
else:
norms = np.einsum('ij,ij->i', X, X)
if not squared:
np.sqrt(norms, norms)
return norms
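# --- Illustrative sketch (editor addition, not part of the original module) ---
# A hedged demo of row_norms: it matches the naive dense formula and also
# accepts CSR input without densifying it. Wrapped in a helper so importing
# this module stays side-effect free; call _demo_row_norms() explicitly to run.
def _demo_row_norms():
    from scipy.sparse import csr_matrix
    rng = np.random.RandomState(0)
    X = rng.rand(4, 3)
    naive = np.sqrt((X * X).sum(axis=1))       # allocates an X-sized temporary
    assert np.allclose(row_norms(X), naive)
    assert np.allclose(row_norms(csr_matrix(X), squared=True), naive ** 2)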
def fast_logdet(A):
"""Compute log(det(A)) for A symmetric
    Equivalent to: np.log(np.linalg.det(A)) but more robust.
    It returns -Inf if det(A) is non-positive or not defined.
"""
sign, ld = np.linalg.slogdet(A)
if not sign > 0:
return -np.inf
return ld
def _impose_f_order(X):
"""Helper Function"""
# important to access flags instead of calling np.isfortran,
# this catches corner cases.
if X.flags.c_contiguous:
return check_array(X.T, copy=False, order='F'), True
else:
return check_array(X, copy=False, order='F'), False
def _fast_dot(A, B):
if B.shape[0] != A.shape[A.ndim - 1]: # check adopted from '_dotblas.c'
raise ValueError
if A.dtype != B.dtype or any(x.dtype not in (np.float32, np.float64)
for x in [A, B]):
warnings.warn('Data must be of same type. Supported types '
'are 32 and 64 bit float. '
'Falling back to np.dot.', NonBLASDotWarning)
raise ValueError
if min(A.shape) == 1 or min(B.shape) == 1 or A.ndim != 2 or B.ndim != 2:
raise ValueError
# scipy 0.9 compliant API
dot = linalg.get_blas_funcs(['gemm'], (A, B))[0]
A, trans_a = _impose_f_order(A)
B, trans_b = _impose_f_order(B)
return dot(alpha=1.0, a=A, b=B, trans_a=trans_a, trans_b=trans_b)
def _have_blas_gemm():
try:
linalg.get_blas_funcs(['gemm'])
return True
except (AttributeError, ValueError):
warnings.warn('Could not import BLAS, falling back to np.dot')
return False
# Only use fast_dot for older NumPy; newer ones have tackled the speed issue.
if np_version < (1, 7, 2) and _have_blas_gemm():
def fast_dot(A, B):
"""Compute fast dot products directly calling BLAS.
This function calls BLAS directly while warranting Fortran contiguity.
This helps avoiding extra copies `np.dot` would have created.
For details see section `Linear Algebra on large Arrays`:
http://wiki.scipy.org/PerformanceTips
Parameters
----------
A, B: instance of np.ndarray
Input arrays. Arrays are supposed to be of the same dtype and to
have exactly 2 dimensions. Currently only floats are supported.
In case these requirements aren't met np.dot(A, B) is returned
instead. To activate the related warning issued in this case
execute the following lines of code:
>> import warnings
>> from sklearn.utils.validation import NonBLASDotWarning
>> warnings.simplefilter('always', NonBLASDotWarning)
"""
try:
return _fast_dot(A, B)
except ValueError:
# Maltyped or malformed data.
return np.dot(A, B)
else:
fast_dot = np.dot
def density(w, **kwargs):
"""Compute density of a sparse vector
Return a value between 0 and 1
"""
if hasattr(w, "toarray"):
d = float(w.nnz) / (w.shape[0] * w.shape[1])
else:
d = 0 if w is None else float((w != 0).sum()) / w.size
return d
def safe_sparse_dot(a, b, dense_output=False):
"""Dot product that handle the sparse matrix case correctly
Uses BLAS GEMM as replacement for numpy.dot where possible
to avoid unnecessary copies.
"""
if issparse(a) or issparse(b):
ret = a * b
if dense_output and hasattr(ret, "toarray"):
ret = ret.toarray()
return ret
else:
return fast_dot(a, b)
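# --- Illustrative sketch (editor addition, not part of the original module) ---
# Hedged demo of safe_sparse_dot: with sparse operands the product stays sparse
# unless dense_output=True, in which case it is converted to an ndarray.
def _demo_safe_sparse_dot():
    from scipy.sparse import csr_matrix
    rng = np.random.RandomState(0)
    a = csr_matrix(rng.rand(3, 4))
    b = csr_matrix(rng.rand(4, 2))
    product = safe_sparse_dot(a, b)                    # sparse result
    dense = safe_sparse_dot(a, b, dense_output=True)   # ndarray result
    assert issparse(product) and not issparse(dense)
    assert np.allclose(product.toarray(), dense)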
def randomized_range_finder(A, size, n_iter, random_state=None):
"""Computes an orthonormal matrix whose range approximates the range of A.
Parameters
----------
A: 2D array
The input data matrix
size: integer
Size of the return array
n_iter: integer
Number of power iterations used to stabilize the result
random_state: RandomState or an int seed (0 by default)
A random number generator instance
Returns
-------
Q: 2D array
A (size x size) projection matrix, the range of which
approximates well the range of the input matrix A.
Notes
-----
Follows Algorithm 4.3 of
Finding structure with randomness: Stochastic algorithms for constructing
approximate matrix decompositions
    Halko, et al., 2009 (arXiv:0909.4061) http://arxiv.org/pdf/0909.4061
"""
random_state = check_random_state(random_state)
# generating random gaussian vectors r with shape: (A.shape[1], size)
R = random_state.normal(size=(A.shape[1], size))
# sampling the range of A using by linear projection of r
Y = safe_sparse_dot(A, R)
del R
# perform power iterations with Y to further 'imprint' the top
# singular vectors of A in Y
for i in xrange(n_iter):
Y = safe_sparse_dot(A, safe_sparse_dot(A.T, Y))
# extracting an orthonormal basis of the A range samples
Q, R = linalg.qr(Y, mode='economic')
return Q
def randomized_svd(M, n_components, n_oversamples=10, n_iter=0,
transpose='auto', flip_sign=True, random_state=0):
"""Computes a truncated randomized SVD
Parameters
----------
M: ndarray or sparse matrix
Matrix to decompose
n_components: int
Number of singular values and vectors to extract.
n_oversamples: int (default is 10)
Additional number of random vectors to sample the range of M so as
to ensure proper conditioning. The total number of random vectors
used to find the range of M is n_components + n_oversamples.
n_iter: int (default is 0)
Number of power iterations (can be used to deal with very noisy
problems).
transpose: True, False or 'auto' (default)
Whether the algorithm should be applied to M.T instead of M. The
result should approximately be the same. The 'auto' mode will
trigger the transposition if M.shape[1] > M.shape[0] since this
        implementation of randomized SVD tends to be a little faster in that
        case.
flip_sign: boolean, (True by default)
The output of a singular value decomposition is only unique up to a
permutation of the signs of the singular vectors. If `flip_sign` is
set to `True`, the sign ambiguity is resolved by making the largest
loadings for each component in the left singular vectors positive.
random_state: RandomState or an int seed (0 by default)
A random number generator instance to make behavior
Notes
-----
This algorithm finds a (usually very good) approximate truncated
singular value decomposition using randomization to speed up the
computations. It is particularly fast on large matrices on which
you wish to extract only a small number of components.
References
----------
* Finding structure with randomness: Stochastic algorithms for constructing
approximate matrix decompositions
Halko, et al., 2009 http://arxiv.org/abs/arXiv:0909.4061
* A randomized algorithm for the decomposition of matrices
Per-Gunnar Martinsson, Vladimir Rokhlin and Mark Tygert
"""
random_state = check_random_state(random_state)
n_random = n_components + n_oversamples
n_samples, n_features = M.shape
if transpose == 'auto' and n_samples > n_features:
transpose = True
if transpose:
# this implementation is a bit faster with smaller shape[1]
M = M.T
Q = randomized_range_finder(M, n_random, n_iter, random_state)
# project M to the (k + p) dimensional space using the basis vectors
B = safe_sparse_dot(Q.T, M)
# compute the SVD on the thin matrix: (k + p) wide
Uhat, s, V = linalg.svd(B, full_matrices=False)
del B
U = np.dot(Q, Uhat)
if flip_sign:
U, V = svd_flip(U, V)
if transpose:
# transpose back the results according to the input convention
return V[:n_components, :].T, s[:n_components], U[:, :n_components].T
else:
return U[:, :n_components], s[:n_components], V[:n_components, :]
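# --- Illustrative sketch (editor addition, not part of the original module) ---
# Hedged demo of randomized_svd on an exactly rank-3 matrix: the truncated
# decomposition recovers the leading singular values and reconstructs M.
# Defined as a helper so nothing runs at import time.
def _demo_randomized_svd():
    rng = np.random.RandomState(0)
    M = np.dot(rng.randn(50, 3), rng.randn(3, 40))     # rank-3 by construction
    U, s, V = randomized_svd(M, n_components=3, n_iter=3, random_state=0)
    s_exact = linalg.svd(M, compute_uv=False)[:3]
    assert np.allclose(s, s_exact)
    assert np.allclose(np.dot(U * s, V), M)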
def logsumexp(arr, axis=0):
"""Computes the sum of arr assuming arr is in the log domain.
Returns log(sum(exp(arr))) while minimizing the possibility of
over/underflow.
Examples
--------
>>> import numpy as np
>>> from sklearn.utils.extmath import logsumexp
>>> a = np.arange(10)
>>> np.log(np.sum(np.exp(a)))
9.4586297444267107
>>> logsumexp(a)
9.4586297444267107
"""
arr = np.rollaxis(arr, axis)
# Use the max to normalize, as with the log this is what accumulates
# the less errors
vmax = arr.max(axis=0)
out = np.log(np.sum(np.exp(arr - vmax), axis=0))
out += vmax
return out
def weighted_mode(a, w, axis=0):
"""Returns an array of the weighted modal (most common) value in a
If there is more than one such value, only the first is returned.
The bin-count for the modal bins is also returned.
This is an extension of the algorithm in scipy.stats.mode.
Parameters
----------
a : array_like
n-dimensional array of which to find mode(s).
w : array_like
n-dimensional array of weights for each value
axis : int, optional
Axis along which to operate. Default is 0, i.e. the first axis.
Returns
-------
vals : ndarray
Array of modal values.
score : ndarray
Array of weighted counts for each mode.
Examples
--------
>>> from sklearn.utils.extmath import weighted_mode
>>> x = [4, 1, 4, 2, 4, 2]
>>> weights = [1, 1, 1, 1, 1, 1]
>>> weighted_mode(x, weights)
(array([ 4.]), array([ 3.]))
The value 4 appears three times: with uniform weights, the result is
simply the mode of the distribution.
>>> weights = [1, 3, 0.5, 1.5, 1, 2] # deweight the 4's
>>> weighted_mode(x, weights)
(array([ 2.]), array([ 3.5]))
The value 2 has the highest score: it appears twice with weights of
1.5 and 2: the sum of these is 3.
See Also
--------
scipy.stats.mode
"""
if axis is None:
a = np.ravel(a)
w = np.ravel(w)
axis = 0
else:
a = np.asarray(a)
w = np.asarray(w)
axis = axis
if a.shape != w.shape:
w = np.zeros(a.shape, dtype=w.dtype) + w
scores = np.unique(np.ravel(a)) # get ALL unique values
testshape = list(a.shape)
testshape[axis] = 1
oldmostfreq = np.zeros(testshape)
oldcounts = np.zeros(testshape)
for score in scores:
template = np.zeros(a.shape)
ind = (a == score)
template[ind] = w[ind]
counts = np.expand_dims(np.sum(template, axis), axis)
mostfrequent = np.where(counts > oldcounts, score, oldmostfreq)
oldcounts = np.maximum(counts, oldcounts)
oldmostfreq = mostfrequent
return mostfrequent, oldcounts
def pinvh(a, cond=None, rcond=None, lower=True):
"""Compute the (Moore-Penrose) pseudo-inverse of a hermetian matrix.
Calculate a generalized inverse of a symmetric matrix using its
eigenvalue decomposition and including all 'large' eigenvalues.
Parameters
----------
a : array, shape (N, N)
        Real symmetric or complex Hermitian matrix to be pseudo-inverted
cond : float or None, default None
Cutoff for 'small' eigenvalues.
Singular values smaller than rcond * largest_eigenvalue are considered
zero.
If None or -1, suitable machine precision is used.
rcond : float or None, default None (deprecated)
Cutoff for 'small' eigenvalues.
Singular values smaller than rcond * largest_eigenvalue are considered
zero.
If None or -1, suitable machine precision is used.
lower : boolean
Whether the pertinent array data is taken from the lower or upper
triangle of a. (Default: lower)
Returns
-------
B : array, shape (N, N)
Raises
------
LinAlgError
If eigenvalue does not converge
Examples
--------
>>> import numpy as np
>>> a = np.random.randn(9, 6)
>>> a = np.dot(a, a.T)
>>> B = pinvh(a)
>>> np.allclose(a, np.dot(a, np.dot(B, a)))
True
>>> np.allclose(B, np.dot(B, np.dot(a, B)))
True
"""
a = np.asarray_chkfinite(a)
s, u = linalg.eigh(a, lower=lower)
if rcond is not None:
cond = rcond
if cond in [None, -1]:
t = u.dtype.char.lower()
factor = {'f': 1E3, 'd': 1E6}
cond = factor[t] * np.finfo(t).eps
# unlike svd case, eigh can lead to negative eigenvalues
above_cutoff = (abs(s) > cond * np.max(abs(s)))
psigma_diag = np.zeros_like(s)
psigma_diag[above_cutoff] = 1.0 / s[above_cutoff]
return np.dot(u * psigma_diag, np.conjugate(u).T)
def cartesian(arrays, out=None):
"""Generate a cartesian product of input arrays.
Parameters
----------
arrays : list of array-like
1-D arrays to form the cartesian product of.
out : ndarray
Array to place the cartesian product in.
Returns
-------
out : ndarray
2-D array of shape (M, len(arrays)) containing cartesian products
formed of input arrays.
Examples
--------
>>> cartesian(([1, 2, 3], [4, 5], [6, 7]))
array([[1, 4, 6],
[1, 4, 7],
[1, 5, 6],
[1, 5, 7],
[2, 4, 6],
[2, 4, 7],
[2, 5, 6],
[2, 5, 7],
[3, 4, 6],
[3, 4, 7],
[3, 5, 6],
[3, 5, 7]])
"""
arrays = [np.asarray(x) for x in arrays]
shape = (len(x) for x in arrays)
dtype = arrays[0].dtype
ix = np.indices(shape)
ix = ix.reshape(len(arrays), -1).T
if out is None:
out = np.empty_like(ix, dtype=dtype)
for n, arr in enumerate(arrays):
out[:, n] = arrays[n][ix[:, n]]
return out
def svd_flip(u, v, u_based_decision=True):
"""Sign correction to ensure deterministic output from SVD.
Adjusts the columns of u and the rows of v such that the loadings in the
columns in u that are largest in absolute value are always positive.
Parameters
----------
u, v : ndarray
u and v are the output of `linalg.svd` or
`sklearn.utils.extmath.randomized_svd`, with matching inner dimensions
so one can compute `np.dot(u * s, v)`.
u_based_decision : boolean, (default=True)
If True, use the columns of u as the basis for sign flipping. Otherwise,
use the rows of v. The choice of which variable to base the decision on
is generally algorithm dependent.
Returns
-------
u_adjusted, v_adjusted : arrays with the same dimensions as the input.
"""
if u_based_decision:
# columns of u, rows of v
max_abs_cols = np.argmax(np.abs(u), axis=0)
signs = np.sign(u[max_abs_cols, xrange(u.shape[1])])
u *= signs
v *= signs[:, np.newaxis]
else:
# rows of v, columns of u
max_abs_rows = np.argmax(np.abs(v), axis=1)
signs = np.sign(v[xrange(v.shape[0]), max_abs_rows])
u *= signs
v *= signs[:, np.newaxis]
return u, v
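# --- Illustrative sketch (editor addition, not part of the original module) ---
# Hedged demo of svd_flip: flipping the signs of U and V leaves the product
# unchanged, and svd_flip maps both sign choices to the same canonical output.
def _demo_svd_flip():
    rng = np.random.RandomState(0)
    X = rng.randn(6, 4)
    U, s, V = linalg.svd(X, full_matrices=False)
    U1, V1 = svd_flip(U.copy(), V.copy())
    U2, V2 = svd_flip(-U, -V)      # same decomposition, opposite sign choice
    assert np.allclose(U1, U2) and np.allclose(V1, V2)
    assert np.allclose(np.dot(U1 * s, V1), X)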
def log_logistic(X, out=None):
"""Compute the log of the logistic function, ``log(1 / (1 + e ** -x))``.
This implementation is numerically stable because it splits positive and
negative values::
-log(1 + exp(-x_i)) if x_i > 0
x_i - log(1 + exp(x_i)) if x_i <= 0
For the ordinary logistic function, use ``sklearn.utils.fixes.expit``.
Parameters
----------
X: array-like, shape (M, N) or (M, )
Argument to the logistic function
out: array-like, shape: (M, N) or (M, ), optional:
Preallocated output array.
Returns
-------
out: array, shape (M, N) or (M, )
Log of the logistic function evaluated at every point in x
Notes
-----
See the blog post describing this implementation:
http://fa.bianp.net/blog/2013/numerical-optimizers-for-logistic-regression/
"""
is_1d = X.ndim == 1
X = np.atleast_2d(X)
X = check_array(X, dtype=np.float64)
n_samples, n_features = X.shape
if out is None:
out = np.empty_like(X)
_log_logistic_sigmoid(n_samples, n_features, X, out)
if is_1d:
return np.squeeze(out)
return out
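# --- Illustrative sketch (editor addition, not part of the original module) ---
# Hedged demo of why the split formulation matters: the naive expression
# overflows for large negative inputs, while log_logistic stays finite.
def _demo_log_logistic():
    x = np.array([-800., -1., 0., 1., 800.])
    stable = log_logistic(x)
    with np.errstate(over='ignore'):
        naive = -np.log(1. + np.exp(-x))       # exp(800) overflows to inf
    assert not np.isfinite(naive).all()
    assert np.isfinite(stable).all()
    assert np.allclose(stable[1:], naive[1:])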
def softmax(X, copy=True):
"""
Calculate the softmax function.
The softmax function is calculated by
np.exp(X) / np.sum(np.exp(X), axis=1)
This will cause overflow when large values are exponentiated.
Hence the largest value in each row is subtracted from each data
point to prevent this.
Parameters
----------
X: array-like, shape (M, N)
Argument to the logistic function
copy: bool, optional
Copy X or not.
Returns
-------
out: array, shape (M, N)
Softmax function evaluated at every point in x
"""
if copy:
X = np.copy(X)
max_prob = np.max(X, axis=1).reshape((-1, 1))
X -= max_prob
np.exp(X, X)
sum_prob = np.sum(X, axis=1).reshape((-1, 1))
X /= sum_prob
return X
def safe_min(X):
"""Returns the minimum value of a dense or a CSR/CSC matrix.
    Adapted from http://stackoverflow.com/q/13426580
"""
if issparse(X):
if len(X.data) == 0:
return 0
m = X.data.min()
return m if X.getnnz() == X.size else min(m, 0)
else:
return X.min()
def make_nonnegative(X, min_value=0):
"""Ensure `X.min()` >= `min_value`."""
min_ = safe_min(X)
if min_ < min_value:
if issparse(X):
raise ValueError("Cannot make the data matrix"
" nonnegative because it is sparse."
" Adding a value to every entry would"
" make it no longer sparse.")
X = X + (min_value - min_)
return X
def _batch_mean_variance_update(X, old_mean, old_variance, old_sample_count):
"""Calculate an average mean update and a Youngs and Cramer variance update.
From the paper "Algorithms for computing the sample variance: analysis and
recommendations", by Chan, Golub, and LeVeque.
Parameters
----------
X : array-like, shape (n_samples, n_features)
Data to use for variance update
old_mean : array-like, shape: (n_features,)
old_variance : array-like, shape: (n_features,)
old_sample_count : int
Returns
-------
updated_mean : array, shape (n_features,)
updated_variance : array, shape (n_features,)
updated_sample_count : int
References
----------
    T. Chan, G. Golub, R. LeVeque. Algorithms for computing the sample variance:
    analysis and recommendations, The American Statistician, Vol. 37, No. 3,
    pp. 242-247
"""
new_sum = X.sum(axis=0)
new_variance = X.var(axis=0) * X.shape[0]
old_sum = old_mean * old_sample_count
n_samples = X.shape[0]
updated_sample_count = old_sample_count + n_samples
partial_variance = old_sample_count / (n_samples * updated_sample_count) * (
n_samples / old_sample_count * old_sum - new_sum) ** 2
unnormalized_variance = old_variance * old_sample_count + new_variance + \
partial_variance
return ((old_sum + new_sum) / updated_sample_count,
unnormalized_variance / updated_sample_count,
updated_sample_count)
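# --- Illustrative sketch (editor addition, not part of the original module) ---
# Hedged demo of the incremental update: folding a second batch into the
# statistics of a first batch reproduces the full-data mean and variance.
def _demo_batch_mean_variance_update():
    rng = np.random.RandomState(0)
    X = rng.rand(100, 3)
    first, second = X[:60], X[60:]
    mean, var, count = _batch_mean_variance_update(
        second, first.mean(axis=0), first.var(axis=0), first.shape[0])
    assert count == 100
    assert np.allclose(mean, X.mean(axis=0))
    assert np.allclose(var, X.var(axis=0))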
def _deterministic_vector_sign_flip(u):
"""Modify the sign of vectors for reproducibility
Flips the sign of elements of all the vectors (rows of u) such that
the absolute maximum element of each vector is positive.
Parameters
----------
u : ndarray
Array with vectors as its rows.
Returns
-------
u_flipped : ndarray with same shape as u
Array with the sign flipped vectors as its rows.
"""
max_abs_rows = np.argmax(np.abs(u), axis=1)
signs = np.sign(u[range(u.shape[0]), max_abs_rows])
u *= signs[:, np.newaxis]
return u
| bsd-3-clause |
grundgruen/zipline | zipline/gens/tradesimulation.py | 1 | 15561 | #
# Copyright 2014 Quantopian, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from contextlib2 import ExitStack
from logbook import Logger, Processor
from pandas.tslib import normalize_date
from zipline.utils.api_support import ZiplineAPI
from zipline.finance.trading import NoFurtherDataError
from zipline.protocol import (
BarData,
SIDData,
DATASOURCE_TYPE
)
log = Logger('Trade Simulation')
class AlgorithmSimulator(object):
EMISSION_TO_PERF_KEY_MAP = {
'minute': 'minute_perf',
'daily': 'daily_perf'
}
def __init__(self, algo, sim_params):
# ==============
# Simulation
# Param Setup
# ==============
self.sim_params = sim_params
# ==============
# Algo Setup
# ==============
self.algo = algo
self.algo_start = normalize_date(self.sim_params.first_open)
self.env = algo.trading_environment
# ==============
# Snapshot Setup
# ==============
# The algorithm's data as of our most recent event.
# We want an object that will have empty objects as default
# values on missing keys.
self.current_data = BarData()
# We don't have a datetime for the current snapshot until we
# receive a message.
self.simulation_dt = None
self.previous_dt = self.algo_start
# =============
# Logging Setup
# =============
# Processor function for injecting the algo_dt into
# user prints/logs.
def inject_algo_dt(record):
if 'algo_dt' not in record.extra:
record.extra['algo_dt'] = self.simulation_dt
self.processor = Processor(inject_algo_dt)
def transform(self, stream_in):
"""
Main generator work loop.
"""
        # Initialize the market open and close
mkt_open = self.algo.perf_tracker.market_open
mkt_close = self.algo.perf_tracker.market_close
# inject the current algo
# snapshot time to any log record generated.
with ExitStack() as stack:
stack.enter_context(self.processor)
stack.enter_context(ZiplineAPI(self.algo))
data_frequency = self.sim_params.data_frequency
self._call_before_trading_start(mkt_open)
for date, snapshot in stream_in:
expired_sids = self.env.asset_finder.lookup_expired_futures(
start=self.previous_dt, end=date)
self.previous_dt = date
self.simulation_dt = date
self.on_dt_changed(date)
# removing expired futures
for sid in expired_sids:
try:
del self.current_data[sid]
except KeyError:
continue
# If we're still in the warmup period. Use the event to
# update our universe, but don't yield any perf messages,
# and don't send a snapshot to handle_data.
if date < self.algo_start:
for event in snapshot:
if event.type == DATASOURCE_TYPE.SPLIT:
self.algo.blotter.process_split(event)
elif event.type == DATASOURCE_TYPE.TRADE:
self.update_universe(event)
self.algo.perf_tracker.process_trade(event)
elif event.type == DATASOURCE_TYPE.CUSTOM:
self.update_universe(event)
else:
messages = self._process_snapshot(
date,
snapshot,
self.algo.instant_fill,
)
# Perf messages are only emitted if the snapshot contained
# a benchmark event.
for message in messages:
yield message
# When emitting minutely, we need to call
# before_trading_start before the next trading day begins
if date == mkt_close:
if mkt_close <= self.algo.perf_tracker.last_close:
before_last_close = \
mkt_close < self.algo.perf_tracker.last_close
try:
mkt_open, mkt_close = \
self.env.next_open_and_close(mkt_close)
except NoFurtherDataError:
# If at the end of backtest history,
# skip advancing market close.
pass
if before_last_close:
self._call_before_trading_start(mkt_open)
elif data_frequency == 'daily':
next_day = self.env.next_trading_day(date)
if next_day is not None and \
next_day < self.algo.perf_tracker.last_close:
self._call_before_trading_start(next_day)
self.algo.portfolio_needs_update = True
self.algo.account_needs_update = True
self.algo.performance_needs_update = True
risk_message = self.algo.perf_tracker.handle_simulation_end()
yield risk_message
def _process_snapshot(self, dt, snapshot, instant_fill):
"""
Process a stream of events corresponding to a single datetime, possibly
returning a perf message to be yielded.
If @instant_fill = True, we delay processing of events until after the
user's call to handle_data, and we process the user's placed orders
before the snapshot's events. Note that this introduces a lookahead
        bias, since the user is effectively placing orders that are filled
        based on trades that happened prior to the call to handle_data.
If @instant_fill = False, we process Trade events before calling
handle_data. This means that orders are filled based on trades
occurring in the next snapshot. This is the more conservative model,
and as such it is the default behavior in TradingAlgorithm.
"""
# Flags indicating whether we saw any events of type TRADE and type
# BENCHMARK. Respectively, these control whether or not handle_data is
# called for this snapshot and whether we emit a perf message for this
# snapshot.
any_trade_occurred = False
benchmark_event_occurred = False
if instant_fill:
events_to_be_processed = []
# Assign process events to variables to avoid attribute access in
# innermost loops.
#
# Done here, to allow for perf_tracker or blotter to be swapped out
# or changed in between snapshots.
perf_process_trade = self.algo.perf_tracker.process_trade
perf_process_transaction = self.algo.perf_tracker.process_transaction
perf_process_order = self.algo.perf_tracker.process_order
perf_process_benchmark = self.algo.perf_tracker.process_benchmark
perf_process_split = self.algo.perf_tracker.process_split
perf_process_dividend = self.algo.perf_tracker.process_dividend
perf_process_commission = self.algo.perf_tracker.process_commission
perf_process_close_position = \
self.algo.perf_tracker.process_close_position
blotter_process_trade = self.algo.blotter.process_trade
blotter_process_benchmark = self.algo.blotter.process_benchmark
# Containers for the snapshotted events, so that the events are
# processed in a predictable order, without relying on the sorted order
# of the individual sources.
        # There is at most one benchmark per snapshot; this variable will be
        # set to that benchmark event iff one occurs.
benchmark = None
# trades and customs are initialized as a list since process_snapshot
# is most often called on market bars, which could contain trades or
# custom events.
trades = []
customs = []
closes = []
# splits and dividends are processed once a day.
#
        # The lists below are allocated lazily mainly to signal that these are
        # the infrequent cases for this method; the performance benefit from
        # deferring the allocation is marginal. The splits list will be
        # allocated when a split occurs in the snapshot.
splits = None
# dividends list will be allocated when a dividend occurs in the
# snapshot.
dividends = None
for event in snapshot:
if event.type == DATASOURCE_TYPE.TRADE:
trades.append(event)
elif event.type == DATASOURCE_TYPE.BENCHMARK:
benchmark = event
elif event.type == DATASOURCE_TYPE.SPLIT:
if splits is None:
splits = []
splits.append(event)
elif event.type == DATASOURCE_TYPE.CUSTOM:
customs.append(event)
elif event.type == DATASOURCE_TYPE.DIVIDEND:
if dividends is None:
dividends = []
dividends.append(event)
elif event.type == DATASOURCE_TYPE.CLOSE_POSITION:
closes.append(event)
else:
                log.warn("Unrecognized event={0}".format(event))
# Handle benchmark first.
#
# Internal broker implementation depends on the benchmark being
# processed first so that transactions and commissions reported from
# the broker can be injected.
if benchmark is not None:
benchmark_event_occurred = True
perf_process_benchmark(benchmark)
for txn, order in blotter_process_benchmark(benchmark):
if txn.type == DATASOURCE_TYPE.TRANSACTION:
perf_process_transaction(txn)
elif txn.type == DATASOURCE_TYPE.COMMISSION:
perf_process_commission(txn)
perf_process_order(order)
for trade in trades:
self.update_universe(trade)
any_trade_occurred = True
if instant_fill:
events_to_be_processed.append(trade)
else:
for txn, order in blotter_process_trade(trade):
if txn.type == DATASOURCE_TYPE.TRANSACTION:
perf_process_transaction(txn)
elif txn.type == DATASOURCE_TYPE.COMMISSION:
perf_process_commission(txn)
perf_process_order(order)
perf_process_trade(trade)
for custom in customs:
self.update_universe(custom)
for close in closes:
self.update_universe(close)
perf_process_close_position(close)
if splits is not None:
for split in splits:
# process_split is not assigned to a variable since it is
# called rarely compared to the other event processors.
self.algo.blotter.process_split(split)
perf_process_split(split)
if dividends is not None:
for dividend in dividends:
perf_process_dividend(dividend)
if any_trade_occurred:
new_orders = self._call_handle_data()
for order in new_orders:
perf_process_order(order)
if instant_fill:
# Now that handle_data has been called and orders have been placed,
# process the event stream to fill user orders based on the events
# from this snapshot.
for trade in events_to_be_processed:
for txn, order in blotter_process_trade(trade):
if txn is not None:
perf_process_transaction(txn)
if order is not None:
perf_process_order(order)
perf_process_trade(trade)
if benchmark_event_occurred:
return self.generate_messages(dt)
else:
return ()
def _call_handle_data(self):
"""
Call the user's handle_data, returning any orders placed by the algo
during the call.
"""
self.algo.event_manager.handle_data(
self.algo,
self.current_data,
self.simulation_dt,
)
orders = self.algo.blotter.new_orders
self.algo.blotter.new_orders = []
return orders
def _call_before_trading_start(self, dt):
dt = normalize_date(dt)
self.simulation_dt = dt
self.on_dt_changed(dt)
self.algo.before_trading_start(self.current_data)
def on_dt_changed(self, dt):
if self.algo.datetime != dt:
self.algo.on_dt_changed(dt)
def generate_messages(self, dt):
"""
Generator that yields perf messages for the given datetime.
"""
# Ensure that updated_portfolio has been called at least once for this
# dt before we emit a perf message. This is a no-op if
# updated_portfolio has already been called this dt.
self.algo.updated_portfolio()
self.algo.updated_account()
rvars = self.algo.recorded_vars
if self.algo.perf_tracker.emission_rate == 'daily':
perf_message = \
self.algo.perf_tracker.handle_market_close_daily()
perf_message['daily_perf']['recorded_vars'] = rvars
yield perf_message
elif self.algo.perf_tracker.emission_rate == 'minute':
# close the minute in the tracker, and collect the daily message if
# the minute is the close of the trading day
minute_message, daily_message = \
self.algo.perf_tracker.handle_minute_close(dt)
# collect and yield the minute's perf message
minute_message['minute_perf']['recorded_vars'] = rvars
yield minute_message
# if there was a daily perf message, collect and yield it
if daily_message:
daily_message['daily_perf']['recorded_vars'] = rvars
yield daily_message
def update_universe(self, event):
"""
Update the universe with new event information.
"""
# Update our knowledge of this event's sid
# rather than use if event.sid in ..., just trying
# and handling the exception is significantly faster
try:
sid_data = self.current_data[event.sid]
except KeyError:
sid_data = self.current_data[event.sid] = SIDData(event.sid)
sid_data.__dict__.update(event.__dict__)
| apache-2.0 |
kdebrab/pandas | pandas/tests/arrays/categorical/test_algos.py | 4 | 4074 | import pytest
import numpy as np
import pandas as pd
import pandas.util.testing as tm
@pytest.mark.parametrize('ordered', [True, False])
@pytest.mark.parametrize('categories', [
['b', 'a', 'c'],
['a', 'b', 'c', 'd'],
])
def test_factorize(categories, ordered):
cat = pd.Categorical(['b', 'b', 'a', 'c', None],
categories=categories,
ordered=ordered)
labels, uniques = pd.factorize(cat)
expected_labels = np.array([0, 0, 1, 2, -1], dtype=np.intp)
expected_uniques = pd.Categorical(['b', 'a', 'c'],
categories=categories,
ordered=ordered)
tm.assert_numpy_array_equal(labels, expected_labels)
tm.assert_categorical_equal(uniques, expected_uniques)
def test_factorized_sort():
cat = pd.Categorical(['b', 'b', None, 'a'])
labels, uniques = pd.factorize(cat, sort=True)
expected_labels = np.array([1, 1, -1, 0], dtype=np.intp)
expected_uniques = pd.Categorical(['a', 'b'])
tm.assert_numpy_array_equal(labels, expected_labels)
tm.assert_categorical_equal(uniques, expected_uniques)
def test_factorized_sort_ordered():
cat = pd.Categorical(['b', 'b', None, 'a'],
categories=['c', 'b', 'a'],
ordered=True)
labels, uniques = pd.factorize(cat, sort=True)
expected_labels = np.array([0, 0, -1, 1], dtype=np.intp)
expected_uniques = pd.Categorical(['b', 'a'],
categories=['c', 'b', 'a'],
ordered=True)
tm.assert_numpy_array_equal(labels, expected_labels)
tm.assert_categorical_equal(uniques, expected_uniques)
def test_isin_cats():
# GH2003
cat = pd.Categorical(["a", "b", np.nan])
result = cat.isin(["a", np.nan])
expected = np.array([True, False, True], dtype=bool)
tm.assert_numpy_array_equal(expected, result)
result = cat.isin(["a", "c"])
expected = np.array([True, False, False], dtype=bool)
tm.assert_numpy_array_equal(expected, result)
@pytest.mark.parametrize("empty", [[], pd.Series(), np.array([])])
def test_isin_empty(empty):
s = pd.Categorical(["a", "b"])
expected = np.array([False, False], dtype=bool)
result = s.isin(empty)
tm.assert_numpy_array_equal(expected, result)
class TestTake(object):
# https://github.com/pandas-dev/pandas/issues/20664
def test_take_warns(self):
cat = pd.Categorical(['a', 'b'])
with tm.assert_produces_warning(FutureWarning):
cat.take([0, -1])
def test_take_positive_no_warning(self):
cat = pd.Categorical(['a', 'b'])
with tm.assert_produces_warning(None):
cat.take([0, 0])
def test_take_bounds(self, allow_fill):
# https://github.com/pandas-dev/pandas/issues/20664
cat = pd.Categorical(['a', 'b', 'a'])
with pytest.raises(IndexError):
cat.take([4, 5], allow_fill=allow_fill)
def test_take_empty(self, allow_fill):
# https://github.com/pandas-dev/pandas/issues/20664
cat = pd.Categorical([], categories=['a', 'b'])
with pytest.raises(IndexError):
cat.take([0], allow_fill=allow_fill)
def test_positional_take(self, ordered):
cat = pd.Categorical(['a', 'a', 'b', 'b'], categories=['b', 'a'],
ordered=ordered)
result = cat.take([0, 1, 2], allow_fill=False)
expected = pd.Categorical(['a', 'a', 'b'], categories=cat.categories,
ordered=ordered)
tm.assert_categorical_equal(result, expected)
def test_positional_take_unobserved(self, ordered):
cat = pd.Categorical(['a', 'b'], categories=['a', 'b', 'c'],
ordered=ordered)
result = cat.take([1, 0], allow_fill=False)
expected = pd.Categorical(['b', 'a'], categories=cat.categories,
ordered=ordered)
tm.assert_categorical_equal(result, expected)
| bsd-3-clause |
antoinecarme/pyaf | tests/bugs/issue_106/insurance_exog.py | 1 | 1208 | import numpy as np
import pandas as pd
import pyaf.ForecastEngine as autof
# example from https://otexts.org/fpp2/lagged-predictors.html
df = pd.read_csv("https://raw.githubusercontent.com/antoinecarme/TimeSeriesData/master/fpp2/insurance.csv")
df.info()
(lTimeVar , lSigVar , lExogVar) = ("Index", "Quotes" , "TV.advert")
df_sig = df[[lTimeVar , lSigVar]]
df_exog = df[[lTimeVar , lExogVar]] # need time here
H = 4
df_sig.info()
df_exog.info()
lEngine = autof.cForecastEngine()
lEngine.mOptions.set_active_autoregressions(['ARX'])
lExogenousData = (df_exog , [lExogVar])
lEngine.train(df_sig , lTimeVar , lSigVar, H, lExogenousData);
lEngine.getModelInfo();
lEngine.standardPlots(name = "outputs/insurance")
dfapp_in = df_sig.copy();
dfapp_in.tail()
dfapp_out = lEngine.forecast(dfapp_in, H);
dfapp_out.tail(2 * H)
print("Forecast Columns " , dfapp_out.columns);
Forecast_DF = dfapp_out[[lTimeVar , lSigVar, lSigVar + '_Forecast']]
print(Forecast_DF.info())
print("Forecasts\n" , Forecast_DF.tail(H).values);
print("\n\n<ModelInfo>")
print(lEngine.to_json());
print("</ModelInfo>\n\n")
print("\n\n<Forecast>")
print(Forecast_DF.tail(2*H).to_json(date_format='iso'))
print("</Forecast>\n\n")
| bsd-3-clause |
cigroup-ol/metaopt | docs/_extensions/gen_gallery.py | 2 | 4135 | # -*- coding: UTF-8 -*-
# generate a thumbnail gallery of examples
template = """\
{%% extends "layout.html" %%}
{%% set title = "Thumbnail gallery" %%}
{%% block body %%}
<h3>Click on any image to see full size image and source code</h3>
<br/>
<li><a class="reference internal" href="#">Gallery</a><ul>
%s
</ul>
</li>
%s
{%% endblock %%}
"""
import os, glob, re, sys, warnings
import matplotlib.image as image
multiimage = re.compile('(.*?)(_\d\d){1,2}')
def make_thumbnail(args):
image.thumbnail(args[0], args[1], 0.3)
def out_of_date(original, derived):
return (not os.path.exists(derived) or
os.stat(derived).st_mtime < os.stat(original).st_mtime)
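# --- Illustrative sketch (editor addition, not part of the original extension) ---
# Hedged demo of the incremental-rebuild guard above: a derived file is rebuilt
# only when it is missing or older than its source. Uses a temporary directory
# so it never touches the real build tree; call _demo_out_of_date() manually.
def _demo_out_of_date():
    import tempfile
    tmpdir = tempfile.mkdtemp()
    src = os.path.join(tmpdir, 'source.png')
    thumb = os.path.join(tmpdir, 'thumb.png')
    open(src, 'w').close()
    assert out_of_date(src, thumb)        # derived file missing -> rebuild
    open(thumb, 'w').close()
    os.utime(thumb, (0, 0))               # force the thumbnail to look stale
    assert out_of_date(src, thumb)        # derived older than source -> rebuild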
def gen_gallery(app, doctree):
if app.builder.name != 'html':
return
outdir = app.builder.outdir
rootdir = 'plot_directive/mpl_examples'
# images we want to skip for the gallery because they are an unusual
# size that doesn't layout well in a table, or because they may be
# redundant with other images or uninteresting
skips = set([
'mathtext_examples',
'matshow_02',
'matshow_03',
'matplotlib_icon',
])
thumbnails = {}
rows = []
toc_rows = []
link_template = """\
<a href="%s"><img src="%s" border="0" alt="%s"/></a>
"""
header_template = """<div class="section" id="%s">\
<h4>%s<a class="headerlink" href="#%s" title="Permalink to this headline">¶</a></h4>"""
toc_template = """\
<li><a class="reference internal" href="#%s">%s</a></li>"""
dirs = ('api', 'pylab_examples', 'mplot3d', 'widgets', 'axes_grid' )
for subdir in dirs :
rows.append(header_template % (subdir, subdir, subdir))
toc_rows.append(toc_template % (subdir, subdir))
origdir = os.path.join('build', rootdir, subdir)
thumbdir = os.path.join(outdir, rootdir, subdir, 'thumbnails')
if not os.path.exists(thumbdir):
os.makedirs(thumbdir)
data = []
for filename in sorted(glob.glob(os.path.join(origdir, '*.png'))):
if filename.endswith("hires.png"):
continue
path, filename = os.path.split(filename)
basename, ext = os.path.splitext(filename)
if basename in skips:
continue
# Create thumbnails based on images in tmpdir, and place
# them within the build tree
orig_path = str(os.path.join(origdir, filename))
thumb_path = str(os.path.join(thumbdir, filename))
if out_of_date(orig_path, thumb_path) or True:
thumbnails[orig_path] = thumb_path
m = multiimage.match(basename)
if m is not None:
basename = m.group(1)
data.append((subdir, basename,
os.path.join(rootdir, subdir, 'thumbnails', filename)))
for (subdir, basename, thumbfile) in data:
if thumbfile is not None:
link = 'examples/%s/%s.html'%(subdir, basename)
rows.append(link_template%(link, thumbfile, basename))
if len(data) == 0:
warnings.warn("No thumbnails were found in %s" % subdir)
# Close out the <div> opened up at the top of this loop
rows.append("</div>")
content = template % ('\n'.join(toc_rows),
'\n'.join(rows))
# Only write out the file if the contents have actually changed.
# Otherwise, this triggers a full rebuild of the docs
gallery_path = os.path.join(app.builder.srcdir, '_templates', 'gallery.html')
if os.path.exists(gallery_path):
fh = file(gallery_path, 'r')
regenerate = fh.read() != content
fh.close()
else:
regenerate = True
if regenerate:
fh = file(gallery_path, 'w')
fh.write(content)
fh.close()
for key in app.builder.status_iterator(
thumbnails.iterkeys(), "generating thumbnails... ",
length=len(thumbnails)):
image.thumbnail(key, thumbnails[key], 0.3)
def setup(app):
app.connect('env-updated', gen_gallery)
| bsd-3-clause |
jorge2703/scikit-learn | examples/svm/plot_svm_kernels.py | 329 | 1971 | #!/usr/bin/python
# -*- coding: utf-8 -*-
"""
=========================================================
SVM-Kernels
=========================================================
Three different types of SVM-Kernels are displayed below.
The polynomial and RBF are especially useful when the
data-points are not linearly separable.
"""
print(__doc__)
# Code source: Gaël Varoquaux
# License: BSD 3 clause
import numpy as np
import matplotlib.pyplot as plt
from sklearn import svm
# Our dataset and targets
X = np.c_[(.4, -.7),
(-1.5, -1),
(-1.4, -.9),
(-1.3, -1.2),
(-1.1, -.2),
(-1.2, -.4),
(-.5, 1.2),
(-1.5, 2.1),
(1, 1),
# --
(1.3, .8),
(1.2, .5),
(.2, -2),
(.5, -2.4),
(.2, -2.3),
(0, -2.7),
(1.3, 2.1)].T
Y = [0] * 8 + [1] * 8
# figure number
fignum = 1
# fit the model
for kernel in ('linear', 'poly', 'rbf'):
clf = svm.SVC(kernel=kernel, gamma=2)
clf.fit(X, Y)
# plot the line, the points, and the nearest vectors to the plane
plt.figure(fignum, figsize=(4, 3))
plt.clf()
plt.scatter(clf.support_vectors_[:, 0], clf.support_vectors_[:, 1], s=80,
facecolors='none', zorder=10)
plt.scatter(X[:, 0], X[:, 1], c=Y, zorder=10, cmap=plt.cm.Paired)
plt.axis('tight')
x_min = -3
x_max = 3
y_min = -3
y_max = 3
XX, YY = np.mgrid[x_min:x_max:200j, y_min:y_max:200j]
Z = clf.decision_function(np.c_[XX.ravel(), YY.ravel()])
# Put the result into a color plot
Z = Z.reshape(XX.shape)
plt.figure(fignum, figsize=(4, 3))
plt.pcolormesh(XX, YY, Z > 0, cmap=plt.cm.Paired)
plt.contour(XX, YY, Z, colors=['k', 'k', 'k'], linestyles=['--', '-', '--'],
levels=[-.5, 0, .5])
plt.xlim(x_min, x_max)
plt.ylim(y_min, y_max)
plt.xticks(())
plt.yticks(())
fignum = fignum + 1
plt.show()
| bsd-3-clause |
mayblue9/bokeh | examples/app/stock_applet/stock_app_simple.py | 43 | 12408 | """
This file demonstrates a bokeh applet, which can either be viewed
directly on a bokeh-server, or embedded into a flask application.
See the README.md file in this directory for instructions on running.
"""
import logging
logging.basicConfig(level=logging.DEBUG)
from os import listdir
from os.path import dirname, join, splitext
import numpy as np
import pandas as pd
from bokeh.models import ColumnDataSource, Plot
from bokeh.plotting import figure, curdoc
from bokeh.properties import String, Instance
from bokeh.server.app import bokeh_app
from bokeh.server.utils.plugins import object_page
from bokeh.models.widgets import (HBox, VBox, VBoxForm, PreText,
Select, AppHBox, AppVBox, AppVBoxForm)
from bokeh.simpleapp import simpleapp
select1 = Select(name='ticker1', value='AAPL', options=['AAPL', 'GOOG', 'INTC', 'BRCM', 'YHOO'])
select2 = Select(name='ticker2', value='GOOG', options=['AAPL', 'GOOG', 'INTC', 'BRCM', 'YHOO'])
@simpleapp(select1, select2)
def stock(ticker1, ticker2):
pretext = PreText(text="", width=500)
df = get_data(ticker1, ticker2)
source = ColumnDataSource(data=df)
source.tags = ['main_source']
p = figure(
title="%s vs %s" % (ticker1, ticker2),
plot_width=400, plot_height=400,
tools="pan,wheel_zoom,box_select,reset",
title_text_font_size="10pt",
)
p.circle(ticker1 + "_returns", ticker2 + "_returns",
size=2,
nonselection_alpha=0.02,
source=source
)
stats = df.describe()
pretext.text = str(stats)
row1 = HBox(children=[p, pretext])
hist1 = hist_plot(df, ticker1)
hist2 = hist_plot(df, ticker2)
row2 = HBox(children=[hist1, hist2])
line1 = line_plot(ticker1, source)
line2 = line_plot(ticker2, source, line1.x_range)
output = VBox(children=[row1, row2, line1, line2])
return output
stock.route("/bokeh/stocks/")
@simpleapp(select1, select2)
def stock2(ticker1, ticker2):
pretext = PreText(text="", width=500)
df = get_data(ticker1, ticker2)
source = ColumnDataSource(data=df)
source.tags = ['main_source']
p = figure(
title="%s vs %s" % (ticker1, ticker2),
plot_width=400, plot_height=400,
tools="pan,wheel_zoom,box_select,reset",
title_text_font_size="10pt",
)
p.circle(ticker1 + "_returns", ticker2 + "_returns",
size=2,
nonselection_alpha=0.02,
source=source
)
stats = df.describe()
pretext.text = str(stats)
hist1 = hist_plot(df, ticker1)
hist2 = hist_plot(df, ticker2)
line1 = line_plot(ticker1, source)
line2 = line_plot(ticker2, source, line1.x_range)
return dict(scatterplot=p,
statstext=pretext,
hist1=hist1,
hist2=hist2,
line1=line1,
line2=line2)
@stock2.layout
def stock2_layout(app):
widgets = AppVBoxForm(app=app, children=['ticker1', 'ticker2'])
row1 = AppHBox(app=app, children=['scatterplot', 'statstext'])
row2 = AppHBox(app=app, children=['hist1', 'hist2'])
all_plots = AppVBox(app=app, children=[row1, row2, 'line1', 'line2'])
app = AppHBox(app=app, children=[widgets, all_plots])
return app
@stock2.update(['ticker1', 'ticker2'])
def stock2_update_input(ticker1, ticker2, app):
return stock2(ticker1, ticker2)
@stock2.update([({'tags' : 'main_source'}, ['selected'])])
def stock2_update_selection(ticker1, ticker2, app):
source = app.select_one({'tags' : 'main_source'})
df = get_data(ticker1, ticker2)
if source.selected:
selected_df = df.iloc[source.selected['1d']['indices'], :]
else:
selected_df = df
stats_text = app.objects['statstext']
stats_text.text = str(selected_df.describe())
return {
'hist1': hist_plot(df, ticker1, selected_df=selected_df),
'hist2': hist_plot(df, ticker2, selected_df=selected_df),
'statstext': stats_text,
}
stock2.route("/bokeh/stocks2/")
def hist_plot(df, ticker, selected_df=None):
if selected_df is None:
selected_df = df
global_hist, global_bins = np.histogram(df[ticker + "_returns"], bins=50)
hist, bins = np.histogram(selected_df[ticker + "_returns"], bins=50)
width = 0.7 * (bins[1] - bins[0])
center = (bins[:-1] + bins[1:]) / 2
start = global_bins.min()
end = global_bins.max()
top = hist.max()
p = figure(
title="%s hist" % ticker,
plot_width=500, plot_height=200,
tools="",
title_text_font_size="10pt",
x_range=[start, end],
y_range=[0, top],
)
p.rect(center, hist / 2.0, width, hist)
return p
def line_plot(ticker, source, x_range=None):
p = figure(
title=ticker,
x_range=x_range,
x_axis_type='datetime',
plot_width=1000, plot_height=200,
title_text_font_size="10pt",
tools="pan,wheel_zoom,box_select,reset"
)
p.circle(
'date', ticker,
size=2,
source=source,
nonselection_alpha=0.02
)
return p
# build up list of stock data in the daily folder
data_dir = join(dirname(__file__), "daily")
try:
tickers = listdir(data_dir)
except OSError as e:
print('Stock data not available, see README for download instructions.')
raise e
tickers = [splitext(x)[0].split("table_")[-1] for x in tickers]
# cache stock data as dict of pandas DataFrames
pd_cache = {}
def get_ticker_data(ticker):
fname = join(data_dir, "table_%s.csv" % ticker.lower())
data = pd.read_csv(
fname,
names=['date', 'foo', 'o', 'h', 'l', 'c', 'v'],
header=False,
parse_dates=['date']
)
data = data.set_index('date')
data = pd.DataFrame({ticker: data.c, ticker + "_returns": data.c.diff()})
return data
def get_data(ticker1, ticker2):
if pd_cache.get((ticker1, ticker2)) is not None:
return pd_cache.get((ticker1, ticker2))
    # only fetch and concatenate a second set of columns when the tickers differ
if ticker1 != ticker2:
data1 = get_ticker_data(ticker1)
data2 = get_ticker_data(ticker2)
data = pd.concat([data1, data2], axis=1)
else:
data = get_ticker_data(ticker1)
data = data.dropna()
pd_cache[(ticker1, ticker2)] = data
return data
# class StockApp(VBox):
# extra_generated_classes = [["StockApp", "StockApp", "VBox"]]
# jsmodel = "VBox"
# # text statistics
# pretext = Instance(PreText)
# # plots
# plot = Instance(Plot)
# line_plot1 = Instance(Plot)
# line_plot2 = Instance(Plot)
# hist1 = Instance(Plot)
# hist2 = Instance(Plot)
# # data source
# source = Instance(ColumnDataSource)
# # layout boxes
# mainrow = Instance(HBox)
# histrow = Instance(HBox)
# statsbox = Instance(VBox)
# # inputs
# ticker1 = String(default="AAPL")
# ticker2 = String(default="GOOG")
# ticker1_select = Instance(Select)
# ticker2_select = Instance(Select)
# input_box = Instance(VBoxForm)
# def __init__(self, *args, **kwargs):
# super(StockApp, self).__init__(*args, **kwargs)
# self._dfs = {}
# @classmethod
# def create(cls):
# """
# This function is called once, and is responsible for
# creating all objects (plots, datasources, etc)
# """
# # create layout widgets
# obj = cls()
# # create input widgets
# obj.make_inputs()
# # outputs
# obj.pretext = PreText(text="", width=500)
# obj.make_source()
# obj.make_plots()
# obj.make_stats()
# # layout
# obj.set_children()
# return obj
# def make_inputs(self):
# self.ticker1_select = Select(
# name='ticker1',
# value='AAPL',
# options=['AAPL', 'GOOG', 'INTC', 'BRCM', 'YHOO']
# )
# self.ticker2_select = Select(
# name='ticker2',
# value='GOOG',
# options=['AAPL', 'GOOG', 'INTC', 'BRCM', 'YHOO']
# )
# @property
# def selected_df(self):
# pandas_df = self.df
# selected = self.source.selected
# if selected:
# pandas_df = pandas_df.iloc[selected, :]
# return pandas_df
# def make_source(self):
# self.source = ColumnDataSource(data=self.df)
# def line_plot(self, ticker, x_range=None):
# p = figure(
# title=ticker,
# x_range=x_range,
# x_axis_type='datetime',
# plot_width=1000, plot_height=200,
# title_text_font_size="10pt",
# tools="pan,wheel_zoom,box_select,reset"
# )
# p.circle(
# 'date', ticker,
# size=2,
# source=self.source,
# nonselection_alpha=0.02
# )
# return p
# def hist_plot(self, ticker):
# global_hist, global_bins = np.histogram(self.df[ticker + "_returns"], bins=50)
# hist, bins = np.histogram(self.selected_df[ticker + "_returns"], bins=50)
# width = 0.7 * (bins[1] - bins[0])
# center = (bins[:-1] + bins[1:]) / 2
# start = global_bins.min()
# end = global_bins.max()
# top = hist.max()
# p = figure(
# title="%s hist" % ticker,
# plot_width=500, plot_height=200,
# tools="",
# title_text_font_size="10pt",
# x_range=[start, end],
# y_range=[0, top],
# )
# p.rect(center, hist / 2.0, width, hist)
# return p
# def make_plots(self):
# ticker1 = self.ticker1
# ticker2 = self.ticker2
# p = figure(
# title="%s vs %s" % (ticker1, ticker2),
# plot_width=400, plot_height=400,
# tools="pan,wheel_zoom,box_select,reset",
# title_text_font_size="10pt",
# )
# p.circle(ticker1 + "_returns", ticker2 + "_returns",
# size=2,
# nonselection_alpha=0.02,
# source=self.source
# )
# self.plot = p
# self.line_plot1 = self.line_plot(ticker1)
# self.line_plot2 = self.line_plot(ticker2, self.line_plot1.x_range)
# self.hist_plots()
# def hist_plots(self):
# ticker1 = self.ticker1
# ticker2 = self.ticker2
# self.hist1 = self.hist_plot(ticker1)
# self.hist2 = self.hist_plot(ticker2)
# def set_children(self):
# self.children = [self.mainrow, self.histrow, self.line_plot1, self.line_plot2]
# self.mainrow.children = [self.input_box, self.plot, self.statsbox]
# self.input_box.children = [self.ticker1_select, self.ticker2_select]
# self.histrow.children = [self.hist1, self.hist2]
# self.statsbox.children = [self.pretext]
# def input_change(self, obj, attrname, old, new):
# if obj == self.ticker2_select:
# self.ticker2 = new
# if obj == self.ticker1_select:
# self.ticker1 = new
# self.make_source()
# self.make_plots()
# self.set_children()
# curdoc().add(self)
# def setup_events(self):
# super(StockApp, self).setup_events()
# if self.source:
# self.source.on_change('selected', self, 'selection_change')
# if self.ticker1_select:
# self.ticker1_select.on_change('value', self, 'input_change')
# if self.ticker2_select:
# self.ticker2_select.on_change('value', self, 'input_change')
# def make_stats(self):
# stats = self.selected_df.describe()
# self.pretext.text = str(stats)
# def selection_change(self, obj, attrname, old, new):
# self.make_stats()
# self.hist_plots()
# self.set_children()
# curdoc().add(self)
# @property
# def df(self):
# return get_data(self.ticker1, self.ticker2)
# # The following code adds a "/bokeh/stocks/" url to the bokeh-server. This URL
# # will render this StockApp. If you don't want serve this applet from a Bokeh
# # server (for instance if you are embedding in a separate Flask application),
# # then just remove this block of code.
# @bokeh_app.route("/bokeh/stocks/")
# @object_page("stocks")
# def make_object():
# app = StockApp.create()
# return app
| bsd-3-clause |
mhdella/scikit-learn | sklearn/semi_supervised/tests/test_label_propagation.py | 307 | 1974 | """ test the label propagation module """
import nose
import numpy as np
from sklearn.semi_supervised import label_propagation
from numpy.testing import assert_array_almost_equal
from numpy.testing import assert_array_equal
ESTIMATORS = [
(label_propagation.LabelPropagation, {'kernel': 'rbf'}),
(label_propagation.LabelPropagation, {'kernel': 'knn', 'n_neighbors': 2}),
(label_propagation.LabelSpreading, {'kernel': 'rbf'}),
(label_propagation.LabelSpreading, {'kernel': 'knn', 'n_neighbors': 2})
]
def test_fit_transduction():
samples = [[1., 0.], [0., 2.], [1., 3.]]
labels = [0, 1, -1]
for estimator, parameters in ESTIMATORS:
clf = estimator(**parameters).fit(samples, labels)
nose.tools.assert_equal(clf.transduction_[2], 1)
def test_distribution():
samples = [[1., 0.], [0., 1.], [1., 1.]]
labels = [0, 1, -1]
for estimator, parameters in ESTIMATORS:
clf = estimator(**parameters).fit(samples, labels)
if parameters['kernel'] == 'knn':
continue # unstable test; changes in k-NN ordering break it
assert_array_almost_equal(clf.predict_proba([[1., 0.0]]),
np.array([[1., 0.]]), 2)
else:
assert_array_almost_equal(np.asarray(clf.label_distributions_[2]),
np.array([.5, .5]), 2)
def test_predict():
samples = [[1., 0.], [0., 2.], [1., 3.]]
labels = [0, 1, -1]
for estimator, parameters in ESTIMATORS:
clf = estimator(**parameters).fit(samples, labels)
assert_array_equal(clf.predict([[0.5, 2.5]]), np.array([1]))
def test_predict_proba():
samples = [[1., 0.], [0., 1.], [1., 2.5]]
labels = [0, 1, -1]
for estimator, parameters in ESTIMATORS:
clf = estimator(**parameters).fit(samples, labels)
assert_array_almost_equal(clf.predict_proba([[1., 1.]]),
np.array([[0.5, 0.5]]))
| bsd-3-clause |
adamcandy/qgis-plugins-meshing | dev/old/shape/slowExtract.py | 3 | 5318 | import shapefile
##########################################################################
#
# QGIS-meshing plugins.
#
# Copyright (C) 2012-2013 Imperial College London and others.
#
# Please see the AUTHORS file in the main source directory for a
# full list of copyright holders.
#
# Dr Adam S. Candy, [email protected]
# Applied Modelling and Computation Group
# Department of Earth Science and Engineering
# Imperial College London
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation,
# version 2.1 of the License.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307
# USA
#
##########################################################################
from shapely.geometry import *
import sys
import matplotlib.pyplot as pyplot
def checkIfPointIsInPointList(points, pt):
for p in points:
if p[0]==pt.x and p[1]== pt.y:
return True
return False
"""
This function checks if the given point is on the boundary.
@param point : specifies the point which has to be checked
@param bouds : specifies all the bpundary objects
@return : retucheckPointOnBoundary(p, boundaryPointList)rns true iff the point in on the boundary lines
print coords
"""
def checkPointOnBoundary(point, boundaryPoints):
for points in boundaryPoints:
if checkIfPointIsInPointList(points, p):
return True
#for s in bounds.shapes():
numberOfPoints = len(points)
for i in range(numberOfPoints-1):
line = LineString([(points[i][0],points[i][1]),(points[i+1][0], points[i+1][1])])
if (line.contains(point)):
return True
return False
"""
This method returns a list of all the boundary points
on the boundaries given. This method also returns polygons
consited of the given shapes
@param bounds : specfies the boundary shapes
@return : returns a tuple containing the polygons shapes
of the boundary and the exterior points list for
them
"""
def getBoundaryPointsList(bounds):
shapes = bounds.shapes()
pointsList = []
for i in range(len(shapes)):
pointsList.append(shapes[i].points)
polygons = []
for j in range(len(pointsList)):
polygons.append(Polygon([pointsList[j][i] for i in range(len(pointsList[j]))]))
return (polygons,pointsList)
def check_point_within_boundary(p, boundaries):
for poly in boundaries :
if p.within(poly):
return True
return False
def writeShapeFile(points, filepath) :
	#begin the instance of the writer class
w = shapefile.Writer()
	#ensure the shapes and records stay balanced (autoBalance)
w.autobalance = 1
i = 0
for l in points:
pList = []
pList.append(l)
if len(l)==1 :
w.point(pList[0][0],pList[0][1])
w.field("%d_FLD"%i,"C","40")
i+=1
elif len(l)==2 :
w.line(parts = pList)
w.field("%d_FLD"%i,"C","40")
i+=1
else :
w.poly(parts = pList)
w.field("%d_FLD"%i,"C","40")
i += 1
w.save(filepath)
print("Number of shapes Written %d" %i)
#check that the correct number of command line arguments was passed
assert len(sys.argv)==4, "Incorrect Number of Arguments passed"
"""
Sets the read and the write file stream according to
the command line arguments given.
The first argument specifies which shape file the user
wants to specify the boundaries of
The second arguments specifies the boundary polygon
The third argument specifies the file path to which the
new shape has top be written
"""
readPath = sys.argv[1]
boundaryPath = sys.argv[2]
writePath = sys.argv[3]
#input stream for the given shape
sf = shapefile.Reader(readPath)
#input stream of the boundaries
#bounds = shapefile.Reader(boundaryPath)
#shapes contained in the given file
shapes = sf.shapes();
#checks that there should only be one boundary
#assert len(bounds.shapes())==1, "More than one shape in the boundary. Currently only one shape can be specified as a boundary"
#boundary = bounds.shapes[0]
boundary = shapefile.Reader(boundaryPath)
# Create list of the points defined in the .shp file.
pointslist = []
for i in range(len(shapes)):
# Check datapoint is valid.
pointslist.append(shapes[i].points)
boundaryPolygons, boundaryPointList = getBoundaryPointsList(boundary)
i = -1
c = 1
points = []
list_to_save = []
for shape in shapes:
i += 1
x =[]; y=[]
pList = []
for point in pointslist[i]:
p = Point(point[0],point[1])
if check_point_within_boundary(p, boundaryPolygons) or checkPointOnBoundary(p, boundaryPointList) :
x.append(p.x)
y.append(p.y)
points.append(p)
pList.append(point)
c +=1
pyplot.plot(x,y)
if len(pList)>0 :
list_to_save.append(pList)
for p in points:
print(p)
if (len(list_to_save)>0) : writeShapeFile(list_to_save,writePath)
x=[]
y=[]
for p in boundaryPointList :
for p2 in p :
x.append(p2[0])
y.append(p2[1])
pyplot.plot(x,y)
pyplot.xlim(min(x)-1,max(x)+1)
pyplot.ylim(min(y)-1,max(y)+1)
pyplot.show()
| lgpl-2.1 |
RobertABT/heightmap | build/matplotlib/examples/user_interfaces/embedding_in_gtk2.py | 9 | 1452 | #!/usr/bin/env python
"""
show how to add a matplotlib FigureCanvasGTK or FigureCanvasGTKAgg widget and
a toolbar to a gtk.Window
"""
import gtk
from matplotlib.figure import Figure
from numpy import arange, sin, pi
# uncomment to select /GTK/GTKAgg/GTKCairo
#from matplotlib.backends.backend_gtk import FigureCanvasGTK as FigureCanvas
from matplotlib.backends.backend_gtkagg import FigureCanvasGTKAgg as FigureCanvas
#from matplotlib.backends.backend_gtkcairo import FigureCanvasGTKCairo as FigureCanvas
# or NavigationToolbar for classic
#from matplotlib.backends.backend_gtk import NavigationToolbar2GTK as NavigationToolbar
from matplotlib.backends.backend_gtkagg import NavigationToolbar2GTKAgg as NavigationToolbar
# implement the default mpl key bindings
from matplotlib.backend_bases import key_press_handler
win = gtk.Window()
win.connect("destroy", lambda x: gtk.main_quit())
win.set_default_size(400,300)
win.set_title("Embedding in GTK")
vbox = gtk.VBox()
win.add(vbox)
fig = Figure(figsize=(5,4), dpi=100)
ax = fig.add_subplot(111)
t = arange(0.0,3.0,0.01)
s = sin(2*pi*t)
ax.plot(t,s)
canvas = FigureCanvas(fig) # a gtk.DrawingArea
vbox.pack_start(canvas)
toolbar = NavigationToolbar(canvas, win)
vbox.pack_start(toolbar, False, False)
def on_key_event(event):
print('you pressed %s'%event.key)
key_press_handler(event, canvas, toolbar)
canvas.mpl_connect('key_press_event', on_key_event)
win.show_all()
gtk.main()
| mit |
bradleyhd/netsim | speedup_params.py | 1 | 2674 | import argparse, json, logging, random, datetime, os.path, pickle, requests
import networkx as nx
import time as time
import matplotlib.pyplot as plt
import numpy as np
from networkx.readwrite import json_graph
from mapsim.simulation.sim import Sim
from copy import deepcopy
parser = argparse.ArgumentParser(description='Draws a graph.')
parser.add_argument('graph_file', help='the name of the graph file')
parser.add_argument('--saveas', help='the name of the output file')
args = parser.parse_args()
smoothing_factors = [0.3, 0.5, 0.7, 0.9]
decay_factors = [1]
# smoothing_factors = [0.1]
# decay_factors = [0.9]
results = []
def run(sim, smoothing_factor, decay_factor, routes):
sim._config['graph_weight_smoothing_factor'] = smoothing_factor
sim._config['graph_weight_decay_factor'] = decay_factor
# --
# Run without adaptive routing
# --
sim._config['adaptive_routing'] = False
res = requests.get('%s/restart/%s/%s' % (config['routing_server_url'], smoothing_factor, decay_factor))
sim.setup()
history1 = sim.run()
cars1 = []
for car in sim.cars:
cars1.append({
'id': car.id,
'driving_time': car.total_driving_time,
'done': car.done
})
# --
# Run with adaptive routing
# --
sim._config['adaptive_routing'] = True
res = requests.get('%s/restart/%s/%s' % (config['routing_server_url'], smoothing_factor, decay_factor))
sim.setup()
history2 = sim.run()
cars2 = []
for car in sim.cars:
cars2.append({
'id': car.id,
'driving_time': car.total_driving_time,
'done': car.done
})
# --
# Calculate speedup
# --
xs = []
ys = []
for i in range(len(cars2)):
if cars1[i]['done'] and cars2[i]['done']:
speedup = (cars1[i]['driving_time'] - cars2[i]['driving_time'])
xs.append(i);
ys.append(speedup)
print('Smoothing: %s Decay: %s' % (smoothing_factor, decay_factor))
results.append((smoothing_factor, decay_factor, np.mean(ys)))
try:
print('Mean: %s Median: %s' % (np.mean(ys), np.median(ys)))
except:
pass
if __name__ == '__main__':
config = {}
with open('config.json', 'r') as file:
config = json.load(file)
config['graph_file'] = 'data/%s.graph' % args.graph_file
sim_data = {}
with open('data/' + args.graph_file + '.sim', 'rb') as file:
sim_data = pickle.load(file)
res = requests.get('http://localhost:5000/routes/generate/%d' % (config['num_cars']))
routes = res.json()
sim = Sim(config, sim_data['segments'], routes)
for s in smoothing_factors:
for d in decay_factors:
run(sim, s, d, routes)
f = open('results.txt', 'w')
f.write(json.dumps(results))
f.close()
| gpl-3.0 |
compops/gpo-joe2015 | scripts-paper/example1-spsa.py | 2 | 3787 | ##############################################################################
##############################################################################
# Estimating the volatility of synthetic data
# using a stochastic volatility (SV) model with Gaussian log-returns.
#
# The SV model is inferred using the SPSA algorithm.
#
# For more details, see https://github.com/compops/gpo-abc2015
#
# (c) 2016 Johan Dahlin
# liu (at) johandahlin.com
#
##############################################################################
##############################################################################
import sys
sys.path.insert(0, '/media/sf_home/src/gpo-abc2015')
# Setup files
output_file = 'results/example1/example1-spsa'
# Load packages and helpers
import numpy as np
import pandas as pd
import matplotlib.pylab as plt
from state import smc
from para import ml_spsa
from models import hwsv_4parameters
from misc.portfolio import ensure_dir
# Set the seed for re-producibility
np.random.seed(87655678)
##############################################################################
# Arrange the data structures
##############################################################################
sm = smc.smcSampler()
ml = ml_spsa.stMLspsa()
##############################################################################
# Setup the system
##############################################################################
sys = hwsv_4parameters.ssm()
sys.par = np.zeros((sys.nPar, 1))
sys.par[0] = 0.20
sys.par[1] = 0.96
sys.par[2] = 0.15
sys.par[3] = 0.00
sys.T = 500
sys.xo = 0.0
sys.version = "standard"
##############################################################################
# Generate data
##############################################################################
sys.generateData(
fileName='data/hwsv_4parameters_syntheticT500.csv', order="xy")
##############################################################################
# Setup the parameters
##############################################################################
th = hwsv_4parameters.ssm()
th.nParInference = 3
th.copyData(sys)
th.version = "standard"
##############################################################################
# Setup the SMC algorithm
##############################################################################
sm.filter = sm.bPF
sm.nPart = 1000
sm.genInitialState = True
sm.xo = sys.xo
th.xo = sys.xo
##############################################################################
# Setup the SPSA algorithm
##############################################################################
ml.a = 0.001
ml.c = 0.30
ml.maxIter = 350
ml.initPar = np.array([0.50, 0.95, 0.50])
##############################################################################
# SPSA using the Particle filter
##############################################################################
# Run the SPSA routine
ml.bayes(sm, sys, th)
# Write output for plotting
out = np.hstack((ml.th, ml.ll))
out = out.transpose()
#############################################################################
# Write results to file
##############################################################################
ensure_dir(output_file + '.csv')
# Model parameters
fileOut = pd.DataFrame(out)
fileOut.to_csv(output_file + '-model.csv')
##############################################################################
##############################################################################
# End of file
##############################################################################
##############################################################################
| mit |
aflaxman/scikit-learn | examples/svm/plot_separating_hyperplane_unbalanced.py | 9 | 2365 | """
=================================================
SVM: Separating hyperplane for unbalanced classes
=================================================
Find the optimal separating hyperplane using an SVC for classes that
are unbalanced.
We first find the separating plane with a plain SVC and then plot
(dashed) the separating hyperplane with automatic correction for the
unbalanced classes.
.. currentmodule:: sklearn.linear_model
.. note::
This example will also work by replacing ``SVC(kernel="linear")``
with ``SGDClassifier(loss="hinge")``. Setting the ``loss`` parameter
of the :class:`SGDClassifier` equal to ``hinge`` will yield behaviour
such as that of a SVC with a linear kernel.
For example try instead of the ``SVC``::
clf = SGDClassifier(n_iter=100, alpha=0.01)
"""
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from sklearn import svm
# we create 40 separable points
rng = np.random.RandomState(0)
n_samples_1 = 1000
n_samples_2 = 100
X = np.r_[1.5 * rng.randn(n_samples_1, 2),
0.5 * rng.randn(n_samples_2, 2) + [2, 2]]
y = [0] * (n_samples_1) + [1] * (n_samples_2)
# fit the model and get the separating hyperplane
clf = svm.SVC(kernel='linear', C=1.0)
clf.fit(X, y)
# fit the model and get the separating hyperplane using weighted classes
wclf = svm.SVC(kernel='linear', class_weight={1: 10})
wclf.fit(X, y)
# plot separating hyperplanes and samples
plt.scatter(X[:, 0], X[:, 1], c=y, cmap=plt.cm.Paired, edgecolors='k')
plt.legend()
# plot the decision functions for both classifiers
ax = plt.gca()
xlim = ax.get_xlim()
ylim = ax.get_ylim()
# create grid to evaluate model
xx = np.linspace(xlim[0], xlim[1], 30)
yy = np.linspace(ylim[0], ylim[1], 30)
YY, XX = np.meshgrid(yy, xx)
xy = np.vstack([XX.ravel(), YY.ravel()]).T
# get the separating hyperplane
Z = clf.decision_function(xy).reshape(XX.shape)
# plot decision boundary and margins
a = ax.contour(XX, YY, Z, colors='k', levels=[0], alpha=0.5, linestyles=['-'])
# get the separating hyperplane for weighted classes
Z = wclf.decision_function(xy).reshape(XX.shape)
# plot decision boundary and margins for weighted classes
b = ax.contour(XX, YY, Z, colors='r', levels=[0], alpha=0.5, linestyles=['-'])
plt.legend([a.collections[0], b.collections[0]], ["non weighted", "weighted"],
loc="upper right")
| bsd-3-clause |
bucricket/projectMASviirs | processviirs/processVIIRSorig.py | 1 | 149212 | #!/usr/bin/env python2
# -*- coding: utf-8 -*-
"""
Created on Wed Sep 6 08:54:48 2017
@author: mschull
"""
import os
import datetime
import pandas as pd
import numpy as np
import glob
import h5py
from pyresample import kd_tree, geometry
from pyresample import utils
import numpy.ma as ma
from osgeo import gdal,osr
import shutil
import gzip
import ephem
import subprocess
from osgeo.gdalconst import GA_ReadOnly
from joblib import Parallel, delayed
import time as timer
from pyresample.ewa import ll2cr, fornav
import argparse
import warnings
import sqlite3
#from .downloadData import runProcess
warnings.filterwarnings("ignore",category =RuntimeWarning)
#========utility functions=====================================================
def folders(base):
data_path = os.path.abspath(os.path.join(base,os.pardir,'VIIRS_DATA'))
if not os.path.exists(data_path):
os.makedirs(data_path)
processing_path = os.path.join(base,"PROCESSING")
if not os.path.exists(processing_path):
os.makedirs(processing_path)
static_path = os.path.join(base,"STATIC")
if not os.path.exists(static_path):
os.makedirs(static_path)
tile_base_path = os.path.join(base,"TILES")
if not os.path.exists(tile_base_path):
os.makedirs(tile_base_path)
grid_I5_path = os.path.join(processing_path,'grid_i5_data')
if not os.path.exists(grid_I5_path):
os.makedirs(grid_I5_path)
grid_I5_temp_path = os.path.join(grid_I5_path,'temp_i5_data')
if not os.path.exists(grid_I5_temp_path):
os.makedirs(grid_I5_temp_path)
agg_I5_path = os.path.join(processing_path,'agg_i5_data')
if not os.path.exists(agg_I5_path):
os.makedirs(agg_I5_path)
cloud_grid = os.path.join(processing_path,'grid_CM')
if not os.path.exists(cloud_grid):
os.makedirs(cloud_grid)
agg_cloud_path = os.path.join(cloud_grid,'agg_cloud_data')
if not os.path.exists(agg_cloud_path):
os.makedirs(agg_cloud_path)
temp_cloud_data = os.path.join(cloud_grid,'temp_cloud_data')
if not os.path.exists(temp_cloud_data):
os.makedirs(temp_cloud_data)
calc_rnet_path = os.path.join(processing_path,'CALC_RNET')
if not os.path.exists(calc_rnet_path):
os.makedirs(calc_rnet_path)
overpass_correction_path = os.path.join(processing_path,"overpass_corr")
if not os.path.exists(overpass_correction_path):
os.makedirs(overpass_correction_path)
CFSR_path = os.path.join(static_path,"CFSR")
if not os.path.exists(CFSR_path):
os.makedirs(CFSR_path)
fsun_trees_path = os.path.join(processing_path,'FSUN_TREES')
if not os.path.exists(fsun_trees_path):
os.makedirs(fsun_trees_path)
rnet_tile_path = os.path.join(calc_rnet_path,'tiles')
if not os.path.exists(rnet_tile_path):
os.makedirs(rnet_tile_path)
dtrad_path = os.path.join(processing_path,'DTRAD_PREDICTION')
if not os.path.exists(dtrad_path):
os.makedirs(dtrad_path)
out = {'grid_I5_path':grid_I5_path,'grid_I5_temp_path':grid_I5_temp_path,
'agg_I5_path':agg_I5_path,'data_path':data_path,
'cloud_grid': cloud_grid,'temp_cloud_data':temp_cloud_data,
'agg_cloud_path':agg_cloud_path,'processing_path':processing_path,
'static_path':static_path,'tile_base_path':tile_base_path,
'overpass_correction_path':overpass_correction_path,
'CFSR_path':CFSR_path,'calc_rnet_path':calc_rnet_path,
'fsun_trees_path':fsun_trees_path,'rnet_tile_path':rnet_tile_path}
return out
base = os.getcwd()
Folders = folders(base)
grid_I5_path = Folders['grid_I5_path']
grid_I5_temp_path = Folders['grid_I5_temp_path']
agg_I5_path = Folders['agg_I5_path']
data_path = Folders['data_path']
cloud_grid = Folders['cloud_grid']
cloud_temp_path = Folders['temp_cloud_data']
agg_cloud_path = Folders['agg_cloud_path']
processing_path = Folders['processing_path']
static_path = Folders['static_path']
tile_base_path = Folders['tile_base_path']
overpass_corr_path = Folders['overpass_correction_path']
CFSR_path = Folders['CFSR_path']
calc_rnet_path = Folders['calc_rnet_path']
fsun_trees_path = Folders['fsun_trees_path']
rnet_tile_path = Folders['rnet_tile_path']
def tile2latlon(tile):
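    # Convert a tile index on the global 24-column grid of 15-degree boxes to
    # the [lat, lon] of that tile's lower-left corner.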
row = tile/24
col = tile-(row*24)
# find lower left corner
lat= (75.-row*15.)-15.
lon=(col*15.-180.)-15.
return [lat,lon]
def writeArray2Tiff(data,res,UL,inProjection,outfile,outFormat):
xres = res[0]
yres = res[1]
ysize = data.shape[0]
xsize = data.shape[1]
ulx = UL[0] #- (xres / 2.)
uly = UL[1]# - (yres / 2.)
driver = gdal.GetDriverByName('GTiff')
ds = driver.Create(outfile, xsize, ysize, 1, outFormat)
#ds = driver.Create(outfile, xsize, ysize, 1, gdal.GDT_Int16)
srs = osr.SpatialReference()
if isinstance(inProjection, basestring):
srs.ImportFromProj4(inProjection)
else:
srs.ImportFromEPSG(inProjection)
ds.SetProjection(srs.ExportToWkt())
gt = [ulx, xres, 0, uly, 0, -yres ]
ds.SetGeoTransform(gt)
ds.GetRasterBand(1).WriteArray(data)
ds.FlushCache()
def convertBin2tif(inFile,inUL,shape,res,informat,outFormat,flip=False):
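    # Read a flat binary grid, reshape it to (rows, cols), optionally flip it
    # north-south, and write it out as a GeoTIFF in geographic WGS84 coordinates.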
inProj4 = '+proj=longlat +ellps=WGS84 +datum=WGS84 +no_defs'
read_data = np.fromfile(inFile, dtype=informat)
dataset = read_data.reshape([shape[0],shape[1]])
dataset = np.array(dataset,dtype=informat)
if flip == True:
dataset = np.flipud(dataset)
outTif = inFile[:-4]+".tif"
writeArray2Tiff(dataset,res,inUL,inProj4,outTif,outFormat)
def gunzip(fn, *positional_parameters, **keyword_parameters):
inF = gzip.GzipFile(fn, 'rb')
s = inF.read()
inF.close()
if ('out_fn' in keyword_parameters):
outF = file(keyword_parameters['out_fn'], 'wb')
else:
outF = file(fn[:-3], 'wb')
outF.write(s)
outF.close()
def get_rise55(year,doy,tile):
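    # UTC time of local solar noon at the tile centre (pyephem sun transit),
    # minus 1.5 hours, returned as decimal hours.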
dd=datetime.datetime(year,1,1)+datetime.timedelta(days=doy-1)
o = ephem.Observer()
lat,lon = tile2latlon(tile)
o.lat, o.long = '%3.2f' % (lat+7.5), '%3.2f' % (lon+7.5)
sun = ephem.Sun()
sunrise = o.previous_rising(sun, start=dd)
noon = o.next_transit(sun, start=sunrise)
hr = noon.datetime().hour
minute = noon.datetime().minute
minfraction = minute/60.
return (hr+minfraction)-1.5
def is_odd(num):
return num % 2 != 0
def getGrabTime(time):
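    # Round an HHMM-style time up to the next multiple of 600 (the 6-hourly
    # CFSR timestamps); exact multiples are bumped to the following one.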
return int(((time/600)+1)*600)
def getGrabTimeInv(grab_time,doy):
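    # Split a grabbed hour into the CFSR analysis hour and forecast hour
    # (odd hours use the previous analysis with a 3-hour forecast); hour 24
    # wraps to hour 0 of the next day.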
if is_odd(grab_time):
hr = grab_time-3
forecastHR = 3
else:
hr = grab_time
forecastHR = 0
if hr == 24:
hr = 0
doy+=1
return hr, forecastHR,doy
def writeCTL(tile,year,doy):
LLlat,LLlon = tile2latlon(tile)
#====create insol.ctl====================================
date = "%d%03d" % (year,doy)
date_tile_str = "T%03d_%s" % (tile,date)
srcfn = os.path.join(static_path,'INSOL','deg125','insol55_2011%03d.dat' % doy)
dtimedates = np.array(range(1,366,7))
rday = dtimedates[dtimedates>=doy][0]
# riseddd="%d%03d" %(year,rday)
riseddd="%d%03d" %(2015,rday) # FOR RT UNTIL I GET RT DATA FROM CHRIS
data = './%s_insol.dat' % date_tile_str
shutil.copyfile(srcfn,data)
fn = os.path.join('./%s_insol.ctl'% date_tile_str)
file = open(fn, "w")
file.write("dset ^%s\n" % data)
file.write("options template\n")
file.write("title soil moisture\n")
file.write("undef -9999.\n")
file.write("xdef 2880 linear -180.0 .125\n")
file.write("ydef 1200 linear -60.0 .125\n")
file.write("zdef 1 levels 1 1\n")
file.write("tdef 365 linear 0z01jan2002 1dy\n")
file.write("vars 1\n")
file.write("soil 0 0 soil\n")
file.write("endvars\n")
file.close()
#====create rnet.ctl======================================
srcfn = os.path.join(static_path,'5KM','RNET','RNET%s.dat.gz' % riseddd)
data = os.path.join('./%s_rnet.dat' % date_tile_str)
gunzip(srcfn,out_fn=data)
rnet05 = np.fromfile(data, dtype=np.float32)
rnet05 = np.flipud(rnet05.reshape([3000,7200]))
rnet05.tofile(data)
fn = os.path.join('./%s_rnet.ctl' % date_tile_str)
file = open(fn, "w")
file.write("dset ^%s\n" % data)
file.write("options yrev template\n")
file.write("title soil moisture\n")
file.write("undef -9999.\n")
file.write("xdef 7200 linear -180.0 .05\n")
file.write("ydef 3000 linear -60.0 .05\n")
file.write("zdef 1 levels 1 1\n")
file.write("tdef 365 linear 0z01jan2002 1dy\n")
file.write("vars 1\n")
file.write("soil 0 0 soil\n")
file.write("endvars\n")
file.close()
#====create albedo.ctl======================================
srcfn = os.path.join(static_path,'ALBEDO','ALBEDO_T%03d.dat' % tile)
data = os.path.join('./%s_albedo.dat' % date_tile_str)
shutil.copyfile(srcfn,data)
fn = os.path.join('./%s_albedo.ctl' % date_tile_str)
file = open(fn, "w")
file.write("dset ^%s\n" % data)
file.write("options template\n")
file.write("title soil moisture\n")
file.write("undef -9999.\n")
file.write("xdef 3750 linear %3.2f .004\n" % LLlon)
file.write("ydef 3750 linear %3.2f .004\n" % LLlat)
file.write("zdef 1 levels 1 1\n")
file.write("tdef 365 linear 0z01jan2002 1dy\n")
file.write("vars 1\n")
file.write("soil 0 0 soil\n")
file.write("endvars\n")
file.close()
#====create lst2.ctl======================================
srcfn = os.path.join(tile_base_path,'T%03d' % tile,'FINAL_DAY_LST_TIME2_%s_T%03d.dat' % (date,tile))
data = os.path.join('./%s_lst2.dat' % date_tile_str)
shutil.copyfile(srcfn,data)
fn = os.path.join('./%s_lst2.ctl' % date_tile_str)
file = open(fn, "w")
file.write("dset ^%s\n" % data)
file.write("options template\n")
file.write("title soil moisture\n")
file.write("undef -9999.\n")
file.write("xdef 3750 linear %3.2f .004\n" % LLlon)
file.write("ydef 3750 linear %3.2f .004\n" % LLlat)
file.write("zdef 1 levels 1 1\n")
file.write("tdef 365 linear 0z01jan2002 1dy\n")
file.write("vars 1\n")
file.write("soil 0 0 soil\n")
file.write("endvars\n")
file.close()
#====create lwdn.ctl======================================
time = get_rise55(year,doy,tile)
grab_time = getGrabTime(int(time)*100)
hr,forecastHR,cfsr_doy = getGrabTimeInv(grab_time/100,doy)
cfsr_date = "%d%03d" % (year,cfsr_doy)
if (grab_time)==2400:
grab_time = 0000
srcfn = os.path.join(static_path,'CFSR','%d' % year,'%03d' % cfsr_doy,'sfc_lwdn_%s_%02d00.dat' % (cfsr_date,grab_time/100))
data = os.path.join('./%s_lwdn.dat' % date_tile_str)
shutil.copyfile(srcfn,data)
fn = os.path.join('./%s_lwdn.ctl' % date_tile_str)
file = open(fn, "w")
file.write("dset ^%s\n" % data)
file.write("options template\n")
file.write("title soil moisture\n")
file.write("undef -9999.\n")
file.write("xdef 1440 linear -180.0 .25\n")
file.write("ydef 720 linear -90.0 .25\n")
file.write("zdef 1 levels 1 1\n")
file.write("tdef 365 linear 0z01jan2002 1dy\n")
file.write("vars 1\n")
file.write("soil 0 0 soil\n")
file.write("endvars\n")
file.close()
def write_agg_insol(outfn,date_tile_str):
fn = os.path.join('./%s_agg_insol.gs' % date_tile_str)
file = open(fn, "w")
file.write("function main(args)\n")
file.write("lat1=subwrd(args,1);if(lat1='');lat1"";endif\n")
file.write("lat2=subwrd(args,2);if(lat2='');lat2"";endif\n")
file.write("lon1=subwrd(args,3);if(lon1='');lon1"";endif\n")
file.write("lon2=subwrd(args,4);if(lon2='');lon2"";endif\n")
file.write("\n")
file.write("say lat1\n")
file.write("say lat2\n")
file.write("say lon1\n")
file.write("say lon2\n")
file.write("\n")
file.write("'reinit'\n")
file.write("'open ./%s_insol.ctl'\n" % date_tile_str)
file.write("'set lat ' lat1+0.025 ' ' lat2-0.025\n")
file.write("'set lon ' lon1+0.025 ' ' lon2-0.025\n")
file.write("'set undef -9999.'\n")
file.write("'define test=re(soil.1,0.05,0.05)'\n")
file.write("'set gxout fwrite'\n")
file.write("'set fwrite %s'\n" % outfn)
file.write("'d test'\n")
file.write("'disable fwrite'\n")
file.close()
def write_agg_insol_viirs(outfn,date_tile_str):
fn = os.path.join('./%s_agg_insol_viirs.gs' % date_tile_str)
file = open(fn, "w")
file.write("function main(args)\n")
file.write("lat1=subwrd(args,1);if(lat1='');lat1"";endif\n")
file.write("lat2=subwrd(args,2);if(lat2='');lat2"";endif\n")
file.write("lon1=subwrd(args,3);if(lon1='');lon1"";endif\n")
file.write("lon2=subwrd(args,4);if(lon2='');lon2"";endif\n")
file.write("\n")
file.write("say lat1\n")
file.write("say lat2\n")
file.write("say lon1\n")
file.write("say lon2\n")
file.write("\n")
file.write("'reinit'\n")
file.write("'open ./%s_insol.ctl'\n" % date_tile_str)
file.write("'set lat ' lat1+0.002 ' ' lat2-0.002\n")
file.write("'set lon ' lon1+0.002 ' ' lon2-0.002\n")
file.write("'set undef -9999.'\n")
file.write("'define test=re(soil.1,0.004,0.004)'\n")
file.write("'set gxout fwrite'\n")
file.write("'set fwrite %s'\n" % outfn)
file.write("'d test'\n")
file.write("'disable fwrite'\n")
file.close()
def write_agg_rnet(outfn,date_tile_str):
fn = os.path.join('./%s_agg_rnet.gs' % date_tile_str)
file = open(fn, "w")
file.write("function main(args)\n")
file.write("lat1=subwrd(args,1);if(lat1='');lat1"";endif\n")
file.write("lat2=subwrd(args,2);if(lat2='');lat2"";endif\n")
file.write("lon1=subwrd(args,3);if(lon1='');lon1"";endif\n")
file.write("lon2=subwrd(args,4);if(lon2='');lon2"";endif\n")
file.write("\n")
file.write("say lat1\n")
file.write("say lat2\n")
file.write("say lon1\n")
file.write("say lon2\n")
file.write("\n")
file.write("'reinit'\n")
file.write("'open ./%s_rnet.ctl'\n" % date_tile_str)
file.write("'set lat ' lat1+0.025 ' ' lat2-0.025\n")
file.write("'set lon ' lon1+0.025 ' ' lon2-0.025\n")
file.write("'set undef -9999.'\n")
file.write("'define test=re(soil.1,0.05,0.05)'\n")
file.write("'set gxout fwrite'\n")
file.write("'set fwrite %s'\n" % outfn)
file.write("'d test'\n")
file.write("'disable fwrite'\n")
file.close()
def write_agg_albedo(outfn,date_tile_str):
fn = os.path.join('./%s_agg_albedo.gs' % date_tile_str)
file = open(fn, "w")
file.write("function main(args)\n")
file.write("lat1=subwrd(args,1);if(lat1='');lat1"";endif\n")
file.write("lat2=subwrd(args,2);if(lat2='');lat2"";endif\n")
file.write("lon1=subwrd(args,3);if(lon1='');lon1"";endif\n")
file.write("lon2=subwrd(args,4);if(lon2='');lon2"";endif\n")
file.write("\n")
file.write("say lat1\n")
file.write("say lat2\n")
file.write("say lon1\n")
file.write("say lon2\n")
file.write("\n")
file.write("'reinit'\n")
file.write("'open ./%s_albedo.ctl'\n" % date_tile_str)
file.write("'set lat ' lat1+0.025 ' ' lat2-0.025\n")
file.write("'set lon ' lon1+0.025 ' ' lon2-0.025\n")
file.write("'set undef -9999.'\n")
file.write("'define test=re(soil.1,0.05,0.05)'\n")
file.write("'set gxout fwrite'\n")
file.write("'set fwrite %s'\n" % outfn)
file.write("'d test'\n")
file.write("'disable fwrite'\n")
file.close()
def write_agg_lst2(outfn,date_tile_str):
fn = os.path.join('./%s_agg_lst2.gs' % date_tile_str)
file = open(fn, "w")
file.write("function main(args)\n")
file.write("lat1=subwrd(args,1);if(lat1='');lat1"";endif\n")
file.write("lat2=subwrd(args,2);if(lat2='');lat2"";endif\n")
file.write("lon1=subwrd(args,3);if(lon1='');lon1"";endif\n")
file.write("lon2=subwrd(args,4);if(lon2='');lon2"";endif\n")
file.write("\n")
file.write("say lat1\n")
file.write("say lat2\n")
file.write("say lon1\n")
file.write("say lon2\n")
file.write("\n")
file.write("'reinit'\n")
file.write("'open ./%s_lst2.ctl'\n" % date_tile_str)
file.write("'set lat ' lat1+0.025 ' ' lat2-0.025\n")
file.write("'set lon ' lon1+0.025 ' ' lon2-0.025\n")
file.write("'set undef -9999.'\n")
file.write("'define test=re(soil.1,0.05,0.05)'\n")
file.write("'set gxout fwrite'\n")
file.write("'set fwrite %s'\n" % outfn)
file.write("'d test'\n")
file.write("'disable fwrite'\n")
file.close()
def write_agg_lwdn(outfn,date_tile_str):
fn = os.path.join('./%s_agg_lwdn.gs' % date_tile_str)
file = open(fn, "w")
file.write("function main(args)\n")
file.write("lat1=subwrd(args,1);if(lat1='');lat1"";endif\n")
file.write("lat2=subwrd(args,2);if(lat2='');lat2"";endif\n")
file.write("lon1=subwrd(args,3);if(lon1='');lon1"";endif\n")
file.write("lon2=subwrd(args,4);if(lon2='');lon2"";endif\n")
file.write("\n")
file.write("say lat1\n")
file.write("say lat2\n")
file.write("say lon1\n")
file.write("say lon2\n")
file.write("\n")
file.write("'reinit'\n")
file.write("'open ./%s_lwdn.ctl'\n" % date_tile_str)
file.write("'set lat ' lat1+0.025 ' ' lat2-0.025\n")
file.write("'set lon ' lon1+0.025 ' ' lon2-0.025\n")
file.write("'set undef -9999.'\n")
file.write("'define test=re(soil.1,0.05,0.05)'\n")
file.write("'set gxout fwrite'\n")
file.write("'set fwrite %s'\n" % outfn)
file.write("'d test'\n")
file.write("'disable fwrite'\n")
file.close()
def write_agg_lwdn_viirs(outfn,date_tile_str):
fn = os.path.join('./%s_agg_lwdn_viirs.gs'% date_tile_str)
file = open(fn, "w")
file.write("function main(args)\n")
file.write("lat1=subwrd(args,1);if(lat1='');lat1"";endif\n")
file.write("lat2=subwrd(args,2);if(lat2='');lat2"";endif\n")
file.write("lon1=subwrd(args,3);if(lon1='');lon1"";endif\n")
file.write("lon2=subwrd(args,4);if(lon2='');lon2"";endif\n")
file.write("\n")
file.write("say lat1\n")
file.write("say lat2\n")
file.write("say lon1\n")
file.write("say lon2\n")
file.write("\n")
file.write("'reinit'\n")
file.write("'open ./%s_lwdn.ctl'\n" % date_tile_str)
file.write("'set lat ' lat1+0.002 ' ' lat2-0.002\n")
file.write("'set lon ' lon1+0.002 ' ' lon2-0.002\n")
file.write("'set undef -9999.'\n")
file.write("'define test=re(soil.1,0.004,0.004)'\n")
file.write("'set gxout fwrite'\n")
file.write("'set fwrite %s'\n" % outfn)
file.write("'d test'\n")
file.write("'disable fwrite'\n")
file.close()
def get_tiles_fstem_names(namefn):
of = open(namefn,'w')
of.write("rnet.\n")
of.write("\n")
of.write("rnet: continuous.\n")
of.write("albedo: continuous.\n")
of.write("insol: continuous.\n")
of.write("lwdn: continuous.\n")
of.write("lst2: continuous.\n")
of.close()
def get_trees_fstem_names(namefn):
of = open(namefn,'w')
of.write("fsun.\n")
of.write("\n")
of.write("fsun: continuous.\n")
of.write("dthr: continuous.\n")
of.write("rnet_dthr: continuous.\n")
of.write("rnet: continuous.\n")
of.write("trad2: continuous.\n")
of.close()
def get_trees_fstem_namesV2(namefn):
of = open(namefn,'w')
of.write("fsun.\n")
of.write("\n")
of.write("fsun: continuous.\n")
of.write("dthr_corr: continuous.\n")
of.write("lai: continuous.\n")
of.write("trad2: continuous.\n")
of.close()
def readCubistOut(input,outDF):
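    # Parse the rule text captured from the cubist executable ("if"/"then"
    # blocks) and evaluate the piecewise linear model on the rows of outDF;
    # rows with any negative predictor are set to -9999.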
outAltSplit = input.split('\n')
b = np.char.strip(outAltSplit)
ifIndex = np.argwhere(np.array(b) == "if")
thenIndex = np.argwhere(np.array(b) == "then")
lstOut = np.zeros([outDF.shape[0]])
count = 0
mask_formula = ''
for name in list(outDF):
count+=1
if count < len(list(outDF)):
mask_formula = mask_formula + '(outDF["%s"] < 0.0) | ' % name
else:
mask_formula = mask_formula + '(outDF["%s"] < 0.0) ' % name
mask = eval('(%s)' % mask_formula)
if len(ifIndex)<1: # Theres only 1 rule
print(b)
modelIndex = np.argwhere(np.array(b) == "Model:")
formulaSplit = b[modelIndex+2][0].split(' ')
for k in xrange(len(formulaSplit)/3):
if k == 0:
formula = '%s' % formulaSplit[2]
else:
formSub = '%s %s*outDF.%s' % (formulaSplit[(k*3)],formulaSplit[(k*3)+1],formulaSplit[(k*3)+2])
formula = '%s%s' % (formula,formSub)
#===Check for another line of equation
if (modelIndex+3 < len(b)):
formulaSplit = b[modelIndex+3][0].split(' ')
k = 0
if len(formulaSplit) > 1:
formSub = '%s %s*outDF[rule2use].%s' % (formulaSplit[(k*3)],formulaSplit[(k*3)+1],formulaSplit[(k*3)+2])
formula = '%s%s' % (formula,formSub)
lstOut = eval('(%s)' % formula)
lstOut[np.where(mask)] = -9999.
else:
for i in xrange(len(ifIndex)): # rules
for j in xrange((thenIndex[i][0]-ifIndex[i][0])-1): #rule branches (i.e. if x>y)
treeRule = b[ifIndex[i][0]+1:thenIndex[i][0]]
treeRuleSplit = treeRule[j].split(' ')
treeRuleSplit[0] = 'outDF.%s' % treeRule[j].split(' ')[0]
if j < 1:
rule = '(%s)' % ' '.join(treeRuleSplit)
else:
rule = '%s & (%s)' % (rule, ' '.join(treeRuleSplit))
# run the rule on the dataset
rule2use=eval('(%s)'% rule)
formulaSplit = b[thenIndex[i]+1][0].split(' ')
for k in xrange(len(formulaSplit)/3):
if k == 0:
formula = '%s' % formulaSplit[2]
else:
formSub = '%s %s*outDF[rule2use].%s' % (formulaSplit[(k*3)],formulaSplit[(k*3)+1],formulaSplit[(k*3)+2])
formula = '%s%s' % (formula,formSub)
#===Check for another line of equation
if (thenIndex[i]+2 < len(b)):
formulaSplit = b[thenIndex[i]+2][0].split(' ')
k = 0
if len(formulaSplit) > 1:
formSub = '%s %s*outDF[rule2use].%s' % (formulaSplit[(k*3)],formulaSplit[(k*3)+1],formulaSplit[(k*3)+2])
formula = '%s%s' % (formula,formSub)
lstOut[np.where(rule2use)] = eval('(%s)' % formula)
lstOut[np.where(mask)] = -9999.
return lstOut
def get_results_cubist_model(infile,outDF):
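    # Similar to readCubistOut, but this appears to parse the rules from a
    # saved Cubist model file on disk (the 'conds' / coefficient lines) rather
    # than from captured console output.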
f = open(infile,'r')
all_lines = f.readlines()
f.close()
var = np.zeros([outDF.shape[0]])
count1=0
mask_formula = ''
for name in list(outDF):
count1+=1
if count1 < len(list(outDF)):
mask_formula = mask_formula + '(outDF["%s"] < 0.0) | ' % name
else:
mask_formula = mask_formula + '(outDF["%s"] < 0.0) ' % name
mask = eval('(%s)' % mask_formula)
count=0
for line in all_lines:
chars = line.split()
condition = chars[0].split('=')
count+=1
if condition[0] == 'conds':
var1 = condition[1].split('"')
nconds = var1[1]
rules = ''
for x in range(int(nconds)):
# x+=1
# print(x)
c1 = all_lines[count+x].split()
# print(c1)
cvar = c1[1].split('"')
cval = c1[2].split('"')
cond = c1[3].split('"')
if x < int(nconds)-1:
rules = rules +'(outDF.'+str(cvar[1])+str(cond[1])+str(cval[1])+') & '
elif x == int(nconds)-1:
# if x == (int(nconds)):
rules = rules +'(outDF.'+str(cvar[1])+str(cond[1])+str(cval[1])+')'
c1 = all_lines[count+x+1].split()
# else:
# if x == int(nconds):
# print("I'm here")
# print c1
a0=c1[0].split('"')
# print str(a0[1])
formula=' '+str(a0[1])
for y in range(1,len(c1),2):
# print y, len(c1)
a1=c1[y].split('"')
a2=c1[y+1].split('"')
formula=formula+'+('+str(a2[1])+'*outDF.'+str(a1[1])+'[rule2use]'+')'
print(rules)
print(formula)
rule2use=eval('(%s)'% rules)
var[np.where(rule2use)] = eval('(%s)' % formula)
var[np.where(mask)] = -9999.
return var
def planck(X,ANV):
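    # Planck blackbody radiance at temperature X (K) for wavenumber ANV
    # (cm^-1), using the standard C1/C2 radiation constants.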
C1=1.191E-9
C2=1.439
C1P = C1*ANV**3 # different wavelength##
C2P = C2*ANV
return C1P/(np.exp(C2P/X)-1.)
def invplanck(X,ANV):
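    # Inverse of planck(): brightness temperature (K) for radiance X at
    # wavenumber ANV.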
C1=1.191E-9
C2=1.439
C1P = C1*ANV**3
C2P = C2*ANV
return C2P/np.log(1.+C1P/X)
# This function contains an empirical correction for absorption
# by non-water-vapor constituents. It was developed in comparison
# with a series of MODTRAN experiments.
# FUNCTION DTAUDP(WAVENUMBER,TEMPERATURE,VAPOR PRESSURE,PRESSURE)
def dtaudp(W,X,Y,Z):
GO=9.78
DTAUDP1=(4.18+5578.*np.exp((-7.87E-3)*W))*np.exp(1800.*
(1.0/X-1.0/296.))*(Y/Z+.002)*(.622/(101.3*GO))*Y
return DTAUDP1+(0.00004+Y/60000.)
#=======functions for processing===============================================
#==============================================================================
def processTrees(year=None,doy=None):
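    # Build the 5-km training table (fsun, dthr, rnet/dthr, rnet, trad2) and
    # fit a separate Cubist fsun model for each precip / fmax stratum.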
inProjection = '+proj=longlat +ellps=WGS84 +datum=WGS84 +no_defs'
if year==None:
dd = datetime.date.today()+datetime.timedelta(days=-1)
year = dd.year
if doy==None:
doy = (datetime.date.today()-datetime.date(year,1,1)).days-1
year = 2016 # TEMP FOR RT PROCESSING
dtimedates = np.array(range(1,366,7))
r7day = dtimedates[dtimedates>=doy][0]
riseddd="%d%03d" %(year,r7day)
fsun_trees_tile_ctl = os.path.join(fsun_trees_path,'tiles_ctl')
# if not os.path.exists(fsun_trees_tile_ctl):
# os.makedirs(fsun_trees_tile_ctl)
##===========create dictionary and convert to csv=======
#======load 5 km data and subset it========================================
# dthr_zip_fn = os.path.join(static_path,"5KM","DTHR","DTHR%s.dat.gz" % riseddd)
dthr_fn = glob.glob(os.path.join(static_path,"5KM","DTHR","DTHR_*%s.dat" % r7day))[0]
# dthr_fn = os.path.join("./DTHR%s.dat" % date)
# gunzip(dthr_zip_fn)
# dthr = np.fromfile(dthr_zip_fn[:-3], dtype=np.float32)
dthr = np.fromfile(dthr_fn, dtype=np.float32)
dthr = np.flipud(dthr.reshape([3000,7200]))
dthr_sub = dthr[901:1801,3201:4801]
# plt.imshow(dthr_sub)
dthr = np.reshape(dthr_sub,[dthr_sub.size])
# rnet_zip_fn = os.path.join(static_path,"5KM","RNET","RNET%s.dat.gz" % riseddd)
rnet_fn = glob.glob(os.path.join(static_path,"5KM","RNET","RNET_*%s.dat" % r7day))[0]
# rnet_fn = os.path.join("./RNET%s.dat" % date)
# gunzip(rnet_zip_fn)
# rnet = np.fromfile(rnet_zip_fn[:-3], dtype=np.float32)
rnet = np.fromfile(rnet_fn, dtype=np.float32)
rnet = np.flipud(rnet.reshape([3000,7200]))
rnet_sub = rnet[901:1801,3201:4801]
# plt.imshow(rnet_sub)
rnet = np.reshape(rnet_sub,[rnet_sub.size])
# fsun_src_fn = os.path.join(static_path,"5KM","FSUN","FSUN%s.dat" % riseddd)
fsun_fn = glob.glob(os.path.join(static_path,"5KM","FSUN","FSUN_*%s.dat" % r7day))[0]
# fsun_fn = os.path.join("./FSUN%s.dat" % date)
# shutil.copyfile(fsun_src_fn)
fsun = np.fromfile(fsun_fn, dtype=np.float32)
fsun = np.flipud(fsun.reshape([3000,7200]))
writeArray2Tiff(fsun,[0.05,0.05],[-180.,90],inProjection,fsun_fn[:-4]+'.tif',gdal.GDT_Float32)
fsun_sub = fsun[901:1801,3201:4801]
# plt.imshow(fsun_sub[100:400,1000:1300],vmin=0, vmax=0.5)
fsun = np.reshape(fsun_sub,[fsun_sub.size])
rnet_dthr = rnet/dthr
# note* FMAX is actually max LAI here
fmax_src_fn = os.path.join(static_path,"5KM","FMAX","FMAX.dat")
# fmax_fn = os.path.join("./FMAX.dat")
# shutil.copyfile(fmax_src_fn,fmax_fn)
fmax = np.fromfile(fmax_src_fn, dtype=np.float32)
fmax = 1-np.exp(-0.5*fmax)
fmax_sub = np.flipud(fmax.reshape([900,1600]))
# plt.imshow(fmax_sub, vmin=0, vmax=0.8)
fmax = np.reshape(fmax_sub,[fmax_sub.size])
precip_src_fn = os.path.join(static_path,"5KM","PRECIP","PRECIP.dat")
# precip_fn = os.path.join("./PRECIP.dat")
# shutil.copyfile(precip_src_fn,precip_fn)
precip = np.fromfile(precip_src_fn, dtype=np.float32)
precip_sub = np.flipud(precip.reshape([900,1600]))
# plt.imshow(precip_sub)
precip = np.reshape(precip_sub,[precip_sub.size])
# trad2_src_fn = os.path.join(static_path,"5KM","TRAD2","TRD2%s.dat.gz" % riseddd)
trad2_fn = glob.glob(os.path.join(static_path,"5KM","TRAD2","TRD2_*%s.dat" % r7day))[0]
# trad2_fn = os.path.join("./TRD2%s.dat" % date)
# gunzip(trad2_src_fn)
# trad2 = np.fromfile(trad2_src_fn[:-3], dtype=np.float32)
trad2 = np.fromfile(trad2_fn, dtype=np.float32)
trad2 = np.flipud(trad2.reshape([3000,7200]))
trad2_sub = trad2[901:1801,3201:4801]
# plt.imshow(trad2_sub,vmin=280, vmax=320)
trad2 = np.reshape(trad2_sub,[trad2_sub.size])
lai_src_fn = os.path.join(static_path,"5KM","LAI","MLAI2014%03d.dat" % r7day)
# lai_fn = os.path.join("./LAI2014%03d.dat" % doy)
# shutil.copyfile(lai_src_fn,lai_fn)
lai = np.fromfile(lai_src_fn, dtype=np.float32)
lai = lai.reshape([3000,7200])
lai_sub = lai[901:1801,3201:4801]
# plt.imshow(lai_sub,vmin=0, vmax=2)
lai = np.reshape(lai_sub,[lai_sub.size])
outDict = {'fsun':fsun, 'dthr':dthr, 'rnet_dthr':rnet_dthr,
'rnet': rnet, 'fmax':fmax, 'precip':precip,
'lai':lai,'trad2':trad2}
outDF = pd.DataFrame.from_dict(outDict)
#============create final_p250_fmax0.f90===================================
#========create fsun.data==================================================
p1 = 0
p2 = 250
f1 = 0
f2 = 0.2
out = outDF.loc[(outDF["fsun"] > 0.0) & (outDF["rnet"] > 0.0) &
(outDF["lai"] > 0.0) & (outDF["trad2"] > 0.0) &
(outDF["dthr"] > 0.0) & (outDF["precip"] >= p1) &
(outDF["precip"] < p2) & (outDF["fmax"] >= f1) &
(outDF["fmax"] < f2), ["fsun","dthr","rnet_dthr","rnet","trad2"]]
file_data = os.path.join(fsun_trees_tile_ctl,'fsun.data')
out.to_csv(file_data , header=True, index=False,columns=["fsun",
"dthr","rnet_dthr","rnet","trad2"])
file_names = os.path.join(fsun_trees_tile_ctl,'fsun.names')
get_trees_fstem_names(file_names)
#====run cubist======================================
cubist_name = os.path.join(fsun_trees_tile_ctl,'fsun')
out1 = subprocess.check_output("cubist -f %s -u -a -i -r 8 -S 75" % cubist_name, shell=True)
#============create final_p250_fmax20.f90==================================
#========create fsun.data==================================================
p1 = 0
p2 = 250
f1 = 0.2
f2 = 1.0
out = outDF.loc[(outDF["fsun"] > 0.0) & (outDF["rnet"] > 0.0) &
(outDF["lai"] > 0.0) & (outDF["trad2"] > 0.0) &
(outDF["dthr"] > 0.0) & (outDF["precip"] >= p1) &
(outDF["precip"] < p2) & (outDF["fmax"] >= f1) &
(outDF["fmax"] < f2), ["fsun","dthr","rnet_dthr","rnet","trad2"]]
file_data = os.path.join(fsun_trees_tile_ctl,'fsun.data')
out.to_csv(file_data , header=True, index=False,columns=["fsun",
"dthr","rnet_dthr","rnet","trad2"])
file_names = os.path.join(fsun_trees_tile_ctl,'fsun.names')
get_trees_fstem_names(file_names)
#====run cubist======================================
cubist_name = os.path.join(fsun_trees_tile_ctl,'fsun')
out2 = subprocess.check_output("cubist -f %s -u -a -i -r 8 -S 75" % cubist_name, shell=True)
#============create final_p500.f90=========================================
#========create fsun.data==================================================
p1 = 250
p2 = 500
f1 = 0.0
f2 = 1.0
out = outDF.loc[(outDF["fsun"] > 0.0) & (outDF["rnet"] > 0.0) &
(outDF["lai"] > 0.0) & (outDF["trad2"] > 0.0) &
(outDF["dthr"] > 0.0) & (outDF["precip"] >= p1) &
(outDF["precip"] < p2) & (outDF["fmax"] >= f1) &
(outDF["fmax"] < f2), ["fsun","dthr","rnet_dthr","rnet","trad2"]]
file_data = os.path.join(fsun_trees_tile_ctl,'fsun.data')
out.to_csv(file_data , header=True, index=False,columns=["fsun",
"dthr","rnet_dthr","rnet","trad2"])
file_names = os.path.join(fsun_trees_tile_ctl,'fsun.names')
get_trees_fstem_names(file_names)
#====run cubist======================================
cubist_name = os.path.join(fsun_trees_tile_ctl,'fsun')
out3 = subprocess.check_output("cubist -f %s -u -a -i -r 8 -S 50" % cubist_name, shell=True)
#============create final_p1000.f90========================================
#========create fsun.data==================================================
p1 = 500
p2 = 1000
f1 = 0.0
f2 = 1.0
out = outDF.loc[(outDF["fsun"] > 0.0) & (outDF["rnet"] > 0.0) &
(outDF["lai"] > 0.0) & (outDF["trad2"] > 0.0) &
(outDF["dthr"] > 0.0) & (outDF["precip"] >= p1) &
(outDF["precip"] < p2) & (outDF["fmax"] >= f1) &
(outDF["fmax"] < f2), ["fsun","dthr","rnet_dthr","rnet","trad2"]]
file_data = os.path.join(fsun_trees_tile_ctl,'fsun.data')
out.to_csv(file_data , header=True, index=False,columns=["fsun",
"dthr","rnet_dthr","rnet","trad2"])
file_names = os.path.join(fsun_trees_tile_ctl,'fsun.names')
get_trees_fstem_names(file_names)
#====run cubist======================================
cubist_name = os.path.join(fsun_trees_tile_ctl,'fsun')
out4 = subprocess.check_output("cubist -f %s -u -a -i -r 8 -S 50" % cubist_name, shell=True)
#============create final_p2000.f90========================================
#========create fsun.data==================================================
p1 = 1000
p2 = 9999
f1 = 0.0
f2 = 1.0
out = outDF.loc[(outDF["fsun"] > 0.0) & (outDF["rnet"] > 0.0) &
(outDF["lai"] > 0.0) & (outDF["trad2"] > 0.0) &
(outDF["dthr"] > 0.0) & (outDF["precip"] >= p1) &
(outDF["precip"] < p2) & (outDF["fmax"] >= f1) &
(outDF["fmax"] < f2), ["fsun","dthr","rnet_dthr","rnet","trad2"]]
file_data = os.path.join(fsun_trees_tile_ctl,'fsun.data')
out.to_csv(file_data , header=True, index=False,columns=["fsun",
"dthr","rnet_dthr","rnet","trad2"])
file_names = os.path.join(fsun_trees_tile_ctl,'fsun.names')
get_trees_fstem_names(file_names)
#====run cubist======================================
cubist_name = os.path.join(fsun_trees_tile_ctl,'fsun')
out5 = subprocess.check_output("cubist -f %s -u -a -i -r 8 -S 50" % cubist_name, shell=True)
return [out1,out2,out3,out4,out5]
#======This was implemented as part of v0.2 update============================
def processTreesV2(doy):
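    # v0.2 workflow: assemble the 5-km predictor stack (DTHR, TRAD2, RNET, LAI,
    # FSUN, vegetation type, DTHR correction, FMAX, precip) and train one
    # Cubist fsun tree per broad land-cover class via getTree() below.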
inProjection = '+proj=longlat +ellps=WGS84 +datum=WGS84 +no_defs'
dtimedates = np.array(range(1,366,7))
r7day = dtimedates[dtimedates>=doy][0]
fsun_trees_tile_ctl = os.path.join(fsun_trees_path,'tiles_ctl')
##===========create dictionary and convert to csv=======
#======load 5 km data and subset it========================================
dthr_fn = glob.glob(os.path.join(static_path,"5KM","DTHR","DTHR_*%s.dat" % r7day))[0]
dthr = np.fromfile(dthr_fn, dtype=np.float32)
dthr = np.flipud(dthr.reshape([3000,7200]))
dthr_sub = dthr[901:1801,3201:4801]
dthr = np.reshape(dthr_sub,[dthr_sub.size])
trad2_fn = glob.glob(os.path.join(static_path,"5KM","TRAD2","TRD2_*%s.dat" % r7day))[0]
trad2 = np.fromfile(trad2_fn, dtype=np.float32)
trad2 = np.flipud(trad2.reshape([3000,7200]))
trad2_sub = trad2[901:1801,3201:4801]
trad2 = np.reshape(trad2_sub,[trad2_sub.size])
# print "trad2 max %f" % np.nanmax(trad2)
rnet_fn = glob.glob(os.path.join(static_path,"5KM","RNET","RNET_*%s.dat" % r7day))[0]
rnet = np.fromfile(rnet_fn, dtype=np.float32)
rnet = np.flipud(rnet.reshape([3000,7200]))
rnet_sub = rnet[901:1801,3201:4801]
rnet = np.reshape(rnet_sub,[rnet_sub.size])
# print "rnet max %f" % np.nanmax(rnet)
lai_src_fn = os.path.join(static_path,"5KM","LAI","MLAI2014%03d.dat" % r7day)
lai = np.fromfile(lai_src_fn, dtype=np.float32)
lai = lai.reshape([3000,7200])
lai_sub = lai[901:1801,3201:4801]
lai = np.reshape(lai_sub,[lai_sub.size])
# print "lai max %f" % np.nanmax(lai)
# fsun_src_fn = os.path.join(static_path,"5KM","FSUN","FSUN%s.dat" % riseddd)
fsun_fn = glob.glob(os.path.join(static_path,"5KM","FSUN","FSUN_*%s.dat" % r7day))[0]
fsun = np.fromfile(fsun_fn, dtype=np.float32)
fsun = np.flipud(fsun.reshape([3000,7200]))
writeArray2Tiff(fsun,[0.05,0.05],[-180.,90],inProjection,fsun_fn[:-4]+'.tif',gdal.GDT_Float32)
fsun_sub = fsun[901:1801,3201:4801]
fsun = np.reshape(fsun_sub,[fsun_sub.size])
# print "fsun max %f" % np.nanmax(fsun)
vegt_src_fn = os.path.join(static_path,"5KM","VEGT","VEG_TYPE_MODIS.dat")
vegt = np.fromfile(vegt_src_fn, dtype=np.float32)
vegt = np.flipud(vegt.reshape([3000,7200]))
vegt_sub = vegt[901:1801,3201:4801]
vegt = np.reshape(vegt_sub,[vegt_sub.size])
corr_src_fn = glob.glob(os.path.join(static_path,"5KM","CORR","DTHR_CORR_DTLOC150_SEP17_FINAL_*%s.dat" % r7day))[0]
corr = np.fromfile(corr_src_fn, dtype=np.float32)
corr = np.flipud(corr.reshape([3000,7200]))
corr_sub = corr[901:1801,3201:4801]
corr = np.reshape(corr_sub,[corr_sub.size])
# note* FMAX is actually max LAI here
fmax_src_fn = os.path.join(static_path,"5KM","FMAX","FMAX.dat")
fmax = np.fromfile(fmax_src_fn, dtype=np.float32)
fmax = 1-np.exp(-0.5*fmax)
fmax_sub = np.flipud(fmax.reshape([900,1600]))
fmax = np.reshape(fmax_sub,[fmax_sub.size])
# print "fmax max %f" % np.nanmax(fmax)
precip_src_fn = os.path.join(static_path,"5KM","PRECIP","PRECIP.dat")
precip = np.fromfile(precip_src_fn, dtype=np.float32)
precip_sub = np.flipud(precip.reshape([900,1600]))
precip = np.reshape(precip_sub,[precip_sub.size])
# print "precip max %f" % np.nanmax(precip)
dthr_corr = dthr*corr
# print "dthr_corr max %f" % np.nanmax(dthr_corr)
outDict = {'fsun':fsun, 'dthr_corr':dthr_corr,'dthr':dthr,
'rnet': rnet, 'vegt':vegt, 'corr':corr,'lai':lai,
'trad2':trad2, 'fmax':fmax, 'precip':precip}
outDF = pd.DataFrame.from_dict(outDict)
#=========build a funciton for building fsun trees=========================
def getTree(lc,p1,p2,f1,f2,v1,v2):
out = outDF.loc[(outDF["fsun"] > 0.0) & (outDF["rnet"] > 0.0) &
(outDF["lai"] > 0.0) & (outDF["trad2"] > 0.0) &
(outDF["dthr"] > 0.0) & (outDF["precip"] >= p1) &
(outDF["precip"] < p2) & (outDF["fmax"] >= f1) &
(outDF["fmax"] < f2) & (outDF["vegt"] >= v1) &
(outDF["vegt"] <= v2), ["fsun","dthr_corr","lai","trad2"]]
file_data = os.path.join(fsun_trees_tile_ctl,'fsun_%s_%03d.data' % (lc,doy))
out.to_csv(file_data , header=True, index=False,columns=["fsun",
"dthr_corr","lai","trad2"])
file_names = os.path.join(fsun_trees_tile_ctl,'fsun_%s_%03d.names'% (lc,doy))
get_trees_fstem_namesV2(file_names)
#====run cubist======================================
cubist_name = os.path.join(fsun_trees_tile_ctl,'fsun_%s_%03d' % (lc,doy))
out1 = subprocess.check_output("cubist -f %s -r 10" % cubist_name, shell=True)
return out1
#========get trees====================================================
cropTree = getTree('crop',0, 2000, 0, 1, 12, 12)
grassTree = getTree('grass',0, 2000, 0, 1, 11, 11)
shrubTree = getTree('shrub',0, 2000, 0, 1, 9, 10)
forestTree = getTree('forest',0, 2000, 0, 1, 1, 8)
bareTree = getTree('bare',0, 2000, 0, 1, 13, 14)
return [cropTree,grassTree,shrubTree,forestTree,bareTree]
def getIJcoordsPython(tile):
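"""Write int32 lookup tables mapping every 0.004-degree pixel of a
3750x3750 VIIRS tile onto the i/j indices of the 0.25-degree CFSR grid."""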
lat,lon = tile2latlon(tile)
tile_lut_path = os.path.join(static_path,"CFSR","viirs_tile_lookup_tables")
if not os.path.exists(tile_lut_path):
os.makedirs(tile_lut_path)
icoordpath = os.path.join(tile_lut_path,"CFSR_T%03d_lookup_icoord.dat" % tile)
jcoordpath = os.path.join(tile_lut_path,"CFSR_T%03d_lookup_jcoord.dat" % tile)
istart = (180+lon)*4
addArray = np.floor(np.array(range(3750))*0.004/0.25)
icor = istart+addArray
icormat = np.repeat(np.reshape(icor,[icor.size,1]),3750,axis=1)
icormat = icormat.T
icormat = np.array(icormat,dtype='int32')
icormat.tofile(icoordpath)
jstart = (89.875+lat)*4
jcor = jstart+addArray
jcormat = np.repeat(np.reshape(jcor,[jcor.size,1]),3750,axis=1)
jcormat = np.array(jcormat,dtype='int32')
jcormat.tofile(jcoordpath)
def gridMergePythonEWA(tile,year,doy):
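"""Grid and merge VIIRS I5 brightness temperature, cloud mask and water mask
swaths onto the 3750x3750 (0.004 deg) tile grid with EWA resampling
(ll2cr + fornav). Swaths are grouped by orbit, gridded, stacked, and the
observation with the smallest sensor zenith angle is kept per pixel; day
and night overpasses are processed separately and written out as
merged_*_bt / merged_*_view binaries plus GeoTIFFs. Returns True only when
both a day and a night merge were produced."""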
tile_path = os.path.join(tile_base_path,"T%03d" % tile)
# if not os.path.exists(tile_path):
# os.makedirs(tile_path)
dd = datetime.datetime(year, 1, 1) + datetime.timedelta(doy - 1)
date = "%d%03d" % (year,doy)
LLlat,LLlon = tile2latlon(tile)
URlat = LLlat+15.
URlon = LLlon+15.
inUL = [LLlon,URlat]
ALEXIshape = [3750,3750]
ALEXIres = [0.004,0.004]
latmid = LLlat+7.5
lonmid = LLlon+7.5
# dirpath = os.path.join(data_path,"%d" % year, "%02d" % dd.month)
# db = pd.read_csv(os.path.join(dirpath,'I5_database.csv'))
# db = pd.read_csv(os.path.join(data_path,'I5_database.csv'))
I5_db_name = os.path.join(data_path,'viirs_database.db')
# db = pd.DataFrame.drop_duplicates(db)
#=====================Day==================================================
#==========================================================================
# files = db[(db['south']-5 <= latmid) & (db['north']+5 >= latmid) &
# (db['west']-5 <= lonmid) & (db['east']+5 >= lonmid) &
# (db['year'] == year) & (db['doy'] == doy) & (db['N_Day_Night_Flag'] == 'Day')]
# filenames = files['filename']
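# query the sqlite granule database for daytime swaths whose bounding box
# (padded by 5 degrees) covers the tile centre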
conn = sqlite3.connect( I5_db_name )
filenames = pd.read_sql_query("SELECT * from i5 WHERE (year = %d) AND "
"(doy = %03d) AND (south-5 <= %f) AND "
"(north+5 >= %f) AND (west-5 <= %f) "
"AND (east+5 >= %f) AND (N_Day_Night_Flag = 'Day')"
% (year,doy,latmid,latmid,lonmid,lonmid), conn).filename
conn.close()
fileProcessed=0
print filenames
orbits = []
for fn in filenames:
parts = fn.split(os.sep)[-1].split('_')
orbits.append(parts[5])
orbits = list(set(orbits))
orbitcount = 0
print "number of orbits: %d" % len(orbits)
for orbit in orbits:
fns = [s for s in filenames if orbit in s.lower()]
count = 0
validFiles = []
for filename in fns:
folder = os.sep.join(filename.split(os.sep)[:-1])
parts = filename.split(os.sep)[-1].split('_')
common_fn = os.path.join("_".join((parts[1],parts[2],parts[3],parts[4],parts[5])))
search_files = glob.glob(os.path.join(folder,"*"+common_fn+"*"))
if len(search_files)==4:
validFiles.append(common_fn)
for common_fn in validFiles:
count+=1
search_file = os.path.join(folder,"*SVI05_"+common_fn+"*")
search_geofile = os.path.join(folder,"*GITCO_"+common_fn+"*")
search_cloudgeofile = os.path.join(folder,"*GMTCO_"+common_fn+"*")
datet = datetime.datetime(year,dd.month, dd.day,0,0,0)
if datet > datetime.datetime(2017,3,8,0,0,0):
cloud_prefix = "VICMO"
else:
cloud_prefix = "IICMO"
search_cloudfile = os.path.join(folder,"*%s_" % cloud_prefix+common_fn+"*")
filename = glob.glob(search_file)[0]
geofile = glob.glob(search_geofile)[0]
cloudfile = glob.glob(search_cloudfile)[0]
cloudgeofile = glob.glob(search_cloudgeofile)[0]
# load cloud data==========
f=h5py.File(cloudfile,'r')
g=h5py.File(cloudgeofile,'r')
if datet > datetime.datetime(2017,3,8,0,0,0):
data_array = f['/All_Data/VIIRS-CM-EDR_All/QF1_VIIRSCMEDR'][()]
else:
data_array = f['/All_Data/VIIRS-CM-IP_All/QF1_VIIRSCMIP'][()]
lat_array = g['/All_Data/VIIRS-MOD-GEO-TC_All/Latitude'][()]
lon_array = g['/All_Data/VIIRS-MOD-GEO-TC_All/Longitude'][()]
view_array = g['/All_Data/VIIRS-MOD-GEO-TC_All/SatelliteZenithAngle'][()]
start=filename.find('_t')
out_time=filename[start+2:start+6]
if count ==1:
latcloud = np.array(lat_array,'float32')
loncloud=np.array(lon_array,'float32')
cloud=np.array(data_array,'float32')
viewcloud=np.array(view_array,'float32')
else:
latcloud = np.vstack((latcloud,np.array(lat_array,'float32')))
loncloud = np.vstack((loncloud,np.array(lon_array,'float32')))
cloud = np.vstack((cloud,np.array(data_array,'float32')))
viewcloud = np.vstack((viewcloud,np.array(view_array,'float32')))
# load water mask===========
f=h5py.File(cloudfile,'r')
g=h5py.File(cloudgeofile,'r')
if datet > datetime.datetime(2017,3,8,0,0,0):
data_array = f['/All_Data/VIIRS-CM-EDR_All/QF2_VIIRSCMEDR'][()]
else:
data_array = f['/All_Data/VIIRS-CM-IP_All/QF2_VIIRSCMIP'][()]
lat_array = g['/All_Data/VIIRS-MOD-GEO-TC_All/Latitude'][()]
lon_array = g['/All_Data/VIIRS-MOD-GEO-TC_All/Longitude'][()]
view_array = g['/All_Data/VIIRS-MOD-GEO-TC_All/SatelliteZenithAngle'][()]
start=filename.find('_t')
out_time=filename[start+2:start+6]
if count ==1:
watermask=np.array(data_array,'float32')
else:
watermask = np.vstack((watermask,np.array(data_array,'float32')))
# load BT data============
f=h5py.File(filename,'r')
g=h5py.File(geofile,'r')
data_array = f['/All_Data/VIIRS-I5-SDR_All/BrightnessTemperature'][()]
lat_array = g['/All_Data/VIIRS-IMG-GEO-TC_All/Latitude'][()]
lon_array = g['/All_Data/VIIRS-IMG-GEO-TC_All/Longitude'][()]
view_array = g['/All_Data/VIIRS-IMG-GEO-TC_All/SatelliteZenithAngle'][()]
if count ==1:
lat=np.array(lat_array,'float32')
lon=np.array(lon_array,'float32')
data=np.array(data_array,'float32')
view=np.array(view_array,'float32')
else:
lat = np.vstack((lat,np.array(lat_array,'float32')))
lon = np.vstack((lon,np.array(lon_array,'float32')))
data = np.vstack((data,np.array(data_array,'float32')))
view = np.vstack((view,np.array(view_array,'float32')))
#====cloud gridding=====================
if len(validFiles) == 0:
continue
orbitcount+=1
cloudOrig=cloud.copy()
#get 2-3 bits
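# unpack QF1 MSB-first and sum bits 2-3 (cloud confidence); the sum reaches 2
# only when both bits are set, which the later `cloud>1` test treats as cloudy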
cloud=np.array(cloud,'uint8')
cloud = np.reshape(cloud,[cloud.size, 1])
b = np.unpackbits(cloud, axis=1)
cloud = np.sum(b[:,4:6],axis=1)
cloud = np.reshape(cloud,[cloudOrig.shape[0],cloudOrig.shape[1]])
cloud = np.array(cloud, dtype='float32')
#====get water mask from bits===========
watermask=np.array(watermask,'uint8')
watermask = np.reshape(watermask,[watermask.size, 1])
b = np.unpackbits(watermask, axis=1)
watermask = np.sum(b[:,5:7],axis=1)
watermask = np.reshape(watermask,[cloudOrig.shape[0],cloudOrig.shape[1]])
watermask = np.array(watermask, dtype='float32')
mask = (cloudOrig==0.)
cloud[mask]=np.nan
viewcloud[mask]=np.nan
watermask[mask]=np.nan
#=====check if data is in range========================================
rangeIndex = ((latcloud<-90.) | (latcloud > 90.) | (loncloud < -180.) | (loncloud > 180.))
latcloud[rangeIndex] = np.nan
loncloud[rangeIndex] = np.nan
cloud[rangeIndex] = np.nan
viewcloud[rangeIndex] = np.nan
watermask[rangeIndex] = np.nan
if np.nansum(cloud)==0: # check if there is any data
continue
projection = '+proj=longlat +ellps=WGS84 +datum=WGS84'
area_id ='tile'
proj_id = 'latlon'
description = 'lat lon grid'
swath_def = geometry.SwathDefinition(lons=loncloud, lats=latcloud)
x_size = 3750
y_size = 3750
area_extent = (LLlon,LLlat,URlon,URlat)
area_def = utils.get_area_def(area_id, description, proj_id, projection,
x_size, y_size, area_extent)
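# EWA resampling: ll2cr maps the swath lon/lat onto grid columns/rows and
# fornav performs the weighted gridding (16 detector rows per M-band scan)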
swath_points_in_grid, cols, rows = ll2cr(swath_def, area_def, copy=False)
rows_per_scan = 16
try: # if there are no valid pixels in the region move on
num_valid_points, gridded_cloud = fornav(cols, rows, area_def, cloud, rows_per_scan=rows_per_scan)
except:
continue
try:
num_valid_points, gridded_cloudview = fornav(cols, rows, area_def, viewcloud, rows_per_scan=rows_per_scan)
except:
continue
try:
num_valid_points, gridded_watermask = fornav(cols, rows, area_def, watermask, rows_per_scan=rows_per_scan)
except:
continue
gridded_cloud[gridded_cloudview>60.0]=np.nan
gridded_watermask[gridded_cloudview>60.0]=np.nan
#stack data
if orbitcount==1:
cloud_stack = gridded_cloud
cloudview_stack = gridded_cloudview
watermask_stack = gridded_watermask
else:
cloud_stack = np.dstack((cloud_stack,gridded_cloud))
cloudview_stack = np.dstack((cloudview_stack,gridded_cloudview))
watermask_stack = np.dstack((watermask_stack,gridded_watermask))
#==LST gridding===========================
mask = (data>65527.)
data[mask]=np.nan
view[mask]=np.nan
#=====check if data is in range========================================
rangeIndex = ((lat<-90.) | (lat > 90.) | (lon < -180.) | (lon > 180.))
lat[rangeIndex] = np.nan
lon[rangeIndex] = np.nan
data[rangeIndex] = np.nan
view[rangeIndex] = np.nan
if np.nansum(data)==0: # check if there is any data
continue
projection = '+proj=longlat +ellps=WGS84 +datum=WGS84'
area_id ='tile'
proj_id = 'latlon'
description = 'lat lon grid'
swath_def = geometry.SwathDefinition(lons=lon, lats=lat)
x_size = 3750
y_size = 3750
area_extent = (LLlon,LLlat,URlon,URlat)
area_def = utils.get_area_def(area_id, description, proj_id, projection,
x_size, y_size, area_extent)
swath_points_in_grid, cols, rows = ll2cr(swath_def, area_def, copy=False)
rows_per_scan = 32
try: # if there are no valid pixels in the region move on
num_valid_points, gridded_data = fornav(cols, rows, area_def, data, rows_per_scan=rows_per_scan)
except:
continue
try:
num_valid_points, gridded_view = fornav(cols, rows, area_def, view, rows_per_scan=rows_per_scan)
except:
continue
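# convert the scaled I5 counts to brightness temperature in Kelvin
# (scale 0.00351, offset 150.0) and discard pixels viewed beyond 60 degrees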
lst = gridded_data*0.00351+150.0
lst[gridded_view>60.0]=-9999.
#stack data
if orbitcount==1:
lst_stack = lst
view_stack = gridded_view
else:
lst_stack = np.dstack((lst_stack,lst))
view_stack = np.dstack((view_stack,gridded_view))
#=========CLOUD:doing angle clearing======================================
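# angle clearing: for every pixel keep the value coming from the orbit with
# the smallest sensor zenith angle in the stack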
if orbitcount > 0:
fileProcessed+=1
if cloudview_stack.ndim == 2:
dims = [cloudview_stack.shape[0],cloudview_stack.shape[1],1]
else:
dims = cloudview_stack.shape
aa = np.reshape(cloudview_stack,[dims[0]*dims[1],dims[2]])
aa[np.isnan(aa)]=9999.
indcol = np.argmin(aa,axis=1)
indrow = range(0,len(indcol))
bb = np.reshape(cloud_stack,[dims[0]*dims[1],dims[2]])
cloud = bb[indrow,indcol]
cloud = np.reshape(cloud,[3750,3750])
#=========WATERMASK:doing angle clearing======================================
if watermask_stack.ndim == 2:
dims = [watermask_stack.shape[0],watermask_stack.shape[1],1]
else:
dims = watermask_stack.shape
aa = np.reshape(watermask_stack,[dims[0]*dims[1],dims[2]])
aa[np.isnan(aa)]=9999.
indcol = np.argmin(aa,axis=1)
indrow = range(0,len(indcol))
bb = np.reshape(watermask_stack,[dims[0]*dims[1],dims[2]])
watermask = bb[indrow,indcol]
watermask = np.reshape(watermask,[3750,3750])
#=========BT:doing angle and cloud clearing================================
aa = np.reshape(view_stack,[dims[0]*dims[1],dims[2]])
aa[np.isnan(aa)]=9999.
indcol = np.argmin(aa,axis=1)
indrow = range(0,len(indcol))
bb = np.reshape(lst_stack,[dims[0]*dims[1],dims[2]])
lst = bb[indrow,indcol]
lst = np.reshape(lst,[3750,3750])
lst = np.array(lst,dtype='Float32')
# out_bt_fn = os.path.join(tile_base_path,"bt.dat" )
out_bt_fn = os.path.join(tile_path,"merged_day_bt_%s_T%03d_%s.dat" % (date,tile,out_time))
lst[cloud>1]=-9999.
lst[(watermask==1) | (watermask==2)]=np.nan
lst.tofile(out_bt_fn)
convertBin2tif(out_bt_fn,inUL,ALEXIshape,ALEXIres,'float32',gdal.GDT_Float32)
#=========VIEW:doing angle and cloud clearing================================
aa = np.reshape(view_stack,[dims[0]*dims[1],dims[2]])
aa[np.isnan(aa)]=9999.
indcol = np.argmin(aa,axis=1)
indrow = range(0,len(indcol))
bb = np.reshape(view_stack,[dims[0]*dims[1],dims[2]])
view = bb[indrow,indcol]
view = np.reshape(view,[3750,3750])
view = np.array(view,dtype='Float32')
# out_bt_fn = os.path.join(tile_base_path,"bt.dat" )
out_view_fn = os.path.join(tile_path,"merged_day_view_%s_T%03d_%s.dat" % (date,tile,out_time))
view[cloud>1]=-9999.
view[(watermask==1) | (watermask==2)]=np.nan
view.tofile(out_view_fn)
convertBin2tif(out_view_fn,inUL,ALEXIshape,ALEXIres,'float32',gdal.GDT_Float32)
#=====================Night================================================
#==========================================================================
# files = db[(db['south']-5 <= latmid) & (db['north']+5 >= latmid) &
# (db['west']-5 <= lonmid) & (db['east']+5 >= lonmid) &
# (db['year'] == year) & (db['doy'] == doy) & (db['N_Day_Night_Flag'] == 'Night')]
# filenames = files['filename']
conn = sqlite3.connect( I5_db_name )
filenames = pd.read_sql_query("SELECT * from i5 WHERE (year = %d) AND "
"(doy = %03d) AND (south-5 <= %f) AND "
"(north+5 >= %f) AND (west-5 <= %f) "
"AND (east+5 >= %f) AND (N_Day_Night_Flag = 'Night')"
% (year,doy,latmid,latmid,lonmid,lonmid), conn).filename
conn.close()
orbits = []
for fn in filenames:
parts = fn.split(os.sep)[-1].split('_')
orbits.append(parts[5])
orbits = list(set(orbits))
orbitcount = 0
for orbit in orbits:
fns = [s for s in filenames if orbit in s.lower()]
count = 0
validFiles = []
for filename in fns:
folder = os.sep.join(filename.split(os.sep)[:-1])
parts = filename.split(os.sep)[-1].split('_')
common_fn = os.path.join("_".join((parts[1],parts[2],parts[3],parts[4],parts[5])))
search_files = glob.glob(os.path.join(folder,"*"+common_fn+"*"))
if len(search_files)==4:
validFiles.append(common_fn)
for common_fn in validFiles:
count+=1
search_file = os.path.join(folder,"*SVI05_"+common_fn+"*")
search_geofile = os.path.join(folder,"*GITCO_"+common_fn+"*")
search_cloudgeofile = os.path.join(folder,"*GMTCO_"+common_fn+"*")
datet = datetime.datetime(year,dd.month, dd.day,0,0,0)
if datet > datetime.datetime(2017,3,8,0,0,0):
cloud_prefix = "VICMO"
else:
cloud_prefix = "IICMO"
search_cloudfile = os.path.join(folder,"*%s_" % cloud_prefix+common_fn+"*")
filename = glob.glob(search_file)[0]
geofile = glob.glob(search_geofile)[0]
cloudfile = glob.glob(search_cloudfile)[0]
cloudgeofile = glob.glob(search_cloudgeofile)[0]
# load cloud data==========
f=h5py.File(cloudfile,'r')
g=h5py.File(cloudgeofile,'r')
if datet > datetime.datetime(2017,3,8,0,0,0):
data_array = f['/All_Data/VIIRS-CM-EDR_All/QF1_VIIRSCMEDR'][()]
else:
data_array = f['/All_Data/VIIRS-CM-IP_All/QF1_VIIRSCMIP'][()]
lat_array = g['/All_Data/VIIRS-MOD-GEO-TC_All/Latitude'][()]
lon_array = g['/All_Data/VIIRS-MOD-GEO-TC_All/Longitude'][()]
view_array = g['/All_Data/VIIRS-MOD-GEO-TC_All/SatelliteZenithAngle'][()]
start=filename.find('_t')
out_time=filename[start+2:start+6]
if count ==1:
latcloud = np.array(lat_array,'float32')
loncloud=np.array(lon_array,'float32')
cloud=np.array(data_array,'float32')
viewcloud=np.array(view_array,'float32')
else:
latcloud = np.vstack((latcloud,np.array(lat_array,'float32')))
loncloud = np.vstack((loncloud,np.array(lon_array,'float32')))
cloud = np.vstack((cloud,np.array(data_array,'float32')))
viewcloud = np.vstack((viewcloud,np.array(view_array,'float32')))
# load water mask===========
f=h5py.File(cloudfile,'r')
g=h5py.File(cloudgeofile,'r')
if datet > datetime.datetime(2017,3,8,0,0,0):
data_array = f['/All_Data/VIIRS-CM-EDR_All/QF2_VIIRSCMEDR'][()]
else:
data_array = f['/All_Data/VIIRS-CM-IP_All/QF2_VIIRSCMIP'][()]
lat_array = g['/All_Data/VIIRS-MOD-GEO-TC_All/Latitude'][()]
lon_array = g['/All_Data/VIIRS-MOD-GEO-TC_All/Longitude'][()]
view_array = g['/All_Data/VIIRS-MOD-GEO-TC_All/SatelliteZenithAngle'][()]
start=filename.find('_t')
out_time=filename[start+2:start+6]
if count ==1:
watermask=np.array(data_array,'float32')
else:
watermask = np.vstack((watermask,np.array(data_array,'float32')))
# Load BT data=============
f=h5py.File(filename,'r')
g=h5py.File(geofile,'r')
data_array = f['/All_Data/VIIRS-I5-SDR_All/BrightnessTemperature'][()]
lat_array = g['/All_Data/VIIRS-IMG-GEO-TC_All/Latitude'][()]
lon_array = g['/All_Data/VIIRS-IMG-GEO-TC_All/Longitude'][()]
view_array = g['/All_Data/VIIRS-IMG-GEO-TC_All/SatelliteZenithAngle'][()]
if count ==1:
lat=np.array(lat_array,'float32')
lon=np.array(lon_array,'float32')
data=np.array(data_array,'float32')
view=np.array(view_array,'float32')
else:
lat = np.vstack((lat,np.array(lat_array,'float32')))
lon = np.vstack((lon,np.array(lon_array,'float32')))
data = np.vstack((data,np.array(data_array,'float32')))
view = np.vstack((view,np.array(view_array,'float32')))
#====cloud gridding=====================
if len(validFiles) == 0:
continue
orbitcount+=1
cloudOrig=cloud.copy()
#get 2-3 bits
cloud=np.array(cloud,'uint8')
cloud = np.reshape(cloud,[cloud.size, 1])
b = np.unpackbits(cloud, axis=1)
cloud = np.sum(b[:,4:6],axis=1)
cloud = np.reshape(cloud,[cloudOrig.shape[0],cloudOrig.shape[1]])
cloud = np.array(cloud, dtype='float32')
mask = (cloudOrig==0.)
cloud[mask]=np.nan
viewcloud[mask]=np.nan
#====get water mask from bits===========
watermask=np.array(watermask,'uint8')
watermask = np.reshape(watermask,[watermask.size, 1])
b = np.unpackbits(watermask, axis=1)
watermask = np.sum(b[:,5:7],axis=1)
watermask = np.reshape(watermask,[cloudOrig.shape[0],cloudOrig.shape[1]])
watermask = np.array(watermask, dtype='float32')
#=====check if data is in range========================================
rangeIndex = ((latcloud<-90.) | (latcloud > 90.) | (loncloud < -180.) | (loncloud > 180.))
latcloud[rangeIndex] = np.nan
loncloud[rangeIndex] = np.nan
cloud[rangeIndex] = np.nan
viewcloud[rangeIndex] = np.nan
watermask[rangeIndex] = np.nan
if np.nansum(cloud)==0: # check if there is any data
continue
projection = '+proj=longlat +ellps=WGS84 +datum=WGS84'
area_id ='tile'
proj_id = 'latlon'
description = 'lat lon grid'
swath_def = geometry.SwathDefinition(lons=loncloud, lats=latcloud)
x_size = 3750
y_size = 3750
area_extent = (LLlon,LLlat,URlon,URlat)
area_def = utils.get_area_def(area_id, description, proj_id, projection,
x_size, y_size, area_extent)
swath_points_in_grid, cols, rows = ll2cr(swath_def, area_def, copy=False)
rows_per_scan = 16
try: # if there are no valid pixels in the region move on
num_valid_points, gridded_cloud = fornav(cols, rows, area_def, cloud, rows_per_scan=rows_per_scan)
except:
continue
try:
num_valid_points, gridded_cloudview = fornav(cols, rows, area_def, viewcloud, rows_per_scan=rows_per_scan)
except:
continue
try:
num_valid_points, gridded_watermask = fornav(cols, rows, area_def, watermask, rows_per_scan=rows_per_scan)
except:
continue
gridded_cloud[gridded_cloudview>60.0]=np.nan
gridded_watermask[gridded_cloudview>60.0]=np.nan
#stack data
if orbitcount==1:
cloud_stack = gridded_cloud
cloudview_stack = gridded_cloudview
watermask_stack = gridded_watermask
else:
cloud_stack = np.dstack((cloud_stack,gridded_cloud))
cloudview_stack = np.dstack((cloudview_stack,gridded_cloudview))
watermask_stack = np.dstack((watermask_stack,gridded_watermask))
#==LST gridding===========================
mask = (data>65527.)
data[mask]=np.nan
view[mask]=np.nan
#=====check if data is in range========================================
rangeIndex = ((lat<-90.) | (lat > 90.) | (lon < -180.) | (lon > 180.))
lat[rangeIndex] = np.nan
lon[rangeIndex] = np.nan
data[rangeIndex] = np.nan
view[rangeIndex] = np.nan
if np.nansum(data)==0: # check if there is any data
continue
projection = '+proj=longlat +ellps=WGS84 +datum=WGS84'
area_id ='tile'
proj_id = 'latlon'
description = 'lat lon grid'
swath_def = geometry.SwathDefinition(lons=lon, lats=lat)
x_size = 3750
y_size = 3750
area_extent = (LLlon,LLlat,URlon,URlat)
area_def = utils.get_area_def(area_id, description, proj_id, projection,
x_size, y_size, area_extent)
swath_points_in_grid, cols, rows = ll2cr(swath_def, area_def, copy=False)
rows_per_scan = 32
try: # if there are no valid pixels in the region move on
num_valid_points, gridded_data = fornav(cols, rows, area_def, data, rows_per_scan=rows_per_scan)
except:
continue
try:
num_valid_points, gridded_view = fornav(cols, rows, area_def, view, rows_per_scan=rows_per_scan)
except:
continue
lst = gridded_data*0.00351+150.0
lst[gridded_view>60.0]=-9999.
#stack data
if orbitcount==1:
lst_stack = lst
view_stack = gridded_view
else:
lst_stack = np.dstack((lst_stack,lst))
view_stack = np.dstack((view_stack,gridded_view))
#=========CLOUD:doing angle clearing======================================
if orbitcount > 0:
fileProcessed+=1
if cloudview_stack.ndim == 2:
dims = [cloudview_stack.shape[0],cloudview_stack.shape[1],1]
else:
dims = cloudview_stack.shape
aa = np.reshape(cloudview_stack,[dims[0]*dims[1],dims[2]])
aa[np.isnan(aa)]=9999.
indcol = np.argmin(aa,axis=1)
indrow = range(0,len(indcol))
bb = np.reshape(cloud_stack,[dims[0]*dims[1],dims[2]])
cloud = bb[indrow,indcol]
cloud = np.reshape(cloud,[3750,3750])
#=========WATERMASK:doing angle clearing======================================
if watermask_stack.ndim == 2:
dims = [watermask_stack.shape[0],watermask_stack.shape[1],1]
else:
dims = watermask_stack.shape
aa = np.reshape(watermask_stack,[dims[0]*dims[1],dims[2]])
aa[np.isnan(aa)]=9999.
indcol = np.argmin(aa,axis=1)
indrow = range(0,len(indcol))
bb = np.reshape(watermask_stack,[dims[0]*dims[1],dims[2]])
watermask = bb[indrow,indcol]
watermask = np.reshape(watermask,[3750,3750])
#=========BT:doing angle and cloud clearing================================
aa = np.reshape(view_stack,[dims[0]*dims[1],dims[2]])
aa[np.isnan(aa)]=9999.
indcol = np.argmin(aa,axis=1)
indrow = range(0,len(indcol))
bb = np.reshape(lst_stack,[dims[0]*dims[1],dims[2]])
lst = bb[indrow,indcol]
lst = np.reshape(lst,[3750,3750])
lst = np.array(lst,dtype='Float32')
# out_bt_fn = os.path.join(tile_base_path,"bt.dat" )
out_bt_fn = os.path.join(tile_path,"merged_night_bt_%s_T%03d_%s.dat" % (date,tile,out_time))
lst[cloud>1]=-9999.
lst[(watermask==1) | (watermask==2)]=-9999.
lst.tofile(out_bt_fn)
convertBin2tif(out_bt_fn,inUL,ALEXIshape,ALEXIres,'float32',gdal.GDT_Float32)
#=========VIEW:doing angle and cloud clearing================================
aa = np.reshape(view_stack,[dims[0]*dims[1],dims[2]])
aa[np.isnan(aa)]=9999.
indcol = np.argmin(aa,axis=1)
indrow = range(0,len(indcol))
bb = np.reshape(view_stack,[dims[0]*dims[1],dims[2]])
view = bb[indrow,indcol]
view = np.reshape(view,[3750,3750])
view = np.array(view,dtype='Float32')
# out_bt_fn = os.path.join(tile_base_path,"bt.dat" )
out_view_fn = os.path.join(tile_path,"merged_night_view_%s_T%03d_%s.dat" % (date,tile,out_time))
view[cloud>1]=np.nan
view[(watermask==1) | (watermask==2)]=-9999.
view.tofile(out_view_fn)
convertBin2tif(out_view_fn,inUL,ALEXIshape,ALEXIres,'float32',gdal.GDT_Float32)
return (fileProcessed==2)
def gridMergePython(tile,year,doy):
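"""Alternative merge/grid path driven by the CSV granule database: swath
points are pooled per tile and resampled with a nearest-neighbour kd-tree
search instead of EWA. Writes the same merged_*_bt / merged_*_view
binaries and returns the number of overpasses (day/night) with no data."""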
tile_path = os.path.join(tile_base_path,"T%03d" % tile)
if not os.path.exists(tile_path):
os.makedirs(tile_path)
dd = datetime.datetime(year, 1, 1) + datetime.timedelta(doy - 1)
date = "%d%03d" % (year,doy)
LLlat,LLlon = tile2latlon(tile)
URlat = LLlat+15.
URlon = LLlon+15.
inUL = [LLlon,URlat]
ALEXIshape = [3750,3750]
ALEXIres = [0.004,0.004]
latmid = LLlat+7.5
lonmid = LLlon+7.5
# db = pd.read_csv(os.path.join(data_path,'I5_database.csv'))
dirpath = os.path.join(data_path,"%d" % year, "%02d" % dd.month)
db = pd.read_csv(os.path.join(dirpath,'I5_database.csv'))
db = pd.DataFrame.drop_duplicates(db)
#=====================Day==================================================
#==========================================================================
files = db[(db['south']-5 <= latmid) & (db['north']+5 >= latmid) &
(db['west']-5 <= lonmid) & (db['east']+5 >= lonmid) &
(db['year'] == year) & (db['doy'] == doy) & (db['N_Day_Night_Flag'] == 'Day')]
filenames = files['filename']
mergedata =np.array([])
mergelat = np.array([])
mergelon = np.array([])
mergeview = np.array([])
mergecloudlat = np.array([])
mergecloudlon = np.array([])
mergecloud = np.array([])
for i in range(len(filenames)):
filename = filenames.iloc[i]
folder = os.sep.join(filename.split(os.sep)[:-1])
parts = filename.split(os.sep)[-1].split('_')
search_geofile = os.path.join(folder,"*"+"_".join(("GITCO",parts[1],parts[2],parts[3],parts[4])))
geofile = glob.glob(search_geofile+'*')[0]
search_geofile = os.path.join(folder,"*"+"_".join(("GMTCO",parts[1],parts[2],parts[3],parts[4])))
datet = datetime.datetime(year,dd.month, dd.day,0,0,0)
if datet > datetime.datetime(2017,3,8,0,0,0):
search_cloudfile = os.path.join(folder,"*"+"_".join(("VICMO",parts[1],parts[2],parts[3],parts[4])))
else:
search_cloudfile = os.path.join(folder,"*"+"_".join(("IICMO",parts[1],parts[2],parts[3],parts[4])))
cloudgeofile = glob.glob(search_geofile+'*')[0]
cloudfile = glob.glob(search_cloudfile+'*')[0]
f=h5py.File(cloudfile,'r')
g=h5py.File(cloudgeofile,'r')
data_array = f['/All_Data/VIIRS-CM-IP_All/QF1_VIIRSCMIP'][()]
lat_array = g['/All_Data/VIIRS-MOD-GEO-TC_All/Latitude'][()]
lon_array = g['/All_Data/VIIRS-MOD-GEO-TC_All/Longitude'][()]
view_array = g['/All_Data/VIIRS-MOD-GEO-TC_All/SatelliteZenithAngle'][()]
latcloud=np.array(lat_array,'float32')
loncloud=np.array(lon_array,'float32')
cloud=np.array(data_array,'uint8')
viewcloud=np.array(view_array,'float32')
start=filename.find('_t')
out_time=filename[start+2:start+6]
f=h5py.File(filename,'r')
g=h5py.File(geofile,'r')
data_array = f['/All_Data/VIIRS-I5-SDR_All/BrightnessTemperature'][()]
lat_array = g['/All_Data/VIIRS-IMG-GEO-TC_All/Latitude'][()]
lon_array = g['/All_Data/VIIRS-IMG-GEO-TC_All/Longitude'][()]
view_array = g['/All_Data/VIIRS-IMG-GEO-TC_All/SatelliteZenithAngle'][()]
lat=np.array(lat_array,'float32')
lon=np.array(lon_array,'float32')
data=np.array(data_array,'float32')
view=np.array(view_array,'float32')
vals = data[np.where((lat>LLlat) & (lat <=URlat) & (lon>LLlon) & (lon<=URlon)
& (abs(view)<60.0))]
lats = lat[np.where((lat>LLlat) & (lat <=URlat) & (lon>LLlon) & (lon<=URlon)
& (abs(view)<60.0))]
lons = lon[np.where((lat>LLlat) & (lat <=URlat) & (lon>LLlon) & (lon<=URlon)
& (abs(view)<60.0))]
views = view[np.where((lat>LLlat) & (lat <=URlat) & (lon>LLlon) & (lon<=URlon)
& (abs(view)<60.0))]
cloudlats = latcloud[np.where((latcloud>LLlat) & (latcloud <=URlat) & (loncloud>LLlon) & (loncloud<=URlon)
& (abs(viewcloud)<60.0))]
cloudlons = loncloud[np.where((latcloud>LLlat) & (latcloud <=URlat) & (loncloud>LLlon) & (loncloud<=URlon)
& (abs(viewcloud)<60.0))]
clouds = cloud[np.where((latcloud>LLlat) & (latcloud <=URlat) & (loncloud>LLlon) & (loncloud<=URlon)
& (abs(viewcloud)<60.0))]
mergedata = np.append(mergedata,vals)
mergelat = np.append(mergelat,lats)
mergelon = np.append(mergelon,lons)
mergeview = np.append(mergeview,views)
mergecloudlat = np.append(mergecloudlat,cloudlats)
mergecloudlon = np.append(mergecloudlon,cloudlons)
mergecloud = np.append(mergecloud,clouds)
res=0
if mergelat.any():
mergecloudOrig=mergecloud.copy()
#get 2-3 bits
mergecloud=np.array(mergecloud,'uint8')
mergecloud = np.reshape(mergecloud,[mergecloud.size, 1])
b = np.unpackbits(mergecloud, axis=1)
mergecloud = np.sum(b[:,4:6],axis=1)
mergelat = ma.array(mergelat, mask = (mergedata>65527.),copy=False)
mergelon = ma.array(mergelon, mask = (mergedata>65527.),copy=False)
mergedata = ma.array(mergedata, mask = (mergedata>65527.),copy=False)
mergecloudlat = ma.array(mergecloudlat, mask = (mergecloudOrig==0.),copy=False)
mergecloudlon = ma.array(mergecloudlon, mask = (mergecloudOrig==0.),copy=False)
mergecloud = ma.array(mergecloud, mask = (mergecloudOrig==0.),copy=False)
projection = '+proj=longlat +ellps=WGS84 +datum=WGS84'
area_id ='tile'
proj_id = 'latlon'
description = 'lat lon grid'
x_size = 3750
y_size = 3750
area_extent = (LLlon,LLlat,URlon,URlat)
area_def = utils.get_area_def(area_id, description, proj_id, projection,
x_size, y_size, area_extent)
swath_def = geometry.SwathDefinition(lons=mergecloudlon, lats=mergecloudlat)
# swath_con = image.ImageContainerNearest(mergecloud, swath_def, radius_of_influence=5000)
# area_con = swath_con.resample(area_def)
# cloud = area_con.image_data
cloud = kd_tree.resample_nearest(swath_def, mergecloud.ravel(),area_def, radius_of_influence=500, fill_value=None)
inProjection = '+proj=longlat +ellps=WGS84 +datum=WGS84 +no_defs'
outfn = os.path.join(tile_path,"cloud_day.tif")
writeArray2Tiff(cloud,ALEXIres,inUL,inProjection,outfn,gdal.GDT_Float32)
swath_def = geometry.SwathDefinition(lons=mergelon, lats=mergelat)
# swath_con = image.ImageContainerNearest(mergedata, swath_def, radius_of_influence=5000)
# area_con = swath_con.resample(area_def)
# lst = area_con.image_data*0.00351+150.0
gridded = kd_tree.resample_nearest(swath_def, mergedata.ravel(),area_def, radius_of_influence=500, fill_value=None)
lst = gridded*0.00351+150.0
lst[lst==150]=-9999.
lst[cloud>1]=-9999.
lst=np.array(lst,'float32')
out_bt_fn = os.path.join(tile_path,"merged_day_bt_%s_T%03d_%s.dat" % (date,tile,out_time))
lst.tofile(out_bt_fn)
convertBin2tif(out_bt_fn,inUL,ALEXIshape,ALEXIres,'float32',gdal.GDT_Float32)
# swath_con = image.ImageContainerNearest(mergeview, swath_def, radius_of_influence=5000)
# area_con = swath_con.resample(area_def)
# view = area_con.image_data
view = kd_tree.resample_nearest(swath_def, mergeview.ravel(),area_def, radius_of_influence=500, fill_value=None)
out_view_fn =os.path.join(tile_path,"merged_day_view_%s_T%03d_%s.dat" % (date,tile,out_time))
view[view==0]=-9999.
view[cloud>1]=-9999.
view=np.array(view,'float32')
view.tofile(out_view_fn)
else:
res+=1
#================Night=====================================================
#==========================================================================
files = db[(db['south']-5 <= latmid) & (db['north']+5 >= latmid) &
(db['west']-5 <= lonmid) & (db['east']+5 >= lonmid) &
(db['year'] == year) & (db['doy'] == doy) & (db['N_Day_Night_Flag'] == 'Night')]
filenames = files['filename']
mergedata =np.array([])
mergelat = np.array([])
mergelon = np.array([])
mergeview = np.array([])
mergecloudlat = np.array([])
mergecloudlon = np.array([])
mergecloud = np.array([])
for i in range(len(filenames)):
filename = filenames.iloc[i]
folder = os.sep.join(filename.split(os.sep)[:-1])
parts = filename.split(os.sep)[-1].split('_')
search_geofile = os.path.join(folder,"*"+"_".join(("GITCO",parts[1],parts[2],parts[3],parts[4])))
geofile = glob.glob(search_geofile+'*')[0]
search_geofile = os.path.join(folder,"*"+"_".join(("GMTCO",parts[1],parts[2],parts[3],parts[4])))
datet = datetime.datetime(year,dd.month, dd.day,0,0,0)
if datet > datetime.datetime(2017,3,8,0,0,0):
search_cloudfile = os.path.join(folder,"*"+"_".join(("VICMO",parts[1],parts[2],parts[3],parts[4])))
else:
search_cloudfile = os.path.join(folder,"*"+"_".join(("IICMO",parts[1],parts[2],parts[3],parts[4])))
cloudgeofile = glob.glob(search_geofile+'*')[0]
cloudfile = glob.glob(search_cloudfile+'*')[0]
f=h5py.File(cloudfile,'r')
g=h5py.File(cloudgeofile,'r')
data_array = f['/All_Data/VIIRS-CM-IP_All/QF1_VIIRSCMIP'][()]
lat_array = g['/All_Data/VIIRS-MOD-GEO-TC_All/Latitude'][()]
lon_array = g['/All_Data/VIIRS-MOD-GEO-TC_All/Longitude'][()]
view_array = g['/All_Data/VIIRS-MOD-GEO-TC_All/SatelliteZenithAngle'][()]
latcloud=np.array(lat_array,'float32')
loncloud=np.array(lon_array,'float32')
cloud=np.array(data_array,'uint8')
viewcloud=np.array(view_array,'float32')
start=filename.find('_t')
out_time=filename[start+2:start+6]
f=h5py.File(filename,'r')
g=h5py.File(geofile,'r')
data_array = f['/All_Data/VIIRS-I5-SDR_All/BrightnessTemperature'][()]
lat_array = g['/All_Data/VIIRS-IMG-GEO-TC_All/Latitude'][()]
lon_array = g['/All_Data/VIIRS-IMG-GEO-TC_All/Longitude'][()]
view_array = g['/All_Data/VIIRS-IMG-GEO-TC_All/SatelliteZenithAngle'][()]
lat=np.array(lat_array,'float32')
lon=np.array(lon_array,'float32')
data=np.array(data_array,'float32')
view=np.array(view_array,'float32')
vals = data[np.where((lat>LLlat) & (lat <=URlat) & (lon>LLlon) & (lon<=URlon)
& (abs(view)<60.0))]
lats = lat[np.where((lat>LLlat) & (lat <=URlat) & (lon>LLlon) & (lon<=URlon)
& (abs(view)<60.0))]
lons = lon[np.where((lat>LLlat) & (lat <=URlat) & (lon>LLlon) & (lon<=URlon)
& (abs(view)<60.0))]
views = view[np.where((lat>LLlat) & (lat <=URlat) & (lon>LLlon) & (lon<=URlon)
& (abs(view)<60.0))]
cloudlats = latcloud[np.where((latcloud>LLlat) & (latcloud <=URlat) & (loncloud>LLlon) & (loncloud<=URlon)
& (abs(viewcloud)<60.0))]
cloudlons = loncloud[np.where((latcloud>LLlat) & (latcloud <=URlat) & (loncloud>LLlon) & (loncloud<=URlon)
& (abs(viewcloud)<60.0))]
clouds = cloud[np.where((latcloud>LLlat) & (latcloud <=URlat) & (loncloud>LLlon) & (loncloud<=URlon)
& (abs(viewcloud)<60.0))]
mergedata = np.append(mergedata,vals)
mergelat = np.append(mergelat,lats)
mergelon = np.append(mergelon,lons)
mergeview = np.append(mergeview,views)
mergecloudlat = np.append(mergecloudlat,cloudlats)
mergecloudlon = np.append(mergecloudlon,cloudlons)
mergecloud = np.append(mergecloud,clouds)
if mergelat.any():
mergecloudOrig=mergecloud.copy()
#get 2-3 bits
mergecloud=np.array(mergecloud,'uint8')
mergecloud = np.reshape(mergecloud,[mergecloud.size, 1])
b = np.unpackbits(mergecloud, axis=1)
mergecloud = np.sum(b[:,4:6],axis=1)
mergelat = ma.array(mergelat, mask = (mergedata>65527.),copy=False)
mergelon = ma.array(mergelon, mask = (mergedata>65527.),copy=False)
mergedata = ma.array(mergedata, mask = (mergedata>65527.),copy=False)
mergecloudlat = ma.array(mergecloudlat, mask = (mergecloudOrig==0.),copy=False)
mergecloudlon = ma.array(mergecloudlon, mask = (mergecloudOrig==0.),copy=False)
mergecloud = ma.array(mergecloud, mask = (mergecloudOrig==0.),copy=False)
projection = '+proj=longlat +ellps=WGS84 +datum=WGS84'
area_id ='tile'
proj_id = 'latlon'
description = 'lat lon grid'
x_size = 3750
y_size = 3750
area_extent = (LLlon,LLlat,URlon,URlat)
area_def = utils.get_area_def(area_id, description, proj_id, projection,
x_size, y_size, area_extent)
swath_def = geometry.SwathDefinition(lons=mergecloudlon, lats=mergecloudlat)
# swath_con = image.ImageContainerNearest(mergecloud, swath_def, radius_of_influence=5000)
# area_con = swath_con.resample(area_def)
# cloud = area_con.image_data
cloud = kd_tree.resample_nearest(swath_def, mergecloud.ravel(),area_def, radius_of_influence=500, fill_value=None)
swath_def = geometry.SwathDefinition(lons=mergelon, lats=mergelat)
# swath_con = image.ImageContainerNearest(mergedata, swath_def, radius_of_influence=5000)
# area_con = swath_con.resample(area_def)
# lst = area_con.image_data*0.00351+150.0
gridded = kd_tree.resample_nearest(swath_def, mergedata.ravel(),area_def, radius_of_influence=500, fill_value=None)
lst = gridded*0.00351+150.0
lst[lst==150]=-9999.
lst[cloud>1]=-9999.
lst=np.array(lst,'float32')
out_bt_fn = os.path.join(tile_path,"merged_night_bt_%s_T%03d_%s.dat" % (date,tile,out_time))
lst.tofile(out_bt_fn)
convertBin2tif(out_bt_fn,inUL,ALEXIshape,ALEXIres,'float32',gdal.GDT_Float32)
swath_def = geometry.SwathDefinition(lons=mergelon, lats=mergelat)
# swath_con = image.ImageContainerNearest(mergeview, swath_def, radius_of_influence=5000)
# area_con = swath_con.resample(area_def)
# view = area_con.image_data
view = kd_tree.resample_nearest(swath_def, mergeview.ravel(),area_def, radius_of_influence=500, fill_value=None)
out_view_fn =os.path.join(tile_path,"merged_night_view_%s_T%03d_%s.dat" % (date,tile,out_time))
view[view==0]=-9999.
view[cloud>1]=-9999.
view=np.array(view,'float32')
view.tofile(out_view_fn)
else:
res+=1
return res
def atmosCorrection(tile,year,doy):
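"""Atmospheric correction using the external `run_correction` executable.
The merged day brightness temperature is first shifted with the
overpass-time offset correction (DTRAD and zenith-time grids), then the day
and night fields are corrected against the CFSR temperature/humidity
profiles and surface fields to produce FINAL_DAY_LST / FINAL_NIGHT_LST."""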
LLlat,LLlon = tile2latlon(tile)
URlat = LLlat+15.
inUL = [LLlon,URlat]
ALEXIshape = [3750,3750]
ALEXIres = [0.004,0.004]
day_minus_coeff = [0.0504,1.384,2.415,3.586,4.475,4.455]
day_minus_b=[-0.023,0.003,0.088,0.221,0.397,0.606]
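# offset-correction coefficients (intercept and slope applied to DTRAD),
# indexed by whole hours of |overpass time - zenith time|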
#====get week date=====
nweek=(doy-1)/7
cday=nweek*7
offset=(doy-cday)/7
rday=((offset+nweek)*7)+1
avgddd=2014*1000+rday
date = "%d%03d" % (year,doy)
#=========================
offset = "calc_offset_correction"
run_correction = "run_correction"
overpass_corr_cache = os.path.join(static_path,"OVERPASS_OFFSET_CORRECTION")
ztime_fn = os.path.join(overpass_corr_path,"CURRENT_DAY_ZTIME_T%03d.dat" % tile)
gunzip(os.path.join(overpass_corr_cache,"DAY_ZTIME_T%03d.dat.gz" % tile),
out_fn=ztime_fn)
ztime= np.fromfile(ztime_fn, dtype=np.float32)
dztime= ztime.reshape([3750,3750])
convertBin2tif(ztime_fn,inUL,ALEXIshape,ALEXIres,'float32',gdal.GDT_Float32)
dtrad_cache = os.path.join(static_path,"dtrad_avg")
dtrad_fn =os.path.join(overpass_corr_path,"CURRENT_DTRAD_AVG_T%03d.dat" % tile)
gunzip(os.path.join(dtrad_cache,"DTRAD_T%03d_%d.dat.gz" % (tile,avgddd)),
out_fn=dtrad_fn)
dtrad= np.fromfile(dtrad_fn, dtype=np.float32)
dtrad= np.flipud(dtrad.reshape([3750,3750]))
# dtrad.tofile(dtrad_fn)
convertBin2tif(dtrad_fn,inUL,ALEXIshape,ALEXIres,'float32',gdal.GDT_Float32)
tile_path = os.path.join(tile_base_path,"T%03d" % tile)
tile_lut_path = os.path.join(CFSR_path,"viirs_tile_lookup_tables")
#=========================Day==============================================
#==========================================================================
out_bt_fn = glob.glob(os.path.join(tile_path,"merged_day_bt_%s_T%03d*.dat" % (date,tile)))[0]
out_view_fn1 = glob.glob(os.path.join(tile_path,"merged_day_view_%s_T%03d*.dat" % (date,tile)))[0]
time_str = out_bt_fn.split(os.sep)[-1].split("_")[5].split(".")[0]
grab_time = getGrabTime(int(time_str))
#===========use forecast hour==============================================
if (grab_time)==2400:
time = 0
else:
time = grab_time
hr,forcastHR,cfsr_doy = getGrabTimeInv(grab_time/100,doy)
cfsr_date = "%d%03d" % (year,cfsr_doy)
cfsr_tile_path = os.path.join(CFSR_path,"%d" % year,"%03d" % cfsr_doy)
#==================io filenames============================================
tprof = os.path.join(cfsr_tile_path,"temp_profile_%s_%04d.dat" % (cfsr_date,time))
qprof = os.path.join(cfsr_tile_path,"spfh_profile_%s_%04d.dat" % (cfsr_date,time))
tsfcfile = os.path.join(cfsr_tile_path,"sfc_temp_%s_%04d.dat" % (cfsr_date,time))
presfile = os.path.join(cfsr_tile_path,"sfc_pres_%s_%04d.dat" % (cfsr_date,time))
qsfcfile = os.path.join(cfsr_tile_path,"sfc_spfh_%s_%04d.dat" % (cfsr_date,time))
icoordpath = os.path.join(tile_lut_path,"CFSR_T%03d_lookup_icoord.dat" % tile)
jcoordpath = os.path.join(tile_lut_path,"CFSR_T%03d_lookup_jcoord.dat" % tile)
raw_trad_fn = os.path.join(overpass_corr_path,"RAW_TRAD1_T%03d.dat" % tile)
trad_fn = os.path.join(overpass_corr_path,"TRAD1_T%03d.dat" % tile)
out_view_fn = os.path.join(overpass_corr_path,"VIEW_ANGLE_T%03d.dat" % tile)
#==============preparing data==============================================
shutil.copyfile(out_bt_fn,raw_trad_fn)
shutil.copyfile(out_view_fn1,out_view_fn)
day_lst = np.fromfile(raw_trad_fn, dtype=np.float32)
day_lst= day_lst.reshape([3750,3750])
view = np.fromfile(out_view_fn1, dtype=np.float32)
view = np.flipud(view.reshape([3750,3750]))
view.tofile(out_view_fn)
# bt.tofile(raw_trad_fn)
# view = np.fromfile(out_view_fn, dtype=np.float32)
# view= np.flipud(view.reshape([3750,3750]))
# view.tofile(out_view_fn)
#=====get the offset for day FORTRAN VERSION===============================
# subprocess.check_output(["%s" % offset, "%d" % year, "%03d" % doy, "%s" % time_str,
# "T%03d" % tile, "%s" % ztime_fn, "%s" % raw_trad_fn,
# "%s" % dtrad_fn, "%s" % trad_fn])
###=====python version=====================================================
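# interpolate the hourly coefficients to the actual time difference
# (w1/w2 are the fractional-hour weights) and add the offset to the day LST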
ctime = grab_time/100.
tdiff_day=abs(ctime-dztime)
tindex1=np.array(tdiff_day, dtype=int)
tindex2=tindex1+1
tindex1[np.where((day_lst==-9999.) | (dtrad==-9999.))]=0
tindex2[np.where((day_lst==-9999.) | (dtrad==-9999.))]=0
w2=(tdiff_day-tindex1)
w1=(1.0-w2)
c1 = np.empty([3750,3750])
c2 = np.empty([3750,3750])
day_corr = np.empty([3750,3750])
for i in range(len(day_minus_coeff)-1):
c1[np.where(tindex1==i)]=day_minus_coeff[i]+(day_minus_b[i]*dtrad[np.where(tindex1==i)])
c2[np.where(tindex2==i+1)]=day_minus_coeff[i+1]+(day_minus_b[i+1]*dtrad[np.where(tindex2==i+1)])
day_corr[np.where(tindex1==i)] = day_lst[np.where(tindex1==i)]+(c1[np.where(tindex1==i)]*w1[np.where(tindex1==i)]+c2[np.where(tindex1==i)]*w2[np.where(tindex1==i)])
day_corr[np.where(dtrad==-9999.)]=-9999.
day_corr = np.array(np.flipud(day_corr),dtype='Float32')
day_corr.tofile(trad_fn)
convertBin2tif(trad_fn,inUL,ALEXIshape,ALEXIres,'float32',gdal.GDT_Float32)
#=======run atmospheric correction=========================================
outfn = os.path.join(tile_path,"FINAL_DAY_LST_%s_T%03d.dat" % (date,tile))
out = subprocess.check_output(["%s" % run_correction,"%s" % tprof,
"%s" % qprof,"%s" % tsfcfile,
"%s" % presfile, "%s" % qsfcfile,
"%s" % icoordpath, "%s" % jcoordpath,
"%s" % trad_fn,"%s" % out_view_fn, "%s" % outfn])
# print out
convertBin2tif(outfn,inUL,ALEXIshape,ALEXIres,'float32',gdal.GDT_Float32)
#======Night===============================================================
#==========================================================================
out_bt_fn = glob.glob(os.path.join(tile_path,"merged_night_bt_%s_T%03d*.dat" % (date,tile)))[0]
out_view_fn1 = glob.glob(os.path.join(tile_path,"merged_night_view_%s_T%03d*.dat" % (date,tile)))[0]
time_str = out_bt_fn.split(os.sep)[-1].split("_")[5].split(".")[0]
grab_time = getGrabTime(int(time_str))
# use forecast hour
if (grab_time)==2400:
time = 0
else:
time = grab_time
hr,forcastHR,cfsr_doy = getGrabTimeInv(grab_time/100,doy)
cfsr_date = "%d%03d" % (year,cfsr_doy)
cfsr_tile_path = os.path.join(CFSR_path,"%d" % year,"%03d" % cfsr_doy)
#======io filenames========================================================
tprof = os.path.join(cfsr_tile_path,"temp_profile_%s_%04d.dat" % (cfsr_date,time))
qprof = os.path.join(cfsr_tile_path,"spfh_profile_%s_%04d.dat" % (cfsr_date,time))
tsfcfile = os.path.join(cfsr_tile_path,"sfc_temp_%s_%04d.dat" % (cfsr_date,time))
presfile = os.path.join(cfsr_tile_path,"sfc_pres_%s_%04d.dat" % (cfsr_date,time))
qsfcfile = os.path.join(cfsr_tile_path,"sfc_spfh_%s_%04d.dat" % (cfsr_date,time))
icoordpath = os.path.join(tile_lut_path,"CFSR_T%03d_lookup_icoord.dat" % tile)
jcoordpath = os.path.join(tile_lut_path,"CFSR_T%03d_lookup_jcoord.dat" % tile)
trad_fn = os.path.join(overpass_corr_path,"TRAD1_T%03d.dat" % tile)
out_view_fn = os.path.join(overpass_corr_path,"VIEW_ANGLE_T%03d.dat" % tile)
#=======run atmospheric correction=========================================
# shutil.copyfile(out_bt_fn,trad_fn)
bt = np.fromfile(out_bt_fn, dtype=np.float32)
bt= np.flipud(bt.reshape([3750,3750]))
bt.tofile(trad_fn)
# shutil.copyfile(out_view_fn1,out_view_fn)
view = np.fromfile(out_view_fn1, dtype=np.float32)
view = np.flipud(view.reshape([3750,3750]))
view.tofile(out_view_fn)
outfn = os.path.join(tile_path,"lst_%s_T%03d_%s.dat" % (date,tile,time_str))
outfn = os.path.join(tile_path,"FINAL_NIGHT_LST_%s_T%03d.dat" % (date,tile))
out = subprocess.check_output(["%s" % run_correction,"%s" % tprof,
"%s" % qprof,"%s" % tsfcfile,
"%s" % presfile, "%s" % qsfcfile,
"%s" % icoordpath, "%s" % jcoordpath,
"%s" % trad_fn,"%s" % out_view_fn, "%s" % outfn])
convertBin2tif(outfn,inUL,ALEXIshape,ALEXIres,'float32',gdal.GDT_Float32)
def atmosCorrectPython(tile,year,doy):
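"""Pure-Python version of the atmospheric correction: builds layer
emissivities and optical depths from the CFSR profiles, derives the
along-path transmissivity and downwelling sky radiance, and inverts the
Planck function to obtain the surface radiometric temperature
(FINAL_DAY_LST / FINAL_NIGHT_LST) for the tile."""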
tile_path = os.path.join(tile_base_path,"T%03d" % tile)
LLlat,LLlon = tile2latlon(tile)
URlat = LLlat+15.
inUL = [LLlon,URlat]
ALEXIshape = [3750,3750]
ALEXIres = [0.004,0.004]
day_minus_coeff = [0.0504,1.384,2.415,3.586,4.475,4.455]
day_minus_b=[-0.023,0.003,0.088,0.221,0.397,0.606]
#====get week date=====
nweek=(doy-1)/7
cday=nweek*7
offset=(doy-cday)/7
rday=((offset+nweek)*7)+1
avgddd=2014*1000+rday
date = "%d%03d" % (year,doy)
#=========================
istart = abs(-89.875+URlat)*4
addArray = np.floor(np.array(range(3750))*0.004/0.25)
icor = istart+addArray
icormat = np.repeat(np.reshape(icor,[icor.size,1]),3750,axis=1)
icormat = icormat.T
icormat = np.array(icormat,dtype='int32')
icormat = np.reshape(icormat,[3750*3750,1])
jstart = (180+LLlon)*4
jcor = jstart+addArray
jcormat = np.repeat(np.reshape(jcor,[jcor.size,1]),3750,axis=1)
jcormat = np.array(jcormat,dtype='int32')
jcormat = np.reshape(jcormat,[3750*3750,1])
#=========================Day==============================================
#==========================================================================
out_bt_fn = glob.glob(os.path.join(tile_path,"merged_day_bt_%s_T%03d*.dat" % (date,tile)))
out_view_fn1 = glob.glob(os.path.join(tile_path,"merged_day_view_%s_T%03d*.dat" % (date,tile)))
out_bt_fn = out_bt_fn[0]
out_view_fn1 = out_view_fn1[0]
time_str = out_bt_fn.split(os.sep)[-1].split("_")[5].split(".")[0]
grab_time = getGrabTime(int(time_str))
#===========use forecast hour==============================================
if (grab_time)==2400:
time = 0
else:
time = grab_time
hr,forcastHR,cfsr_doy = getGrabTimeInv(grab_time/100,doy)
cfsr_date = "%d%03d" % (year,cfsr_doy)
cfsr_tile_path = os.path.join(CFSR_path,"%d" % year,"%03d" % cfsr_doy)
temp_prof_fn = os.path.join(cfsr_tile_path,"temp_profile_%s_%04d.dat" % (cfsr_date,time))
spfh_prof_fn = os.path.join(cfsr_tile_path,"spfh_profile_%s_%04d.dat" % (cfsr_date,time))
sfc_temp_fn = os.path.join(cfsr_tile_path,"sfc_temp_%s_%04d.dat" % (cfsr_date,time))
sfc_pres_fn = os.path.join(cfsr_tile_path,"sfc_pres_%s_%04d.dat" % (cfsr_date,time))
sfc_spfh_fn = os.path.join(cfsr_tile_path,"sfc_spfh_%s_%04d.dat" % (cfsr_date,time))
overpass_corr_cache = os.path.join(static_path,"OVERPASS_OFFSET_CORRECTION")
ztime_fn = os.path.join(overpass_corr_path,"CURRENT_DAY_ZTIME_T%03d.dat" % tile)
gunzip(os.path.join(overpass_corr_cache,"DAY_ZTIME_T%03d.dat.gz" % tile),
out_fn=ztime_fn)
ztime= np.fromfile(ztime_fn, dtype=np.float32)
dztime= ztime.reshape([3750,3750])
dtrad_cache = os.path.join(static_path,"dtrad_avg")
dtrad_fn =os.path.join(overpass_corr_path,"CURRENT_DTRAD_AVG_T%03d.dat" % tile)
gunzip(os.path.join(dtrad_cache,"DTRAD_T%03d_%d.dat.gz" % (tile,avgddd)),
out_fn=dtrad_fn)
dtrad= np.fromfile(dtrad_fn, dtype=np.float32)
dtrad= np.flipud(dtrad.reshape([3750,3750]))
convertBin2tif(ztime_fn,inUL,ALEXIshape,ALEXIres,'float32',gdal.GDT_Float32)
convertBin2tif(dtrad_fn,inUL,ALEXIshape,ALEXIres,'float32',gdal.GDT_Float32)
#==============preparing data==============================================
day_lst = np.fromfile(out_bt_fn, dtype=np.float32)
day_lst= day_lst.reshape([3750,3750])
###=====python version=====================================================
ctime = int(time_str)/100.
tdiff_day=abs(ctime-dztime)
tindex1=np.array(tdiff_day, dtype=int)
tindex2=tindex1+1
tindex1[np.where((day_lst==-9999.) | (dtrad==-9999.))]=0
tindex2[np.where((day_lst==-9999.) | (dtrad==-9999.))]=0
w2=(tdiff_day-tindex1)
w1=(1.0-w2)
c1 = np.empty([3750,3750])
c2 = np.empty([3750,3750])
day_corr = np.empty([3750,3750])
for i in range(1,len(day_minus_coeff)-1):
c1[np.where(tindex1==i)]=day_minus_coeff[i]+(day_minus_b[i]*dtrad[np.where(tindex1==i)])
c2[np.where(tindex2==i+1)]=day_minus_coeff[i+1]+(day_minus_b[i+1]*dtrad[np.where(tindex2==i+1)])
day_corr[np.where(tindex1==i)] = day_lst[np.where(tindex1==i)]+(c1[np.where(tindex1==i)]*w1[np.where(tindex1==i)]+c2[np.where(tindex1==i)]*w2[np.where(tindex1==i)])
day_corr[np.where(tindex1<1)] = day_lst[np.where(tindex1<1)]+c2[np.where(tindex1<1)]*w2[np.where(tindex1<1)]
day_corr[np.where(dtrad==-9999.)]=-9999.
day_corr = np.array(np.flipud(day_corr),dtype='Float32')
inProjection = '+proj=longlat +ellps=WGS84 +datum=WGS84 +no_defs'
outfn = os.path.join(tile_path,"tindex1.tif")
writeArray2Tiff(tindex1,ALEXIres,inUL,inProjection,outfn,gdal.GDT_Float32)
#=======run atmospheric correction=========================================
view = np.fromfile(out_view_fn1, dtype=np.float32)
view = view.reshape([3750,3750])
spfh_prof = np.fromfile(spfh_prof_fn, dtype=np.float32)
spfh_prof = spfh_prof.reshape([21,720,1440])
temp_prof = np.fromfile(temp_prof_fn, dtype=np.float32)
temp_prof = temp_prof.reshape([21,720,1440])
temp_prof1 = np.empty([21,720,1440])
for i in range(21):
temp_prof1[i,:,:] = np.flipud(np.squeeze(temp_prof[i,:,:]))
spfh_prof1 = np.empty([21,720,1440])
for i in range(21):
spfh_prof1[i,:,:] = np.flipud(np.squeeze(spfh_prof[i,:,:]))
trad = day_corr
trad = day_lst
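# note: the assignment above puts the uncorrected day_lst (not day_corr)
# into trad, so the raw merged BT is what gets atmospherically corrected below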
trad = np.reshape(trad,[3750*3750,1])
sfc_temp = np.fromfile(sfc_temp_fn, dtype=np.float32)
sfc_temp = np.flipud(sfc_temp.reshape([720,1440]))
sfc_pres = np.fromfile(sfc_pres_fn, dtype=np.float32)
sfc_pres = np.flipud(sfc_pres.reshape([720,1440]))
sfc_spfh = np.fromfile(sfc_spfh_fn, dtype=np.float32)
sfc_spfh = np.flipud(sfc_spfh.reshape([720,1440]))
sfc_temp = np.reshape(sfc_temp,[720*1440,1])
sfc_pres = np.reshape(sfc_pres,[720*1440,1])
sfc_spfh = np.reshape(sfc_spfh,[720*1440,1])
temp_prof = np.reshape(temp_prof1,[21,720*1440]).T
spfh_prof = np.reshape(spfh_prof1,[21,720*1440]).T
view1 = np.reshape(view,[3750*3750,1])
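# pressure levels (hPa); ta converts the profile to air temperature via the
# Exner factor (p/1000)^0.286 (temp_prof is treated as potential temperature)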
pres = np.array([1000,975,950,925,900,850,800,750,700,650,600,550,500,450,400,350,300,250,200,150,100])
ta=temp_prof/(1000./pres)**0.286
ei=spfh_prof*temp_prof/(.378*spfh_prof+.622)
anv=873.6
epsln=0.98
emis = np.empty([720*1440,21])
tau = np.empty([720*1440,21])
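# layer-mean Planck emission and layer optical depth, the latter via a
# Simpson-style (1,4,1)/6 weighting of dtaudp across each pressure layer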
for i in range(20):
emis[:,i]=0.5*(planck(ta[:,i],anv)+planck(ta[:,i+1],anv))
tau[:,i]=(pres[i]-pres[i+1])*(dtaudp(anv,ta[:,i],emis[:,i],pres[i])+
dtaudp(anv,ta[:,i+1],emis[:,i+1],pres[i+1]) +4.*
dtaudp(anv,(ta[:,i]+ta[:,i+1])/2.,(emis[:,i]+emis[:,i+1])/2.,
(pres[i]-pres[i+1])/2.))/6
optd = np.sum(tau,axis=1)
cs=np.cos(np.deg2rad(view1)/np.deg2rad(57.29))
optd = np.array(optd,dtype=np.float32)
optd = np.reshape(optd,[720,1440])
optd = optd[icormat,jcormat]
a = -optd/cs
trans=np.exp(a)
#=========angular invariant sky================
cs=np.cos(np.deg2rad(0.)/np.deg2rad(57.29))
a = -tau[:,0]/cs
sky1 = np.empty([720*1440,21])
# downwelling sky radiance: initialise with the top layer and accumulate
# level by level down to the surface value sky1[:,20] used below
sky1[:,0]=emis[:,0]*(1.0-np.exp(a))
for i in range(1,21):
sky1[:,i]=emis[:,i-1]+(sky1[:,i-1]-emis[:,i-1])*np.exp(-tau[:,i-1]/cs)
# put the level axis first before unflattening to the 720x1440 CFSR grid,
# then sample at the tile's i/j indices
sky1 = np.reshape(sky1.T,[21,720,1440])
sky1 = np.squeeze(sky1[:,icormat,jcormat]).T
sky1 = np.reshape(sky1,[3750*3750,21])
#====final results=============================
grndrad1=(planck(trad[:,0],anv)-sky1[:,20]*(1.0+trans[:,0]*(1.0-epsln)))
trad11=invplanck(grndrad1/epsln,anv)
trad11 = np.reshape(trad11,[3750,3750])
trad11[trad11<0]=-9999.
outfn = os.path.join(tile_path,"FINAL_DAY_LST_%s_T%03d.dat" % (date,tile))
trad11 = np.array(trad11, dtype=np.float32)
trad11.tofile(outfn)
#======Night===============================================================
#==========================================================================
out_bt_fn = glob.glob(os.path.join(tile_path,"merged_night_bt_%s_T%03d*.dat" % (date,tile)))
out_view_fn1 = glob.glob(os.path.join(tile_path,"merged_night_view_%s_T%03d*.dat" % (date,tile)))
out_bt_fn = out_bt_fn[0]
out_view_fn1 = out_view_fn1[0]
#=======run atmospheric correction=========================================
# shutil.copyfile(out_bt_fn,trad_fn)
bt = np.fromfile(out_bt_fn, dtype=np.float32)
trad = bt.reshape([3750,3750])
trad = np.reshape(trad,[3750*3750,1])
#=======run atmospheric correction=========================================
view = np.fromfile(out_view_fn1, dtype=np.float32)
view = view.reshape([3750,3750])
spfh_prof = np.fromfile(spfh_prof_fn, dtype=np.float32)
spfh_prof = spfh_prof.reshape([21,720,1440])
temp_prof = np.fromfile(temp_prof_fn, dtype=np.float32)
temp_prof = temp_prof.reshape([21,720,1440])
temp_prof1 = np.empty([21,720,1440])
for i in range(21):
temp_prof1[i,:,:] = np.flipud(np.squeeze(temp_prof[i,:,:]))
spfh_prof1 = np.empty([21,720,1440])
for i in range(21):
spfh_prof1[i,:,:] = np.flipud(np.squeeze(spfh_prof[i,:,:]))
sfc_temp = np.fromfile(sfc_temp_fn, dtype=np.float32)
sfc_temp = np.flipud(sfc_temp.reshape([720,1440]))
sfc_pres = np.fromfile(sfc_pres_fn, dtype=np.float32)
sfc_pres = np.flipud(sfc_pres.reshape([720,1440]))
sfc_spfh = np.fromfile(sfc_spfh_fn, dtype=np.float32)
sfc_spfh = np.flipud(sfc_spfh.reshape([720,1440]))
sfc_temp = np.reshape(sfc_temp,[720*1440,1])
sfc_pres = np.reshape(sfc_pres,[720*1440,1])
sfc_spfh = np.reshape(sfc_spfh,[720*1440,1])
temp_prof = np.reshape(temp_prof1,[21,720*1440]).T
spfh_prof = np.reshape(spfh_prof1,[21,720*1440]).T
view1 = np.reshape(view,[3750*3750,1])
pres = np.array([1000,975,950,925,900,850,800,750,700,650,600,550,500,450,400,350,300,250,200,150,100])
ta=temp_prof/(1000./pres)**0.286
ei=spfh_prof*temp_prof/(.378*spfh_prof+.622)
anv=873.6
epsln=0.98
emis = np.empty([720*1440,21])
tau = np.empty([720*1440,21])
for i in range(20):
emis[:,i]=0.5*(planck(ta[:,i],anv)+planck(ta[:,i+1],anv))
tau[:,i]=(pres[i]-pres[i+1])*(dtaudp(anv,ta[:,i],emis[:,i],pres[i])+
dtaudp(anv,ta[:,i+1],emis[:,i+1],pres[i+1]) +4.*
dtaudp(anv,(ta[:,i]+ta[:,i+1])/2.,(emis[:,i]+emis[:,i+1])/2.,
(pres[i]-pres[i+1])/2.))/6
optd = np.sum(tau,axis=1)
cs=np.cos(np.deg2rad(view1)/np.deg2rad(57.29))
optd = np.array(optd,dtype=np.float32)
optd = np.reshape(optd,[720,1440])
optd = optd[icormat,jcormat]
a = -optd/cs
trans=np.exp(a)
#=========angular invariant sky================
cs=np.cos(np.deg2rad(0.)/np.deg2rad(57.29))
a = -tau[:,0]/cs
sky1 = np.empty([720*1440,21])
# downwelling sky radiance: initialise with the top layer and accumulate
# level by level down to the surface value sky1[:,20] used below
sky1[:,0]=emis[:,0]*(1.0-np.exp(a))
for i in range(1,21):
sky1[:,i]=emis[:,i-1]+(sky1[:,i-1]-emis[:,i-1])*np.exp(-tau[:,i-1]/cs)
# put the level axis first before unflattening to the 720x1440 CFSR grid,
# then sample at the tile's i/j indices
sky1 = np.reshape(sky1.T,[21,720,1440])
sky1 = np.squeeze(sky1[:,icormat,jcormat]).T
sky1 = np.reshape(sky1,[3750*3750,21])
#====final results=============================
grndrad1=(planck(trad[:,0],anv)-sky1[:,20]*(1.0+trans[:,0]*(1.0-epsln)))
trad11=invplanck(grndrad1/epsln,anv)
trad11 = np.reshape(trad11,[3750,3750])
trad11[trad11<0]=-9999.
outfn = os.path.join(tile_path,"FINAL_NIGHT_LST_%s_T%03d.dat" % (date,tile))
trad11 = np.array(trad11, dtype=np.float32)
trad11.tofile(outfn)
convertBin2tif(outfn,inUL,ALEXIshape,ALEXIres,'float32',gdal.GDT_Float32)
def pred_dtrad(tile,year,doy):
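"""Predict DTRAD and the time-2 day LST for a tile: converts the static
inputs (precip, fmax, terrain, LAI, DTIME) and the corrected day/night LST
to GeoTIFF, runs the external per-precipitation-class predictors
(final_dtrad_p*), merges their output into FINAL_DTRAD, derives
FINAL_DAY_LST_TIME2 with calc_predicted_trad2, and copies both products to
the DTRAD/ and LST2/ testing folders."""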
LLlat,LLlon = tile2latlon(tile)
URlat = LLlat+15.
inUL = [LLlon,URlat]
ALEXIshape = [3750,3750]
ALEXIres = [0.004,0.004]
tile_path = os.path.join(tile_base_path,"T%03d" % tile)
final_dtrad_p250_fmax0 = 'final_dtrad_p250_fmax0'
final_dtrad_p250_fmax20 = 'final_dtrad_p250_fmax20'
final_dtrad_p500 = 'final_dtrad_p500'
final_dtrad_p750 = 'final_dtrad_p750'
final_dtrad_p1000 = 'final_dtrad_p1000'
final_dtrad_p2000 = 'final_dtrad_p2000'
merge = 'merge'
calc_predicted_trad2= 'calc_predicted_trad2'
#====create processing folder========
# dtrad_path = os.path.join(processing_path,'DTRAD_PREDICTION')
# if not os.path.exists(dtrad_path):
# os.makedirs(dtrad_path)
date = "%d%03d" % (year,doy)
dtimedates = np.array(range(1,366,7))
rday = dtimedates[dtimedates>=doy][0]
risedoy = rday
laidates = np.array(range(1,366,4))
rday = laidates[laidates>=doy][0]
laiddd="%d%03d" %(year,rday)
precip_fn = os.path.join(base,'STATIC','PRECIP','PRECIP_T%03d.dat' % tile)
fmax_fn = os.path.join(base,'STATIC','FMAX','FMAX_T%03d.dat' % tile)
terrain_fn = os.path.join(base,'STATIC','TERRAIN_SD','TERRAIN_T%03d.dat' % tile)
daylst_fn = os.path.join(base,'TILES','T%03d' % tile,'FINAL_DAY_LST_%s_T%03d.dat' % (date,tile))
nightlst_fn = os.path.join(base,'TILES','T%03d' % tile,'FINAL_NIGHT_LST_%s_T%03d.dat' % (date,tile))
# lai_fn = os.path.join(base,'STATIC','LAI','MLAI_%s_T%03d.dat' % (laiddd,tile))
lai_fn = os.path.join(base,'STATIC','LAI','MLAI_2015%03d_T%03d.dat' % (rday,tile))
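# note: a fixed 2015 MLAI composite is read here, so the year-specific
# laiddd computed above is not used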
dtime_fn = os.path.join(base,'STATIC','DTIME','DTIME_2014%03d_T%03d.dat' % (risedoy,tile))
# lst_day = np.fromfile(daylst_fn, dtype=np.float32)
# lst_day= np.flipud(lst_day.reshape([3750,3750]))
# lst_day.tofile(daylst_fn)
convertBin2tif(daylst_fn,inUL,ALEXIshape,ALEXIres,'float32',gdal.GDT_Float32)
# lst_night = np.fromfile(nightlst_fn, dtype=np.float32)
# lst_night= np.flipud(lst_night.reshape([3750,3750]))
# lst_night.tofile(nightlst_fn)
convertBin2tif(nightlst_fn,inUL,ALEXIshape,ALEXIres,'float32',gdal.GDT_Float32)
# precip = np.fromfile(precip_fn, dtype=np.float32)
# precip= np.flipud(precip.reshape([3750,3750]))
# plt.imshow(precip)
# precip.tofile(precip_fn)
convertBin2tif(precip_fn,inUL,ALEXIshape,ALEXIres,'float32',gdal.GDT_Float32)
# fmax = np.fromfile(fmax_fn, dtype=np.float32)
# fmax= np.flipud(fmax.reshape([3750,3750]))
# plt.imshow(fmax, vmin=0, vmax=0.8)
# fmax.tofile(fmax_fn)
convertBin2tif(fmax_fn,inUL,ALEXIshape,ALEXIres,'float32',gdal.GDT_Float32)
# terrain = np.fromfile(terrain_fn, dtype=np.float32)
# terrain= np.flipud(terrain.reshape([3750,3750]))
# plt.imshow(terrain)
# terrain.tofile(terrain_fn)
convertBin2tif(terrain_fn,inUL,ALEXIshape,ALEXIres,'float32',gdal.GDT_Float32)
# lai = np.fromfile(lai_fn, dtype=np.float32)
# lai= np.flipud(lai.reshape([3750,3750]))
# plt.imshow(lai,vmin=0,vmax = 3)
# lai.tofile(lai_fn)
convertBin2tif(lai_fn,inUL,ALEXIshape,ALEXIres,'float32',gdal.GDT_Float32)
# dtime = np.fromfile(dtime_fn, dtype=np.float32)
# dtime= np.flipud(dtime.reshape([3750,3750]))
# plt.imshow(dtime,vmin=3,vmax=4)
# dtime.tofile(dtime_fn)
convertBin2tif(dtime_fn,inUL,ALEXIshape,ALEXIres,'float32',gdal.GDT_Float32)
fn1 = os.path.join(base,'PROCESSING','DTRAD_PREDICTION','comp1_T%03d.dat' % tile)
fn2 = os.path.join(base,'PROCESSING','DTRAD_PREDICTION','comp2_T%03d.dat' % tile)
fn3 = os.path.join(base,'PROCESSING','DTRAD_PREDICTION','comp3_T%03d.dat' % tile)
fn4 = os.path.join(base,'PROCESSING','DTRAD_PREDICTION','comp4_T%03d.dat' % tile)
fn5 = os.path.join(base,'PROCESSING','DTRAD_PREDICTION','comp5_T%03d.dat' % tile)
fn6 = os.path.join(base,'PROCESSING','DTRAD_PREDICTION','comp6_T%03d.dat' % tile)
subprocess.check_output(["%s" % final_dtrad_p250_fmax0,"%s" % precip_fn,
"%s" % fmax_fn, "%s" % terrain_fn, "%s" % daylst_fn,
"%s" % nightlst_fn, "%s" % lai_fn, "%s" % dtime_fn,
"%s" % fn1])
subprocess.check_output(["%s" % final_dtrad_p250_fmax20,"%s" % precip_fn,
"%s" % fmax_fn, "%s" % terrain_fn, "%s" % daylst_fn,
"%s" % nightlst_fn, "%s" % lai_fn, "%s" % dtime_fn,
"%s" % fn2])
subprocess.check_output(["%s" % final_dtrad_p500,"%s" % precip_fn,
"%s" % fmax_fn, "%s" % terrain_fn, "%s" % daylst_fn,
"%s" % nightlst_fn, "%s" % lai_fn, "%s" % dtime_fn,
"%s" % fn3])
subprocess.check_output(["%s" % final_dtrad_p750,"%s" % precip_fn,
"%s" % fmax_fn, "%s" % terrain_fn, "%s" % daylst_fn,
"%s" % nightlst_fn, "%s" % lai_fn, "%s" % dtime_fn,
"%s" % fn4])
subprocess.check_output(["%s" % final_dtrad_p1000,"%s" % precip_fn,
"%s" % fmax_fn, "%s" % terrain_fn, "%s" % daylst_fn,
"%s" % nightlst_fn, "%s" % lai_fn, "%s" % dtime_fn,
"%s" % fn5])
subprocess.check_output(["%s" % final_dtrad_p2000,"%s" % precip_fn,
"%s" % fmax_fn, "%s" % terrain_fn, "%s" % daylst_fn,
"%s" % nightlst_fn, "%s" % lai_fn, "%s" % dtime_fn,
"%s" % fn6])
dtrad_fn = os.path.join(tile_path,
"FINAL_DTRAD_%s_T%03d.dat" % ( date, tile))
subprocess.check_output(["%s" % merge,"%s" % fn1, "%s" % fn2,"%s" % fn3,
"%s" % fn4, "%s" % fn5, "%s" % fn6, "%s" % dtrad_fn])
lst_fn = os.path.join(tile_path,
"FINAL_DAY_LST_TIME2_%s_T%03d.dat" % ( date, tile))
subprocess.check_output(["%s" % calc_predicted_trad2,"%s" % nightlst_fn,
"%s" % daylst_fn, "%s" % lai_fn, "%s" % lst_fn ])
#================+TESTING==================================================
testing_path = os.path.join(tile_base_path,'DTRAD','%03d' % doy)
# if not os.path.exists(testing_path):
# os.makedirs(testing_path)
testing_fn = os.path.join(testing_path,'FINAL_DTRAD_%s_T%03d.dat' % (date,tile))
# dtime = np.fromfile(dtrad_fn, dtype=np.float32)
# dtime= np.flipud(dtime.reshape([3750,3750]))
# dtime.tofile(dtrad_fn)
shutil.copyfile(dtrad_fn,testing_fn)
convertBin2tif(testing_fn,inUL,ALEXIshape,ALEXIres,np.float32,gdal.GDT_Float32)
# dtime = np.fromfile(lst_fn, dtype=np.float32)
# dtime= np.flipud(dtime.reshape([3750,3750]))
# dtime.tofile(lst_fn)
testing_path = os.path.join(tile_base_path,'LST2','%03d' % doy)
# if not os.path.exists(testing_path):
# os.makedirs(testing_path)
testing_fn = os.path.join(testing_path,'FINAL_DAY_LST_TIME2_%s_T%03d.dat' % (date,tile))
shutil.copyfile(lst_fn,testing_fn)
convertBin2tif(testing_fn,inUL,ALEXIshape,ALEXIres,np.float32,gdal.GDT_Float32)
#====version 0.2 implemented on Dec. 6, 2017=======
def pred_dtradV2(tile,year,doy):
LLlat,LLlon = tile2latlon(tile)
URlat = LLlat+15.
inUL = [LLlon,URlat]
ALEXIshape = [3750,3750]
ALEXIres = [0.004,0.004]
#====create processing folder========
dtrad_path = os.path.join(static_path,'DTRAD_TREES')
date = "%d%03d" % (year,doy)
dtimedates = np.array(range(1,366,7))
rday = dtimedates[dtimedates>=doy][0]
risedoy = rday
laidates = np.array(range(1,366,4))
rday = laidates[laidates>=doy][0]
laiddd="%d%03d" %(year,rday)
#=====set up input dataframe===============================================
precip_fn = os.path.join(base,'STATIC','PRECIP','PRECIP_T%03d.tif' % tile)
fmax_fn = os.path.join(base,'STATIC','FMAX','FMAX_T%03d.tif' % tile)
sd_fn = os.path.join(base,'STATIC','TERRAIN_SD','TERRAIN_T%03d.tif' % tile)
terrain_fn = os.path.join(base,'STATIC','ELEVATION','ELEVATION_T%03d.tif' % tile)
daylst_fn = os.path.join(base,'TILES','T%03d' % tile,'FINAL_DAY_LST_%s_T%03d.dat' % (date,tile))
nightlst_fn = os.path.join(base,'TILES','T%03d' % tile,'FINAL_NIGHT_LST_%s_T%03d.dat' % (date,tile))
# lai_fn = os.path.join(base,'STATIC','LAI','MLAI_%s_T%03d.dat' % (laiddd,tile))
lai_fn = os.path.join(base,'STATIC','LAI','MLAI_2015%03d_T%03d.tif' % (rday,tile))
dtime_fn = os.path.join(base,'STATIC','DTIME','DTIME_2014%03d_T%03d.tif' % (risedoy,tile))
lc_crop_pert_fn = os.path.join(base,'STATIC','LC_PERT','LC_PERT_crop_T%03d.tif' % tile)
lc_grass_pert_fn = os.path.join(base,'STATIC','LC_PERT','LC_PERT_grass_T%03d.tif' % tile)
lc_forest_pert_fn = os.path.join(base,'STATIC','LC_PERT','LC_PERT_forest_T%03d.tif' % tile)
lc_shrub_pert_fn = os.path.join(base,'STATIC','LC_PERT','LC_PERT_shrub_T%03d.tif' % tile)
lc_bare_pert_fn = os.path.join(base,'STATIC','LC_PERT','LC_PERT_bare_T%03d.tif' % tile)
convertBin2tif(daylst_fn,inUL,ALEXIshape,ALEXIres,'float32',gdal.GDT_Float32)
g = gdal.Open(daylst_fn[:-3]+"tif")
daylst = g.ReadAsArray()
daylst = np.reshape(daylst,[daylst.size])
convertBin2tif(nightlst_fn,inUL,ALEXIshape,ALEXIres,'float32',gdal.GDT_Float32)
g = gdal.Open(nightlst_fn[:-3]+"tif")
nightlst = g.ReadAsArray()
nightlst = np.reshape(nightlst,[nightlst.size])
g = gdal.Open(precip_fn)
precip = g.ReadAsArray()
precip = np.reshape(precip,[precip.size])
g = gdal.Open(fmax_fn)
fmax = g.ReadAsArray()
fmax = np.reshape(fmax,[fmax.size])
g = gdal.Open(terrain_fn)
terrain = g.ReadAsArray()
terrain = np.reshape(terrain,[terrain.size])
g = gdal.Open(sd_fn)
sd = g.ReadAsArray()
sd = np.reshape(sd,[sd.size])
convertBin2tif(lai_fn[:-3]+'dat',inUL,ALEXIshape,ALEXIres,'float32',gdal.GDT_Float32,True)
g = gdal.Open(lai_fn)
lai = g.ReadAsArray()
lai = np.reshape(lai,[lai.size])
convertBin2tif(dtime_fn[:-3]+'dat',inUL,ALEXIshape,ALEXIres,'float32',gdal.GDT_Float32,True)
g = gdal.Open(dtime_fn)
dtime = g.ReadAsArray()
dtime = np.reshape(dtime,[dtime.size])
daynight = daylst-nightlst
outDict = {"daynight":daynight,"day":daylst,"night":nightlst,
"sd":sd,"lai":lai,"dtime":dtime,"precip":precip,
"fmax":fmax,"terrain":terrain}
outDF = pd.DataFrame.from_dict(outDict)
#======read cubist models built by Chris Hain==============================
#----crop1------
crop1_model_fn = os.path.join(dtrad_path,"crop1.model")
crop1 = get_results_cubist_model(crop1_model_fn,outDF)
#----crop2------
crop2_model_fn = os.path.join(dtrad_path,"crop2.model")
crop2 = get_results_cubist_model(crop2_model_fn,outDF)
#----crop3------
crop3_model_fn = os.path.join(dtrad_path,"crop3.model")
crop3 = get_results_cubist_model(crop3_model_fn,outDF)
#----grass1------
grass1_model_fn = os.path.join(dtrad_path,"grass1.model")
grass1 = get_results_cubist_model(grass1_model_fn,outDF)
#----grass2------
grass2_model_fn = os.path.join(dtrad_path,"grass2.model")
grass2 = get_results_cubist_model(grass2_model_fn,outDF)
#----grass3------
grass3_model_fn = os.path.join(dtrad_path,"grass3.model")
grass3 = get_results_cubist_model(grass3_model_fn,outDF)
#----forest1------
forest1_model_fn = os.path.join(dtrad_path,"forest1.model")
forest1 = get_results_cubist_model(forest1_model_fn,outDF)
#----forest2------
forest2_model_fn = os.path.join(dtrad_path,"forest2.model")
forest2 = get_results_cubist_model(forest2_model_fn,outDF)
#----forest3------
forest3_model_fn = os.path.join(dtrad_path,"forest3.model")
forest3 = get_results_cubist_model(forest3_model_fn,outDF)
#----shrub1------
shrub1_model_fn = os.path.join(dtrad_path,"shrub1.model")
shrub1 = get_results_cubist_model(shrub1_model_fn,outDF)
#----shrub2------
shrub2_model_fn = os.path.join(dtrad_path,"shrub2.model")
shrub2 = get_results_cubist_model(shrub2_model_fn,outDF)
#----bare1------
bare1_model_fn = os.path.join(dtrad_path,"bare1.model")
bare1 = get_results_cubist_model(bare1_model_fn,outDF)
#====get dtrad============================================================
g = gdal.Open(lc_crop_pert_fn)
crop_pert = g.ReadAsArray()
crop_pert = np.reshape(crop_pert,[crop_pert.size])
g = gdal.Open(lc_grass_pert_fn)
grass_pert = g.ReadAsArray()
grass_pert = np.reshape(grass_pert,[grass_pert.size])
g = gdal.Open(lc_forest_pert_fn)
forest_pert = g.ReadAsArray()
forest_pert = np.reshape(forest_pert,[forest_pert.size])
g = gdal.Open(lc_shrub_pert_fn)
shrub_pert = g.ReadAsArray()
shrub_pert = np.reshape(shrub_pert,[shrub_pert.size])
g = gdal.Open(lc_bare_pert_fn)
bare_pert = g.ReadAsArray()
bare_pert = np.reshape(bare_pert,[bare_pert.size])
dtrad = np.tile(-9999.,[3750*3750])
ind1 = ((precip >= 0) & (precip < 600) & (crop1 != -9999.))
    dtrad[ind1] = (crop3[ind1]*crop_pert[ind1]
                   +forest1[ind1]*forest_pert[ind1]+shrub1[ind1]*shrub_pert[ind1]
                   +bare1[ind1]*bare_pert[ind1]+grass1[ind1]*grass_pert[ind1])
ind2 = ((precip>=600) & (precip <1200) & (crop2 != -9999.))
    dtrad[ind2] = (crop3[ind2]*crop_pert[ind2]
                   +forest2[ind2]*forest_pert[ind2]+shrub2[ind2]*shrub_pert[ind2]
                   +grass2[ind2]*grass_pert[ind2])
ind3 = ((precip >=1200) & (precip<6000) & (crop3 != -9999.))
    dtrad[ind3] = (crop3[ind3]*crop_pert[ind3]
                   +forest3[ind3]*forest_pert[ind3]+grass3[ind3]*grass_pert[ind3])
ind4 = ((dtrad <=2.0) | (dtrad > 40.))
dtrad[ind4] = -9999.
dtrad = np.reshape(dtrad,ALEXIshape)
    #======calculate lst2=====================================================
c1=-9.6463
c1x1=-0.183506 # DAY-NIGHT
c1x2=1.04281 # DAY
c1x3=-0.0529513
lst2 = np.tile(-9999.,[3750*3750])
ind = ((daynight > 0.0) & (daylst > 0.0) & (nightlst > 0.0 ) & (lai >= 0.0))
lst2[ind] = c1+(c1x1*daynight[ind])+(c1x2*daylst[ind])+(c1x3*lai[ind])
lst2 = np.reshape(lst2,ALEXIshape)
#====save outputs==========================================================
dtrad_tile_path = os.path.join(tile_base_path,'DTRAD','%03d' % doy)
dtrad_fn = os.path.join(dtrad_tile_path ,'FINAL_DTRAD_%s_T%03d.tif' % (date,tile))
inProjection = '+proj=longlat +ellps=WGS84 +datum=WGS84 +no_defs'
writeArray2Tiff(dtrad,ALEXIres,inUL,inProjection,dtrad_fn,gdal.GDT_Float32)
lst2_tile_path = os.path.join(tile_base_path,'LST2','%03d' % doy)
lst2_fn = os.path.join(lst2_tile_path,'FINAL_DAY_LST_TIME2_%s_T%03d.tif' % (date,tile))
writeArray2Tiff(lst2,ALEXIres,inUL,inProjection,lst2_fn,gdal.GDT_Float32)
def buildRNETtrees(year,doy):
dtimedates = np.array(range(1,366,7))
r7day = dtimedates[dtimedates>=doy][0]
riseddd="%d%03d" %(year,r7day)
halfdeg_sizeArr = 900*1800
#========process insol====================================================
srcfn = os.path.join(static_path,'INSOL','deg05','insol55_2011%03d.tif' % doy)
g = gdal.Open(srcfn,GA_ReadOnly)
insol= g.ReadAsArray()
insol = np.reshape(insol,[halfdeg_sizeArr])
#======process RNET========================================================
# srcfn = os.path.join(static_path,'5KM','RNET','RNET%s.dat' % riseddd)
# srcfn = os.path.join(static_path,'5KM','RNET','RNET2015%03d.dat' % r7day)
srcfn = glob.glob(os.path.join(static_path,"5KM","RNET","RNET_AVG*%s.dat" % r7day))[0]
rnet = np.fromfile(srcfn, dtype=np.float32)
rnet = np.flipud(rnet.reshape([3000,7200]))
inProjection = '+proj=longlat +ellps=WGS84 +datum=WGS84 +no_defs'
tif_fn = srcfn[:-4]+'.tif'
if not os.path.exists(tif_fn):
writeArray2Tiff(rnet,[0.05,0.05],[-180.,90.],inProjection,tif_fn,gdal.GDT_Float32)
outfn = tif_fn[:-4]+'subset.tif'
out = subprocess.check_output('gdal_translate -of GTiff -projwin -30 45 60 0 -tr 0.05 0.05 %s %s' % (tif_fn,outfn), shell=True)
g = gdal.Open(outfn,GA_ReadOnly)
rnet= g.ReadAsArray()
rnet = np.reshape(rnet,[halfdeg_sizeArr])
#======process albedo======================================================
srcfn = os.path.join(static_path,'ALBEDO','ALBEDO.tif')
g = gdal.Open(srcfn,GA_ReadOnly)
albedo = g.ReadAsArray()
albedo = np.reshape(albedo,[halfdeg_sizeArr])
#=====process LST2=========================================================
srcPath = os.path.join(tile_base_path,'LST2','%03d' % doy)
searchPath = os.path.join(srcPath,'FINAL_DAY_LST_TIME2*.tif')
outfn = os.path.join(srcPath,'LST2.vrt')
outfn05 = outfn[:-4]+'05.tif'
subprocess.check_output('gdalbuildvrt %s %s' % (outfn, searchPath), shell=True)
out = subprocess.check_output('gdal_translate -of GTiff -tr 0.05 0.05 %s %s' % (outfn,outfn05), shell=True)
g = gdal.Open(outfn05,GA_ReadOnly)
lst2 = g.ReadAsArray()
lst2 = np.reshape(lst2,[halfdeg_sizeArr])
#====process LWDN==========================================================
time = get_rise55(year,doy,86)
grab_time = getGrabTime(int(time)*100)
hr,forecastHR,cfsr_doy = getGrabTimeInv(grab_time/100,doy)
cfsr_date = "%d%03d" % (year,cfsr_doy)
if (grab_time)==2400:
grab_time = 0000
srcfn = os.path.join(static_path,'CFSR','%d' % year,'%03d' % cfsr_doy,'sfc_lwdn_%s_%02d00.dat' % (cfsr_date,grab_time/100))
lwdn25 = np.fromfile(srcfn, dtype=np.float32)
lwdn25 = np.flipud(lwdn25.reshape([720, 1440]))
inProjection = '+proj=longlat +ellps=WGS84 +datum=WGS84 +no_defs'
tif_fn = srcfn[:-4]+'.tif'
if not os.path.exists(tif_fn):
writeArray2Tiff(lwdn25,[0.25,0.25],[-180.,90.],inProjection,tif_fn,gdal.GDT_Float32)
outfn = tif_fn[:-4]+'05.tif'
# outfn = os.path.join(outPath,tif_fn.split(os.sep)[-1])
out = subprocess.check_output('gdal_translate -of GTiff -projwin -30 45 60 0 -tr 0.05 0.05 %s %s' % (tif_fn,outfn), shell=True)
g = gdal.Open(outfn,GA_ReadOnly)
lwdn = g.ReadAsArray()
lwdn = np.reshape(lwdn,[halfdeg_sizeArr])
#==========create fstem.data for cubist====================================
outDict = {'rnet': rnet, 'albedo':albedo, 'insol':insol, 'lwdn': lwdn, 'lst2':lst2}
inDF = pd.DataFrame.from_dict(outDict)
outDF = inDF.loc[(inDF["rnet"] > 0.0) & (inDF["albedo"] > 0.0) &
(inDF["insol"] > 0.0) & (inDF["lwdn"] > 0.0) &
(inDF["lst2"] > 0.0), ["rnet","albedo","insol","lwdn","lst2"]]
calc_rnet_tile_ctl = os.path.join(calc_rnet_path,'tiles_ctl')
# if not os.path.exists(calc_rnet_tile_ctl):
# os.makedirs(calc_rnet_tile_ctl)
file_data = os.path.join(calc_rnet_tile_ctl,'rnet.data')
outDF.to_csv(file_data , header=True, index=False,columns=["rnet",
"albedo","insol","lwdn","lst2"])
file_names = os.path.join(calc_rnet_tile_ctl,'rnet.names')
get_tiles_fstem_names(file_names)
#====run cubist============================================================
# print("running cubist...")
cubist_name = os.path.join(calc_rnet_tile_ctl,'rnet')
rnet_cub_out = subprocess.check_output("cubist -f %s -u -a -r 20" % cubist_name, shell=True)
return rnet_cub_out
def getRNETfromTrees(tile,year,doy,rnet_cub_out):
calc_rnet_tile_ctl = os.path.join(calc_rnet_path,'tiles_ctl')
cubist_name = os.path.join(calc_rnet_tile_ctl,'rnet.model')
LLlat,LLlon = tile2latlon(tile)
URlat = LLlat+15.
URlon = LLlon+15.
inUL = [LLlon,URlat]
ALEXI_shape = [3750,3750]
ALEXI_res = [0.004,0.004]
date = '%d%03d' % (year,doy)
tile_path = os.path.join(tile_base_path,"T%03d" % tile)
#====open INSOL =============================================
srcfn = os.path.join(static_path,'INSOL','deg004','insol55_2011%03d.tif' % doy)
outfn = srcfn[:-4]+'_T%03d.tif' % tile
out = subprocess.check_output('gdalwarp -overwrite -of GTiff -te %f %f %f %f -tr 0.004 0.004 %s %s' % (LLlon,LLlat,URlon,URlat,srcfn,outfn), shell=True)
g = gdal.Open(outfn,GA_ReadOnly)
insol_viirs= g.ReadAsArray()
insol_viirs = np.reshape(insol_viirs,[3750*3750])
#======process albedo======================================================
albedo_fn = os.path.join(static_path,'ALBEDO','ALBEDO_T%03d.dat' % tile)
albedo = np.fromfile(albedo_fn, dtype=np.float32)
albedo = np.reshape(albedo,[3750*3750])
#=====process LST2=========================================================
# lst_fn = os.path.join(rnet_tile_path,'LST2_%03d_%s.dat' % (tile,date))
lst2_tile_path = os.path.join(tile_base_path,'LST2','%03d' % doy)
lst_fn = os.path.join(lst2_tile_path,
"FINAL_DAY_LST_TIME2_%s_T%03d.tif" % ( date, tile))
# lst = np.fromfile(lst_fn, dtype=np.float32)
g = gdal.Open(lst_fn,GA_ReadOnly)
lst= g.ReadAsArray()
lst2 = np.reshape(lst,[3750*3750])
#====process LWDN==========================================================
time = get_rise55(year,doy,tile)
grab_time = getGrabTime(int(time)*100)
hr,forecastHR,cfsr_doy = getGrabTimeInv(grab_time/100,doy)
cfsr_date = "%d%03d" % (year,cfsr_doy)
if (grab_time)==2400:
grab_time = 0000
srcfn = os.path.join(static_path,'CFSR','%d' % year,'%03d' % cfsr_doy,'sfc_lwdn_%s_%02d00.dat' % (cfsr_date,grab_time/100))
lwdn25 = np.fromfile(srcfn, dtype=np.float32)
lwdn25 = np.flipud(lwdn25.reshape([720, 1440]))
inProjection = '+proj=longlat +ellps=WGS84 +datum=WGS84 +no_defs'
tif_fn = srcfn[:-4]+'.tif'
if not os.path.exists(tif_fn):
writeArray2Tiff(lwdn25,[0.25,0.25],[-180.,90.],inProjection,tif_fn,gdal.GDT_Float32)
outfn = tif_fn[:-4]+'_T%03d.tif' % tile
out = subprocess.check_output('gdal_translate -of GTiff -projwin %f %f %f %f -tr 0.004 0.004 %s %s' % (LLlon,URlat,URlon,LLlat,tif_fn,outfn), shell=True)
g = gdal.Open(outfn,GA_ReadOnly)
lwdn = g.ReadAsArray()
lwdn_viirs = np.reshape(lwdn,[3750*3750])
#=======get the final_rnet=================================================
cubDict = {'albedo':albedo, 'insol':insol_viirs, 'lwdn': lwdn_viirs, 'lst2':lst2}
cubDF = pd.DataFrame.from_dict(cubDict)
rnet_out = readCubistOut(rnet_cub_out,cubDF)
# rnet_out = get_results_cubist_model(cubist_name,cubDF)
rnet_out = np.reshape(rnet_out, [3750,3750])
rnet_tile = os.path.join(tile_base_path,'T%03d' % tile)
# if not os.path.exists(rnet_tile):
# os.makedirs(rnet_tile)
finalrnet_fn = os.path.join(rnet_tile,'FINAL_RNET_%s_T%03d.dat' % (date,tile))
rnet_out = np.array(rnet_out,dtype='Float32')
rnet_out.tofile(finalrnet_fn)
convertBin2tif(finalrnet_fn,inUL,ALEXI_shape,ALEXI_res,'float32',gdal.GDT_Float32)
#======TESTING=============================================================
testing_path = os.path.join(tile_base_path,'RNET','%03d' % doy)
# if not os.path.exists(testing_path):
# os.makedirs(testing_path)
testing_fn = os.path.join(testing_path,'FINAL_RNET_%s_T%03d.dat' % (date,tile))
shutil.copyfile(finalrnet_fn,testing_fn)
convertBin2tif(testing_fn,inUL,ALEXI_shape,ALEXI_res,np.float32,gdal.GDT_Float32)
def useTrees(tile,year,doy,trees):
LLlat,LLlon = tile2latlon(tile)
URlat = LLlat+15.
inUL = [LLlon,URlat]
ALEXI_shape = [3750,3750]
ALEXI_res = [0.004,0.004]
dtimedates = np.array(range(1,366,7))
r7day = dtimedates[dtimedates>=doy][0]
date = '%d%03d' % (year,doy)
p1 = [0,0,250,500,1000]
p2 = [250,250,500,1000,9999]
f1 = [0,0.2,0.0,0.0,0.0]
f2 = [0.2,1.0,1.0,1.0,1.0]
#=======ALEXI resolution inputs===============================================
laidates = np.array(range(1,366,4))
r4day = laidates[laidates>=doy][0]
laiddd="%d%03d" %(year,r4day)
dtrad_tile_path = os.path.join(tile_base_path,'DTRAD','%03d' % doy)
dthr_fn = os.path.join(dtrad_tile_path ,'FINAL_DTRAD_%s_T%03d.tif' % (date,tile))
trad2_tile_path = os.path.join(tile_base_path,'LST2','%03d' % doy)
# dthr_fn = os.path.join(tile_base_path,'T%03d' % tile, 'FINAL_DTRAD_%s_T%03d.tif' % (date,tile))
trad2_fn = os.path.join(trad2_tile_path , 'FINAL_DAY_LST_TIME2_%s_T%03d.tif' % (date,tile))
rnet_fn = os.path.join(tile_base_path,'T%03d' % tile, 'FINAL_RNET_%s_T%03d.dat' % (date,tile))
# lai_fn = os.path.join(static_path,'LAI','MLAI_%s_T%03d.dat' % (laiddd,tile)) # only have 2015 so far
# lai_fn = os.path.join(static_path,'LAI','MLAI_2015%03d_T%03d.dat' % (r4day,tile)) # TEMPORARY FOR RT PROCESSING
lai_fn = os.path.join(static_path,'LAI','MLAI_2015%03d_T%03d.tif' % (r4day,tile))
dthr_corr_fn = os.path.join(static_path,'DTHR_CORR','DTHR_CORR_2010%03d_T%03d.dat' % (r7day,tile))
dtime_fn = os.path.join(static_path,'DTIME','DTIME_2014%03d_T%03d.dat' % (r7day,tile))
fmax_fn = os.path.join(static_path,'FMAX','FMAX_T%03d.dat' % (tile))
precip_fn = os.path.join(static_path,'PRECIP','PRECIP_T%03d.dat' % (tile))
dthr = np.fromfile(dthr_fn, dtype=np.float32)
g = gdal.Open(dthr_fn,GA_ReadOnly)
dthr= g.ReadAsArray()
dthr = np.reshape(dthr,[3750*3750])
# dthr = dthr.reshape([3750,3750])
# plt.imshow(dthr)
# dthr = np.reshape(dthr,[3750*3750])
# trad2 = np.fromfile(trad2_fn, dtype=np.float32)
g = gdal.Open(trad2_fn,GA_ReadOnly)
trad2= g.ReadAsArray()
trad2 = np.reshape(trad2,[3750*3750])
# trad2 = trad2.reshape([3750,3750])
# plt.imshow(trad2)
# trad2 = np.reshape(trad2,[3750*3750])
rnet = np.fromfile(rnet_fn, dtype=np.float32)
# rnet = rnet.reshape([3750,3750])
# plt.imshow(rnet)
# rnet = np.reshape(rnet,[3750*3750])
lai = np.fromfile(lai_fn, dtype=np.float32)
# lai = lai.reshape([3750,3750])
# plt.imshow(lai, vmin=0, vmax=2)
# lai = np.reshape(lai,[3750*3750])
dthr_corr = np.fromfile(dthr_corr_fn, dtype=np.float32)
dthr_corr = np.flipud(dthr_corr.reshape([3750,3750]))
# plt.imshow(dthr_corr, vmin=0,vmax=3)
dthr_corr = np.reshape(dthr_corr,[3750*3750])
dtime = np.fromfile(dtime_fn, dtype=np.float32)
# dtime = dtime.reshape([3750,3750])
# plt.imshow(dtime)
# dtime = np.reshape(dtime,[3750*3750])
fmax = np.fromfile(fmax_fn, dtype=np.float32)
# fmax = fmax.reshape([3750,3750])
# plt.imshow(fmax, vmin=0, vmax=0.3)
# fmax = np.reshape(fmax,[3750*3750])
precip = np.fromfile(precip_fn, dtype=np.float32)
# precip = precip.reshape([3750,3750])
# plt.imshow(precip)
# precip = np.reshape(precip,[3750*3750])
dthr = (dthr/dtime)*dthr_corr
rnet_dthr = rnet/dthr
predDict = {'dthr':dthr,'rnet_dthr':rnet_dthr,'rnet': rnet,'trad2':trad2,
'fmax':fmax, 'precip':precip, 'lai':lai}
predDF = pd.DataFrame.from_dict(predDict)
outDF = []
for i in range(len(trees)):
mask = ((predDF["rnet"] < 0.0) |
(predDF["lai"] < 0.0) | (predDF["trad2"] < 0.0) |
(predDF["precip"] < p1[i]) | (predDF["precip"] >= p2[i]) |
(predDF["fmax"] < f1[i]) | (predDF["fmax"] >= f2[i]))
out = readCubistOut(trees[i],predDF)
out[mask]=np.nan
outDF.append(out)
#=====use the trees to estimate fsun===========================================
aa = np.array(outDF)
a_nans = np.sum(np.isnan(aa),axis=0)
a_nans = a_nans.reshape([3750,3750])
a_nans = np.array(a_nans,dtype='Float32')
out_nancount_fn = os.path.join(tile_base_path,'T%03d' % tile, 'FINAL_NAN_COUNT_%s_T%03d.dat' % (date,tile))
a = np.nansum(aa,axis=0)
final_pred = a.reshape([3750,3750])
final_pred = np.array(final_pred,dtype='Float32')
out_fsun_fn = os.path.join(tile_base_path,'T%03d' % tile, 'FINAL_FSUN_%s_T%03d.dat' % (date,tile))
# =============================================================================
final_pred.tofile(out_fsun_fn)
convertBin2tif(out_fsun_fn,inUL,ALEXI_shape,ALEXI_res,'float32',gdal.GDT_Float32)
# a_nans.tofile(out_nancount_fn)
# convertBin2tif(out_nancount_fn,inUL,ALEXI_shape,ALEXI_res,'float32',gdal.GDT_Float32)
# =============================================================================
testing_path = os.path.join(tile_base_path,'FSUN','%03d' % doy)
# if not os.path.exists(testing_path):
# os.makedirs(testing_path)
testing_fn = os.path.join(testing_path,'FINAL_FSUN_%s_T%03d.dat' % (date,tile))
shutil.copyfile(out_fsun_fn,testing_fn)
convertBin2tif(testing_fn,inUL,ALEXI_shape,ALEXI_res,np.float32,gdal.GDT_Float32)
#=======this module is part of update version 0.2 on Dec. 6, 2017==============
def useTreesV2(tile,year,doy):
LLlat,LLlon = tile2latlon(tile)
URlat = LLlat+15.
inUL = [LLlon,URlat]
ALEXI_shape = [3750,3750]
ALEXI_res = [0.004,0.004]
dtimedates = np.array(range(1,366,7))
r7day = dtimedates[dtimedates>=doy][0]
date = '%d%03d' % (year,doy)
#=======ALEXI resolution inputs===============================================
laidates = np.array(range(1,366,4))
r4day = laidates[laidates>=doy][0]
laiddd="%d%03d" %(year,r4day)
# dthr_fn = os.path.join(tile_base_path,'T%03d' % tile, 'FINAL_DTRAD_%s_T%03d.dat' % (date,tile))
# trad2_fn = os.path.join(tile_base_path,'T%03d' % tile, 'FINAL_DAY_LST_TIME2_%s_T%03d.dat' % (date,tile))
dtrad_tile_path = os.path.join(tile_base_path,'DTRAD','%03d' % doy)
dthr_fn = os.path.join(dtrad_tile_path ,'FINAL_DTRAD_%s_T%03d.tif' % (date,tile))
lst2_tile_path = os.path.join(tile_base_path,'LST2','%03d' % doy)
trad2_fn = os.path.join(lst2_tile_path,'FINAL_DAY_LST_TIME2_%s_T%03d.tif' % (date,tile))
# lai_fn = os.path.join(static_path,'LAI','MLAI_%s_T%03d.dat' % (laiddd,tile)) # only have 2015 so far
lai_fn = os.path.join(static_path,'LAI','MLAI_2015%03d_T%03d.tif' % (r4day,tile)) # TEMPORARY FOR RT PROCESSING
dthr_corr_fn = os.path.join(static_path,'DTHR_CORR','DTHR_CORR_2010%03d_T%03d.dat' % (r7day,tile))
dtime_fn = os.path.join(static_path,'DTIME','DTIME_2014%03d_T%03d.tif' % (r7day,tile))
lc_crop_pert_fn = os.path.join(base,'STATIC','LC_PERT','LC_PERT_crop_T%03d.tif' % tile)
lc_grass_pert_fn = os.path.join(base,'STATIC','LC_PERT','LC_PERT_grass_T%03d.tif' % tile)
lc_forest_pert_fn = os.path.join(base,'STATIC','LC_PERT','LC_PERT_forest_T%03d.tif' % tile)
lc_shrub_pert_fn = os.path.join(base,'STATIC','LC_PERT','LC_PERT_shrub_T%03d.tif' % tile)
lc_bare_pert_fn = os.path.join(base,'STATIC','LC_PERT','LC_PERT_bare_T%03d.tif' % tile)
# dthr = np.fromfile(dthr_fn, dtype=np.float32)
# trad2 = np.fromfile(trad2_fn, dtype=np.float32)
g = gdal.Open(dthr_fn)
dthr = g.ReadAsArray()
dthr = np.reshape(dthr,[3750*3750])
g = gdal.Open(trad2_fn)
trad2 = g.ReadAsArray()
trad2 = np.reshape(trad2,[3750*3750])
# lai = np.fromfile(lai_fn, dtype=np.float32)
g = gdal.Open(lai_fn)
lai = g.ReadAsArray()
lai = np.reshape(lai,[lai.size])
dthr_corr = np.fromfile(dthr_corr_fn, dtype=np.float32)
dthr_corr = np.flipud(dthr_corr.reshape([3750,3750]))
dthr_corr = np.reshape(dthr_corr,[3750*3750])
# dtime = np.fromfile(dtime_fn, dtype=np.float32)
g = gdal.Open(dtime_fn)
dtime = g.ReadAsArray()
dtime = np.reshape(dtime,[dtime.size])
dthr = (dthr/dtime)*dthr_corr
predDict = {'dthr_corr':dthr_corr,'trad2':trad2,'lai':lai}
predDF = pd.DataFrame.from_dict(predDict)
fsun_trees_tile_ctl = os.path.join(fsun_trees_path,'tiles_ctl')
tree_fn = os.path.join(fsun_trees_tile_ctl,'fsun_%s_%03d.model'% ('crop',doy))
crop_fsun = get_results_cubist_model(tree_fn,predDF)
crop_fsun[crop_fsun<0.0] = 0.0
crop_fsun = crop_fsun.reshape([3750,3750])
tree_fn = os.path.join(fsun_trees_tile_ctl,'fsun_%s_%03d.model'% ('grass',doy))
grass_fsun = get_results_cubist_model(tree_fn,predDF)
grass_fsun[grass_fsun<0.0] = 0.0
grass_fsun = grass_fsun.reshape([3750,3750])
tree_fn = os.path.join(fsun_trees_tile_ctl,'fsun_%s_%03d.model'% ('shrub',doy))
shrub_fsun = get_results_cubist_model(tree_fn,predDF)
shrub_fsun[shrub_fsun<0.0] = 0.0
shrub_fsun = shrub_fsun.reshape([3750,3750])
tree_fn = os.path.join(fsun_trees_tile_ctl,'fsun_%s_%03d.model'% ('forest',doy))
forest_fsun = get_results_cubist_model(tree_fn,predDF)
forest_fsun[forest_fsun<0.0] = 0.0
forest_fsun = forest_fsun.reshape([3750,3750])
tree_fn = os.path.join(fsun_trees_tile_ctl,'fsun_%s_%03d.model'% ('bare',doy))
bare_fsun = get_results_cubist_model(tree_fn,predDF)
bare_fsun[bare_fsun<0.0] = 0.0
bare_fsun = bare_fsun.reshape([3750,3750])
# crop_fsun = readCubistOut(trees[0],predDF)
# crop_fsun[crop_fsun<0.0] = 0.0
# crop_fsun = crop_fsun.reshape([3750,3750])
# grass_fsun = readCubistOut(trees[1],predDF)
# grass_fsun[grass_fsun<0.0] = 0.0
# grass_fsun = grass_fsun.reshape([3750,3750])
# shrub_fsun = readCubistOut(trees[2],predDF)
# shrub_fsun[shrub_fsun<0.0] = 0.0
# shrub_fsun = shrub_fsun.reshape([3750,3750])
# forest_fsun = readCubistOut(trees[3],predDF)
# forest_fsun[forest_fsun<0.0] = 0.0
# forest_fsun = forest_fsun.reshape([3750,3750])
# bare_fsun = readCubistOut(trees[4],predDF)
# bare_fsun[bare_fsun<0.0] = 0.0
# bare_fsun = bare_fsun.reshape([3750,3750])
#======open LC percentage maps============================================
g = gdal.Open(lc_crop_pert_fn)
crop_pert = g.ReadAsArray()
g = gdal.Open(lc_grass_pert_fn)
grass_pert = g.ReadAsArray()
g = gdal.Open(lc_forest_pert_fn)
forest_pert = g.ReadAsArray()
g = gdal.Open(lc_shrub_pert_fn)
shrub_pert = g.ReadAsArray()
g = gdal.Open(lc_bare_pert_fn)
bare_pert = g.ReadAsArray()
#=====use the trees to estimate fsun=======================================
    fsun = (crop_fsun*crop_pert+grass_fsun*grass_pert+forest_fsun*forest_pert
            +shrub_fsun*shrub_pert+bare_fsun*bare_pert)
fsun = np.array(fsun,dtype='Float32')
#====save outputs==========================================================
out_fsun_fn = os.path.join(tile_base_path,'T%03d' % tile, 'FINAL_FSUN_%s_T%03d.tif' % (date,tile))
inProjection = '+proj=longlat +ellps=WGS84 +datum=WGS84 +no_defs'
writeArray2Tiff(fsun,ALEXI_res,inUL,inProjection,out_fsun_fn,gdal.GDT_Float32)
testing_path = os.path.join(tile_base_path,'FSUN','%03d' % doy)
testing_fn = os.path.join(testing_path,'FINAL_FSUN_%s_T%03d.tif' % (date,tile))
shutil.copyfile(out_fsun_fn,testing_fn)
def getDailyET(tile,year,doy):
inProjection = '+proj=longlat +ellps=WGS84 +datum=WGS84 +no_defs'
LLlat,LLlon = tile2latlon(tile)
URlat = LLlat+15.
inUL = [LLlon,URlat]
ALEXI_shape = [3750,3750]
ALEXI_res = [0.004,0.004]
date = '%d%03d' % (year,doy)
insol24_fn = os.path.join(static_path,'INSOL24', 'RS24_%s_T%03d.tif' % (date,tile))
g = gdal.Open(insol24_fn,GA_ReadOnly)
Rs24= g.ReadAsArray()
# Rs24=(Rs24*0.0864)/24.0
Rs24=(Rs24/8.)*0.0864 # there are 8 measurements of 3 hour averages from CFSR NOT 24!
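    # 0.0864 converts the mean flux from W m-2 to MJ m-2 day-1 (86400 s per day / 1e6 J per MJ)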
fsun_fn = os.path.join(tile_base_path,'T%03d' % tile, 'FINAL_FSUN_%s_T%03d.tif' % (date,tile))
g = gdal.Open(fsun_fn)
Fsun = g.ReadAsArray()
# Fsun = np.fromfile(fsun_fn, dtype=np.float32)
# Fsun = Fsun.reshape([3750,3750])
EFeq=Fsun*(Rs24)
ET_24 = EFeq*0.408
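    # 0.408 (about 1/2.45 MJ kg-1, the latent heat of vaporization) converts MJ m-2 day-1 to mm day-1 of ET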
ET_24[Fsun<0.0]=-9999.
ET_24[(ET_24>0.0) & (ET_24<0.01)] = 0.01
ET_24 = np.array(ET_24,dtype='Float32')
et_path = os.path.join(tile_base_path,'ET','%d' % year, '%03d' % doy)
# if not os.path.exists(et_path):
# os.makedirs(et_path)
et_fn = os.path.join(et_path,'FINAL_EDAY_%s_T%03d.tif' % (date,tile))
writeArray2Tiff(ET_24,ALEXI_res,inUL,inProjection,et_fn,gdal.GDT_Float32)
def createFolders(tile,year,doy):
fsun_trees_tile_ctl = os.path.join(fsun_trees_path,'tiles_ctl')
if not os.path.exists(fsun_trees_tile_ctl):
os.makedirs(fsun_trees_tile_ctl)
tile_path = os.path.join(tile_base_path,"T%03d" % tile)
if not os.path.exists(tile_path):
os.makedirs(tile_path)
testing_path = os.path.join(tile_base_path,'DTRAD','%03d' % doy)
if not os.path.exists(testing_path):
os.makedirs(testing_path)
testing_path = os.path.join(tile_base_path,'LST2','%03d' % doy)
if not os.path.exists(testing_path):
os.makedirs(testing_path)
calc_rnet_tile_ctl = os.path.join(calc_rnet_path,'tiles_ctl')
if not os.path.exists(calc_rnet_tile_ctl):
os.makedirs(calc_rnet_tile_ctl)
rnet_tile = os.path.join(tile_base_path,'T%03d' % tile)
if not os.path.exists(rnet_tile):
os.makedirs(rnet_tile)
testing_path = os.path.join(tile_base_path,'RNET','%03d' % doy)
if not os.path.exists(testing_path):
os.makedirs(testing_path)
viirs_tile_path = os.path.join(calc_rnet_path,'viirs','T%03d' % tile)
if not os.path.exists(viirs_tile_path):
os.makedirs(viirs_tile_path)
testing_path = os.path.join(tile_base_path,'FSUN','%03d' % doy)
if not os.path.exists(testing_path):
os.makedirs(testing_path)
et_path = os.path.join(tile_base_path,'ET','%d' % year, '%03d' % doy)
if not os.path.exists(et_path):
os.makedirs(et_path)
def cleanup(year,doy,tiles):
for tile in tiles:
shutil.rmtree(os.path.join(CFSR_path,year,doy,"T%03d" % tile))
os.remove(os.path.join(static_path,'INSOL','deg004','insol55_2011%03d_T%03d.tif' % (doy,tile)))
def runSteps(tile=None,year=None,doy=None):
if year==None:
dd = datetime.date.today()+datetime.timedelta(days=-1)
year = dd.year
if doy==None:# NOTE: this is for yesterday
doy = (datetime.date.today()-datetime.date(year,1,1)).days
# ============process one tile at a time ==================================
if ((tile!=None) and (len(tile)==1)):
tile = tile[0]
createFolders(tile,year,doy)
# runProcess(tiles,year,doy)
# print("building VIIRS coordinates LUT--------------->")
# getIJcoordsPython(tile)
print("gridding VIIRS data-------------------------->")
# res = gridMergePython(tile,year,doy)
tileProcessed = gridMergePythonEWA(tile,year,doy)
if tileProcessed ==2:
# if res > 0:
# print("no viirs data")
# else:
print("running I5 atmosperic correction------------->")
# startatmos = timer.time()
# atmosCorrection(tile,year,doy)
atmosCorrectPython(tile,year,doy)
# end = timer.time()
# print("atmoscorr time: %f" % (end - startatmos))
print("estimating dtrad and LST2-------------------->")
pred_dtrad(tile,year,doy)
print("build RNET trees----------------------------->") # Using MENA region for building trees
tree = buildRNETtrees(year,doy)
print("estimating RNET ----------------------------->")
getRNETfromTrees(tile,year,doy,tree)
print("estimating FSUN------------------------------>")
useTreesV2(tile,year,doy)
# useTreesV2(tile,year,doy,trees)
print("making ET------------------------------------>")
getDailyET(tile,year,doy)
# cleanup(year,doy,tiles)
print("============FINISHED!=========================")
else:
print "not all LST files are there"
else:
# ===========for processing all tiles in parallel======================
if tile == None:
tiles = [60,61,62,63,64,83,84,85,86,87,88,107,108,109,110,111,112]
else:
tiles = tile
for tile in tiles:
createFolders(tile,year,doy)
# runProcess(tiles,year,doy)
print("gridding VIIRS data-------------------------->")
# r = Parallel(n_jobs=-1, verbose=5)(delayed(gridMergePython)(tile,year,doy) for tile in tiles)
r = Parallel(n_jobs=-1, verbose=5)(delayed(gridMergePythonEWA)(tile,year,doy) for tile in tiles)
r = np.array(r)
tiles = np.array(tiles)
tiles = tiles[r]
print("running I5 atmosperic correction------------->")
# r = Parallel(n_jobs=-1, verbose=5)(delayed(atmosCorrection)(tile,year,doy) for tile in tiles)
r = Parallel(n_jobs=-1, verbose=5)(delayed(atmosCorrectPython)(tile,year,doy) for tile in tiles)
print("estimating dtrad and LST2-------------------->")
r = Parallel(n_jobs=-1, verbose=5)(delayed(pred_dtradV2)(tile,year,doy) for tile in tiles)
print("build RNET trees----------------------------->") # Using MENA region for building trees
tree = buildRNETtrees(year,doy)
print("estimating RNET ----------------------------->")
# r = Parallel(n_jobs=-1, verbose=5)(delayed(processTiles)(tile,year,doy) for tile in tiles) # using Tiles to build RNET trees
r = Parallel(n_jobs=-1, verbose=5)(delayed(getRNETfromTrees)(tile,year,doy,tree) for tile in tiles)
# getRNETfromTrees(tile,year,doy,tree)
print("estimating FSUN------------------------------>")
r = Parallel(n_jobs=-1, verbose=5)(delayed(useTreesV2)(tile,year,doy) for tile in tiles)
print("making ET------------------------------------>")
r = Parallel(n_jobs=-1, verbose=5)(delayed(getDailyET)(tile,year,doy) for tile in tiles)
# cleanup(year,doy,tiles)
print("============FINISHED!=========================")
def main():
# Get time and location from user
parser = argparse.ArgumentParser()
parser.add_argument("year", nargs='?', type=int, default=None, help="year of data")
parser.add_argument("start_doy", nargs='?',type=int, default=None, help="start day of processing. *Note: leave blank for Real-time")
parser.add_argument("end_doy", nargs='?',type=int, default=None, help="end day of processing. *Note: leave blank for Real-time")
parser.add_argument("--tiles", nargs='*',type=int, default=None, help="tile from 15x15 deg tile grid system")
parser.add_argument("--buildTrees", nargs='*',type=str, default='False', help="build Fsun trees")
args = parser.parse_args()
year= args.year
start_doy = args.start_doy
end_doy= args.end_doy
tiles = args.tiles
buildTrees = args.buildTrees
    print(buildTrees)
if start_doy == None:
start = timer.time()
dd = datetime.date.today()+datetime.timedelta(days=-1)
year = dd.year
doy = (datetime.date.today()-datetime.date(year,1,1)).days
# trees = processTrees() # until we have data for other years only use 2015
if buildTrees[0] == 'True':
print("building regression trees from 5KM data---------->")
processTreesV2(doy)
# runSteps(1,trees,None,year,doy)
runSteps(tiles)
end = timer.time()
print("program duration: %f minutes" % ((end - start)/60.))
else:
days = range(start_doy,end_doy)
start = timer.time()
for doy in days:
print("processing day:%d of year:%d" % (doy,year))
print("building regression trees from 5KM data---------->")
# trees = processTrees(year,doy) # until we have data for other years only use 2015
if buildTrees[0] =='True':
print("building regression trees from 5KM data---------->")
processTreesV2(doy)
runSteps(tiles,year,doy)
end = timer.time()
print("program duration: %f minutes" % ((end - start)/60.))
if __name__ == "__main__":
    main()
#year = 2015
#doy = 221
#days = range(225,228) | bsd-3-clause |
ShawnMurd/MetPy | examples/gridding/Natural_Neighbor_Verification.py | 3 | 10224 | # Copyright (c) 2016 MetPy Developers.
# Distributed under the terms of the BSD 3-Clause License.
# SPDX-License-Identifier: BSD-3-Clause
"""
Natural Neighbor Verification
=============================
Walks through the steps of Natural Neighbor interpolation to validate that the algorithmic
approach taken in MetPy is correct.
"""
###########################################
# Find natural neighbors visual test
#
# A triangle is a natural neighbor for a point if the
# `circumscribed circle <https://en.wikipedia.org/wiki/Circumscribed_circle>`_ of the
# triangle contains that point. It is important that we grab the correct triangles
# for each point before proceeding with the interpolation.
#
# Algorithmically:
#
# 1. We place all of the grid points in a KDTree. These provide worst-case O(n) time
# complexity for spatial searches.
#
# 2. We generate a `Delaunay Triangulation <https://docs.scipy.org/doc/scipy/
# reference/tutorial/spatial.html#delaunay-triangulations>`_
# using the locations of the provided observations.
#
# 3. For each triangle, we calculate its circumcenter and circumradius. Using
# KDTree, we then assign each grid a triangle that has a circumcenter within a
# circumradius of the grid's location.
#
# 4. The resulting dictionary uses the grid index as a key and a set of natural
# neighbor triangles in the form of triangle codes from the Delaunay triangulation.
# This dictionary is then iterated through to calculate interpolation values.
#
# 5. We then traverse the ordered natural neighbor edge vertices for a particular
# grid cell in groups of 3 (n - 1, n, n + 1), and perform calculations to generate
# proportional polygon areas.
#
# Circumcenter of (n - 1), n, grid_location
# Circumcenter of (n + 1), n, grid_location
#
# Determine what existing circumcenters (ie, Delaunay circumcenters) are associated
# with vertex n, and add those as polygon vertices. Calculate the area of this polygon.
#
# 6. Increment the current edges to be checked, i.e.:
# n - 1 = n, n = n + 1, n + 1 = n + 2
#
# 7. Repeat steps 5 & 6 until all of the edge combinations of 3 have been visited.
#
# 8. Repeat steps 4 through 7 for each grid cell.
import matplotlib.pyplot as plt
import numpy as np
from scipy.spatial import ConvexHull, Delaunay, delaunay_plot_2d, Voronoi, voronoi_plot_2d
from scipy.spatial.distance import euclidean
from metpy.interpolate import geometry
from metpy.interpolate.points import natural_neighbor_point
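###########################################
# As a minimal, self-contained sketch of step 3 above (illustrative only; the
# ``circumcircle_contains`` helper below is not part of the MetPy API), a triangle
# is accepted as a natural neighbor of a query point when the point falls inside
# the triangle's circumcircle:


def circumcircle_contains(a, b, c, point):
    """Check whether ``point`` lies inside the circumcircle of triangle ``abc``."""
    ax, ay = a
    bx, by = b
    cx, cy = c
    d = 2. * (ax * (by - cy) + bx * (cy - ay) + cx * (ay - by))
    ux = ((ax**2 + ay**2) * (by - cy) + (bx**2 + by**2) * (cy - ay)
          + (cx**2 + cy**2) * (ay - by)) / d
    uy = ((ax**2 + ay**2) * (cx - bx) + (bx**2 + by**2) * (ax - cx)
          + (cx**2 + cy**2) * (bx - ax)) / d
    radius = np.hypot(ax - ux, ay - uy)
    return np.hypot(point[0] - ux, point[1] - uy) < radius


print(circumcircle_contains((0., 0.), (4., 0.), (0., 4.), (1., 1.)))  # inside -> True
print(circumcircle_contains((0., 0.), (4., 0.), (0., 4.), (9., 9.)))  # outside -> False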
###########################################
# For a test case, we generate 10 random points and observations, where the
# observation values are just the x coordinate value times the y coordinate
# value divided by 1000.
#
# We then create two test points (grid 0 & grid 1) at which we want to
# estimate a value using natural neighbor interpolation.
#
# The locations of these observations are then used to generate a Delaunay triangulation.
np.random.seed(100)
pts = np.random.randint(0, 100, (10, 2))
xp = pts[:, 0]
yp = pts[:, 1]
zp = (pts[:, 0] * pts[:, 0]) / 1000
tri = Delaunay(pts)
fig, ax = plt.subplots(1, 1, figsize=(15, 10))
ax.ishold = lambda: True # Work-around for Matplotlib 3.0.0 incompatibility
delaunay_plot_2d(tri, ax=ax)
for i, zval in enumerate(zp):
ax.annotate('{} F'.format(zval), xy=(pts[i, 0] + 2, pts[i, 1]))
sim_gridx = [30., 60.]
sim_gridy = [30., 60.]
ax.plot(sim_gridx, sim_gridy, '+', markersize=10)
ax.set_aspect('equal', 'datalim')
ax.set_title('Triangulation of observations and test grid cell '
'natural neighbor interpolation values')
members, circumcenters = geometry.find_natural_neighbors(tri, list(zip(sim_gridx, sim_gridy)))
val = natural_neighbor_point(xp, yp, zp, (sim_gridx[0], sim_gridy[0]), tri, members[0],
circumcenters)
ax.annotate('grid 0: {:.3f}'.format(val), xy=(sim_gridx[0] + 2, sim_gridy[0]))
val = natural_neighbor_point(xp, yp, zp, (sim_gridx[1], sim_gridy[1]), tri, members[1],
circumcenters)
ax.annotate('grid 1: {:.3f}'.format(val), xy=(sim_gridx[1] + 2, sim_gridy[1]))
###########################################
# Using the circumcenter and circumcircle radius information from
# :func:`metpy.interpolate.geometry.find_natural_neighbors`, we can visually
# examine the results to see if they are correct.
def draw_circle(ax, x, y, r, m, label):
th = np.linspace(0, 2 * np.pi, 100)
nx = x + r * np.cos(th)
ny = y + r * np.sin(th)
ax.plot(nx, ny, m, label=label)
fig, ax = plt.subplots(1, 1, figsize=(15, 10))
ax.ishold = lambda: True # Work-around for Matplotlib 3.0.0 incompatibility
delaunay_plot_2d(tri, ax=ax)
ax.plot(sim_gridx, sim_gridy, 'ks', markersize=10)
for i, (x_t, y_t) in enumerate(circumcenters):
r = geometry.circumcircle_radius(*tri.points[tri.simplices[i]])
if i in members[1] and i in members[0]:
draw_circle(ax, x_t, y_t, r, 'm-', str(i) + ': grid 1 & 2')
ax.annotate(str(i), xy=(x_t, y_t), fontsize=15)
elif i in members[0]:
draw_circle(ax, x_t, y_t, r, 'r-', str(i) + ': grid 0')
ax.annotate(str(i), xy=(x_t, y_t), fontsize=15)
elif i in members[1]:
draw_circle(ax, x_t, y_t, r, 'b-', str(i) + ': grid 1')
ax.annotate(str(i), xy=(x_t, y_t), fontsize=15)
else:
draw_circle(ax, x_t, y_t, r, 'k:', str(i) + ': no match')
ax.annotate(str(i), xy=(x_t, y_t), fontsize=9)
ax.set_aspect('equal', 'datalim')
ax.legend()
###########################################
# What?....the circle from triangle 8 looks pretty darn close. Why isn't
# grid 0 included in that circle?
x_t, y_t = circumcenters[8]
r = geometry.circumcircle_radius(*tri.points[tri.simplices[8]])
print('Distance between grid0 and Triangle 8 circumcenter:',
euclidean([x_t, y_t], [sim_gridx[0], sim_gridy[0]]))
print('Triangle 8 circumradius:', r)
###########################################
# Let's do a manual check of the above interpolation value for grid 0 (southernmost grid)
# Grab the circumcenters and radii for natural neighbors
cc = np.array(circumcenters)
r = np.array([geometry.circumcircle_radius(*tri.points[tri.simplices[m]]) for m in members[0]])
print('circumcenters:\n', cc)
print('radii\n', r)
###########################################
# Draw the natural neighbor triangles and their circumcenters. Also plot a `Voronoi diagram
# <https://docs.scipy.org/doc/scipy/reference/tutorial/spatial.html#voronoi-diagrams>`_
# which serves as a complementary (but not necessary)
# spatial data structure that we use here simply to show areal ratios.
# Notice that the two natural neighbor triangle circumcenters are also vertices
# in the Voronoi plot (green dots), and the observations are in the polygons (blue dots).
vor = Voronoi(list(zip(xp, yp)))
fig, ax = plt.subplots(1, 1, figsize=(15, 10))
ax.ishold = lambda: True # Work-around for Matplotlib 3.0.0 incompatibility
voronoi_plot_2d(vor, ax=ax)
nn_ind = np.array([0, 5, 7, 8])
z_0 = zp[nn_ind]
x_0 = xp[nn_ind]
y_0 = yp[nn_ind]
for x, y, z in zip(x_0, y_0, z_0):
ax.annotate('{}, {}: {:.3f} F'.format(x, y, z), xy=(x, y))
ax.plot(sim_gridx[0], sim_gridy[0], 'k+', markersize=10)
ax.annotate('{}, {}'.format(sim_gridx[0], sim_gridy[0]), xy=(sim_gridx[0] + 2, sim_gridy[0]))
ax.plot(cc[:, 0], cc[:, 1], 'ks', markersize=15, fillstyle='none',
label='natural neighbor\ncircumcenters')
for center in cc:
ax.annotate('{:.3f}, {:.3f}'.format(center[0], center[1]),
xy=(center[0] + 1, center[1] + 1))
tris = tri.points[tri.simplices[members[0]]]
for triangle in tris:
x = [triangle[0, 0], triangle[1, 0], triangle[2, 0], triangle[0, 0]]
y = [triangle[0, 1], triangle[1, 1], triangle[2, 1], triangle[0, 1]]
ax.plot(x, y, ':', linewidth=2)
ax.legend()
ax.set_aspect('equal', 'datalim')
def draw_polygon_with_info(ax, polygon, off_x=0, off_y=0):
"""Draw one of the natural neighbor polygons with some information."""
pts = np.array(polygon)[ConvexHull(polygon).vertices]
for i, pt in enumerate(pts):
ax.plot([pt[0], pts[(i + 1) % len(pts)][0]],
[pt[1], pts[(i + 1) % len(pts)][1]], 'k-')
avex, avey = np.mean(pts, axis=0)
ax.annotate('area: {:.3f}'.format(geometry.area(pts)), xy=(avex + off_x, avey + off_y),
fontsize=12)
cc1 = geometry.circumcenter((53, 66), (15, 60), (30, 30))
cc2 = geometry.circumcenter((34, 24), (53, 66), (30, 30))
draw_polygon_with_info(ax, [cc[0], cc1, cc2])
cc1 = geometry.circumcenter((53, 66), (15, 60), (30, 30))
cc2 = geometry.circumcenter((15, 60), (8, 24), (30, 30))
draw_polygon_with_info(ax, [cc[0], cc[1], cc1, cc2], off_x=-9, off_y=3)
cc1 = geometry.circumcenter((8, 24), (34, 24), (30, 30))
cc2 = geometry.circumcenter((15, 60), (8, 24), (30, 30))
draw_polygon_with_info(ax, [cc[1], cc1, cc2], off_x=-15)
cc1 = geometry.circumcenter((8, 24), (34, 24), (30, 30))
cc2 = geometry.circumcenter((34, 24), (53, 66), (30, 30))
draw_polygon_with_info(ax, [cc[0], cc[1], cc1, cc2])
###########################################
# Put all of the generated polygon areas and their affiliated values in arrays.
# Calculate the total area of all of the generated polygons.
areas = np.array([60.434, 448.296, 25.916, 70.647])
values = np.array([0.064, 1.156, 2.809, 0.225])
total_area = np.sum(areas)
print(total_area)
###########################################
# For each polygon area, calculate its percent of total area.
proportions = areas / total_area
print(proportions)
###########################################
# Multiply the percent of total area by the respective values.
contributions = proportions * values
print(contributions)
###########################################
# The sum of this array is the interpolation value!
interpolation_value = np.sum(contributions)
function_output = natural_neighbor_point(xp, yp, zp, (sim_gridx[0], sim_gridy[0]), tri,
members[0], circumcenters)
print(interpolation_value, function_output)
###########################################
# The values are slightly different due to truncating the area values in
# the above visual example to the 3rd decimal place.
plt.show()
| bsd-3-clause |
CompPhysics/ThesisProjects | doc/MSc/msc_students/former/AudunHansen/Audun/Pythonscripts/CCD_block_implementation_mk2.py | 1 | 37097 | from numpy import *
from time import *
from matplotlib.pyplot import *
from scipy.sparse import csr_matrix, coo_matrix
#Main goal for this implementation: avoid poor design choices
class electronbasis():
def __init__(self, N, rs, Nparticles):
self.rs = rs
self.states = []
self.nstates = 0
self.nparticles = Nparticles
self.nshells = N - 1
self.Nm = N + 1
self.k_step = 2*(self.Nm + 1)
Nm = N
n = 0 #current shell
ene_integer = 0
while n <= self.nshells:
is_shell = False
for x in range(-Nm, Nm+1):
for y in range(-Nm, Nm+1):
for z in range(-Nm,Nm+1):
e = x*x + y*y + z*z
if e == ene_integer:
is_shell = True
self.nstates += 2
self.states.append([e, x,y,z, 1])
self.states.append([e, x,y,z,-1])
if is_shell:
n += 1
ene_integer += 1
self.L3 = (4*pi*self.nparticles*self.rs**3)/3.0
self.L2 = self.L3**(2/3.0)
self.L = pow(self.L3, 1/3.0)
for i in range(self.nstates):
self.states[i][0] *= 2*(pi**2)/self.L**2 #Multiplying in the missing factors in the single particle energy
self.states = array(self.states) #converting to array to utilize vectorized calculations
def hfenergy(self, nParticles):
#Calculating the HF-energy (reference energy)
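        # E_ref = sum_i <i|h|i> + (1/2) sum_{i != j} <ij||ij>, where both sums run over
        # the nParticles lowest states and v() already returns antisymmetrized elements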
e0 = 0.0
if nParticles<=self.nstates:
for i in range(nParticles):
e0 += self.h(i,i)
for j in range(nParticles):
if j != i:
e0 += .5*self.v(i,j,i,j)
else:
#Safety for cases where nParticles exceeds size of basis
print "Not enough basis states."
return e0
def h(self, p,q):
#Return single particle energy
return self.states[p,0]*(p==q)
def veval(self, p,q,r,s):
#A test for evaluating the two-body interaction
val = ""
if self.kdplus(p,q,r,s):
val+= "kdplus "
if self.kdspin(p,r):
val += "Direct[kdspin_pr "
if self.kdspin(q,s):
val += "kdspin_qs "
if self.kdwave(p,r) != 0:
val += "kdwave!=0 "
val += str(self.absdiff2(r,p))
val += "] "
if self.kdspin(p,s):
val += "Exchange[kdspin_pr "
if self.kdspin(q,r):
val += "kdspin_qs "
if self.kdwave(p,s) != 0:
val += "kdwave!=0 "
val += str(self.absdiff2(s,p))
val += "] "
return val
def vevalHF(self, N):
#Evaluation of all expressions of two-body contributions to the HF-energy
for i in range(N):
for j in range(N):
if i!= j:
print "<",i,j,"|",i,j,"> =",self.veval(i,j,i,j)
def V2(self, kp,kq,kr,ks):
#k = (energy, kx, ky, kz, ms)
# Vectorized interaction
        # This function assumes that the first criterion (comment line below) has already been asserted to be true
kdplus = 4*pi/self.L3 #(kp[:,1]+kq[:,1]==kr[:,1]+ks[:,1])*(kp[:,2]+kq[:,2]==kr[:,2]+ks[:,2])*(kp[:,3]+kq[:,3]==kr[:,3]+ks[:,3])*4*pi/self.L3#d_k+k k+k
kdspin1 = (kp[:,4]==kr[:,4])*(kq[:,4]==ks[:,4])*1
kdwave1 = abs((kp[:,1]==kr[:,1])*(kp[:,2]==kr[:,2])*(kp[:,3]==kr[:,3])-1)
absdiff2_1 = ((kr[:,1]-kp[:,1])**2+(kr[:,2]-kp[:,2])**2+(kr[:,3]-kp[:,3])**2) #absdiff2
term1=(4.0*absdiff2_1*pi**2)/self.L2
term1[term1==0] = 1
kdspin2 = (kp[:,4]==ks[:,4])*(kq[:,4]==kr[:,4])*1
kdwave2 = abs((kp[:,1]==ks[:,1])*(kp[:,2]==ks[:,2])*(kp[:,3]==ks[:,3])-1)
absdiff2_2 = ((ks[:,1]-kp[:,1])**2+(ks[:,2]-kp[:,2])**2+(ks[:,3]-kp[:,3])**2) #absdiff2
term2=(4.0*absdiff2_2*pi**2)/self.L2
term2[term2==0] = 1
return kdplus*(kdspin1*kdwave1/term1 - kdspin2*kdwave2/term2)
def V(self, kp,kq,kr,ks):
#k = (energy, kx, ky, kz, ms)
# Vectorized interaction
        # This function assumes that the first criterion (comment line below) has already been asserted to be true
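        # Returns the antisymmetrized Coulomb element <pq||rs> = <pq|v|rs> - <pq|v|sr> of the
        # 3D homogeneous electron gas, with momentum and spin conservation enforced by the deltas below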
kdplus = (kp[1,:]+kq[1,:]==kr[1,:]+ks[1,:])*(kp[2,:]+kq[2,:]==kr[2,:]+ks[2,:])*(kp[3,:]+kq[3,:]==kr[3,:]+ks[3,:])*4*pi/self.L3#d_k+k k+k //FACTOR 2 originally 4
kdspin1 = (kp[4,:]==kr[4,:])*(kq[4,:]==ks[4,:])*1
kdwave1 = abs((kp[1,:]==kr[1,:])*(kp[2,:]==kr[2,:])*(kp[3,:]==kr[3,:])-1)
absdiff2_1 = ((kr[1,:]-kp[1,:])**2+(kr[2,:]-kp[2,:])**2+(kr[3,:]-kp[3,:])**2) #absdiff2
term1=(4.0*absdiff2_1*pi**2)/self.L2
term1[term1==0] = 1
kdspin2 = (kp[4,:]==ks[4,:])*(kq[4,:]==kr[4,:])*1
kdwave2 = abs((kp[1,:]==ks[1,:])*(kp[2,:]==ks[2,:])*(kp[3,:]==ks[3,:])-1)
absdiff2_2 = ((ks[1,:]-kp[1,:])**2+(ks[2,:]-kp[2,:])**2+(ks[3,:]-kp[3,:])**2) #absdiff2
term2=(4.0*absdiff2_2*pi**2)/self.L2
term2[term2==0] = 1
return kdplus*(kdspin1*kdwave1/term1 - kdspin2*kdwave2/term2)
def v(self,p,q,r,s):
#Two body interaction
#To optimize bottleneck: vectorize this function ! (remove if-tests)
val = 0
terms = 0.0
term1 = 0.0
term2 = 0.0
kdpl = self.kdplus(p,q,r,s)
if kdpl != 0:
val = 1.0/self.L3
if self.kdspin(p,r)*self.kdspin(q,s)==1:
if self.kdwave(p,r) != 1.0:
#term1=(4*self.absdiff2(r,p)*pi**2)/self.L2
#terms += 1.0/term1
term1 = self.L2/(pi*self.absdiff2(r,p))
if self.kdspin(p,s)*self.kdspin(q,r)==1:
if self.kdwave(p,s) != 1.0:
#term2=(4*self.absdiff2(s,p)*pi**2)/self.L2
#terms -= 1.0/term2
term2 = self.L2/(pi*self.absdiff2(s,p))
return val*(term1-term2)
    #The following is a series of Kronecker deltas used in the two-body interactions.
    #Run kd_integrity() to ensure that they work as intended.
def kdi(self,a,b):
        #Kronecker delta integer
return 1.0*(a==b)
def kda(self,a,b):
        #Kronecker delta array
d = 1.0
#print a,b,
for i in range(len(a)):
d*=(a[i]==b[i])
return d
def kdfullplus(self,p,q,r,s):
        #Kronecker delta wavenumber p+q,r+s
return self.kda(self.states[p][1:5]+self.states[q][1:5],self.states[r][1:5]+self.states[s][1:5])
def kdplus(self,p,q,r,s):
        #Kronecker delta wavenumber p+q,r+s
return self.kda(self.states[p][1:4]+self.states[q][1:4],self.states[r][1:4]+self.states[s][1:4])
def kdspin(self,p,q):
        #Kronecker delta spin
return self.kdi(self.states[p][4], self.states[q][4])
def kdwave(self,p,q):
        #Kronecker delta wavenumber
return self.kda(self.states[p][1:4],self.states[q][1:4])
def absdiff2(self,p,q):
val = 0.0
for i in range(1,4):
val += (self.states[p][i]-self.states[q][i])*(self.states[p][i]-self.states[q][i])
#if val == 0:
# print "div0"
return val
def kd_integrity(self):
        #test integrity of Kronecker deltas
print "Array KD :", self.kda([0,1,2], [0,1,2]) == True
print "Integer KD :", self.kdi(1,1) == True
print "Opposite spin :", self.kdspin(0,1) == False
print "Equal spin :", self.kdspin(1,1) == True
print "Wavenumber equal :", self.kdwave(1,0) == True
print "Wavenumber not equal:", self.kdwave(1,2) == False
def liststates(self):
for i in range(self.nstates):
print self.states[i]
def unique(self, p):
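        # Encode the wave numbers and spin of state p into one integer (base self.k_step),
        # so that sums of conserved quantum numbers can be compared as single identifiers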
return self.states[p,1] + self.states[p,2]*self.k_step + self.states[p,3]*self.k_step**2 + self.states[p,4]*self.k_step**3
class channelmap():
"""
This tensorclass keeps track of and stores
(1) The dense blocks of amplitudes or interactions
(2) The assosciated unique identifiers resulting from conservation of quantum numbers
(3) The reorganization patterns needed to align matrices when performing contractions
"""
def __init__(self, basis, Np, Nq, Nr, Ns):
self.bs = basis
#print "Number of states:", self.bs.nstates
self.k_step = 2*(self.bs.Nm + 1) #steplength of momentum vector, used to uniquely identify combinations of quantum numbers
self.elements = [] #1D array where each element is stored, common to all blocks
self.elements = array([], dtype = float)
self.blockmap = [] #1D array where for each element, the mapped blocks and indices are stored, so that
self.blocks = [] #nested list containing blocks of pointers to elements
self.configs = [] #nested list containing the incoming and outgoing quantum numbers of blocks
self.blocklengths = []
self.Np = Np
self.Nq = Nq
self.Nr = Nr
self.Ns = Ns
self.configurations = []
self.amplitude = False
###########################
# Transform between 1D to /from 4D
############################
def to(self, p,q,r,s):
#translate to transformed index
return p + q*self.Np + r*self.Np*self.Nq + s*self.Np*self.Nq*self.Nr
def of(self, i):
#translate from transformed index
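        # Inverse of to(): of(to(p,q,r,s)) == [p,q,r,s] whenever 0 <= p < Np, 0 <= q < Nq,
        # 0 <= r < Nr and 0 <= s < Ns (p varies fastest in the flattened index)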
s = i//(self.Np*self.Nq*self.Nr)
r = (i-s*self.Np*self.Nq*self.Nr)//(self.Np*self.Nq)
q = (i-s*self.Np*self.Nq*self.Nr-r*self.Np*self.Nq)//self.Np
p = (i-s*self.Np*self.Nq*self.Nr-r*self.Np*self.Nq-q*self.Np)
return [p,q,r,s]
def map2(self, L, R):
#map as 2-2 diagram
#Map the conserved channels in the tensor where k_p + k_q == k_r + k_s & m_p + m_q == m_r + m_s
        #L and R are p, q, ... = L and r, s, ... = R, giving the ranges of the rows and columns to be evaluated
#The resulting rows and cols are lists containing rows and columns of dense (nonzero) blocks in the full tensor
N = len(L)
N_rows = len(L[0][0]) #Number of indices in L
N_cols = len(R[0][0]) #Number of indices in R
ident_cols = zeros(N_cols, dtype = int)
ident_rows = zeros(N_rows, dtype = int)
bs = self.bs
for i in range(len(L)):
ident_rows += bs.unique(L[i][0]+L[i][2])*L[i][1]
for i in range(len(R)):
ident_cols += bs.unique(R[i][0]+R[i][2])*R[i][1]
#ident_rows += bs.unique(L[0]) + bs.unique(L[1]) #bs.states[L[i],1] + bs.states[L[i],2]*self.k_step + bs.states[L[i],3]*self.k_step**2 + bs.states[L[i],4]*self.k_step**3
#ident_cols += bs.unique(R[0]) + bs.unique(R[1]) #bs.states[R[i],1] + bs.states[R[i],2]*self.k_step + bs.states[R[i],3]*self.k_step**2 + bs.states[R[i],4]*self.k_step**3
uniques = intersect1d(ident_rows, ident_cols)
blocks = []
rows = []
cols = []
configurations = [] #store unique values for different configs
for u in uniques:
row_indices = ident_rows==u
col_indices = ident_cols==u
#The following is not general
p = L[0][0][row_indices]
q = L[1][0][row_indices]
r = R[0][0][col_indices]
s = R[1][0][col_indices]
if self.amplitude == False:
p += L[0][2]
q += L[1][2]
r += R[0][2]
s += R[1][2]
rows.append([p,q])
cols.append([r,s])
configurations.append(u) #LHS = RHS = u
"""
#store indices in a temporary 1D array
Nx = len(row_indices)
Ny = len(col_indices)
for nx in range(Nx):
for ny in range(Ny):
"""
nblocks = len(rows)
self.blocklengths.append(nblocks)
if self.amplitude:
self.consolidate(rows, cols, configurations, nblocks) #this process should only be used for amplitudes, for interactions: calculate elements on the fly
else:
#store only blocks, not elements
self.blocks.append([])
for i in range(len(rows)):
self.blocks[-1].append([rows[i], cols[i]])
self.configurations.append(configurations)
#print unique(self.ident_rows)
def broadcast(self):
#distribute all elements to the configurations
pass
def map(self, L = [[0,1],[1,1]], R=[[2,1],[3,1]]):
#(1) label rows and columns
Ns = [self.Np, self.Nq, self.Nr, self.Ns]
#(2) determine dimensions and indices of transformed matrix
Nrows = 1
for i in range(len(L)):
Nrows*=Ns[L[i][0]]
rows = arange(Nrows)
Ncols = 1
for i in range(len(R)):
Ncols*=Ns[R[i][0]]
cols = arange(Ncols)
        left = self.unpack(rows, L, Ns) ##indices in the transformed matrix, do I know in which order these occur? (p-q-r-s) yes, from L
right = self.unpack(cols, R, Ns)
pqrs = []
PQRS = [0,0,0,0]
for i in range(len(left)):
pqrs.append(left[i])
PQRS[L[i][0]] = left[i]
for i in range(len(right)):
pqrs.append(right[i])
PQRS[R[i][0]] = right[i]
#append(left, right)
"""
for i in range(4):
print max(pqrs[i])
print "::", max(PQRS[i])
"""
#(3) Identify blocks
#basically ident(row) == ident(col) produces the blocks
LHS = PQRS[L[0][0]]*L[0][1]
for i in range(len(L)-1):
LHS += self.bs.unique(PQRS[L[i+1][0]]*L[i+1][1])
RHS = PQRS[R[0][0]]*R[0][1]
for i in range(len(R)-1):
RHS += self.bs.unique(PQRS[R[i+1][0]]*R[i+1][1])
#now, we will need to find blocks where RHS==LHS, and sort them according to L and R
#We begin by finding the intersection of RHS and LHS
uniques = intersect1d(LHS, RHS)
nblocks = len(uniques)
self.blocklengths.append(nblocks)
self.configurations.append(array(uniques, dtype = int)) #used for inter-tensor contraction mapping
tempElements = []
tempBlockmap = []
self.blocks.append([])
U = len(self.blocks)-1 #current block configuration
#traverse uniques, consolidate blocks
#u_count = 0
for ni in range(len(uniques)):
            u = uniques[ni]
row_indices = LHS==u
col_indices = RHS==u
print len(rows), len(row_indices)
row = rows[row_indices] #rows and columns in the transformed matrix
col = cols[col_indices]
PQRS = [0,0,0,0]
Nx = len(row)
Ny = len(col)
block = zeros((Nx,Ny), dtype = int)
for nx in range(Nx):
for ny in range(Ny):
lhs = self.unpack(row[nx], L, Ns)
rhs = self.unpack(col[ny], R, Ns)
for i in range(len(lhs)):
PQRS[L[i][0]] = lhs[i]
for i in range(len(rhs)):
PQRS[R[i][0]] = rhs[i]
p,q,r,s = PQRS
index = self.to(p,q,r,s)
tempElements.append(index)
tempBlockmap.append([ni, nx, ny]) #the corresponding pointer back to the element in the current block
block[nx,ny] = index
self.blocks[U].append(block)
#print self.blocks
#######
##
        ## CONSOLIDATE ELEMENTS (link doubly occurring indices)
##
#########
#(1) sort temp elements and temp blockmap
n = argsort(tempElements)
tempElements = array(tempElements, dtype = float)[n]
tempBlockmap = array(tempBlockmap, dtype = int)[n]
#print "temp", tempElements
#print "te0", tempElements[0]
        #traverse sorted arrays simultaneously and compare elements; for doubly occurring elements, change pointers in block so they point to the same address
tempN = 0
trueN = 0
tempL = len(tempElements)
trueL = len(self.elements)
all_resolved = False
while trueN < trueL:
#print "mapping all elements"
if self.elements[trueN] == tempElements[tempN]:
                #identical indices found, resolve doubly occurring indices by mapping to one unique index, update pointer in block
block_n, nx, ny = tempBlockmap[tempN]
self.blocks[U][block_n][nx,ny] = trueN #self.elements[trueN]
trueN += 1
tempN += 1
else:
if self.elements[trueN] < tempElements[tempN]:
trueN += 1
#if self.elements[trueN] > tempElements[tempN]:
else:
tempN += 1
if tempN >= tempL:
all_resolved = True #all elements accounted for in already existing self.elements
break
#append the remaining elements to self.elements
if not all_resolved:
#
#print "resolve remaining"
tempRemaining = zeros(tempL-tempN, dtype = int)
tN = 0
while tempN<tempL:
block_n, nx, ny = tempBlockmap[tempN]
tempRemaining[tN] = tempElements[tempN]
#print self.blocks[U][block_n]
#print nx,ny
self.blocks[U][block_n][nx,ny] = trueN + tN #self.elements[trueN]
tempN += 1
tN += 1
self.elements = append(self.elements, tempRemaining)
#print "elements:", self.elements.min()
def consolidate(self, rows, cols, configurations, nblocks):
#consolidate the block mapping to the self.elements indexing and return a list of properly mapped blocks
"""
This function needs to set up each block, where the block elements contain a pointer to the location in the self.elements array where the elements are kept
The challenge here is that the block elements are likely to be already present in the element array (unless first configuration), so that we need only append new elements
        Lookup routines may be unbearably slow unless some preconceived ordering is utilized
        #config ID = ?
        #does it work? remains to be seen ;)
"""
self.configurations.append(array(configurations, dtype = int))
tempElements = []
tempBlockmap = []
self.blocks.append([])
u = len(self.blocks)-1 #current block configuration
for i in range(nblocks):
p,q = rows[i]
r,s = cols[i]
Nx = len(rows[i][0])
Ny = len(cols[i][0])
#print p,q,r,s, Nx, Ny
currentblock = zeros((Nx, Ny), dtype = int)
for nx in range(Nx):
for ny in range(Ny):
index = self.to(p[nx], q[nx], r[ny], s[ny])
#print ":", index
tempElements.append(index)
tempBlockmap.append([i, nx, ny]) #the corresponding pointer back to the element in the current block
currentblock[nx,ny] = index
#if index == 0:
# print i
self.blocks[u].append(currentblock)
#print rows[62]
#print cols[62]
#(1) sort temp elements and temp blockmap
n = argsort(tempElements)
tempElements = array(tempElements, dtype = float)[n]
tempBlockmap = array(tempBlockmap, dtype = int)[n]
#print "temp", tempElements
#print "te0", tempElements[0]
        #traverse sorted arrays simultaneously and compare elements; for doubly occurring elements, change pointers in block so they point to the same address
tempN = 0
trueN = 0
tempL = len(tempElements)
trueL = len(self.elements)
all_resolved = False
while trueN < trueL:
#print "mapping all elements"
if self.elements[trueN] == tempElements[tempN]:
                #identical indices found, resolve doubly occurring indices by mapping to one unique index, update pointer in block
block_n, nx, ny = tempBlockmap[tempN]
self.blocks[u][block_n][nx,ny] = trueN #self.elements[trueN]
trueN += 1
tempN += 1
else:
if self.elements[trueN] < tempElements[tempN]:
trueN += 1
#if self.elements[trueN] > tempElements[tempN]:
else:
tempN += 1
if tempN >= tempL:
all_resolved = True #all elements accounted for in already existing self.elements
break
#append the remaining elements to self.elements
if not all_resolved:
#
#print "resolve remaining"
tempRemaining = zeros(tempL-tempN, dtype = int)
tN = 0
while tempN<tempL:
block_n, nx, ny = tempBlockmap[tempN]
tempRemaining[tN] = tempElements[tempN]
self.blocks[u][block_n][nx,ny] = trueN + tN #self.elements[trueN]
tempN += 1
tN += 1
self.elements = append(self.elements, tempRemaining)
#print "elements:", self.elements.min()
def matchblock(self, u, identifier):
return where(self.configurations[u]==identifier)[0][0]
def matchconfig(self, u, config):
unique_c = intersect1d(config, self.configurations[u]) #unique quantum numbers
pattern1 = zeros(len(unique_c), dtype = int) #pattern stores the index of the block where ident(quantum numbers) = config[i]
pattern2 = zeros(len(unique_c), dtype = int) #pattern stores the index of the block where ident(quantum numbers) = config[i]
for i in range(len(unique_c)):
pattern1[i] = self.matchblock(u, unique_c[i])
pattern2[i] = where(config==unique_c[i])[0][0]
return pattern1, pattern2
def getvblock(self, u, i):
#Get block as interaction (not stored)
p,q = self.blocks[u][i][0][0], self.blocks[u][i][0][1]
r,s = self.blocks[u][i][1][0], self.blocks[u][i][1][1]
Nx = len(p)
Ny = len(r)
block = zeros((Nx, Ny), dtype = float)
for nx in range(Nx):
for ny in range(Ny):
block[nx,ny] = bs.v(p[nx], q[nx], r[ny], s[ny])
return block
def getblock(self, u, i):
#Get block as amplitude
#u = config
#i = block number
return self.elements[self.blocks[u][i]]
def setblock(self, u, i, block):
        self.elements[self.blocks[u][i]] = block #this actually works beautifully!!! :D
def getblock2(self, i):
#set up and return a dense matrix for block i
nx = len(self.rows[i][0])
ny = len(self.cols[i][0])
block = zeros((nx,ny))
for x in range(nx):
for y in range(ny):
block[x,y] = self.bs.v(self.rows[i][0][x],self.rows[i][1][x],self.cols[i][0][y],self.cols[i][1][y])
#print "unique:", self.bs.unique(self.rows[i][0][0]) + self.bs.unique(self.rows[i][1][0])
#print "identifier:", self.configurations[i]
return block
def zeros(self):
#set all elements to zero
self.elements *= 0
def init(self):
#as vhhpp
for i in range(len(self.elements)):
p,q,r,s = self.of(self.elements[i]) #i,j,a,b
self.elements[i] = self.bs.v(p,q,r+self.Np,s+self.Np)
def init_as_t2amp(self):
self.energy_denom = zeros(len(self.elements), dtype = float)
for i in range(len(self.elements)):
p,q,r,s = self.of(self.elements[i]) #a,b,i,j
self.elements[i] = self.bs.v(p+self.Nr,q+self.Nr,r,s)/(bs.states[r,0] + bs.states[s,0] - bs.states[self.Nr + p,0] - bs.states[self.Nr + q,0])
self.energy_denom[i] = bs.states[r,0] + bs.states[s,0] - bs.states[self.Nr + p,0] - bs.states[self.Nr + q,0]
#print "energy:", self.energy_denom
#self.elements/=self.energy_denom
#######################################
##
## The different subindex initialization routines
##
#######################################
def unpack(self, row = arange(10*12), L = [[0,1], [1,1]], MN=[10, 12,14,5]):
#Unpack a compressed index of any size
M = [1]
mn = 1
for i in range(len(L)):
mn*=MN[L[i][0]]
M.append(mn)
M.reverse()
indices = []
for i in range(len(L)):
P = row.copy()
for e in range(i):
P -= indices[e]*M[e+1]
indices.append(P//M[i+1])
indices.reverse()
return indices
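    # Illustrative example (using the default arguments): with L = [[0,1],[1,1]] and MN = [10,12,14,5],
    # a compressed row index r unpacks to [r % 10, r // 10], i.e. the first subindex runs fastest.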
def setup_as_pq_rs(self, L = [[0,1],[1,1]], R=[[2,1],[3,1]]):
#(1) label rows and columns
#this is general to all setups
Ns = [self.Np, self.Nq, self.Nr, self.Ns]
#(2) determine dimensions and indices of transformed matrix
Nrows = 1
for i in range(len(L)):
Nrows*=Ns[L[i][0]]
rows = arange(Nrows)
Ncols = 1
for i in range(len(R)):
Ncols*=Ns[R[i][0]]
cols = arange(Ncols)
        left = self.unpack(rows, L, Ns) ##indices in the transformed matrix, do I know in which order these occur? (p-q-r-s) yes, from L
right = self.unpack(cols, R, Ns)
#print left
pqrs = []
PQRS = [0,0,0,0]
for i in range(len(left)):
pqrs.append(left[i])
PQRS[L[i][0]] = left[i]
for i in range(len(right)):
pqrs.append(right[i])
PQRS[R[i][0]] = right[i]
#append(left, right)
for i in range(4):
print max(pqrs[i])
print "::", max(PQRS[i])
#to sort these as they occur in the unaligned matrix, use L, R
#p,q,r,s = PQRS
#(3) Identify blocks
#basically ident(row) == ident(col) produces the blocks
LHS = pqrs[L[0][0]]*L[0][1]
for i in range(len(L)-1):
LHS += self.bs.unique(pqrs[L[i+1][0]]*L[i+1][1])
RHS = pqrs[R[0][0]]*R[0][1]
for i in range(len(R)-1):
RHS += self.bs.unique(pqrs[R[i+1][0]]*R[i+1][1])
#now, we will need to find blocks where RHS==LHS, and sort them according to L and R
#We begin by finding the intersection of RHS and LHS
uniques = intersect1d(LHS, RHS)
nblocks = len(uniques)
#traverse uniques
for u in uniques:
row_indices = LHS==u
col_indices = RHS==u
row = rows[row_indices] #rows and columns in the transformed matrix
col = cols[col_indices]
#left = self.unpack(rows, L, Ns) ##indices in the transformed matrix, do I know in which order these occurs? (p-q-r-s) yes, from L
#right = self.unpack(cols, R, Ns)
PQRS = [0,0,0,0]
Nx = len(row)
Ny = len(col)
block = zeros((Nx,Ny), dtype = int)
for nx in range(Nx):
for ny in range(Ny):
lhs = self.unpack(row[nx], L, Ns)
rhs = self.unpack(col[ny], R, Ns)
#print lhs, rhs
for i in range(len(lhs)):
PQRS[L[i][0]] = lhs[i]
for i in range(len(rhs)):
PQRS[R[i][0]] = rhs[i]
#print PQRS
p,q,r,s = PQRS
block[nx,ny] = self.to(p,q,r,s)
"""
#now, find p,q,r,s and broadcast to aligned matrix
pqrs_ = []
for i in range(len(L)):
print pq[L[i][0]]
pqrs_.append(L[i][0][row_indices])
for i in range(len(R)):
pqrs_.append(R[i][0][col_indices])
p,q,r,s = pqrs_
"""
#print pq[row_indices]
#print rs[col_indices]
#print " "
#The following is not general
#p = L[0][0][row_indices]
#q = L[1][0][row_indices]
#r = R[0][0][col_indices]
#s = R[1][0][col_indices]
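    # Note on the L/R triplets used in the setups below: each entry [indices, sign, offset] appears to give
    # the subindex array, the sign with which its quantum numbers enter the conservation identifier in map2,
    # and the offset (0 for holes, Nh for particles) added to reach the absolute single-particle state index.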
def t2(self):
bs = self.bs
self.amplitude = True
Np = bs.nstates-bs.nparticles
Nh = bs.nparticles #conflicting naming should be resolved in final implementation
##############
# find basic arrangement
###############
row = arange(Nh**2)
i = row % Nh
j = row // Nh
col = arange(Np**2)
a = col % Np
b = col // Np
L = [[a,1, Nh],[b,1, Nh]]
R = [[i,1,0],[j,1, 0]]
self.map2(L,R)
##############
# find ck-ai, as used in Q, and L3
###############
row = arange(Nh*Np)
c = row // Np
k = row % Np
col = arange(Nh*Np)
i = col // Nh
a = col % Nh
L = [[c,1,Nh],[k,-1, 0]]
R = [[i,1, Nh],[a,-1, Nh]]
self.map2(L,R)
self.init_as_t2amp()
def vpphh(self):
bs = self.bs
Np = bs.nstates-bs.nparticles
Nh = bs.nparticles #conflicting naming should be resolved in final implementation
##############
# find basic arrangement
###############
row = arange(Nh**2)
i = row % Nh
j = row // Nh
col = arange(Np**2)
a = col % Np
b = col // Np
L = [[a,1, Nh],[b,1, Nh]]
R = [[i,1,0],[j,1, 0]]
self.map2(L,R)
def vpppp(self):
bs = self.bs
Np = bs.nstates-bs.nparticles
Nh = bs.nparticles #conflicting naming should be resolved in final implementation
col = arange(Np**2)
a = col % Np
b = col // Np
L = [[a,1,Nh],[b,1,Nh]]
R = [[a,1,Nh],[b,1,Nh]]
self.map2(L,R)
def vhhhh(self):
bs = self.bs
Np = bs.nstates-bs.nparticles
Nh = bs.nparticles #conflicting naming should be resolved in final implementation
col = arange(Nh**2)
i = col % Nh
j = col // Nh
L = [[i,1,0],[j,1,0]]
R = [[i,1,0],[j,1,0]]
self.map2(L,R)
def vhpph(self):
bs = self.bs
Np = bs.nstates-bs.nparticles
Nh = bs.nparticles #conflicting naming should be resolved in final implementation
row = arange(Nh*Np)
a = row // Nh
i = row % Nh
col = arange(Nh*Np)
j = col // Nh
b = col % Nh
L = [[i,1,0],[a,-1, Nh]]
R = [[b,1, Nh],[j,-1, 0]]
self.map2(L,R)
def vhhpp(self):
bs = self.bs
Np = bs.nstates-bs.nparticles
Nh = bs.nparticles #conflicting naming should be resolved in final implementation
##############
# find basic arrangement (used in Q1)
###############
row = arange(Nh**2)
i = row % Nh
j = row // Nh
col = arange(Np**2)
a = col % Np
b = col // Np
L = [[i,1, 0],[j,1,0]]
R = [[a,1,Nh],[b,1, Nh]]
self.map2(L,R)
#####################
# find vhpph (used for L3)
####################
row = arange(Nh*Np)
a = row // Nh
i = row % Nh
col = arange(Nh*Np)
j = col // Nh
b = col % Nh
L = [[i,1,0],[a,-1, Nh]]
R = [[b,1, Nh],[j,-1, 0]]
self.map2(L,R)
class CCD_block():
def __init__(self, bs, Nh):
#set up all diagrams needed as channelmap objects
pass
def align_diagrams(self):
#generate all channels
pass
def advance(self):
#perform one iteration
pass
def energy(self):
pass
def contract(self, M1, u1, M2, u2, channel, MR, ur):
#contract M1*M2 = MR guided by channel
c1,c2 = channel
for i in range(len(c2)):
#MR.setblock( dot(M1.getblock(u, c1[i]), M2.getblock(u2, c2)) #where to broadcast? ur, i
pass
def show_regions(bs):
Z = zeros((Nh**2, Np**2), dtype = int)
Ns = bs.nstates
for p in range(Nh):
for q in range(Nh):
for r in range(Np):
for s in range(Np):
if bs.kdplus(p,q,r,s):
Z[p+q*Nh,r+s*Np] += 1
imshow(Z)
show()
Nh = 14
Nshells =5
bs = electronbasis(Nshells, 1.0, Nh)
Np = bs.nstates-Nh
print "Number of states:", bs.nstates
vpphh = channelmap(bs, Np, Np, Nh, Nh)
vhhpp = channelmap(bs, Nh, Nh, Np, Np)
vhhhh = channelmap(bs, Nh, Nh, Nh, Nh)
vhpph = channelmap(bs, Nh, Nh, Nh, Nh)
vpppp = channelmap(bs, Np, Np, Np, Np)
t2amp = channelmap(bs, Np, Np, Nh, Nh)
t2prev = channelmap(bs,Np, Np, Nh, Nh)
v = channelmap(bs, Np, Np, Nh, Nh)
#v.map([[0,1],[2,-1]], [[1,-1],[3,1]])
#print len(v.elements)
#print vpphh.configurations
v.map()
#print len(v.elements)
#print v.configurations[0]
#print v.configurations[1]
c1,c2 = v.matchconfig(0,v.configurations[1])
for i in range(len(c1)):
print v.blocks[0][c1[i]]
print v.blocks[1][c2[i]]
print " "
print v.blocklengths
#performing a test
"""
vpphh.vpphh()
vpppp.vpppp()
vhhhh.vhhhh()
vhpph.vhpph()
vhhpp.vhhpp()
t2amp.t2()
t2prev.t2()
t2prev.zeros()
print "number of nonzero elements:", len(nonzero(t2amp.elements)[0])
c1,c2 = t2amp.matchconfig(0, vpppp.configurations[0])
print c1,c2
psum = 0
for i in range(len(c1)):
#print " "
#print vpppp.getvblock(0,c2[i]).shape
#print t2amp.getblock(0,c1[i]).shape
block = dot(vpppp.getvblock(0,c2[i]),t2amp.getblock(0,c1[i]))
t2prev.setblock(0,c1[i], block)
psum += .25*sum(block.diagonal())
#print t2prev.elements
print "Block calc:", psum
if False:
v1 = zeros((Nh**2, Np**2))
v2 = zeros((Np**2, Nh**2))
#manual MBPT(2) energy (-0.525588309385 for 66 states)
psum2 = 0
for i in range(Nh):
for j in range(Nh):
for a in range(Np):
for b in range(Np):
v1[i + j*Nh, a+b*Np] = bs.v(i,j,a+Nh,b+Nh)
v2[a+b*Np,i + j*Nh] = bs.v(a+Nh,b+Nh,i,j)/(bs.states[i,0] + bs.states[j,0] - bs.states[a + Nh, 0] - bs.states[b+Nh,0])
psum = .25*sum(dot(v1,v2).diagonal())
print "Standard calc:", psum
"""
"""
def to(self, p,q,r,s):
#translate to transformed index
return p + q*self.Np + r*self.Np*self.Nq + s*self.Np*self.Nq*self.Nr
return p + Np*(q + Nq*(r + Nr*s))
def of(self, i):
#translate from transformed index
s = i//(self.Np*self.Nq*self.Nr)
r = (i-s*self.Np*self.Nq*self.Nr)//(self.Np*self.Nq)
q = (i-s*self.Np*self.Nq*self.Nr-r*self.Np*self.Nq)//self.Np
p = (i-s*self.Np*self.Nq*self.Nr-r*self.Np*self.Nq-q*self.Np)
return [p,q,r,s]
"""
def unpack(row = arange(10*12), L = [[0,1], [1,1]], MN=[10, 12,14,5]):
#Unpack a compressed index of any size
M = [1]
mn = 1
for i in range(len(L)):
mn*=MN[L[i][0]]
M.append(mn)
M.reverse()
print "M:", M
indices = []
for i in range(len(L)):
#print "row:", row
P = row.copy()
print "P:",P
for e in range(i):
P -= indices[e]*M[e+1]
print "-", M[e+1], "*", indices[e]
print " "
indices.append(P//M[i+1])
print P,":",M[i+1]
print "=", P//M[i+1]
print "---------------------"
indices.reverse()
return indices
"""
s = i//Ms
r = (i-s*Ms)//Mr
q = (i-s*Ms-r*Mr)//Mq
p = (i-s*Ms-r*Mr-q*Mq)
"""
#unpack() | cc0-1.0 |
YoeriDijkstra/iFlow | Examples/Tutorial/plotting/PlotGrid.py | 1 | 1124 | """
Test
Date: 19-Oct-16
Authors: Y.M. Dijkstra
"""
import numpy as np
import step as st
import matplotlib.pyplot as plt
import nifty as ny
class PlotGrid:
# Variables
# Methods
def __init__(self, input):
self.input = input
return
def run(self):
st.configure()
z = ny.dimensionalAxis(self.input.slice('grid'), 'z')[:,:,0]
x = ny.dimensionalAxis(self.input.slice('grid'), 'x')[:,:,0]
print 'maximum depth: ' + str(self.input.v('H', x=0))
print 'minimum depth: ' + str(self.input.v('H', x=1))
print 'maximum width: ' + str(self.input.v('B', x=0))
print 'minimum width: ' + str(self.input.v('B', x=1))
plt.figure(1, figsize=(1,2))
for i in range(0, z.shape[1]):
plt.plot(x[:,0]/1000., z[:,i], 'k-')
for i in range(0, x.shape[0]):
plt.plot(x[i,:]/1000., z[i,:], 'k-')
plt.xlabel('x (km)')
plt.ylabel('z (m)')
plt.xlim(0, np.max(x[:,0])/1000.)
plt.ylim(np.min(z), np.max(z))
st.show()
# twoxu1 = 2*u1
d = {}
return d | lgpl-3.0 |
bthirion/nipy | nipy/tests/test_scripts.py | 1 | 5512 | # emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*-
# vi: set ft=python sts=4 ts=4 sw=4 et:
""" Test scripts
If we appear to be running from the development directory, use the scripts in
the top-level folder ``scripts``. Otherwise try and get the scripts from the
path
"""
from __future__ import with_statement
import sys
import os
from os.path import dirname, join as pjoin, isfile, isdir, abspath, realpath
from subprocess import Popen, PIPE
import numpy as np
from nibabel.tmpdirs import InTemporaryDirectory
from nipy import load_image, save_image
from nipy.core.api import rollimg
from nose.tools import assert_true, assert_false, assert_equal
from ..testing import funcfile
from numpy.testing import decorators, assert_almost_equal
from nipy.testing.decorators import make_label_dec
from nibabel.optpkg import optional_package
matplotlib, HAVE_MPL, _ = optional_package('matplotlib')
needs_mpl = decorators.skipif(not HAVE_MPL, "Test needs matplotlib")
script_test = make_label_dec('script_test')
# Need shell to get path to correct executables
USE_SHELL = True
DEBUG_PRINT = os.environ.get('NIPY_DEBUG_PRINT', False)
def local_script_dir():
# Check for presence of scripts in development directory. ``realpath``
# checks for the situation where the development directory has been linked
# into the path.
below_nipy_dir = realpath(pjoin(dirname(__file__), '..', '..'))
devel_script_dir = pjoin(below_nipy_dir, 'scripts')
if isfile(pjoin(below_nipy_dir, 'setup.py')) and isdir(devel_script_dir):
return devel_script_dir
return None
LOCAL_SCRIPT_DIR = local_script_dir()
def run_command(cmd):
    if LOCAL_SCRIPT_DIR is not None:
# Windows can't run script files without extensions natively so we need
# to run local scripts (no extensions) via the Python interpreter. On
# Unix, we might have the wrong incantation for the Python interpreter
# in the hash bang first line in the source file. So, either way, run
# the script through the Python interpreter
cmd = "%s %s" % (sys.executable, pjoin(LOCAL_SCRIPT_DIR, cmd))
if DEBUG_PRINT:
print("Running command '%s'" % cmd)
proc = Popen(cmd, stdout=PIPE, stderr=PIPE, shell=USE_SHELL)
stdout, stderr = proc.communicate()
    if proc.poll() is None:
proc.terminate()
if proc.returncode != 0:
raise RuntimeError('Command "%s" failed with stdout\n%s\nstderr\n%s\n'
% (cmd, stdout, stderr))
return proc.returncode
@needs_mpl
@script_test
def test_nipy_diagnose():
# Test nipy diagnose script
fimg = load_image(funcfile)
ncomps = 12
with InTemporaryDirectory() as tmpdir:
# Need to quote out path in case it has spaces
cmd = 'nipy_diagnose "%s" --ncomponents=%d --out-path="%s"' % (
funcfile, ncomps, tmpdir)
run_command(cmd)
for out_fname in ('components_functional.png',
'pcnt_var_functional.png',
'tsdiff_functional.png',
'vectors_components_functional.npz'):
assert_true(isfile(out_fname))
for out_img in ('max_functional.nii.gz',
'mean_functional.nii.gz',
'min_functional.nii.gz',
'std_functional.nii.gz'):
img = load_image(out_img)
assert_equal(img.shape, fimg.shape[:-1])
del img
pca_img = load_image('pca_functional.nii.gz')
assert_equal(pca_img.shape, fimg.shape[:-1] + (ncomps,))
vecs_comps = np.load('vectors_components_functional.npz')
vec_diff = vecs_comps['slice_mean_diff2'].copy()# just in case
assert_equal(vec_diff.shape, (fimg.shape[-1]-1, fimg.shape[2]))
del pca_img, vecs_comps
with InTemporaryDirectory() as tmpdir:
# Check we can pass in slice and time flags
s0_img = rollimg(fimg, 'k')
save_image(s0_img, 'slice0.nii')
cmd = ('nipy_diagnose slice0.nii --ncomponents=%d --out-path="%s" '
'--time-axis=t --slice-axis=0' % (ncomps, tmpdir))
run_command(cmd)
pca_img = load_image('pca_slice0.nii')
assert_equal(pca_img.shape, s0_img.shape[:-1] + (ncomps,))
vecs_comps = np.load('vectors_components_slice0.npz')
assert_almost_equal(vecs_comps['slice_mean_diff2'], vec_diff)
del pca_img, vecs_comps
@needs_mpl
@script_test
def test_nipy_tsdiffana():
# Test nipy_tsdiffana script
out_png = 'ts_out.png'
with InTemporaryDirectory():
# Quotes in case of space in arguments
cmd = 'nipy_tsdiffana "%s" --out-file="%s"' % (funcfile, out_png)
run_command(cmd)
assert_true(isfile(out_png))
@script_test
def test_nipy_3_4d():
# Test nipy_3dto4d and nipy_4dto3d
fimg = load_image(funcfile)
N = fimg.shape[-1]
out_4d = 'func4d.nii'
with InTemporaryDirectory() as tmpdir:
# Quotes in case of space in arguments
cmd = 'nipy_4dto3d "%s" --out-path="%s"' % (funcfile, tmpdir)
run_command(cmd)
imgs_3d = ['functional_%04d.nii' % i for i in range(N)]
for iname in imgs_3d:
assert_true(isfile(iname))
cmd = 'nipy_3dto4d "%s" --out-4d="%s"' % ('" "'.join(imgs_3d), out_4d)
run_command(cmd)
fimg_back = load_image(out_4d)
assert_almost_equal(fimg.get_data(), fimg_back.get_data())
del fimg_back
| bsd-3-clause |
LukaOo/Cervix2017 | scripts/remove-all-unmarked-images.py | 1 | 1262 | ## Removes all unmarked images in input mark failes
## Input - file with marks
## - images catalog
## if failed exists in input marked file but has not any regions it file will be removed
##
import sys
import os
import pandas as pd
import json
import re
from optparse import OptionParser
parser = OptionParser()
parser.add_option("-i", "--input", dest="input",
help="input image path")
parser.add_option("-m", "--masks", dest="masks",
help="Path with masks markers files")
(options, args) = parser.parse_args()
IMAGES_BASE_PATH = options.input
MARKERS_INPUT_PATH = options.masks
markers = os.listdir(MARKERS_INPUT_PATH)
markers.sort()
if __name__ == "__main__":
for mf in markers:
marker_file = pd.read_csv(MARKERS_INPUT_PATH + '/' + mf)
mf = re.search(r'^(.+?)\.txt', mf).group(1)
print mf
for i, j in enumerate(marker_file['region_shape_attributes']):
o = json.loads(j)
if 'name' not in o:
f = marker_file['#filename'][i]
fname = IMAGES_BASE_PATH + '/' + mf + '/' + f
if os.path.isfile(fname) == True:
print fname
os.remove(fname) | gpl-3.0 |
TomAugspurger/pandas | pandas/core/arrays/integer.py | 1 | 22308 | import numbers
from typing import TYPE_CHECKING, Dict, List, Optional, Tuple, Type, Union
import warnings
import numpy as np
from pandas._libs import lib, missing as libmissing
from pandas._typing import ArrayLike, DtypeObj
from pandas.compat import set_function_name
from pandas.compat.numpy import function as nv
from pandas.util._decorators import cache_readonly
from pandas.core.dtypes.common import (
is_bool_dtype,
is_datetime64_dtype,
is_float,
is_float_dtype,
is_integer,
is_integer_dtype,
is_list_like,
is_object_dtype,
pandas_dtype,
)
from pandas.core.dtypes.dtypes import register_extension_dtype
from pandas.core.dtypes.missing import isna
from pandas.core import ops
from pandas.core.array_algos import masked_reductions
from pandas.core.ops import invalid_comparison
from pandas.core.ops.common import unpack_zerodim_and_defer
from pandas.core.tools.numeric import to_numeric
from .masked import BaseMaskedArray, BaseMaskedDtype
if TYPE_CHECKING:
import pyarrow # noqa: F401
class _IntegerDtype(BaseMaskedDtype):
"""
An ExtensionDtype to hold a single size & kind of integer dtype.
These specific implementations are subclasses of the non-public
_IntegerDtype. For example we have Int8Dtype to represent signed int 8s.
The attributes name & type are set when these subclasses are created.
"""
name: str
base = None
type: Type
def __repr__(self) -> str:
sign = "U" if self.is_unsigned_integer else ""
return f"{sign}Int{8 * self.itemsize}Dtype()"
@cache_readonly
def is_signed_integer(self) -> bool:
return self.kind == "i"
@cache_readonly
def is_unsigned_integer(self) -> bool:
return self.kind == "u"
@property
def _is_numeric(self) -> bool:
return True
@cache_readonly
def numpy_dtype(self) -> np.dtype:
""" Return an instance of our numpy dtype """
return np.dtype(self.type)
@cache_readonly
def kind(self) -> str:
return self.numpy_dtype.kind
@cache_readonly
def itemsize(self) -> int:
""" Return the number of bytes in this dtype """
return self.numpy_dtype.itemsize
@classmethod
def construct_array_type(cls) -> Type["IntegerArray"]:
"""
Return the array type associated with this dtype.
Returns
-------
type
"""
return IntegerArray
def _get_common_dtype(self, dtypes: List[DtypeObj]) -> Optional[DtypeObj]:
# for now only handle other integer types
if not all(isinstance(t, _IntegerDtype) for t in dtypes):
return None
np_dtype = np.find_common_type(
[t.numpy_dtype for t in dtypes], [] # type: ignore
)
if np.issubdtype(np_dtype, np.integer):
return _dtypes[str(np_dtype)]
return None
def __from_arrow__(
self, array: Union["pyarrow.Array", "pyarrow.ChunkedArray"]
) -> "IntegerArray":
"""
Construct IntegerArray from pyarrow Array/ChunkedArray.
"""
import pyarrow # noqa: F811
from pandas.core.arrays._arrow_utils import pyarrow_array_to_numpy_and_mask
pyarrow_type = pyarrow.from_numpy_dtype(self.type)
if not array.type.equals(pyarrow_type):
array = array.cast(pyarrow_type)
if isinstance(array, pyarrow.Array):
chunks = [array]
else:
# pyarrow.ChunkedArray
chunks = array.chunks
results = []
for arr in chunks:
data, mask = pyarrow_array_to_numpy_and_mask(arr, dtype=self.type)
int_arr = IntegerArray(data.copy(), ~mask, copy=False)
results.append(int_arr)
return IntegerArray._concat_same_type(results)
def integer_array(values, dtype=None, copy: bool = False,) -> "IntegerArray":
"""
Infer and return an integer array of the values.
Parameters
----------
values : 1D list-like
dtype : dtype, optional
dtype to coerce
copy : bool, default False
Returns
-------
IntegerArray
Raises
------
TypeError if incompatible types
"""
values, mask = coerce_to_array(values, dtype=dtype, copy=copy)
return IntegerArray(values, mask)
def safe_cast(values, dtype, copy: bool):
"""
Safely cast the values to the dtype if they
are equivalent, meaning floats must be equivalent to the
ints.
"""
try:
return values.astype(dtype, casting="safe", copy=copy)
except TypeError as err:
casted = values.astype(dtype, copy=copy)
if (casted == values).all():
return casted
raise TypeError(
f"cannot safely cast non-equivalent {values.dtype} to {np.dtype(dtype)}"
) from err
def coerce_to_array(
values, dtype, mask=None, copy: bool = False,
) -> Tuple[np.ndarray, np.ndarray]:
"""
Coerce the input values array to numpy arrays with a mask
Parameters
----------
values : 1D list-like
dtype : integer dtype
mask : bool 1D array, optional
copy : bool, default False
if True, copy the input
Returns
-------
tuple of (values, mask)
"""
    # if values is an integer numpy array, preserve its dtype
if dtype is None and hasattr(values, "dtype"):
if is_integer_dtype(values.dtype):
dtype = values.dtype
if dtype is not None:
if isinstance(dtype, str) and (
dtype.startswith("Int") or dtype.startswith("UInt")
):
# Avoid DeprecationWarning from NumPy about np.dtype("Int64")
# https://github.com/numpy/numpy/pull/7476
dtype = dtype.lower()
if not issubclass(type(dtype), _IntegerDtype):
try:
dtype = _dtypes[str(np.dtype(dtype))]
except KeyError as err:
raise ValueError(f"invalid dtype specified {dtype}") from err
if isinstance(values, IntegerArray):
values, mask = values._data, values._mask
if dtype is not None:
values = values.astype(dtype.numpy_dtype, copy=False)
if copy:
values = values.copy()
mask = mask.copy()
return values, mask
values = np.array(values, copy=copy)
if is_object_dtype(values):
inferred_type = lib.infer_dtype(values, skipna=True)
if inferred_type == "empty":
values = np.empty(len(values))
values.fill(np.nan)
elif inferred_type not in [
"floating",
"integer",
"mixed-integer",
"integer-na",
"mixed-integer-float",
]:
raise TypeError(f"{values.dtype} cannot be converted to an IntegerDtype")
elif is_bool_dtype(values) and is_integer_dtype(dtype):
values = np.array(values, dtype=int, copy=copy)
elif not (is_integer_dtype(values) or is_float_dtype(values)):
raise TypeError(f"{values.dtype} cannot be converted to an IntegerDtype")
if mask is None:
mask = isna(values)
else:
assert len(mask) == len(values)
if not values.ndim == 1:
raise TypeError("values must be a 1D list-like")
if not mask.ndim == 1:
raise TypeError("mask must be a 1D list-like")
# infer dtype if needed
if dtype is None:
dtype = np.dtype("int64")
else:
dtype = dtype.type
# if we are float, let's make sure that we can
# safely cast
# we copy as need to coerce here
if mask.any():
values = values.copy()
values[mask] = 1
values = safe_cast(values, dtype, copy=False)
else:
values = safe_cast(values, dtype, copy=False)
return values, mask
class IntegerArray(BaseMaskedArray):
"""
Array of integer (optional missing) values.
.. versionadded:: 0.24.0
.. versionchanged:: 1.0.0
Now uses :attr:`pandas.NA` as the missing value rather
than :attr:`numpy.nan`.
.. warning::
IntegerArray is currently experimental, and its API or internal
implementation may change without warning.
We represent an IntegerArray with 2 numpy arrays:
- data: contains a numpy integer array of the appropriate dtype
- mask: a boolean array holding a mask on the data, True is missing
To construct an IntegerArray from generic array-like input, use
:func:`pandas.array` with one of the integer dtypes (see examples).
See :ref:`integer_na` for more.
Parameters
----------
values : numpy.ndarray
A 1-d integer-dtype array.
mask : numpy.ndarray
A 1-d boolean-dtype array indicating missing values.
copy : bool, default False
Whether to copy the `values` and `mask`.
Attributes
----------
None
Methods
-------
None
Returns
-------
IntegerArray
Examples
--------
Create an IntegerArray with :func:`pandas.array`.
>>> int_array = pd.array([1, None, 3], dtype=pd.Int32Dtype())
>>> int_array
<IntegerArray>
[1, <NA>, 3]
Length: 3, dtype: Int32
String aliases for the dtypes are also available. They are capitalized.
>>> pd.array([1, None, 3], dtype='Int32')
<IntegerArray>
[1, <NA>, 3]
Length: 3, dtype: Int32
>>> pd.array([1, None, 3], dtype='UInt16')
<IntegerArray>
[1, <NA>, 3]
Length: 3, dtype: UInt16
"""
# The value used to fill '_data' to avoid upcasting
_internal_fill_value = 1
@cache_readonly
def dtype(self) -> _IntegerDtype:
return _dtypes[str(self._data.dtype)]
def __init__(self, values: np.ndarray, mask: np.ndarray, copy: bool = False):
if not (isinstance(values, np.ndarray) and values.dtype.kind in ["i", "u"]):
raise TypeError(
"values should be integer numpy array. Use "
"the 'pd.array' function instead"
)
super().__init__(values, mask, copy=copy)
@classmethod
def _from_sequence(cls, scalars, dtype=None, copy: bool = False) -> "IntegerArray":
return integer_array(scalars, dtype=dtype, copy=copy)
@classmethod
def _from_sequence_of_strings(
cls, strings, dtype=None, copy: bool = False
) -> "IntegerArray":
scalars = to_numeric(strings, errors="raise")
return cls._from_sequence(scalars, dtype, copy)
_HANDLED_TYPES = (np.ndarray, numbers.Number)
def __array_ufunc__(self, ufunc, method: str, *inputs, **kwargs):
# For IntegerArray inputs, we apply the ufunc to ._data
# and mask the result.
if method == "reduce":
# Not clear how to handle missing values in reductions. Raise.
raise NotImplementedError("The 'reduce' method is not supported.")
out = kwargs.get("out", ())
for x in inputs + out:
if not isinstance(x, self._HANDLED_TYPES + (IntegerArray,)):
return NotImplemented
# for binary ops, use our custom dunder methods
result = ops.maybe_dispatch_ufunc_to_dunder_op(
self, ufunc, method, *inputs, **kwargs
)
if result is not NotImplemented:
return result
mask = np.zeros(len(self), dtype=bool)
inputs2 = []
for x in inputs:
if isinstance(x, IntegerArray):
mask |= x._mask
inputs2.append(x._data)
else:
inputs2.append(x)
def reconstruct(x):
# we don't worry about scalar `x` here, since we
# raise for reduce up above.
if is_integer_dtype(x.dtype):
m = mask.copy()
return IntegerArray(x, m)
else:
x[mask] = np.nan
return x
result = getattr(ufunc, method)(*inputs2, **kwargs)
if isinstance(result, tuple):
            return tuple(reconstruct(x) for x in result)
else:
return reconstruct(result)
def _coerce_to_array(self, value) -> Tuple[np.ndarray, np.ndarray]:
return coerce_to_array(value, dtype=self.dtype)
def astype(self, dtype, copy: bool = True) -> ArrayLike:
"""
Cast to a NumPy array or ExtensionArray with 'dtype'.
Parameters
----------
dtype : str or dtype
Typecode or data-type to which the array is cast.
copy : bool, default True
Whether to copy the data, even if not necessary. If False,
a copy is made only if the old dtype does not match the
new dtype.
Returns
-------
ndarray or ExtensionArray
NumPy ndarray, BooleanArray or IntegerArray with 'dtype' for its dtype.
Raises
------
TypeError
if incompatible type with an IntegerDtype, equivalent of same_kind
casting
"""
from pandas.core.arrays.boolean import BooleanDtype
from pandas.core.arrays.string_ import StringDtype
dtype = pandas_dtype(dtype)
# if we are astyping to an existing IntegerDtype we can fastpath
if isinstance(dtype, _IntegerDtype):
result = self._data.astype(dtype.numpy_dtype, copy=False)
return dtype.construct_array_type()(result, mask=self._mask, copy=False)
elif isinstance(dtype, BooleanDtype):
result = self._data.astype("bool", copy=False)
return dtype.construct_array_type()(result, mask=self._mask, copy=False)
elif isinstance(dtype, StringDtype):
return dtype.construct_array_type()._from_sequence(self, copy=False)
# coerce
if is_float_dtype(dtype):
# In astype, we consider dtype=float to also mean na_value=np.nan
na_value = np.nan
elif is_datetime64_dtype(dtype):
na_value = np.datetime64("NaT")
else:
na_value = lib.no_default
return self.to_numpy(dtype=dtype, na_value=na_value, copy=False)
def _values_for_argsort(self) -> np.ndarray:
"""
Return values for sorting.
Returns
-------
ndarray
The transformed values should maintain the ordering between values
within the array.
See Also
--------
ExtensionArray.argsort
"""
data = self._data.copy()
if self._mask.any():
data[self._mask] = data.min() - 1
return data
@classmethod
def _create_comparison_method(cls, op):
op_name = op.__name__
@unpack_zerodim_and_defer(op.__name__)
def cmp_method(self, other):
from pandas.arrays import BooleanArray
mask = None
if isinstance(other, (BooleanArray, IntegerArray)):
other, mask = other._data, other._mask
elif is_list_like(other):
other = np.asarray(other)
if other.ndim > 1:
raise NotImplementedError(
"can only perform ops with 1-d structures"
)
if len(self) != len(other):
raise ValueError("Lengths must match to compare")
if other is libmissing.NA:
# numpy does not handle pd.NA well as "other" scalar (it returns
# a scalar False instead of an array)
# This may be fixed by NA.__array_ufunc__. Revisit this check
# once that's implemented.
result = np.zeros(self._data.shape, dtype="bool")
mask = np.ones(self._data.shape, dtype="bool")
else:
with warnings.catch_warnings():
# numpy may show a FutureWarning:
# elementwise comparison failed; returning scalar instead,
# but in the future will perform elementwise comparison
# before returning NotImplemented. We fall back to the correct
# behavior today, so that should be fine to ignore.
warnings.filterwarnings("ignore", "elementwise", FutureWarning)
with np.errstate(all="ignore"):
method = getattr(self._data, f"__{op_name}__")
result = method(other)
if result is NotImplemented:
result = invalid_comparison(self._data, other, op)
# nans propagate
if mask is None:
mask = self._mask.copy()
else:
mask = self._mask | mask
return BooleanArray(result, mask)
name = f"__{op.__name__}__"
return set_function_name(cmp_method, name, cls)
def sum(self, skipna=True, min_count=0, **kwargs):
nv.validate_sum((), kwargs)
result = masked_reductions.sum(
values=self._data, mask=self._mask, skipna=skipna, min_count=min_count
)
return result
def _maybe_mask_result(self, result, mask, other, op_name: str):
"""
Parameters
----------
result : array-like
mask : array-like bool
other : scalar or array-like
op_name : str
"""
# if we have a float operand we are by-definition
# a float result
# or our op is a divide
if (is_float_dtype(other) or is_float(other)) or (
op_name in ["rtruediv", "truediv"]
):
result[mask] = np.nan
return result
return type(self)(result, mask, copy=False)
@classmethod
def _create_arithmetic_method(cls, op):
op_name = op.__name__
@unpack_zerodim_and_defer(op.__name__)
def integer_arithmetic_method(self, other):
omask = None
if getattr(other, "ndim", 0) > 1:
raise NotImplementedError("can only perform ops with 1-d structures")
if isinstance(other, IntegerArray):
other, omask = other._data, other._mask
elif is_list_like(other):
other = np.asarray(other)
if other.ndim > 1:
raise NotImplementedError(
"can only perform ops with 1-d structures"
)
if len(self) != len(other):
raise ValueError("Lengths must match")
if not (is_float_dtype(other) or is_integer_dtype(other)):
raise TypeError("can only perform ops with numeric values")
else:
if not (is_float(other) or is_integer(other) or other is libmissing.NA):
raise TypeError("can only perform ops with numeric values")
if omask is None:
mask = self._mask.copy()
if other is libmissing.NA:
mask |= True
else:
mask = self._mask | omask
if op_name == "pow":
# 1 ** x is 1.
mask = np.where((self._data == 1) & ~self._mask, False, mask)
# x ** 0 is 1.
if omask is not None:
mask = np.where((other == 0) & ~omask, False, mask)
elif other is not libmissing.NA:
mask = np.where(other == 0, False, mask)
elif op_name == "rpow":
# 1 ** x is 1.
if omask is not None:
mask = np.where((other == 1) & ~omask, False, mask)
elif other is not libmissing.NA:
mask = np.where(other == 1, False, mask)
# x ** 0 is 1.
mask = np.where((self._data == 0) & ~self._mask, False, mask)
if other is libmissing.NA:
result = np.ones_like(self._data)
else:
with np.errstate(all="ignore"):
result = op(self._data, other)
# divmod returns a tuple
if op_name == "divmod":
div, mod = result
return (
self._maybe_mask_result(div, mask, other, "floordiv"),
self._maybe_mask_result(mod, mask, other, "mod"),
)
return self._maybe_mask_result(result, mask, other, op_name)
name = f"__{op.__name__}__"
return set_function_name(integer_arithmetic_method, name, cls)
IntegerArray._add_arithmetic_ops()
IntegerArray._add_comparison_ops()
_dtype_docstring = """
An ExtensionDtype for {dtype} integer data.
.. versionchanged:: 1.0.0
Now uses :attr:`pandas.NA` as its missing value,
rather than :attr:`numpy.nan`.
Attributes
----------
None
Methods
-------
None
"""
# create the Dtype
@register_extension_dtype
class Int8Dtype(_IntegerDtype):
type = np.int8
name = "Int8"
__doc__ = _dtype_docstring.format(dtype="int8")
@register_extension_dtype
class Int16Dtype(_IntegerDtype):
type = np.int16
name = "Int16"
__doc__ = _dtype_docstring.format(dtype="int16")
@register_extension_dtype
class Int32Dtype(_IntegerDtype):
type = np.int32
name = "Int32"
__doc__ = _dtype_docstring.format(dtype="int32")
@register_extension_dtype
class Int64Dtype(_IntegerDtype):
type = np.int64
name = "Int64"
__doc__ = _dtype_docstring.format(dtype="int64")
@register_extension_dtype
class UInt8Dtype(_IntegerDtype):
type = np.uint8
name = "UInt8"
__doc__ = _dtype_docstring.format(dtype="uint8")
@register_extension_dtype
class UInt16Dtype(_IntegerDtype):
type = np.uint16
name = "UInt16"
__doc__ = _dtype_docstring.format(dtype="uint16")
@register_extension_dtype
class UInt32Dtype(_IntegerDtype):
type = np.uint32
name = "UInt32"
__doc__ = _dtype_docstring.format(dtype="uint32")
@register_extension_dtype
class UInt64Dtype(_IntegerDtype):
type = np.uint64
name = "UInt64"
__doc__ = _dtype_docstring.format(dtype="uint64")
_dtypes: Dict[str, _IntegerDtype] = {
"int8": Int8Dtype(),
"int16": Int16Dtype(),
"int32": Int32Dtype(),
"int64": Int64Dtype(),
"uint8": UInt8Dtype(),
"uint16": UInt16Dtype(),
"uint32": UInt32Dtype(),
"uint64": UInt64Dtype(),
}
| bsd-3-clause |
tentangdata/pinisi | scripts/functions.py | 1 | 8086 | import math
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import seaborn as sns
FIG_AREA = 48
GRID_AREA = 96
COLOR_GRID = 'lightgrey'
GRID_LW = 0.5
# Data processing functions
def filter_iqr(X, num_iqr):
median = X.median()
iqr = X.quantile(0.75) - X.quantile(0.25)
return X[(X >= median - iqr*num_iqr) & (X <= median + iqr*num_iqr)]
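# e.g. (illustrative) filter_iqr(pd.Series([1, 2, 3, 100]), num_iqr=1.5) keeps [1, 2, 3] and drops the outlier 100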
def dist(p_ref, points):
""" Compute distance between a reference point and a set of points.
p_ref : (float, float)
lat, lng coordinates of the reference point
points : pd.DataFrame with (lat, lng) columns
Points to be computed
Returns : pd.Series with same index as points
Distance between points and p_ref
"""
return ((points.lat - p_ref[0])**2 + (points.lng - p_ref[1])**2)**0.5
def get_dist_level(points_ref, points_expert, points, level_id):
p_ref = points_ref[points_ref.level == level_id]
p_ref = (p_ref.lat.iloc[0], p_ref.lng.iloc[0])
p_expert = points_expert[points_expert.level == level_id]
ps = points[points.level == level_id]
return dist(p_ref, ps), dist(p_ref, p_expert).iloc[0]
def calc_rect_area(rect):
return (rect[0][0] - rect[1][0]) * (rect[1][1] - rect[0][1])
# Plotting functions
def plot_scatter(points, rects, level_id, fig_area=FIG_AREA, grid_area=GRID_AREA, with_axis=False, with_img=True, img_alpha=1.0):
rect = rects[level_id]
top_lat, top_lng, bot_lat, bot_lng = get_rect_bounds(rect)
plevel = get_points_level(points, rects, level_id)
ax = plevel.plot('lng', 'lat', 'scatter')
plt.xlim(left=top_lng, right=bot_lng)
plt.ylim(top=top_lat, bottom=bot_lat)
if with_img:
img = plt.imread('/data/images/level%s.png' % level_id)
plt.imshow(img, zorder=0, alpha=img_alpha, extent=[top_lng, bot_lng, bot_lat, top_lat])
width, height = get_rect_width_height(rect)
fig_width, fig_height = get_fig_width_height(width, height, fig_area)
plt.gcf().set_size_inches(fig_width, fig_height)
if grid_area:
grid_horiz, grid_vertic = get_grids(rects, level_id, grid_area, fig_area)
for lat in grid_horiz:
plt.axhline(lat, color=COLOR_GRID, lw=GRID_LW)
for lng in grid_vertic:
plt.axvline(lng, color=COLOR_GRID, lw=GRID_LW)
if not with_axis:
ax.set_axis_off()
ax.get_xaxis().set_visible(False)
ax.get_yaxis().set_visible(False)
return ax
def plot_heatmap(points, rects, level_id, grid_area=GRID_AREA, fig_area=FIG_AREA, cmap=None):
rect = rects[level_id]
top_lat, top_lng, bot_lat, bot_lng = get_rect_bounds(rect)
bins = get_bins(points, rects, level_id, grid_area, fig_area)
ax = sns.heatmap(bins, cbar=False, xticklabels=False, yticklabels=False, cmap=cmap)
width, height = get_rect_width_height(rect)
fig_width, fig_height = get_fig_width_height(width, height, fig_area)
plt.gcf().set_size_inches(fig_width, fig_height)
return ax
# Helper functions
def get_points_level(points, rects, level_id):
rect = rects[level_id]
top_lat, top_lng = rect[0][0], rect[0][1]
bot_lat, bot_lng = rect[1][0], rect[1][1]
plevel = points[points['level'] == level_id]
plevel = plevel[(plevel['lng'] >= top_lng) & (plevel['lng'] <= bot_lng) & (plevel['lat'] >= bot_lat) & (plevel['lat'] <= top_lat)]
return plevel
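# get_fig_width_height preserves the rect's aspect ratio while forcing fig_width*fig_height == fig_area,
# since c_area = sqrt(fig_area/(width*height)) rescales both sides by the same factor.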
def get_fig_width_height(width, height, fig_area):
c_area = math.sqrt(fig_area / (width*height))
fig_width = width*c_area
fig_height = height*c_area
return fig_width, fig_height
def get_rect_width_height(rect):
top_lat, top_lng, bot_lat, bot_lng = get_rect_bounds(rect)
width = bot_lng - top_lng
height = top_lat - bot_lat
return width, height
def get_rect(polygons):
top_lat = max([coord[0] for coord in polygons])
bot_lat = min([coord[0] for coord in polygons])
top_lng = min([coord[1] for coord in polygons])
bot_lng = max([coord[1] for coord in polygons])
return (top_lat, top_lng), (bot_lat, bot_lng)
def get_rect_bounds(rect):
top_lat, top_lng = rect[0][0], rect[0][1]
bot_lat, bot_lng = rect[1][0], rect[1][1]
return top_lat, top_lng, bot_lat, bot_lng
def get_grids(rects, level_id, grid_area, fig_area):
rect = rects[level_id]
top_lat, top_lng, bot_lat, bot_lng = get_rect_bounds(rect)
width, height = get_rect_width_height(rect)
fig_width, fig_height = get_fig_width_height(width, height, fig_area)
factor = math.sqrt(grid_area/fig_area)
n_grid_horiz, n_grid_vertic = round(grid_area/fig_width/factor), round(grid_area/fig_height/factor)
grid_horiz = [top_lat - i*height/n_grid_horiz for i in range(n_grid_horiz + 1)]
grid_vertic = [bot_lng - i*width/n_grid_vertic for i in range(n_grid_vertic + 1)]
return grid_horiz, grid_vertic
def get_bins(points, rects, level_id, grid_area=GRID_AREA, fig_area=FIG_AREA):
plevel = get_points_level(points, rects, level_id)
bins = get_grids(rects, level_id, grid_area, fig_area)
bins_lat = pd.Series(pd.Categorical(pd.cut(plevel.lat, sorted(bins[0]), include_lowest=True), ordered=True))
bins_lng = pd.Series(pd.Categorical(pd.cut(plevel.lng, sorted(bins[1]), include_lowest=True), ordered=True))
coord_bins = pd.DataFrame(0, index=bins_lat.values.categories, columns=bins_lng.values.categories)
coord_counts = (bins_lat.astype(str) + '|' + bins_lng.astype(str)).value_counts()
for coord in coord_counts.index:
lat, lng = coord.split('|')
coord_bins.loc[lat, lng] += coord_counts[coord]
assert coord_bins.sum().sum() == len(plevel)
coord_bins.index = [float(i[1:i.index(',')]) for i in coord_bins.index]
coord_bins.columns = [float(i[1:i.index(',')]) for i in coord_bins.columns]
coord_bins = coord_bins.loc[coord_bins.index[::-1]] # reverse latitude (positive should be upper)
return coord_bins
# Dataset-specific functions
def browser_to_os(browser):
browser = browser.lower()
# Ubuntu
if "ubuntu" in browser:
return "Ubuntu"
# Windows Phone
elif "windows phone 8" in browser:
return "Windows Phone 8"
# Windows
elif ("windows nt 10" in browser):
return "Windows 10"
elif ("windows nt 6.2" in browser) or ("windows nt 6.3" in browser):
return "Windows 8"
elif ("windows nt 6.1" in browser):
return "Windows 7"
elif ("windows nt 6.0" in browser):
return "Windows Vista"
elif ("windows nt 5" in browser):
return "Windows XP"
# OS X
elif ("intel mac os x" in browser):
return "OS X"
# iPhone
elif ("cpu iphone" in browser) or ("wp-iphone" in browser):
if ("os 9" in browser):
return "iPhone iOS 9"
elif ("os 8" in browser):
return "iPhone iOS 8"
elif ("os 7" in browser):
return "iPhone iOS 7"
# Android
elif "android 5" in browser:
return "Android 5"
elif "android 4" in browser:
return "Android 4"
elif "android 3" in browser:
return "Android 3"
elif "android 2" in browser:
return "Android 2"
elif ("android" in browser) and ("tablet" in browser):
return "Android tablet"
elif "android" in browser:
return "Android"
# Other Linuxes
elif "linux" in browser:
return "Other Linux"
else:
raise ValueError("Unknown agent string: %s" % browser)
def os_to_generic(os_name):
os_name = os_name.lower()
if "ubuntu" in os_name:
return "Ubuntu"
elif "linux" in os_name:
return "Linux"
elif "android" in os_name:
return "Android"
elif "os x" in os_name:
return "OS X"
elif "ios" in os_name:
return "iOS"
elif "windows phone" in os_name:
return "Windows Phone"
elif "windows" in os_name:
return "Windows"
else:
raise ValueError("Unknown os: %s" % os_name)
def is_mobile(os_generic):
return os_generic.lower() in ["android", "ios", "windows phone"]
| gpl-2.0 |
wooga/airflow | airflow/hooks/dbapi_hook.py | 1 | 12455 | #
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from contextlib import closing
from datetime import datetime
from typing import Any, Optional
from sqlalchemy import create_engine
from airflow.exceptions import AirflowException
from airflow.hooks.base_hook import BaseHook
from airflow.typing_compat import Protocol
class ConnectorProtocol(Protocol):
"""
A protocol where you can connect to a database.
"""
def connect(self, host: str, port: int, username: str, schema: str) -> Any:
"""
Connect to a database.
:param host: The database host to connect to.
:param port: The database port to connect to.
:param username: The database username used for the authentication.
:param schema: The database schema to connect to.
:return: the authorized connection object.
"""
class DbApiHook(BaseHook):
"""
Abstract base class for sql hooks.
"""
# Override to provide the connection name.
conn_name_attr = None # type: str
# Override to have a default connection id for a particular dbHook
default_conn_name = 'default_conn_id'
# Override if this db supports autocommit.
supports_autocommit = False
# Override with the object that exposes the connect method
connector = None # type: Optional[ConnectorProtocol]
def __init__(self, *args, **kwargs):
super().__init__()
if not self.conn_name_attr:
raise AirflowException("conn_name_attr is not defined")
elif len(args) == 1:
setattr(self, self.conn_name_attr, args[0])
elif self.conn_name_attr not in kwargs:
setattr(self, self.conn_name_attr, self.default_conn_name)
else:
setattr(self, self.conn_name_attr, kwargs[self.conn_name_attr])
def get_conn(self):
"""Returns a connection object
"""
db = self.get_connection(getattr(self, self.conn_name_attr))
return self.connector.connect(
host=db.host,
port=db.port,
username=db.login,
schema=db.schema)
def get_uri(self) -> str:
"""
Extract the URI from the connection.
:return: the extracted uri.
"""
conn = self.get_connection(getattr(self, self.conn_name_attr))
login = ''
if conn.login:
login = '{conn.login}:{conn.password}@'.format(conn=conn)
host = conn.host
if conn.port is not None:
host += ':{port}'.format(port=conn.port)
uri = '{conn.conn_type}://{login}{host}/'.format(
conn=conn, login=login, host=host)
if conn.schema:
uri += conn.schema
return uri
def get_sqlalchemy_engine(self, engine_kwargs=None):
"""
Get an sqlalchemy_engine object.
:param engine_kwargs: Kwargs used in :func:`~sqlalchemy.create_engine`.
:return: the created engine.
"""
if engine_kwargs is None:
engine_kwargs = {}
return create_engine(self.get_uri(), **engine_kwargs)
def get_pandas_df(self, sql, parameters=None):
"""
Executes the sql and returns a pandas dataframe
:param sql: the sql statement to be executed (str) or a list of
sql statements to execute
:type sql: str or list
:param parameters: The parameters to render the SQL query with.
:type parameters: dict or iterable
"""
import pandas.io.sql as psql
with closing(self.get_conn()) as conn:
return psql.read_sql(sql, con=conn, params=parameters)
def get_records(self, sql, parameters=None):
"""
Executes the sql and returns a set of records.
:param sql: the sql statement to be executed (str) or a list of
sql statements to execute
:type sql: str or list
:param parameters: The parameters to render the SQL query with.
:type parameters: dict or iterable
"""
with closing(self.get_conn()) as conn:
with closing(conn.cursor()) as cur:
if parameters is not None:
cur.execute(sql, parameters)
else:
cur.execute(sql)
return cur.fetchall()
def get_first(self, sql, parameters=None):
"""
Executes the sql and returns the first resulting row.
:param sql: the sql statement to be executed (str) or a list of
sql statements to execute
:type sql: str or list
:param parameters: The parameters to render the SQL query with.
:type parameters: dict or iterable
"""
with closing(self.get_conn()) as conn:
with closing(conn.cursor()) as cur:
if parameters is not None:
cur.execute(sql, parameters)
else:
cur.execute(sql)
return cur.fetchone()
def run(self, sql, autocommit=False, parameters=None):
"""
Runs a command or a list of commands. Pass a list of sql
statements to the sql parameter to get them to execute
sequentially
:param sql: the sql statement to be executed (str) or a list of
sql statements to execute
:type sql: str or list
:param autocommit: What to set the connection's autocommit setting to
before executing the query.
:type autocommit: bool
:param parameters: The parameters to render the SQL query with.
:type parameters: dict or iterable
"""
if isinstance(sql, str):
sql = [sql]
with closing(self.get_conn()) as conn:
if self.supports_autocommit:
self.set_autocommit(conn, autocommit)
with closing(conn.cursor()) as cur:
for sql_statement in sql:
if parameters is not None:
self.log.info("%s with parameters %s", sql_statement, parameters)
cur.execute(sql_statement, parameters)
else:
self.log.info(sql_statement)
cur.execute(sql_statement)
# If autocommit was set to False for db that supports autocommit,
# or if db does not supports autocommit, we do a manual commit.
if not self.get_autocommit(conn):
conn.commit()
def set_autocommit(self, conn, autocommit):
"""
Sets the autocommit flag on the connection
"""
if not self.supports_autocommit and autocommit:
self.log.warning(
"%s connection doesn't support autocommit but autocommit activated.",
getattr(self, self.conn_name_attr)
)
conn.autocommit = autocommit
def get_autocommit(self, conn):
"""
Get autocommit setting for the provided connection.
Return True if conn.autocommit is set to True.
Return False if conn.autocommit is not set or set to False or conn
does not support autocommit.
:param conn: Connection to get autocommit setting from.
:type conn: connection object.
:return: connection autocommit setting.
:rtype: bool
"""
return getattr(conn, 'autocommit', False) and self.supports_autocommit
def get_cursor(self):
"""
Returns a cursor
"""
return self.get_conn().cursor()
@staticmethod
def _generate_insert_sql(table, values, target_fields, replace, **kwargs):
"""
Static helper method that generate the INSERT SQL statement.
The REPLACE variant is specific to MySQL syntax.
:param table: Name of the target table
:type table: str
:param values: The row to insert into the table
:type values: tuple of cell values
:param target_fields: The names of the columns to fill in the table
:type target_fields: iterable of strings
:param replace: Whether to replace instead of insert
:type replace: bool
:return: The generated INSERT or REPLACE SQL statement
:rtype: str
"""
placeholders = ["%s", ] * len(values)
if target_fields:
target_fields = ", ".join(target_fields)
target_fields = "({})".format(target_fields)
else:
target_fields = ''
if not replace:
sql = "INSERT INTO "
else:
sql = "REPLACE INTO "
sql += "{0} {1} VALUES ({2})".format(
table,
target_fields,
",".join(placeholders))
return sql
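    # For example, a call with hypothetical table and column names such as
    #   _generate_insert_sql("users", (1, "alice"), ["id", "name"], False)
    # yields 'INSERT INTO users (id, name) VALUES (%s,%s)', while passing
    # replace=True yields 'REPLACE INTO users (id, name) VALUES (%s,%s)';
    # the "%s" placeholders are bound to the row values by the DB-API driver.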
def insert_rows(self, table, rows, target_fields=None, commit_every=1000,
replace=False, **kwargs):
"""
        A generic way to insert a set of tuples into a table;
        a new transaction is created every commit_every rows.
:param table: Name of the target table
:type table: str
:param rows: The rows to insert into the table
:type rows: iterable of tuples
:param target_fields: The names of the columns to fill in the table
:type target_fields: iterable of strings
:param commit_every: The maximum number of rows to insert in one
transaction. Set to 0 to insert all rows in one transaction.
:type commit_every: int
:param replace: Whether to replace instead of insert
:type replace: bool
"""
i = 0
with closing(self.get_conn()) as conn:
if self.supports_autocommit:
self.set_autocommit(conn, False)
conn.commit()
with closing(conn.cursor()) as cur:
for i, row in enumerate(rows, 1):
lst = []
for cell in row:
lst.append(self._serialize_cell(cell, conn))
values = tuple(lst)
sql = self._generate_insert_sql(
table, values, target_fields, replace, **kwargs
)
cur.execute(sql, values)
if commit_every and i % commit_every == 0:
conn.commit()
self.log.info(
"Loaded %s rows into %s so far", i, table
)
conn.commit()
self.log.info("Done loading. Loaded a total of %s rows", i)
@staticmethod
def _serialize_cell(cell, conn=None): # pylint: disable=unused-argument
"""
Returns the SQL literal of the cell as a string.
:param cell: The cell to insert into the table
:type cell: object
:param conn: The database connection
:type conn: connection object
:return: The serialized cell
:rtype: str
"""
if cell is None:
return None
if isinstance(cell, datetime):
return cell.isoformat()
return str(cell)
def bulk_dump(self, table, tmp_file):
"""
Dumps a database table into a tab-delimited file
:param table: The name of the source table
:type table: str
:param tmp_file: The path of the target file
:type tmp_file: str
"""
raise NotImplementedError()
def bulk_load(self, table, tmp_file):
"""
Loads a tab-delimited file into a database table
:param table: The name of the target table
:type table: str
:param tmp_file: The path of the file to load into the table
:type tmp_file: str
"""
raise NotImplementedError()
| apache-2.0 |
Barmaley-exe/scikit-learn | sklearn/preprocessing/label.py | 3 | 28567 | # Authors: Alexandre Gramfort <[email protected]>
# Mathieu Blondel <[email protected]>
# Olivier Grisel <[email protected]>
# Andreas Mueller <[email protected]>
# Joel Nothman <[email protected]>
# Hamzeh Alsalhi <[email protected]>
# License: BSD 3 clause
from collections import defaultdict
import itertools
import array
import warnings
import numpy as np
import scipy.sparse as sp
from ..base import BaseEstimator, TransformerMixin
from ..utils.fixes import np_version
from ..utils.fixes import sparse_min_max
from ..utils.fixes import astype
from ..utils.fixes import in1d
from ..utils import deprecated, column_or_1d
from ..utils.validation import check_array
from ..utils.validation import _num_samples
from ..utils.multiclass import unique_labels
from ..utils.multiclass import type_of_target
from ..externals import six
zip = six.moves.zip
map = six.moves.map
__all__ = [
'label_binarize',
'LabelBinarizer',
'LabelEncoder',
'MultiLabelBinarizer',
]
def _check_numpy_unicode_bug(labels):
"""Check that user is not subject to an old numpy bug
Fixed in master before 1.7.0:
https://github.com/numpy/numpy/pull/243
"""
if np_version[:3] < (1, 7, 0) and labels.dtype.kind == 'U':
raise RuntimeError("NumPy < 1.7.0 does not implement searchsorted"
" on unicode data correctly. Please upgrade"
" NumPy to use LabelEncoder with unicode inputs.")
class LabelEncoder(BaseEstimator, TransformerMixin):
"""Encode labels with value between 0 and n_classes-1.
Attributes
----------
classes_ : array of shape (n_class,)
Holds the label for each class.
Examples
--------
`LabelEncoder` can be used to normalize labels.
>>> from sklearn import preprocessing
>>> le = preprocessing.LabelEncoder()
>>> le.fit([1, 2, 2, 6])
LabelEncoder()
>>> le.classes_
array([1, 2, 6])
>>> le.transform([1, 1, 2, 6]) #doctest: +ELLIPSIS
array([0, 0, 1, 2]...)
>>> le.inverse_transform([0, 0, 1, 2])
array([1, 1, 2, 6])
It can also be used to transform non-numerical labels (as long as they are
hashable and comparable) to numerical labels.
>>> le = preprocessing.LabelEncoder()
>>> le.fit(["paris", "paris", "tokyo", "amsterdam"])
LabelEncoder()
>>> list(le.classes_)
['amsterdam', 'paris', 'tokyo']
>>> le.transform(["tokyo", "tokyo", "paris"]) #doctest: +ELLIPSIS
array([2, 2, 1]...)
>>> list(le.inverse_transform([2, 2, 1]))
['tokyo', 'tokyo', 'paris']
"""
def _check_fitted(self):
if not hasattr(self, "classes_"):
raise ValueError("LabelEncoder was not fitted yet.")
def fit(self, y):
"""Fit label encoder
Parameters
----------
y : array-like of shape (n_samples,)
Target values.
Returns
-------
self : returns an instance of self.
"""
y = column_or_1d(y, warn=True)
_check_numpy_unicode_bug(y)
self.classes_ = np.unique(y)
return self
def fit_transform(self, y):
"""Fit label encoder and return encoded labels
Parameters
----------
y : array-like of shape [n_samples]
Target values.
Returns
-------
y : array-like of shape [n_samples]
"""
y = column_or_1d(y, warn=True)
_check_numpy_unicode_bug(y)
self.classes_, y = np.unique(y, return_inverse=True)
return y
def transform(self, y):
"""Transform labels to normalized encoding.
Parameters
----------
y : array-like of shape [n_samples]
Target values.
Returns
-------
y : array-like of shape [n_samples]
"""
self._check_fitted()
classes = np.unique(y)
_check_numpy_unicode_bug(classes)
if len(np.intersect1d(classes, self.classes_)) < len(classes):
diff = np.setdiff1d(classes, self.classes_)
raise ValueError("y contains new labels: %s" % str(diff))
return np.searchsorted(self.classes_, y)
def inverse_transform(self, y):
"""Transform labels back to original encoding.
Parameters
----------
y : numpy array of shape [n_samples]
Target values.
Returns
-------
y : numpy array of shape [n_samples]
"""
self._check_fitted()
y = np.asarray(y)
return self.classes_[y]
class LabelBinarizer(BaseEstimator, TransformerMixin):
"""Binarize labels in a one-vs-all fashion
Several regression and binary classification algorithms are
available in the scikit. A simple way to extend these algorithms
to the multi-class classification case is to use the so-called
one-vs-all scheme.
At learning time, this simply consists in learning one regressor
or binary classifier per class. In doing so, one needs to convert
multi-class labels to binary labels (belong or does not belong
to the class). LabelBinarizer makes this process easy with the
transform method.
At prediction time, one assigns the class for which the corresponding
model gave the greatest confidence. LabelBinarizer makes this easy
with the inverse_transform method.
Parameters
----------
neg_label : int (default: 0)
Value with which negative labels must be encoded.
pos_label : int (default: 1)
Value with which positive labels must be encoded.
sparse_output : boolean (default: False)
True if the returned array from transform is desired to be in sparse
CSR format.
Attributes
----------
classes_ : array of shape [n_class]
Holds the label for each class.
y_type_ : str,
Represents the type of the target data as evaluated by
        utils.multiclass.type_of_target. Possible types are 'continuous',
        'continuous-multioutput', 'binary', 'multiclass',
        'multiclass-multioutput', 'multilabel-sequences',
'multilabel-indicator', and 'unknown'.
multilabel_ : boolean
True if the transformer was fitted on a multilabel rather than a
multiclass set of labels. The ``multilabel_`` attribute is deprecated
and will be removed in 0.18
sparse_input_ : boolean,
True if the input data to transform is given as a sparse matrix, False
otherwise.
indicator_matrix_ : str
        'sparse' when the input data to transform is a multilabel-indicator and
is sparse, None otherwise. The ``indicator_matrix_`` attribute is
deprecated as of version 0.16 and will be removed in 0.18
Examples
--------
>>> from sklearn import preprocessing
>>> lb = preprocessing.LabelBinarizer()
>>> lb.fit([1, 2, 6, 4, 2])
LabelBinarizer(neg_label=0, pos_label=1, sparse_output=False)
>>> lb.classes_
array([1, 2, 4, 6])
>>> lb.transform([1, 6])
array([[1, 0, 0, 0],
[0, 0, 0, 1]])
Binary targets transform to a column vector
>>> lb = preprocessing.LabelBinarizer()
>>> lb.fit_transform(['yes', 'no', 'no', 'yes'])
array([[1],
[0],
[0],
[1]])
Passing a 2D matrix for multilabel classification
>>> import numpy as np
>>> lb.fit(np.array([[0, 1, 1], [1, 0, 0]]))
LabelBinarizer(neg_label=0, pos_label=1, sparse_output=False)
>>> lb.classes_
array([0, 1, 2])
>>> lb.transform([0, 1, 2, 1])
array([[1, 0, 0],
[0, 1, 0],
[0, 0, 1],
[0, 1, 0]])
See also
--------
label_binarize : function to perform the transform operation of
LabelBinarizer with fixed classes.
"""
def __init__(self, neg_label=0, pos_label=1, sparse_output=False):
if neg_label >= pos_label:
raise ValueError("neg_label={0} must be strictly less than "
"pos_label={1}.".format(neg_label, pos_label))
if sparse_output and (pos_label == 0 or neg_label != 0):
raise ValueError("Sparse binarization is only supported with non "
"zero pos_label and zero neg_label, got "
"pos_label={0} and neg_label={1}"
"".format(pos_label, neg_label))
self.neg_label = neg_label
self.pos_label = pos_label
self.sparse_output = sparse_output
@property
@deprecated("Attribute indicator_matrix_ is deprecated and will be "
"removed in 0.17. Use 'y_type_ == 'multilabel-indicator'' "
"instead")
def indicator_matrix_(self):
return self.y_type_ == 'multilabel-indicator'
@property
@deprecated("Attribute multilabel_ is deprecated and will be removed "
"in 0.17. Use 'y_type_.startswith('multilabel')' "
"instead")
def multilabel_(self):
return self.y_type_.startswith('multilabel')
def _check_fitted(self):
if not hasattr(self, "classes_"):
raise ValueError("LabelBinarizer was not fitted yet.")
def fit(self, y):
"""Fit label binarizer
Parameters
----------
y : numpy array of shape (n_samples,) or (n_samples, n_classes)
            Target values. The 2-d matrix should only contain 0 and 1, and
            represents multilabel classification.
Returns
-------
self : returns an instance of self.
"""
self.y_type_ = type_of_target(y)
if 'multioutput' in self.y_type_:
raise ValueError("Multioutput target data is not supported with "
"label binarization")
if _num_samples(y) == 0:
raise ValueError('y has 0 samples: %r' % y)
self.sparse_input_ = sp.issparse(y)
self.classes_ = unique_labels(y)
return self
def transform(self, y):
"""Transform multi-class labels to binary labels
The output of transform is sometimes referred to by some authors as the
1-of-K coding scheme.
Parameters
----------
y : numpy array or sparse matrix of shape (n_samples,) or
            (n_samples, n_classes) Target values. The 2-d matrix should only
            contain 0 and 1, and represents multilabel classification. Sparse
matrix can be CSR, CSC, COO, DOK, or LIL.
Returns
-------
Y : numpy array or CSR matrix of shape [n_samples, n_classes]
Shape will be [n_samples, 1] for binary problems.
"""
self._check_fitted()
y_is_multilabel = type_of_target(y).startswith('multilabel')
if y_is_multilabel and not self.y_type_.startswith('multilabel'):
raise ValueError("The object was not fitted with multilabel"
" input.")
return label_binarize(y, self.classes_,
pos_label=self.pos_label,
neg_label=self.neg_label,
sparse_output=self.sparse_output)
def inverse_transform(self, Y, threshold=None):
"""Transform binary labels back to multi-class labels
Parameters
----------
Y : numpy array or sparse matrix with shape [n_samples, n_classes]
Target values. All sparse matrices are converted to CSR before
inverse transformation.
threshold : float or None
Threshold used in the binary and multi-label cases.
Use 0 when:
- Y contains the output of decision_function (classifier)
Use 0.5 when:
- Y contains the output of predict_proba
If None, the threshold is assumed to be half way between
neg_label and pos_label.
Returns
-------
y : numpy array or CSR matrix of shape [n_samples] Target values.
Notes
-----
In the case when the binary labels are fractional
(probabilistic), inverse_transform chooses the class with the
        greatest value. Typically, this allows using the output of a
linear model's decision_function method directly as the input
of inverse_transform.
"""
self._check_fitted()
if threshold is None:
threshold = (self.pos_label + self.neg_label) / 2.
if self.y_type_ == "multiclass":
y_inv = _inverse_binarize_multiclass(Y, self.classes_)
else:
y_inv = _inverse_binarize_thresholding(Y, self.y_type_,
self.classes_, threshold)
if self.sparse_input_:
y_inv = sp.csr_matrix(y_inv)
elif sp.issparse(y_inv):
y_inv = y_inv.toarray()
return y_inv
def label_binarize(y, classes, neg_label=0, pos_label=1,
sparse_output=False, multilabel=None):
"""Binarize labels in a one-vs-all fashion
Several regression and binary classification algorithms are
available in the scikit. A simple way to extend these algorithms
to the multi-class classification case is to use the so-called
one-vs-all scheme.
This function makes it possible to compute this transformation for a
fixed set of class labels known ahead of time.
Parameters
----------
y : array-like
Sequence of integer labels or multilabel data to encode.
classes : array-like of shape [n_classes]
Uniquely holds the label for each class.
neg_label : int (default: 0)
Value with which negative labels must be encoded.
pos_label : int (default: 1)
Value with which positive labels must be encoded.
sparse_output : boolean (default: False),
Set to true if output binary array is desired in CSR sparse format
Returns
-------
Y : numpy array or CSR matrix of shape [n_samples, n_classes]
Shape will be [n_samples, 1] for binary problems.
Examples
--------
>>> from sklearn.preprocessing import label_binarize
>>> label_binarize([1, 6], classes=[1, 2, 4, 6])
array([[1, 0, 0, 0],
[0, 0, 0, 1]])
The class ordering is preserved:
>>> label_binarize([1, 6], classes=[1, 6, 4, 2])
array([[1, 0, 0, 0],
[0, 1, 0, 0]])
Binary targets transform to a column vector
>>> label_binarize(['yes', 'no', 'no', 'yes'], classes=['no', 'yes'])
array([[1],
[0],
[0],
[1]])
See also
--------
LabelBinarizer : class used to wrap the functionality of label_binarize and
allow for fitting to classes independently of the transform operation
"""
if not isinstance(y, list):
# XXX Workaround that will be removed when list of list format is
# dropped
y = check_array(y, accept_sparse='csr', ensure_2d=False, dtype=None)
else:
if _num_samples(y) == 0:
raise ValueError('y has 0 samples: %r' % y)
if neg_label >= pos_label:
raise ValueError("neg_label={0} must be strictly less than "
"pos_label={1}.".format(neg_label, pos_label))
if (sparse_output and (pos_label == 0 or neg_label != 0)):
raise ValueError("Sparse binarization is only supported with non "
"zero pos_label and zero neg_label, got "
"pos_label={0} and neg_label={1}"
"".format(pos_label, neg_label))
if multilabel is not None:
warnings.warn("The multilabel parameter is deprecated as of version "
"0.15 and will be removed in 0.17. The parameter is no "
"longer necessary because the value is automatically "
"inferred.", DeprecationWarning)
# To account for pos_label == 0 in the dense case
pos_switch = pos_label == 0
if pos_switch:
pos_label = -neg_label
y_type = type_of_target(y)
if 'multioutput' in y_type:
raise ValueError("Multioutput target data is not supported with label "
"binarization")
n_samples = y.shape[0] if sp.issparse(y) else len(y)
n_classes = len(classes)
classes = np.asarray(classes)
if y_type == "binary":
if len(classes) == 1:
Y = np.zeros((len(y), 1), dtype=np.int)
Y += neg_label
return Y
elif len(classes) >= 3:
y_type = "multiclass"
sorted_class = np.sort(classes)
if (y_type == "multilabel-indicator" and classes.size != y.shape[1]):
raise ValueError("classes {0} missmatch with the labels {1}"
"found in the data".format(classes, unique_labels(y)))
if y_type in ("binary", "multiclass"):
y = column_or_1d(y)
# pick out the known labels from y
y_in_classes = in1d(y, classes)
y_seen = y[y_in_classes]
indices = np.searchsorted(sorted_class, y_seen)
indptr = np.hstack((0, np.cumsum(y_in_classes)))
data = np.empty_like(indices)
data.fill(pos_label)
Y = sp.csr_matrix((data, indices, indptr),
shape=(n_samples, n_classes))
elif y_type == "multilabel-indicator":
Y = sp.csr_matrix(y)
if pos_label != 1:
data = np.empty_like(Y.data)
data.fill(pos_label)
Y.data = data
elif y_type == "multilabel-sequences":
Y = MultiLabelBinarizer(classes=classes,
sparse_output=sparse_output).fit_transform(y)
if sp.issparse(Y):
Y.data[:] = pos_label
else:
Y[Y == 1] = pos_label
return Y
if not sparse_output:
Y = Y.toarray()
Y = astype(Y, int, copy=False)
if neg_label != 0:
Y[Y == 0] = neg_label
if pos_switch:
Y[Y == pos_label] = 0
else:
Y.data = astype(Y.data, int, copy=False)
# preserve label ordering
if np.any(classes != sorted_class):
indices = np.argsort(classes)
Y = Y[:, indices]
if y_type == "binary":
if sparse_output:
Y = Y.getcol(-1)
else:
Y = Y[:, -1].reshape((-1, 1))
return Y
def _inverse_binarize_multiclass(y, classes):
"""Inverse label binarization transformation for multiclass.
Multiclass uses the maximal score instead of a threshold.
"""
classes = np.asarray(classes)
if sp.issparse(y):
# Find the argmax for each row in y where y is a CSR matrix
y = y.tocsr()
n_samples, n_outputs = y.shape
outputs = np.arange(n_outputs)
row_max = sparse_min_max(y, 1)[1]
row_nnz = np.diff(y.indptr)
y_data_repeated_max = np.repeat(row_max, row_nnz)
# picks out all indices obtaining the maximum per row
y_i_all_argmax = np.flatnonzero(y_data_repeated_max == y.data)
# For corner case where last row has a max of 0
if row_max[-1] == 0:
y_i_all_argmax = np.append(y_i_all_argmax, [len(y.data)])
# Gets the index of the first argmax in each row from y_i_all_argmax
index_first_argmax = np.searchsorted(y_i_all_argmax, y.indptr[:-1])
# first argmax of each row
y_ind_ext = np.append(y.indices, [0])
y_i_argmax = y_ind_ext[y_i_all_argmax[index_first_argmax]]
# Handle rows of all 0
y_i_argmax[np.where(row_nnz == 0)[0]] = 0
# Handles rows with max of 0 that contain negative numbers
samples = np.arange(n_samples)[(row_nnz > 0) &
(row_max.ravel() == 0)]
for i in samples:
ind = y.indices[y.indptr[i]:y.indptr[i + 1]]
y_i_argmax[i] = classes[np.setdiff1d(outputs, ind)][0]
return classes[y_i_argmax]
else:
return classes.take(y.argmax(axis=1), mode="clip")
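# For dense input this reduces to a row-wise argmax; with illustrative scores,
#   _inverse_binarize_multiclass(np.array([[0.2, 0.7, 0.1],
#                                          [0.9, 0.0, 0.1]]),
#                                classes=['a', 'b', 'c'])
# returns ['b', 'a'], i.e. the class with the maximal score in each row.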
def _inverse_binarize_thresholding(y, output_type, classes, threshold):
"""Inverse label binarization transformation using thresholding."""
if output_type == "binary" and y.ndim == 2 and y.shape[1] > 2:
raise ValueError("output_type='binary', but y.shape = {0}".
format(y.shape))
if output_type != "binary" and y.shape[1] != len(classes):
raise ValueError("The number of class is not equal to the number of "
"dimension of y.")
classes = np.asarray(classes)
# Perform thresholding
if sp.issparse(y):
if threshold > 0:
if y.format not in ('csr', 'csc'):
y = y.tocsr()
y.data = np.array(y.data > threshold, dtype=np.int)
y.eliminate_zeros()
else:
y = np.array(y.toarray() > threshold, dtype=np.int)
else:
y = np.array(y > threshold, dtype=np.int)
# Inverse transform data
if output_type == "binary":
if sp.issparse(y):
y = y.toarray()
if y.ndim == 2 and y.shape[1] == 2:
return classes[y[:, 1]]
else:
if len(classes) == 1:
y = np.empty(len(y), dtype=classes.dtype)
y.fill(classes[0])
return y
else:
return classes[y.ravel()]
elif output_type == "multilabel-indicator":
return y
elif output_type == "multilabel-sequences":
warnings.warn('Direct support for sequence of sequences multilabel '
'representation will be unavailable from version 0.17. '
'Use sklearn.preprocessing.MultiLabelBinarizer to '
'convert to a label indicator representation.',
DeprecationWarning)
mlb = MultiLabelBinarizer(classes=classes).fit([])
return mlb.inverse_transform(y)
else:
raise ValueError("{0} format is not supported".format(output_type))
class MultiLabelBinarizer(BaseEstimator, TransformerMixin):
"""Transform between iterable of iterables and a multilabel format
Although a list of sets or tuples is a very intuitive format for multilabel
data, it is unwieldy to process. This transformer converts between this
intuitive format and the supported multilabel format: a (samples x classes)
binary matrix indicating the presence of a class label.
Parameters
----------
classes : array-like of shape [n_classes] (optional)
Indicates an ordering for the class labels
sparse_output : boolean (default: False),
Set to true if output binary array is desired in CSR sparse format
Attributes
----------
classes_ : array of labels
A copy of the `classes` parameter where provided,
or otherwise, the sorted set of classes found when fitting.
Examples
--------
>>> mlb = MultiLabelBinarizer()
>>> mlb.fit_transform([(1, 2), (3,)])
array([[1, 1, 0],
[0, 0, 1]])
>>> mlb.classes_
array([1, 2, 3])
>>> mlb.fit_transform([set(['sci-fi', 'thriller']), set(['comedy'])])
array([[0, 1, 1],
[1, 0, 0]])
>>> list(mlb.classes_)
['comedy', 'sci-fi', 'thriller']
"""
def __init__(self, classes=None, sparse_output=False):
self.classes = classes
self.sparse_output = sparse_output
def fit(self, y):
"""Fit the label sets binarizer, storing `classes_`
Parameters
----------
y : iterable of iterables
A set of labels (any orderable and hashable object) for each
sample. If the `classes` parameter is set, `y` will not be
iterated.
Returns
-------
self : returns this MultiLabelBinarizer instance
"""
if self.classes is None:
classes = sorted(set(itertools.chain.from_iterable(y)))
else:
classes = self.classes
dtype = np.int if all(isinstance(c, int) for c in classes) else object
self.classes_ = np.empty(len(classes), dtype=dtype)
self.classes_[:] = classes
return self
def fit_transform(self, y):
"""Fit the label sets binarizer and transform the given label sets
Parameters
----------
y : iterable of iterables
A set of labels (any orderable and hashable object) for each
sample. If the `classes` parameter is set, `y` will not be
iterated.
Returns
-------
y_indicator : array or CSR matrix, shape (n_samples, n_classes)
A matrix such that `y_indicator[i, j] = 1` iff `classes_[j]` is in
`y[i]`, and 0 otherwise.
"""
if self.classes is not None:
return self.fit(y).transform(y)
# Automatically increment on new class
class_mapping = defaultdict(int)
class_mapping.default_factory = class_mapping.__len__
yt = self._transform(y, class_mapping)
# sort classes and reorder columns
tmp = sorted(class_mapping, key=class_mapping.get)
# (make safe for tuples)
dtype = np.int if all(isinstance(c, int) for c in tmp) else object
class_mapping = np.empty(len(tmp), dtype=dtype)
class_mapping[:] = tmp
self.classes_, inverse = np.unique(class_mapping, return_inverse=True)
yt.indices = np.take(inverse, yt.indices)
if not self.sparse_output:
yt = yt.toarray()
return yt
def transform(self, y):
"""Transform the given label sets
Parameters
----------
y : iterable of iterables
A set of labels (any orderable and hashable object) for each
sample. If the `classes` parameter is set, `y` will not be
iterated.
Returns
-------
y_indicator : array or CSR matrix, shape (n_samples, n_classes)
A matrix such that `y_indicator[i, j] = 1` iff `classes_[j]` is in
`y[i]`, and 0 otherwise.
"""
class_to_index = dict(zip(self.classes_, range(len(self.classes_))))
yt = self._transform(y, class_to_index)
if not self.sparse_output:
yt = yt.toarray()
return yt
def _transform(self, y, class_mapping):
"""Transforms the label sets with a given mapping
Parameters
----------
y : iterable of iterables
class_mapping : Mapping
Maps from label to column index in label indicator matrix
Returns
-------
y_indicator : sparse CSR matrix, shape (n_samples, n_classes)
Label indicator matrix
"""
indices = array.array('i')
indptr = array.array('i', [0])
for labels in y:
indices.extend(set(class_mapping[label] for label in labels))
indptr.append(len(indices))
data = np.ones(len(indices), dtype=int)
return sp.csr_matrix((data, indices, indptr),
shape=(len(indptr) - 1, len(class_mapping)))
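    # For example, y = [(1, 2), (3,)] with class_mapping {1: 0, 2: 1, 3: 2}
    # produces indices [0, 1, 2], indptr [0, 2, 3] and all-ones data, i.e.
    # the CSR form of [[1, 1, 0], [0, 0, 1]].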
def inverse_transform(self, yt):
"""Transform the given indicator matrix into label sets
Parameters
----------
yt : array or sparse matrix of shape (n_samples, n_classes)
            A matrix containing only 1s and 0s.
Returns
-------
y : list of tuples
The set of labels for each sample such that `y[i]` consists of
`classes_[j]` for each `yt[i, j] == 1`.
"""
if yt.shape[1] != len(self.classes_):
raise ValueError('Expected indicator for {0} classes, but got {1}'
.format(len(self.classes_), yt.shape[1]))
if sp.issparse(yt):
yt = yt.tocsr()
if len(yt.data) != 0 and len(np.setdiff1d(yt.data, [0, 1])) > 0:
raise ValueError('Expected only 0s and 1s in label indicator.')
return [tuple(self.classes_.take(yt.indices[start:end]))
for start, end in zip(yt.indptr[:-1], yt.indptr[1:])]
else:
unexpected = np.setdiff1d(yt, [0, 1])
if len(unexpected) > 0:
raise ValueError('Expected only 0s and 1s in label indicator. '
'Also got {0}'.format(unexpected))
return [tuple(self.classes_.compress(indicators)) for indicators
in yt]
| bsd-3-clause |
ywcui1990/htmresearch | projects/union_pooling/experiments/tp_learning/tp_trained_tm_backwardLearning.py | 8 | 23013 | # ----------------------------------------------------------------------
# Numenta Platform for Intelligent Computing (NuPIC)
# Copyright (C) 2015, Numenta, Inc. Unless you have an agreement
# with Numenta, Inc., for a separate license for this software code, the
# following terms and conditions apply:
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero Public License version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU Affero Public License for more details.
#
# You should have received a copy of the GNU Affero Public License
# along with this program. If not, see http://www.gnu.org/licenses.
#
# http://numenta.org/licenses/
# ----------------------------------------------------------------------
import copy
import sys
import time
import os
import yaml
from optparse import OptionParser
import numpy
from pylab import rcParams
import matplotlib.pyplot as plt
import matplotlib.cm as cm
from matplotlib.backends.backend_pdf import PdfPages
from matplotlib import rcParams
rcParams.update({'figure.autolayout': True})
plt.ion()
from nupic.data.generators.pattern_machine import PatternMachine
from nupic.data.generators.sequence_machine import SequenceMachine
from nupic.algorithms.monitor_mixin.monitor_mixin_base import MonitorMixinBase
from htmresearch.frameworks.union_temporal_pooling.union_temporal_pooler_experiment import (
UnionTemporalPoolerExperiment)
_SHOW_PROGRESS_INTERVAL = 200
"""
Experiment 1
Runs UnionTemporalPooler on input from a Temporal Memory after training
on a long sequence
Enables learning in UnionTemporalPooler
Tests different learning rules
- Forward learning is Hebbian learning on union pooled cells
- Backward learning is Reinforcement-like learning that allows cells to
connect to inputs from the previous few time steps
- Several metrics are measured before and after learning, including
average response latency, total size of union, overlap between learned &
naive representations
"""
ncol = 1024
learnType = 'ForwardBackward'
learningPasses = 100
paramDir = 'params/5_trainingPasses_'+str(ncol)+'_columns_'+learnType+'.yaml'
outputDir = 'results/'+str(ncol)+learnType+'/'
if not os.path.exists(outputDir):
os.makedirs(outputDir)
params = yaml.safe_load(open(paramDir, 'r'))
options = {'plotVerbosity': 2, 'consoleVerbosity': 2}
plotVerbosity = 2
consoleVerbosity = 1
print "Running SDR overlap experiment...\n"
print "Params dir: {0}".format(paramDir)
print "Output dir: {0}\n".format(outputDir)
# Dimensionality of sequence patterns
patternDimensionality = params["patternDimensionality"]
# Cardinality (ON / true bits) of sequence patterns
patternCardinality = params["patternCardinality"]
# Length of sequences shown to network
sequenceLength = params["sequenceLength"]
# Number of sequences used. Sequences may share common elements.
numberOfSequences = params["numberOfSequences"]
# Number of sequence passes for training the TM. Zero => no training.
trainingPasses = params["trainingPasses"]
# Generate a sequence list and an associated labeled list (both containing a
# set of sequences separated by None)
print "\nGenerating sequences..."
patternAlphabetSize = sequenceLength * numberOfSequences
patternMachine = PatternMachine(patternDimensionality, patternCardinality,
patternAlphabetSize)
sequenceMachine = SequenceMachine(patternMachine)
numbers = sequenceMachine.generateNumbers(numberOfSequences, sequenceLength)
generatedSequences = sequenceMachine.generateFromNumbers(numbers)
sequenceLabels = [str(numbers[i + i*sequenceLength: i + (i+1)*sequenceLength])
for i in xrange(numberOfSequences)]
labeledSequences = []
for label in sequenceLabels:
for _ in xrange(sequenceLength):
labeledSequences.append(label)
labeledSequences.append(None)
def initializeNetwork():
tmParamOverrides = params["temporalMemoryParams"]
upParamOverrides = params["unionPoolerParams"]
# Set up the Temporal Memory and Union Pooler network
print "\nCreating network..."
experiment = UnionTemporalPoolerExperiment(tmParamOverrides, upParamOverrides)
return experiment
def runTMtrainingPhase(experiment):
# Train only the Temporal Memory on the generated sequences
if trainingPasses > 0:
print "\nTraining Temporal Memory..."
if consoleVerbosity > 0:
print "\nPass\tBursting Columns Mean\tStdDev\tMax"
for i in xrange(trainingPasses):
experiment.runNetworkOnSequences(generatedSequences,
labeledSequences,
tmLearn=True,
upLearn=None,
verbosity=consoleVerbosity,
progressInterval=_SHOW_PROGRESS_INTERVAL)
if consoleVerbosity > 0:
stats = experiment.getBurstingColumnsStats()
print "{0}\t{1}\t{2}\t{3}".format(i, stats[0], stats[1], stats[2])
# Reset the TM monitor mixin's records accrued during this training pass
# experiment.tm.mmClearHistory()
print
print MonitorMixinBase.mmPrettyPrintMetrics(
experiment.tm.mmGetDefaultMetrics())
print
def SDRsimilarity(SDR1, SDR2):
return float(len(SDR1 & SDR2)) / float(max(len(SDR1), len(SDR2) ))
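# For example, SDRsimilarity(set([1, 2, 3]), set([2, 3, 4])) shares two of
# three bits and returns 2 / 3.0 ~ 0.67.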
def getUnionSDRSimilarityCurve(activeCellsTrace, trainingPasses, sequenceLength, maxSeparation, skipBeginningElements=0):
similarityVsSeparation = numpy.zeros((trainingPasses, maxSeparation))
for rpts in xrange(trainingPasses):
for sep in xrange(maxSeparation):
similarity = []
for i in xrange(rpts*sequenceLength+skipBeginningElements, rpts*sequenceLength+sequenceLength-sep):
similarity.append(SDRsimilarity(activeCellsTrace[i], activeCellsTrace[i+sep]))
similarityVsSeparation[rpts, sep] = numpy.mean(similarity)
return similarityVsSeparation
def plotSDRsimilarityVsTemporalSeparation(similarityVsSeparationBefore, similarityVsSeparationAfter):
# plot SDR similarity as a function of temporal separation
f, (axs) = plt.subplots(nrows=2,ncols=2)
rpt = 0
ax1 = axs[0,0]
ax1.plot(similarityVsSeparationBefore[rpt,:],label='Before')
ax1.plot(similarityVsSeparationAfter[rpt,:],label='After')
ax1.set_xlabel('Separation in time between SDRs')
ax1.set_ylabel('SDRs overlap')
# ax1.set_title('Initial Cycle')
ax1.set_ylim([0,1])
ax1.legend(loc='upper right')
# rpt=4
# ax2.plot(similarityVsSeparationBefore[rpt,:],label='Before')
# ax2.plot(similarityVsSeparationAfter[rpt,:],label='After')
# ax2.set_xlabel('Separation in time between SDRs')
# ax2.set_ylabel('SDRs overlap')
# ax2.set_title('Last Cycle')
# ax2.set_ylim([0,1])
# ax2.legend(loc='upper right')
f.savefig(outputDir+'UnionSDRoverlapVsTemporalSeparation.eps',format='eps')
def plotSimilarityMatrix(similarityMatrixBefore, similarityMatrixAfter):
f, (ax1, ax2) = plt.subplots(nrows=1,ncols=2)
im = ax1.imshow(similarityMatrixBefore[0:sequenceLength, 0:sequenceLength],interpolation="nearest")
ax1.set_xlabel('Time (steps)')
ax1.set_ylabel('Time (steps)')
ax1.set_title('Overlap - Before Learning')
im = ax2.imshow(similarityMatrixAfter[0:sequenceLength, 0:sequenceLength],interpolation="nearest")
ax2.set_xlabel('Time (steps)')
ax2.set_ylabel('Time (steps)')
ax2.set_title('Overlap - After Learning')
# cax,kw = mpl.colorbar.make_axes([ax1, ax2])
# plt.colorbar(im, cax=cax, **kw)
# plt.tight_layout()
f.savefig(outputDir+'/UnionSDRoverlapBeforeVsAfterLearning.eps',format='eps')
def calculateSimilarityMatrix(activeCellsTraceBefore, activeCellsTraceAfter):
nSteps = sequenceLength # len(activeCellsTraceBefore)
similarityMatrixBeforeAfter = numpy.zeros((nSteps, nSteps))
similarityMatrixBefore = numpy.zeros((nSteps, nSteps))
similarityMatrixAfter = numpy.zeros((nSteps, nSteps))
for i in xrange(nSteps):
for j in xrange(nSteps):
similarityMatrixBefore[i,j] = SDRsimilarity(activeCellsTraceBefore[i], activeCellsTraceBefore[j])
similarityMatrixAfter[i,j] = SDRsimilarity(activeCellsTraceAfter[i], activeCellsTraceAfter[j])
similarityMatrixBeforeAfter[i,j] = SDRsimilarity(activeCellsTraceBefore[i], activeCellsTraceAfter[j])
return (similarityMatrixBefore, similarityMatrixAfter, similarityMatrixBeforeAfter)
def plotTPRvsUPROverlap(similarityMatrix):
f = plt.figure()
plt.subplot(2,2,1)
im = plt.imshow(similarityMatrix[0:sequenceLength, 0:sequenceLength],
interpolation="nearest",aspect='auto', vmin=0, vmax=0.3)
plt.colorbar(im)
plt.xlabel('UPR over time')
plt.ylabel('TPR over time')
plt.title(' Overlap between UPR & TPR')
f.savefig(outputDir+'OverlapTPRvsUPR.eps',format='eps')
def bitLifeVsLearningCycles(activeCellsTrace, numColumns,learningPasses, sequenceLength):
bitLifeVsLearning = numpy.zeros(learningPasses)
for i in xrange(learningPasses):
bitLifeList = []
bitLifeCounter = numpy.zeros(numColumns)
for t in xrange(sequenceLength):
preActiveCells = set(numpy.where(bitLifeCounter>0)[0])
currentActiveCells = activeCellsTrace[i*sequenceLength+t]
newActiveCells = list(currentActiveCells - preActiveCells)
stopActiveCells = list(preActiveCells - currentActiveCells)
if t == sequenceLength-1:
stopActiveCells = list(currentActiveCells)
continuousActiveCells = list(preActiveCells & currentActiveCells)
bitLifeList += list(bitLifeCounter[stopActiveCells])
bitLifeCounter[stopActiveCells] = 0
bitLifeCounter[newActiveCells] = 1
bitLifeCounter[continuousActiveCells] += 1
bitLifeVsLearning[i] = numpy.mean(bitLifeList)
return bitLifeVsLearning
def showSequenceStartLine(ax, trainingPasses, sequenceLength):
for i in xrange(trainingPasses):
ax.vlines(i*sequenceLength, 0, ax.get_ylim()[0], linestyles='--')
def runTestPhase(experiment, tmLearn=False, upLearn=True, outputfileName='results/TemporalPoolingOutputs.pdf'):
print "\nRunning test phase..."
print "tmLearn: ", tmLearn
print "upLearn: ", upLearn
inputSequences = generatedSequences
inputCategories = labeledSequences
experiment.tm.mmClearHistory()
experiment.up.mmClearHistory()
experiment.tm.reset()
experiment.up.reset()
# Persistence levels across time
poolingActivationTrace = numpy.zeros((experiment.up._numColumns, 0))
# union SDR across time
activeCellsTrace = numpy.zeros((experiment.up._numColumns, 0))
# active cells in SP across time
activeSPTrace = numpy.zeros((experiment.up._numColumns, 0))
# number of connections for SP cells
connectionCountTrace = numpy.zeros((experiment.up._numColumns, 0))
# number of active inputs per SP cells
activeOverlapsTrace = numpy.zeros((experiment.up._numColumns, 0))
# number of predicted active inputs per SP cells
predictedActiveOverlapsTrace = numpy.zeros((experiment.up._numColumns, 0))
for _ in xrange(trainingPasses):
experiment.tm.reset()
experiment.up.reset()
for i in xrange(len(inputSequences)):
sensorPattern = inputSequences[i]
inputCategory = inputCategories[i]
if sensorPattern is None:
pass
else:
experiment.tm.compute(sensorPattern,
learn=tmLearn,
sequenceLabel=inputCategory)
activeCells, predActiveCells, burstingCols, = experiment.getUnionTemporalPoolerInput()
overlapsActive = experiment.up._calculateOverlap(activeCells)
overlapsPredictedActive = experiment.up._calculateOverlap(predActiveCells)
activeOverlapsTrace = numpy.concatenate((activeOverlapsTrace, overlapsActive.reshape((experiment.up._numColumns,1))), 1)
predictedActiveOverlapsTrace = numpy.concatenate((predictedActiveOverlapsTrace, overlapsPredictedActive.reshape((experiment.up._numColumns,1))), 1)
experiment.up.compute(activeCells,
predActiveCells,
learn=upLearn,
sequenceLabel=inputCategory)
currentPoolingActivation = experiment.up._poolingActivation.reshape((experiment.up._numColumns, 1))
poolingActivationTrace = numpy.concatenate((poolingActivationTrace, currentPoolingActivation), 1)
currentUnionSDR = numpy.zeros((experiment.up._numColumns, 1))
currentUnionSDR[experiment.up._unionSDR] = 1
activeCellsTrace = numpy.concatenate((activeCellsTrace, currentUnionSDR), 1)
currentSPSDR = numpy.zeros((experiment.up._numColumns, 1))
currentSPSDR[experiment.up._activeCells] = 1
activeSPTrace = numpy.concatenate((activeSPTrace, currentSPSDR), 1)
connectionCountTrace = numpy.concatenate((connectionCountTrace,
experiment.up._connectedCounts.reshape((experiment.up._numColumns, 1))), 1)
print "\nPass\tBursting Columns Mean\tStdDev\tMax"
stats = experiment.getBurstingColumnsStats()
print "{0}\t{1}\t{2}\t{3}".format(_, stats[0], stats[1], stats[2])
# print
# print MonitorMixinBase.mmPrettyPrintMetrics(\
# experiment.tm.mmGetDefaultMetrics() + experiment.up.mmGetDefaultMetrics())
# print
experiment.tm.mmClearHistory()
newConnectionCountTrace = numpy.zeros(connectionCountTrace.shape)
n = newConnectionCountTrace.shape[1]
newConnectionCountTrace[:,0:n-2] = connectionCountTrace[:,1:n-1] - connectionCountTrace[:,0:n-2]
# estimate fraction of shared bits across adjacent time point
unionSDRshared = experiment.up._mmComputeUnionSDRdiff()
bitLifeList = experiment.up._mmComputeBitLifeStats()
bitLife = numpy.array(bitLifeList)
# Plot SP outputs, UP persistence and UP outputs in testing phase
ncolShow = 100
f, (ax1, ax2, ax3) = plt.subplots(nrows=1,ncols=3)
ax1.imshow(activeSPTrace[1:ncolShow,:], cmap=cm.Greys,interpolation="nearest",aspect='auto')
showSequenceStartLine(ax1, trainingPasses, sequenceLength)
ax1.set_title('SP SDR')
ax1.set_ylabel('Columns')
ax2.imshow(poolingActivationTrace[1:ncolShow,:], cmap=cm.Greys, interpolation="nearest",aspect='auto')
showSequenceStartLine(ax2, trainingPasses, sequenceLength)
ax2.set_title('Persistence')
ax3.imshow(activeCellsTrace[1:ncolShow,:], cmap=cm.Greys, interpolation="nearest",aspect='auto')
showSequenceStartLine(ax3, trainingPasses, sequenceLength)
ax3.set_title('Union SDR')
# ax4.imshow(newConnectionCountTrace[1:ncolShow,:], cmap=cm.Greys, interpolation="nearest",aspect='auto')
# showSequenceStartLine(ax4, trainingPasses, sequenceLength)
# ax4.set_title('New Connection #')
# ax2.set_xlabel('Time (steps)')
pp = PdfPages(outputfileName)
pp.savefig()
pp.close()
def runTPLearnPhase(experiment, learningPasses):
tmLearn = False
upLearn = True
inputSequences = generatedSequences
inputCategories = labeledSequences
experiment.tm.mmClearHistory()
experiment.up.mmClearHistory()
experiment.tm.reset()
experiment.up.reset()
# Persistence levels across time
poolingActivationTrace = numpy.zeros((experiment.up._numColumns, 0))
# union SDR across time
activeCellsTrace = numpy.zeros((experiment.up._numColumns, 0))
# active cells in SP across time
activeSPTrace = numpy.zeros((experiment.up._numColumns, 0))
# number of connections for SP cells
connectionCountTrace = numpy.zeros((experiment.up._numColumns, 0))
# number of active inputs per SP cells
activeOverlapsTrace = numpy.zeros((experiment.up._numColumns, 0))
# number of predicted active inputs per SP cells
predictedActiveOverlapsTrace = numpy.zeros((experiment.up._numColumns, 0))
permamenceTrace = numpy.zeros((experiment.tm.numberOfCells(), 0))
for _ in xrange(learningPasses):
experiment.tm.reset()
experiment.up.reset()
for i in xrange(len(inputSequences)):
sensorPattern = inputSequences[i]
inputCategory = inputCategories[i]
if sensorPattern is None:
pass
else:
experiment.tm.compute(sensorPattern,
learn=tmLearn,
sequenceLabel=inputCategory)
activeCells, predActiveCells, burstingCols, = experiment.getUnionTemporalPoolerInput()
overlapsActive = experiment.up._calculateOverlap(activeCells)
overlapsPredictedActive = experiment.up._calculateOverlap(predActiveCells)
activeOverlapsTrace = numpy.concatenate((activeOverlapsTrace, overlapsActive.reshape((experiment.up._numColumns,1))), 1)
predictedActiveOverlapsTrace = numpy.concatenate((predictedActiveOverlapsTrace, overlapsPredictedActive.reshape((experiment.up._numColumns,1))), 1)
# print ' step: ', i
# print 'current predicted input: ',numpy.where(predActiveCells)[0]
# print 'previous predicted input: ',numpy.where(experiment.up._prePredictedActiveInput)[0]
# print 'overlapsPredictedActive: ', overlapsPredictedActive[31]
experiment.up.compute(activeCells,
predActiveCells,
learn=upLearn,
sequenceLabel=inputCategory)
# print 'active UP cells: ', experiment.up._activeCells
# permamenceTrace = numpy.concatenate((permamenceTrace,
# experiment.up._permanences.getRow(31).reshape((experiment.tm.numberOfCells(),1))),1)
#
# currentPoolingActivation = experiment.up._poolingActivation.reshape((experiment.up._numColumns, 1))
# poolingActivationTrace = numpy.concatenate((poolingActivationTrace, currentPoolingActivation), 1)
#
# currentUnionSDR = numpy.zeros((experiment.up._numColumns, 1))
# currentUnionSDR[experiment.up._unionSDR] = 1
# activeCellsTrace = numpy.concatenate((activeCellsTrace, currentUnionSDR), 1)
#
# currentSPSDR = numpy.zeros((experiment.up._numColumns, 1))
# currentSPSDR[experiment.up._activeCells] = 1
# activeSPTrace = numpy.concatenate((activeSPTrace, currentSPSDR), 1)
#
# connectionCountTrace = numpy.concatenate((connectionCountTrace,
# experiment.up._connectedCounts.reshape((experiment.up._numColumns, 1))), 1)
print "\nPass\tBursting Columns Mean\tStdDev\tMax"
stats = experiment.getBurstingColumnsStats()
print "{0}\t{1}\t{2}\t{3}".format(_, stats[0], stats[1], stats[2])
# print
# print MonitorMixinBase.mmPrettyPrintMetrics(\
# experiment.tm.mmGetDefaultMetrics() + experiment.up.mmGetDefaultMetrics())
# print
experiment.tm.mmClearHistory()
# Plot SP outputs, UP persistence and UP outputs in testing phase
ncolShow = 50
f, (ax1, ax2, ax3, ax4) = plt.subplots(nrows=1,ncols=4)
ax1.imshow(activeSPTrace[1:ncolShow,:], cmap=cm.Greys, interpolation="nearest", aspect='auto')
showSequenceStartLine(ax1, trainingPasses, sequenceLength)
ax1.set_title('SP SDR')
ax1.set_ylabel('Columns')
ax2.imshow(poolingActivationTrace[1:ncolShow,:], cmap=cm.Greys, interpolation="nearest", aspect='auto')
showSequenceStartLine(ax2, trainingPasses, sequenceLength)
ax2.set_title('Persistence')
ax3.imshow(activeCellsTrace[1:ncolShow,:], cmap=cm.Greys, interpolation="nearest", aspect='auto')
showSequenceStartLine(ax3, trainingPasses, sequenceLength)
ax3.set_title('Union SDR')
ax4.imshow(predictedActiveOverlapsTrace[1:ncolShow,:], cmap=cm.Greys, interpolation="nearest",aspect='auto')
showSequenceStartLine(ax4, trainingPasses, sequenceLength)
ax4.set_title('New Connection #')
ax2.set_xlabel('Time (steps)')
def plotSummaryResults(upBeforeLearning, upDuringLearning, upAfterLearning, learningPasses):
maxSeparation = 30
skipBeginningElements = 10
activeCellsTraceBefore = upBeforeLearning._mmTraces['activeCells'].data
similarityVsSeparationBefore = getUnionSDRSimilarityCurve(activeCellsTraceBefore, trainingPasses, sequenceLength,
maxSeparation, skipBeginningElements)
activeCellsTraceAfter = upAfterLearning._mmTraces['activeCells'].data
similarityVsSeparationAfter = getUnionSDRSimilarityCurve(activeCellsTraceAfter, trainingPasses, sequenceLength,
maxSeparation, skipBeginningElements)
plotSDRsimilarityVsTemporalSeparation(similarityVsSeparationBefore, similarityVsSeparationAfter)
(similarityMatrixBefore, similarityMatrixAfter, similarityMatrixBeforeAfter) = \
calculateSimilarityMatrix(activeCellsTraceBefore, activeCellsTraceAfter)
plotTPRvsUPROverlap(similarityMatrixBeforeAfter)
plotSimilarityMatrix(similarityMatrixBefore, similarityMatrixAfter)
activeCellsTrace = upDuringLearning._mmTraces["activeCells"].data
meanBitLifeVsLearning = bitLifeVsLearningCycles(activeCellsTrace, experiment.up._numColumns, learningPasses, sequenceLength)
numBitsUsed = []
avgBitLatency = []
for rpt in xrange(learningPasses):
allActiveBits = set()
for i in xrange(sequenceLength):
allActiveBits |= (set(activeCellsTrace[rpt*sequenceLength+i]))
bitActiveTime = numpy.ones(experiment.up._numColumns) * -1
for i in xrange(sequenceLength):
curActiveCells = list(activeCellsTrace[rpt*sequenceLength+i])
for j in xrange(len(curActiveCells)):
if bitActiveTime[curActiveCells[j]] < 0:
bitActiveTime[curActiveCells[j]] = i
bitActiveTimeSummary = bitActiveTime[bitActiveTime>0]
print 'pass ', rpt, ' num bits: ', len(allActiveBits), ' latency : ',numpy.mean(bitActiveTimeSummary)
numBitsUsed.append(len(allActiveBits))
avgBitLatency.append(numpy.mean(bitActiveTimeSummary))
f = plt.figure()
plt.subplot(2,2,1)
plt.plot(numBitsUsed)
plt.xlabel(' learning pass #')
plt.ylabel(' number of cells in UPR')
plt.ylim([100,600])
plt.subplot(2,2,2)
plt.plot(avgBitLatency)
plt.xlabel(' learning pass #')
plt.ylabel(' average latency ')
plt.ylim([11,19])
plt.subplot(2,2,3)
plt.plot(meanBitLifeVsLearning)
plt.xlabel(' learning pass #')
plt.ylabel(' average lifespan ')
plt.ylim([10,30])
f.savefig(outputDir+'SDRpropertyOverLearning.eps',format='eps')
if __name__ == "__main__":
experiment = initializeNetwork()
runTMtrainingPhase(experiment)
runTestPhase(experiment, tmLearn=False, upLearn=False, outputfileName=outputDir+'TemporalPoolingBeforeLearning.pdf')
upBeforeLearning = copy.deepcopy(experiment.up)
runTPLearnPhase(experiment, learningPasses)
upDuringLearning = copy.deepcopy(experiment.up)
runTestPhase(experiment, tmLearn=False, upLearn=False, outputfileName=outputDir+'TemporalPoolingAfterLearning.pdf')
upAfterLearning = copy.deepcopy(experiment.up)
plotSummaryResults(upBeforeLearning, upDuringLearning, upAfterLearning, learningPasses)
| agpl-3.0 |
HeraclesHX/scikit-learn | sklearn/neighbors/nearest_centroid.py | 199 | 7249 | # -*- coding: utf-8 -*-
"""
Nearest Centroid Classification
"""
# Author: Robert Layton <[email protected]>
# Olivier Grisel <[email protected]>
#
# License: BSD 3 clause
import warnings
import numpy as np
from scipy import sparse as sp
from ..base import BaseEstimator, ClassifierMixin
from ..metrics.pairwise import pairwise_distances
from ..preprocessing import LabelEncoder
from ..utils.validation import check_array, check_X_y, check_is_fitted
from ..utils.sparsefuncs import csc_median_axis_0
class NearestCentroid(BaseEstimator, ClassifierMixin):
"""Nearest centroid classifier.
Each class is represented by its centroid, with test samples classified to
the class with the nearest centroid.
Read more in the :ref:`User Guide <nearest_centroid_classifier>`.
Parameters
----------
metric: string, or callable
The metric to use when calculating distance between instances in a
feature array. If metric is a string or callable, it must be one of
the options allowed by metrics.pairwise.pairwise_distances for its
metric parameter.
The centroids for the samples corresponding to each class is the point
from which the sum of the distances (according to the metric) of all
samples that belong to that particular class are minimized.
If the "manhattan" metric is provided, this centroid is the median and
for all other metrics, the centroid is now set to be the mean.
shrink_threshold : float, optional (default = None)
Threshold for shrinking centroids to remove features.
Attributes
----------
centroids_ : array-like, shape = [n_classes, n_features]
Centroid of each class
Examples
--------
>>> from sklearn.neighbors.nearest_centroid import NearestCentroid
>>> import numpy as np
>>> X = np.array([[-1, -1], [-2, -1], [-3, -2], [1, 1], [2, 1], [3, 2]])
>>> y = np.array([1, 1, 1, 2, 2, 2])
>>> clf = NearestCentroid()
>>> clf.fit(X, y)
NearestCentroid(metric='euclidean', shrink_threshold=None)
>>> print(clf.predict([[-0.8, -1]]))
[1]
See also
--------
sklearn.neighbors.KNeighborsClassifier: nearest neighbors classifier
Notes
-----
When used for text classification with tf-idf vectors, this classifier is
also known as the Rocchio classifier.
References
----------
Tibshirani, R., Hastie, T., Narasimhan, B., & Chu, G. (2002). Diagnosis of
multiple cancer types by shrunken centroids of gene expression. Proceedings
of the National Academy of Sciences of the United States of America,
99(10), 6567-6572. The National Academy of Sciences.
"""
def __init__(self, metric='euclidean', shrink_threshold=None):
self.metric = metric
self.shrink_threshold = shrink_threshold
def fit(self, X, y):
"""
Fit the NearestCentroid model according to the given training data.
Parameters
----------
X : {array-like, sparse matrix}, shape = [n_samples, n_features]
            Training vector, where n_samples is the number of samples and
n_features is the number of features.
Note that centroid shrinking cannot be used with sparse matrices.
y : array, shape = [n_samples]
Target values (integers)
"""
        # If X is sparse and the metric is "manhattan", store it in csc
        # format, as it is easier to calculate the median.
if self.metric == 'manhattan':
X, y = check_X_y(X, y, ['csc'])
else:
X, y = check_X_y(X, y, ['csr', 'csc'])
is_X_sparse = sp.issparse(X)
if is_X_sparse and self.shrink_threshold:
raise ValueError("threshold shrinking not supported"
" for sparse input")
n_samples, n_features = X.shape
le = LabelEncoder()
y_ind = le.fit_transform(y)
self.classes_ = classes = le.classes_
n_classes = classes.size
if n_classes < 2:
raise ValueError('y has less than 2 classes')
        # Mask mapping each class to its members.
self.centroids_ = np.empty((n_classes, n_features), dtype=np.float64)
# Number of clusters in each class.
nk = np.zeros(n_classes)
for cur_class in range(n_classes):
center_mask = y_ind == cur_class
nk[cur_class] = np.sum(center_mask)
if is_X_sparse:
center_mask = np.where(center_mask)[0]
# XXX: Update other averaging methods according to the metrics.
if self.metric == "manhattan":
# NumPy does not calculate median of sparse matrices.
if not is_X_sparse:
self.centroids_[cur_class] = np.median(X[center_mask], axis=0)
else:
self.centroids_[cur_class] = csc_median_axis_0(X[center_mask])
else:
if self.metric != 'euclidean':
warnings.warn("Averaging for metrics other than "
"euclidean and manhattan not supported. "
"The average is set to be the mean."
)
self.centroids_[cur_class] = X[center_mask].mean(axis=0)
if self.shrink_threshold:
dataset_centroid_ = np.mean(X, axis=0)
# m parameter for determining deviation
m = np.sqrt((1. / nk) + (1. / n_samples))
# Calculate deviation using the standard deviation of centroids.
variance = (X - self.centroids_[y_ind]) ** 2
variance = variance.sum(axis=0)
s = np.sqrt(variance / (n_samples - n_classes))
s += np.median(s) # To deter outliers from affecting the results.
mm = m.reshape(len(m), 1) # Reshape to allow broadcasting.
ms = mm * s
deviation = ((self.centroids_ - dataset_centroid_) / ms)
# Soft thresholding: if the deviation crosses 0 during shrinking,
# it becomes zero.
signs = np.sign(deviation)
deviation = (np.abs(deviation) - self.shrink_threshold)
deviation[deviation < 0] = 0
deviation *= signs
# Now adjust the centroids using the deviation
msd = ms * deviation
self.centroids_ = dataset_centroid_[np.newaxis, :] + msd
return self
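    # Worked soft-thresholding example with illustrative numbers: for
    # shrink_threshold=0.3, a normalized deviation of +0.8 shrinks to +0.5,
    # -0.8 shrinks to -0.5, and +0.2 is driven to 0, removing that feature
    # from the corresponding centroid.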
def predict(self, X):
"""Perform classification on an array of test vectors X.
The predicted class C for each sample in X is returned.
Parameters
----------
X : array-like, shape = [n_samples, n_features]
Returns
-------
C : array, shape = [n_samples]
Notes
-----
If the metric constructor parameter is "precomputed", X is assumed to
be the distance matrix between the data to be predicted and
``self.centroids_``.
"""
check_is_fitted(self, 'centroids_')
X = check_array(X, accept_sparse='csr')
return self.classes_[pairwise_distances(
X, self.centroids_, metric=self.metric).argmin(axis=1)]
| bsd-3-clause |
vighneshbirodkar/scikit-image | skimage/viewer/qt.py | 48 | 1281 | _qt_version = None
has_qt = True
try:
from matplotlib.backends.qt_compat import QtGui, QtCore, QtWidgets, QT_RC_MAJOR_VERSION as _qt_version
except ImportError:
try:
from matplotlib.backends.qt4_compat import QtGui, QtCore
QtWidgets = QtGui
_qt_version = 4
except ImportError:
# Mock objects
class QtGui_cls(object):
QMainWindow = object
QDialog = object
QWidget = object
class QtCore_cls(object):
class Qt(object):
TopDockWidgetArea = None
BottomDockWidgetArea = None
LeftDockWidgetArea = None
RightDockWidgetArea = None
def Signal(self, *args, **kwargs):
pass
QtGui = QtWidgets = QtGui_cls()
QtCore = QtCore_cls()
has_qt = False
if _qt_version == 5:
from matplotlib.backends.backend_qt5 import FigureManagerQT
from matplotlib.backends.backend_qt5agg import FigureCanvasQTAgg
elif _qt_version == 4:
from matplotlib.backends.backend_qt4 import FigureManagerQT
from matplotlib.backends.backend_qt4agg import FigureCanvasQTAgg
else:
FigureManagerQT = object
FigureCanvasQTAgg = object
Qt = QtCore.Qt
Signal = QtCore.Signal
| bsd-3-clause |
ShawnMurd/MetPy | conftest.py | 2 | 1246 | # Copyright (c) 2016,2019 MetPy Developers.
# Distributed under the terms of the BSD 3-Clause License.
# SPDX-License-Identifier: BSD-3-Clause
"""Configure pytest for metpy."""
import os
import matplotlib
import matplotlib.pyplot
import numpy
import pandas
import pytest
import scipy
import xarray
import metpy.calc
# Need to disable fallback before importing pint
os.environ['PINT_ARRAY_PROTOCOL_FALLBACK'] = '0'
import pint # noqa: I100, E402
def pytest_report_header(config, startdir):
"""Add dependency information to pytest output."""
return ('Dependencies: Matplotlib ({}), NumPy ({}), Pandas ({}), '
'Pint ({}), SciPy ({}), Xarray ({})'.format(matplotlib.__version__,
numpy.__version__, pandas.__version__,
pint.__version__, scipy.__version__,
xarray.__version__))
@pytest.fixture(autouse=True)
def doctest_available_modules(doctest_namespace):
"""Make modules available automatically to doctests."""
doctest_namespace['metpy'] = metpy
doctest_namespace['metpy.calc'] = metpy.calc
doctest_namespace['plt'] = matplotlib.pyplot
| bsd-3-clause |
othercriteria/StochasticBlockmodel | minitest_gibbs.py | 1 | 4257 | #!/usr/bin/env python
# Check SEM's ability to stay in the neighborhood of the (label) truth
# when initialized at the (label) truth.
import numpy as np
import matplotlib.pyplot as plt
from matplotlib.mlab import PCA
from Network import Network
from Models import StationaryLogistic, NonstationaryLogistic, Blockmodel
from Models import alpha_zero, alpha_norm
from Experiment import minimum_disagreement
# Parameters
N = 20
theta = 3.0
alpha_sd = 2.0
from_truth = True
steps = 100
# Set random seed for reproducible outputs
np.random.seed(137)
net = Network(N)
net.new_node_covariate('value').from_pairs(net.names, [0]*(N/2) + [1]*(N/2))
for v_1, v_2, name in [(0, 0, 'll'),
(1, 1, 'rr'),
(0, 1, 'lr')]:
def f_x(i_1, i_2):
return ((net.node_covariates['value'][i_1] == v_1) and
(net.node_covariates['value'][i_2] == v_2))
net.new_edge_covariate(name).from_binary_function_ind(f_x)
def f_x(i_1, i_2):
return np.random.uniform(-np.sqrt(3), np.sqrt(3))
net.new_edge_covariate('x').from_binary_function_ind(f_x)
data_model = NonstationaryLogistic()
data_model.beta['x'] = theta
for name, block_theta in [('ll', 4.0),
('rr', 3.0),
('lr', -2.0)]:
data_model.beta[name] = block_theta
alpha_norm(net, alpha_sd)
data_model.match_kappa(net, ('row_sum', 2))
net.generate(data_model)
net.show_heatmap()
net.offset_extremes()
fit_base_model = NonstationaryLogistic()
fit_base_model.beta['x'] = None
fit_model = Blockmodel(fit_base_model, 2)
#fit_model.base_model.fit = fit_model.base_model.fit_conditional
# Initialize block assignments
net.new_node_covariate_int('z')
if from_truth:
net.node_covariates['z'][:] = net.node_covariates['value'][:]
else:
net.node_covariates['z'][:] = np.random.random(N) < 0.5
# Calculate NLL at initialized block assignments
fit_model.fit_sem(net, cycles = 1, sweeps = 0,
use_best = False, store_all = True)
baseline_nll = fit_model.sem_trace[0][0]
nll_trace = []
z_trace = np.empty((steps,N))
disagreement_trace = []
theta_trace = []
for step in range(steps):
print step
fit_model.fit_sem(net, 1, 2, store_all = True)
#fit_model.fit_kl(net, 1)
nll_trace.append(fit_model.nll(net))
z_trace[step,:] = net.node_covariates['z'][:]
disagreement = minimum_disagreement(net.node_covariates['value'][:],
net.node_covariates['z'][:])
disagreement_trace.append(disagreement)
theta_trace.append(fit_model.base_model.beta['x'])
# Eliminate symmetry of 'z'
for step in range(steps):
if np.mean(z_trace[step,:]) < 0.5:
z_trace[step,:] = 1 - z_trace[step,:]
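# Small jitter keeps the binary traces from being exactly constant in any
# column, presumably so that the PCA below does not degenerate.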
z_trace += np.random.normal(0, 0.01, (steps, N))
nll_trace = np.array(nll_trace)
nll_trace -= baseline_nll
disagreement_trace = np.array(disagreement_trace)
plt.figure()
plt.plot(np.arange(steps), theta_trace)
plt.xlabel('step')
plt.ylabel('theta')
plt.figure()
plt.plot(np.arange(steps), nll_trace)
plt.xlabel('step')
plt.ylabel('NLL')
plt.figure()
plt.plot(np.arange(steps), disagreement_trace)
plt.xlabel('step')
plt.ylabel('normalized disagreement')
plt.figure()
nll_trimmed = nll_trace[nll_trace <= np.percentile(nll_trace, 90)]
plt.hist(nll_trimmed, bins = 50)
plt.xlabel('NLL')
plt.title('Trimmed histogram of NLL')
try:
pca = PCA(z_trace)
plt.figure()
plt.plot(np.arange(steps), pca.Y[:,0])
plt.xlabel('step')
plt.ylabel('z (PC1)')
plt.figure()
plt.subplot(211)
plt.plot(pca.Y[:,0], nll_trace, '.')
plt.xlabel('z (PC1)')
plt.ylabel('NLL')
plt.subplot(212)
plt.plot(pca.Y[:,1], nll_trace, '.')
plt.xlabel('z (PC2)')
plt.ylabel('NLL')
plt.figure()
plt.subplot(211)
plt.plot(pca.Y[:,0], disagreement_trace, '.')
plt.xlabel('z (PC1)')
plt.ylabel('normalized disagreement')
plt.subplot(212)
plt.plot(pca.Y[:,1], disagreement_trace, '.')
plt.xlabel('z (PC2)')
plt.ylabel('normalized disagreement')
plt.figure()
plt.plot(pca.Y[:,0], pca.Y[:,1])
plt.xlabel('z (PC1)')
plt.ylabel('z (PC2)')
except:
print 'PCA failed; maybe no variation in z or steps < N?'
plt.show()
| mit |
AmberJBlue/aima-python | submissions/Kinley/myNN.py | 13 | 3383 | from sklearn import datasets
from sklearn.neural_network import MLPClassifier
import traceback
from submissions.Kinley import drugs
class DataFrame:
data = []
feature_names = []
target = []
target_names = []
drugData = DataFrame()
drugData.data = []
targetData = []
alcohol = drugs.get_surveys('Alcohol Dependence')
#tobacco = drugs.get_surveys('Tobacco Use')
i=0
for survey in alcohol[0]['data']:
try:
youngUser = float(survey['Young']),
youngUserFloat = youngUser[0]
midUser = float(survey['Medium']),
midUserFloat = midUser[0]
oldUser = float(survey['Old']),
oldUserFloat = oldUser[0]
place = survey['State']
total = youngUserFloat + midUserFloat + oldUserFloat
targetData.append(total)
youngCertain = float(survey['Young CI']),
youngCertainFloat = youngCertain[0]
midCertain = float(survey['Medium CI']),
midCertainFloat = midCertain[0]
oldCertain = float(survey['Old CI']),
oldCertainFloat = oldCertain[0]
drugData.data.append([youngCertainFloat, midCertainFloat, oldCertainFloat])
i = i + 1
except:
traceback.print_exc()
drugData.feature_names = [
'Young CI',
'Medium CI',
'Old CI',
]
drugData.target = []
def drugTarget(number):
if number > 100.0:
return 1
return 0
for pre in targetData:
# choose the target
tt = drugTarget(pre)
drugData.target.append(tt)
drugData.target_names = [
'States > 100k alcoholics',
'States < 100k alcoholics',
]
'''
Make a custom classifier.
'''
mlpc = MLPClassifier(
# hidden_layer_sizes = (100,),
# activation = 'relu',
solver='sgd', # 'adam',
# alpha = 0.0001,
# batch_size='auto',
learning_rate = 'adaptive', # 'constant',
# power_t = 0.5,
max_iter = 1000, # 200,
# shuffle = True,
# random_state = None,
# tol = 1e-4,
# verbose = False,
# warm_start = False,
# momentum = 0.9,
# nesterovs_momentum = True,
# early_stopping = False,
# validation_fraction = 0.1,
# beta_1 = 0.9,
# beta_2 = 0.999,
# epsilon = 1e-8,
)
'''
Try scaling the data.
'''
drugScaled = DataFrame()
def setupScales(grid):
global min, max
min = list(grid[0])
max = list(grid[0])
for row in range(1, len(grid)):
for col in range(len(grid[row])):
cell = grid[row][col]
if cell < min[col]:
min[col] = cell
if cell > max[col]:
max[col] = cell
def scaleGrid(grid):
newGrid = []
for row in range(len(grid)):
newRow = []
for col in range(len(grid[row])):
try:
cell = grid[row][col]
scaled = (cell - min[col]) \
/ (max[col] - min[col])
newRow.append(scaled)
except:
pass
newGrid.append(newRow)
return newGrid
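# Worked example of the min-max scaling above (added for illustration; the grid
# values are made up): for grid = [[1, 10], [2, 20], [3, 30]], setupScales()
# finds min = [1, 10] and max = [3, 30], and scaleGrid() returns
# [[0.0, 0.0], [0.5, 0.5], [1.0, 1.0]] -- every feature is mapped into [0, 1].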
setupScales(drugData.data)
drugScaled.data = scaleGrid(drugData.data)
drugScaled.feature_names = drugData.feature_names
drugScaled.target = drugData.target
drugScaled.target_names = drugData.target_names
Examples = {
'drugDefault': {
'frame': drugData,
},
'drugSGD': {
'frame': drugData,
'mlpc': mlpc
},
'drugScaled': {
'frame': drugScaled,
},
} | mit |
johndamen/pyeasyplot | easyplot/managers.py | 1 | 7266 | from matplotlib import pyplot as plt, axes
import numpy as np
from math import ceil
from collections import ChainMap
from . import datasets
class FigureManager(object):
"""
object to simplify editing figure settings
"""
def __init__(self, fig):
self.fig = fig
self.fig.clear()
self.axes = [AxesManager(fig.add_subplot(111))]
self._current_index = 0
self._axrow_count = 1
@property
def current_index(self):
if self._current_index >= len(self.axes):
self._current_index = 0
return self._current_index
@current_index.setter
def current_index(self, val):
self._current_index = val
def clear_figure(self):
self.fig.clear()
def ax_count(self):
return len(self.axes)
def axrow_count(self):
return self._axrow_count
def set_ax_count(self, val, reset=True):
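# Added note: when reset is False, the layers and settings of the existing
# axes are carried over to the new grid (any extra axes start empty); when
# reset is True, every axes in the new grid starts from scratch.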
if not reset:
data = [(a.layers, a.settings) for a in self.axes]
data += [(None, dict())]*(val - len(self.axes))
else:
data = [(None, dict())]*val
self.axes = []
self.fig.clear()
for i in range(val):
ax = self.fig.add_subplot(self.axrow_count(), ceil(val/self.axrow_count()), i+1)
layers, settings = data[i]
axman = AxesManager(ax, layers=layers, **settings)
self.axes.append(axman)
def set_axrow_count(self, i, reset=True):
self._axrow_count = i
self.set_ax_count(self.ax_count(), reset=reset)
def ax2index(self, ax):
if isinstance(ax, AxesManager):
ax = ax.ax
for i, a in enumerate(self.axes):
if a.ax is ax:
return i
raise ValueError('could not identify axes')
def format_axes(self, i, reset=False, **settings):
"""
apply settings to axes object
:param i: index of the axes or axes instance
:param reset: update existing settings or reset all to defaults
:param settings: settings to apply
"""
if not isinstance(i, int):
i = self.ax2index(i)
self.axes[i].format(**settings)
def set_style(self, s):
"""
apply style and recreate axes
:param s: style name from plt.style.available
"""
old_data = [(a.position, a.layers, a.settings) for a in self.axes]
print(old_data)
self.fig.clear()
self.axes = []
plt.style.use(s)
for p, l, s in old_data:
self.axes.append(AxesManager(self.fig.add_axes(p), layers=l, **s))
def draw(self):
self.fig.canvas.draw()
def gca(self):
return self.axes[self.current_index]
class AxesManager(object):
def __init__(self, ax, layers=None, **settings):
if not isinstance(ax, axes.Axes):
raise TypeError('first argument not an instance of matplotlib.axes.Axes')
self.ax = ax
self.settings = dict()
self.format(**settings)
self.layers = layers or LayersContainer()
def set_position(self, *args):
if len(args) == 1:
pos, = args
else:
x, y, w, h = args
pos = [x, y, w, h]
self.ax.set_position(pos)
@property
def position(self):
x1, y1, x2, y2 = tuple(self.ax._position._points.flatten())
return [x1, y1, x2 - x1, y2 - y1]
def format(self, reset=False, **settings):
if reset:
self.settings = settings
else:
self.settings.update(settings)
self.apply_settings()
def check_limits(self, xlim, ylim):
self._check_limit('xlim', *xlim)
self._check_limit('ylim', *ylim)
self.apply_settings()
def _check_limit(self, name, vmin, vmax):
try:
vmin_old, vmax_old = self.settings[name]
except KeyError:
self.settings[name] = np.array([vmin, vmax])
else:
self.settings[name] = np.array([min(vmin_old, vmin), max(vmax_old, vmax)])
def apply_settings(self):
for k, v in self.settings.items():
try:
setter = getattr(self.ax, 'set_{}'.format(k))
except AttributeError:
raise AttributeError('{} not a valid axes setting'.format(k))
else:
setter(v)
def plot(self):
self.ax.clear()
self.apply_settings()
self.ax.hold(True)
self.ax.set_prop_cycle(None)
self.layers.plot(self.ax)
self.ax.hold(False)
def __str__(self):
return '<{}.{} [{:.2f}, {:.2f}, {:.2f}, {:.2f}]>'.format(__name__, self.__class__.__name__, *self.position)
class LayersContainer(list):
def __init__(self, *layers):
super().__init__()
for l in layers:
if isinstance(l, dict):
self.add(l['data'], **l['kwargs'])
else:
self.add(l)
self._current_index = len(self) - 1
def __str__(self):
indent = ' '*8
items = []
for item in self:
kwargstr = 'dict({})'.format(', '.join('{!s}={!r}'.format(*i) for i in item['kwargs'].items()))
items.append('{dataset} {kwargs}'.format(
indent=indent,
dataset=item['data'],
kwargs=kwargstr))
return 'Layers([{}])'.format(('\n'+indent).join(items))
@property
def current_index(self):
return self._current_index
@current_index.setter
def current_index(self, val):
if not isinstance(val, int):
raise TypeError('index must be an integer')
if val < 0 or val >= len(self):
raise IndexError(val)
self._current_index = val
def gcl(self):
return self[self.current_index]
def add(self, d, **kwargs):
if not isinstance(d, datasets.Dataset):
raise TypeError('invalid value for dataset')
kwargs = dict(ChainMap(kwargs, d.PLOT_DEFAULTS))
self.append(dict(data=d, kwargs=kwargs))
self.current_index = len(self) - 1
def edit(self, i, **kwargs):
self[i]['kwargs'].update(kwargs)
def edit_current(self, reset=False, **kwargs):
if reset:
self.gcl()['kwargs'] = kwargs
else:
self.gcl()['kwargs'].update(kwargs)
print(self)
def order(self, indices):
if sorted(list(indices)) != list(range(len(self))):
raise IndexError('indices not valid, all layers must be included once')
self[:] = [self[i] for i in indices]
self.current_index = len(self) - 1
def delete(self, i):
del self[i]
def plot(self, ax):
r = []
for l in self:
r.append(l['data'].plot(ax, **l['kwargs']))
return r
if __name__ == '__main__':
fig = plt.figure()
figman = FigureManager(fig)
figman.format_axes(0,
xlim=(0, 10),
ylim=(5, 6),
yticks=np.linspace(5, 6, 6),
ylabel='y',
xlabel='x',
xticks=np.arange(0, 11, 2),
title='test')
figman.set_style('ggplot')
plt.show()
| gpl-3.0 |
tdhopper/scikit-learn | sklearn/manifold/isomap.py | 229 | 7169 | """Isomap for manifold learning"""
# Author: Jake Vanderplas -- <[email protected]>
# License: BSD 3 clause (C) 2011
import numpy as np
from ..base import BaseEstimator, TransformerMixin
from ..neighbors import NearestNeighbors, kneighbors_graph
from ..utils import check_array
from ..utils.graph import graph_shortest_path
from ..decomposition import KernelPCA
from ..preprocessing import KernelCenterer
class Isomap(BaseEstimator, TransformerMixin):
"""Isomap Embedding
Non-linear dimensionality reduction through Isometric Mapping
Read more in the :ref:`User Guide <isomap>`.
Parameters
----------
n_neighbors : integer
number of neighbors to consider for each point.
n_components : integer
number of coordinates for the manifold
eigen_solver : ['auto'|'arpack'|'dense']
'auto' : Attempt to choose the most efficient solver
for the given problem.
'arpack' : Use Arnoldi decomposition to find the eigenvalues
and eigenvectors.
'dense' : Use a direct solver (i.e. LAPACK)
for the eigenvalue decomposition.
tol : float
Convergence tolerance passed to arpack or lobpcg.
not used if eigen_solver == 'dense'.
max_iter : integer
Maximum number of iterations for the arpack solver.
not used if eigen_solver == 'dense'.
path_method : string ['auto'|'FW'|'D']
Method to use in finding shortest path.
'auto' : attempt to choose the best algorithm automatically.
'FW' : Floyd-Warshall algorithm.
'D' : Dijkstra's algorithm.
neighbors_algorithm : string ['auto'|'brute'|'kd_tree'|'ball_tree']
Algorithm to use for nearest neighbors search,
passed to neighbors.NearestNeighbors instance.
Attributes
----------
embedding_ : array-like, shape (n_samples, n_components)
Stores the embedding vectors.
kernel_pca_ : object
`KernelPCA` object used to implement the embedding.
training_data_ : array-like, shape (n_samples, n_features)
Stores the training data.
nbrs_ : sklearn.neighbors.NearestNeighbors instance
Stores nearest neighbors instance, including BallTree or KDtree
if applicable.
dist_matrix_ : array-like, shape (n_samples, n_samples)
Stores the geodesic distance matrix of training data.
References
----------
.. [1] Tenenbaum, J.B.; De Silva, V.; & Langford, J.C. A global geometric
framework for nonlinear dimensionality reduction. Science 290 (5500)
"""
def __init__(self, n_neighbors=5, n_components=2, eigen_solver='auto',
tol=0, max_iter=None, path_method='auto',
neighbors_algorithm='auto'):
self.n_neighbors = n_neighbors
self.n_components = n_components
self.eigen_solver = eigen_solver
self.tol = tol
self.max_iter = max_iter
self.path_method = path_method
self.neighbors_algorithm = neighbors_algorithm
self.nbrs_ = NearestNeighbors(n_neighbors=n_neighbors,
algorithm=neighbors_algorithm)
def _fit_transform(self, X):
X = check_array(X)
self.nbrs_.fit(X)
self.training_data_ = self.nbrs_._fit_X
self.kernel_pca_ = KernelPCA(n_components=self.n_components,
kernel="precomputed",
eigen_solver=self.eigen_solver,
tol=self.tol, max_iter=self.max_iter)
kng = kneighbors_graph(self.nbrs_, self.n_neighbors,
mode='distance')
self.dist_matrix_ = graph_shortest_path(kng,
method=self.path_method,
directed=False)
G = self.dist_matrix_ ** 2
G *= -0.5
self.embedding_ = self.kernel_pca_.fit_transform(G)
def reconstruction_error(self):
"""Compute the reconstruction error for the embedding.
Returns
-------
reconstruction_error : float
Notes
-----
The cost function of an isomap embedding is
``E = frobenius_norm[K(D) - K(D_fit)] / n_samples``
Where D is the matrix of distances for the input data X,
D_fit is the matrix of distances for the output embedding X_fit,
and K is the isomap kernel:
``K(D) = -0.5 * (I - 1/n_samples) * D^2 * (I - 1/n_samples)``
"""
G = -0.5 * self.dist_matrix_ ** 2
G_center = KernelCenterer().fit_transform(G)
evals = self.kernel_pca_.lambdas_
return np.sqrt(np.sum(G_center ** 2) - np.sum(evals ** 2)) / G.shape[0]
def fit(self, X, y=None):
"""Compute the embedding vectors for data X
Parameters
----------
X : {array-like, sparse matrix, BallTree, KDTree, NearestNeighbors}
Sample data, shape = (n_samples, n_features), in the form of a
numpy array, precomputed tree, or NearestNeighbors
object.
Returns
-------
self : returns an instance of self.
"""
self._fit_transform(X)
return self
def fit_transform(self, X, y=None):
"""Fit the model from data in X and transform X.
Parameters
----------
X: {array-like, sparse matrix, BallTree, KDTree}
Training vector, where n_samples in the number of samples
and n_features is the number of features.
Returns
-------
X_new: array-like, shape (n_samples, n_components)
"""
self._fit_transform(X)
return self.embedding_
def transform(self, X):
"""Transform X.
This is implemented by linking the points X into the graph of geodesic
distances of the training data. First the `n_neighbors` nearest
neighbors of X are found in the training data, and from these the
shortest geodesic distances from each point in X to each point in
the training data are computed in order to construct the kernel.
The embedding of X is the projection of this kernel onto the
embedding vectors of the training set.
Parameters
----------
X: array-like, shape (n_samples, n_features)
Returns
-------
X_new: array-like, shape (n_samples, n_components)
"""
X = check_array(X)
distances, indices = self.nbrs_.kneighbors(X, return_distance=True)
#Create the graph of shortest distances from X to self.training_data_
# via the nearest neighbors of X.
#This can be done as a single array operation, but it potentially
# takes a lot of memory. To avoid that, use a loop:
G_X = np.zeros((X.shape[0], self.training_data_.shape[0]))
for i in range(X.shape[0]):
G_X[i] = np.min((self.dist_matrix_[indices[i]]
+ distances[i][:, None]), 0)
G_X **= 2
G_X *= -0.5
return self.kernel_pca_.transform(G_X)
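# Usage sketch appended for illustration (not part of the upstream module); it
# assumes scikit-learn and its datasets module are installed, and the swiss-roll
# size and neighbour count are arbitrary example values.
# >>> from sklearn.datasets import make_swiss_roll
# >>> from sklearn.manifold import Isomap
# >>> X, _ = make_swiss_roll(n_samples=300, random_state=0)
# >>> Isomap(n_neighbors=10, n_components=2).fit_transform(X).shape
# (300, 2)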
| bsd-3-clause |
nasa/RHEAS | doc/conf.py | 1 | 9833 | # -*- coding: utf-8 -*-
#
# RHEAS documentation build configuration file, created by
# sphinx-quickstart on Sat Jan 24 16:09:21 2015.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys
import os
# Mock out the imports for some modules
from mock import MagicMock
class Mock(MagicMock):
@classmethod
def __getattr__(cls, name):
return Mock()
MOCK_MODULES = ['argparse', 'numpy', 'pandas', 'psycopg2', 'scipy', 'scipy.linalg', 'netCDF4',
'scipy.stats', 'scipy.spatial', 'scipy.spatial.distance', 'GDAL', 'lxml', 'lxml.html',
'requests', 'h5py', 'osgeo', 'python-dateutil', 'dateutil', 'dateutil.relativedelta', 'requests.auth']
sys.modules.update((mod_name, Mock()) for mod_name in MOCK_MODULES)
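# Note added for context: replacing these imports with mocks lets Sphinx autodoc
# import the RHEAS sources on documentation build machines where the compiled
# scientific dependencies are not installed; only signatures and docstrings are
# needed to render the API pages.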
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
sys.path.insert(0, os.path.abspath('../src'))
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
# needs_sphinx = '1.2.3'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
'sphinx.ext.autodoc',
'sphinx.ext.pngmath',
'sphinx.ext.viewcode',
'sphinx.ext.graphviz',
'sphinx.ext.inheritance_diagram'
# 'sphinxcontrib.programoutput',
# 'sphinxcontrib.plantuml'
]
# Set PlantUML path
# plantuml = 'java -jar /usr/local/Cellar/plantuml/8018/plantuml.8018.jar'
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
# source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'RHEAS'
copyright = u'2015, Kostas Andreadis'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = '0.2'
# The full version, including alpha/beta/rc tags.
release = '0.2'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
# today = ''
# Else, today_fmt is used as the format for a strftime call.
# today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build']
# The reST default role (used for this markup: `text`) to use for all
# documents.
# default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
# add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
# add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
# show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
# modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built documents.
# keep_warnings = False
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'sphinx_rtd_theme'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
# html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
# html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
# html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
# html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
# html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
# html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
# html_static_path = ['_static']
# Add any extra paths that contain custom files (such as robots.txt or
# .htaccess) here, relative to this directory. These files are copied
# directly to the root of the documentation.
#html_extra_path = []
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
# html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
# html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
# html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
# html_additional_pages = {}
# If false, no module index is generated.
html_domain_indices = True
# If false, no index is generated.
html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
# html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
# html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
# html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
# html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
# html_file_suffix = None
# Language to be used for generating the HTML full-text search index.
# Sphinx supports the following languages:
# 'da', 'de', 'en', 'es', 'fi', 'fr', 'hu', 'it', 'ja'
# 'nl', 'no', 'pt', 'ro', 'ru', 'sv', 'tr'
# html_search_language = 'en'
# A dictionary with options for the search language support, empty by default.
# Now only 'ja' uses this config value
# html_search_options = {'type': 'default'}
# The name of a javascript file (relative to the configuration directory) that
# implements a search results scorer. If empty, the default will be used.
# html_search_scorer = 'scorer.js'
# Output file base name for HTML help builder.
htmlhelp_basename = 'RHEASdoc'
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
# Latex figure (float) alignment
#'figure_align': 'htbp',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
('index', 'RHEAS.tex', u'RHEAS Documentation',
u'Kostas Andreadis', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
('index', 'rheas', u'RHEAS Documentation',
[u'Kostas Andreadis'], 1)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
('index', 'RHEAS', u'RHEAS Documentation',
u'Kostas Andreadis', 'RHEAS', 'One line description of project.',
'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
# If true, do not generate a @detailmenu in the "Top" node's menu.
#texinfo_no_detailmenu = False
| mit |
elkingtonmcb/scikit-learn | sklearn/utils/__init__.py | 79 | 14202 | """
The :mod:`sklearn.utils` module includes various utilities.
"""
from collections import Sequence
import numpy as np
from scipy.sparse import issparse
import warnings
from .murmurhash import murmurhash3_32
from .validation import (as_float_array,
assert_all_finite,
check_random_state, column_or_1d, check_array,
check_consistent_length, check_X_y, indexable,
check_symmetric, DataConversionWarning)
from .class_weight import compute_class_weight, compute_sample_weight
from ..externals.joblib import cpu_count
__all__ = ["murmurhash3_32", "as_float_array",
"assert_all_finite", "check_array",
"check_random_state",
"compute_class_weight", "compute_sample_weight",
"column_or_1d", "safe_indexing",
"check_consistent_length", "check_X_y", 'indexable',
"check_symmetric"]
class deprecated(object):
"""Decorator to mark a function or class as deprecated.
Issue a warning when the function is called/the class is instantiated and
add a warning to the docstring.
The optional extra argument will be appended to the deprecation message
and the docstring. Note: to use this with the default value for extra, put
in an empty set of parentheses:
>>> from sklearn.utils import deprecated
>>> deprecated() # doctest: +ELLIPSIS
<sklearn.utils.deprecated object at ...>
>>> @deprecated()
... def some_function(): pass
"""
# Adapted from http://wiki.python.org/moin/PythonDecoratorLibrary,
# but with many changes.
def __init__(self, extra=''):
"""
Parameters
----------
extra: string
to be added to the deprecation messages
"""
self.extra = extra
def __call__(self, obj):
if isinstance(obj, type):
return self._decorate_class(obj)
else:
return self._decorate_fun(obj)
def _decorate_class(self, cls):
msg = "Class %s is deprecated" % cls.__name__
if self.extra:
msg += "; %s" % self.extra
# FIXME: we should probably reset __new__ for full generality
init = cls.__init__
def wrapped(*args, **kwargs):
warnings.warn(msg, category=DeprecationWarning)
return init(*args, **kwargs)
cls.__init__ = wrapped
wrapped.__name__ = '__init__'
wrapped.__doc__ = self._update_doc(init.__doc__)
wrapped.deprecated_original = init
return cls
def _decorate_fun(self, fun):
"""Decorate function fun"""
msg = "Function %s is deprecated" % fun.__name__
if self.extra:
msg += "; %s" % self.extra
def wrapped(*args, **kwargs):
warnings.warn(msg, category=DeprecationWarning)
return fun(*args, **kwargs)
wrapped.__name__ = fun.__name__
wrapped.__dict__ = fun.__dict__
wrapped.__doc__ = self._update_doc(fun.__doc__)
return wrapped
def _update_doc(self, olddoc):
newdoc = "DEPRECATED"
if self.extra:
newdoc = "%s: %s" % (newdoc, self.extra)
if olddoc:
newdoc = "%s\n\n%s" % (newdoc, olddoc)
return newdoc
def safe_mask(X, mask):
"""Return a mask which is safe to use on X.
Parameters
----------
X : {array-like, sparse matrix}
Data on which to apply mask.
mask : array
Mask to be used on X.
Returns
-------
mask
"""
mask = np.asarray(mask)
if np.issubdtype(mask.dtype, np.int):
return mask
if hasattr(X, "toarray"):
ind = np.arange(mask.shape[0])
mask = ind[mask]
return mask
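# Illustrative example (added, not from the upstream docstring): for a sparse
# matrix, a boolean mask is converted to integer row indices, e.g.
# >>> from scipy.sparse import csr_matrix
# >>> safe_mask(csr_matrix(np.eye(3)), np.array([True, False, True]))
# array([0, 2])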
def safe_indexing(X, indices):
"""Return items or rows from X using indices.
Allows simple indexing of lists or arrays.
Parameters
----------
X : array-like, sparse-matrix, list.
Data from which to sample rows or items.
indices : array-like, list
Indices according to which X will be subsampled.
"""
if hasattr(X, "iloc"):
# Pandas Dataframes and Series
try:
return X.iloc[indices]
except ValueError:
# Cython typed memoryviews internally used in pandas do not support
# readonly buffers.
warnings.warn("Copying input dataframe for slicing.",
DataConversionWarning)
return X.copy().iloc[indices]
elif hasattr(X, "shape"):
if hasattr(X, 'take') and (hasattr(indices, 'dtype') and
indices.dtype.kind == 'i'):
# This is often substantially faster than X[indices]
return X.take(indices, axis=0)
else:
return X[indices]
else:
return [X[idx] for idx in indices]
def resample(*arrays, **options):
"""Resample arrays or sparse matrices in a consistent way
The default strategy implements one step of the bootstrapping
procedure.
Parameters
----------
*arrays : sequence of indexable data-structures
Indexable data-structures can be arrays, lists, dataframes or scipy
sparse matrices with consistent first dimension.
replace : boolean, True by default
Implements resampling with replacement. If False, this will implement
(sliced) random permutations.
n_samples : int, None by default
Number of samples to generate. If left to None this is
automatically set to the first dimension of the arrays.
random_state : int or RandomState instance
Control the shuffling for reproducible behavior.
Returns
-------
resampled_arrays : sequence of indexable data-structures
Sequence of resampled views of the collections. The original arrays are
not impacted.
Examples
--------
It is possible to mix sparse and dense arrays in the same run::
>>> X = np.array([[1., 0.], [2., 1.], [0., 0.]])
>>> y = np.array([0, 1, 2])
>>> from scipy.sparse import coo_matrix
>>> X_sparse = coo_matrix(X)
>>> from sklearn.utils import resample
>>> X, X_sparse, y = resample(X, X_sparse, y, random_state=0)
>>> X
array([[ 1., 0.],
[ 2., 1.],
[ 1., 0.]])
>>> X_sparse # doctest: +ELLIPSIS +NORMALIZE_WHITESPACE
<3x2 sparse matrix of type '<... 'numpy.float64'>'
with 4 stored elements in Compressed Sparse Row format>
>>> X_sparse.toarray()
array([[ 1., 0.],
[ 2., 1.],
[ 1., 0.]])
>>> y
array([0, 1, 0])
>>> resample(y, n_samples=2, random_state=0)
array([0, 1])
See also
--------
:func:`sklearn.utils.shuffle`
"""
random_state = check_random_state(options.pop('random_state', None))
replace = options.pop('replace', True)
max_n_samples = options.pop('n_samples', None)
if options:
raise ValueError("Unexpected kw arguments: %r" % options.keys())
if len(arrays) == 0:
return None
first = arrays[0]
n_samples = first.shape[0] if hasattr(first, 'shape') else len(first)
if max_n_samples is None:
max_n_samples = n_samples
if max_n_samples > n_samples:
raise ValueError("Cannot sample %d out of arrays with dim %d" % (
max_n_samples, n_samples))
check_consistent_length(*arrays)
if replace:
indices = random_state.randint(0, n_samples, size=(max_n_samples,))
else:
indices = np.arange(n_samples)
random_state.shuffle(indices)
indices = indices[:max_n_samples]
# convert sparse matrices to CSR for row-based indexing
arrays = [a.tocsr() if issparse(a) else a for a in arrays]
resampled_arrays = [safe_indexing(a, indices) for a in arrays]
if len(resampled_arrays) == 1:
# syntactic sugar for the unit argument case
return resampled_arrays[0]
else:
return resampled_arrays
def shuffle(*arrays, **options):
"""Shuffle arrays or sparse matrices in a consistent way
This is a convenience alias to ``resample(*arrays, replace=False)`` to do
random permutations of the collections.
Parameters
----------
*arrays : sequence of indexable data-structures
Indexable data-structures can be arrays, lists, dataframes or scipy
sparse matrices with consistent first dimension.
random_state : int or RandomState instance
Control the shuffling for reproducible behavior.
n_samples : int, None by default
Number of samples to generate. If left to None this is
automatically set to the first dimension of the arrays.
Returns
-------
shuffled_arrays : sequence of indexable data-structures
Sequence of shuffled views of the collections. The original arrays are
not impacted.
Examples
--------
It is possible to mix sparse and dense arrays in the same run::
>>> X = np.array([[1., 0.], [2., 1.], [0., 0.]])
>>> y = np.array([0, 1, 2])
>>> from scipy.sparse import coo_matrix
>>> X_sparse = coo_matrix(X)
>>> from sklearn.utils import shuffle
>>> X, X_sparse, y = shuffle(X, X_sparse, y, random_state=0)
>>> X
array([[ 0., 0.],
[ 2., 1.],
[ 1., 0.]])
>>> X_sparse # doctest: +ELLIPSIS +NORMALIZE_WHITESPACE
<3x2 sparse matrix of type '<... 'numpy.float64'>'
with 3 stored elements in Compressed Sparse Row format>
>>> X_sparse.toarray()
array([[ 0., 0.],
[ 2., 1.],
[ 1., 0.]])
>>> y
array([2, 1, 0])
>>> shuffle(y, n_samples=2, random_state=0)
array([0, 1])
See also
--------
:func:`sklearn.utils.resample`
"""
options['replace'] = False
return resample(*arrays, **options)
def safe_sqr(X, copy=True):
"""Element wise squaring of array-likes and sparse matrices.
Parameters
----------
X : array like, matrix, sparse matrix
copy : boolean, optional, default True
Whether to create a copy of X and operate on it or to perform
inplace computation (default behaviour).
Returns
-------
X ** 2 : element wise square
"""
X = check_array(X, accept_sparse=['csr', 'csc', 'coo'], ensure_2d=False)
if issparse(X):
if copy:
X = X.copy()
X.data **= 2
else:
if copy:
X = X ** 2
else:
X **= 2
return X
def gen_batches(n, batch_size):
"""Generator to create slices containing batch_size elements, from 0 to n.
The last slice may contain less than batch_size elements, when batch_size
does not divide n.
Examples
--------
>>> from sklearn.utils import gen_batches
>>> list(gen_batches(7, 3))
[slice(0, 3, None), slice(3, 6, None), slice(6, 7, None)]
>>> list(gen_batches(6, 3))
[slice(0, 3, None), slice(3, 6, None)]
>>> list(gen_batches(2, 3))
[slice(0, 2, None)]
"""
start = 0
for _ in range(int(n // batch_size)):
end = start + batch_size
yield slice(start, end)
start = end
if start < n:
yield slice(start, n)
def gen_even_slices(n, n_packs, n_samples=None):
"""Generator to create n_packs slices going up to n.
Pass n_samples when the slices are to be used for sparse matrix indexing;
slicing off-the-end raises an exception, while it works for NumPy arrays.
Examples
--------
>>> from sklearn.utils import gen_even_slices
>>> list(gen_even_slices(10, 1))
[slice(0, 10, None)]
>>> list(gen_even_slices(10, 10)) #doctest: +ELLIPSIS
[slice(0, 1, None), slice(1, 2, None), ..., slice(9, 10, None)]
>>> list(gen_even_slices(10, 5)) #doctest: +ELLIPSIS
[slice(0, 2, None), slice(2, 4, None), ..., slice(8, 10, None)]
>>> list(gen_even_slices(10, 3))
[slice(0, 4, None), slice(4, 7, None), slice(7, 10, None)]
"""
start = 0
if n_packs < 1:
raise ValueError("gen_even_slices got n_packs=%s, must be >=1" % n_packs)
for pack_num in range(n_packs):
this_n = n // n_packs
if pack_num < n % n_packs:
this_n += 1
if this_n > 0:
end = start + this_n
if n_samples is not None:
end = min(n_samples, end)
yield slice(start, end, None)
start = end
def _get_n_jobs(n_jobs):
"""Get number of jobs for the computation.
This function reimplements the logic of joblib to determine the actual
number of jobs depending on the cpu count. If -1 all CPUs are used.
If 1 is given, no parallel computing code is used at all, which is useful
for debugging. For n_jobs below -1, (n_cpus + 1 + n_jobs) are used.
Thus for n_jobs = -2, all CPUs but one are used.
Parameters
----------
n_jobs : int
Number of jobs stated in joblib convention.
Returns
-------
n_jobs : int
The actual number of jobs as positive integer.
Examples
--------
>>> from sklearn.utils import _get_n_jobs
>>> _get_n_jobs(4)
4
>>> jobs = _get_n_jobs(-2)
>>> assert jobs == max(cpu_count() - 1, 1)
>>> _get_n_jobs(0)
Traceback (most recent call last):
...
ValueError: Parameter n_jobs == 0 has no meaning.
"""
if n_jobs < 0:
return max(cpu_count() + 1 + n_jobs, 1)
elif n_jobs == 0:
raise ValueError('Parameter n_jobs == 0 has no meaning.')
else:
return n_jobs
def tosequence(x):
"""Cast iterable x to a Sequence, avoiding a copy if possible."""
if isinstance(x, np.ndarray):
return np.asarray(x)
elif isinstance(x, Sequence):
return x
else:
return list(x)
class ConvergenceWarning(UserWarning):
"""Custom warning to capture convergence problems"""
class DataDimensionalityWarning(UserWarning):
"""Custom warning to notify potential issues with data dimensionality"""
| bsd-3-clause |
ChinaQuants/Finance-Python | PyFin/examples/pandas_benchmark.py | 2 | 3644 | # -*- coding: utf-8 -*-
u"""
Created on 2016-12-27
@author: cheng.li
"""
import datetime as dt
import numpy as np
import numpy.matlib as matlib
import pandas as pd
from PyFin.api import *
from PyFin.Math.Accumulators import MovingAverage
n = 300
m = 300
index = pd.date_range(dt.datetime(1990, 1, 1), dt.datetime(1990, 1, 1) + dt.timedelta(days=m-1))
index = np.repeat(index, n)
df = pd.DataFrame(np.random.randn(n*m, 3), columns=['x', 'y', 'z'], index=index)
df['c'] = matlib.repmat(np.linspace(0, n-1, n, dtype=int), 1, m)[0]
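# Added note on the synthetic data: the frame holds n series observed over m
# days -- the date index repeats each day n times and column 'c' (0..n-1 for
# every day) identifies the series, so grouping by 'c' yields one group per
# series with m rows each.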
start = dt.datetime.now()
t = MA(20, 'x') / MA(30, 'y')
res = t.transform(df, category_field='c')
print("Finance-Python (group ma): {0}s".format(dt.datetime.now() - start))
start = dt.datetime.now()
groups = df.groupby('c')
res = groups['x'].rolling(20).mean() / groups['y'].rolling(30).mean()
print("Pandas (group ma): {0}s".format(dt.datetime.now() - start))
start = dt.datetime.now()
t = MovingAverage(20, 'x') / MovingAverage(30, 'x')
res = t.transform(df)
print("\nFinance-Python (rolling ma): {0}s".format(dt.datetime.now() - start))
start = dt.datetime.now()
res = df['x'].rolling(20).mean() / df['x'].rolling(30).mean()
print("Pandas (rolling ma): {0}s".format(dt.datetime.now() - start))
"""
Cross section analysis examples ...
"""
# rank
index = pd.date_range(dt.datetime(1990, 1, 1), dt.datetime(1990, 1, 1) + dt.timedelta(days=m-1))
index = np.repeat(index, n)
df = pd.DataFrame(np.random.randn(n*m, 1), columns=['x'], index=index)
ind = np.random.randint(0, int(n / 100), len(df))
df['c'] = matlib.repmat(np.linspace(0, n-1, n, dtype=int), 1, m)[0]
df['ind'] = ind
start = dt.datetime.now()
t = CSRank('x', groups='ind')
res1 = t.transform(df, category_field='c')
print("\nFinance-Python (cs rank): {0}s".format(dt.datetime.now() - start))
start = dt.datetime.now()
df2 = df.reset_index()
res2 = df2.groupby(['index', 'ind']).apply(lambda x: x['x'].rank())
print("Pandas (cs rank): {0}s".format(dt.datetime.now() - start))
res2 = pd.DataFrame({'index': res2.index.get_level_values(2), 'exp_rank': res2.values})
res2.sort_values('index', inplace=True)
res1['exp_rank'] = res2['exp_rank'].values
diff = res1['transformed'] - res1['exp_rank']
print("total rank difference: {0}".format(np.abs(diff).sum()))
# percentile
start = dt.datetime.now()
t = CSQuantiles('x', groups='ind')
res1 = t.transform(df, category_field='c')
print("\nFinance-Python (cs percentile): {0}s".format(dt.datetime.now() - start))
start = dt.datetime.now()
df2 = df.reset_index()
res2 = df2.groupby(['index', 'ind']).apply(lambda x: x['x'].rank() / (len(x) + 1))
print("Pandas (cs percentile): {0}s".format(dt.datetime.now() - start))
res2 = pd.DataFrame({'index': res2.index.get_level_values(2), 'exp_rank': res2.values})
res2.sort_values('index', inplace=True)
res1['exp_rank'] = res2['exp_rank'].values
diff = res1['transformed'] - res1['exp_rank']
print("total percentile difference: {0}".format(np.abs(diff).sum()))
# zscore
start = dt.datetime.now()
t = CSZScore('x', groups='ind')
res1 = t.transform(df, category_field='c')
print("\nFinance-Python (cs zscore): {0}s".format(dt.datetime.now() - start))
start = dt.datetime.now()
df2 = df.reset_index()
res2 = df2.groupby(['index', 'ind']).apply(lambda x: (x['x'] - x['x'].mean()) / x['x'].std(ddof=0))
print("Pandas (cs zscore): {0}s".format(dt.datetime.now() - start))
res2 = pd.DataFrame({'index': res2.index.get_level_values(2), 'exp_rank': res2.values})
res2.sort_values('index', inplace=True)
res1['exp_rank'] = res2['exp_rank'].values
diff = res1['transformed'] - res1['exp_rank']
print("total zscore difference: {0}".format(np.abs(diff).sum())) | mit |
hochthom/kaggle-taxi-ii | src/create_training_set_Experts.py | 2 | 2797 |
import time
import json
import numpy as np
import pandas as pd
from utils import haversineKaggle, heading, CITY_CENTER, LON_SCALE, LAT_SCALE
def process_row_training(X, row, end_pts):
pln = row['POLYLINE']
if len(pln) > 4:
pln = np.array(pln, ndmin=2)
for id_, pos in end_pts.iteritems():
# calc dist to end point
d1 = np.abs(pln[:, 0] - pos[0])*LON_SCALE + \
np.abs(pln[:, 1] - pos[1])*LAT_SCALE
if np.min(d1) < 0.05:  # within ~0.05 km (50 m) of the end point
idx = np.argmin(d1)
data = [row['CALL_TYPE'], row['TAXI_ID']]
data += process_trip(pln[:idx+1, :], row['TIMESTAMP'])
data += [pln[-1,0], pln[-1,1], len(pln)]
X.setdefault(id_, []).append(data)
return X
def process_row_test(row):
x = row['POLYLINE']
x = np.array(x, ndmin=2)
data = process_trip(x, row['TIMESTAMP'])
return pd.Series(np.array(data, dtype=float))
def process_trip(x, start_time):
tt = time.localtime(start_time)
data = [tt.tm_wday, tt.tm_hour]
# cumulative sum of distance
d_cs = 0
vcar = 0
vmed = 0
head = 0
if x.shape[0] > 1:
d1 = haversineKaggle(x[:-1,:], x[1:,:])
d_cs = np.sum(d1)
vmed = np.median(d1)
vcar = d1[-1]
head = heading(x[-2,:], x[-1,:])
# distances between the city center and the start / cut-off points
d_st = haversineKaggle(x[0,:], CITY_CENTER)[0]
h_st = heading(x[0,:], CITY_CENTER[0])
d_cut = haversineKaggle(x[-1,:], CITY_CENTER)[0]
h_cut = heading(CITY_CENTER[0], x[-1,:])
data += [x.shape[0], x[0,0], x[0,1], x[-1,0], x[-1,1], d_st, h_st, d_cut,
h_cut, d_cs, vmed, vcar, head]
return data
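# Added note: the list returned by process_trip() lines up, in order, with the
# FEATURES names below (wday, hour, length, xs, ys, x1, y1, d_st, h_st, d_cut,
# h_cut, d_cs, vmed, vcar, heading), i.e. 15 values per trip.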
FEATURES = ['wday','hour','length','xs','ys','x1','y1','d_st','h_st',
'd_cut','h_cut','d_cs','vmed','vcar','heading']
t0 = time.time()
print('reading test data ...')
df = pd.read_csv('../data/test.csv', converters={'POLYLINE': lambda x: json.loads(x)})#, nrows=10000)
ds = df.apply(process_row_test, axis=1)
ds.columns = FEATURES
df = df.join(ds)
end_pts = dict((i, x) for i, x in enumerate(df[['x1','y1']].values))
print('reading training data ...')
df = pd.read_csv('../data/train.csv', converters={'POLYLINE': lambda x: json.loads(x)})#, nrows=50000)
print ('preparing train data ...')
X = {}
for i in range(df.shape[0]):
X = process_row_training(X, df.iloc[i], end_pts)
del df
for id_, data in X.iteritems():
df = pd.DataFrame(data, columns = ['CALL_TYPE','TAXI_ID'] + FEATURES + ['xe','ye','len'])
df['TAXI_ID'] -= np.min(df['TAXI_ID']) # makes csv smaller -> ids in [0, 980]
df.to_csv('../data/train_pp_TST_%i.csv' % (id_), index=False)
print('Done in %.1f sec.' % (time.time() - t0))
| mit |
neilswainston/HolyGrail | holygrail/regression.py | 1 | 3681 | '''
synbiochem (c) University of Manchester 2015
synbiochem is licensed under the MIT License.
To view a copy of this license, visit <http://opensource.org/licenses/MIT/>.
@author: neilswainston
'''
import itertools
from sklearn.metrics import mean_squared_error
from synbiochem.utils import seq_utils, struct_utils
import climate
from holygrail import data
import sbclearn.theanets.theanets_utils as theanets_utils
def regress(sample_size, struct_sets, length, split, hidden_layers):
'''Regression of phi/psi angles of peptides, specified by structure
pattern as regexps.'''
climate.enable_default_logging()
x_data = []
y_data = []
while len(x_data) < sample_size:
# Get random peptides that match structure patterns from PDB:
pdb_data, _ = data.sample_seqs(sample_size, struct_sets, length)
# Convert peptides to inputs, based on amino acid properties:
curr_x_data = seq_utils.get_aa_props([i[0]
for v in pdb_data.values()
for i in v])
# Get torsion angles as outputs:
curr_y_data = [_get_phi_psi_data(v[2][0], v[2][1], v[3])
for sublist in pdb_data.values()
for v in sublist]
x_data.extend([d for i, d in enumerate(curr_x_data)
if len(curr_y_data[i]) / 2 == len(curr_y_data[i]) /
len(seq_utils.AA_PROPS['A'])])
y_data.extend([d for i, d in enumerate(curr_y_data)
if len(curr_y_data[i]) / 2 == len(curr_y_data[i]) /
len(seq_utils.AA_PROPS['A'])])
# Randomise input and output data order:
x_data, y_data = theanets_utils.randomise_order([x_data[:sample_size],
y_data[:sample_size]])
return _run_regressor(split, x_data, y_data, hidden_layers)
def _get_proximity_data(pdb_id):
'''Gets proximity_data for deep learning.'''
all_sequences = struct_utils.get_sequences(pdb_id)
all_proximity_data = struct_utils.calc_proximities(pdb_id)
# Ensure data consistency in terms of data lengths:
assert len(all_sequences) == len(all_proximity_data)
assert all([len(all_sequences[idx]) == len(all_proximity_data[idx])
for idx in range(len(all_sequences))])
prox_output = [list(itertools.chain.from_iterable(proximities))
for proximities in all_proximity_data]
return all_sequences, prox_output
def _get_phi_psi_data(pdb_id, chain, subrange):
'''Gets input/output for deep learning.'''
all_phi_psi_data = struct_utils.get_phi_psi_data(pdb_id, chain)
phi_psi_output = [list(itertools.chain.from_iterable(lst))
for lst in all_phi_psi_data]
return phi_psi_output[0][slice(*[x * 2 for x in subrange])]
def _get_sub_square_matrix(idx, lngth, size):
'''Returns square submatrix of length lngth from square matrix of given
size.'''
return [((idx + r) * size) + idx + c for r in range(lngth)
for c in range(lngth)]
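# Worked example (added for illustration): for a 4x4 matrix stored row-major,
# _get_sub_square_matrix(1, 2, 4) returns [5, 6, 9, 10], the flat indices of
# the 2x2 block whose top-left corner is at row 1, column 1.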
def _run_regressor(split, x_data, y_data, hidden_layers):
'''Runs the regressor job.'''
# Split data into training and classifying:
ind = int(split * len(x_data))
# Perform regression:
regressor = theanets_utils.Regressor(x_data[:ind], y_data[:ind])
regressor.train(hidden_layers=[hidden_layers])
y_pred = regressor.predict(x_data[ind:])
print len(y_data[ind:])
print len(y_pred)
print [len(d) for d in y_data[ind:]]
print [len(d) for d in y_pred]
return mean_squared_error(y_data[ind:], y_pred)
| mit |
AppliedBioinformatics/BioNanoAnalyst | scripts/App.py | 1 | 64208 | #!/usr/bin/env python
################################################################
## Application: BioNanoAnalyst
## Author: Yuxuan Yuan
## Email: [email protected]
## Last modification Date: 02/12/2016
## Copyright: Copyright (c) 2016 Applied Bioinformatics Group
## UWA, Perth WA, Australia
################################################################
#======================== libraries ==========================
import os
import re
import sys
import codecs
from time import time
from time import sleep
if sys.platform == 'win32':
import codecs
import webbrowser
import subprocess
import multiprocessing
try:
from PyQt4 import QtGui,QtCore
from PyQt4.QtGui import*
except ImportError:
from PySide import QtGui,QtCore
from PySide.QtGui import*
from datetime import datetime
from Frameworks import Ui_BioNanoAnalyst
from About import Ui_About
from Manual import Ui_Manual
from Settings import Ui_Settings
from Analysis import*
import pandas as pd
import numpy as np
import FileDialog
from matplotlib.pyplot import figure, show
import matplotlib.pyplot as plt
from matplotlib.figure import Figure
from matplotlib.lines import Line2D
from matplotlib.backends.backend_qt4agg import FigureCanvasQTAgg as FigureCanvas
from matplotlib.backends.backend_qt4agg import NavigationToolbar2QT as NavigationToolbar
#=======================================================================================
class Main(QtGui.QMainWindow):
def __init__(self, parent = None):
QtGui.QMainWindow.__init__(self, parent)
self.ui = Ui_BioNanoAnalyst()
self.ui.setupUi(self)
self.ref = None
self.enz = None
self.bnx = None
self.xmap = None
self.rcmap = None
self.qcmap = None
self.cs = None
self.about_window = None
self.manual_window = None
self.parameters_window = None
self.running = None
self.output_path = None
self.format = None
self.ctg_figure = None
self.axes = None
self.canvas = None
self.canvas_ctg = None
self.ref_table = QtGui.QTableWidget()
self.unqualified_table = QtGui.QTableWidget()
self.qualified_table = QtGui.QTableWidget()
self.BN_table = QtGui.QTableWidget()
self.unmapped_table = QtGui.QTableWidget()
self.mapped_table = QtGui.QTableWidget()
self.filtered_table = QtGui.QTableWidget()
self.no_data_table = QtGui.QTableWidget()
self.missing_table = QtGui.QTableWidget()
self.good_table = QtGui.QTableWidget()
self.site_p_table = QtGui.QTableWidget()
self.pos_p_table = QtGui.QTableWidget()
self.both_table = QtGui.QTableWidget()
#==================== menubar ====================
self.ui.actionNew.setShortcut('Ctrl+N')
self.ui.actionClose.setShortcut('Ctrl+Q')
self.ui.actionAbout.setShortcut('Ctrl+A')
self.ui.actionManual.setShortcut('Ctrl+M')
self.ui.actionFeedbacks.setShortcut('Ctrl+F')
self.ui.actionNew.triggered.connect(self.new)
self.ui.actionClose.triggered.connect(self.quit)
self.ui.actionAbout.triggered.connect(self.about)
self.ui.actionManual.triggered.connect(self.manual)
self.ui.actionFeedbacks.triggered.connect(self.feedbacks)
#=================== reference panel ==================
self.ui.ref_select_bn.clicked.connect(self.select_ref)
self.ui.ref_clear_bn.clicked.connect(self.clear_ref)
#================== raw data panel ====================
self.ui.raw_frame.setEnabled(False)
self.ui.raw_checkBox.stateChanged.connect(self.enable_raw)
self.ui.enzyme_combox.addItems(['BspQI','BbvCI','BsmI','BsrDI','bseCI','BssSI'])
self.ui.enzyme_combox.activated[str].connect(self.enzyme)
self.ui.raw_select_bn.clicked.connect(self.select_bnx)
self.ui.raw_clear_bn.clicked.connect(self.clear_bnx)
self.ui.raw_settings_bn.clicked.connect(self.raw_settings)
self.ui.raw_start_bn.clicked.connect(self.raw_start)
#================= aligned data panel =================
self.ui.aligned_frame.setEnabled(False)
self.ui.aligned_checkBox.stateChanged.connect(self.enable_aligned)
self.ui.xmap_select_bn.clicked.connect(self.select_xmap)
self.ui.xmap_clear_bn.clicked.connect(self.clear_xmap)
self.ui.rcmap_select_bn.clicked.connect(self.select_rcmap)
self.ui.rcmap_clear_bn.clicked.connect(self.clear_rcmap)
self.ui.qcmap_select_bn.clicked.connect(self.select_qcmap)
self.ui.qcmap_clear_bn.clicked.connect(self.clear_qcmap)
#================= analysis panel ===================
self.ui.analyse_bn.clicked.connect(self.analyse)
#=================== Stats panel ========================
self.ui.show_ref_bn.clicked.connect(self.show_ref)
self.ui.show_unqualified_bn.clicked.connect(self.show_unqualified)
self.ui.show_qualified_bn.clicked.connect(self.show_qualified)
self.ui.show_mapped_bn.clicked.connect(self.show_mapped)
self.ui.show_unmapped_bn.clicked.connect(self.show_unmapped)
self.ui.show_BN_bn.clicked.connect(self.show_BN)
self.ui.show_no_mapping_bn.clicked.connect(self.show_no_data)
self.ui.show_filtered_bn.clicked.connect(self.show_kicked)
self.ui.show_missing_bn.clicked.connect(self.show_missing)
self.ui.show_good_bn.clicked.connect(self.show_good)
self.ui.show_rsp_bn.clicked.connect(self.show_site_p)
self.ui.show_pp_bn.clicked.connect(self.show_pos_p)
self.ui.show_both_bn.clicked.connect(self.show_both)
self.clip = QtGui.QApplication.clipboard()
#==================== mapping status panel ===================
self.ui.save_select_bn.clicked.connect(self.save_select)
self.ui.save_clear_bn.clicked.connect(self.save_clear)
fig_format =['pdf','png','jpg','jpeg','eps', 'tif','ps', 'svg', 'svgz']
self.ui.fig_format_combox.addItems(fig_format)
self.ui.fig_format_combox.activated[str].connect(self.fig_format)
self.ui.fig_save_bn.clicked.connect(self.save_fig)
self.ui.save_all_bn.clicked.connect(self.save_all_figurs)
self.ui.save_qlt_bn.clicked.connect(self.save_qlt)
#======================================= Functions ==========================================
def new(self):
window = Main(self)
window.show()
def quit(self):
response=QtGui.QMessageBox.question(self, 'Warning !', 'Do you want to close the application?',
QtGui.QMessageBox.Yes | QtGui.QMessageBox.No)
if response == QtGui.QMessageBox.Yes:
sys.exit()
def about(self):
self.about_window = about()
self.about_window.show()
def manual(self):
self.manual_window = manual()
self.manual_window.show()
def feedbacks(self):
webbrowser.open("https://github.com/AppliedBioinformatics/BioNanoAnalyst/issues")
def select_ref(self):
if sys.platform == 'win32':
ref = QFileDialog.getOpenFileName(self, 'Select reference sequences', '','sequences (*fasta *fa *fna)')
self.ref=codecs.decode(str(ref)[1:-1].split(',')[0][2:-1],'unicode_escape')
self.ui.ref_input.setText(self.ref)
else:
self.ref = unicode(QFileDialog.getOpenFileName(self, 'Select reference sequences', '','sequences (*fasta *fa *fna)'))
self.ui.ref_input.setText(self.ref)
def clear_ref(self):
self.ui.ref_input.clear()
self.ref = None
def enable_raw(self):
if self.ui.raw_checkBox.isChecked():
if sys.platform =='win32':
self.ui.raw_frame.setEnabled(False)
self.ui.aligned_checkBox.setEnabled(False)
QtGui.QMessageBox.question(self, 'Warning !', 'Currently Option 1 is only available on Linux and macOS systems !',
QtGui.QMessageBox.Ok)
else:
self.ui.raw_frame.setEnabled(True)
self.ui.aligned_checkBox.setEnabled(False)
else:
self.ui.raw_frame.setEnabled(False)
self.ui.aligned_checkBox.setEnabled(True)
def enzyme(self):
self.enz = self.ui.enzyme_combox.currentText()
def select_bnx(self):
if sys.platform == 'win32':
bnx = QFileDialog.getOpenFileName(self,'Select bnx file','','bnx file (*.bnx)')
self.bnx = codecs.decode(str(bnx)[1:-1].split(',')[0][2:-1],'unicode_escape')
self.ui.raw_input.setText(self.bnx)
else:
self.bnx = unicode(QFileDialog.getOpenFileName(self,'Select bnx file','','bnx file (*.bnx)'))
self.ui.raw_input.setText(self.bnx)
def clear_bnx(self):
self.ui.raw_input.clear()
self.bnx = None
def raw_settings(self):
self.parameters_window = Settings()
self.parameters_window.show()
def raw_start(self):
if QtGui.QMessageBox.Ok not in [self.handle_ref_error(), self.handle_bnx_error(), self.handle_settings_error()]:
try:
self.ui.raw_status_label.setStyleSheet('color: blue')
self.ui.raw_status_label.setText('Running...')
qApp.processEvents()
self.ui.raw_status_label.repaint()
self.run_assembler()
self.run_refAligner()
self.ui.raw_status_label.setStyleSheet('color: green')
self.ui.raw_status_label.setText('Finished !')
name = self.ref.rsplit('/',1)[-1].rsplit('.',1)[0]
self.rcmap = self.settings['output_path']+'/'+name+'_r.cmap'
self.qcmap = self.settings['output_path']+'/'+name+'_q.cmap'
self.xmap = self.settings['output_path']+'/'+name+'.xmap'
except:
self.ui.raw_status_label.setStyleSheet('color: red')
self.ui.raw_status_label.setText('Crashed !')
self.ui.raw_status_label.repaint()
qApp.processEvents()
QtGui.QMessageBox.question(self, 'Error !', 'Something is wrong, please check the error messages!',QtGui.QMessageBox.Ok)
def enable_aligned(self):
if self.ui.aligned_checkBox.isChecked():
self.ui.aligned_frame.setEnabled(True)
self.ui.raw_checkBox.setEnabled(False)
else:
self.ui.aligned_frame.setEnabled(False)
self.ui.raw_checkBox.setEnabled(True)
def select_xmap(self):
if sys.platform == 'win32':
xmap = QFileDialog.getOpenFileName(self,'Select xmap file','','xmap file (*.xmap)')
self.xmap = codecs.decode(str(xmap)[1:-1].split(',')[0][2:-1],'unicode_escape')
self.ui.xmap_input.setText(self.xmap)
else:
self.xmap = unicode(QFileDialog.getOpenFileName(self,'Select xmap file','','xmap file (*.xmap)'))
self.ui.xmap_input.setText(self.xmap)
def clear_xmap(self):
self.ui.xmap_input.clear()
self.xmap = None
def select_rcmap(self):
if sys.platform == 'win32':
rcmap = QFileDialog.getOpenFileName(self,'Select ref cmap file','','cmap file (*_r.cmap)')
self.rcmap = codecs.decode(str(rcmap)[1:-1].split(',')[0][2:-1],'unicode_escape')
self.ui.rcmap_input.setText(self.rcmap)
else:
self.rcmap = unicode(QFileDialog.getOpenFileName(self,'Select ref cmap file','','cmap file (*_r.cmap)'))
self.ui.rcmap_input.setText(self.rcmap)
def clear_rcmap(self):
self.ui.rcmap_input.clear()
self.rcmap = None
def select_qcmap(self):
if sys.platform == 'win32':
qcmap = QFileDialog.getOpenFileName(self,'Select qry cmap file','','cmap file (*_q.cmap)')
self.qcmap = codecs.decode(str(qcmap)[1:-1].split(',')[0][2:-1],'unicode_escape')
self.ui.qcmap_input.setText(self.qcmap)
else:
self.qcmap = unicode(QFileDialog.getOpenFileName(self,'Select qry cmap file','','cmap file (*_q.cmap)'))
self.ui.qcmap_input.setText(self.qcmap)
def clear_qcmap(self):
self.ui.qcmap_input.clear()
self.qcmap = None
def ctgs(self):
ctg =self.ui.ctg_check_combox.currentText()
sub = self.running.overall[self.running.overall['contig']==ctg].reset_index(drop=True)
tsf=['siteID','position','coverage','score']
sub[tsf]=sub[tsf].apply(pd.to_numeric)
x=sub['siteID']
y1=sub['coverage']
y2=sub['score']
pos=sub['position']
D=dict()
for t in range(len(x)):
D[x[t]]=(pos[t],y2[t])
x_scale =[x.min()-2,x.max()+2]
#y1_scale=[y1.min()-2,y2.max()+2]
y2_scale=[-1,8]
try:
for items in reversed(range(self.ui.gridLayout_7.count())):
self.ui.gridLayout_7.itemAt(items).widget().deleteLater()
except:
pass
self.ctg_figure = Figure(facecolor='w')
self.axes =self.ctg_figure.add_axes([0.05, 0.14, 0.9, 0.8])
#self.axes.hold(False)
self.canvas_ctg = FigureCanvas(self.ctg_figure)
#self.toolbar= NavigationToolbar(self.canvas_ctg, self)
#self.ui.gridLayout_7.addWidget(self.toolbar,0, 0, 1, 1)
self.ui.gridLayout_7.addWidget(self.canvas_ctg, 1, 0, 1, 1)
ax2 = self.axes.twinx()
self.axes.plot(x, y1, 'bo-')
for j in range(len(y2)):
if y2[j]==0:
ax2.plot(x[j], y2[j], 'x',c='black')
if y2[j]==1:
ax2.plot(x[j], y2[j], 'd',c='red')
if y2[j]==2:
ax2.plot(x[j], y2[j], '<',c='purple')
if y2[j]==3:
ax2.plot(x[j], y2[j], '>',c='pink')
if y2[j]==4:
ax2.plot(x[j], y2[j], 'p',c='green')
lines = [
('Consistent', {'color': 'green', 'linestyle': ':', 'marker': 'p'}),
('Number discordant', {'color': 'pink', 'linestyle': ':', 'marker': '>'}),
('Distance discordant', {'color': 'purple', 'linestyle': ':', 'marker': '<'}),
('Num+dis discordant', {'color': 'red', 'linestyle': ':', 'marker': 'd'}),
( 'No data', {'color': 'black', 'linestyle': ':', 'marker': 'x'})
]
ax2.legend(
[create_dummy_line(**l[1]) for l in lines],
[l[0] for l in lines],
loc='upper center',bbox_to_anchor=(0.5, 1.06),
ncol=5, fancybox=True, shadow=True
)
ax2.plot(x,y2,'y-')
self.axes.set_xlabel('SiteID')
self.axes.set_xlim(x_scale)
self.axes.set_ylabel('Coverage',color='b')
#self.axes.set_ylim(y1_scale)
ax2.set_ylabel('score')
ax2.set_ylim(y2_scale)
scale=1.1
zp=ZoomPan()
figZoom1=zp.zoom_factory(self.axes, base_scale=scale)
figPan1=zp.pan_factory(self.axes)
figZoom2=zp.zoom_factory(ax2, base_scale=scale)
figPan2=zp.pan_factory(ax2.axes)
self.canvas_ctg.mpl_connect('pick_event', DataCursor(ax2,D))
ax2.set_picker(1)
self.canvas_ctg.show()
def save_select(self):
self.output_path = unicode(QFileDialog.getExistingDirectory())
self.ui.save_input.setText(self.output_path)
def save_clear(self):
self.ui.save_input.clear()
self.output_path = None
def fig_format(self):
self.format = self.ui.fig_format_combox.currentText()
def save_fig(self):
ctg = self.ui.ctg_check_combox.currentText()
format = self.ui.fig_format_combox.currentText()
try:
fig_name=r'%s/%s_mapping_status.%s'%(self.output_path,ctg,format)
if sys.platform =='win32':
fig_name = fig_name.replace('\\','/')
self.ctg_figure.savefig(fig_name)
except AttributeError:
return QtGui.QMessageBox.question(self, 'Error !', 'Please select a contig !',QtGui.QMessageBox.Ok)
except IOError:
QtGui.QMessageBox.question(self, 'Error !', 'Please select an output path !',QtGui.QMessageBox.Ok)
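    # Re-render and save one mapping-status figure per contig in the chosen format;
    # this mirrors the plotting logic of ctgs() without using the embedded canvas.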
def save_all_figurs(self):
try:
format = self.ui.fig_format_combox.currentText()
for ctg in self.running.overall['contig'].unique():
sub = self.running.overall[self.running.overall['contig']==ctg].reset_index(drop=True)
tsf=['siteID','coverage','score']
sub[tsf]=sub[tsf].apply(pd.to_numeric)
x=sub['siteID']
y1=sub['coverage']
y2=sub['score']
x_scale =[x.min()-2,x.max()+2]
y2_scale=[-1,8]
fig = plt.figure(figsize=(16.9,4),facecolor='w')
ax1 = fig.add_axes([0.05, 0.14, 0.9, 0.8])
ax2 = ax1.twinx()
ax1.plot(x, y1, 'bo-')
for j in range(len(y2)):
if y2[j]==0:
ax2.plot(x[j], y2[j], 'x',c='black')
if y2[j]==1:
ax2.plot(x[j], y2[j], 'd',c='red')
if y2[j]==2:
ax2.plot(x[j], y2[j], '<',c='purple')
if y2[j]==3:
ax2.plot(x[j], y2[j], '>',c='pink')
if y2[j]==4:
ax2.plot(x[j], y2[j], 'p',c='green')
lines = [
('Consistent', {'color': 'green', 'linestyle': ':', 'marker': 'p'}),
('Number discordant', {'color': 'pink', 'linestyle': ':', 'marker': '>'}),
('Distance discordant', {'color': 'purple', 'linestyle': ':', 'marker': '<'}),
('Num+dis discordant', {'color': 'red', 'linestyle': ':', 'marker': 'd'}),
( 'No data', {'color': 'black', 'linestyle': ':', 'marker': 'x'})]
ax2.legend(
[create_dummy_line(**l[1]) for l in lines],
[l[0] for l in lines],
loc='upper center',bbox_to_anchor=(0.5, 1.06),
ncol=5, fancybox=True, shadow=True)
ax2.plot(x,y2,'y-')
ax1.set_xlabel('SiteID')
ax1.set_xlim(x_scale)
ax1.set_ylabel('Coverage',color='b')
ax2.set_ylabel('score')
ax2.set_ylim(y2_scale)
fig_name=r'%s/%s_mapping_status.%s'%(self.output_path,ctg,format)
if sys.platform =='win32':
fig_name = fig_name.replace('\\','/')
fig.savefig(fig_name,format=format)
fig.clf()
plt.close()
except IOError:
            return QtGui.QMessageBox.question(self, 'Error !', 'Please select an output location !',QtGui.QMessageBox.Ok)
except AttributeError:
return QtGui.QMessageBox.question(self, 'Error !', 'Please check the contig(s) !',QtGui.QMessageBox.Ok)
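    # Write a GFF3-style report of contiguous mapping-status segments per contig and
    # dump the full table of paired alignments alongside it.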
def save_qlt(self):
try:
curr_time=datetime.now().strftime('%Y%m%d_%H-%M-%S')
if sys.platform=='win32':
path=r'%s'% self.output_path
else:
path=self.output_path
name='BioNanoAnalyst_report_cs%s_%s.gff3'% (self.cs,curr_time)
name_all='BioNanoAnalyst_report_overall_pairs_cs%s_%s.txt'% (self.cs, curr_time)
test=len(self.running.overall)
fd=open(os.path.join(path,name),'w')
for j in self.running.overall['contig'].unique():
sub=self.running.overall[self.running.overall['contig']==j].reset_index(drop=True)
startp=sub['position'][0]
i=1
ctg=sub['contig'][0]
while i < len(sub):
score1=sub['score'][i-1]
score2=sub['score'][i]
status1=sub['mapping_status'][i-1]
                    status2=sub['mapping_status'][i]
if score1==score2 and i!=len(sub)-1:
                        pass  # same score as the previous site: keep extending the current segment
if score1==score2 and i==len(sub)-1:
fd.write('%s\tBioNanoAnalyst\tOptical_mapping\t%s\t%s\t.\t.\t.\tName=%s\n'% (ctg,startp, sub['position'][i], sub['mapping_status'][i]))
if score1>score2 and i!=len(sub)-1:
endp=sub['position'][i]
fd.write('%s\tBioNanoAnalyst\tOptical_mapping\t%s\t%s\t.\t.\t.\tName=%s\n'% (ctg,startp, endp, sub['mapping_status'][i-1]))
startp=endp
if score1>score2 and i==len(sub)-1:
fd.write('%s\tBioNanoAnalyst\tOptical_mapping\t%s\t%s\t.\t.\t.\tName=%s\n'% (ctg,startp, sub['position'][i], sub['mapping_status'][i]))
if score1<score2 and i!=len(sub)-1:
endp=sub['position'][i-1]
fd.write('%s\tBioNanoAnalyst\tOptical_mapping\t%s\t%s\t.\t.\t.\tName=%s\n'% (ctg,startp, endp, sub['mapping_status'][i-1]))
startp=endp
if score1<score2 and i==len(sub)-1:
fd.write('%s\tBioNanoAnalyst\tOptical_mapping\t%s\t%s\t.\t.\t.\tName=%s\n'% (ctg,startp, sub['position'][i], sub['mapping_status'][i]))
i+=1
self.running.paired.to_csv(os.path.join(path,name_all),sep='\t', index=False)
except IOError:
            return QtGui.QMessageBox.question(self, 'Error !', 'Please select an output location !',QtGui.QMessageBox.Ok)
except AttributeError:
return QtGui.QMessageBox.question(self, 'Error !', 'No report can be saved !',QtGui.QMessageBox.Ok)
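    # The handle_*_error helpers below validate user input; each returns the result of a
    # QMessageBox (QtGui.QMessageBox.Ok) when something is missing or invalid, which
    # callers test for before starting a run.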
def handle_ref_error(self):
try:
if os.stat(self.ref).st_size>0:
with open(self.ref) as f:
for i in range(2):
line=f.next().strip()
if i == 0 and line[0]!='>':
return QtGui.QMessageBox.question(self, 'Error !', 'Please check your input reference !',
QtGui.QMessageBox.Ok)
if i == 1 and len(re.findall("[^ATGCN]", line.upper()))>0:
return QtGui.QMessageBox.question(self, 'Error !', 'Please check your input reference !',
QtGui.QMessageBox.Ok)
else:
return QtGui.QMessageBox.question(self, 'Warning !', 'The selected reference file is empty, please check !',
QtGui.QMessageBox.Ok)
except:
return QtGui.QMessageBox.question(self, 'Error !', 'Please input a reference file !',
QtGui.QMessageBox.Ok)
def handle_bnx_error(self):
try:
if os.stat(self.bnx).st_size == 0:
return QtGui.QMessageBox.question(self, 'Warning !', 'The selected bnx file is empty, please check !',
QtGui.QMessageBox.Ok)
except:
return QtGui.QMessageBox.question(self, 'Error !', 'Please input a .bnx file !', QtGui.QMessageBox.Ok)
def handle_settings_error(self):
try:
self.settings = self.parameters_window.parameters
except:
return QtGui.QMessageBox.question(self, 'Error !', 'Please check your settings !',
QtGui.QMessageBox.Ok)
def handle_xmap_error(self):
try:
if os.stat(self.xmap).st_size == 0:
return QtGui.QMessageBox.question(self, 'Warning !', 'The selected xmap file is empty, please check !',
QtGui.QMessageBox.Ok)
except:
return QtGui.QMessageBox.question(self, 'Error !', 'Please input a .xmap file !', QtGui.QMessageBox.Ok)
def handle_rcmap_error(self):
try:
if os.stat(self.rcmap).st_size == 0:
return QtGui.QMessageBox.question(self, 'Warning !', 'The selected _r.cmap file is empty, please check !',
QtGui.QMessageBox.Ok)
except:
return QtGui.QMessageBox.question(self, 'Error !', 'Please input a _r.cmap file !', QtGui.QMessageBox.Ok)
def handle_qcmap_error(self):
try:
if os.stat(self.qcmap).st_size == 0:
return QtGui.QMessageBox.question(self, 'Warning !', 'The selected _q.cmap file is empty, please check !',
QtGui.QMessageBox.Ok)
except:
return QtGui.QMessageBox.question(self, 'Error !', 'Please input a _q.cmap file !', QtGui.QMessageBox.Ok)
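    # Parse the xmap header to confirm that the selected _r.cmap and _q.cmap file names
    # match the reference/query cmap files the xmap was generated from.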
def corresponding_check(self):
if QtGui.QMessageBox.Ok not in [self.handle_xmap_error(), self.handle_rcmap_error(), self.handle_qcmap_error()]:
self.files = dict()
self.files['xmap'] = self.xmap
with open (self.xmap) as xmap:
for i in range(20): # the value here can be changed
try:
line = r'%s' % xmap.next().strip()
line = line.replace('\\','/')
except StopIteration:
pass
if line.startswith('# Reference Maps From:'):
try:
line = line.split()[-1].rsplit('/',1)[-1]
rcmap = self.rcmap.rsplit('/',1)[-1]
if line != rcmap:
return QtGui.QMessageBox.question(self, 'Error !', 'Rcmap file name in Xmap file is not the one you select !',
QtGui.QMessageBox.Ok)
else:
self.files['rcmap'] = self.rcmap
except:
return QtGui.QMessageBox.question(self, 'Error !', 'Please check your xmap file !', QtGui.QMessageBox.Ok)
if line.startswith('# Query Maps From:'):
try:
line = line.split()[-1]
line = line.rsplit('/',1)[-1]
qcmap = self.qcmap.rsplit('/',1)[-1]
if line != qcmap:
return QtGui.QMessageBox.question(self, 'Error !', 'Qcmap file name in Xmap file is not the one you select !',
QtGui.QMessageBox.Ok)
else:
self.files['qcmap'] = self.qcmap
except:
return QtGui.QMessageBox.question(self, 'Error !', 'Please check your xmap file !', QtGui.QMessageBox.Ok)
def handle_cs_error(self):
cs = self.ui.cs_input.text()
try:
self.cs = float(cs)
if self.cs < 0:
return QtGui.QMessageBox.question(self, 'Error !', 'Please input a confidence score >=0',
QtGui.QMessageBox.Ok)
except ValueError:
return QtGui.QMessageBox.question(self, 'Error !', 'Please input a confidence score >=0',
QtGui.QMessageBox.Ok)
def handle_O1(self):
if QtGui.QMessageBox.Ok in [self.handle_bnx_error(), self.handle_settings_error()]:
return False
else:
return True
def handle_O2(self):
if QtGui.QMessageBox.Ok in [self.handle_xmap_error(), self.handle_rcmap_error(), self.handle_qcmap_error()]:
return False
else:
return True
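    # Main analysis entry point: validate the inputs, run the BioNano comparison for
    # either Option 1 (raw .bnx assembled in-app) or Option 2 (pre-aligned xmap/cmap
    # files), then report statistics and draw the summary pie chart.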
def analyse(self):
if QtGui.QMessageBox.Ok not in [self.handle_ref_error()]:
pass
else:
return
if QtGui.QMessageBox.Ok not in [self.handle_cs_error()]:
pass
else:
return
if self.ui.raw_checkBox.isChecked():
if sys.platform == 'win32':
return QtGui.QMessageBox.question(self, 'Error !', 'Currently Option 1 is not available in Windows !',
QtGui.QMessageBox.Ok)
if self.handle_O1()== True:
try:
if os.path.exists(self.xmap) and os.path.exists(self.rcmap) and os.path.exists(self.qcmap):
t1=time()
try:
self.ui.ctg_check_combox.clear()
self.ui.textBrowser.clear()
self.ui.verticalLayout_3.takeAt(0).widget().setParent(None)
self.canvas_ctg.close()
except:
pass
try:
for items in reversed(range(self.ui.gridLayout_7.count())):
self.ui.gridLayout_7.itemAt(items).widget().deleteLater()
except:
pass
mapping_status_view=QtGui.QGraphicsView(self.ui.mapping_status_frame)
self.ui.gridLayout_7.addWidget(mapping_status_view, 0, 0, 1, 1)
try:
## Start analysis
self.running = BioNano(self.xmap, self.rcmap, self.qcmap, self.cs, self.ref)
self.running.convert_tables()
## Emit the running signal
self.ui.analyse_status_label.setStyleSheet('color: blue')
self.ui.analyse_status_label.setText('Running...')
qApp.processEvents()
self.ui.analyse_status_label.repaint()
self.running.BioNano_stats()
self.running.parse_fasta()
self.running.qualification_filter()
self.running.mapping_filter()
self.running.getDetail()
self.running.getMissing()
self.running.getPaired()
self.running.checkStatus()
self.running.merge()
self.stats()
self.ui.ctg_check_combox.addItems([i for i in self.running.mapped['contig']])
self.ui.ctg_check_combox.activated[str].connect(self.ctgs)
## Make graphs
self.figure = plt.figure(facecolor='w')
self.figure.hold(False)
self.canvas = FigureCanvas(self.figure)
self.canvas.setMaximumSize(720,420)
self.ui.verticalLayout_3.addWidget(self.canvas)
ax=plt.subplot()
labels = np.char.array(['unqualified', 'no mapping','filtered', 'mapped'])
sizes = np.array([self.unqualified_len, self.no_mapping_len, self.filtered_len, self.mapped_len])
colors = ['lightcoral', 'gold', 'lightskyblue', 'yellowgreen']
explode = (0, 0, 0, 0.1)
porcent = 100.*sizes/sizes.sum()
patches, texts= plt.pie (sizes, colors=colors, startangle=140, shadow=True,explode=explode)
Labels=['{0} - {1:1.2f} %'.format(i,j) for i,j in zip(labels, porcent)]
plt.legend(patches, Labels, loc='upper left', bbox_to_anchor=(-0.1, 1.),fontsize=8)
plt.text(0.5,1.08,'Proportion of sublength to total reference length',horizontalalignment='center',fontsize=18,transform = ax.transAxes)
#plt.pie(sizes, explode=explode, labels=labels, colors=colors,autopct='%1.1f%%', shadow=True, startangle=140)
#plt.title('Proportion of sublength to total reference length')
plt.axis('equal')
self.canvas.draw()
## Emit the 'finished' signal
self.ui.analyse_status_label.setStyleSheet('color: green')
self.ui.analyse_status_label.setText('Finished !')
t2=time()
print 'The running time is %.2f seconds'%(t2-t1)
except AttributeError:
return
except:
QtGui.QMessageBox.question(self, 'Error !', 'BioNano optical mapping from option 1 is incomplete, please check !',
QtGui.QMessageBox.Ok)
if self.ui.aligned_checkBox.isChecked():
if self.handle_O2()== True:
t1=time()
try:
self.ui.ctg_check_combox.clear()
self.ui.textBrowser.clear()
self.ui.verticalLayout_3.takeAt(0).widget().setParent(None)
self.canvas_ctg.close()
except:
pass
try:
for items in reversed(range(self.ui.gridLayout_7.count())):
#self.ui.gridLayout_7.itemAt(items).widget().deleteLater()
self.ui.gridLayout_7.itemAt(items).widget().setParent(None)
except:
pass
mapping_status_view=QtGui.QGraphicsView(self.ui.mapping_status_frame)
self.ui.gridLayout_7.addWidget(mapping_status_view, 0, 0, 1, 1)
try:
## Start analysis
self.running = BioNano(self.xmap, self.rcmap, self.qcmap, self.cs, self.ref)
self.running.convert_tables()
## Emit the running signal
self.ui.analyse_status_label.setStyleSheet('color: blue')
self.ui.analyse_status_label.setText('Running...')
qApp.processEvents()
self.ui.analyse_status_label.repaint()
self.running.BioNano_stats()
self.running.parse_fasta()
self.running.qualification_filter()
self.running.mapping_filter()
self.running.getDetail()
self.running.getMissing()
self.running.getPaired()
self.running.checkStatus()
self.running.merge()
self.stats()
self.ui.ctg_check_combox.addItems([i for i in self.running.mapped['contig']])
self.ui.ctg_check_combox.activated[str].connect(self.ctgs)
## Make graphs
self.figure = plt.figure(facecolor='w')
self.figure.hold(False)
self.canvas = FigureCanvas(self.figure)
self.canvas.setMaximumSize(720,420)
self.ui.verticalLayout_3.addWidget(self.canvas)
ax=plt.subplot()
labels = np.char.array(['unqualified', 'no mapping','filtered', 'mapped'])
sizes = np.array([self.unqualified_len, self.no_mapping_len, self.filtered_len, self.mapped_len])
colors = ['lightcoral', 'gold', 'lightskyblue', 'yellowgreen']
explode = (0, 0, 0, 0.1)
porcent = 100.*sizes/sizes.sum()
patches, texts= plt.pie (sizes, colors=colors, startangle=140, shadow=True,explode=explode)
Labels=['{0} - {1:1.2f} %'.format(i,j) for i,j in zip(labels, porcent)]
plt.legend(patches, Labels, loc='upper left', bbox_to_anchor=(-0.1, 1.),fontsize=8)
plt.text(0.5,1.08,'Proportion of sublength to total reference length',horizontalalignment='center',fontsize=18,transform = ax.transAxes)
#plt.pie(sizes, explode=explode, labels=labels, colors=colors,autopct='%1.1f%%', shadow=True, startangle=140)
#plt.title('Proportion of sublength to total reference length')
plt.axis('equal')
self.canvas.draw()
## Emit the 'finished' signal
self.ui.analyse_status_label.setStyleSheet('color: green')
self.ui.analyse_status_label.setText('Finished !')
t2=time()
print 'The running time is %.2f seconds' % (t2-t1)
except AttributeError:
return
if not self.ui.raw_checkBox.isChecked() and not self.ui.aligned_checkBox.isChecked():
return QtGui.QMessageBox.question(self, 'Error !', 'Please select Option 1 or Option 2 and fill it !',
QtGui.QMessageBox.Ok)
def show_ref(self):
try:
self.ref_table.setWindowTitle('Information for reference')
self.ref_table.setColumnCount(len(self.running.ref_detail.columns))
self.ref_table.setRowCount(len(self.running.ref_detail.index))
for i in range(len(self.running.ref_detail.index)):
for j in range(len(self.running.ref_detail.columns)):
self.ref_table.setItem(i,j,QTableWidgetItem(str(self.running.ref_detail.iat[i, j])))
self.ref_table.setHorizontalHeaderLabels(['Index','Contig','Length(bp)','Splitted_ctg',
'Start','End','Id_in_all'])
self.ref_table.setMinimumSize(380,560)
self.ref_table.show()
except:
pass
def show_unqualified(self):
try:
self.unqualified_table.setWindowTitle('Information for unqualified contigs')
self.unqualified_table.setColumnCount(len(self.running.unqualified.columns))
self.unqualified_table.setRowCount(len(self.running.unqualified.index))
for i in range(len(self.running.unqualified.index)):
for j in range(len(self.running.unqualified.columns)):
self.unqualified_table.setItem(i,j,QTableWidgetItem(str(self.running.unqualified.iat[i, j])))
self.unqualified_table.setHorizontalHeaderLabels(['index','contig','length(bp)'])
self.unqualified_table.setMinimumSize(380,560)
self.unqualified_table.show()
except:
pass
def show_qualified(self):
try:
self.qualified_table.setWindowTitle('Information for qualified contigs')
self.qualified_table.setColumnCount(len(self.running.qualified.columns))
self.qualified_table.setRowCount(len(self.running.qualified.index))
for i in range(len(self.running.qualified.index)):
for j in range(len(self.running.qualified.columns)):
self.qualified_table.setItem(i,j,QTableWidgetItem(str(self.running.qualified.iat[i, j])))
self.qualified_table.setHorizontalHeaderLabels(['index','contig','length(bp)','numSites'])
self.qualified_table.setMinimumSize(450,560)
self.qualified_table.show()
except:
pass
def show_BN(self):
try:
self.BN_table.setWindowTitle('Information for matched BioNano data')
self.BN_table.setColumnCount(len(self.running.BN.columns))
self.BN_table.setRowCount(len(self.running.BN.index))
for i in range(len(self.running.BN.index)):
for j in range(len(self.running.BN.columns)):
self.BN_table.setItem(i,j,QTableWidgetItem(str(self.running.BN.iat[i, j])))
self.BN_table.setHorizontalHeaderLabels(['CMapId','ContigLength','NumSites'])
self.BN_table.setMinimumSize(380,560)
self.BN_table.show()
except:
pass
def show_unmapped(self):
try:
self.unmapped_table.setWindowTitle('Information for unmapped contigs')
self.unmapped_table.setColumnCount(len(self.running.unmapped.columns))
self.unmapped_table.setRowCount(len(self.running.unmapped.index))
for i in range(len(self.running.unmapped.index)):
for j in range(len(self.running.unmapped.columns)):
self.unmapped_table.setItem(i,j,QTableWidgetItem(str(self.running.unmapped.iat[i, j])))
self.unmapped_table.setHorizontalHeaderLabels(['index','contig','length(bp)','numSites'])
self.unmapped_table.setMinimumSize(450,560)
self.unmapped_table.show()
except:
pass
def show_mapped(self):
try:
self.mapped_table.setWindowTitle('Information for mapped contigs')
self.mapped_table.setColumnCount(len(self.running.mapped.columns))
self.mapped_table.setRowCount(len(self.running.mapped.index))
for i in range(len(self.running.mapped.index)):
for j in range(len(self.running.mapped.columns)):
self.mapped_table.setItem(i,j,QTableWidgetItem(str(self.running.mapped.iat[i, j])))
self.mapped_table.setHorizontalHeaderLabels(['index','contig','length(bp)','numSites'])
self.mapped_table.setMinimumSize(450,560)
self.mapped_table.show()
except:
pass
def show_kicked(self):
try:
self.filtered_table.setWindowTitle('Information for filtered contigs')
self.filtered_table.setColumnCount(len(self.running.kicked.columns))
self.filtered_table.setRowCount(len(self.running.kicked.index))
for i in range(len(self.running.kicked.index)):
for j in range(len(self.running.kicked.columns)):
self.filtered_table.setItem(i,j,QTableWidgetItem(str(self.running.kicked.iat[i, j])))
self.filtered_table.setHorizontalHeaderLabels(['index','contig','length(bp)','numSites'])
self.filtered_table.setMinimumSize(450,560)
self.filtered_table.show()
except:
pass
def show_no_data(self):
try:
self.no_data_table.setWindowTitle('Information for contigs with no BioNano data matched')
self.no_data_table.setColumnCount(len(self.running.no_data.columns))
self.no_data_table.setRowCount(len(self.running.no_data.index))
for i in range(len(self.running.no_data.index)):
for j in range(len(self.running.no_data.columns)):
self.no_data_table.setItem(i,j,QTableWidgetItem(str(self.running.no_data.iat[i, j])))
self.no_data_table.setHorizontalHeaderLabels(['index','contig','length(bp)','numSites'])
self.no_data_table.setMinimumSize(450,560)
self.no_data_table.show()
except:
pass
def show_missing(self):
try:
self.missing_table.setWindowTitle('Information for ref restriction site missing mapping')
self.missing_table.setColumnCount(len(self.running.missing.columns))
self.missing_table.setRowCount(len(self.running.missing.index))
for i in range(len(self.running.missing.index)):
for j in range(len(self.running.missing.columns)):
self.missing_table.setItem(i,j,QTableWidgetItem(str(self.running.missing.iat[i, j])))
self.missing_table.setHorizontalHeaderLabels(['index','contig','siteID','position','numSites'])
self.missing_table.setMinimumSize(500,560)
self.missing_table.show()
except:
pass
def show_good(self):
try:
self.good_table.setWindowTitle('Information for well matched ref restriction site')
self.good_table.setColumnCount(len(self.running.good.columns))
self.good_table.setRowCount(len(self.running.good.index))
for i in range(len(self.running.good.index)):
for j in range(len(self.running.good.columns)):
self.good_table.setItem(i,j,QTableWidgetItem(str(self.running.good.iat[i,j])))
self.good_table.setHorizontalHeaderLabels(['index','contig','siteID','position','numSites'])
self.good_table.setMinimumSize(500,560)
self.good_table.show()
except:
pass
def show_site_p(self):
try:
self.site_p_table.setWindowTitle('Information for ref restriction site having number of site matching problem')
self.site_p_table.setColumnCount(len(self.running.q_rs_a.columns))
self.site_p_table.setRowCount(len(self.running.q_rs_a.index))
for i in range(len(self.running.q_rs_a.index)):
for j in range(len(self.running.q_rs_a.columns)):
self.site_p_table.setItem(i,j,QTableWidgetItem(str(self.running.q_rs_a.iat[i,j])))
self.site_p_table.setHorizontalHeaderLabels(['index','contig','siteID','position','numSites'])
self.site_p_table.setMinimumSize(500,560)
self.site_p_table.show()
except:
pass
def show_pos_p(self):
try:
            self.pos_p_table.setWindowTitle('Information for ref restriction site having position matching problem')
self.pos_p_table.setColumnCount(len(self.running.q_dis_a.columns))
self.pos_p_table.setRowCount(len(self.running.q_dis_a.index))
for i in range(len(self.running.q_dis_a.index)):
for j in range(len(self.running.q_dis_a.columns)):
self.pos_p_table.setItem(i,j,QTableWidgetItem(str(self.running.q_dis_a.iat[i,j])))
self.pos_p_table.setHorizontalHeaderLabels(['index','contig','siteID','position','numSites'])
self.pos_p_table.setMinimumSize(500,560)
self.pos_p_table.show()
except:
pass
def show_both(self):
try:
self.both_table.setWindowTitle('Information for ref restriction site having both problems')
self.both_table.setColumnCount(len(self.running.both.columns))
self.both_table.setRowCount(len(self.running.both.index))
for i in range(len(self.running.both.index)):
for j in range(len(self.running.both.columns)):
self.both_table.setItem(i,j,QTableWidgetItem(str(self.running.both.iat[i,j])))
self.both_table.setHorizontalHeaderLabels(['index','contig','siteID','position','numSites'])
self.both_table.setMinimumSize(500,560)
self.both_table.show()
except:
pass
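    # Aggregate contig counts and total lengths per category, append a tab-separated
    # summary table to the text browser, and cache the lengths used by the pie chart.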
def stats(self):
unqualified = None
unqualified_len = None
unmapped = None
unmapped_len = None
no_mapping = None
no_mapping_len = None
filtered =None
filtered_len =None
ref_ctgs=len(self.running.ref_id)
ref_len = sum([i for i in self.running.ref_inf.values()])/1.0e6
ref_N = self.running.N/1.0e6
mapped = len(self.running.mapped.index)
mapped_len = sum([int(i) for i in self.running.mapped['length(bp)']])
BN = len(self.running.BN.index)
BN_len = sum([int(i) for i in self.running.BN['ContigLength']])/1.0e6
qualified = len(self.running.qualified.index)
qualified_len = sum([int(i) for i in self.running.qualified['length(bp)']])/1.0e6
try:
unqualified = len(self.running.unqualified.index)
unqualified_len = sum([int(i) for i in self.running.unqualified['length(bp)']])
except:
pass
try:
unmapped = len(self.running.unmapped.index)
unmapped_len = sum([int(i) for i in self.running.unmapped['length(bp)']])/1.0e6
except:
pass
try:
no_mapping = len(self.running.no_data.index)
no_mapping_len = sum([int(i) for i in self.running.no_data['length(bp)']])
except:
pass
try:
filtered = len(self.running.kicked.index)
filtered_len = sum([int(i) for i in self.running.kicked['length(bp)']])
except:
pass
self.ui.textBrowser.append('\n')
self.ui.textBrowser.append('When confidence score = %s, the statistics are listed as below:\n'%self.cs)
self.ui.textBrowser.append('Subject\tNumContig\tTotalLength(Mb)\tLen2Ref (%)\t\tNum2Ref (%)')
self.ui.textBrowser.append('Reference\t%s\t%.2f\t\t100.00\t\t100.00'%(ref_ctgs,ref_len))
self.ui.textBrowser.append('Qualified\t%s\t%.2f\t\t%.2f\t\t%.2f'%(qualified,qualified_len,qualified_len/ref_len*100,float(qualified)/ref_ctgs*100))
try:
self.ui.textBrowser.append('Unqualified\t%s\t%.2f\t\t%.2f\t\t%.2f'%(unqualified,unqualified_len/1.0e6,unqualified_len/1.0e4/ref_len,float(unqualified)/ref_ctgs*100))
except:
pass
try:
self.ui.textBrowser.append('Unmapped\t%s\t%.2f\t\t%.2f\t\t%.2f'%(unmapped,unmapped_len,unmapped_len/ref_len*100,float(unmapped)/ref_ctgs*100))
except:
pass
try:
self.ui.textBrowser.append('No mapping\t%s\t%.2f\t\t%.2f\t\t%.2f'%(no_mapping,no_mapping_len/1.0e6,no_mapping_len/1.0e4/ref_len,float(no_mapping)/ref_ctgs*100))
except:
pass
try:
self.ui.textBrowser.append('Filtered\t%s\t%.2f\t\t%.2f\t\t%.2f'%(filtered,filtered_len/1.0e6,filtered_len/1.0e4/ref_len,float(filtered)/ref_ctgs*100))
except:
pass
self.ui.textBrowser.append('Mapped\t%s\t%.2f\t\t%.2f\t\t%.2f'%(mapped,mapped_len/1.0e6,mapped_len/1.0e4/ref_len,float(mapped)/ref_ctgs*100))
self.ui.textBrowser.append('BioNano\t%s\t%.2f\t\tNA\t\tNA\n'%(BN,BN_len))
self.ui.textBrowser.append('Note: The total length of Ns in the reference is %s Mb.\n'%ref_N)
self.unqualified_len = unqualified_len
self.mapped_len = mapped_len
self.no_mapping_len = no_mapping_len
self.filtered_len = filtered_len
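    # Build and launch the external assembly pipeline (pipelineCL.py) via os.system,
    # picking an optArguments XML according to the configured genome size.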
def run_assembler(self):
pgm = 'python'
script = self.settings['script_path']+'/pipelineCL.py'
thread = '-T %s'% self.settings['threads']
jobs = '-j %s'% self.settings['jobs']
iter = '-i %s'% self.settings['iteration']
tools = '-t %s'% self.settings['tools_path']
if self.settings['gs']>=500:
xml = '-a ' + self.settings['script_path']+'/optArguments_human.xml'
elif self.settings['gs']<500 and self.settings['gs']>100:
xml = '-a '+ self.settings['script_path']+'/optArguments_medium.xml'
else:
xml = '-a '+ self.settings['script_path']+'/optArguments_small.xml'
name = self.ref.rsplit('/',1)[-1].rsplit('.',1)[0]
self.raw_output = '-l '+self.settings['output_path']+'/'+name
bnx = self.bnx.replace(' ','\ ')
bnx = '-b '+bnx
cmd = '%s %s -w -d -U %s %s %s %s %s %s %s'%(pgm, script, thread, jobs, iter, tools, xml, self.raw_output, bnx)
os.system(cmd)
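    # Create an in-silico reference cmap for the chosen enzyme and align the assembled
    # consensus maps against it with RefAligner; the resulting xmap/_r.cmap/_q.cmap
    # paths are stored for the downstream analysis step.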
def run_refAligner(self):
name = self.ref.rsplit('/',1)[-1].rsplit('.',1)[0]
self.enzyme()
make_RefCmap(self.ref, enz=str(self.enz), min_len=20, min_nsite=5, path=self.settings['output_path'])
rcmap = self.settings['output_path']+'/'+name+'_'+self.enz+'.cmap'
pgm = self.settings['tools_path']+'/RefAligner'
ref = '-ref %s'% rcmap
qcmap = '-i '+self.settings['output_path']+'/'+name+'/contigs/exp_refineFinal1/EXP_REFINEFINAL1.cmap'
        out = self.settings['output_path']+'/'+name
        outprefix = '-o %s'% out
cmd = '%s -f %s %s %s -maxthreads 32 -res 2.9 -FP 0.6 -FN 0.06 -sf 0.20 -sd 0.0 -sr 0.01 -extend 1 -outlier 0.0001 -endoutlier 0.001 -PVendoutlier -deltaX 12 -deltaY 12 -xmapchim 12 -hashgen 5 7 2.4 1.5 0.05 5.0 1 1 1 -hash -hashdelta 50 -mres 1e-3 -hashMultiMatch 100 -insertThreads 4 -nosplit 2 -biaswt 0 -T 1e-12 -S -1000 -indel -PVres 2 -rres 0.9 -MaxSE 0.5 -HSDrange 1.0 -outlierBC -xmapUnique 12 -AlignRes 2. -outlierExtend 12 24 -Kmax 12 -f -maxmem 128 -BestRef 1 -stdout -stderr' % (pgm, ref, qcmap, outprefix)
os.system(cmd)
        # build the output file paths from the prefix itself (without the '-o ' flag)
        self.xmap = out+'.xmap'
        self.rcmap = out+'_r.cmap'
        self.qcmap = out+'_q.cmap'
def create_dummy_line(**kwds):
return Line2D([], [], **kwds)
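# Settings dialog for Option 1: collects tool/script locations, genome size, output path
# and threading options, and exposes them through self.parameters once confirmed.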
class Settings(QtGui.QWidget):
def __init__(self):
QtGui.QWidget.__init__(self)
self.ui = Ui_Settings()
self.ui.setupUi(self)
self.tools_path = None
self.scripts_path = None
self.gs = None
self.output_path = None
self.threads = 4
self.jobs = 2
        self.iteration = 5
self.ui.tools_location_selecet_bn.clicked.connect(self.select_tools_path)
self.ui.tools_location_clear_bn.clicked.connect(self.clear_tools_path)
self.ui.scripts_location_selecet_bn.clicked.connect(self.select_scripts_path)
self.ui.scripts_location_clear_bn.clicked.connect(self.clear_scripts_path)
self.ui.threads_spinBox.setValue(self.threads)
self.ui.threads_spinBox.setMinimum(1)
self.ui.jobs_spinBox.setValue(self.jobs)
self.ui.jobs_spinBox.setMinimum(1)
        self.ui.iteration_spinBox.setValue(self.iteration)
self.ui.iteration_spinBox.setMinimum(1)
self.ui.output_select_bn.clicked.connect(self.select_output_path)
self.ui.output_clear_bn.clicked.connect(self.clear_output_path)
self.ui.setting_confirm_frame.accepted.connect(self.confirm)
self.ui.setting_confirm_frame.rejected.connect(self.cancel)
def select_tools_path(self):
self.tools_path = unicode(QFileDialog.getExistingDirectory())
self.ui.tools_location_input.setText(self.tools_path)
def clear_tools_path(self):
self.ui.tools_location_input.clear()
self.tools_path = None
def select_scripts_path(self):
self.scripts_path = unicode(QFileDialog.getExistingDirectory())
self.ui.scripts_location_input.setText(self.scripts_path)
def clear_scripts_path(self):
self.ui.scripts_location_input.clear()
self.scripts_path = None
def select_output_path(self):
        self.output_path = str(QFileDialog.getExistingDirectory())
self.ui.output_input.setText(self.output_path)
def clear_output_path(self):
self.ui.output_input.clear()
self.output_path = None
def confirm_tool_path(self):
try:
if sys.platform == 'win32':
assembler = (self.tools_path + '\WindowsAssembler.exe').replace('\\','/')
refaligner = (self.tools_path + '\WindowsRefAligner.exe').replace('\\','/')
if not (os.path.exists(assembler) and os.path.exists(refaligner)):
return QtGui.QMessageBox.question(self, 'Error !', 'Please check the tool path or tools inside !',
QtGui.QMessageBox.Ok)
else:
if os.access(assembler, os.X_OK) == False:
return QtGui.QMessageBox.question(self, 'Warning!', 'WindowsAssembler.exe is not executable, please check !',
QtGui.QMessageBox.Ok)
if os.access(refaligner, os.X_OK) == False:
return QtGui.QMessageBox.question(self, 'Warning!', 'WindowsRefAligner.exe is not executable, please check !',
QtGui.QMessageBox.Ok)
else:
assembler = self.tools_path + '/Assembler'
refaligner = self.tools_path + '/RefAligner'
if not (os.path.exists(assembler) and os.path.exists(refaligner)):
return QtGui.QMessageBox.question(self, 'Error !', 'Please check the tool path or tools inside !',
QtGui.QMessageBox.Ok)
else:
if os.access(assembler, os.X_OK) == False:
return QtGui.QMessageBox.question(self, 'Warning!', 'Assembler is not executable, please check !',
QtGui.QMessageBox.Ok)
if os.access(refaligner, os.X_OK) == False:
return QtGui.QMessageBox.question(self, 'Warning!', 'RefAligner is not executable, please check !',
QtGui.QMessageBox.Ok)
except:
return QtGui.QMessageBox.question(self, 'Error !', 'Please check the tool path or tools inside !', QtGui.QMessageBox.Ok)
def confirm_scripts_path(self):
try:
script1=self.scripts_path+'/pipelineCL.py'
script2=self.scripts_path+'/Pipeline.py'
if not (os.path.exists(script1) and os.path.exists(script2)):
return QtGui.QMessageBox.question(self, 'Error !', 'Please check the scripts path or scripts inside !',
QtGui.QMessageBox.Ok)
except:
return QtGui.QMessageBox.question(self, 'Error !', 'Please check the scripts path or scripts inside !',
QtGui.QMessageBox.Ok)
def confirm_output_path(self):
try:
len(self.output_path)
except:
            return QtGui.QMessageBox.question(self, 'Error !', 'Please select an output path !', QtGui.QMessageBox.Ok)
def confirm_gs(self):
self.gs= self.ui.gs_input.text()
try:
self.gs = float(self.gs)
if self.gs<=0:
return QtGui.QMessageBox.question(self, 'Error !', 'Please input a genome size bigger than 0 !', QtGui.QMessageBox.Ok)
except ValueError:
return QtGui.QMessageBox.question(self, 'Error !', 'Please check your input genome size !', QtGui.QMessageBox.Ok)
def confirm(self):
if QtGui.QMessageBox.Ok not in [self.confirm_tool_path(),self.confirm_scripts_path(),self.confirm_gs(),self.confirm_output_path()]:
self.parameters = dict()
self.parameters['tools_path'] = self.tools_path
self.parameters['script_path'] = self.scripts_path
self.parameters['threads'] = self.ui.threads_spinBox.value()
self.parameters['jobs'] = self.ui.jobs_spinBox.value()
self.parameters['iteration'] = self.ui.iteration_spinBox.value()
self.parameters['gs'] = self.gs
self.parameters['output_path'] = self.output_path
self.close()
def cancel(self):
self.close()
class about(QtGui.QWidget):
def __init__(self):
QtGui.QWidget.__init__(self)
self.ui = Ui_About()
self.ui.setupUi(self)
self.ui.about_bn.clicked.connect(self.confirm)
def confirm(self):
self.close()
class manual(QtGui.QWidget):
def __init__(self):
QtGui.QWidget.__init__(self)
self.ui = Ui_Manual()
self.ui.setupUi(self)
self.ui.manual_bn.clicked.connect(self.confirm)
def confirm(self):
self.close()
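# Helper that attaches scroll-wheel zoom and click-drag pan callbacks to a matplotlib
# Axes; holding 'x' or 'y' restricts zooming to that axis.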
class ZoomPan:
def __init__(self):
self.press = None
self.cur_xlim = None
self.cur_ylim = None
self.x0 = None
self.y0 = None
self.x1 = None
self.y1 = None
self.xpress = None
self.ypress = None
self.xzoom = True
self.yzoom = True
self.cidBP = None
self.cidBR = None
self.cidBM = None
self.cidKeyP = None
self.cidKeyR = None
self.cidScroll = None
def zoom_factory(self, ax, base_scale = 2.):
def zoom(event):
cur_xlim = ax.get_xlim()
cur_ylim = ax.get_ylim()
xdata = event.xdata # get event x location
ydata = event.ydata # get event y location
if(xdata is None):
return()
if(ydata is None):
return()
if event.button == 'down':
# deal with zoom in
scale_factor = 1 / base_scale
elif event.button == 'up':
# deal with zoom out
scale_factor = base_scale
else:
# deal with something that should never happen
scale_factor = 1
print(event.button)
new_width = (cur_xlim[1] - cur_xlim[0]) * scale_factor
new_height = (cur_ylim[1] - cur_ylim[0]) * scale_factor
relx = (cur_xlim[1] - xdata)/(cur_xlim[1] - cur_xlim[0])
rely = (cur_ylim[1] - ydata)/(cur_ylim[1] - cur_ylim[0])
if(self.xzoom):
ax.set_xlim([xdata - new_width * (1-relx), xdata + new_width * (relx)])
if(self.yzoom):
ax.set_ylim([ydata - new_height * (1-rely), ydata + new_height * (rely)])
ax.figure.canvas.draw()
ax.figure.canvas.flush_events()
def onKeyPress(event):
if event.key == 'x':
self.xzoom = True
self.yzoom = False
if event.key == 'y':
self.xzoom = False
self.yzoom = True
def onKeyRelease(event):
self.xzoom = True
self.yzoom = True
fig = ax.get_figure() # get the figure of interest
self.cidScroll = fig.canvas.mpl_connect('scroll_event', zoom)
self.cidKeyP = fig.canvas.mpl_connect('key_press_event',onKeyPress)
self.cidKeyR = fig.canvas.mpl_connect('key_release_event',onKeyRelease)
return zoom
def pan_factory(self, ax):
def onPress(event):
if event.inaxes != ax: return
self.cur_xlim = ax.get_xlim()
self.cur_ylim = ax.get_ylim()
self.press = self.x0, self.y0, event.xdata, event.ydata
self.x0, self.y0, self.xpress, self.ypress = self.press
def onRelease(event):
self.press = None
ax.figure.canvas.draw()
def onMotion(event):
if self.press is None: return
if event.inaxes != ax: return
dx = event.xdata - self.xpress
dy = event.ydata - self.ypress
self.cur_xlim -= dx
self.cur_ylim -= dy
ax.set_xlim(self.cur_xlim)
ax.set_ylim(self.cur_ylim)
ax.figure.canvas.draw()
ax.figure.canvas.flush_events()
fig = ax.get_figure() # get the figure of interest
self.cidBP = fig.canvas.mpl_connect('button_press_event',onPress)
self.cidBR = fig.canvas.mpl_connect('button_release_event',onRelease)
self.cidBM = fig.canvas.mpl_connect('motion_notify_event',onMotion)
# attach the call back
#return the function
return onMotion
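# Pick-event handler that shows an annotation with the siteID and position when the
# cursor is clicked close to a plotted score point.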
class DataCursor(object):
    text_template = 'siteID: %d\nLocation: %s'
    x, y = 0.0, 0.0
    xoffset, yoffset = -20, 20
def __init__(self, ax, pos):
self.ax = ax
self.pos= pos
self.annotation = ax.annotate(self.text_template,
xy=(self.x, self.y), xytext=(self.xoffset, self.yoffset),
textcoords='offset points', ha='right', va='bottom',
bbox=dict(boxstyle='round,pad=0.5', fc='yellow', alpha=0.5),
arrowprops=dict(arrowstyle='->', connectionstyle='arc3,rad=0')
)
self.annotation.set_visible(False)
def __call__(self, event):
self.event = event
self.x, self.y = event.mouseevent.xdata, event.mouseevent.ydata
try:
if self.x >= self.pos.keys()[0] and self.x<=self.pos.keys()[-1]:
self.annotation.xy = self.x, self.y
hz=self.x-int(round(self.x))
sz=self.y-self.pos[int(round(self.x))][1]
if int(round(self.x)) in self.pos.keys() and (hz)**2<0.02 and (sz)**2<0.02:
self.annotation.set_text(self.text_template % (int(round(self.x-0.4)), self.pos[int(round(self.x-0.4))][0]))
self.annotation.set_visible(True)
event.canvas.draw()
else:
self.annotation.set_visible(False)
except:
pass
if __name__=="__main__":
multiprocessing.freeze_support()
app = QtGui.QApplication(sys.argv)
window = Main()
window.show()
sys.exit(app.exec_())
| gpl-3.0 |
dimroc/tensorflow-mnist-tutorial | lib/python3.6/site-packages/tensorflow/contrib/learn/python/learn/dataframe/transforms/in_memory_source.py | 82 | 6157 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Sources for numpy arrays and pandas DataFrames."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.contrib.learn.python.learn.dataframe import transform
from tensorflow.contrib.learn.python.learn.dataframe.queues import feeding_functions
class BaseInMemorySource(transform.TensorFlowTransform):
"""Abstract parent class for NumpySource and PandasSource."""
def __init__(self,
data,
num_threads=None,
enqueue_size=None,
batch_size=None,
queue_capacity=None,
shuffle=False,
min_after_dequeue=None,
seed=None,
data_name="in_memory_data"):
super(BaseInMemorySource, self).__init__()
self._data = data
self._num_threads = 1 if num_threads is None else num_threads
self._batch_size = (32 if batch_size is None else batch_size)
self._enqueue_size = max(1, int(self._batch_size / self._num_threads)
) if enqueue_size is None else enqueue_size
self._queue_capacity = (self._batch_size * 10 if queue_capacity is None else
queue_capacity)
self._shuffle = shuffle
self._min_after_dequeue = (batch_size if min_after_dequeue is None else
min_after_dequeue)
self._seed = seed
self._data_name = data_name
@transform.parameter
def data(self):
return self._data
@transform.parameter
def num_threads(self):
return self._num_threads
@transform.parameter
def enqueue_size(self):
return self._enqueue_size
@transform.parameter
def batch_size(self):
return self._batch_size
@transform.parameter
def queue_capacity(self):
return self._queue_capacity
@transform.parameter
def shuffle(self):
return self._shuffle
@transform.parameter
def min_after_dequeue(self):
return self._min_after_dequeue
@transform.parameter
def seed(self):
return self._seed
@transform.parameter
def data_name(self):
return self._data_name
@property
def input_valency(self):
return 0
def _apply_transform(self, transform_input, **kwargs):
queue = feeding_functions.enqueue_data(self.data,
self.queue_capacity,
self.shuffle,
self.min_after_dequeue,
num_threads=self.num_threads,
seed=self.seed,
name=self.data_name,
enqueue_size=self.enqueue_size,
num_epochs=kwargs.get("num_epochs"))
dequeued = queue.dequeue_many(self.batch_size)
# TODO(jamieas): dequeue and dequeue_many will soon return a list regardless
# of the number of enqueued tensors. Remove the following once that change
# is in place.
if not isinstance(dequeued, (tuple, list)):
dequeued = (dequeued,)
# pylint: disable=not-callable
return self.return_type(*dequeued)
class NumpySource(BaseInMemorySource):
"""A zero-input Transform that produces a single column from a numpy array."""
@property
def name(self):
return "NumpySource"
@property
def _output_names(self):
return ("index", "value")
class OrderedDictNumpySource(BaseInMemorySource):
"""A zero-input Transform that produces Series from a dict of numpy arrays."""
def __init__(self,
ordered_dict_of_arrays,
num_threads=None,
enqueue_size=None,
batch_size=None,
queue_capacity=None,
shuffle=False,
min_after_dequeue=None,
seed=None,
data_name="pandas_data"):
if "index" in ordered_dict_of_arrays.keys():
raise ValueError("Column name `index` is reserved.")
super(OrderedDictNumpySource, self).__init__(ordered_dict_of_arrays,
num_threads, enqueue_size,
batch_size, queue_capacity,
shuffle, min_after_dequeue,
seed, data_name)
@property
def name(self):
return "OrderedDictNumpySource"
@property
def _output_names(self):
return tuple(["index"] + list(self._data.keys()))
class PandasSource(BaseInMemorySource):
"""A zero-input Transform that produces Series from a DataFrame."""
def __init__(self,
dataframe,
num_threads=None,
enqueue_size=None,
batch_size=None,
queue_capacity=None,
shuffle=False,
min_after_dequeue=None,
seed=None,
data_name="pandas_data"):
if "index" in dataframe.columns:
raise ValueError("Column name `index` is reserved.")
super(PandasSource, self).__init__(dataframe, num_threads, enqueue_size,
batch_size, queue_capacity, shuffle,
min_after_dequeue, seed, data_name)
@property
def name(self):
return "PandasSource"
@property
def _output_names(self):
return tuple(["index"] + self._data.columns.tolist())
| apache-2.0 |
richardwolny/sms-tools | lectures/06-Harmonic-model/plots-code/sines-partials-harmonics-phase.py | 22 | 1986 | import numpy as np
import matplotlib.pyplot as plt
from scipy.signal import hamming, triang, blackmanharris
import sys, os, functools, time
sys.path.append(os.path.join(os.path.dirname(os.path.realpath(__file__)), '../../../software/models/'))
import dftModel as DFT
import utilFunctions as UF
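# Plot the phase spectrum and interpolated peak phases of three example sounds
# (two summed sines, a vibraphone note and an oboe note) using the DFT model and
# the peak detection/interpolation utilities.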
(fs, x) = UF.wavread('../../../sounds/sine-440-490.wav')
w = np.hamming(3529)
N = 32768
hN = N/2
t = -20
pin = 4850
x1 = x[pin:pin+w.size]
mX1, pX1 = DFT.dftAnal(x1, w, N)
ploc = UF.peakDetection(mX1, t)
pmag = mX1[ploc]
iploc, ipmag, ipphase = UF.peakInterp(mX1, pX1, ploc)
plt.figure(1, figsize=(9, 6))
plt.subplot(311)
plt.plot(fs*np.arange(pX1.size)/float(N), pX1, 'c', lw=1.5)
plt.plot(fs * iploc / N, ipphase, marker='x', color='b', alpha=1, linestyle='', markeredgewidth=1.5)
plt.axis([200, 1000, -2, 8])
plt.title('pX + peaks (sine-440-490.wav)')
(fs, x) = UF.wavread('../../../sounds/vibraphone-C6.wav')
w = np.blackman(401)
N = 1024
hN = N/2
t = -80
pin = 200
x2 = x[pin:pin+w.size]
mX2, pX2 = DFT.dftAnal(x2, w, N)
ploc = UF.peakDetection(mX2, t)
pmag = mX2[ploc]
iploc, ipmag, ipphase = UF.peakInterp(mX2, pX2, ploc)
plt.subplot(3,1,2)
plt.plot(fs*np.arange(pX2.size)/float(N), pX2, 'c', lw=1.5)
plt.plot(fs * iploc/N, ipphase, marker='x', color='b', alpha=1, linestyle='', markeredgewidth=1.5)
plt.axis([500,10000,min(pX2), 25])
plt.title('pX + peaks (vibraphone-C6.wav)')
(fs, x) = UF.wavread('../../../sounds/oboe-A4.wav')
w = np.blackman(651)
N = 2048
hN = N/2
t = -80
pin = 10000
x3 = x[pin:pin+w.size]
mX3, pX3 = DFT.dftAnal(x3, w, N)
ploc = UF.peakDetection(mX3, t)
pmag = mX3[ploc]
iploc, ipmag, ipphase = UF.peakInterp(mX3, pX3, ploc)
plt.subplot(3,1,3)
plt.plot(fs*np.arange(pX3.size)/float(N), pX3, 'c', lw=1.5)
plt.plot(fs * iploc / N, ipphase, marker='x', color='b', alpha=1, linestyle='', markeredgewidth=1.5)
plt.axis([0,6000,2, 24])
plt.title('pX + peaks (oboe-A4.wav)')
plt.tight_layout()
plt.savefig('sines-partials-harmonics-phase.png')
plt.show()
| agpl-3.0 |
cancan101/tensorflow | tensorflow/contrib/learn/python/learn/grid_search_test.py | 18 | 2259 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Grid search tests."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import random
import sys
# TODO: #6568 Remove this hack that makes dlopen() not crash.
if hasattr(sys, 'getdlopenflags') and hasattr(sys, 'setdlopenflags'):
import ctypes
sys.setdlopenflags(sys.getdlopenflags() | ctypes.RTLD_GLOBAL)
from tensorflow.contrib.learn.python import learn
from tensorflow.python.platform import test
HAS_SKLEARN = os.environ.get('TENSORFLOW_SKLEARN', False)
if HAS_SKLEARN:
try:
# pylint: disable=g-import-not-at-top
from sklearn import datasets
from sklearn.grid_search import GridSearchCV
from sklearn.metrics import accuracy_score
except ImportError:
HAS_SKLEARN = False
class GridSearchTest(test.TestCase):
"""Grid search tests."""
def testIrisDNN(self):
if HAS_SKLEARN:
random.seed(42)
iris = datasets.load_iris()
feature_columns = learn.infer_real_valued_columns_from_input(iris.data)
classifier = learn.DNNClassifier(
feature_columns=feature_columns,
hidden_units=[10, 20, 10],
n_classes=3)
grid_search = GridSearchCV(
classifier, {'hidden_units': [[5, 5], [10, 10]]},
scoring='accuracy',
fit_params={'steps': [50]})
grid_search.fit(iris.data, iris.target)
score = accuracy_score(iris.target, grid_search.predict(iris.data))
self.assertGreater(score, 0.5, 'Failed with score = {0}'.format(score))
if __name__ == '__main__':
test.main()
| apache-2.0 |
ChanChiChoi/scikit-learn | examples/preprocessing/plot_robust_scaling.py | 221 | 2702 | #!/usr/bin/python
# -*- coding: utf-8 -*-
"""
=========================================================
Robust Scaling on Toy Data
=========================================================
Making sure that each feature has approximately the same scale can be a
crucial preprocessing step. However, when data contains outliers,
:class:`StandardScaler <sklearn.preprocessing.StandardScaler>` can often
be misled. In such cases, it is better to use a scaler that is robust
against outliers.
Here, we demonstrate this on a toy dataset, where one single datapoint
is a large outlier.
"""
from __future__ import print_function
print(__doc__)
# Code source: Thomas Unterthiner
# License: BSD 3 clause
import matplotlib.pyplot as plt
import numpy as np
from sklearn.preprocessing import StandardScaler, RobustScaler
# Create training and test data
np.random.seed(42)
n_datapoints = 100
Cov = [[0.9, 0.0], [0.0, 20.0]]
mu1 = [100.0, -3.0]
mu2 = [101.0, -3.0]
X1 = np.random.multivariate_normal(mean=mu1, cov=Cov, size=n_datapoints)
X2 = np.random.multivariate_normal(mean=mu2, cov=Cov, size=n_datapoints)
Y_train = np.hstack([[-1]*n_datapoints, [1]*n_datapoints])
X_train = np.vstack([X1, X2])
X1 = np.random.multivariate_normal(mean=mu1, cov=Cov, size=n_datapoints)
X2 = np.random.multivariate_normal(mean=mu2, cov=Cov, size=n_datapoints)
Y_test = np.hstack([[-1]*n_datapoints, [1]*n_datapoints])
X_test = np.vstack([X1, X2])
X_train[0, 0] = -1000 # a fairly large outlier
# Scale data
standard_scaler = StandardScaler()
Xtr_s = standard_scaler.fit_transform(X_train)
Xte_s = standard_scaler.transform(X_test)
robust_scaler = RobustScaler()
Xtr_r = robust_scaler.fit_transform(X_train)
Xte_r = robust_scaler.transform(X_test)  # scale the test set with the training statistics
# Plot data
fig, ax = plt.subplots(1, 3, figsize=(12, 4))
ax[0].scatter(X_train[:, 0], X_train[:, 1],
color=np.where(Y_train > 0, 'r', 'b'))
ax[1].scatter(Xtr_s[:, 0], Xtr_s[:, 1], color=np.where(Y_train > 0, 'r', 'b'))
ax[2].scatter(Xtr_r[:, 0], Xtr_r[:, 1], color=np.where(Y_train > 0, 'r', 'b'))
ax[0].set_title("Unscaled data")
ax[1].set_title("After standard scaling (zoomed in)")
ax[2].set_title("After robust scaling (zoomed in)")
# for the scaled data, we zoom in to the data center (outlier can't be seen!)
for a in ax[1:]:
a.set_xlim(-3, 3)
a.set_ylim(-3, 3)
plt.tight_layout()
plt.show()
# Classify using k-NN
from sklearn.neighbors import KNeighborsClassifier
knn = KNeighborsClassifier()
knn.fit(Xtr_s, Y_train)
acc_s = knn.score(Xte_s, Y_test)
print("Testset accuracy using standard scaler: %.3f" % acc_s)
knn.fit(Xtr_r, Y_train)
acc_r = knn.score(Xte_r, Y_test)
print("Testset accuracy using robust scaler: %.3f" % acc_r)
| bsd-3-clause |
datapythonista/pandas | pandas/tests/groupby/test_bin_groupby.py | 2 | 3707 | import numpy as np
import pytest
from pandas._libs import (
lib,
reduction as libreduction,
)
import pandas.util._test_decorators as td
import pandas as pd
from pandas import Series
import pandas._testing as tm
def test_series_grouper():
obj = Series(np.random.randn(10))
labels = np.array([-1, -1, -1, 0, 0, 0, 1, 1, 1, 1], dtype=np.intp)
grouper = libreduction.SeriesGrouper(obj, np.mean, labels, 2)
result, counts = grouper.get_result()
expected = np.array([obj[3:6].mean(), obj[6:].mean()], dtype=object)
tm.assert_almost_equal(result, expected)
exp_counts = np.array([3, 4], dtype=np.int64)
tm.assert_almost_equal(counts, exp_counts)
def test_series_grouper_result_length_difference():
# GH 40014
obj = Series(np.random.randn(10), dtype="float64")
obj.index = obj.index.astype("O")
labels = np.array([-1, -1, -1, 0, 0, 0, 1, 1, 1, 1], dtype=np.intp)
grouper = libreduction.SeriesGrouper(obj, lambda x: all(x > 0), labels, 2)
result, counts = grouper.get_result()
expected = np.array([all(obj[3:6] > 0), all(obj[6:] > 0)], dtype=object)
tm.assert_equal(result, expected)
exp_counts = np.array([3, 4], dtype=np.int64)
tm.assert_equal(counts, exp_counts)
def test_series_grouper_requires_nonempty_raises():
# GH#29500
obj = Series(np.random.randn(10))
dummy = obj.iloc[:0]
labels = np.array([-1, -1, -1, 0, 0, 0, 1, 1, 1, 1], dtype=np.intp)
with pytest.raises(ValueError, match="SeriesGrouper requires non-empty `series`"):
libreduction.SeriesGrouper(dummy, np.mean, labels, 2)
def test_series_bin_grouper():
obj = Series(np.random.randn(10))
bins = np.array([3, 6], dtype=np.int64)
grouper = libreduction.SeriesBinGrouper(obj, np.mean, bins)
result, counts = grouper.get_result()
expected = np.array([obj[:3].mean(), obj[3:6].mean(), obj[6:].mean()], dtype=object)
tm.assert_almost_equal(result, expected)
exp_counts = np.array([3, 3, 4], dtype=np.int64)
tm.assert_almost_equal(counts, exp_counts)
def assert_block_lengths(x):
assert len(x) == len(x._mgr.blocks[0].mgr_locs)
return 0
def cumsum_max(x):
x.cumsum().max()
return 0
@pytest.mark.parametrize(
"func",
[
cumsum_max,
pytest.param(assert_block_lengths, marks=td.skip_array_manager_invalid_test),
],
)
def test_mgr_locs_updated(func):
# https://github.com/pandas-dev/pandas/issues/31802
# Some operations may require creating new blocks, which requires
# valid mgr_locs
df = pd.DataFrame({"A": ["a", "a", "a"], "B": ["a", "b", "b"], "C": [1, 1, 1]})
result = df.groupby(["A", "B"]).agg(func)
expected = pd.DataFrame(
{"C": [0, 0]},
index=pd.MultiIndex.from_product([["a"], ["a", "b"]], names=["A", "B"]),
)
tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize(
"binner,closed,expected",
[
(
np.array([0, 3, 6, 9], dtype=np.int64),
"left",
np.array([2, 5, 6], dtype=np.int64),
),
(
np.array([0, 3, 6, 9], dtype=np.int64),
"right",
np.array([3, 6, 6], dtype=np.int64),
),
(np.array([0, 3, 6], dtype=np.int64), "left", np.array([2, 5], dtype=np.int64)),
(
np.array([0, 3, 6], dtype=np.int64),
"right",
np.array([3, 6], dtype=np.int64),
),
],
)
def test_generate_bins(binner, closed, expected):
values = np.array([1, 2, 3, 4, 5, 6], dtype=np.int64)
result = lib.generate_bins_dt64(values, binner, closed=closed)
tm.assert_numpy_array_equal(result, expected)
class TestMoments:
pass
| bsd-3-clause |
skuschel/postpic | examples/simpleexample.py | 2 | 6385 | #!/usr/bin/env python
#
# This file is part of postpic.
#
# postpic is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# postpic is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with postpic. If not, see <http://www.gnu.org/licenses/>.
#
# Copyright Stephan Kuschel 2015
#
def main():
import numpy as np
import postpic as pp
# postpic will use matplotlib for plotting. Changing matplotlibs backend
# to "Agg" makes it possible to save plots without a display attached.
# This is necessary to run this example within the "run-tests" script
# on travis-ci.
import matplotlib; matplotlib.use('Agg')
# choose the dummy reader. This reader will create fake data for testing.
pp.chooseCode('dummy')
dr = pp.readDump(3e5) # Dummyreader takes a float as argument, not a string.
# set and create directory for pictures.
savedir = '_examplepictures/'
import os
if not os.path.exists(savedir):
os.mkdir(savedir)
# initialze the plotter object.
# project name will be prepended to all output names
plotter = pp.plotting.plottercls(dr, outdir=savedir, autosave=True, project='simpleexample')
# we will need a refrence to the MultiSpecies quite often
from postpic.particles import MultiSpecies
# create MultiSpecies Object for every particle species that exists.
pas = [MultiSpecies(dr, s) for s in dr.listSpecies()]
if True:
# Plot Data from the FieldAnalyzer fa. This is very simple: every line creates one plot
plotter.plotField(dr.Ex()) # plot 0
plotter.plotField(dr.Ey()) # plot 1
plotter.plotField(dr.Ez()) # plot 2
plotter.plotField(dr.energydensityEM()) # plot 3
# Using the MultiSpecies requires an additional step:
# 1) The MultiSpecies.createField method will be used to create a Field object
# with choosen particle scalars on every axis
# 2) Plot the Field object
optargsh={'bins': [300,300]}
for pa in pas:
# create a Field object nd holding the number density
nd = pa.createField('x', 'y', simextent=True, **optargsh)
# plot the Field object nd
plotter.plotField(nd, name='NumberDensity') # plot 4
# if you like to keep working with the just created number density
# yourself, it will convert to an numpy array whenever needed:
arr = np.asarray(nd)
print('Shape of number density: {}'.format(arr.shape))
# more advanced: create a field holding the total kinetic energy on grid
ekin = pa.createField('x', 'y', weights='Ekin_MeV', simextent=True, **optargsh)
            # The Field objects can be used for calculations. Here we use this to
# calculate the average kinetic energy on grid and plot
plotter.plotField(ekin / nd, name='Avg Kin Energy (MeV)') # plot 5
# use optargsh to force lower resolution
# plot number density
plotter.plotField(pa.createField('x', 'y', **optargsh), lineoutx=True, lineouty=True) # plot 6
# plot phase space
plotter.plotField(pa.createField('x', 'p', **optargsh)) # plot 7
plotter.plotField(pa.createField('x', 'gamma', **optargsh)) # plot 8
plotter.plotField(pa.createField('x', 'beta', **optargsh)) # plot 9
# same with high resolution
plotter.plotField(pa.createField('x', 'y', bins=[1000,1000])) # plot 10
plotter.plotField(pa.createField('x', 'p', bins=[1000,1000])) # plot 11
# advanced: postpic has already defined a lot of particle scalars as Px, Py, Pz, P, X, Y, Z, gamma, beta, Ekin, Ekin_MeV, Ekin_MeV_amu, ... but if needed you can also define your own particle scalar on the fly.
            # In case it's regularly used, it should be added to postpic. If you don't know how,
            # just let us know about your own useful particle scalar by email or by adding an issue at
# https://github.com/skuschel/postpic/issues
# define your own particle scalar: p_r = sqrt(px**2 + py**2)/p
plotter.plotField(pa.createField('sqrt(px**2 + py**2)/p', 'sqrt(x**2 + y**2)', bins=[400,400])) # plot 12
            # however, since the program does not know what quantities were calculated,
            # the axes of plot 12 will only say "unknown"
# this can be avoided in two ways:
# 1st: define your own ScalarProperty(name, expr, unit):
p_perp = pp.particles.ScalarProperty('sqrt(px**2 + py**2)/p', name='p_perp', unit='kg*m/s')
r_xy = pp.particles.ScalarProperty('sqrt(x**2 + y**2)', name='r_xy', unit='m')
            # this will create an identical plot, but correctly labeled
plotter.plotField(pa.createField(p_perp, r_xy, bins=[400,400])) # plot 13
            # if those quantities are reused often, teach postpic to recognize them within the string expression:
pp.particles.particle_scalars.add(p_perp)
            #pp.particles.scalars.add(r_xy)  # we cannot execute this line, because r_xy is already predefined
plotter.plotField(pa.createField('p_perp', 'r_xy', bins=[400,400])) # plot 14
            # choose particles by their properties
# this has been the old interface, which would still work
# def cf(ms):
# return ms('x') > 0.0 # only use particles with x > 0.0
# cf.name = 'x>0.0'
# pa.compress(cf)
# nicer is the new filter function, which does exactly the same:
pf = pa.filter('x>0')
# plot 15, compare with plot 10
plotter.plotField(pf.createField('x', 'y', bins=[1000,1000]))
# plot 16, compare with plot 12
plotter.plotField(pf.createField('p_perp', 'r_xy', bins=[400,400]))
plotter.plotField(dr.divE()) # plot 13
if __name__=='__main__':
main()
| gpl-3.0 |
Solid-Mechanics/matplotlib-4-abaqus | matplotlib/backends/backend_cocoaagg.py | 4 | 9838 | from __future__ import division, print_function
"""
backend_cocoaagg.py
A native Cocoa backend via PyObjC in OSX.
Author: Charles Moad ([email protected])
Notes:
- Requires PyObjC (currently testing v1.3.7)
- The Tk backend works nicely on OSX. This code
primarily serves as an example of embedding a
matplotlib rendering context into a cocoa app
using a NSImageView.
"""
import os, sys
try:
import objc
except ImportError:
    raise ImportError('The CocoaAgg backend requires PyObjC to be installed!')
from Foundation import *
from AppKit import *
from PyObjCTools import NibClassBuilder, AppHelper
from matplotlib import cbook
cbook.warn_deprecated(
'1.3',
message="The CocoaAgg backend is not a fully-functioning backend. "
"It may be removed in matplotlib 1.4.")
import matplotlib
from matplotlib.figure import Figure
from matplotlib.backend_bases import FigureManagerBase, FigureCanvasBase
from matplotlib.backend_bases import ShowBase
from backend_agg import FigureCanvasAgg
from matplotlib._pylab_helpers import Gcf
mplBundle = NSBundle.bundleWithPath_(os.path.dirname(__file__))
def new_figure_manager(num, *args, **kwargs):
FigureClass = kwargs.pop('FigureClass', Figure)
thisFig = FigureClass( *args, **kwargs )
return new_figure_manager_given_figure(num, thisFig)
def new_figure_manager_given_figure(num, figure):
"""
Create a new figure manager instance for the given figure.
"""
canvas = FigureCanvasCocoaAgg(figure)
return FigureManagerCocoaAgg(canvas, num)
## Below is the original show() function:
#def show():
# for manager in Gcf.get_all_fig_managers():
# manager.show()
#
## It appears that this backend is unusual in having a separate
## run function invoked for each figure, instead of a single
## mainloop. Presumably there is no blocking at all.
##
## Using the Show class below should cause no difference in
## behavior.
class Show(ShowBase):
def mainloop(self):
pass
show = Show()
def draw_if_interactive():
if matplotlib.is_interactive():
figManager = Gcf.get_active()
if figManager is not None:
figManager.show()
class FigureCanvasCocoaAgg(FigureCanvasAgg):
def draw(self):
FigureCanvasAgg.draw(self)
def blit(self, bbox):
pass
def start_event_loop(self,timeout):
FigureCanvasBase.start_event_loop_default(self,timeout)
start_event_loop.__doc__=FigureCanvasBase.start_event_loop_default.__doc__
def stop_event_loop(self):
FigureCanvasBase.stop_event_loop_default(self)
stop_event_loop.__doc__=FigureCanvasBase.stop_event_loop_default.__doc__
NibClassBuilder.extractClasses('Matplotlib.nib', mplBundle)
class MatplotlibController(NibClassBuilder.AutoBaseClass):
# available outlets:
# NSWindow plotWindow
# PlotView plotView
def awakeFromNib(self):
# Get a reference to the active canvas
NSApp().setDelegate_(self)
self.app = NSApp()
self.canvas = Gcf.get_active().canvas
self.plotView.canvas = self.canvas
self.canvas.plotView = self.plotView
self.plotWindow.setAcceptsMouseMovedEvents_(True)
self.plotWindow.makeKeyAndOrderFront_(self)
self.plotWindow.setDelegate_(self)#.plotView)
self.plotView.setImageFrameStyle_(NSImageFrameGroove)
self.plotView.image_ = NSImage.alloc().initWithSize_((0,0))
self.plotView.setImage_(self.plotView.image_)
# Make imageview first responder for key events
self.plotWindow.makeFirstResponder_(self.plotView)
# Force the first update
self.plotView.windowDidResize_(self)
def windowDidResize_(self, sender):
self.plotView.windowDidResize_(sender)
def windowShouldClose_(self, sender):
#NSApplication.sharedApplication().stop_(self)
self.app.stop_(self)
return objc.YES
def saveFigure_(self, sender):
p = NSSavePanel.savePanel()
if(p.runModal() == NSFileHandlingPanelOKButton):
self.canvas.print_figure(p.filename())
def printFigure_(self, sender):
op = NSPrintOperation.printOperationWithView_(self.plotView)
op.runOperation()
class PlotWindow(NibClassBuilder.AutoBaseClass):
pass
class PlotView(NibClassBuilder.AutoBaseClass):
def updatePlot(self):
w,h = self.canvas.get_width_height()
# Remove all previous images
for i in xrange(self.image_.representations().count()):
self.image_.removeRepresentation_(self.image_.representations().objectAtIndex_(i))
self.image_.setSize_((w,h))
brep = NSBitmapImageRep.alloc().initWithBitmapDataPlanes_pixelsWide_pixelsHigh_bitsPerSample_samplesPerPixel_hasAlpha_isPlanar_colorSpaceName_bytesPerRow_bitsPerPixel_(
(self.canvas.buffer_rgba(),'','','',''), # Image data
w, # width
h, # height
8, # bits per pixel
4, # components per pixel
True, # has alpha?
False, # is planar?
NSCalibratedRGBColorSpace, # color space
w*4, # row bytes
32) # bits per pixel
self.image_.addRepresentation_(brep)
self.setNeedsDisplay_(True)
def windowDidResize_(self, sender):
w,h = self.bounds().size
dpi = self.canvas.figure.dpi
self.canvas.figure.set_size_inches(w / dpi, h / dpi)
self.canvas.draw()
self.updatePlot()
def mouseDown_(self, event):
dblclick = (event.clickCount() == 2)
loc = self.convertPoint_fromView_(event.locationInWindow(), None)
type = event.type()
if (type == NSLeftMouseDown):
button = 1
else:
print('Unknown mouse event type:', type, file=sys.stderr)
button = -1
self.canvas.button_press_event(loc.x, loc.y, button, dblclick=dblclick)
self.updatePlot()
def mouseDragged_(self, event):
loc = self.convertPoint_fromView_(event.locationInWindow(), None)
self.canvas.motion_notify_event(loc.x, loc.y)
self.updatePlot()
def mouseUp_(self, event):
loc = self.convertPoint_fromView_(event.locationInWindow(), None)
type = event.type()
if (type == NSLeftMouseUp):
button = 1
else:
print('Unknown mouse event type:', type, file=sys.stderr)
button = -1
self.canvas.button_release_event(loc.x, loc.y, button)
self.updatePlot()
def keyDown_(self, event):
self.canvas.key_press_event(event.characters())
self.updatePlot()
def keyUp_(self, event):
self.canvas.key_release_event(event.characters())
self.updatePlot()
class MPLBootstrap(NSObject):
# Loads the nib containing the PlotWindow and PlotView
def startWithBundle_(self, bundle):
#NSApplicationLoad()
if not bundle.loadNibFile_externalNameTable_withZone_('Matplotlib.nib', {}, None):
print('Unable to load Matplotlib Cocoa UI!', file=sys.stderr)
sys.exit()
class FigureManagerCocoaAgg(FigureManagerBase):
def __init__(self, canvas, num):
FigureManagerBase.__init__(self, canvas, num)
try:
WMEnable('Matplotlib')
except:
# MULTIPLE FIGURES ARE BUGGY!
pass # If there are multiple figures we only need to enable once
#self.bootstrap = MPLBootstrap.alloc().init().performSelectorOnMainThread_withObject_waitUntilDone_(
# 'startWithBundle:',
# mplBundle,
# False)
def show(self):
# Load a new PlotWindow
self.bootstrap = MPLBootstrap.alloc().init().performSelectorOnMainThread_withObject_waitUntilDone_(
'startWithBundle:',
mplBundle,
False)
NSApplication.sharedApplication().run()
FigureManager = FigureManagerCocoaAgg
#### Everything below taken from PyObjC examples
#### This is a hack to allow python scripts to access
#### the window manager without running pythonw.
def S(*args):
return ''.join(args)
OSErr = objc._C_SHT
OUTPSN = 'o^{ProcessSerialNumber=LL}'
INPSN = 'n^{ProcessSerialNumber=LL}'
FUNCTIONS=[
# These two are public API
( u'GetCurrentProcess', S(OSErr, OUTPSN) ),
( u'SetFrontProcess', S(OSErr, INPSN) ),
# This is undocumented SPI
( u'CPSSetProcessName', S(OSErr, INPSN, objc._C_CHARPTR) ),
( u'CPSEnableForegroundOperation', S(OSErr, INPSN) ),
]
def WMEnable(name='Python'):
if isinstance(name, unicode):
name = name.encode('utf8')
mainBundle = NSBundle.mainBundle()
bPath = os.path.split(os.path.split(os.path.split(sys.executable)[0])[0])[0]
if mainBundle.bundlePath() == bPath:
return True
bndl = NSBundle.bundleWithPath_(objc.pathForFramework('/System/Library/Frameworks/ApplicationServices.framework'))
if bndl is None:
print('ApplicationServices missing', file=sys.stderr)
return False
d = {}
objc.loadBundleFunctions(bndl, d, FUNCTIONS)
for (fn, sig) in FUNCTIONS:
if fn not in d:
print('Missing', fn, file=sys.stderr)
return False
err, psn = d['GetCurrentProcess']()
if err:
print('GetCurrentProcess', (err, psn), file=sys.stderr)
return False
err = d['CPSSetProcessName'](psn, name)
if err:
print('CPSSetProcessName', (err, psn), file=sys.stderr)
return False
err = d['CPSEnableForegroundOperation'](psn)
if err:
#print >>sys.stderr, 'CPSEnableForegroundOperation', (err, psn)
return False
err = d['SetFrontProcess'](psn)
if err:
print('SetFrontProcess', (err, psn), file=sys.stderr)
return False
return True
| mit |
NumCosmo/NumCosmo | examples/example_evolaa_zero.py | 1 | 2254 | #!/usr/bin/env python
try:
import gi
gi.require_version('NumCosmo', '1.0')
gi.require_version('NumCosmoMath', '1.0')
except:
pass
import math
import matplotlib
import matplotlib.pyplot as plt
import numpy as np
from gi.repository import NumCosmo as Nc
from gi.repository import NumCosmoMath as Ncm
#
# Initializing the library objects, this must be called before
# any other library function.
#
Ncm.cfg_init ()
class PyHOAATest (Ncm.HOAA):
def __init__ (self):
Ncm.HOAA.__init__ (self, opt = Ncm.HOAAOpt.DLNMNU_ONLY)
def do_eval_nu (self, model, t, k):
return k
def do_eval_m (self, model, t, k):
return t * t
def do_eval_mnu (self, model, t, k):
return t * t * k
def do_eval_dlnmnu (self, model, t, k):
return 2.0 / t
def do_eval_system (self, model, t, k):
return k, 2.0 / t, 0.0
def do_nsing (self, model, k):
return 1
def do_get_sing_info (self, model, k, sing):
return 0.0, -1.0, +1.0, Ncm.HOAASingType.ZERO
def do_eval_sing_mnu (self, model, t, k, sing):
return t * t * k
def do_eval_sing_dlnmnu (self, model, t, k, sing):
return 2.0 / t
def do_eval_sing_system (self, model, t, k, sing):
return k, 2.0 / t, 0.0
def sol_q (k, t):
a = k * t
return math.sin (a) / a
def sol_p (k, t):
a = k * t
return t * (math.cos (a) - math.sin (a) / a)
hoaa = PyHOAATest ()
k = 1.0
hoaa.set_ti (-1.0e10)
hoaa.set_tf (+1.0e10)
hoaa.set_k (k)
hoaa.set_reltol (1.0e-14)
ti = - 10.0
S1 = sol_q (k, ti)
PS1 = sol_p (k, ti)
hoaa.prepare ()
(t0, t1) = hoaa.get_t0_t1 ()
(Aq, Av) = hoaa.eval_solution (None, ti, S1, PS1)
print ("# ", t0, t1)
print ("# ", Aq, Av)
ta = np.linspace (-5.0e-1, +5.0e-1, 1000000)
for t in ta:
(q, v, Pq, Pv) = hoaa.eval_QV (None, t)
(upsilon, gamma, qbar, pbar) = hoaa.eval_AA (None, t)
S = Aq * q + Av * v
PS = Aq * Pq + Av * Pv
mnu = hoaa.eval_mnu (None, t, k)
nu = hoaa.eval_nu (None, t, k)
lnmnu = math.log (mnu)
I = 0.5 * (mnu * q**2 + Pq**2 / mnu)
J = 0.5 * (mnu * v**2 + Pv**2 / mnu)
print (t, S, sol_q (k, t), PS, sol_p (k, t), I, J, math.sqrt (I * J), upsilon, gamma, qbar, pbar, (qbar**2 + pbar**2) / math.hypot (upsilon, 1.0 / math.cosh (lnmnu)) - 1.0, Aq * q / (Av * v) + 1.0)
| gpl-3.0 |
jrbourbeau/cr-composition | notebooks/legacy/baseline/nstations-vs-nchannels.py | 2 | 3439 | #!/usr/bin/env python
from __future__ import division
import numpy as np
import sys
import matplotlib.pyplot as plt
import argparse
import seaborn.apionly as sns
from icecube import ShowerLLH
from composition.analysis.load_sim import load_sim
from composition.support_functions.checkdir import checkdir
if __name__ == "__main__":
sns.set_palette('muted')
sns.set_color_codes()
p = argparse.ArgumentParser(
description='Creates performance plots for ShowerLLH')
p.add_argument('-o', '--outdir', dest='outdir',
default='/home/jbourbeau/public_html/figures/composition/baseline', help='Output directory')
p.add_argument('-e', '--energy', dest='energy',
default='MC',
choices=['MC', 'reco'],
help='Option for a variety of preset bin values')
p.add_argument('--extended', dest='extended',
default=False, action='store_true',
help='Use extended energy range')
args = p.parse_args()
checkdir(args.outdir + '/')
# Import ShowerLLH sim reconstructions and cuts to be made
df, cut_dict = load_sim(return_cut_dict=True)
selection_mask = np.array([True] * len(df))
standard_cut_keys = ['IT_containment', 'IceTopMaxSignalInEdge',
'IceTopMaxSignal', 'NChannels', 'InIce_containment']
for key in standard_cut_keys:
selection_mask *= cut_dict[key]
df = df[selection_mask]
nchannels = np.log10(df.NChannels)
nstations = np.log10(df.NStations)
# 2D charge vs nchannels histogram of proton fraction
nstations_bins = np.linspace(0, 2, 75)
nchannels_bins = np.linspace(0, 4, 75)
h, xedges, yedges = np.histogram2d(nchannels,
nstations,
bins=[nchannels_bins,
nstations_bins],
normed=False)
h = np.rot90(h)
h = np.flipud(h)
h = np.ma.masked_where(h == 0, h)
h = np.log10(h)
extent = [xedges[0], xedges[-1], yedges[0], yedges[-1]]
# extent = [yedges[0], yedges[-1], xedges[0], xedges[-1]]
def line_fit(array):
fit = []
for x in array:
if x <= 9.0:
slope = (5.3 - 2.55) / (9.5 - 6.2)
fit.append(2.55 + slope * (x - 6.2))
else:
slope = (5.20 - 4.9) / (9.5 - 9.0)
fit.append(4.9 + slope * (x - 9.0))
fit = np.array(fit)
return fit
fig, ax = plt.subplots()
# colormap = 'coolwarm'
colormap = 'viridis'
plt.imshow(h, extent=extent, origin='lower',
interpolation='none', cmap=colormap,
aspect=1)
# x = np.arange(6.2, 9.51, 0.1)
# plt.plot(x, line_fit(x), marker='None', linestyle='--',
# color='k')
if args.energy == 'MC':
plt.xlabel('$\log_{10}(\mathrm{NChannels})$')
if args.energy == 'reco':
plt.xlabel('$\log_{10}(E_{\mathrm{reco}}/\mathrm{GeV})$')
plt.ylabel('$\log_{10}(\mathrm{NStations})$')
# plt.xlim([6.2, 9.5])
cb = plt.colorbar(
label='$\log_{10}(\mathrm{Counts})$')
if args.energy == 'MC':
outfile = args.outdir + '/NStations-vs-NChannels.png'
if args.energy == 'reco':
outfile = args.outdir + '/charge-vs-reco-energy.png'
plt.savefig(outfile)
plt.close()
| mit |
CTSRD-SOAAP/chromium-42.0.2311.135 | native_client/pnacl/driver/pnacl-ld.py | 2 | 23928 | #!/usr/bin/python
# Copyright (c) 2012 The Native Client Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
from driver_tools import ArchMerge, DriverChain, GetArch, \
ParseArgs, ParseTriple, RunDriver, RunWithEnv, SetArch, \
SetExecutableMode, TempNameGen, UnrecognizedOption
from driver_env import env
from driver_log import Log
import filetype
import ldtools
import pathtools
EXTRA_ENV = {
'ALLOW_NATIVE': '0', # Allow LD args which will change the behavior
# of native linking. This must be accompanied by
# -arch to produce a .nexe.
'USE_IRT': '1', # Use stable IRT interfaces.
'INPUTS' : '',
'OUTPUT' : '',
'STATIC' : '0',
'PIC' : '0',
'USE_STDLIB': '1',
'RELOCATABLE': '0',
'SONAME' : '',
'STRIP_MODE' : 'none',
'STRIP_FLAGS' : '${STRIP_FLAGS_%STRIP_MODE%}',
'STRIP_FLAGS_all' : '-s',
'STRIP_FLAGS_debug': '-S',
'OPT_INLINE_THRESHOLD': '100',
'OPT_LEVEL': '', # Default opt is 0, but we need to know if it's explicitly
# requested or not, since we don't want to propagate
# the value to TRANSLATE_FLAGS if it wasn't explicitly set.
'OPT_LTO_FLAGS': '-std-link-opts -disable-internalize',
'OPT_FLAGS': '${#OPT_LEVEL && !OPT_LEVEL == 0 ? ${OPT_LTO_FLAGS}} ' +
'-inline-threshold=${OPT_INLINE_THRESHOLD} ',
'TRANSLATE_FLAGS': '${PIC ? -fPIC} ${!USE_STDLIB ? -nostdlib} ' +
'${#SONAME ? -Wl,--soname=${SONAME}} ' +
'${#OPT_LEVEL ? -O${OPT_LEVEL}} ' +
'--allow-llvm-bitcode-input ' +
'${CXX_EH_MODE==zerocost ? --pnacl-allow-zerocost-eh} ' +
'${TRANSLATE_FLAGS_USER}',
# Extra pnacl-translate flags specified by the user using -Wt
'TRANSLATE_FLAGS_USER': '',
'GOLD_PLUGIN_ARGS': '-plugin=${GOLD_PLUGIN_SO} ' +
'-plugin-opt=emit-llvm',
'LD_FLAGS' : '-nostdlib ${@AddPrefix:-L:SEARCH_DIRS} ' +
'${STATIC ? -static} ' +
'${RELOCATABLE ? -relocatable} ' +
'${#SONAME ? --soname=${SONAME}}',
# Flags for native linking.
# Only allowed if ALLOW_NATIVE is true.
'LD_FLAGS_NATIVE': '',
'SEARCH_DIRS' : '${SEARCH_DIRS_USER} ${SEARCH_DIRS_BUILTIN}',
'SEARCH_DIRS_USER' : '',
# Standard Library Directories
'SEARCH_DIRS_BUILTIN': '${USE_STDLIB ? ' +
' ${BASE_USR}/usr/lib/ ' +
' ${BASE_USR}/lib/ ' +
' ${BASE_SDK}/lib/ ' +
' ${BASE_LIB}/ ' +
'}',
'BCLD_OFORMAT' : '${BCLD_OFORMAT_%ARCH%}',
'BCLD_OFORMAT_ARM' : 'elf32-littlearm-nacl',
'BCLD_OFORMAT_X8632' : 'elf32-i386-nacl',
'BCLD_OFORMAT_X8664' : 'elf64-x86-64-nacl',
'BCLD_OFORMAT_MIPS32' : 'elf32-tradlittlemips-nacl',
'BCLD_OFORMAT_ARM_NONSFI' : 'elf32-littlearm-nacl',
'BCLD_OFORMAT_X8632_NONSFI' : 'elf32-i386-nacl',
'BCLD_ALLOW_UNRESOLVED' :
# The following functions are implemented in the native support library.
# Before a .pexe is produced, they get rewritten to intrinsic calls.
# However, this rewriting happens after bitcode linking - so gold has
# to be told that these are allowed to remain unresolved.
'--allow-unresolved=memcpy '
'--allow-unresolved=memset '
'--allow-unresolved=memmove '
'--allow-unresolved=setjmp '
'--allow-unresolved=longjmp '
# These TLS layout functions are either defined by the ExpandTls
# pass or (for non-ABI-stable code only) by PNaCl's native support
# code.
'--allow-unresolved=__nacl_tp_tls_offset '
'--allow-unresolved=__nacl_tp_tdb_offset '
# __nacl_get_arch() is for non-ABI-stable code only.
'--allow-unresolved=__nacl_get_arch '
'${CXX_EH_MODE==sjlj ? '
# These symbols are defined by libsupc++ and the PNaClSjLjEH
# pass generates references to them.
'--undefined=__pnacl_eh_stack '
'--undefined=__pnacl_eh_resume '
# These symbols are defined by the PNaClSjLjEH pass and
# libsupc++ refers to them.
'--allow-unresolved=__pnacl_eh_type_table '
'--allow-unresolved=__pnacl_eh_action_table '
'--allow-unresolved=__pnacl_eh_filter_table} '
# For exception-handling enabled tests.
'${CXX_EH_MODE==zerocost ? '
'--allow-unresolved=_Unwind_Backtrace '
'--allow-unresolved=_Unwind_DeleteException '
'--allow-unresolved=_Unwind_GetCFA '
'--allow-unresolved=_Unwind_GetDataRelBase '
'--allow-unresolved=_Unwind_GetGR '
'--allow-unresolved=_Unwind_GetIP '
'--allow-unresolved=_Unwind_GetIPInfo '
'--allow-unresolved=_Unwind_GetLanguageSpecificData '
'--allow-unresolved=_Unwind_GetRegionStart '
'--allow-unresolved=_Unwind_GetTextRelBase '
'--allow-unresolved=_Unwind_PNaClSetResult0 '
'--allow-unresolved=_Unwind_PNaClSetResult1 '
'--allow-unresolved=_Unwind_RaiseException '
'--allow-unresolved=_Unwind_Resume '
'--allow-unresolved=_Unwind_Resume_or_Rethrow '
'--allow-unresolved=_Unwind_SetGR '
'--allow-unresolved=_Unwind_SetIP}',
'BCLD_FLAGS':
'--oformat ${BCLD_OFORMAT} ' +
'${!RELOCATABLE ? --undef-sym-check ${BCLD_ALLOW_UNRESOLVED}} ' +
'${GOLD_PLUGIN_ARGS} ${LD_FLAGS}',
'RUN_BCLD': ('${LD} ${BCLD_FLAGS} ${inputs} -o ${output}'),
'CXX_EH_MODE': 'none',
'ALLOW_NEXE_BUILD_ID': '0',
'DISABLE_ABI_CHECK': '0',
'LLVM_PASSES_TO_DISABLE': '',
'RUN_PASSES_SEPARATELY': '0',
}
def AddToBCLinkFlags(*args):
env.append('LD_FLAGS', *args)
def AddToNativeFlags(*args):
env.append('LD_FLAGS_NATIVE', *args)
def AddToBothFlags(*args):
env.append('LD_FLAGS', *args)
env.append('LD_FLAGS_NATIVE', *args)
def SetLibTarget(*args):
arch = ParseTriple(args[0])
if arch != 'le32':
env.set('BCLIB_ARCH', arch)
def IsPortable():
return env.getone('BCLIB_ARCH') == ''
LDPatterns = [
( '--pnacl-allow-native', "env.set('ALLOW_NATIVE', '1')"),
( '--noirt', "env.set('USE_IRT', '0')"),
( '--pnacl-exceptions=(none|sjlj|zerocost)', "env.set('CXX_EH_MODE', $0)"),
# TODO(mseaborn): Remove "--pnacl-allow-exceptions", which is
# superseded by "--pnacl-exceptions".
( '--pnacl-allow-exceptions', "env.set('CXX_EH_MODE', 'zerocost')"),
( '(--pnacl-allow-nexe-build-id)', "env.set('ALLOW_NEXE_BUILD_ID', '1')"),
( '--pnacl-disable-abi-check', "env.set('DISABLE_ABI_CHECK', '1')"),
# "--pnacl-disable-pass" allows an ABI simplification pass to be
# disabled if it is causing problems. These passes are generally
# required for ABI-stable pexes but can be omitted when the PNaCl
# toolchain is used for producing native nexes.
( '--pnacl-disable-pass=(.+)', "env.append('LLVM_PASSES_TO_DISABLE', $0)"),
( '--pnacl-run-passes-separately', "env.set('RUN_PASSES_SEPARATELY', '1')"),
( ('-target', '(.+)'), SetLibTarget),
( ('--target=(.+)'), SetLibTarget),
( '-o(.+)', "env.set('OUTPUT', pathtools.normalize($0))"),
( ('-o', '(.+)'), "env.set('OUTPUT', pathtools.normalize($0))"),
( ('--output', '(.+)'), "env.set('OUTPUT', pathtools.normalize($0))"),
( '-static', "env.set('STATIC', '1')"),
( '-nostdlib', "env.set('USE_STDLIB', '0')"),
( '-r', "env.set('RELOCATABLE', '1')"),
( '-relocatable', "env.set('RELOCATABLE', '1')"),
( '-i', "env.set('RELOCATABLE', '1')"),
( ('-L', '(.+)'),
"env.append('SEARCH_DIRS_USER', pathtools.normalize($0))\n"),
( '-L(.+)',
"env.append('SEARCH_DIRS_USER', pathtools.normalize($0))\n"),
( ('--library-path', '(.+)'),
"env.append('SEARCH_DIRS_USER', pathtools.normalize($0))\n"),
# -rpath and -rpath-link are only relevant to dynamic linking.
# Ignore them for compatibility with build scripts that expect to be
# able to pass them.
( ('(-rpath)','(.*)'), ""),
( ('(-rpath)=(.*)'), ""),
( ('(-rpath-link)','(.*)'), ""),
( ('(-rpath-link)=(.*)'), ""),
# This overrides the builtin linker script.
( ('(-T)', '(.*)'), AddToNativeFlags),
# TODO(pdox): Allow setting an alternative _start symbol in bitcode
( ('(-e)','(.*)'), AddToBothFlags),
# TODO(pdox): Support GNU versioning.
( '(--version-script=.*)', ""),
# Flags to pass to the native linker.
( '-Wn,(.*)', "env.append('LD_FLAGS_NATIVE', *($0.split(',')))"),
( ('(-Ttext-segment=.*)'), AddToNativeFlags),
( ('(-Trodata-segment=.*)'), AddToNativeFlags),
( ('(--section-start)', '(.+)'), AddToNativeFlags),
( ('(--build-id)'), AddToNativeFlags),
# Flags to pass to translate
( '-Wt,(.*)', "env.append('TRANSLATE_FLAGS_USER', *($0.split(',')))"),
# NOTE: -export-dynamic doesn't actually do anything to the bitcode link
# right now. This is just in case we do want to record that in metadata
# eventually, and have that influence the native linker flags.
( '(-export-dynamic)', AddToBCLinkFlags),
( '-?-soname=(.*)', "env.set('SONAME', $0)"),
( ('-?-soname', '(.*)'), "env.set('SONAME', $0)"),
( '(-M)', AddToBCLinkFlags),
( '(--print-map)', AddToBCLinkFlags),
( '(-t)', AddToBCLinkFlags),
( '(--trace)', AddToBCLinkFlags),
( ('(-y)','(.*)'), AddToBCLinkFlags),
( ('(-defsym)','(.*)'), AddToBCLinkFlags),
( '-melf_nacl', "env.set('ARCH', 'X8632')"),
( ('-m','elf_nacl'), "env.set('ARCH', 'X8632')"),
( '-melf64_nacl', "env.set('ARCH', 'X8664')"),
( ('-m','elf64_nacl'), "env.set('ARCH', 'X8664')"),
( '-marmelf_nacl', "env.set('ARCH', 'ARM')"),
( ('-m','armelf_nacl'), "env.set('ARCH', 'ARM')"),
( '-mmipselelf_nacl', "env.set('ARCH', 'MIPS32')"),
( ('-m','mipselelf_nacl'), "env.set('ARCH', 'MIPS32')"),
( ('(-?-wrap)', '(.+)'), AddToBCLinkFlags),
( ('(-?-wrap=.+)'), AddToBCLinkFlags),
# NOTE: For scons tests, the code generation fPIC flag is used with pnacl-ld.
( '-fPIC', "env.set('PIC', '1')"),
# This controls LTO optimization.
# opt does not support -Os but internally it is identical to -O2
# opt also does not support -O4 but -O4 is how you ask clang for LTO, so we
# can support it as well
( '-Os', "env.set('OPT_LEVEL', '2')"),
( '-O([0-3])', "env.set('OPT_LEVEL', $0)"),
( '-O([0-9]+)', "env.set('OPT_LEVEL', '3')"),
( '(-translate-fast)', "env.append('TRANSLATE_FLAGS', $0)"),
( '-s', "env.set('STRIP_MODE', 'all')"),
( '--strip-all', "env.set('STRIP_MODE', 'all')"),
( '-S', "env.set('STRIP_MODE', 'debug')"),
( '--strip-debug', "env.set('STRIP_MODE', 'debug')"),
( '-g', ""),
# Inputs and options that need to be kept in order
( '(-l.*)', "env.append('INPUTS', $0)"),
( ('(-l)','(.*)'), "env.append('INPUTS', $0+$1)"),
( ('--library', '(.*)'), "env.append('INPUTS', '-l'+$0)"),
( '(--no-as-needed)', "env.append('INPUTS', $0)"),
( '(--as-needed)', "env.append('INPUTS', $0)"),
( '(--start-group)', "env.append('INPUTS', $0)"),
( '(--end-group)', "env.append('INPUTS', $0)"),
( '(-Bstatic)', "env.append('INPUTS', $0)"),
( '(-Bdynamic)', "env.append('INPUTS', $0)"),
( '(--(no-)?whole-archive)', "env.append('INPUTS', $0)"),
( '(--undefined=.*)', "env.append('INPUTS', $0)"),
( ('(-u)','(.*)'), "env.append('INPUTS', $0+$1)"),
( '(-u.*)', "env.append('INPUTS', $0)"),
( '(-.*)', UnrecognizedOption),
( '(.*)', "env.append('INPUTS', pathtools.normalize($0))"),
]
def main(argv):
env.update(EXTRA_ENV)
ParseArgs(argv, LDPatterns)
# If the user passed -arch, then they want native output.
arch_flag_given = GetArch() is not None
# Both LD_FLAGS_NATIVE and TRANSLATE_FLAGS_USER affect
# the translation process. If they are non-empty,
# then --pnacl-allow-native must be given.
allow_native = env.getbool('ALLOW_NATIVE')
native_flags = env.get('LD_FLAGS_NATIVE') + env.get('TRANSLATE_FLAGS_USER')
if len(native_flags) > 0:
if not allow_native:
flagstr = ' '.join(native_flags)
Log.Fatal('"%s" affects translation. '
'To allow, specify --pnacl-allow-native' % flagstr)
if env.getbool('ALLOW_NATIVE') and not arch_flag_given:
Log.Fatal("--pnacl-allow-native given, but translation "
"is not happening (missing -arch?)")
# Overriding the lib target uses native-flavored bitcode libs rather than the
# portable bitcode libs. It is currently only tested/supported for
# building the IRT.
if not IsPortable():
env.set('BASE_USR', "${BASE_USR_ARCH}")
env.set('BASE_LIB', "${BASE_LIB_ARCH}")
if env.getbool('RELOCATABLE'):
env.set('STATIC', '0')
inputs = env.get('INPUTS')
output = env.getone('OUTPUT')
if output == '':
output = pathtools.normalize('a.out')
if not arch_flag_given:
# If -arch is not given, assume X86-32.
# This is because gold requires an arch (even for bitcode linking).
SetArch('X8632')
assert(GetArch() is not None)
inputs = FixPrivateLibs(inputs)
# Expand all parameters
# This resolves -lfoo into actual filenames,
# and expands linker scripts into command-line arguments.
inputs = ldtools.ExpandInputs(inputs,
env.get('SEARCH_DIRS'),
env.getbool('STATIC'),
# Once all glibc bitcode link is purely
# bitcode (e.g., even libc_nonshared.a)
# we may be able to restrict this more.
# This is also currently used by
# pnacl_generate_pexe=0 with glibc,
# for user libraries.
ldtools.LibraryTypes.ANY)
# Make sure the inputs have matching arch.
CheckInputsArch(inputs)
regular_inputs, native_objects = SplitLinkLine(inputs)
if env.getbool('RELOCATABLE'):
bitcode_type = 'po'
native_type = 'o'
else:
bitcode_type = 'pexe'
native_type = 'nexe'
if native_objects and not allow_native:
argstr = ' '.join(native_objects)
Log.Fatal("Native objects '%s' detected in the link. "
"To allow, specify --pnacl-allow-native" % argstr)
tng = TempNameGen([], output)
# Do the bitcode link.
if HasBitcodeInputs(inputs):
chain = DriverChain(inputs, output, tng)
chain.add(LinkBC, 'pre_opt.' + bitcode_type)
# Some ABI simplification passes assume the whole program is
# available (e.g. -expand-varargs, -nacl-expand-ctors and
# -nacl-expand-tls). While we could try running a subset of
# simplification passes when linking native objects, we don't
# do this because it complicates testing. For example,
# it requires '-expand-constant-expr' to be able to handle
# 'landingpad' instructions.
# However, if we aren't using biased bitcode, then at least -expand-byval
# must be run to work with the PPAPI shim calling convention, and
# -expand-varargs is needed because after LLVM 3.5 the x86-32 backend does
# not expand the llvm.va_arg intrinsic correctly.
# (see https://code.google.com/p/nativeclient/issues/detail?id=3913#c24)
abi_simplify = (env.getbool('STATIC') and
len(native_objects) == 0 and
env.getone('CXX_EH_MODE') != 'zerocost' and
not env.getbool('ALLOW_NEXE_BUILD_ID') and
IsPortable())
still_need_expand_byval = IsPortable()
still_need_expand_varargs = (still_need_expand_byval and
len(native_objects) == 0)
# A list of groups of args. Each group should contain a pass to run
# along with relevant flags that go with that pass.
opt_args = []
if abi_simplify:
pre_simplify = ['-pnacl-abi-simplify-preopt']
if env.getone('CXX_EH_MODE') == 'sjlj':
pre_simplify += ['-enable-pnacl-sjlj-eh']
else:
assert env.getone('CXX_EH_MODE') == 'none'
opt_args.append(pre_simplify)
elif env.getone('CXX_EH_MODE') != 'zerocost':
# '-lowerinvoke' prevents use of C++ exception handling, which
# is not yet supported in the PNaCl ABI. '-simplifycfg' removes
# landingpad blocks made unreachable by '-lowerinvoke'.
#
# We run this in order to remove 'resume' instructions,
# otherwise these are translated to calls to _Unwind_Resume(),
# which will not be available at native link time.
opt_args.append(['-lowerinvoke', '-simplifycfg'])
if env.getone('OPT_LEVEL') != '' and env.getone('OPT_LEVEL') != '0':
opt_args.append(env.get('OPT_FLAGS'))
if env.getone('STRIP_MODE') != 'none':
opt_args.append(env.get('STRIP_FLAGS'))
if abi_simplify:
post_simplify = ['-pnacl-abi-simplify-postopt']
if not env.getbool('DISABLE_ABI_CHECK'):
post_simplify += [
'-verify-pnaclabi-module',
'-verify-pnaclabi-functions',
# A flag for the above -verify-pnaclabi-* passes.
'-pnaclabi-allow-debug-metadata']
opt_args.append(post_simplify)
elif still_need_expand_byval:
# We may still need -expand-byval to match the PPAPI shim
# calling convention.
opt_args.append(['-expand-byval'])
if still_need_expand_varargs:
opt_args.append(['-expand-varargs'])
if len(opt_args) != 0:
if env.getbool('RUN_PASSES_SEPARATELY'):
for i, group in enumerate(opt_args):
chain.add(DoLLVMPasses(group),
'simplify_%d.%s' % (i, bitcode_type))
else:
flattened_opt_args = [flag for group in opt_args for flag in group]
chain.add(DoLLVMPasses(flattened_opt_args),
'simplify_and_opt.' + bitcode_type)
else:
chain = DriverChain('', output, tng)
# If -arch is also specified, invoke pnacl-translate afterwards.
if arch_flag_given:
env.set('NATIVE_OBJECTS', *native_objects)
chain.add(DoTranslate, native_type)
chain.run()
if bitcode_type == 'pexe' and not arch_flag_given:
# Mark .pexe files as executable.
# Some versions of 'configure' expect this.
SetExecutableMode(output)
return 0
def FixPrivateLibs(user_libs):
"""If not using the IRT or if private libraries are used:
- Place private libraries that can coexist before their public
equivalent (keep both);
- Replace public libraries that can't coexist with their private
equivalent.
This occurs before path resolution (important because public/private
libraries aren't always colocated) and assumes that -l:libfoo.a syntax
isn't used by the driver for relevant libraries.
"""
special_libs = {
# Public library name: (private library name, can coexist?)
'-lnacl': ('-lnacl_sys_private', True),
'-lpthread': ('-lpthread_private', False),
}
private_libs = [v[0] for v in special_libs.values()]
public_libs = special_libs.keys()
private_lib_for = lambda user_lib: special_libs[user_lib][0]
can_coexist = lambda user_lib: special_libs[user_lib][1]
no_irt = not env.getbool('USE_IRT')
uses_private_libs = set(user_libs) & set(private_libs)
if not (no_irt or uses_private_libs):
return user_libs
result_libs = []
for user_lib in user_libs:
if user_lib in public_libs:
result_libs.append(private_lib_for(user_lib))
if can_coexist(user_lib):
result_libs.append(user_lib)
else:
result_libs.append(user_lib)
return result_libs
def SplitLinkLine(inputs):
""" Split the input list into bitcode and native objects (.o, .a)
"""
normal = []
native = []
# Group flags need special handling because they need to go into the right
# list based on the type of the inputs in the group. If the group has both
# native and bitcode files (which is unfortunately the case for
# irt_browser_lib) then the group flags need to go in both lists.
if '--start-group' in inputs:
start_group = inputs.index('--start-group')
# Start with the inputs before the first group
normal, native = SplitLinkLine(inputs[:start_group])
try:
end_group = inputs.index('--end-group')
except ValueError:
Log.Fatal("Found --start-group without matching --end-group")
# Add the contents of the group together with the --{start,end}-group flags
norm_group, native_group = SplitLinkLine(inputs[start_group + 1:end_group])
if len(norm_group) > 0:
normal.extend(['--start-group'] + norm_group + ['--end-group'])
if len(native_group) > 0:
native.extend(['--start-group'] + native_group + ['--end-group'])
# Add the inputs after the first group
norm_last, native_last = SplitLinkLine(inputs[end_group + 1:])
return normal + norm_last, native + native_last
# If no groups, split the inputs based on their type.
for f in inputs:
if ldtools.IsFlag(f):
normal.append(f)
elif filetype.IsNativeArchive(f) or filetype.IsNativeObject(f):
native.append(f)
else:
normal.append(f)
return (normal, native)
def HasBitcodeInputs(inputs):
for f in inputs:
if ldtools.IsFlag(f):
continue
elif filetype.IsLLVMBitcode(f) or filetype.IsBitcodeArchive(f):
return True
return False
def CheckInputsArch(inputs):
count = 0
for f in inputs:
if ldtools.IsFlag(f):
continue
elif filetype.IsLLVMBitcode(f) or filetype.IsBitcodeArchive(f):
pass
elif filetype.IsNative(f):
ArchMerge(f, True)
else:
Log.Fatal("%s: Unexpected type of file for linking (%s)",
pathtools.touser(f), filetype.FileType(f))
count += 1
if count == 0:
Log.Fatal("no input files")
def DoLLVMPasses(pass_list):
def Func(infile, outfile):
filtered_list = [pass_option for pass_option in pass_list
if pass_option not in env.get('LLVM_PASSES_TO_DISABLE')]
RunDriver('pnacl-opt', filtered_list + [infile, '-o', outfile])
return Func
def DoTranslate(infile, outfile):
args = env.get('TRANSLATE_FLAGS')
args += ['-Wl,'+s for s in env.get('LD_FLAGS_NATIVE')]
if infile:
args += [infile]
args += ['-Wl,'+s if ldtools.IsFlag(s) else s
for s in env.get('NATIVE_OBJECTS')]
args += ['-o', outfile]
RunDriver('pnacl-translate', args)
def LinkBC(inputs, output):
'''Input: a bunch of bc/o/lib input files
Output: a combined & optimized bitcode file
'''
# Produce combined bitcode file
RunWithEnv('${RUN_BCLD}',
inputs=inputs,
output=output)
def get_help(unused_argv):
return """Usage: pnacl-ld [options] <input files> -o <output>
Bitcode linker for PNaCl. Similar to the binutils "ld" tool,
but links bitcode instead of native code. Supports many of the
"ld" flags. Below are a subset of them.
OPTIONS:
-o <file> Set output file name
-l LIBNAME Search for library LIBNAME
-L DIRECTORY, --library-path DIRECTORY
Add DIRECTORY to library search path
-r, -relocatable Generate relocatable output
-O<opt-level> Optimize output file
-M, --print-map Print map file on standard output
--whole-archive Include all objects from following archives
--no-whole-archive Turn off --whole-archive
-s, --strip-all Strip all symbols
-S, --strip-debug Strip debugging symbols
-u SYM, --undefined=SYM Start with undefined reference to SYM
-help | -h Output this help.
"""
| bsd-3-clause |
public-ink/public-ink | server/appengine/lib/mpl_toolkits/mplot3d/art3d.py | 10 | 25411 | # art3d.py, original mplot3d version by John Porter
# Parts rewritten by Reinier Heeres <[email protected]>
# Minor additions by Ben Axelrod <[email protected]>
'''
Module containing 3D artist code and functions to convert 2D
artists into 3D versions which can be added to an Axes3D.
'''
from __future__ import (absolute_import, division, print_function,
unicode_literals)
import six
from six.moves import zip
from matplotlib import lines, text as mtext, path as mpath, colors as mcolors
from matplotlib import artist
from matplotlib.collections import Collection, LineCollection, \
PolyCollection, PatchCollection, PathCollection
from matplotlib.cm import ScalarMappable
from matplotlib.patches import Patch
from matplotlib.colors import Normalize
from matplotlib.cbook import iterable
import warnings
import numpy as np
import math
from . import proj3d
def norm_angle(a):
"""Return angle between -180 and +180"""
a = (a + 360) % 360
if a > 180:
a = a - 360
return a
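# For example: norm_angle(270) == -90, norm_angle(-190) == 170 and
# norm_angle(45) == 45.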
def norm_text_angle(a):
"""Return angle between -90 and +90"""
a = (a + 180) % 180
if a > 90:
a = a - 180
return a
def get_dir_vector(zdir):
if zdir == 'x':
return np.array((1, 0, 0))
elif zdir == 'y':
return np.array((0, 1, 0))
elif zdir == 'z':
return np.array((0, 0, 1))
elif zdir is None:
return np.array((0, 0, 0))
elif iterable(zdir) and len(zdir) == 3:
return zdir
else:
raise ValueError("'x', 'y', 'z', None or vector of length 3 expected")
class Text3D(mtext.Text):
'''
Text object with 3D position and (in the future) direction.
'''
def __init__(self, x=0, y=0, z=0, text='', zdir='z', **kwargs):
'''
*x*, *y*, *z* Position of text
*text* Text string to display
*zdir* Direction of text
Keyword arguments are passed onto :func:`~matplotlib.text.Text`.
'''
mtext.Text.__init__(self, x, y, text, **kwargs)
self.set_3d_properties(z, zdir)
def set_3d_properties(self, z=0, zdir='z'):
x, y = self.get_position()
self._position3d = np.array((x, y, z))
self._dir_vec = get_dir_vector(zdir)
self.stale = True
def draw(self, renderer):
proj = proj3d.proj_trans_points([self._position3d, \
self._position3d + self._dir_vec], renderer.M)
dx = proj[0][1] - proj[0][0]
dy = proj[1][1] - proj[1][0]
if dx==0. and dy==0.:
# atan2 raises ValueError: math domain error on 0,0
angle = 0.
else:
angle = math.degrees(math.atan2(dy, dx))
self.set_position((proj[0][0], proj[1][0]))
self.set_rotation(norm_text_angle(angle))
mtext.Text.draw(self, renderer)
self.stale = False
def text_2d_to_3d(obj, z=0, zdir='z'):
"""Convert a Text to a Text3D object."""
obj.__class__ = Text3D
obj.set_3d_properties(z, zdir)
class Line3D(lines.Line2D):
'''
3D line object.
'''
def __init__(self, xs, ys, zs, *args, **kwargs):
'''
Keyword arguments are passed onto :func:`~matplotlib.lines.Line2D`.
'''
lines.Line2D.__init__(self, [], [], *args, **kwargs)
self._verts3d = xs, ys, zs
def set_3d_properties(self, zs=0, zdir='z'):
xs = self.get_xdata()
ys = self.get_ydata()
try:
# If *zs* is a list or array, then this will fail and
# just proceed to juggle_axes().
zs = float(zs)
zs = [zs for x in xs]
except TypeError:
pass
self._verts3d = juggle_axes(xs, ys, zs, zdir)
self.stale = True
def draw(self, renderer):
xs3d, ys3d, zs3d = self._verts3d
xs, ys, zs = proj3d.proj_transform(xs3d, ys3d, zs3d, renderer.M)
self.set_data(xs, ys)
lines.Line2D.draw(self, renderer)
self.stale = False
def line_2d_to_3d(line, zs=0, zdir='z'):
'''
Convert a 2D line to 3D.
'''
line.__class__ = Line3D
line.set_3d_properties(zs, zdir)
def path_to_3d_segment(path, zs=0, zdir='z'):
'''Convert a path to a 3D segment.'''
if not iterable(zs):
zs = np.ones(len(path)) * zs
seg = []
pathsegs = path.iter_segments(simplify=False, curves=False)
for (((x, y), code), z) in zip(pathsegs, zs):
seg.append((x, y, z))
seg3d = [juggle_axes(x, y, z, zdir) for (x, y, z) in seg]
return seg3d
def paths_to_3d_segments(paths, zs=0, zdir='z'):
'''
Convert paths from a collection object to 3D segments.
'''
if not iterable(zs):
zs = np.ones(len(paths)) * zs
segments = []
for path, pathz in zip(paths, zs):
segments.append(path_to_3d_segment(path, pathz, zdir))
return segments
def path_to_3d_segment_with_codes(path, zs=0, zdir='z'):
'''Convert a path to a 3D segment with path codes.'''
if not iterable(zs):
zs = np.ones(len(path)) * zs
seg = []
codes = []
pathsegs = path.iter_segments(simplify=False, curves=False)
for (((x, y), code), z) in zip(pathsegs, zs):
seg.append((x, y, z))
codes.append(code)
seg3d = [juggle_axes(x, y, z, zdir) for (x, y, z) in seg]
return seg3d, codes
def paths_to_3d_segments_with_codes(paths, zs=0, zdir='z'):
'''
Convert paths from a collection object to 3D segments with path codes.
'''
if not iterable(zs):
zs = np.ones(len(paths)) * zs
segments = []
codes_list = []
for path, pathz in zip(paths, zs):
segs, codes = path_to_3d_segment_with_codes(path, pathz, zdir)
segments.append(segs)
codes_list.append(codes)
return segments, codes_list
class Line3DCollection(LineCollection):
'''
A collection of 3D lines.
'''
def __init__(self, segments, *args, **kwargs):
'''
Keyword arguments are passed onto :func:`~matplotlib.collections.LineCollection`.
'''
LineCollection.__init__(self, segments, *args, **kwargs)
def set_sort_zpos(self, val):
'''Set the position to use for z-sorting.'''
self._sort_zpos = val
self.stale = True
def set_segments(self, segments):
'''
Set 3D segments
'''
self._segments3d = np.asanyarray(segments)
LineCollection.set_segments(self, [])
def do_3d_projection(self, renderer):
'''
Project the points according to renderer matrix.
'''
xyslist = [
proj3d.proj_trans_points(points, renderer.M) for points in
self._segments3d]
segments_2d = [list(zip(xs, ys)) for (xs, ys, zs) in xyslist]
LineCollection.set_segments(self, segments_2d)
# FIXME
minz = 1e9
for (xs, ys, zs) in xyslist:
minz = min(minz, min(zs))
return minz
def draw(self, renderer, project=False):
if project:
self.do_3d_projection(renderer)
LineCollection.draw(self, renderer)
def line_collection_2d_to_3d(col, zs=0, zdir='z'):
"""Convert a LineCollection to a Line3DCollection object."""
segments3d = paths_to_3d_segments(col.get_paths(), zs, zdir)
col.__class__ = Line3DCollection
col.set_segments(segments3d)
class Patch3D(Patch):
'''
3D patch object.
'''
def __init__(self, *args, **kwargs):
zs = kwargs.pop('zs', [])
zdir = kwargs.pop('zdir', 'z')
Patch.__init__(self, *args, **kwargs)
self.set_3d_properties(zs, zdir)
def set_3d_properties(self, verts, zs=0, zdir='z'):
if not iterable(zs):
zs = np.ones(len(verts)) * zs
self._segment3d = [juggle_axes(x, y, z, zdir) \
for ((x, y), z) in zip(verts, zs)]
self._facecolor3d = Patch.get_facecolor(self)
def get_path(self):
return self._path2d
def get_facecolor(self):
return self._facecolor2d
def do_3d_projection(self, renderer):
s = self._segment3d
xs, ys, zs = list(zip(*s))
vxs, vys,vzs, vis = proj3d.proj_transform_clip(xs, ys, zs, renderer.M)
self._path2d = mpath.Path(list(zip(vxs, vys)))
# FIXME: coloring
self._facecolor2d = self._facecolor3d
return min(vzs)
def draw(self, renderer):
Patch.draw(self, renderer)
class PathPatch3D(Patch3D):
'''
3D PathPatch object.
'''
def __init__(self, path, **kwargs):
zs = kwargs.pop('zs', [])
zdir = kwargs.pop('zdir', 'z')
Patch.__init__(self, **kwargs)
self.set_3d_properties(path, zs, zdir)
def set_3d_properties(self, path, zs=0, zdir='z'):
Patch3D.set_3d_properties(self, path.vertices, zs=zs, zdir=zdir)
self._code3d = path.codes
def do_3d_projection(self, renderer):
s = self._segment3d
xs, ys, zs = list(zip(*s))
vxs, vys,vzs, vis = proj3d.proj_transform_clip(xs, ys, zs, renderer.M)
self._path2d = mpath.Path(list(zip(vxs, vys)), self._code3d)
# FIXME: coloring
self._facecolor2d = self._facecolor3d
return min(vzs)
def get_patch_verts(patch):
"""Return a list of vertices for the path of a patch."""
trans = patch.get_patch_transform()
path = patch.get_path()
polygons = path.to_polygons(trans)
if len(polygons):
return polygons[0]
else:
return []
def patch_2d_to_3d(patch, z=0, zdir='z'):
"""Convert a Patch to a Patch3D object."""
verts = get_patch_verts(patch)
patch.__class__ = Patch3D
patch.set_3d_properties(verts, z, zdir)
def pathpatch_2d_to_3d(pathpatch, z=0, zdir='z'):
"""Convert a PathPatch to a PathPatch3D object."""
path = pathpatch.get_path()
trans = pathpatch.get_patch_transform()
mpath = trans.transform_path(path)
pathpatch.__class__ = PathPatch3D
pathpatch.set_3d_properties(mpath, z, zdir)
class Patch3DCollection(PatchCollection):
'''
A collection of 3D patches.
'''
def __init__(self, *args, **kwargs):
"""
Create a collection of flat 3D patches with its normal vector
pointed in *zdir* direction, and located at *zs* on the *zdir*
axis. 'zs' can be a scalar or an array-like of the same length as
the number of patches in the collection.
Constructor arguments are the same as for
:class:`~matplotlib.collections.PatchCollection`. In addition,
keywords *zs=0* and *zdir='z'* are available.
Also, the keyword argument "depthshade" is available to
indicate whether or not to shade the patches in order to
give the appearance of depth (default is *True*).
This is typically desired in scatter plots.
"""
zs = kwargs.pop('zs', 0)
zdir = kwargs.pop('zdir', 'z')
self._depthshade = kwargs.pop('depthshade', True)
PatchCollection.__init__(self, *args, **kwargs)
self.set_3d_properties(zs, zdir)
def set_sort_zpos(self, val):
'''Set the position to use for z-sorting.'''
self._sort_zpos = val
self.stale = True
def set_3d_properties(self, zs, zdir):
# Force the collection to initialize the face and edgecolors
# just in case it is a scalarmappable with a colormap.
self.update_scalarmappable()
offsets = self.get_offsets()
if len(offsets) > 0:
xs, ys = list(zip(*offsets))
else:
xs = []
ys = []
self._offsets3d = juggle_axes(xs, ys, np.atleast_1d(zs), zdir)
self._facecolor3d = self.get_facecolor()
self._edgecolor3d = self.get_edgecolor()
self.stale = True
def do_3d_projection(self, renderer):
xs, ys, zs = self._offsets3d
vxs, vys, vzs, vis = proj3d.proj_transform_clip(xs, ys, zs, renderer.M)
fcs = (zalpha(self._facecolor3d, vzs) if self._depthshade else
self._facecolor3d)
fcs = mcolors.to_rgba_array(fcs, self._alpha)
self.set_facecolors(fcs)
ecs = (zalpha(self._edgecolor3d, vzs) if self._depthshade else
self._edgecolor3d)
ecs = mcolors.to_rgba_array(ecs, self._alpha)
self.set_edgecolors(ecs)
PatchCollection.set_offsets(self, list(zip(vxs, vys)))
if vzs.size > 0:
return min(vzs)
else:
return np.nan
class Path3DCollection(PathCollection):
'''
A collection of 3D paths.
'''
def __init__(self, *args, **kwargs):
"""
Create a collection of flat 3D paths with its normal vector
pointed in *zdir* direction, and located at *zs* on the *zdir*
axis. 'zs' can be a scalar or an array-like of the same length as
the number of paths in the collection.
Constructor arguments are the same as for
:class:`~matplotlib.collections.PathCollection`. In addition,
keywords *zs=0* and *zdir='z'* are available.
Also, the keyword argument "depthshade" is available to
indicate whether or not to shade the patches in order to
give the appearance of depth (default is *True*).
This is typically desired in scatter plots.
"""
zs = kwargs.pop('zs', 0)
zdir = kwargs.pop('zdir', 'z')
self._depthshade = kwargs.pop('depthshade', True)
PathCollection.__init__(self, *args, **kwargs)
self.set_3d_properties(zs, zdir)
def set_sort_zpos(self, val):
'''Set the position to use for z-sorting.'''
self._sort_zpos = val
self.stale = True
def set_3d_properties(self, zs, zdir):
# Force the collection to initialize the face and edgecolors
# just in case it is a scalarmappable with a colormap.
self.update_scalarmappable()
offsets = self.get_offsets()
if len(offsets) > 0:
xs, ys = list(zip(*offsets))
else:
xs = []
ys = []
self._offsets3d = juggle_axes(xs, ys, np.atleast_1d(zs), zdir)
self._facecolor3d = self.get_facecolor()
self._edgecolor3d = self.get_edgecolor()
self.stale = True
def do_3d_projection(self, renderer):
xs, ys, zs = self._offsets3d
vxs, vys, vzs, vis = proj3d.proj_transform_clip(xs, ys, zs, renderer.M)
fcs = (zalpha(self._facecolor3d, vzs) if self._depthshade else
self._facecolor3d)
fcs = mcolors.to_rgba_array(fcs, self._alpha)
self.set_facecolors(fcs)
ecs = (zalpha(self._edgecolor3d, vzs) if self._depthshade else
self._edgecolor3d)
ecs = mcolors.to_rgba_array(ecs, self._alpha)
self.set_edgecolors(ecs)
PathCollection.set_offsets(self, list(zip(vxs, vys)))
if vzs.size > 0 :
return min(vzs)
else :
return np.nan
def patch_collection_2d_to_3d(col, zs=0, zdir='z', depthshade=True):
"""
Convert a :class:`~matplotlib.collections.PatchCollection` into a
:class:`Patch3DCollection` object
(or a :class:`~matplotlib.collections.PathCollection` into a
:class:`Path3DCollection` object).
Keywords:
    *zs*        The location or locations to place the patches in the
collection along the *zdir* axis. Defaults to 0.
*zdir* The axis in which to place the patches. Default is "z".
*depthshade* Whether to shade the patches to give a sense of depth.
Defaults to *True*.
"""
if isinstance(col, PathCollection):
col.__class__ = Path3DCollection
elif isinstance(col, PatchCollection):
col.__class__ = Patch3DCollection
col._depthshade = depthshade
col.set_3d_properties(zs, zdir)
class Poly3DCollection(PolyCollection):
'''
A collection of 3D polygons.
'''
def __init__(self, verts, *args, **kwargs):
'''
Create a Poly3DCollection.
*verts* should contain 3D coordinates.
Keyword arguments:
zsort, see set_zsort for options.
Note that this class does a bit of magic with the _facecolors
and _edgecolors properties.
'''
zsort = kwargs.pop('zsort', True)
PolyCollection.__init__(self, verts, *args, **kwargs)
self.set_zsort(zsort)
self._codes3d = None
_zsort_functions = {
'average': np.average,
'min': np.min,
'max': np.max,
}
def set_zsort(self, zsort):
'''
Set z-sorting behaviour:
boolean: if True use default 'average'
string: 'average', 'min' or 'max'
'''
if zsort is True:
zsort = 'average'
if zsort is not False:
if zsort in self._zsort_functions:
zsortfunc = self._zsort_functions[zsort]
else:
return False
else:
zsortfunc = None
self._zsort = zsort
self._sort_zpos = None
self._zsortfunc = zsortfunc
self.stale = True
def get_vector(self, segments3d):
"""Optimize points for projection"""
si = 0
ei = 0
segis = []
points = []
for p in segments3d:
points.extend(p)
ei = si+len(p)
segis.append((si, ei))
si = ei
if len(segments3d) > 0 :
xs, ys, zs = list(zip(*points))
else :
# We need this so that we can skip the bad unpacking from zip()
xs, ys, zs = [], [], []
ones = np.ones(len(xs))
self._vec = np.array([xs, ys, zs, ones])
self._segis = segis
def set_verts(self, verts, closed=True):
'''Set 3D vertices.'''
self.get_vector(verts)
# 2D verts will be updated at draw time
PolyCollection.set_verts(self, [], closed)
def set_verts_and_codes(self, verts, codes):
'''Sets 3D vertices with path codes'''
# set vertices with closed=False to prevent PolyCollection from
# setting path codes
self.set_verts(verts, closed=False)
# and set our own codes instead.
self._codes3d = codes
def set_3d_properties(self):
# Force the collection to initialize the face and edgecolors
# just in case it is a scalarmappable with a colormap.
self.update_scalarmappable()
self._sort_zpos = None
self.set_zsort(True)
self._facecolors3d = PolyCollection.get_facecolors(self)
self._edgecolors3d = PolyCollection.get_edgecolors(self)
self._alpha3d = PolyCollection.get_alpha(self)
self.stale = True
def set_sort_zpos(self,val):
'''Set the position to use for z-sorting.'''
self._sort_zpos = val
self.stale = True
def do_3d_projection(self, renderer):
'''
Perform the 3D projection for this object.
'''
# FIXME: This may no longer be needed?
if self._A is not None:
self.update_scalarmappable()
self._facecolors3d = self._facecolors
txs, tys, tzs = proj3d.proj_transform_vec(self._vec, renderer.M)
xyzlist = [(txs[si:ei], tys[si:ei], tzs[si:ei])
for si, ei in self._segis]
# This extra fuss is to re-order face / edge colors
cface = self._facecolors3d
cedge = self._edgecolors3d
if len(cface) != len(xyzlist):
cface = cface.repeat(len(xyzlist), axis=0)
if len(cedge) != len(xyzlist):
if len(cedge) == 0:
cedge = cface
else:
cedge = cedge.repeat(len(xyzlist), axis=0)
# if required sort by depth (furthest drawn first)
if self._zsort:
indices = range(len(xyzlist))
z_segments_2d = [(self._zsortfunc(zs), list(zip(xs, ys)), fc, ec,
idx) for (xs, ys, zs), fc, ec, idx in
zip(xyzlist, cface, cedge, indices)]
z_segments_2d.sort(key=lambda x: x[0], reverse=True)
else:
raise ValueError("whoops")
segments_2d = [s for z, s, fc, ec, idx in z_segments_2d]
if self._codes3d is not None:
codes = [self._codes3d[idx] for z, s, fc, ec, idx in z_segments_2d]
PolyCollection.set_verts_and_codes(self, segments_2d, codes)
else:
PolyCollection.set_verts(self, segments_2d)
self._facecolors2d = [fc for z, s, fc, ec, idx in z_segments_2d]
if len(self._edgecolors3d) == len(cface):
self._edgecolors2d = [ec for z, s, fc, ec, idx in z_segments_2d]
else:
self._edgecolors2d = self._edgecolors3d
# Return zorder value
if self._sort_zpos is not None:
zvec = np.array([[0], [0], [self._sort_zpos], [1]])
ztrans = proj3d.proj_transform_vec(zvec, renderer.M)
return ztrans[2][0]
elif tzs.size > 0 :
# FIXME: Some results still don't look quite right.
# In particular, examine contourf3d_demo2.py
# with az = -54 and elev = -45.
return np.min(tzs)
else :
return np.nan
def set_facecolor(self, colors):
PolyCollection.set_facecolor(self, colors)
self._facecolors3d = PolyCollection.get_facecolor(self)
set_facecolors = set_facecolor
def set_edgecolor(self, colors):
PolyCollection.set_edgecolor(self, colors)
self._edgecolors3d = PolyCollection.get_edgecolor(self)
set_edgecolors = set_edgecolor
def set_alpha(self, alpha):
"""
        Set the alpha transparencies of the collection.  *alpha* must be
a float or *None*.
ACCEPTS: float or None
"""
if alpha is not None:
try:
float(alpha)
except TypeError:
raise TypeError('alpha must be a float or None')
artist.Artist.set_alpha(self, alpha)
try:
self._facecolors = mcolors.to_rgba_array(
self._facecolors3d, self._alpha)
except (AttributeError, TypeError, IndexError):
pass
try:
self._edgecolors = mcolors.to_rgba_array(
self._edgecolors3d, self._alpha)
except (AttributeError, TypeError, IndexError):
pass
self.stale = True
def get_facecolors(self):
return self._facecolors2d
get_facecolor = get_facecolors
def get_edgecolors(self):
return self._edgecolors2d
get_edgecolor = get_edgecolors
def draw(self, renderer):
return Collection.draw(self, renderer)
def poly_collection_2d_to_3d(col, zs=0, zdir='z'):
"""Convert a PolyCollection to a Poly3DCollection object."""
segments_3d, codes = paths_to_3d_segments_with_codes(col.get_paths(),
zs, zdir)
col.__class__ = Poly3DCollection
col.set_verts_and_codes(segments_3d, codes)
col.set_3d_properties()
def juggle_axes(xs, ys, zs, zdir):
"""
Reorder coordinates so that 2D xs, ys can be plotted in the plane
orthogonal to zdir. zdir is normally x, y or z. However, if zdir
starts with a '-' it is interpreted as a compensation for rotate_axes.
"""
if zdir == 'x':
return zs, xs, ys
elif zdir == 'y':
return xs, zs, ys
elif zdir[0] == '-':
return rotate_axes(xs, ys, zs, zdir)
else:
return xs, ys, zs
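# For example, juggle_axes(xs, ys, zs, 'y') returns (xs, zs, ys), placing the
# original x/y data in the plane orthogonal to the y axis.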
def rotate_axes(xs, ys, zs, zdir):
"""
Reorder coordinates so that the axes are rotated with zdir along
the original z axis. Prepending the axis with a '-' does the
inverse transform, so zdir can be x, -x, y, -y, z or -z
"""
if zdir == 'x':
return ys, zs, xs
elif zdir == '-x':
return zs, xs, ys
elif zdir == 'y':
return zs, xs, ys
elif zdir == '-y':
return ys, zs, xs
else:
return xs, ys, zs
def iscolor(c):
try:
if len(c) == 4 or len(c) == 3:
if iterable(c[0]):
return False
if hasattr(c[0], '__float__'):
return True
except:
return False
return False
def get_colors(c, num):
"""Stretch the color argument to provide the required number num"""
if type(c) == type("string"):
c = mcolors.to_rgba(c)
if iscolor(c):
return [c] * num
if len(c) == num:
return c
elif iscolor(c):
return [c] * num
elif len(c) == 0: #if edgecolor or facecolor is specified as 'none'
return [[0,0,0,0]] * num
elif iscolor(c[0]):
return [c[0]] * num
else:
raise ValueError('unknown color format %s' % c)
def zalpha(colors, zs):
"""Modify the alphas of the color list according to depth"""
# FIXME: This only works well if the points for *zs* are well-spaced
# in all three dimensions. Otherwise, at certain orientations,
# the min and max zs are very close together.
# Should really normalize against the viewing depth.
colors = get_colors(colors, len(zs))
if zs.size > 0:
norm = Normalize(min(zs), max(zs))
sats = 1 - norm(zs) * 0.7
colors = [(c[0], c[1], c[2], c[3] * s) for c, s in zip(colors, sats)]
return colors
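# Illustrative sketch (not part of the original module): zalpha leaves the
# smallest z fully opaque and fades larger z values down to 30% alpha.
def _zalpha_example():
    """with zs = [0, 1, 2] the returned alphas are 1.0, 0.65 and 0.3"""
    zs = np.array([0.0, 1.0, 2.0])
    return zalpha('b', zs)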
| gpl-3.0 |
rnder/data-science-from-scratch | code-python3/gradient_descent.py | 12 | 5816 | from collections import Counter
from linear_algebra import distance, vector_subtract, scalar_multiply
from functools import reduce
import math, random
def sum_of_squares(v):
"""computes the sum of squared elements in v"""
return sum(v_i ** 2 for v_i in v)
def difference_quotient(f, x, h):
return (f(x + h) - f(x)) / h
def plot_estimated_derivative():
def square(x):
return x * x
def derivative(x):
return 2 * x
derivative_estimate = lambda x: difference_quotient(square, x, h=0.00001)
# plot to show they're basically the same
import matplotlib.pyplot as plt
x = range(-10,10)
plt.plot(x, list(map(derivative, x)), 'rx') # red x
plt.plot(x, list(map(derivative_estimate, x)), 'b+') # blue +
plt.show() # purple *, hopefully
def partial_difference_quotient(f, v, i, h):
# add h to just the i-th element of v
w = [v_j + (h if j == i else 0)
for j, v_j in enumerate(v)]
return (f(w) - f(v)) / h
def estimate_gradient(f, v, h=0.00001):
return [partial_difference_quotient(f, v, i, h)
for i, _ in enumerate(v)]
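# Illustrative sketch (not part of the original chapter code): for
# sum_of_squares the analytic gradient is [2 * v_i for v_i in v], so the
# numerical estimate returned by estimate_gradient should be very close to it.
def check_estimate_gradient(v=(1.0, -2.0, 3.0)):
    """compare the numerical gradient estimate with the analytic gradient"""
    numeric = estimate_gradient(sum_of_squares, v)
    analytic = [2 * v_i for v_i in v]
    # each pair should agree to within roughly h
    return list(zip(numeric, analytic))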
def step(v, direction, step_size):
"""move step_size in the direction from v"""
return [v_i + step_size * direction_i
for v_i, direction_i in zip(v, direction)]
def sum_of_squares_gradient(v):
return [2 * v_i for v_i in v]
def safe(f):
"""define a new function that wraps f and return it"""
def safe_f(*args, **kwargs):
try:
return f(*args, **kwargs)
except:
return float('inf') # this means "infinity" in Python
return safe_f
#
#
# minimize / maximize batch
#
#
def minimize_batch(target_fn, gradient_fn, theta_0, tolerance=0.000001):
"""use gradient descent to find theta that minimizes target function"""
step_sizes = [100, 10, 1, 0.1, 0.01, 0.001, 0.0001, 0.00001]
theta = theta_0 # set theta to initial value
target_fn = safe(target_fn) # safe version of target_fn
value = target_fn(theta) # value we're minimizing
while True:
gradient = gradient_fn(theta)
next_thetas = [step(theta, gradient, -step_size)
for step_size in step_sizes]
# choose the one that minimizes the error function
next_theta = min(next_thetas, key=target_fn)
next_value = target_fn(next_theta)
# stop if we're "converging"
if abs(value - next_value) < tolerance:
return theta
else:
theta, value = next_theta, next_value
def negate(f):
"""return a function that for any input x returns -f(x)"""
return lambda *args, **kwargs: -f(*args, **kwargs)
def negate_all(f):
"""the same when f returns a list of numbers"""
return lambda *args, **kwargs: [-y for y in f(*args, **kwargs)]
def maximize_batch(target_fn, gradient_fn, theta_0, tolerance=0.000001):
return minimize_batch(negate(target_fn),
negate_all(gradient_fn),
theta_0,
tolerance)
#
# minimize / maximize stochastic
#
def in_random_order(data):
"""generator that returns the elements of data in random order"""
indexes = [i for i, _ in enumerate(data)] # create a list of indexes
random.shuffle(indexes) # shuffle them
for i in indexes: # return the data in that order
yield data[i]
def minimize_stochastic(target_fn, gradient_fn, x, y, theta_0, alpha_0=0.01):
data = list(zip(x, y))
theta = theta_0 # initial guess
alpha = alpha_0 # initial step size
min_theta, min_value = None, float("inf") # the minimum so far
iterations_with_no_improvement = 0
# if we ever go 100 iterations with no improvement, stop
while iterations_with_no_improvement < 100:
value = sum( target_fn(x_i, y_i, theta) for x_i, y_i in data )
if value < min_value:
# if we've found a new minimum, remember it
# and go back to the original step size
min_theta, min_value = theta, value
iterations_with_no_improvement = 0
alpha = alpha_0
else:
# otherwise we're not improving, so try shrinking the step size
iterations_with_no_improvement += 1
alpha *= 0.9
# and take a gradient step for each of the data points
for x_i, y_i in in_random_order(data):
gradient_i = gradient_fn(x_i, y_i, theta)
theta = vector_subtract(theta, scalar_multiply(alpha, gradient_i))
return min_theta
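# Illustrative sketch (not from the original file): one way minimize_stochastic
# might be used to fit a line y ~ alpha + beta * x by least squares. The
# per-point error and gradient functions below are assumptions made for this
# example, not functions defined elsewhere in this module.
def example_stochastic_line_fit(x, y):
    def squared_error(x_i, y_i, theta):
        alpha, beta = theta
        return (y_i - (alpha + beta * x_i)) ** 2
    def squared_error_gradient(x_i, y_i, theta):
        alpha, beta = theta
        residual = y_i - (alpha + beta * x_i)
        return [-2 * residual,          # d/d alpha
                -2 * residual * x_i]    # d/d beta
    theta_0 = [random.random(), random.random()]   # random starting point
    return minimize_stochastic(squared_error, squared_error_gradient,
                               x, y, theta_0, alpha_0=0.01)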
def maximize_stochastic(target_fn, gradient_fn, x, y, theta_0, alpha_0=0.01):
return minimize_stochastic(negate(target_fn),
negate_all(gradient_fn),
x, y, theta_0, alpha_0)
if __name__ == "__main__":
print("using the gradient")
v = [random.randint(-10,10) for i in range(3)]
tolerance = 0.0000001
while True:
#print v, sum_of_squares(v)
gradient = sum_of_squares_gradient(v) # compute the gradient at v
next_v = step(v, gradient, -0.01) # take a negative gradient step
if distance(next_v, v) < tolerance: # stop if we're converging
break
v = next_v # continue if we're not
print("minimum v", v)
print("minimum value", sum_of_squares(v))
print()
print("using minimize_batch")
v = [random.randint(-10,10) for i in range(3)]
v = minimize_batch(sum_of_squares, sum_of_squares_gradient, v)
print("minimum v", v)
print("minimum value", sum_of_squares(v))
| unlicense |
haijieg/SFrame | oss_src/unity/python/sframe/data_structures/sarray.py | 1 | 136255 | """
This module defines the SArray class which provides the
ability to create, access and manipulate a remote scalable array object.
SArray acts similarly to pandas.Series but without indexing.
The data is immutable, homogeneous, and is stored on the GraphLab Server side.
"""
'''
Copyright (C) 2015 Dato, Inc.
All rights reserved.
This software may be modified and distributed under the terms
of the BSD license. See the LICENSE file for details.
'''
from .. import connect as _mt
from ..connect import main as glconnect
from ..cython.cy_flexible_type import pytype_from_dtype, pytype_from_array_typecode
from ..cython.cy_flexible_type import infer_type_of_list, infer_type_of_sequence
from ..cython.cy_sarray import UnitySArrayProxy
from ..cython.context import debug_trace as cython_context
from ..util import _is_non_string_iterable, _make_internal_url
from .image import Image as _Image
from .. import aggregate as _aggregate
from ..deps import numpy, HAS_NUMPY
from ..deps import pandas, HAS_PANDAS
import time
import sys
import array
import collections
import datetime
import warnings
import numbers
__all__ = ['SArray']
if sys.version_info.major > 2:
long = int
def _create_sequential_sarray(size, start=0, reverse=False):
if type(size) is not int:
raise TypeError("size must be int")
if type(start) is not int:
raise TypeError("size must be int")
if type(reverse) is not bool:
raise TypeError("reverse must me bool")
with cython_context():
return SArray(_proxy=glconnect.get_unity().create_sequential_sarray(size, start, reverse))
class SArray(object):
"""
An immutable, homogeneously typed array object backed by persistent storage.
SArray is scaled to hold data that are much larger than the machine's main
memory. It fully supports missing values and random access. The
data backing an SArray is located on the same machine as the GraphLab
Server process. Each column in an :py:class:`~graphlab.SFrame` is an
SArray.
Parameters
----------
data : list | numpy.ndarray | pandas.Series | string
The input data. If this is a list, numpy.ndarray, or pandas.Series,
the data in the list is converted and stored in an SArray.
Alternatively if this is a string, it is interpreted as a path (or
url) to a text file. Each line of the text file is loaded as a
separate row. If ``data`` is a directory where an SArray was previously
saved, this is loaded as an SArray read directly out of that
directory.
dtype : {None, int, float, str, list, array.array, dict, datetime.datetime, graphlab.Image}, optional
The data type of the SArray. If not specified (None), we attempt to
infer it from the input. If it is a numpy array or a Pandas series, the
dtype of the array/series is used. If it is a list, the dtype is
inferred from the inner list. If it is a URL or path to a text file, we
default the dtype to str.
ignore_cast_failure : bool, optional
If True, ignores casting failures but warns when elements cannot be
casted into the specified dtype.
Notes
-----
- If ``data`` is pandas.Series, the index will be ignored.
- The datetime is based on the Boost datetime format (see http://www.boost.org/doc/libs/1_48_0/doc/html/date_time/date_time_io.html
for details)
Examples
--------
SArray can be constructed in various ways:
Construct an SArray from list.
>>> from graphlab import SArray
>>> sa = SArray(data=[1,2,3,4,5], dtype=int)
Construct an SArray from numpy.ndarray.
>>> sa = SArray(data=numpy.asarray([1,2,3,4,5]), dtype=int)
or:
>>> sa = SArray(numpy.asarray([1,2,3,4,5]), int)
Construct an SArray from pandas.Series.
>>> sa = SArray(data=pd.Series([1,2,3,4,5]), dtype=int)
or:
>>> sa = SArray(pd.Series([1,2,3,4,5]), int)
If the type is not specified, automatic inference is attempted:
>>> SArray(data=[1,2,3,4,5]).dtype()
int
>>> SArray(data=[1,2,3,4,5.0]).dtype()
float
The SArray supports standard datatypes such as: integer, float and string.
It also supports three higher level datatypes: float arrays, dict
and list (array of arbitrary types).
Create an SArray from a list of strings:
>>> sa = SArray(data=['a','b'])
Create an SArray from a list of float arrays;
>>> sa = SArray([[1,2,3], [3,4,5]])
Create an SArray from a list of lists:
>>> sa = SArray(data=[['a', 1, {'work': 3}], [2, 2.0]])
Create an SArray from a list of dictionaries:
>>> sa = SArray(data=[{'a':1, 'b': 2}, {'b':2, 'c': 1}])
Create an SArray from a list of datetime objects:
>>> sa = SArray(data=[datetime.datetime(2011, 10, 20, 9, 30, 10)])
Construct an SArray from local text file. (Only works for local server).
>>> sa = SArray('/tmp/a_to_z.txt.gz')
Construct an SArray from a text file downloaded from a URL.
>>> sa = SArray('http://s3-us-west-2.amazonaws.com/testdatasets/a_to_z.txt.gz')
**Numeric Operators**
SArrays support a large number of vectorized operations on numeric types.
For instance:
>>> sa = SArray([1,1,1,1,1])
>>> sb = SArray([2,2,2,2,2])
>>> sc = sa + sb
>>> sc
dtype: int
Rows: 5
[3, 3, 3, 3, 3]
>>> sc + 2
dtype: int
Rows: 5
[5, 5, 5, 5, 5]
Operators which are supported include all numeric operators (+,-,*,/), as
well as comparison operators (>, >=, <, <=), and logical operators (&, | ).
For instance:
>>> sa = SArray([1,2,3,4,5])
>>> (sa >= 2) & (sa <= 4)
dtype: int
Rows: 5
[0, 1, 1, 1, 0]
The numeric operators (+,-,*,/) also work on array types:
>>> sa = SArray(data=[[1.0,1.0], [2.0,2.0]])
>>> sa + 1
dtype: list
Rows: 2
[array('f', [2.0, 2.0]), array('f', [3.0, 3.0])]
>>> sa + sa
dtype: list
Rows: 2
[array('f', [2.0, 2.0]), array('f', [4.0, 4.0])]
The addition operator (+) can also be used for string concatenation:
>>> sa = SArray(data=['a','b'])
>>> sa + "x"
dtype: str
Rows: 2
['ax', 'bx']
This can be useful for performing type interpretation of lists or
dictionaries stored as strings:
>>> sa = SArray(data=['a,b','c,d'])
>>> ("[" + sa + "]").astype(list) # adding brackets make it look like a list
dtype: list
Rows: 2
[['a', 'b'], ['c', 'd']]
All comparison operations and boolean operators are supported and emit
binary SArrays.
>>> sa = SArray([1,2,3,4,5])
>>> sa >= 2
dtype: int
Rows: 5
[0, 1, 1, 1, 1]
>>> (sa >= 2) & (sa <= 4)
dtype: int
Rows: 5
[0, 1, 1, 1, 0]
**Element Access and Slicing**
SArrays can be accessed by integer keys just like a regular python list.
Such operations may not be fast on large datasets so looping over an SArray
should be avoided.
>>> sa = SArray([1,2,3,4,5])
>>> sa[0]
1
>>> sa[2]
3
>>> sa[5]
IndexError: SArray index out of range
Negative indices can be used to access elements from the tail of the array
>>> sa[-1] # returns the last element
5
>>> sa[-2] # returns the second to last element
4
The SArray also supports the full range of python slicing operators:
>>> sa[1000:] # Returns an SArray containing rows 1000 to the end
>>> sa[:1000] # Returns an SArray containing rows 0 to row 999 inclusive
>>> sa[0:1000:2] # Returns an SArray containing rows 0 to row 1000 in steps of 2
>>> sa[-100:] # Returns an SArray containing last 100 rows
>>> sa[-100:len(sa):2] # Returns an SArray containing last 100 rows in steps of 2
**Logical Filter**
An SArray can be filtered using
>>> array[binary_filter]
where array and binary_filter are SArrays of the same length. The result is
a new SArray which contains only elements of 'array' where its matching row
in the binary_filter is non zero.
This permits the use of boolean operators that can be used to perform
logical filtering operations. For instance:
>>> sa = SArray([1,2,3,4,5])
>>> sa[(sa >= 2) & (sa <= 4)]
dtype: int
Rows: 3
[2, 3, 4]
This can also be used more generally to provide filtering capability which
is otherwise not expressible with simple boolean functions. For instance:
>>> sa = SArray([1,2,3,4,5])
>>> sa[sa.apply(lambda x: math.log(x) <= 1)]
dtype: int
Rows: 2
[1, 2]
This is equivalent to
>>> sa.filter(lambda x: math.log(x) <= 1)
dtype: int
Rows: 2
[1, 2]
**Iteration**
The SArray is also iterable, but not efficiently since this involves a
streaming transmission of data from the server to the client. This should
not be used for large data.
>>> sa = SArray([1,2,3,4,5])
>>> [i + 1 for i in sa]
[2, 3, 4, 5, 6]
This can be used to convert an SArray to a list:
>>> sa = SArray([1,2,3,4,5])
>>> l = list(sa)
>>> l
[1, 2, 3, 4, 5]
"""
__slots__ = ["__proxy__", "_getitem_cache"]
__construct_ctr = int(time.time()) % 1000
def __init__(self, data=[], dtype=None, ignore_cast_failure=False, _proxy=None):
"""
__init__(data=list(), dtype=None, ignore_cast_failure=False)
Construct a new SArray. The source of data includes: list,
numpy.ndarray, pandas.Series, and urls.
"""
SArray.__construct_ctr += 1
if SArray.__construct_ctr % 1000 == 0:
_mt._get_metric_tracker().track('sarray.init1000')
if dtype is not None and type(dtype) != type:
raise TypeError('dtype must be a type, e.g. use int rather than \'int\'')
if (_proxy):
self.__proxy__ = _proxy
elif type(data) == SArray:
self.__proxy__ = data.__proxy__
else:
self.__proxy__ = UnitySArrayProxy(glconnect.get_client())
# we need to perform type inference
if dtype is None:
if HAS_PANDAS and isinstance(data, pandas.Series):
# if it is a pandas series get the dtype of the series
dtype = pytype_from_dtype(data.dtype)
if dtype == object:
# we need to get a bit more fine grained than that
dtype = infer_type_of_sequence(data.values)
elif HAS_NUMPY and isinstance(data, numpy.ndarray):
# first try the fast inproc method
try:
from .. import numpy_loader
if numpy_loader.numpy_activation_successful():
from ..numpy import _fast_numpy_to_sarray
ret = _fast_numpy_to_sarray(data)
# conversion is good!
# swap the proxy.
self.__proxy__, ret.__proxy__ = ret.__proxy__, self.__proxy__
return
else:
dtype = infer_type_of_sequence(data)
except:
pass
# if it is a numpy array, get the dtype of the array
dtype = pytype_from_dtype(data.dtype)
if dtype == object:
# we need to get a bit more fine grained than that
dtype = infer_type_of_sequence(data)
if len(data.shape) == 2:
# we need to make it an array or a list
if dtype == float or dtype == int:
dtype = array.array
else:
dtype = list
elif len(data.shape) > 2:
raise TypeError("Cannot convert Numpy arrays of greater than 2 dimensions")
elif (isinstance(data, str) or
(sys.version_info.major < 3 and isinstance(data, unicode))):
# if it is a file, we default to string
dtype = str
elif isinstance(data, array.array):
dtype = pytype_from_array_typecode(data.typecode)
elif isinstance(data, collections.Sequence):
# Covers any ordered python container and arrays.
# Convert it to a list first.
dtype = infer_type_of_sequence(data)
else:
dtype = None
if HAS_PANDAS and isinstance(data, pandas.Series):
with cython_context():
self.__proxy__.load_from_iterable(data.values, dtype, ignore_cast_failure)
elif (isinstance(data, str) or (sys.version_info.major <= 2 and isinstance(data, unicode))):
internal_url = _make_internal_url(data)
with cython_context():
self.__proxy__.load_autodetect(internal_url, dtype)
elif ((HAS_NUMPY and isinstance(data, numpy.ndarray))
or isinstance(data, array.array)
or isinstance(data, collections.Sequence)):
with cython_context():
self.__proxy__.load_from_iterable(data, dtype, ignore_cast_failure)
else:
raise TypeError("Unexpected data source. " \
"Possible data source types are: list, " \
"numpy.ndarray, pandas.Series, and string(url)")
@classmethod
def date_range(cls,start_time,end_time,freq):
'''
Returns a new SArray that represents a fixed frequency datetime index.
Parameters
----------
start_time : datetime.datetime
Left bound for generating dates.
end_time : datetime.datetime
Right bound for generating dates.
freq : datetime.timedelta
Fixed frequency between two consecutive data points.
Returns
-------
out : SArray
Examples
--------
>>> import datetime as dt
>>> start = dt.datetime(2013, 5, 7, 10, 4, 10)
>>> end = dt.datetime(2013, 5, 10, 10, 4, 10)
>>> sa = gl.SArray.date_range(start,end,dt.timedelta(1))
>>> print sa
dtype: datetime
Rows: 4
[datetime.datetime(2013, 5, 7, 10, 4, 10),
datetime.datetime(2013, 5, 8, 10, 4, 10),
datetime.datetime(2013, 5, 9, 10, 4, 10),
datetime.datetime(2013, 5, 10, 10, 4, 10)]
'''
if not isinstance(start_time,datetime.datetime):
raise TypeError("The ``start_time`` argument must be from type datetime.datetime.")
if not isinstance(end_time,datetime.datetime):
raise TypeError("The ``end_time`` argument must be from type datetime.datetime.")
if not isinstance(freq,datetime.timedelta):
raise TypeError("The ``freq`` argument must be from type datetime.timedelta.")
from .. import extensions
return extensions.date_range(start_time,end_time,freq.total_seconds())
@classmethod
def from_const(cls, value, size, dtype=type(None)):
"""
Constructs an SArray of size with a const value.
Parameters
----------
value : [int | float | str | array.array | list | dict | datetime]
The value to fill the SArray
size : int
The size of the SArray
dtype : type
The type of the SArray. If not specified, is automatically detected
from the value. This should be specified if value=None since the
actual type of the SArray can be anything.
Examples
--------
Construct an SArray consisting of 10 zeroes:
>>> graphlab.SArray.from_const(0, 10)
Construct an SArray consisting of 10 missing string values:
>>> graphlab.SArray.from_const(None, 10, str)
"""
assert isinstance(size, (int, long)) and size >= 0, "size must be a positive int"
if not isinstance(value, (type(None), int, float, str, array.array, list, dict, datetime.datetime)):
raise TypeError('Cannot create sarray of value type %s' % str(type(value)))
proxy = UnitySArrayProxy(glconnect.get_client())
proxy.load_from_const(value, size, dtype)
return cls(_proxy=proxy)
@classmethod
def from_sequence(cls, *args):
"""
from_sequence(start=0, stop)
Create an SArray from sequence
.. sourcecode:: python
Construct an SArray of integer values from 0 to 999
>>> gl.SArray.from_sequence(1000)
This is equivalent, but more efficient than:
>>> gl.SArray(range(1000))
Construct an SArray of integer values from 10 to 999
>>> gl.SArray.from_sequence(10, 1000)
This is equivalent, but more efficient than:
>>> gl.SArray(range(10, 1000))
Parameters
----------
start : int, optional
The start of the sequence. The sequence will contain this value.
stop : int
The end of the sequence. The sequence will not contain this value.
"""
start = None
stop = None
# fill with args. This checks for from_sequence(100), from_sequence(10,100)
if len(args) == 1:
stop = args[0]
elif len(args) == 2:
start = args[0]
stop = args[1]
if stop is None and start is None:
raise TypeError("from_sequence expects at least 1 argument. got 0")
elif start is None:
return _create_sequential_sarray(stop)
else:
size = stop - start
# this matches the behavior of range
# i.e. range(100,10) just returns an empty array
if (size < 0):
size = 0
return _create_sequential_sarray(size, start)
@classmethod
def from_avro(cls, filename):
"""
Construct an SArray from an Avro file. The SArray type is determined by
the schema of the Avro file.
Parameters
----------
filename : str
The Avro file to load into an SArray.
Examples
--------
Construct an SArray from a local Avro file named 'data.avro':
>>> graphlab.SArray.from_avro('/data/data.avro')
Notes
-----
Currently only supports direct loading of files on the local filesystem.
References
----------
- `Avro Specification <http://avro.apache.org/docs/1.7.7/spec.html>`_
"""
proxy = UnitySArrayProxy(glconnect.get_client())
proxy.load_from_avro(filename)
return cls(_proxy = proxy)
@classmethod
def where(cls, condition, istrue, isfalse, dtype=None):
"""
Selects elements from either istrue or isfalse depending on the value
of the condition SArray.
Parameters
----------
condition : SArray
An SArray of values such that for each value, if non-zero, yields a
value from istrue, otherwise from isfalse.
istrue : SArray or constant
The elements selected if condition is true. If istrue is an SArray,
this must be of the same length as condition.
isfalse : SArray or constant
The elements selected if condition is false. If istrue is an SArray,
this must be of the same length as condition.
dtype : type
The type of result SArray. This is required if both istrue and isfalse
are constants of ambiguous types.
Examples
--------
Returns an SArray with the same values as g with values above 10
clipped to 10
>>> g = SArray([6,7,8,9,10,11,12,13])
>>> SArray.where(g > 10, 10, g)
dtype: int
Rows: 8
[6, 7, 8, 9, 10, 10, 10, 10]
Returns an SArray with the same values as g with values below 10
clipped to 10
>>> SArray.where(g > 10, g, 10)
dtype: int
Rows: 8
[10, 10, 10, 10, 10, 11, 12, 13]
Returns an SArray with the same values of g with all values == 1
replaced by None
>>> g = SArray([1,2,3,4,1,2,3,4])
>>> SArray.where(g == 1, None, g)
dtype: int
Rows: 8
[None, 2, 3, 4, None, 2, 3, 4]
Returns an SArray with the same values of g, but with each missing value
replaced by its corresponding element in replace_none
>>> g = SArray([1,2,None,None])
>>> replace_none = SArray([3,3,2,2])
>>> SArray.where(g != None, g, replace_none)
dtype: int
Rows: 4
[1, 2, 2, 2]
"""
true_is_sarray = isinstance(istrue, SArray)
false_is_sarray = isinstance(isfalse, SArray)
if not true_is_sarray and false_is_sarray:
istrue = cls(_proxy=condition.__proxy__.to_const(istrue, isfalse.dtype()))
if true_is_sarray and not false_is_sarray:
isfalse = cls(_proxy=condition.__proxy__.to_const(isfalse, istrue.dtype()))
if not true_is_sarray and not false_is_sarray:
if dtype is None:
if istrue is None:
dtype = type(isfalse)
elif isfalse is None:
dtype = type(istrue)
elif type(istrue) != type(isfalse):
raise TypeError("true and false inputs are of different types")
elif type(istrue) == type(isfalse):
dtype = type(istrue)
if dtype is None:
raise TypeError("Both true and false are None. Resultant type cannot be inferred.")
istrue = cls(_proxy=condition.__proxy__.to_const(istrue, dtype))
isfalse = cls(_proxy=condition.__proxy__.to_const(isfalse, dtype))
return cls(_proxy=condition.__proxy__.ternary_operator(istrue.__proxy__, isfalse.__proxy__))
def to_numpy(self):
"""
Converts this SArray to a numpy array
This operation will construct a numpy array in memory. Care must
be taken when size of the returned object is big.
Returns
-------
out : numpy.ndarray
A Numpy Array containing all the values of the SArray
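Examples
--------
(Illustrative sketch.)
>>> sa = graphlab.SArray([1, 2, 3])
>>> sa.to_numpy()
array([1, 2, 3])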
"""
assert HAS_NUMPY
import numpy
return numpy.asarray(self)
def __get_content_identifier__(self):
"""
Returns the unique identifier of the content that backs the SArray
Notes
-----
Meant for internal use only.
"""
with cython_context():
return self.__proxy__.get_content_identifier()
def save(self, filename, format=None):
"""
Saves the SArray to file.
The saved SArray will be in a directory named with the `targetfile`
parameter.
Parameters
----------
filename : string
A local path or a remote URL. If format is 'text', it will be
saved as a text file. If format is 'binary', a directory will be
created at the location which will contain the SArray.
format : {'binary', 'text', 'csv'}, optional
Format in which to save the SFrame. Binary saved SArrays can be
loaded much faster and without any format conversion losses.
'text' and 'csv' are synonymous: Each SArray row will be written
as a single line in an output text file. If not
given, will try to infer the format from filename given. If file
name ends with 'csv', 'txt' or '.csv.gz', then save as 'csv' format,
otherwise save as 'binary' format.
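Examples
--------
(Illustrative sketch; the paths below are hypothetical.)
>>> sa = graphlab.SArray([1, 2, 3])
>>> sa.save('./my_sarray') # inferred 'binary' format: a directory is created
>>> sa.save('./my_sarray.csv') # inferred 'csv'/'text' format
>>> sa.save('./my_sarray_text', format='text') # force text output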
"""
from .sframe import SFrame as _SFrame
if format == None:
if filename.endswith(('.csv', '.csv.gz', 'txt')):
format = 'text'
else:
format = 'binary'
if format == 'binary':
with cython_context():
self.__proxy__.save(_make_internal_url(filename))
elif format == 'text' or format == 'csv':
sf = _SFrame({'X1':self})
with cython_context():
sf.__proxy__.save_as_csv(_make_internal_url(filename), {'header':False})
else:
raise ValueError("Unsupported format: {}".format(format))
def __repr__(self):
"""
Returns a string description of the SArray.
"""
data_str = self.__str__()
ret = "dtype: " + str(self.dtype().__name__) + "\n"
if (self.__has_size__()):
ret = ret + "Rows: " + str(self.size()) + "\n"
else:
ret = ret + "Rows: ?\n"
ret = ret + data_str
return ret
def __str__(self):
"""
Returns a string containing the first 100 elements of the array.
"""
# If sarray is image, take head of elements casted to string.
if self.dtype() == _Image:
headln = str(list(self.astype(str).head(100)))
else:
if sys.version_info.major < 3:
headln = str(list(self.head(100)))
headln = unicode(headln.decode('string_escape'),'utf-8',errors='replace').encode('utf-8')
else:
headln = str(list(self.head(100)))
if (self.__proxy__.has_size() == False or self.size() > 100):
# cut the last close bracket
# and replace it with ...
headln = headln[0:-1] + ", ... ]"
return headln
def __nonzero__(self):
"""
Raises a ValueError exception.
The truth value of an array with more than one element is ambiguous. Use a.any() or a.all().
"""
# message copied from Numpy
raise ValueError("The truth value of an array with more than one element is ambiguous. Use a.any() or a.all()")
def __bool__(self):
"""
Raises a ValueError exception.
The truth value of an array with more than one element is ambiguous. Use a.any() or a.all().
"""
# message copied from Numpy
raise ValueError("The truth value of an array with more than one element is ambiguous. Use a.any() or a.all()")
def __len__(self):
"""
Returns the length of the array
"""
return self.size()
def __iter__(self):
"""
Provides an iterator to the contents of the array.
"""
def generator():
elems_at_a_time = 262144
self.__proxy__.begin_iterator()
ret = self.__proxy__.iterator_get_next(elems_at_a_time)
while(True):
for j in ret:
yield j
if len(ret) == elems_at_a_time:
ret = self.__proxy__.iterator_get_next(elems_at_a_time)
else:
break
return generator()
def __contains__(self, item):
"""
Returns true if any element in this SArray is identically equal to item.
Following are equivalent:
>>> element in sa
>>> sa.__contains__(element)
For an element-wise contains see ``SArray.contains``
"""
return (self == item).any()
def contains(self, item):
"""
Performs an element-wise search of "item" in the SArray.
Conceptually equivalent to:
>>> sa.apply(lambda x: item in x)
If the current SArray contains strings and item is a string, this produces a 1
for each row if 'item' is a substring of the row and 0 otherwise.
If the current SArray contains list or arrays, this produces a 1
for each row if 'item' is an element of the list or array.
If the current SArray contains dictionaries, this produces a 1
for each row if 'item' is a key in the dictionary.
Parameters
----------
item : any type
The item to search for.
Returns
-------
out : SArray
A binary SArray where a non-zero value denotes that the item
was found in the row, and 0 if it is not found.
Examples
--------
>>> SArray(['abc','def','ghi']).contains('a')
dtype: int
Rows: 3
[1, 0, 0]
>>> SArray([['a','b'],['b','c'],['c','d']]).contains('b')
dtype: int
Rows: 3
[1, 1, 0]
>>> SArray([{'a':1},{'a':2,'b':1}, {'c':1}]).contains('a')
dtype: int
Rows: 3
[1, 1, 0]
See Also
--------
is_in
"""
return SArray(_proxy = self.__proxy__.left_scalar_operator(item, 'in'))
def is_in(self, other):
"""
Performs an element-wise search for each row in 'other'.
Conceptually equivalent to:
>>> sa.apply(lambda x: x in other)
If the current SArray contains strings and other is a string, this produces a 1
for each row if the row is a substring of 'other', and 0 otherwise.
If the 'other' is a list or array, this produces a 1
for each row if the row is an element of 'other'
Parameters
----------
other : list, array.array, str
The variable to search in.
Returns
-------
out : SArray
A binary SArray where a non-zero value denotes that the row
was found in 'other', and 0 if it is not found.
Examples
--------
>>> SArray(['ab','bc','cd']).is_in('abc')
dtype: int
Rows: 3
[1, 1, 0]
>>> SArray(['a','b','c']).is_in(['a','b'])
dtype: int
Rows: 3
[1, 1, 0]
See Also
--------
contains
"""
return SArray(_proxy = self.__proxy__.right_scalar_operator(other, 'in'))
# XXX: all of these functions are highly repetitive
def __add__(self, other):
"""
If other is a scalar value, adds it to the current array, returning
the new result. If other is an SArray, performs an element-wise
addition of the two arrays.
"""
with cython_context():
if type(other) is SArray:
return SArray(_proxy = self.__proxy__.vector_operator(other.__proxy__, '+'))
else:
return SArray(_proxy = self.__proxy__.left_scalar_operator(other, '+'))
def __sub__(self, other):
"""
If other is a scalar value, subtracts it from the current array, returning
the new result. If other is an SArray, performs an element-wise
subtraction of the two arrays.
"""
with cython_context():
if type(other) is SArray:
return SArray(_proxy = self.__proxy__.vector_operator(other.__proxy__, '-'))
else:
return SArray(_proxy = self.__proxy__.left_scalar_operator(other, '-'))
def __mul__(self, other):
"""
If other is a scalar value, multiplies it to the current array, returning
the new result. If other is an SArray, performs an element-wise
multiplication of the two arrays.
"""
with cython_context():
if type(other) is SArray:
return SArray(_proxy = self.__proxy__.vector_operator(other.__proxy__, '*'))
else:
return SArray(_proxy = self.__proxy__.left_scalar_operator(other, '*'))
def __div__(self, other):
"""
If other is a scalar value, divides each element of the current array
by the value, returning the result. If other is an SArray, performs
an element-wise division of the two arrays.
"""
with cython_context():
if type(other) is SArray:
return SArray(_proxy = self.__proxy__.vector_operator(other.__proxy__, '/'))
else:
return SArray(_proxy = self.__proxy__.left_scalar_operator(other, '/'))
def __truediv__(self, other):
"""
If other is a scalar value, divides each element of the current array
by the value, returning the result. If other is an SArray, performs
an element-wise division of the two arrays.
"""
with cython_context():
if type(other) is SArray:
return SArray(_proxy = self.__proxy__.vector_operator(other.__proxy__, '/'))
else:
return SArray(_proxy = self.__proxy__.left_scalar_operator(other, '/'))
def __floordiv__(self, other):
"""
If other is a scalar value, divides each element of the current array
by the value, returning floor of the result. If other is an SArray, performs
an element-wise division of the two arrays returning the floor of the result.
"""
with cython_context():
if type(other) is SArray:
return SArray(_proxy = self.__proxy__.vector_operator(other.__proxy__, '/')).astype(int)
else:
return SArray(_proxy = self.__proxy__.left_scalar_operator(other, '/')).astype(int)
def __pow__(self, other):
"""
If other is a scalar value, raises each element of the current array to
the power of that value, returning the result. If other
is an SArray, performs an element-wise power of the two
arrays.
"""
with cython_context():
if type(other) is SArray:
return SArray(_proxy = self.__proxy__.vector_operator(other.__proxy__, '**'))
else:
return SArray(_proxy = self.__proxy__.left_scalar_operator(other, '**'))
def __neg__(self):
"""
Returns the negative of each element.
"""
with cython_context():
return SArray(_proxy = self.__proxy__.right_scalar_operator(0, '-'))
def __pos__(self):
if self.dtype() not in [int, long, float, array.array]:
raise RuntimeError("Runtime Exception. Unsupported type operation. "
"cannot perform operation + on type %s" % str(self.dtype()))
with cython_context():
return SArray(_proxy = self.__proxy__)
def __abs__(self):
"""
Returns the absolute value of each element.
"""
with cython_context():
return SArray(_proxy = self.__proxy__.left_scalar_operator(0, 'left_abs'))
def __mod__(self, other):
"""
Performs an element-wise modulo (division remainder). ``other`` may be a
scalar value or an SArray of the same length.
"""
with cython_context():
if type(other) is SArray:
return SArray(_proxy = self.__proxy__.vector_operator(other.__proxy__, '%'))
else:
return SArray(_proxy = self.__proxy__.left_scalar_operator(other, '%'))
def __lt__(self, other):
"""
If other is a scalar value, compares each element of the current array
by the value, returning the result. If other is an SArray, performs
an element-wise comparison of the two arrays.
"""
with cython_context():
if type(other) is SArray:
return SArray(_proxy = self.__proxy__.vector_operator(other.__proxy__, '<'))
else:
return SArray(_proxy = self.__proxy__.left_scalar_operator(other, '<'))
def __gt__(self, other):
"""
If other is a scalar value, compares each element of the current array
by the value, returning the result. If other is an SArray, performs
an element-wise comparison of the two arrays.
"""
with cython_context():
if type(other) is SArray:
return SArray(_proxy = self.__proxy__.vector_operator(other.__proxy__, '>'))
else:
return SArray(_proxy = self.__proxy__.left_scalar_operator(other, '>'))
def __le__(self, other):
"""
If other is a scalar value, compares each element of the current array
by the value, returning the result. If other is an SArray, performs
an element-wise comparison of the two arrays.
"""
with cython_context():
if type(other) is SArray:
return SArray(_proxy = self.__proxy__.vector_operator(other.__proxy__, '<='))
else:
return SArray(_proxy = self.__proxy__.left_scalar_operator(other, '<='))
def __ge__(self, other):
"""
If other is a scalar value, compares each element of the current array
by the value, returning the result. If other is an SArray, performs
an element-wise comparison of the two arrays.
"""
with cython_context():
if type(other) is SArray:
return SArray(_proxy = self.__proxy__.vector_operator(other.__proxy__, '>='))
else:
return SArray(_proxy = self.__proxy__.left_scalar_operator(other, '>='))
def __radd__(self, other):
"""
Adds a scalar value to the current array.
Returned array has the same type as the array on the right hand side
"""
with cython_context():
return SArray(_proxy = self.__proxy__.right_scalar_operator(other, '+'))
def __rsub__(self, other):
"""
Subtracts a scalar value from the current array.
Returned array has the same type as the array on the right hand side
"""
with cython_context():
return SArray(_proxy = self.__proxy__.right_scalar_operator(other, '-'))
def __rmul__(self, other):
"""
Multiplies a scalar value to the current array.
Returned array has the same type as the array on the right hand side
"""
with cython_context():
return SArray(_proxy = self.__proxy__.right_scalar_operator(other, '*'))
def __rdiv__(self, other):
"""
Divides a scalar value by each element in the array
Returned array has the same type as the array on the right hand side
"""
with cython_context():
return SArray(_proxy = self.__proxy__.right_scalar_operator(other, '/'))
def __rtruediv__(self, other):
"""
Divides a scalar value by each element in the array
Returned array has the same type as the array on the right hand side
"""
with cython_context():
return SArray(_proxy = self.__proxy__.right_scalar_operator(other, '/'))
def __rfloordiv__(self, other):
"""
Divides a scalar value by each element in the array returning the
floored result. Returned array has the same type as the array on the
right hand side
"""
with cython_context():
return SArray(_proxy = self.__proxy__.right_scalar_operator(other, '/')).astype(int)
def __rmod__(self, other):
"""
Computes a scalar value modulo each element in the array.
Returned array has the same type as the array on the right hand side
"""
with cython_context():
return SArray(_proxy = self.__proxy__.right_scalar_operator(other, '%'))
def __rpow__(self, other):
"""
Raises a scalar value to the power of each element in the current
array, returning the result.
"""
with cython_context():
return SArray(_proxy = self.__proxy__.right_scalar_operator(other, '**'))
def __eq__(self, other):
"""
If other is a scalar value, compares each element of the current array
by the value, returning the new result. If other is an SArray, performs
an element-wise comparison of the two arrays.
"""
with cython_context():
if type(other) is SArray:
return SArray(_proxy = self.__proxy__.vector_operator(other.__proxy__, '=='))
else:
return SArray(_proxy = self.__proxy__.left_scalar_operator(other, '=='))
def __ne__(self, other):
"""
If other is a scalar value, compares each element of the current array
by the value, returning the new result. If other is an SArray, performs
an element-wise comparison of the two arrays.
"""
with cython_context():
if type(other) is SArray:
return SArray(_proxy = self.__proxy__.vector_operator(other.__proxy__, '!='))
else:
return SArray(_proxy = self.__proxy__.left_scalar_operator(other, '!='))
def __and__(self, other):
"""
Perform a logical element-wise 'and' against another SArray.
"""
if type(other) is SArray:
with cython_context():
return SArray(_proxy = self.__proxy__.vector_operator(other.__proxy__, '&'))
else:
raise TypeError("SArray can only perform logical and against another SArray")
def __or__(self, other):
"""
Perform a logical element-wise 'or' against another SArray.
"""
if type(other) is SArray:
with cython_context():
return SArray(_proxy = self.__proxy__.vector_operator(other.__proxy__, '|'))
else:
raise TypeError("SArray can only perform logical or against another SArray")
def __has_size__(self):
"""
Returns whether or not the size of the SArray is known.
"""
return self.__proxy__.has_size()
def __getitem__(self, other):
"""
If the key is an SArray of identical length, this function performs a
logical filter: i.e. it subselects all the elements in this array
where the corresponding value in the other array evaluates to true.
If the key is an integer this returns a single row of
the SArray. If the key is a slice, this returns an SArray with the
sliced rows. See the GraphLab Create User Guide for usage examples.
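For instance (an illustrative sketch):
>>> sa = SArray([1, 2, 3, 4, 5])
>>> sa[0] # integer key returns a single element
1
>>> sa[sa > 2] # SArray key performs a logical filter
dtype: int
Rows: 3
[3, 4, 5]
>>> sa[1:4] # slice key returns the sliced rows
dtype: int
Rows: 3
[2, 3, 4]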
"""
if isinstance(other, numbers.Integral):
sa_len = len(self)
if other < 0:
other += sa_len
if other >= sa_len:
raise IndexError("SArray index out of range")
try:
lb, ub, value_list = self._getitem_cache
if lb <= other < ub:
return value_list[other - lb]
except AttributeError:
pass
# Not in cache, need to grab it
block_size = 1024 * (32 if self.dtype() in [int, long, float] else 4)
block_num = int(other // block_size)
lb = block_num * block_size
ub = min(sa_len, lb + block_size)
val_list = list(SArray(_proxy = self.__proxy__.copy_range(lb, 1, ub)))
self._getitem_cache = (lb, ub, val_list)
return val_list[other - lb]
elif type(other) is SArray:
if self.__has_size__() and other.__has_size__() and len(other) != len(self):
raise IndexError("Cannot perform logical indexing on arrays of different length.")
with cython_context():
return SArray(_proxy = self.__proxy__.logical_filter(other.__proxy__))
elif type(other) is slice:
sa_len = len(self)
start = other.start
stop = other.stop
step = other.step
if start is None:
start = 0
if stop is None:
stop = sa_len
if step is None:
step = 1
# handle negative indices
if start < 0:
start = sa_len + start
if stop < 0:
stop = sa_len + stop
return SArray(_proxy = self.__proxy__.copy_range(start, step, stop))
else:
raise IndexError("Invalid type to use for indexing")
def materialize(self):
"""
For a SArray that is lazily evaluated, force persist this sarray
to disk, committing all lazy evaluated operations.
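For example (an illustrative sketch):
>>> sa = graphlab.SArray(range(10)).apply(lambda x: x * 2)
>>> sa.materialize() # forces the pending lazy apply to be evaluated
>>> sa.is_materialized()
True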
"""
return self.__materialize__()
def __materialize__(self):
"""
For a SArray that is lazily evaluated, force persist this sarray
to disk, committing all lazy evaluated operations.
"""
with cython_context():
self.__proxy__.materialize()
def is_materialized(self):
"""
Returns whether or not the sarray has been materialized.
"""
return self.__is_materialized__()
def __is_materialized__(self):
"""
Returns whether or not the sarray has been materialized.
"""
return self.__proxy__.is_materialized()
def size(self):
"""
The size of the SArray.
"""
return self.__proxy__.size()
def dtype(self):
"""
The data type of the SArray.
Returns
-------
out : type
The type of the SArray.
Examples
--------
>>> sa = gl.SArray(["The quick brown fox jumps over the lazy dog."])
>>> sa.dtype()
str
>>> sa = gl.SArray(range(10))
>>> sa.dtype()
int
"""
return self.__proxy__.dtype()
def head(self, n=10):
"""
Returns an SArray which contains the first n rows of this SArray.
Parameters
----------
n : int
The number of rows to fetch.
Returns
-------
out : SArray
A new SArray which contains the first n rows of the current SArray.
Examples
--------
>>> gl.SArray(range(10)).head(5)
dtype: int
Rows: 5
[0, 1, 2, 3, 4]
"""
return SArray(_proxy=self.__proxy__.head(n))
def vector_slice(self, start, end=None):
"""
If this SArray contains vectors or lists, this returns a new SArray
containing each individual element sliced, between start and
end (exclusive).
Parameters
----------
start : int
The start position of the slice.
end : int, optional.
The end position of the slice. Note that the end position
is NOT included in the slice. Thus a g.vector_slice(1,3) will extract
entries in position 1 and 2. If end is not specified, the return
array will contain only one element, the element at the start
position.
Returns
-------
out : SArray
Each individual vector sliced according to the arguments.
Examples
--------
If g is a vector of floats:
>>> g = SArray([[1,2,3],[2,3,4]])
>>> g
dtype: array
Rows: 2
[array('d', [1.0, 2.0, 3.0]), array('d', [2.0, 3.0, 4.0])]
>>> g.vector_slice(0) # extracts the first element of each vector
dtype: float
Rows: 2
[1.0, 2.0]
>>> g.vector_slice(0, 2) # extracts the first two elements of each vector
dtype: array.array
Rows: 2
[array('d', [1.0, 2.0]), array('d', [2.0, 3.0])]
If a vector cannot be sliced, the result will be None:
>>> g = SArray([[1],[1,2],[1,2,3]])
>>> g
dtype: array.array
Rows: 3
[array('d', [1.0]), array('d', [1.0, 2.0]), array('d', [1.0, 2.0, 3.0])]
>>> g.vector_slice(2)
dtype: float
Rows: 3
[None, None, 3.0]
>>> g.vector_slice(0,2)
dtype: list
Rows: 3
[None, array('d', [1.0, 2.0]), array('d', [1.0, 2.0])]
If g is a vector of mixed types (float, int, str, array, list, etc.):
>>> g = SArray([['a',1,1.0],['b',2,2.0]])
>>> g
dtype: list
Rows: 2
[['a', 1, 1.0], ['b', 2, 2.0]]
>>> g.vector_slice(0) # extracts the first element of each vector
dtype: list
Rows: 2
[['a'], ['b']]
"""
if (self.dtype() != array.array) and (self.dtype() != list):
raise RuntimeError("Only Vector type can be sliced")
if end == None:
end = start + 1
with cython_context():
return SArray(_proxy=self.__proxy__.vector_slice(start, end))
def subslice(self, start=None, stop=None, step=None):
"""
This returns an SArray with each element sliced accordingly to the
slice specified. This is conceptually equivalent to:
>>> g.apply(lambda x: x[start:stop:step])
The SArray must be of type list, vector, or string.
For instance:
>>> g = SArray(["abcdef","qwerty"])
>>> g.subslice(start=0, stop=2)
dtype: str
Rows: 2
["ab", "qw"]
>>> g.subslice(3,-1)
dtype: str
Rows: 2
["de", "rt"]
>>> g.subslice(3)
dtype: str
Rows: 2
["def", "rty"]
>>> g = SArray([[1,2,3], [4,5,6]])
>>> g.subslice(0, 1)
dtype: list
Rows: 2
[[1], [4]]
Parameters
----------
start : int or None (default)
The start position of the slice
stop: int or None (default)
The stop position of the slice
step: int or None (default)
The step size of the slice
Returns
-------
out : SArray
Each individual vector/string/list sliced according to the arguments.
"""
if self.dtype() not in [str, array.array, list]:
raise TypeError("SArray must contain strings, arrays or lists")
with cython_context():
return SArray(_proxy=self.__proxy__.subslice(start, step, stop))
def _count_words(self, to_lower=True, delimiters=["\r", "\v", "\n", "\f", "\t", " "]):
"""
This returns an SArray with, for each input string, a dict from the unique,
delimited substrings to their number of occurrences within the original
string.
The SArray must be of type string.
..WARNING:: This function is deprecated, and will be removed in future
versions of GraphLab Create. Please use the `text_analytics.count_words`
function instead.
Parameters
----------
to_lower : bool, optional
"to_lower" indicates whether to map the input strings to lower case
before counts
delimiters: list[string], optional
"delimiters" is a list of which characters to delimit on to find tokens
Returns
-------
out : SArray
for each input string, a dict from the unique, delimited substrings
to their number of occurrences within the original string.
Examples
--------
>>> sa = graphlab.SArray(["The quick brown fox jumps.",
"Word word WORD, word!!!word"])
>>> sa._count_words()
dtype: dict
Rows: 2
[{'quick': 1, 'brown': 1, 'jumps': 1, 'fox': 1, 'the': 1},
{'word': 2, 'word,': 1, 'word!!!word': 1}]
"""
if (self.dtype() != str):
raise TypeError("Only SArray of string type is supported for counting bag of words")
if (not all([len(delim) == 1 for delim in delimiters])):
raise ValueError("Delimiters must be single-character strings")
# construct options, will extend over time
options = dict()
options["to_lower"] = to_lower == True
# defaults to std::isspace whitespace delimiters if no others passed in
options["delimiters"] = delimiters
with cython_context():
return SArray(_proxy=self.__proxy__.count_bag_of_words(options))
def _count_ngrams(self, n=2, method="word", to_lower=True, ignore_space=True):
"""
For documentation, see graphlab.text_analytics.count_ngrams().
..WARNING:: This function is deprecated, and will be removed in future
versions of GraphLab Create. Please use the `text_analytics.count_ngrams`
function instead.
"""
if (self.dtype() != str):
raise TypeError("Only SArray of string type is supported for counting n-grams")
if (type(n) != int):
raise TypeError("Input 'n' must be of type int")
if (n < 1):
raise ValueError("Input 'n' must be greater than 0")
if (n > 5):
warnings.warn("It is unusual for n-grams to be of size larger than 5.")
# construct options, will extend over time
options = dict()
options["to_lower"] = to_lower == True
options["ignore_space"] = ignore_space == True
if method == "word":
with cython_context():
return SArray(_proxy=self.__proxy__.count_ngrams(n, options))
elif method == "character" :
with cython_context():
return SArray(_proxy=self.__proxy__.count_character_ngrams(n, options))
else:
raise ValueError("Invalid 'method' input value. Please input either 'word' or 'character' ")
def dict_trim_by_keys(self, keys, exclude=True):
"""
Filter an SArray of dictionary type by the given keys. By default, all
keys that are in the provided list in ``keys`` are *excluded* from the
returned SArray.
Parameters
----------
keys : list
A collection of keys to trim down the elements in the SArray.
exclude : bool, optional
If True, all keys that are in the input key list are removed. If
False, only keys that are in the input key list are retained.
Returns
-------
out : SArray
A SArray of dictionary type, with each dictionary element trimmed
according to the input criteria.
See Also
--------
dict_trim_by_values
Examples
--------
>>> sa = graphlab.SArray([{"this":1, "is":1, "dog":2},
{"this": 2, "are": 2, "cat": 1}])
>>> sa.dict_trim_by_keys(["this", "is", "and", "are"], exclude=True)
dtype: dict
Rows: 2
[{'dog': 2}, {'cat': 1}]
"""
if not _is_non_string_iterable(keys):
keys = [keys]
with cython_context():
return SArray(_proxy=self.__proxy__.dict_trim_by_keys(keys, exclude))
def dict_trim_by_values(self, lower=None, upper=None):
"""
Filter dictionary values to a given range (inclusive). Trimming is only
performed on values which can be compared to the bound values. Fails on
SArrays whose data type is not ``dict``.
Parameters
----------
lower : int or long or float, optional
The lowest dictionary value that would be retained in the result. If
not given, lower bound is not applied.
upper : int or long or float, optional
The highest dictionary value that would be retained in the result.
If not given, upper bound is not applied.
Returns
-------
out : SArray
An SArray of dictionary type, with each dict element trimmed
according to the input criteria.
See Also
--------
dict_trim_by_keys
Examples
--------
>>> sa = graphlab.SArray([{"this":1, "is":5, "dog":7},
{"this": 2, "are": 1, "cat": 5}])
>>> sa.dict_trim_by_values(2,5)
dtype: dict
Rows: 2
[{'is': 5}, {'this': 2, 'cat': 5}]
>>> sa.dict_trim_by_values(upper=5)
dtype: dict
Rows: 2
[{'this': 1, 'is': 5}, {'this': 2, 'are': 1, 'cat': 5}]
"""
if not (lower is None or isinstance(lower, numbers.Number)):
raise TypeError("lower bound has to be a numeric value")
if not (upper is None or isinstance(upper, numbers.Number)):
raise TypeError("upper bound has to be a numeric value")
with cython_context():
return SArray(_proxy=self.__proxy__.dict_trim_by_values(lower, upper))
def dict_keys(self):
"""
Create an SArray that contains all the keys from each dictionary
element as a list. Fails on SArrays whose data type is not ``dict``.
Returns
-------
out : SArray
A SArray of list type, where each element is a list of keys
from the input SArray element.
See Also
--------
dict_values
Examples
---------
>>> sa = graphlab.SArray([{"this":1, "is":5, "dog":7},
{"this": 2, "are": 1, "cat": 5}])
>>> sa.dict_keys()
dtype: list
Rows: 2
[['this', 'is', 'dog'], ['this', 'are', 'cat']]
"""
with cython_context():
return SArray(_proxy=self.__proxy__.dict_keys())
def dict_values(self):
"""
Create an SArray that contains all the values from each dictionary
element as a list. Fails on SArrays whose data type is not ``dict``.
Returns
-------
out : SArray
A SArray of list type, where each element is a list of values
from the input SArray element.
See Also
--------
dict_keys
Examples
--------
>>> sa = graphlab.SArray([{"this":1, "is":5, "dog":7},
{"this": 2, "are": 1, "cat": 5}])
>>> sa.dict_values()
dtype: list
Rows: 2
[[1, 5, 7], [2, 1, 5]]
"""
with cython_context():
return SArray(_proxy=self.__proxy__.dict_values())
def dict_has_any_keys(self, keys):
"""
Create a boolean SArray by checking the keys of an SArray of
dictionaries. An element of the output SArray is True if the
corresponding input element's dictionary has any of the given keys.
Fails on SArrays whose data type is not ``dict``.
Parameters
----------
keys : list
A list of key values to check each dictionary against.
Returns
-------
out : SArray
A SArray of int type, where each element indicates whether the
input SArray element contains any key in the input list.
See Also
--------
dict_has_all_keys
Examples
--------
>>> sa = graphlab.SArray([{"this":1, "is":5, "dog":7}, {"animal":1},
{"this": 2, "are": 1, "cat": 5}])
>>> sa.dict_has_any_keys(["is", "this", "are"])
dtype: int
Rows: 3
[1, 0, 1]
"""
if not _is_non_string_iterable(keys):
keys = [keys]
with cython_context():
return SArray(_proxy=self.__proxy__.dict_has_any_keys(keys))
def dict_has_all_keys(self, keys):
"""
Create a boolean SArray by checking the keys of an SArray of
dictionaries. An element of the output SArray is True if the
corresponding input element's dictionary has all of the given keys.
Fails on SArrays whose data type is not ``dict``.
Parameters
----------
keys : list
A list of key values to check each dictionary against.
Returns
-------
out : SArray
A SArray of int type, where each element indicates whether the
input SArray element contains all keys in the input list.
See Also
--------
dict_has_any_keys
Examples
--------
>>> sa = graphlab.SArray([{"this":1, "is":5, "dog":7},
{"this": 2, "are": 1, "cat": 5}])
>>> sa.dict_has_all_keys(["is", "this"])
dtype: int
Rows: 2
[1, 0]
"""
if not _is_non_string_iterable(keys):
keys = [keys]
with cython_context():
return SArray(_proxy=self.__proxy__.dict_has_all_keys(keys))
def apply(self, fn, dtype=None, skip_undefined=True, seed=None):
"""
apply(fn, dtype=None, skip_undefined=True, seed=None)
Transform each element of the SArray by a given function. The result
SArray is of type ``dtype``. ``fn`` should be a function that returns
exactly one value which can be cast into the type specified by
``dtype``. If ``dtype`` is not specified, the first 100 elements of the
SArray are used to make a guess about the data type.
Parameters
----------
fn : function
The function to transform each element. Must return exactly one
value which can be cast into the type specified by ``dtype``.
This can also be a toolkit extension function which is compiled
as a native shared library using SDK.
dtype : {None, int, float, str, list, array.array, dict, graphlab.Image}, optional
The data type of the new SArray. If ``None``, the first 100 elements
of the array are used to guess the target data type.
skip_undefined : bool, optional
If True, will not apply ``fn`` to any undefined values.
seed : int, optional
Used as the seed if a random number generator is included in ``fn``.
Returns
-------
out : SArray
The SArray transformed by ``fn``. Each element of the SArray is of
type ``dtype``.
See Also
--------
SFrame.apply
Examples
--------
>>> sa = graphlab.SArray([1,2,3])
>>> sa.apply(lambda x: x*2)
dtype: int
Rows: 3
[2, 4, 6]
Using native toolkit extension function:
.. code-block:: c++
#include <graphlab/sdk/toolkit_function_macros.hpp>
#include <cmath>
using namespace graphlab;
double logx(const flexible_type& x, double base) {
return log((double)(x)) / log(base);
}
BEGIN_FUNCTION_REGISTRATION
REGISTER_FUNCTION(logx, "x", "base");
END_FUNCTION_REGISTRATION
compiled into example.so
>>> import example
>>> sa = graphlab.SArray([1,2,4])
>>> sa.apply(lambda x: example.logx(x, 2))
dtype: float
Rows: 3
[0.0, 1.0, 2.0]
"""
assert callable(fn), "Input function must be callable."
dryrun = [fn(i) for i in self.head(100) if i is not None]
if dtype == None:
dtype = infer_type_of_list(dryrun)
if seed is None:
seed = abs(hash("%0.20f" % time.time())) % (2 ** 31)
# log metric
# First phase test if it is a toolkit function
nativefn = None
try:
from .. import extensions
nativefn = extensions._build_native_function_call(fn)
except:
# failure are fine. we just fall out into the next few phases
pass
if nativefn is not None:
# this is a toolkit lambda. We can do something about it
nativefn.native_fn_name = nativefn.native_fn_name.encode()
with cython_context():
return SArray(_proxy=self.__proxy__.transform_native(nativefn, dtype, skip_undefined, seed))
with cython_context():
return SArray(_proxy=self.__proxy__.transform(fn, dtype, skip_undefined, seed))
def filter(self, fn, skip_undefined=True, seed=None):
"""
Filter this SArray by a function.
Returns a new SArray filtered by this SArray. If `fn` evaluates an
element to true, this element is copied to the new SArray. If not, it
isn't. Throws an exception if the return type of `fn` is not castable
to a boolean value.
Parameters
----------
fn : function
Function that filters the SArray. Must evaluate to bool or int.
skip_undefined : bool, optional
If True, will not apply fn to any undefined values.
seed : int, optional
Used as the seed if a random number generator is included in fn.
Returns
-------
out : SArray
The SArray filtered by fn. Each element of the result is of the
same type as the original SArray.
Examples
--------
>>> sa = graphlab.SArray([1,2,3])
>>> sa.filter(lambda x: x < 3)
dtype: int
Rows: 2
[1, 2]
"""
assert callable(fn), "Input must be callable"
if seed is None:
seed = abs(hash("%0.20f" % time.time())) % (2 ** 31)
with cython_context():
return SArray(_proxy=self.__proxy__.filter(fn, skip_undefined, seed))
def sample(self, fraction, seed=None):
"""
Create an SArray which contains a subsample of the current SArray.
Parameters
----------
fraction : float
The fraction of the rows to fetch. Must be between 0 and 1.
seed : int
The random seed for the random number generator.
Returns
-------
out : SArray
The new SArray which contains the subsampled rows.
Examples
--------
>>> sa = graphlab.SArray(range(10))
>>> sa.sample(.3)
dtype: int
Rows: 3
[2, 6, 9]
"""
if (fraction > 1 or fraction < 0):
raise ValueError('Invalid sampling rate: ' + str(fraction))
if (self.size() == 0):
return SArray()
if seed is None:
seed = abs(hash("%0.20f" % time.time())) % (2 ** 31)
with cython_context():
return SArray(_proxy=self.__proxy__.sample(fraction, seed))
def hash(self, seed=0):
"""
Returns an SArray with a hash of each element. seed can be used
to change the hash function to allow this method to be used for
random number generation.
Parameters
----------
seed : int
Defaults to 0. Can be changed to different values to get
different hash results.
Returns
-------
out : SArray
An integer SArray with a hash value for each element. Identical
elements are hashed to the same value
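Examples
--------
An illustrative call; the concrete hash values depend on the underlying
implementation, so only a property of the output is shown.
>>> sa = graphlab.SArray([1, 2, 1])
>>> hashed = sa.hash()
>>> hashed[0] == hashed[2]
True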
"""
with cython_context():
return SArray(_proxy=self.__proxy__.hash(seed))
@classmethod
def random_integers(cls, size, seed=None):
"""
Returns an SArray with random integer values.
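Parameters
----------
size : int
Number of random integers to generate.
seed : int, optional
Seed for the hash-based generator; defaults to a time-based value.
Examples
--------
An illustrative call; the values are pseudo-random, so only the length of
the result is shown.
>>> sa = graphlab.SArray.random_integers(5, seed=42)
>>> len(sa)
5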
"""
if seed is None:
seed = abs(hash("%0.20f" % time.time())) % (2 ** 31)
return cls.from_sequence(size).hash(seed)
def _save_as_text(self, url):
"""
Save the SArray to disk as text file.
"""
raise NotImplementedError
def all(self):
"""
Return True if every element of the SArray evaluates to True. For
numeric SArrays zeros and missing values (``None``) evaluate to False,
while all non-zero, non-missing values evaluate to True. For string,
list, and dictionary SArrays, empty values (zero length strings, lists
or dictionaries) or missing values (``None``) evaluate to False. All
other values evaluate to True.
Returns True on an empty SArray.
Returns
-------
out : bool
See Also
--------
any
Examples
--------
>>> graphlab.SArray([1, None]).all()
False
>>> graphlab.SArray([1, 0]).all()
False
>>> graphlab.SArray([1, 2]).all()
True
>>> graphlab.SArray(["hello", "world"]).all()
True
>>> graphlab.SArray(["hello", ""]).all()
False
>>> graphlab.SArray([]).all()
True
"""
with cython_context():
return self.__proxy__.all()
def any(self):
"""
Return True if any element of the SArray evaluates to True. For numeric
SArrays any non-zero value evaluates to True. For string, list, and
dictionary SArrays, any element of non-zero length evaluates to True.
Returns False on an empty SArray.
Returns
-------
out : bool
See Also
--------
all
Examples
--------
>>> graphlab.SArray([1, None]).any()
True
>>> graphlab.SArray([1, 0]).any()
True
>>> graphlab.SArray([0, 0]).any()
False
>>> graphlab.SArray(["hello", "world"]).any()
True
>>> graphlab.SArray(["hello", ""]).any()
True
>>> graphlab.SArray(["", ""]).any()
False
>>> graphlab.SArray([]).any()
False
"""
with cython_context():
return self.__proxy__.any()
def max(self):
"""
Get maximum numeric value in SArray.
Returns None on an empty SArray. Raises an exception if called on an
SArray with non-numeric type.
Returns
-------
out : type of SArray
Maximum value of SArray
See Also
--------
min
Examples
--------
>>> graphlab.SArray([14, 62, 83, 72, 77, 96, 5, 25, 69, 66]).max()
96
"""
with cython_context():
return self.__proxy__.max()
def min(self):
"""
Get minimum numeric value in SArray.
Returns None on an empty SArray. Raises an exception if called on an
SArray with non-numeric type.
Returns
-------
out : type of SArray
Minimum value of SArray
See Also
--------
max
Examples
--------
>>> graphlab.SArray([14, 62, 83, 72, 77, 96, 5, 25, 69, 66]).min()
5
"""
with cython_context():
return self.__proxy__.min()
def argmax(self):
"""
Get the index of the maximum numeric value in SArray.
Returns None on an empty SArray. Raises an exception if called on an
SArray with non-numeric type.
Returns
-------
out : int
Index of the maximum value of SArray
See Also
--------
argmin
Examples
--------
>>> graphlab.SArray([14, 62, 83, 72, 77, 96, 5, 25, 69, 66]).argmax()
5
"""
from .sframe import SFrame as _SFrame
if len(self) == 0:
return None
if not any([isinstance(self[0], i) for i in [int,float,long]]):
raise TypeError("SArray must be of type 'int', 'long', or 'float'.")
sf = _SFrame(self).add_row_number()
sf_out = sf.groupby(key_columns=[],operations={'maximum_x1': _aggregate.ARGMAX('X1','id')})
return sf_out['maximum_x1'][0]
def argmin(self):
"""
Get the index of the minimum numeric value in SArray.
Returns None on an empty SArray. Raises an exception if called on an
SArray with non-numeric type.
Returns
-------
out : int
index of the minimum value of SArray
See Also
--------
argmax
Examples
--------
>>> graphlab.SArray([14, 62, 83, 72, 77, 96, 5, 25, 69, 66]).argmin()
6
"""
from .sframe import SFrame as _SFrame
if len(self) == 0:
return None
if not any([isinstance(self[0], i) for i in [int,float,long]]):
raise TypeError("SArray must be of type 'int', 'long', or 'float'.")
sf = _SFrame(self).add_row_number()
sf_out = sf.groupby(key_columns=[],operations={'minimum_x1': _aggregate.ARGMIN('X1','id')})
return sf_out['minimum_x1'][0]
def sum(self):
"""
Sum of all values in this SArray.
Raises an exception if called on an SArray of strings, lists, or
dictionaries. If the SArray contains numeric arrays (array.array) and
all the arrays are the same length, the sum over all the arrays will be
returned. Returns None on an empty SArray. For large values, this may
overflow without warning.
Returns
-------
out : type of SArray
Sum of all values in SArray
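Examples
--------
An illustrative call on a small integer SArray; the value shown follows the
documented behavior (the sum of the elements).
>>> graphlab.SArray([1, 2, 3]).sum()
6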
"""
with cython_context():
return self.__proxy__.sum()
def mean(self):
"""
Mean of all the values in the SArray, or mean image.
Returns None on an empty SArray. Raises an exception if called on an
SArray with non-numeric type or non-Image type.
Returns
-------
out : float | graphlab.Image
Mean of all values in SArray, or image holding per-pixel mean
across the input SArray.
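Examples
--------
An illustrative call on a small numeric SArray.
>>> graphlab.SArray([1, 2, 3, 4]).mean()
2.5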
"""
with cython_context():
if self.dtype() == _Image:
from .. import extensions
return extensions.generate_mean(self)
else:
return self.__proxy__.mean()
def std(self, ddof=0):
"""
Standard deviation of all the values in the SArray.
Returns None on an empty SArray. Raises an exception if called on an
SArray with non-numeric type or if `ddof` >= length of SArray.
Parameters
----------
ddof : int, optional
"delta degrees of freedom" in the variance calculation.
Returns
-------
out : float
The standard deviation of all the values.
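Examples
--------
An illustrative call; with the default ``ddof=0`` this is the population
standard deviation of the elements.
>>> graphlab.SArray([1, 2, 3, 4]).std()
1.118033988749895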
"""
with cython_context():
return self.__proxy__.std(ddof)
def var(self, ddof=0):
"""
Variance of all the values in the SArray.
Returns None on an empty SArray. Raises an exception if called on an
SArray with non-numeric type or if `ddof` >= length of SArray.
Parameters
----------
ddof : int, optional
"delta degrees of freedom" in the variance calculation.
Returns
-------
out : float
Variance of all values in SArray.
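Examples
--------
An illustrative call; with the default ``ddof=0`` this is the population
variance of the elements.
>>> graphlab.SArray([1, 2, 3, 4]).var()
1.25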
"""
with cython_context():
return self.__proxy__.var(ddof)
def num_missing(self):
"""
Number of missing elements in the SArray.
Returns
-------
out : int
Number of missing values.
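Examples
--------
An illustrative call; the missing values are the ``None`` entries.
>>> graphlab.SArray([1, None, 3, None]).num_missing()
2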
"""
with cython_context():
return self.__proxy__.num_missing()
def nnz(self):
"""
Number of non-zero elements in the SArray.
Returns
-------
out : int
Number of non-zero elements.
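Examples
--------
An illustrative call on a small integer SArray.
>>> graphlab.SArray([0, 1, 0, 2]).nnz()
2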
"""
with cython_context():
return self.__proxy__.nnz()
def datetime_to_str(self,str_format="%Y-%m-%dT%H:%M:%S%ZP"):
"""
Create a new SArray with all the values cast to str. The string format is
specified by the 'str_format' parameter.
Parameters
----------
str_format : str
The format to output the string. Default format is "%Y-%m-%dT%H:%M:%S%ZP".
Returns
-------
out : SArray[str]
The SArray converted to the type 'str'.
Examples
--------
>>> dt = datetime.datetime(2011, 10, 20, 9, 30, 10, tzinfo=GMT(-5))
>>> sa = graphlab.SArray([dt])
>>> sa.datetime_to_str("%e %b %Y %T %ZP")
dtype: str
Rows: 1
[20 Oct 2011 09:30:10 GMT-05:00]
See Also
----------
str_to_datetime
References
----------
[1] Boost date time from string conversion guide (http://www.boost.org/doc/libs/1_48_0/doc/html/date_time/date_time_io.html)
"""
if(self.dtype() != datetime.datetime):
raise TypeError("datetime_to_str expects SArray of datetime as input SArray")
with cython_context():
return SArray(_proxy=self.__proxy__.datetime_to_str(str_format))
def str_to_datetime(self,str_format="%Y-%m-%dT%H:%M:%S%ZP"):
"""
Create a new SArray with all the values cast to datetime. The string format is
specified by the 'str_format' parameter.
Parameters
----------
str_format : str
The string format of the input SArray. Default format is "%Y-%m-%dT%H:%M:%S%ZP".
If str_format is "ISO", the format is "%Y%m%dT%H%M%S%F%q"
Returns
-------
out : SArray[datetime.datetime]
The SArray converted to the type 'datetime'.
Examples
--------
>>> sa = graphlab.SArray(["20-Oct-2011 09:30:10 GMT-05:30"])
>>> sa.str_to_datetime("%d-%b-%Y %H:%M:%S %ZP")
dtype: datetime
Rows: 1
datetime.datetime(2011, 10, 20, 9, 30, 10, tzinfo=GMT(-5.5))
See Also
----------
datetime_to_str
References
----------
[1] boost date time to string conversion guide (http://www.boost.org/doc/libs/1_48_0/doc/html/date_time/date_time_io.html)
"""
if(self.dtype() != str):
raise TypeError("str_to_datetime expects SArray of str as input SArray")
with cython_context():
return SArray(_proxy=self.__proxy__.str_to_datetime(str_format))
def pixel_array_to_image(self, width, height, channels, undefined_on_failure=True, allow_rounding=False):
"""
Create a new SArray with all the values cast to :py:class:`graphlab.image.Image`
of uniform size.
Parameters
----------
width: int
The width of the new images.
height: int
The height of the new images.
channels: int.
Number of channels of the new images.
undefined_on_failure: bool , optional , default True
If True, return None type instead of Image type in failure instances.
If False, raises error upon failure.
allow_rounding: bool, optional , default False
If True, rounds non-integer values when converting to Image type.
If False, raises error upon rounding.
Returns
-------
out : SArray[graphlab.Image]
The SArray converted to the type 'graphlab.Image'.
See Also
--------
astype, str_to_datetime, datetime_to_str
Examples
--------
The MNIST data is scaled from 0 to 1, but our image type only loads integer pixel values
from 0 to 255. If we just convert without scaling, all values below one would be cast to
0.
>>> mnist_array = graphlab.SArray('http://s3.amazonaws.com/dato-datasets/mnist/mnist_vec_sarray')
>>> scaled_mnist_array = mnist_array * 255
>>> mnist_img_sarray = gl.SArray.pixel_array_to_image(scaled_mnist_array, 28, 28, 1, allow_rounding = True)
"""
if(self.dtype() != array.array):
raise TypeError("array_to_img expects SArray of arrays as input SArray")
num_to_test = 10
num_test = min(self.size(), num_to_test)
mod_values = [val % 1 for x in range(num_test) for val in self[x]]
out_of_range_values = [(val > 255 or val < 0) for x in range(num_test) for val in self[x]]
if sum(mod_values) != 0.0 and not allow_rounding:
raise ValueError("There are non-integer values in the array data. Images only support integer data values between 0 and 255. To permit rounding, set the 'allow_rounding' paramter to 1.")
if sum(out_of_range_values) != 0:
raise ValueError("There are values outside the range of 0 to 255. Images only support integer data values between 0 and 255.")
from .. import extensions
return extensions.vector_sarray_to_image_sarray(self, width, height, channels, undefined_on_failure)
def astype(self, dtype, undefined_on_failure=False):
"""
Create a new SArray with all values cast to the given type. Throws an
exception if the types are not castable to the given type.
Parameters
----------
dtype : {int, float, str, list, array.array, dict, datetime.datetime}
The type to cast the elements to in SArray
undefined_on_failure: bool, optional
If set to True, runtime cast failures will be emitted as missing
values rather than failing.
Returns
-------
out : SArray [dtype]
The SArray converted to the type ``dtype``.
Notes
-----
- The string parsing techniques used to handle conversion to dictionary
and list types are quite generic and permit a variety of interesting
formats to be interpreted. For instance, a JSON string can usually be
interpreted as a list or a dictionary type. See the examples below.
- For datetime-to-string and string-to-datetime conversions,
use sa.datetime_to_str() and sa.str_to_datetime() functions.
- For array.array to graphlab.Image conversions, use sa.pixel_array_to_image()
Examples
--------
>>> sa = graphlab.SArray(['1','2','3','4'])
>>> sa.astype(int)
dtype: int
Rows: 4
[1, 2, 3, 4]
Given an SArray of strings that look like dicts, convert to a dictionary
type:
>>> sa = graphlab.SArray(['{1:2 3:4}', '{a:b c:d}'])
>>> sa.astype(dict)
dtype: dict
Rows: 2
[{1: 2, 3: 4}, {'a': 'b', 'c': 'd'}]
"""
if (dtype == _Image) and (self.dtype() == array.array):
raise TypeError("Cannot cast from image type to array with sarray.astype(). Please use sarray.pixel_array_to_img() instead.")
with cython_context():
return SArray(_proxy=self.__proxy__.astype(dtype, undefined_on_failure))
def clip(self, lower=float('nan'), upper=float('nan')):
"""
Create a new SArray with each value clipped to be within the given
bounds.
In this case, "clipped" means that values below the lower bound will be
set to the lower bound value. Values above the upper bound will be set
to the upper bound value. This function can operate on SArrays of
numeric type as well as array type, in which case each individual
element in each array is clipped. By default ``lower`` and ``upper`` are
set to ``float('nan')`` which indicates the respective bound should be
ignored. The method fails if invoked on an SArray of non-numeric type.
Parameters
----------
lower : int, optional
The lower bound used to clip. Ignored if equal to ``float('nan')``
(the default).
upper : int, optional
The upper bound used to clip. Ignored if equal to ``float('nan')``
(the default).
Returns
-------
out : SArray
See Also
--------
clip_lower, clip_upper
Examples
--------
>>> sa = graphlab.SArray([1,2,3])
>>> sa.clip(2,2)
dtype: int
Rows: 3
[2, 2, 2]
"""
with cython_context():
return SArray(_proxy=self.__proxy__.clip(lower, upper))
def clip_lower(self, threshold):
"""
Create new SArray with all values clipped to the given lower bound. This
function can operate on numeric arrays, as well as vector arrays, in
which case each individual element in each vector is clipped. Throws an
exception if the SArray is empty or the types are non-numeric.
Parameters
----------
threshold : float
The lower bound used to clip values.
Returns
-------
out : SArray
See Also
--------
clip, clip_upper
Examples
--------
>>> sa = graphlab.SArray([1,2,3])
>>> sa.clip_lower(2)
dtype: int
Rows: 3
[2, 2, 3]
"""
with cython_context():
return SArray(_proxy=self.__proxy__.clip(threshold, float('nan')))
def clip_upper(self, threshold):
"""
Create new SArray with all values clipped to the given upper bound. This
function can operate on numeric arrays, as well as vector arrays, in
which case each individual element in each vector is clipped.
Parameters
----------
threshold : float
The upper bound used to clip values.
Returns
-------
out : SArray
See Also
--------
clip, clip_lower
Examples
--------
>>> sa = graphlab.SArray([1,2,3])
>>> sa.clip_upper(2)
dtype: int
Rows: 3
[1, 2, 2]
"""
with cython_context():
return SArray(_proxy=self.__proxy__.clip(float('nan'), threshold))
def tail(self, n=10):
"""
Get an SArray that contains the last n elements in the SArray.
Parameters
----------
n : int
The number of elements to fetch
Returns
-------
out : SArray
A new SArray which contains the last n rows of the current SArray.
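Examples
--------
An illustrative call; the last three elements of a ten-element SArray.
>>> graphlab.SArray(range(10)).tail(3)
dtype: int
Rows: 3
[7, 8, 9]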
"""
with cython_context():
return SArray(_proxy=self.__proxy__.tail(n))
def dropna(self):
"""
Create new SArray containing only the non-missing values of the
SArray.
A missing value shows up in an SArray as 'None'. This will also drop
float('nan').
Returns
-------
out : SArray
The new SArray with missing values removed.
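Examples
--------
An illustrative call; the ``None`` entry is dropped.
>>> graphlab.SArray([1, None, 3]).dropna()
dtype: int
Rows: 2
[1, 3]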
"""
with cython_context():
return SArray(_proxy = self.__proxy__.drop_missing_values())
def fillna(self, value):
"""
Create new SArray with all missing values (None or NaN) filled in
with the given value.
The size of the new SArray will be the same as the original SArray. If
the given value is not the same type as the values in the SArray,
`fillna` will attempt to convert the value to the original SArray's
type. If this fails, an error will be raised.
Parameters
----------
value : type convertible to SArray's type
The value used to replace all missing values
Returns
-------
out : SArray
A new SArray with all missing values filled
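Examples
--------
An illustrative call; the missing entry is replaced by the given value.
>>> graphlab.SArray([1, None, 3]).fillna(0)
dtype: int
Rows: 3
[1, 0, 3]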
"""
with cython_context():
return SArray(_proxy = self.__proxy__.fill_missing_values(value))
def topk_index(self, topk=10, reverse=False):
"""
Create an SArray indicating which elements are in the top k.
Entries are '1' if the corresponding element in the current SArray is a
part of the top k elements, and '0' if that corresponding element is
not. Order is descending by default.
Parameters
----------
topk : int
The number of elements to treat as the 'top' set
reverse : bool
If True, return the topk elements in ascending order
Returns
-------
out : SArray (of type int)
Notes
-----
This is used internally by SFrame's topk function.
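Examples
--------
An illustrative call; the two largest values (5 and 4) are flagged with 1.
>>> graphlab.SArray([5, 1, 4, 2, 3]).topk_index(topk=2)
dtype: int
Rows: 5
[1, 0, 1, 0, 0]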
"""
with cython_context():
return SArray(_proxy = self.__proxy__.topk_index(topk, reverse))
def sketch_summary(self, background=False, sub_sketch_keys=None):
"""
Summary statistics that can be calculated with one pass over the SArray.
Returns a graphlab.Sketch object which can be further queried for many
descriptive statistics over this SArray. Many of the statistics are
approximate. See the :class:`~graphlab.Sketch` documentation for more
detail.
Parameters
----------
background : boolean, optional
If True, the sketch construction will return immediately and the
sketch will be constructed in the background. While this is going on,
the sketch can be queried incrementally, but at a performance penalty.
Defaults to False.
sub_sketch_keys : int | str | list of int | list of str, optional
For SArray of dict type, also constructs sketches for a given set of keys,
For SArray of array type, also constructs sketches for the given indexes.
The sub sketches may be queried using: :py:func:`~graphlab.Sketch.element_sub_sketch()`.
Defaults to None in which case no subsketches will be constructed.
Returns
-------
out : Sketch
Sketch object that contains descriptive statistics for this SArray.
Many of the statistics are approximate.
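Examples
--------
A minimal usage sketch; ``mean()`` is assumed here to be one of the query
accessors exposed by the returned Sketch object.
>>> sa = graphlab.SArray([1, 2, 3, 4, 5])
>>> sketch = sa.sketch_summary()
>>> sketch.mean()
3.0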
"""
from ..data_structures.sketch import Sketch
if (self.dtype() == _Image):
raise TypeError("sketch_summary() is not supported for arrays of image type")
if (type(background) != bool):
raise TypeError("'background' parameter has to be a boolean value")
if (sub_sketch_keys != None):
if (self.dtype() != dict and self.dtype() != array.array):
raise TypeError("sub_sketch_keys is only supported for SArray of dictionary or array type")
if not _is_non_string_iterable(sub_sketch_keys):
sub_sketch_keys = [sub_sketch_keys]
value_types = set([type(i) for i in sub_sketch_keys])
if (len(value_types) != 1):
raise ValueError("sub_sketch_keys member values need to have the same type.")
value_type = value_types.pop();
if (self.dtype() == dict and value_type != str):
raise TypeError("Only string value(s) can be passed to sub_sketch_keys for SArray of dictionary type. "+
"For dictionary types, sketch summary is computed by casting keys to string values.")
if (self.dtype() == array.array and value_type != int):
raise TypeError("Only int value(s) can be passed to sub_sketch_keys for SArray of array type")
else:
sub_sketch_keys = list()
return Sketch(self, background, sub_sketch_keys = sub_sketch_keys)
def append(self, other):
"""
Append an SArray to the current SArray. Creates a new SArray with the
rows from both SArrays. Both SArrays must be of the same type.
Parameters
----------
other : SArray
Another SArray whose rows are appended to current SArray.
Returns
-------
out : SArray
A new SArray that contains rows from both SArrays, with rows from
the ``other`` SArray coming after all rows from the current SArray.
See Also
--------
SFrame.append
Examples
--------
>>> sa = graphlab.SArray([1, 2, 3])
>>> sa2 = graphlab.SArray([4, 5, 6])
>>> sa.append(sa2)
dtype: int
Rows: 6
[1, 2, 3, 4, 5, 6]
"""
if type(other) is not SArray:
raise RuntimeError("SArray append can only work with SArray")
if self.dtype() != other.dtype():
raise RuntimeError("Data types in both SArrays have to be the same")
with cython_context():
return SArray(_proxy = self.__proxy__.append(other.__proxy__))
def unique(self):
"""
Get all unique values in the current SArray.
Raises a TypeError if the SArray is of dictionary type. Will not
necessarily preserve the order of the given SArray in the new SArray.
Returns
-------
out : SArray
A new SArray that contains the unique values of the current SArray.
See Also
--------
SFrame.unique
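Examples
--------
An illustrative call; ``sort()`` is applied only because ``unique`` does not
guarantee any particular ordering.
>>> graphlab.SArray([1, 2, 2, 3]).unique().sort()
dtype: int
Rows: 3
[1, 2, 3]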
"""
from .sframe import SFrame as _SFrame
tmp_sf = _SFrame()
tmp_sf.add_column(self, 'X1')
res = tmp_sf.groupby('X1',{})
return SArray(_proxy=res['X1'].__proxy__)
def show(self, view=None):
"""
show(view=None)
Visualize the SArray with GraphLab Create :mod:`~graphlab.canvas`. This function starts Canvas
if it is not already running. If the SArray has already been plotted,
this function will update the plot.
Parameters
----------
view : str, optional
The name of the SArray view to show. Can be one of:
- None: Use the default (depends on the dtype of the SArray).
- 'Categorical': Shows most frequent items in this SArray, sorted
by frequency. Only valid for str, int, or float dtypes.
- 'Numeric': Shows a histogram (distribution of values) for the
SArray. Only valid for int or float dtypes.
- 'Dictionary': Shows a cross filterable list of keys (categorical)
and values (categorical or numeric). Only valid for dict dtype.
- 'Array': Shows a Numeric view, filterable by sub-column (index).
Only valid for array.array dtype.
- 'List': Shows a Categorical view, aggregated across all sub-
columns (indices). Only valid for list dtype.
Returns
-------
view : graphlab.canvas.view.View
An object representing the GraphLab Canvas view
See Also
--------
canvas
Examples
--------
Suppose 'sa' is an SArray, we can view it in GraphLab Canvas using:
>>> sa.show()
If 'sa' is a numeric (int or float) SArray, we can view it as
a categorical variable using:
>>> sa.show(view='Categorical')
"""
from ..visualization.show import show
show(self, view=view)
def item_length(self):
"""
Length of each element in the current SArray.
Only works on SArrays of dict, array, or list type. If a given element
is a missing value, then the output element is also a missing value.
This function is equivalent to the following but more performant:
sa_item_len = sa.apply(lambda x: len(x) if x is not None else None)
Returns
-------
out_sf : SArray
A new SArray, each element in the SArray is the len of the corresponding
items in original SArray.
Examples
--------
>>> sa = SArray([
... {"is_restaurant": 1, "is_electronics": 0},
... {"is_restaurant": 1, "is_retail": 1, "is_electronics": 0},
... {"is_restaurant": 0, "is_retail": 1, "is_electronics": 0},
... {"is_restaurant": 0},
... {"is_restaurant": 1, "is_electronics": 1},
... None])
>>> sa.item_length()
dtype: int
Rows: 6
[2, 3, 3, 1, 2, None]
"""
if (self.dtype() not in [list, dict, array.array]):
raise TypeError("item_length() is only applicable for SArray of type list, dict and array.")
with cython_context():
return SArray(_proxy = self.__proxy__.item_length())
def split_datetime(self, column_name_prefix = "X", limit=None, tzone=False):
"""
Splits an SArray of datetime type to multiple columns, return a
new SFrame that contains expanded columns. A SArray of datetime will be
split by default into an SFrame of 6 columns, one for each
year/month/day/hour/minute/second element.
**Column Naming**
When splitting a SArray of datetime type, new columns are named:
prefix.year, prefix.month, etc. The prefix is set by the parameter
"column_name_prefix" and defaults to 'X'. If column_name_prefix is
None or empty, then no prefix is used.
**Timezone Column**
If the tzone parameter is True, timezone information is represented
as one additional float column giving the offset in hours from
GMT(0.0)/UTC.
Parameters
----------
column_name_prefix: str, optional
If provided, expanded column names would start with the given prefix.
Defaults to "X".
limit: list[str], optional
Limits the set of datetime elements to expand.
Possible values are 'year','month','day','hour','minute','second',
'weekday', 'isoweekday', 'tmweekday', and 'us'.
If not provided, only ['year','month','day','hour','minute','second']
are expanded.
- 'year': The year number
- 'month': A value between 1 and 12 where 1 is January.
- 'day': Day of the months. Begins at 1.
- 'hour': Hours since midnight.
- 'minute': Minutes after the hour.
- 'second': Seconds after the minute.
- 'us': Microseconds after the second. Between 0 and 999,999.
- 'weekday': A value between 0 and 6 where 0 is Monday.
- 'isoweekday': A value between 1 and 7 where 1 is Monday.
- 'tmweekday': A value between 0 and 7 where 0 is Sunday
tzone: bool, optional
A boolean parameter that determines whether to show timezone column or not.
Defaults to False.
Returns
-------
out : SFrame
A new SFrame that contains all expanded columns
Examples
--------
To expand only day and year elements of a datetime SArray
>>> sa = SArray(
[datetime(2011, 1, 21, 7, 7, 21, tzinfo=GMT(0)),
datetime(2010, 2, 5, 7, 8, 21, tzinfo=GMT(4.5))])
>>> sa.split_datetime(column_name_prefix=None,limit=['day','year'])
Columns:
day int
year int
Rows: 2
Data:
+-------+--------+
| day | year |
+-------+--------+
| 21 | 2011 |
| 5 | 2010 |
+-------+--------+
[2 rows x 2 columns]
To expand only year and tzone elements of a datetime SArray
with tzone column represented as a string. Columns are named with prefix:
'Y.column_name'.
>>> sa.split_datetime(column_name_prefix="Y",limit=['year'],tzone=True)
Columns:
Y.year int
Y.tzone float
Rows: 2
Data:
+----------+---------+
| Y.year | Y.tzone |
+----------+---------+
| 2011 | 0.0 |
| 2010 | 4.5 |
+----------+---------+
[2 rows x 2 columns]
"""
from .sframe import SFrame as _SFrame
if self.dtype() != datetime.datetime:
raise TypeError("Only column of datetime type is supported.")
if column_name_prefix == None:
column_name_prefix = ""
if type(column_name_prefix) != str:
raise TypeError("'column_name_prefix' must be a string")
# convert limit to column_keys
if limit != None:
if not _is_non_string_iterable(limit):
raise TypeError("'limit' must be a list");
name_types = set([type(i) for i in limit])
if (len(name_types) != 1):
raise TypeError("'limit' contains values that are different types")
if (name_types.pop() != str):
raise TypeError("'limit' must contain string values.")
if len(set(limit)) != len(limit):
raise ValueError("'limit' contains duplicate values")
column_types = []
if(limit == None):
limit = ['year','month','day','hour','minute','second']
column_types = [int] * len(limit)
if(tzone == True):
limit += ['tzone']
column_types += [float]
with cython_context():
return _SFrame(_proxy=self.__proxy__.expand(column_name_prefix, limit, column_types))
def unpack(self, column_name_prefix = "X", column_types=None, na_value=None, limit=None):
"""
Convert an SArray of list, array, or dict type to an SFrame with
multiple columns.
`unpack` expands an SArray using the values of each list/array/dict as
elements in a new SFrame of multiple columns. For example, an SArray of
lists each of length 4 will be expanded into an SFrame of 4 columns,
one for each list element. An SArray of lists/arrays of varying size
will be expanded to a number of columns equal to the longest list/array.
An SArray of dictionaries will be expanded into as many columns as
there are keys.
When unpacking an SArray of list or array type, new columns are named:
`column_name_prefix`.0, `column_name_prefix`.1, etc. If unpacking a
column of dict type, unpacked columns are named
`column_name_prefix`.key1, `column_name_prefix`.key2, etc.
When unpacking an SArray of list or dictionary types, missing values in
the original element remain as missing values in the resultant columns.
If the `na_value` parameter is specified, all values equal to this
given value are also replaced with missing values. In an SArray of
array.array type, NaN is interpreted as a missing value.
:py:func:`graphlab.SFrame.pack_columns()` is the reverse effect of unpack
Parameters
----------
column_name_prefix: str, optional
If provided, unpacked column names would start with the given prefix.
column_types: list[type], optional
Column types for the unpacked columns. If not provided, column
types are automatically inferred from first 100 rows. Defaults to
None.
na_value: optional
Convert all values that are equal to `na_value` to
missing value if specified.
limit: list, optional
Limits the set of list/array/dict keys to unpack.
For list/array SArrays, 'limit' must contain integer indices.
For dict SArray, 'limit' must contain dictionary keys.
Returns
-------
out : SFrame
A new SFrame that contains all unpacked columns
Examples
--------
To unpack a dict SArray
>>> sa = SArray([{ 'word': 'a', 'count': 1},
... { 'word': 'cat', 'count': 2},
... { 'word': 'is', 'count': 3},
... { 'word': 'coming','count': 4}])
Normal case of unpacking SArray of type dict:
>>> sa.unpack(column_name_prefix=None)
Columns:
count int
word str
<BLANKLINE>
Rows: 4
<BLANKLINE>
Data:
+-------+--------+
| count | word |
+-------+--------+
| 1 | a |
| 2 | cat |
| 3 | is |
| 4 | coming |
+-------+--------+
[4 rows x 2 columns]
<BLANKLINE>
Unpack only keys with 'word':
>>> sa.unpack(limit=['word'])
Columns:
X.word str
<BLANKLINE>
Rows: 4
<BLANKLINE>
Data:
+--------+
| X.word |
+--------+
| a |
| cat |
| is |
| coming |
+--------+
[4 rows x 1 columns]
<BLANKLINE>
>>> sa2 = SArray([
... [1, 0, 1],
... [1, 1, 1],
... [0, 1]])
Convert all zeros to missing values:
>>> sa2.unpack(column_types=[int, int, int], na_value=0)
Columns:
X.0 int
X.1 int
X.2 int
<BLANKLINE>
Rows: 3
<BLANKLINE>
Data:
+------+------+------+
| X.0 | X.1 | X.2 |
+------+------+------+
| 1 | None | 1 |
| 1 | 1 | 1 |
| None | 1 | None |
+------+------+------+
[3 rows x 3 columns]
<BLANKLINE>
"""
from .sframe import SFrame as _SFrame
if self.dtype() not in [dict, array.array, list]:
raise TypeError("Only SArray of dict/list/array type supports unpack")
if column_name_prefix == None:
column_name_prefix = ""
if type(column_name_prefix) != str:
raise TypeError("'column_name_prefix' must be a string")
# validate 'limit'
if limit != None:
if (not _is_non_string_iterable(limit)):
raise TypeError("'limit' must be a list");
name_types = set([type(i) for i in limit])
if (len(name_types) != 1):
raise TypeError("'limit' contains values that are different types")
# limit value should be numeric if unpacking sarray.array value
if (self.dtype() != dict) and (name_types.pop() != int):
raise TypeError("'limit' must contain integer values.")
if len(set(limit)) != len(limit):
raise ValueError("'limit' contains duplicate values")
if (column_types != None):
if not _is_non_string_iterable(column_types):
raise TypeError("column_types must be a list");
for column_type in column_types:
if (column_type not in (int, float, str, list, dict, array.array)):
raise TypeError("column_types contains unsupported types. Supported types are ['float', 'int', 'list', 'dict', 'str', 'array.array']")
if limit != None:
if len(limit) != len(column_types):
raise ValueError("limit and column_types do not have the same length")
elif self.dtype() == dict:
raise ValueError("if 'column_types' is given, 'limit' has to be provided to unpack dict type.")
else:
limit = range(len(column_types))
else:
head_rows = self.head(100).dropna()
lengths = [len(i) for i in head_rows]
if len(lengths) == 0 or max(lengths) == 0:
raise RuntimeError("Cannot infer number of items from the SArray, SArray may be empty. please explicitly provide column types")
# infer column types for dict type at server side, for list and array, infer from client side
if self.dtype() != dict:
length = max(lengths)
if limit == None:
limit = range(length)
else:
# adjust the length
length = len(limit)
if self.dtype() == array.array:
column_types = [float for i in range(length)]
else:
column_types = list()
for i in limit:
t = [(x[i] if ((x is not None) and len(x) > i) else None) for x in head_rows]
column_types.append(infer_type_of_list(t))
with cython_context():
if (self.dtype() == dict and column_types == None):
limit = limit if limit != None else []
return _SFrame(_proxy=self.__proxy__.unpack_dict(column_name_prefix.encode(), limit, na_value))
else:
return _SFrame(_proxy=self.__proxy__.unpack(column_name_prefix.encode(), limit, column_types, na_value))
def sort(self, ascending=True):
"""
Sort all values in this SArray.
Sort only works for an sarray of type str, int, float or datetime.datetime; otherwise a TypeError
will be raised. Creates a new, sorted SArray.
Parameters
----------
ascending: boolean, optional
If true, the sarray values are sorted in ascending order, otherwise,
descending order.
Returns
-------
out: SArray
Examples
--------
>>> sa = SArray([3,2,1])
>>> sa.sort()
dtype: int
Rows: 3
[1, 2, 3]
"""
from .sframe import SFrame as _SFrame
if self.dtype() not in (int, float, str, datetime.datetime):
raise TypeError("Only sarray with type (int, float, str, datetime.datetime) can be sorted")
sf = _SFrame()
sf['a'] = self
return sf.sort('a', ascending)['a']
def __check_min_observations(self, min_observations):
if min_observations is None:
min_observations = (1 << 64) - 1
if min_observations < 0:
raise ValueError("min_observations must be a positive integer")
return min_observations
def rolling_mean(self, window_start, window_end, min_observations=None):
"""
Calculate a new SArray of the mean of different subsets over this
SArray.
Also known as a "moving average" or "running average". The subset that
the mean is calculated over is defined as an inclusive range relative
to the position to each value in the SArray, using `window_start` and
`window_end`. For a better understanding of this, see the examples
below.
Parameters
----------
window_start : int
The start of the subset to calculate the mean relative to the
current value.
window_end : int
The end of the subset to calculate the mean relative to the current
value. Must be greater than `window_start`.
min_observations : int
Minimum number of non-missing observations in window required to
calculate the mean (otherwise result is None). None signifies that
the entire window must not include a missing value. A negative
number throws an error.
Returns
-------
out : SArray
Examples
--------
>>> import pandas
>>> sa = SArray([1,2,3,4,5])
>>> series = pandas.Series([1,2,3,4,5])
A rolling mean with a window including the previous 2 entries including
the current:
>>> sa.rolling_mean(-2,0)
dtype: float
Rows: 5
[None, None, 2.0, 3.0, 4.0]
Pandas equivalent:
>>> pandas.rolling_mean(series, 3)
0 NaN
1 NaN
2 2
3 3
4 4
dtype: float64
Same rolling mean operation, but 2 minimum observations:
>>> sa.rolling_mean(-2,0,min_observations=2)
dtype: float
Rows: 5
[None, 1.5, 2.0, 3.0, 4.0]
Pandas equivalent:
>>> pandas.rolling_mean(series, 3, min_periods=2)
0 NaN
1 1.5
2 2.0
3 3.0
4 4.0
dtype: float64
A rolling mean with a size of 3, centered around the current:
>>> sa.rolling_mean(-1,1)
dtype: float
Rows: 5
[None, 2.0, 3.0, 4.0, None]
Pandas equivalent:
>>> pandas.rolling_mean(series, 3, center=True)
0 NaN
1 2
2 3
3 4
4 NaN
dtype: float64
A rolling mean with a window including the current and the 2 entries
following:
>>> sa.rolling_mean(0,2)
dtype: float
Rows: 5
[2.0, 3.0, 4.0, None, None]
A rolling mean with a window including the previous 2 entries NOT
including the current:
>>> sa.rolling_mean(-2,-1)
dtype: float
Rows: 5
[None, None, 1.5, 2.5, 3.5]
"""
min_observations = self.__check_min_observations(min_observations)
agg_op = None
if self.dtype() is array.array:
agg_op = '__builtin__vector__avg__'
else:
agg_op = '__builtin__avg__'
return SArray(_proxy=self.__proxy__.builtin_rolling_apply(agg_op, window_start, window_end, min_observations))
def rolling_sum(self, window_start, window_end, min_observations=None):
"""
Calculate a new SArray of the sum of different subsets over this
SArray.
Also known as a "moving sum" or "running sum". The subset that
the sum is calculated over is defined as an inclusive range relative
to the position to each value in the SArray, using `window_start` and
`window_end`. For a better understanding of this, see the examples
below.
Parameters
----------
window_start : int
The start of the subset to calculate the sum relative to the
current value.
window_end : int
The end of the subset to calculate the sum relative to the current
value. Must be greater than `window_start`.
min_observations : int
Minimum number of non-missing observations in window required to
calculate the sum (otherwise result is None). None signifies that
the entire window must not include a missing value. A negative
number throws an error.
Returns
-------
out : SArray
Examples
--------
>>> import pandas
>>> sa = SArray([1,2,3,4,5])
>>> series = pandas.Series([1,2,3,4,5])
A rolling sum with a window including the previous 2 entries including
the current:
>>> sa.rolling_sum(-2,0)
dtype: int
Rows: 5
[None, None, 6, 9, 12]
Pandas equivalent:
>>> pandas.rolling_sum(series, 3)
0 NaN
1 NaN
2 6
3 9
4 12
dtype: float64
Same rolling sum operation, but 2 minimum observations:
>>> sa.rolling_sum(-2,0,min_observations=2)
dtype: int
Rows: 5
[None, 3, 6, 9, 12]
Pandas equivalent:
>>> pandas.rolling_sum(series, 3, min_periods=2)
0 NaN
1 3
2 6
3 9
4 12
dtype: float64
A rolling sum with a size of 3, centered around the current:
>>> sa.rolling_sum(-1,1)
dtype: int
Rows: 5
[None, 6, 9, 12, None]
Pandas equivalent:
>>> pandas.rolling_sum(series, 3, center=True)
0 NaN
1 6
2 9
3 12
4 NaN
dtype: float64
A rolling sum with a window including the current and the 2 entries
following:
>>> sa.rolling_sum(0,2)
dtype: int
Rows: 5
[6, 9, 12, None, None]
A rolling sum with a window including the previous 2 entries NOT
including the current:
>>> sa.rolling_sum(-2,-1)
dtype: int
Rows: 5
[None, None, 3, 5, 7]
"""
min_observations = self.__check_min_observations(min_observations)
agg_op = None
if self.dtype() is array.array:
agg_op = '__builtin__vector__sum__'
else:
agg_op = '__builtin__sum__'
return SArray(_proxy=self.__proxy__.builtin_rolling_apply(agg_op, window_start, window_end, min_observations))
def rolling_max(self, window_start, window_end, min_observations=None):
"""
Calculate a new SArray of the maximum value of different subsets over
this SArray.
The subset that the maximum is calculated over is defined as an
inclusive range relative to the position to each value in the SArray,
using `window_start` and `window_end`. For a better understanding of
this, see the examples below.
Parameters
----------
window_start : int
The start of the subset to calculate the maximum relative to the
current value.
window_end : int
The end of the subset to calculate the maximum relative to the current
value. Must be greater than `window_start`.
min_observations : int
Minimum number of non-missing observations in window required to
calculate the maximum (otherwise result is None). None signifies that
the entire window must not include a missing value. A negative
number throws an error.
Returns
-------
out : SArray
Examples
--------
>>> import pandas
>>> sa = SArray([1,2,3,4,5])
>>> series = pandas.Series([1,2,3,4,5])
A rolling max with a window including the previous 2 entries including
the current:
>>> sa.rolling_max(-2,0)
dtype: int
Rows: 5
[None, None, 3, 4, 5]
Pandas equivalent:
>>> pandas.rolling_max(series, 3)
0 NaN
1 NaN
2 3
3 4
4 5
dtype: float64
Same rolling max operation, but 2 minimum observations:
>>> sa.rolling_max(-2,0,min_observations=2)
dtype: int
Rows: 5
[None, 2, 3, 4, 5]
Pandas equivalent:
>>> pandas.rolling_max(series, 3, min_periods=2)
0 NaN
1 2
2 3
3 4
4 5
dtype: float64
A rolling max with a size of 3, centered around the current:
>>> sa.rolling_max(-1,1)
dtype: int
Rows: 5
[None, 3, 4, 5, None]
Pandas equivalent:
>>> pandas.rolling_max(series, 3, center=True)
0 NaN
1 3
2 4
3 5
4 NaN
dtype: float64
A rolling max with a window including the current and the 2 entries
following:
>>> sa.rolling_max(0,2)
dtype: int
Rows: 5
[3, 4, 5, None, None]
A rolling max with a window including the previous 2 entries NOT
including the current:
>>> sa.rolling_max(-2,-1)
dtype: int
Rows: 5
[None, None, 2, 3, 4]
"""
min_observations = self.__check_min_observations(min_observations)
agg_op = '__builtin__max__'
return SArray(_proxy=self.__proxy__.builtin_rolling_apply(agg_op, window_start, window_end, min_observations))
def rolling_min(self, window_start, window_end, min_observations=None):
"""
Calculate a new SArray of the minimum value of different subsets over
this SArray.
The subset that the minimum is calculated over is defined as an
inclusive range relative to the position to each value in the SArray,
using `window_start` and `window_end`. For a better understanding of
this, see the examples below.
Parameters
----------
window_start : int
The start of the subset to calculate the minimum relative to the
current value.
window_end : int
The end of the subset to calculate the minimum relative to the current
value. Must be greater than `window_start`.
min_observations : int
Minimum number of non-missing observations in window required to
calculate the minimum (otherwise result is None). None signifies that
the entire window must not include a missing value. A negative
number throws an error.
Returns
-------
out : SArray
Examples
--------
>>> import pandas
>>> sa = SArray([1,2,3,4,5])
>>> series = pandas.Series([1,2,3,4,5])
A rolling min with a window including the previous 2 entries including
the current:
>>> sa.rolling_min(-2,0)
dtype: int
Rows: 5
[None, None, 1, 2, 3]
Pandas equivalent:
>>> pandas.rolling_min(series, 3)
0 NaN
1 NaN
2 1
3 2
4 3
dtype: float64
Same rolling min operation, but 2 minimum observations:
>>> sa.rolling_min(-2,0,min_observations=2)
dtype: int
Rows: 5
[None, 1, 1, 2, 3]
Pandas equivalent:
>>> pandas.rolling_min(series, 3, min_periods=2)
0 NaN
1 1
2 1
3 2
4 3
dtype: float64
A rolling min with a size of 3, centered around the current:
>>> sa.rolling_min(-1,1)
dtype: int
Rows: 5
[None, 1, 2, 3, None]
Pandas equivalent:
>>> pandas.rolling_min(series, 3, center=True)
0 NaN
1 1
2 2
3 3
4 NaN
dtype: float64
A rolling min with a window including the current and the 2 entries
following:
>>> sa.rolling_min(0,2)
dtype: int
Rows: 5
[1, 2, 3, None, None]
A rolling min with a window including the previous 2 entries NOT
including the current:
>>> sa.rolling_min(-2,-1)
dtype: int
Rows: 5
[None, None, 1, 2, 3]
"""
min_observations = self.__check_min_observations(min_observations)
agg_op = '__builtin__min__'
return SArray(_proxy=self.__proxy__.builtin_rolling_apply(agg_op, window_start, window_end, min_observations))
def rolling_var(self, window_start, window_end, min_observations=None):
"""
Calculate a new SArray of the variance of different subsets over this
SArray.
The subset that the variance is calculated over is defined as an inclusive
range relative to the position to each value in the SArray, using
`window_start` and `window_end`. For a better understanding of this,
see the examples below.
Parameters
----------
window_start : int
The start of the subset to calculate the variance relative to the
current value.
window_end : int
The end of the subset to calculate the variance relative to the current
value. Must be greater than `window_start`.
min_observations : int
Minimum number of non-missing observations in window required to
calculate the variance (otherwise result is None). None signifies that
the entire window must not include a missing value. A negative
number throws an error.
Returns
-------
out : SArray
Examples
--------
>>> import pandas
>>> sa = SArray([1,2,3,4,5])
>>> series = pandas.Series([1,2,3,4,5])
A rolling variance with a window including the previous 2 entries
including the current:
>>> sa.rolling_var(-2,0)
dtype: float
Rows: 5
[None, None, 0.6666666666666666, 0.6666666666666666, 0.6666666666666666]
Pandas equivalent:
>>> pandas.rolling_var(series, 3, ddof=0)
0 NaN
1 NaN
2 0.666667
3 0.666667
4 0.666667
dtype: float64
Same rolling variance operation, but 2 minimum observations:
>>> sa.rolling_var(-2,0,min_observations=2)
dtype: float
Rows: 5
[None, 0.25, 0.6666666666666666, 0.6666666666666666, 0.6666666666666666]
Pandas equivalent:
>>> pandas.rolling_var(series, 3, ddof=0, min_periods=2)
0 NaN
1 0.250000
2 0.666667
3 0.666667
4 0.666667
dtype: float64
A rolling variance with a size of 3, centered around the current:
>>> sa.rolling_var(-1,1)
dtype: float
Rows: 5
[None, 0.6666666666666666, 0.6666666666666666, 0.6666666666666666, None]
Pandas equivalent:
>>> pandas.rolling_var(series, 3, center=True)
0 NaN
1 0.666667
2 0.666667
3 0.666667
4 NaN
dtype: float64
A rolling variance with a window including the current and the 2 entries
following:
>>> sa.rolling_var(0,2)
dtype: float
Rows: 5
[0.6666666666666666, 0.6666666666666666, 0.6666666666666666, None, None]
A rolling variance with a window including the previous 2 entries NOT
including the current:
>>> sa.rolling_var(-2,-1)
dtype: float
Rows: 5
[None, None, 0.25, 0.25, 0.25]
"""
min_observations = self.__check_min_observations(min_observations)
agg_op = '__builtin__var__'
return SArray(_proxy=self.__proxy__.builtin_rolling_apply(agg_op, window_start, window_end, min_observations))
def rolling_stdv(self, window_start, window_end, min_observations=None):
"""
Calculate a new SArray of the standard deviation of different subsets
over this SArray.
The subset that the standard deviation is calculated over is defined as
an inclusive range relative to the position to each value in the
SArray, using `window_start` and `window_end`. For a better
understanding of this, see the examples below.
Parameters
----------
window_start : int
The start of the subset to calculate the standard deviation
relative to the current value.
window_end : int
The end of the subset to calculate the standard deviation relative
to the current value. Must be greater than `window_start`.
min_observations : int
Minimum number of non-missing observations in window required to
calculate the standard deviation (otherwise result is None). None
signifies that the entire window must not include a missing value.
A negative number throws an error.
Returns
-------
out : SArray
Examples
--------
>>> import pandas
>>> sa = SArray([1,2,3,4,5])
>>> series = pandas.Series([1,2,3,4,5])
A rolling standard deviation with a window including the previous 2
entries including the current:
>>> sa.rolling_stdv(-2,0)
dtype: float
Rows: 5
[None, None, 0.816496580927726, 0.816496580927726, 0.816496580927726]
Pandas equivalent:
>>> pandas.rolling_std(series, 3, ddof=0)
0 NaN
1 NaN
2 0.816497
3 0.816497
4 0.816497
dtype: float64
Same rolling standard deviation operation, but 2 minimum observations:
>>> sa.rolling_stdv(-2,0,min_observations=2)
dtype: float
Rows: 5
[None, 0.5, 0.816496580927726, 0.816496580927726, 0.816496580927726]
Pandas equivalent:
>>> pandas.rolling_std(series, 3, ddof=0, min_periods=2)
0 NaN
1 0.500000
2 0.816497
3 0.816497
4 0.816497
dtype: float64
A rolling standard deviation with a size of 3, centered around the
current:
>>> sa.rolling_stdv(-1,1)
dtype: float
Rows: 5
[None, 0.816496580927726, 0.816496580927726, 0.816496580927726, None]
Pandas equivalent:
>>> pandas.rolling_std(series, 3, center=True, ddof=0)
0 NaN
1 0.816497
2 0.816497
3 0.816497
4 NaN
dtype: float64
A rolling standard deviation with a window including the current and
the 2 entries following:
>>> sa.rolling_stdv(0,2)
dtype: float
Rows: 5
[0.816496580927726, 0.816496580927726, 0.816496580927726, None, None]
A rolling standard deviation with a window including the previous 2
entries NOT including the current:
>>> sa.rolling_stdv(-2,-1)
dtype: float
Rows: 5
[None, None, 0.5, 0.5, 0.5]
"""
min_observations = self.__check_min_observations(min_observations)
agg_op = '__builtin__stdv__'
return SArray(_proxy=self.__proxy__.builtin_rolling_apply(agg_op, window_start, window_end, min_observations))
def rolling_count(self, window_start, window_end):
"""
Count the number of non-NULL values of different subsets over this
SArray.
The subset that the count is executed on is defined as an inclusive
range relative to the position to each value in the SArray, using
`window_start` and `window_end`. For a better understanding of this,
see the examples below.
Parameters
----------
window_start : int
The start of the subset to count relative to the current value.
window_end : int
The end of the subset to count relative to the current value. Must
be greater than `window_start`.
Returns
-------
out : SArray
Examples
--------
>>> import pandas
>>> sa = SArray([1,2,3,None,5])
>>> series = pandas.Series([1,2,3,None,5])
A rolling count with a window including the previous 2 entries including
the current:
>>> sa.rolling_count(-2,0)
dtype: int
Rows: 5
[1, 2, 3, 2, 2]
Pandas equivalent:
>>> pandas.rolling_count(series, 3)
0 1
1 2
2 3
3 2
4 2
dtype: float64
A rolling count with a size of 3, centered around the current:
>>> sa.rolling_count(-1,1)
dtype: int
Rows: 5
[2, 3, 2, 2, 1]
Pandas equivalent:
>>> pandas.rolling_count(series, 3, center=True)
0 2
1 3
2 2
3 2
4 1
dtype: float64
A rolling count with a window including the current and the 2 entries
following:
>>> sa.rolling_count(0,2)
dtype: int
Rows: 5
[3, 2, 2, 1, 1]
A rolling count with a window including the previous 2 entries NOT
including the current:
>>> sa.rolling_count(-2,-1)
dtype: int
Rows: 5
[0, 1, 2, 2, 1]
"""
agg_op = '__builtin__nonnull__count__'
return SArray(_proxy=self.__proxy__.builtin_rolling_apply(agg_op, window_start, window_end, 0))
def cumulative_sum(self):
"""
Return the cumulative sum of the elements in the SArray.
Returns an SArray where each element in the output corresponds to the
sum of all the elements preceding and including it. The SArray is
expected to be of numeric type (int, float), or a numeric vector type.
Returns
-------
out : sarray[int, float, array.array]
Notes
-----
- Missing values are ignored while performing the cumulative
aggregate operation.
- For SArray's of type array.array, all entries are expected to
be of the same size.
Examples
--------
>>> sa = SArray([1, 2, 3, 4, 5])
>>> sa.cumulative_sum()
dtype: int
Rows: 5
[1, 3, 6, 10, 15]
"""
from .. import extensions
agg_op = "__builtin__cum_sum__"
return SArray(_proxy = self.__proxy__.builtin_cumulative_aggregate(agg_op))
def cumulative_mean(self):
"""
Return the cumulative mean of the elements in the SArray.
Returns an SArray where each element in the output corresponds to the
mean value of all the elements preceding and including it. The SArray
is expected to be of numeric type (int, float), or a numeric vector
type.
Returns
-------
out : Sarray[float, array.array]
Notes
-----
- Missing values are ignored while performing the cumulative
aggregate operation.
- For SArray's of type array.array, all entries are expected to
be of the same size.
Examples
--------
>>> sa = SArray([1, 2, 3, 4, 5])
>>> sa.cumulative_mean()
dtype: float
Rows: 5
[1.0, 1.5, 2.0, 2.5, 3.0]
"""
from .. import extensions
agg_op = "__builtin__cum_avg__"
return SArray(_proxy = self.__proxy__.builtin_cumulative_aggregate(agg_op))
def cumulative_min(self):
"""
Return the cumulative minimum value of the elements in the SArray.
Returns an SArray where each element in the output corresponds to the
minimum value of all the elements preceding and including it. The
SArray is expected to be of numeric type (int, float).
Returns
-------
out : SArray[int, float]
Notes
-----
- Missing values are ignored while performing the cumulative
aggregate operation.
Examples
--------
>>> sa = SArray([1, 2, 3, 4, 0])
>>> sa.cumulative_min()
dtype: int
Rows: 5
[1, 1, 1, 1, 0]
"""
from .. import extensions
agg_op = "__builtin__cum_min__"
return SArray(_proxy = self.__proxy__.builtin_cumulative_aggregate(agg_op))
def cumulative_max(self):
"""
Return the cumulative maximum value of the elements in the SArray.
Returns an SArray where each element in the output corresponds to the
maximum value of all the elements preceding and including it. The
SArray is expected to be of numeric type (int, float).
Returns
-------
out : SArray[int, float]
Notes
-----
- Missing values are ignored while performing the cumulative
aggregate operation.
Examples
--------
>>> sa = SArray([1, 0, 3, 4, 2])
>>> sa.cumulative_max()
dtype: int
Rows: 5
[1, 1, 3, 4, 4]
"""
from .. import extensions
agg_op = "__builtin__cum_max__"
return SArray(_proxy = self.__proxy__.builtin_cumulative_aggregate(agg_op))
def cumulative_std(self):
"""
Return the cumulative standard deviation of the elements in the SArray.
Returns an SArray where each element in the output corresponds to the
standard deviation of all the elements preceding and including it. The
SArray is expected to be of numeric type, or a numeric vector type.
Returns
-------
out : SArray[int, float]
Notes
-----
- Missing values are ignored while performing the cumulative
aggregate operation.
Examples
--------
>>> sa = SArray([1, 2, 3, 4, 0])
>>> sa.cumulative_std()
dtype: float
Rows: 5
[0.0, 0.5, 0.816496580927726, 1.118033988749895, 1.4142135623730951]
"""
from .. import extensions
agg_op = "__builtin__cum_std__"
return SArray(_proxy = self.__proxy__.builtin_cumulative_aggregate(agg_op))
def cumulative_var(self):
"""
Return the cumulative variance of the elements in the SArray.
Returns an SArray where each element in the output corresponds to the
variance of all the elements preceding and including it. The SArray is
expected to be of numeric type, or a numeric vector type.
Returns
-------
out : SArray[int, float]
Notes
-----
- Missing values are ignored while performing the cumulative
aggregate operation.
Examples
--------
>>> sa = SArray([1, 2, 3, 4, 0])
>>> sa.cumulative_var()
dtype: float
Rows: 5
[0.0, 0.25, 0.6666666666666666, 1.25, 2.0]
"""
from .. import extensions
agg_op = "__builtin__cum_var__"
return SArray(_proxy = self.__proxy__.builtin_cumulative_aggregate(agg_op))
| bsd-3-clause |
brandonrobertz/BitcoinTradingAlgorithmToolkit | src/dtools.py | 2 | 6051 | import numpy as np
import pandas as pd
import processlogs2 as pl2
import indicators as ind
import data
import csv
import cPickle
import time
import os
#################
#
# MINMAX NORMALIZE
#
#################
def minmax( dataset, crt=False, std=False):
""" Remap each column to -1, 1 based on min/max. We can choose to
skip CRT and STD; by default every column (presumably just the TIs) is normalized.
TODO: crt and std inclusion not implemented
"""
columns = dataset.columns
for column in columns:
#if ("STD" not in column) and ("CRT" not in column):
A = dataset[column]
imin = -1
imax = 1
dmin = np.min(A)
dmax = np.max(A)
B = imin + (imax - imin)*(A - dmin)/(dmax - dmin)
dataset = dataset.drop( [column], axis=1)
dataset = dataset.join( pd.DataFrame( {column:B}, index=[B.index]), how="outer")
#elif crt or std:
# raise NotImplementedError
return dataset
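# Illustrative use on a hypothetical one-column indicator DataFrame:
#   minmax( pd.DataFrame( {"RSI": [30., 50., 70.]}))
#   # -> the RSI column becomes [-1.0, 0.0, 1.0] (min maps to -1, max to +1)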
#################
#
# GEN DS
#
#################
def gen_ds( dataset, forward, opts, type='CRT'):
# trim leading NaN vals ... takes time to warm up the indicators
dataset = cut_init_nans( dataset)
# targets as compound returns
if type == 'CRT':
# generate ohlc so we can calculate n-forward CRTs
ohlc = pd.DataFrame( {"open":dataset["LTC_open_%s"%opts["time_str"]],
"high":dataset["LTC_high_%s"%opts["time_str"]],
"low":dataset["LTC_low_%s"%opts["time_str"]],
"close": dataset["LTC_close_%s"%opts["time_str"]]},
index=dataset["LTC_open_%s"%opts["time_str"]].index)
CRT_1 = ind.CRT( ohlc, 1)
# move those forward CRTs back, so we can use them as target, correct predictions
tgt = CRT_1.shift(-1)
elif type == 'PRICE':
# bring next price back so we can use it as target
tgt = dataset["LTC_close_%s"%opts["time_str"]].shift(-1)
tgt.name = 'tgt'
# drop off OHLC data
dataset = dataset.drop( [ "LTC_open_%s"%opts["time_str"],
"LTC_high_%s"%opts["time_str"],
"LTC_low_%s"%opts["time_str"],
"LTC_close_%s"%opts["time_str"] ], axis=1)
# trim nans off end
tgt = tgt.ix[:-1*forward]; dataset = dataset.ix[:-1*forward]
# return
return dataset, tgt
###########################
# CRT SINGLE
###########################
def crt_single( initial, final, n = 1):
""" Calculate compound return of two prices. For use in
generating target values from and to match our input.
initial : initial investment
final : final price
n : (default 1) periods this happened over. Since it's relative to
the compound returns in our LTC Coin class, which is calculated
from OHLC data every 1min, this needs to match that breakdown
(mins to mins, secs to secs, etc).
"""
crt = np.log( final / initial) / n
return crt
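# Worked example (hypothetical prices, not taken from real data):
#   crt_single( 100.0, 110.0)      -> log(110/100)/1 ~= 0.0953
#   crt_single( 100.0, 110.0, n=5) -> log(110/100)/5 ~= 0.0191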
###########################
# TARGETS
###########################
def gen_target( dataset, N=1):
""" Generate N-period target predictions based on close column
in dataset. We assume this is already in pct change. The close
column shouldn't be normalized or anything.
"""
tgt = dataset.close.shift(-1*N)
return tgt
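# Example: with N=1 each row's target is simply the next period's close, i.e.
# row t is paired with close[t+1]; the final row's target comes out as NaN.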
###########################
# TARGETS
###########################
def gen_target_crt( dataset, N=1):
""" Generate N-period target predictions based on close column
in dataset. The close column shouldn't be normalized or anything.
"""
if N < 0:
# what was the change over the last period? (btwn t-N and t)
tgt = crt_single( dataset.close.shift(-1*N), dataset.close )
else:
# what will the change be over the next N periods? (btwn t and t+N)
tgt = crt_single( dataset.close, dataset.close.shift(-1*N))
return tgt
############################
# CUT_INIT_NANS
###########################
def cut_init_nans( dataset):
""" Because of initialization of things like rolling means, etc,
the first vals of a lot of the columns produced by Data are
NaN. Well, this fucks backprop, so we need to trim it.
dataset : a pandas DataFrame
returns: the dataset minus the NaNs at the start
"""
# find first non-NaN val, cut there (it needs time to warm up)
iii = 0
for iii in xrange( len(dataset)):
nn = False
for iiii in dataset.ix[iii]:
if np.isnan(iiii):
nn = True
if nn == False:
dataset = dataset.ix[iii:]
break
return dataset
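# Minimal sketch (column names are made up). The first rows of a frame produced
# by Data typically hold NaNs while rolling indicators warm up; cut_init_nans()
# keeps everything from the first fully non-NaN row onward.
def _cut_init_nans_example():
  demo = pd.DataFrame( {"sma": [np.nan, np.nan, 1.5, 2.0],
                        "close": [4.0, 5.0, 6.0, 7.0]})
  return cut_init_nans( demo)  # only the last two rows survive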
############################
# SAVE_CSV
###########################
def save_csv( dataset_t, tgt, name=""):
""" Save a dataset, both the inputs (dataset) and targets (tgt)
as two CSV files for input to a matlab neural network
dataset_t : the dataset used as input to neural net
tgt : the targets, or correct values, to train/evaluate the NN
name : name to label dataset ... e.g. "train", "test", etc
"""
if not name:
name = str(time.time())
with open('%s.dataset.csv'%name, 'wb') as csvfile:
writer = csv.writer(csvfile, delimiter=',', quotechar='"', quoting=csv.QUOTE_NONE)
writer.writerows(dataset_t.values)
with open('%s.tgt.csv'%name, 'wb') as csvfile:
writer = csv.writer(csvfile, delimiter=',', quotechar='"', quoting=csv.QUOTE_NONE)
if type(tgt.values[0]) == np.ndarray:
writer.writerows([ v for v in tgt.values])
else:
writer.writerows([ [v] for v in tgt.values])
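# Usage note: with name="train" the two writers above produce
# "train.dataset.csv" and "train.tgt.csv" in the current working directory,
# ready to be fed to an external (e.g. matlab) neural-network script.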
###########################
# STORE_PICK
###########################
def store_pick( d):
""" Store a fully-loaded Data class as a pickle
d : a Data class
"""
name = str(time.time())+".d."+d.filename+".pickle"
filename = os.path.realpath( os.path.join( "pickles", name))
f = open( filename, "w")
cPickle.dump( d, f)
f.close()
###########################
# LOAD_PICK
###########################
def load_pick( filename):
""" Load a fully-loaded Data class as a pickle
filename : filename of pickle
"""
filename = os.path.realpath( os.path.join( "pickles", filename))
f = open( filename, "r")
d = cPickle.load( f)
f.close()
return d
| gpl-3.0 |
jakevdp/2014_fall_ASTR599 | notebooks/fig_code/helpers.py | 74 | 2301 | """
Small helpers for code that is not shown in the notebooks
"""
from sklearn import neighbors, datasets, linear_model
import pylab as pl
import numpy as np
from matplotlib.colors import ListedColormap
# Create color maps for 3-class classification problem, as with iris
cmap_light = ListedColormap(['#FFAAAA', '#AAFFAA', '#AAAAFF'])
cmap_bold = ListedColormap(['#FF0000', '#00FF00', '#0000FF'])
def plot_iris_knn():
iris = datasets.load_iris()
X = iris.data[:, :2] # we only take the first two features. We could
# avoid this ugly slicing by using a two-dim dataset
y = iris.target
knn = neighbors.KNeighborsClassifier(n_neighbors=3)
knn.fit(X, y)
x_min, x_max = X[:, 0].min() - .1, X[:, 0].max() + .1
y_min, y_max = X[:, 1].min() - .1, X[:, 1].max() + .1
xx, yy = np.meshgrid(np.linspace(x_min, x_max, 100),
np.linspace(y_min, y_max, 100))
Z = knn.predict(np.c_[xx.ravel(), yy.ravel()])
# Put the result into a color plot
Z = Z.reshape(xx.shape)
pl.figure()
pl.pcolormesh(xx, yy, Z, cmap=cmap_light)
# Plot also the training points
pl.scatter(X[:, 0], X[:, 1], c=y, cmap=cmap_bold)
pl.xlabel('sepal length (cm)')
pl.ylabel('sepal width (cm)')
pl.axis('tight')
def plot_polynomial_regression():
rng = np.random.RandomState(0)
x = 2*rng.rand(100) - 1
f = lambda t: 1.2 * t**2 + .1 * t**3 - .4 * t **5 - .5 * t ** 9
y = f(x) + .4 * rng.normal(size=100)
x_test = np.linspace(-1, 1, 100)
pl.figure()
pl.scatter(x, y, s=4)
X = np.array([x**i for i in range(5)]).T
X_test = np.array([x_test**i for i in range(5)]).T
regr = linear_model.LinearRegression()
regr.fit(X, y)
pl.plot(x_test, regr.predict(X_test), label='4th order')
X = np.array([x**i for i in range(10)]).T
X_test = np.array([x_test**i for i in range(10)]).T
regr = linear_model.LinearRegression()
regr.fit(X, y)
pl.plot(x_test, regr.predict(X_test), label='9th order')
pl.legend(loc='best')
pl.axis('tight')
pl.title('Fitting a 4th and a 9th order polynomial')
pl.figure()
pl.scatter(x, y, s=4)
pl.plot(x_test, f(x_test), label="truth")
pl.axis('tight')
pl.title('Ground truth (9th order polynomial)')
| apache-2.0 |
shenzebang/scikit-learn | examples/cluster/plot_dbscan.py | 346 | 2479 | # -*- coding: utf-8 -*-
"""
===================================
Demo of DBSCAN clustering algorithm
===================================
Finds core samples of high density and expands clusters from them.
"""
print(__doc__)
import numpy as np
from sklearn.cluster import DBSCAN
from sklearn import metrics
from sklearn.datasets.samples_generator import make_blobs
from sklearn.preprocessing import StandardScaler
##############################################################################
# Generate sample data
centers = [[1, 1], [-1, -1], [1, -1]]
X, labels_true = make_blobs(n_samples=750, centers=centers, cluster_std=0.4,
random_state=0)
X = StandardScaler().fit_transform(X)
##############################################################################
# Compute DBSCAN
db = DBSCAN(eps=0.3, min_samples=10).fit(X)
core_samples_mask = np.zeros_like(db.labels_, dtype=bool)
core_samples_mask[db.core_sample_indices_] = True
labels = db.labels_
# Number of clusters in labels, ignoring noise if present.
n_clusters_ = len(set(labels)) - (1 if -1 in labels else 0)
print('Estimated number of clusters: %d' % n_clusters_)
print("Homogeneity: %0.3f" % metrics.homogeneity_score(labels_true, labels))
print("Completeness: %0.3f" % metrics.completeness_score(labels_true, labels))
print("V-measure: %0.3f" % metrics.v_measure_score(labels_true, labels))
print("Adjusted Rand Index: %0.3f"
% metrics.adjusted_rand_score(labels_true, labels))
print("Adjusted Mutual Information: %0.3f"
% metrics.adjusted_mutual_info_score(labels_true, labels))
print("Silhouette Coefficient: %0.3f"
% metrics.silhouette_score(X, labels))
##############################################################################
# Plot result
import matplotlib.pyplot as plt
# Black removed and is used for noise instead.
unique_labels = set(labels)
colors = plt.cm.Spectral(np.linspace(0, 1, len(unique_labels)))
for k, col in zip(unique_labels, colors):
if k == -1:
# Black used for noise.
col = 'k'
class_member_mask = (labels == k)
xy = X[class_member_mask & core_samples_mask]
plt.plot(xy[:, 0], xy[:, 1], 'o', markerfacecolor=col,
markeredgecolor='k', markersize=14)
xy = X[class_member_mask & ~core_samples_mask]
plt.plot(xy[:, 0], xy[:, 1], 'o', markerfacecolor=col,
markeredgecolor='k', markersize=6)
plt.title('Estimated number of clusters: %d' % n_clusters_)
plt.show()
| bsd-3-clause |
LiZoRN/lizorn.github.io | talks/product/code/txt/PacificRimSpider.py | 3 | 42651 | # _*_ coding: utf-8 _*_
__author__ = 'lizorn'
__date__ = '2018/4/5 19:56'
from urllib import request
from urllib.error import URLError, HTTPError
from bs4 import BeautifulSoup as bs
import re
import jieba  # Chinese word-segmentation package
import pandas as pd
import numpy  # numpy, numerical computing package
import matplotlib.pyplot as plt
import matplotlib
from wordcloud import WordCloud  # word-cloud package
# headers = {'User-Agent':'Mozilla/5.0 (Windows NT 6.1) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/50.0.2661.102 Safari/537.36'}
# cookies = {'cookie':'bid=0Hwjvc-4OnE; ll="118173"; _pk_ref.100001.4cf6=%5B%22%22%2C%22%22%2C1522457407%2C%22https%3A%2F%2Fwww.baidu.com%2Flink%3Furl%3DfrKwcZRSimGHLMvXj6iGkVFOXpPB1-x2KXgG3ytcgjHGTaXmDbel3nM5yObAEvcR%26wd%3D%26eqid%3D85cb540e00026f95000000045abedb3d%22%5D; _pk_ses.100001.4cf6=*; __utma=30149280.1673909815.1515314150.1521467190.1522457407.4; __utmc=30149280; __utmz=30149280.1522457407.4.4.utmcsr=baidu|utmccn=(organic)|utmcmd=organic; __utma=223695111.1124617317.1522457407.1522457407.1522457407.1; __utmb=223695111.0.10.1522457407; __utmc=223695111; __utmz=223695111.1522457407.1.1.utmcsr=baidu|utmccn=(organic)|utmcmd=organic; __yadk_uid=13Q68v4czDmbhs7EwXEQH4ZeJDrf4Z0E; _vwo_uuid_v2=D223470BC8673F2EA458950B595558C7B|c34cbc6386491154b19d1664fe47b0d6; __utmt_t1=1; __utmt=1; ps=y; ue="[email protected]"; dbcl2="140103872:B8C7nqlvWXk"; ck=6RJM; _pk_id.100001.4cf6=98fcd272a4c63ce7.1522457407.1.1522460095.1522457407.; __utmb=30149280.36.8.1522460095181; push_noty_num=0; push_doumail_num=0; ap=1; RT=s=1522460157064&r=https%3A%2F%2Fmovie.douban.com%2Fsubject%2F20435622%2Fcomments%3Fstart%3D260%26limit%3D20%26sort%3Dnew_score%26status%3DP%26percent_type%3D'}
# 1. Data collection
comment_list = []
for i in range(0, int(30027 / 20) + 1, 20):
url = 'https://movie.douban.com/subject/20435622/comments?start=%s&limit=20&sort=new_score&status=P&percent_type=' % i
try:
resp = request.urlopen(url)
except HTTPError as e:
break
html = resp.read().decode('utf-8')
soup = bs(html, "html.parser")
comment_div_lits = soup.find_all('div', class_='comment')
for item in comment_div_lits:
if item.find_all('p')[0].string is not None:
comment_list.append(item.find_all('p')[0].string)
# 2. Data cleaning
comments = ''
for k in range(len(comment_list)):
comments = comments + (str(comment_list[k])).strip()
pattern = re.compile(r'[\u4e00-\u9fa5]+')
filterdata = re.findall(pattern, comments)
cleaned_comments = ''.join(filterdata)
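# Note: the pattern [\u4e00-\u9fa5] matches common CJK ideographs only, so the
# join above keeps just the Chinese characters and strips punctuation, latin
# letters and digits before segmentation.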
#
# cleaned_comments = '影片放到中段的的时候景甜突然发现自己并不是反派于是妆容一下子就变淡了衣着也变得朴素了她放下身段从底层做起由一开始的霸道总裁变成了最后的机械电焊工剧情其实比好就是机甲打怪兽还多了机甲打机甲开菊兽合体等内容还有剧情反转大景甜第一次在合拍片中秀出了存在感中国元素多到泛滥啊说中国特供也不为过难怪外媒会酸但有句话怎么说来着不服憋着我决定重看下第一部确定下自己当初为什么如此痴迷那部电影大段的吧啦吧啦以及看不清的乱打这部到底在做什么全片毫无记忆点而上部我现在还记得集装箱如板砖一样狂拍开菊兽最可恨的是这部完全不燃了已沦为平庸好莱坞大片我要看大机甲打怪兽我要听第一部的不是来看你拖拖拉拉乱七八糟拍一堆文戏不是来看你蛇皮走位蹦来蹦去打不过就头脑简单一头撞死不是来看你五年之后特效反向进步十年我看是开倒车多放会儿说不定还能涨一星不知道除了主题曲和块多钱的特效还剩下些什么太开心大甜甜尴尬本尬想演个霸道女王风结果活脱脱一个乡镇女企业家悉尼和东京做错了什么尤其是悉尼天天见的街道风景太熟悉导致有点观感不真实东京那一仗太爽了四个机器人都好赞而且又开发出了新技术最喜欢女主自己造的跟屎壳郎一样的小机甲圆滚滚的太可爱灵活从剧情到打斗场面都透露着一股浓浓的廉价山寨片质感后半部分干脆拍成了向超凡战队看齐的青少年科幻片怎么鲜艳怎么来用充斥着无用说教台词的废戏和冷笑话填满分钟在其衬托下去年的变形金刚甚至都成了佳作怪兽和机甲都丑出新高度只有每次出场都换一套衣服的景甜是美的嗯一部百分百的好莱坞制作故事上延续了前作的世界观但这部续作在合作模式形象塑造故事创作和宇宙观设定上都远远超过了前作真的我应该看的是部盗版环太平洋山寨货即视感到爆炸连都变难听了而且讲真我竟然他们讲的中文都听不懂得看英文字幕台词尬到不行甜甜求求你在国内发展吧咱们别出去丢人了陀螺回来拍第三部吧最后我要给变形金刚道歉我错了我不应该说你难看的看到景甜一出来心里一咯噔不过最后看完觉得比我意料中好看太多对比第一部比奥斯卡更能体现陀螺的水平更荒谬的应该是那个黑人男主吧他到底是什么来头为什么哪里都有他为什么星球大战和这部都要选那个黑人啊长得很帅吗身材很棒吗演技很好吗下面很厉害吗全程尴尬无聊的剧情泡沫化的叙事为了大场面而大场面这无疑又是一部爆米花式的机器人大战怪兽的电影在电影院好几次都想上厕所所以这是一部充满尿点的电影为啥还是景甜大小姐就不能换个看着这电影不就是看特效吗智商党们麻烦去看悬疑片不然对不起你们的智商打得超级爽的最后景甜拯救世界中国万岁整部片质感很男主真的太丑了好赶客景甜好尴尬衣服换了一套又一套不知道她在做还是拍电影果然有景甜的都是大烂片希望她不要再拍戏了好喜欢第一部但是知道是景甜拍第二部就很失望了但还是去看了好浪费我的感情本片又名变形金刚之如来神掌大战合体哥斯拉怪兽编剧继承了女一必须父母双亡靠捡破烂然后拯救世界的套路想问问编剧难道美帝人民都是天才吗捡个垃圾就可以徒手建高达你把念过书读过的人情何以堪反派必须最后秒变纸老虎还要放句狠话我一定还会回来的大甜甜毁再接再厉吧我的妈呀景甜到底什么背景把菊地凛子写死了就是为了扩张景甜宇宙吧果然资本家的钱就是不一样啊庞大而密集的中国元素虽不至于太过尴尬但整体已经毫无上一部的暗黑美学感没有精神内核的环太平洋和多年前的奥特曼系列竟然有了异曲同工之处况且奥特曼好歹也是大几十年前的东西了你说你丢不丢人嘛活在被景甜拯救的世界里我不如去死我们的美人真剑佑在本片中给人留下的印象就只有一个大粗脖子真的看不出环太平洋的续集没看过环太平洋的还以为是变形金刚系列呢大甜甜的国际梦想秀代表着中国打怪兽哟哟切克闹你问我在哪里我根本就不知道什么剧情都不重要我甜即将要征服大宇宙国人钱好赚负分别人的续集都是恨不得把前作的优点无限放大到恶心这部倒好优秀气质一点不剩新人物建立不起来还把仅存的旧角色浪费干净连配乐主旋律都只出现了一次而且位置很奇怪无疑是年度最假续集景甜和也基本没交集比想象中好太多了弥补了第一部故事编排上面的问题并且创作了更鲜明的人物和更复杂的故事虽然失去了第一部的气势但是依然娱乐性很高东京大战依然是值回票价看完了本片还讽刺景甜的人真的就是傻逼了这是一部有景甜都不忍心减分的电影燃炸了多一星情怀少了无论在水平还是情趣上都非常的陀螺续作不再是那种黑夜中湿哒哒充满机油迷醉味道的调子更亮的场景和更灵活的机甲带来的是更为塑料感的观影体验当然对萝卜爱好者来说依然是不能错过的一大年度爽片毕竟出现和钢大木的世纪大合影这种事情就够高潮很久了点映看完哭着出影院不是有机器人就能向隔壁变形金刚比烂的好吗比大做为第一部的脑残粉完全不能忍受强势插入中英文混杂拯救世界后大汗淋漓的某星抢去本该属于机甲和怪兽的亮相时刻就像大招如来神掌时死死抓住甩不掉的小强被陀螺冷落也不无道理除了前作的主题曲续作几乎找不到亮点整部片子的质感很你知道有一套从天而降的掌法吗我只想知道日本做错了什么总是让他们一而再再而三碰见怪兽以及大面积居住地被摧毁菊地凛子做错了什么让她那么早便当还有景甜到底关系为何那么硬怎么每部打怪兽的国际戏都有她参演怪兽心里我到底做错了什么刚组装好成为巨型神受结果神威都没显就又要去见列祖列宗其实这是国际版奥特曼吧真得是尬这么好的一副牌竟然就这样打烂了最后的决战还不如第一部的开场第一部硬伤也有所以就不废话多打这一部废话这个多啊剧情空洞特效三星景甜出戏大家慎看景甜算我求你为什么光天化日就会魅力全无呢是技术不达标还是清风水上漂恐怕还是整体基调视觉氛围甚至是机甲恋兽情怀均不达标的综合原因我至今都忘不了菊地凛子撑伞在青衣基地出场的画面至于影迷津津乐道的级片其实续集这种级口味爆米花才是吧菊地凛子就这么成为森麻子了贾秀琰帅气脑洞大开牛逼的都藏在正片里了良心中国元素真多景甜的戏份堪比女二张晋蓝盈盈惊喜基本不太违和演员太用力时不时出戏一邵氏工业之所以出二五仔主要是党建缺位没有落实党管原则二不想上京的怪物不是好怪物具备了一个好莱坞爆米花大片该有的良好品相刺激的打斗和好笑的桥段安排在白天大场面也尽量满足了普通影迷的期待我知道你喜欢暗黑系故事按照手册写有反骨有绝望有反击这时候槽点来了机甲们在被几乎灭门后即刻被修复而面对大怪兽的终极武器竟是如来神掌年月观影比变形金刚好看最后居然出来个组合金刚比电影版大力神的效果好看的时候一直祈祷下一个镜头出现的是怪兽而不是景甜因为景甜的人设实在是太讨厌了尤其不爽的是一般来说她这种角色应该被当成幕后黑手干掉结果竟然是队友这就是来自中国的资本让人讨厌但无法拒绝已经完全没有了机甲和怪兽的独特美学魅力变成两个物体之间的争斗毫无人类智慧的无脑爽片最后的大怪兽看起来分明就是荒原狼嘛整部都很赞只有最后一招设置的太蠢了扣掉一分剧情分特效分预告应该是全片打斗高潮了我就看你环大西洋怎么拍博耶加小东木大甜甜简直就是温格麾下的扎卡伊沃比威尔希尔称之为你行你上三巨头没争议吧一人一星不过分吧哎可喜欢第一部了当年怒打五星来着凭空想象的科技也是值得夸奖了景甜这次演绎的比任何一次好莱坞合作都自然值得夸奖电影打斗怪兽可圈可点胆子大的话剧情特效都值得去影院一看披着科技元素和伪装的特摄片本质上依旧是奥特曼打怪兽不过爽快就行了这种片子除了视觉效果和激烈打斗之外其他并不重要看完觉得票价不亏全程无尿点当然科技宅可能不满意景大小姐这次发挥真的没败人品再这片子国内票房铁定超过美帝本土一部无人物无集中戏剧冲突无丰富镜头只有尴尬笑点的爆米花的正确打开方式便是玩味其中的政治所指放了大半部国际斗争威胁论烟幕弹之后开始煞有介事地反思赛博格化最终背离第一部原旨反而显得前半部更像重点戏里戏外景甜都是标准好莱坞视角中国人可一出来还是燃得想哭怎么办再不济也不至于两星吧真是生活水平提高了惯出豆瓣上越来越多的事儿豆瓣评分被这些事儿毁的越来越没参考价值陀螺良心监制细节狗血却简单粗暴不要因为景甜飞扬跋扈乱耍酷流浪者飞天砸毁就瞎黑机甲怪兽都有升级黑哥豪爽小萝莉叛逆干练菊池悲壮败笔是怪兽脑子入侵疯子科学家操纵机甲开启虫洞但是打机甲怪兽小密集群怪兽与怪兽合体最过瘾牛日本决战花样多打得狠至少硬碰硬不敷衍比之前预想的要好保留了陀螺的原创构架构思机甲和怪兽融合的点子是胖子的遗产有访谈为证是对世界观的补完而非续貂但视觉风格变了少了首部艺术色彩质感更加动漫化但燃点还是必须有的正片星情怀别再吐槽中国元素了没有中国资金这片根本没得拍但跑步机那块还是笑了中规中矩全靠铁拳砸脸撑满全场三星半真的很喜欢大荧幕上看机甲打怪兽这部不能和陀螺的相提并论纯粹娱乐来讲还是不错的简化人物关系和剧情内涵仅突出了机甲战斗和其他强续命的独立日一样求生欲很强但也没丢
掉便当习俗略欣慰彩蛋有独立日的影子呀我还挺喜欢波耶加且更爱甜甜甜甜比心心景甜环太分并没有爆烂机甲打机甲那两场戏都可圈可点主题上和这两天中美贸易大战撞车啊只是换成了科技制衡你中文太烂了回家练练再和我说话景甜依然最大笑点就地枪毙还有吊绳溜冰哈哈哈哈另外我好像在东京看到了京东这太难判断了从左向右读还是从右向左读都对无限怀念四年半前的那个暑假看完第一部时的激动还能忆起恨不能马上看到续集的畅想年月唯有靠一小段原不至幻灭到零不如重温原声更过瘾将近一半的冗长尴尬文戏铺垫所为何事宣扬团队精神与家人概念近乎无聊事实证明讲不好中文无法拯救世界颤抖吧歪异果星仁其实环太平洋就是成年人的奥特曼为什么环的评价各方面都不如环因为换导演了吉尔莫德尔托罗他为了水形物语推掉了环结果大家都知道了水形物语成了奥斯卡大赢家最佳影片最佳导演系列电影除非第一部口碑票房双扑街否则不能轻易换导演不知道说了些什么只记依偎在谁的肩膀分钟啥第一次出现主题曲的时间恐龙战队续集啥这才是这部电影的真名字景甜啊真正的女主角还行比变形金刚系列强一丢丢既然剧情不行就应该把第一部的主题曲不要钱一样循环播放呀导演请你听听群众的呼声怪兽请暂停攻击一下中美双方围绕机甲要不要征收的关税展开了激烈的讨论毕竟调查征税门类里有工业机器人项如果你抱着看一部爆米花大片以外的期待买票那真的就是你自己的问题了但是即便是带着正确的心态入场环还是会令你失望这部电影里没有你从未见过的特效场面也没有让你感到激动的故事尽量客观说环太平洋的剧情仅有的看点基本全在怪兽方包括先驱对人类大脑长达十年的入侵还有开菊兽合体勉强可以算作小高潮但是黑人主演是真的不行人设演技台词一样拿得出手的都没有作战策略只会一起上还不如景格格最后几分钟跑步机的戏拉好感真子便当完全不能忍第一部之所以有死忠不是因为怪兽不是因为机器人对撞和大场面是因为机甲崇拜在看之前我就说虽然这个班底就注定是烂片但只要给我三分钟驾驶机甲的临场感我就满足了结果一秒钟也没有说是变形金刚也没人反对全程面无表情看完像玩具被抢走后委屈想哭唯一的亮点是换装之后的景甜哈哈哈外星人都学不会中文话说回头无脑机甲爽片的要素都在可拍三部的内容一部齐活部分内容还有点儿感打戏之外还在东京卖了情怀没什么好抱怨的在整体的中二气氛烘托下连景甜也变得好好看呢不多说了我要去打猎啦三傻闹东京然后来了拆迁队接着三傻合体了虽然不是的概念但无人机暴走真是分分秒最后拆东京还暗示了第三东京地下城的存在一口一个太穿越了虽然也有很强烈的青少年科幻倾向但比要成年一点最后的拆东京就像是某些变形金刚哥斯拉元素的重组六分逗我呢機甲打戲不錯劇情不會太無聊然後我必須說自從看過章子怡在柯洛弗的表演之後我對景甜的接受度上升哎呀原來是變形金剛阿幹嘛叫自己環太平洋愛的人是不會喜歡的不是只要機器人怪獸打打架就好耶之所以經典是因為懂得並實現了日系機甲動畫與特攝的精華看這些長大的人真的會熱血沸騰而在換掉導演的續集蕩然無存了支持国产支持黑人支持景甜五星第一部的情怀全失因为第一部的我才给它一星景甜就不说了好莱坞那么多有演技的黑人男演员为什么星球大战和这部都要选那个一点观众缘都没有的蠢蛋不要以为白天的戏多真实感就有所上升屁跟奥特曼打小怪兽似的机甲和怪兽都没有阴影的什么鬼中国特供人傻钱多速来如果说环太平洋是虎虎虎的话那么环太平洋就是空天猎整部电影的感觉就像电梯里放的那首变调的一样那细腰的机甲头上长个角不就是那谁了吗那明艳的橘红色不就是那谁吗那插入栓如果做成细圆柱体不就是那什么了吗我看到东京地下逃生电梯的配色怪兽来袭的字时都要哭了我给五星一公司参与了一点投资二把大中国说的很三中国演员品牌都有足的戏份然后说电影本身看的首映杜比影厅送了海报开头大哥单口相声啊中间也很乱特别不连贯结尾更莫名其妙一下死了虽然多了机甲机甲但走变形金刚的套路太严重了本身三星水平中国到底投资了多少钱还邵氏请了张晋都不让人打一场和怪兽总是要去日本街市跑一场的真嗣身世的黑人这么壮硕那明日香在哪里最后那招是无敌如来神掌海外变种吗你记不记得有一套从天而降的掌法这故事根本让人燃不起来你拿第一部的逼我我也燃不起来啊卡司的演技让人捉急小个女主用力过猛小东木压根面无表情这俩人平均一下刚好令人意外的是景甜相比之下竟然还过得去话说得亏有大甜甜了要不然真心更没眼看我期待了两三年你给我看黑人小哥的青春期孩子的打架京东挂在上海的广告各国人的刻板印象怪兽爬山机甲坠落以及景田小姐的脸色陀螺导演当年的第一部让人眼前一亮这么多年被我们津津乐道我怀着最低期待值前来但大部分时间让人如坐针毡随处是尴尬的台词莫名其妙的黑化乱七八糟的打斗拯救世界的鸡汤随意乱晒中文台词的翻译腔也让人抓狂但这一切都在最后的如来神掌面前相形见绌相比之下当年陀螺简直是拍了部杰作阿这就是好导演与坏导演的差距假如菊地凛子的热血还能再爆发一下假如那栋怪兽骨架边的海滨豪宅不仅仅只是功能性过场假如最后那招大绝杀别搞得那么像功夫或许我还能多喜欢一点可惜拿了奥斯卡的陀螺目测已经彻底放弃该系列我虽然想看点不需要用脑的电影但是也不能这么侮辱我的智商呀没有第一部精彩但是还好没有玩脱剧情特效打斗戏还是能看的景甜宇宙第三部也是最好看的一部战斗燃爆特效一流场面宏大剧情热血说像奥特曼的朋友你确定你真的看过奥特曼吗电影五星景甜扣半星四星半推荐四舍五入算五星气质上太像变形金刚独立日安德游戏景甜这个角色真是一言难尽啊谁说没有违和感的前作受到的好评很依赖于那种沉重粗糙的打击感机械感机甲每挥一次拳都超带感麻子高喊母语刀斩怪兽简直爆燃啊这部续作基本是反前作而行之拜托没必要再拍一部变形金刚啊什么环太平洋纯粹就是变形金刚基本是按一个剧本大纲填充起来的标准流水线产物德尔托罗之前所构筑的庞大世界观未能有丝毫拓展甚至还萎缩了不少灌注的趣味感也消失殆尽没了陀螺来了景甜无论是故事设定还是特效动作场面几乎都在全面倒退就连结尾也似乎是照搬功夫还记得那招从天而降的掌法吗一点都不燃没有厚重的金属感没有了巨型机甲的压迫感前一部的诸多爽点没有得到延续唯一的一点兴奋感还是响起前作背景音乐的时候景甜拯救世界怪兽灭地球机甲打怪兽英雄驾机甲景甜救英雄所以就是景甜救地球景甜救人类景甜救世界颤抖吧怪兽颤抖吧人类身为昭和系特摄粉较之从头爽到尾的第一部这部看得几乎毫无感觉估计德胖看了也不会有多大感觉估计卖拷贝看了会很有感觉估计导演压根儿就没搞明白第一部的成功是因为什么感觉做再多烂片的心理预设也还是没料到能烂到底掉光作为导演处女作也不强求有人家奥斯卡导演在美学风格趣味上万分之一的追求所以根本没在这期待但好歹把目标观众群设在中学生啊这个繁杂冗长靠各式初级编剧技巧勉强达到糊弄小学生的金酸霉级空洞剧本是要作什么妖前一小时几乎废的电影里一共出现了三个次元地球怪兽的次元景甜的次元死于重力势能转化的动能加热能虽然没能达到预期有些遗憾但在屏上看巨大怪兽和机甲场面和效果还是很不错的几场打戏依旧震撼人心可惜熟悉的背景音乐响起配的画面着实糟糕新的配乐到爆故事有所增强但这种片要故事就跑偏了景甜依旧是电影界的演技还给她脸那么多特写景甜真的承包了我所有的笑点电影很真各种山寨廉价气息各个方面都远不如第一部这应该是超凡战队而不是环太平洋再说说景大小姐一个国内基本没什么票房号召力口碑也差的女星演技也永远停留在各种姿态的自恋中但是不但国内各大导演各大明星甘愿做绿叶而且无休止的赖上好莱坞这黑幕潜规则也太张扬了剧情较之前面的有所进步景甜在影片中也有存在感但是大场面的堆砌让人产生审美疲劳还行吧总体不如第一部特效不错打斗场面再多一些就好了看的不过瘾想给个分片子不算难看编剧确实有花心思比其他一些爆米花大片好点结尾的解决方案太粗暴当然远远比不上第一部了看特效就够值回票价了续作特别想要证明自己通过弱化标志性的主题曲杀死菊地凛子可以不爱但不要伤害景甜的译制腔台词等方式来努力的切断与前作的相同点但这正是我们特别不想看到的改变我们想要看的是各个不同国家特色的猎人机甲驾驶员在城市里与浴血厮杀而不是一条无聊又无趣的阴谋故事线星没有了上一部宏大而令人振奋的配乐看片的时候感觉好平淡还有景甜太容易让人出戏比起陀螺的第一部逊色了不少起码第一部还是有些神秘的黑暗风格这一部完全是色彩鲜艳的各种铠甲增加的机甲对打还算是有新意反派这智商是统治不了地球的我大景甜是要统治地球了最后竟然还致敬了星爷的功夫大量的中国投资和中国元素真的很多这完全是一部中国主导的中国制造没有第一部有诚意了整部电影可以用差劲说了看完好想当怪兽噢好莱坞科幻大片里有东方面孔而且还是很有感觉的这次比较新鲜尤其剧情比较紧凑特效逼真的看的过程中有被吓到了特效加一分景甜加一分剩余实际两分還可以吧覺得動作場面比
上一集還多配樂也不錯依爽片看很值了还可以把超过预期就是文戏时间拉得那么长却没有把几个机甲训练员的性格描述清楚也没有讲他们哪个人训练哪个机甲我记得里面大家是不能随意驾驶任意一辆机甲的而且那么大一个军事基地又十年过去了应该有一批成熟的机甲员才对啊为什么非要让还有个月才完成训练的学员拯救世界呢无趣既看不到实体化的动漫风也看不到迷影恶趣味就是一无脑大片没变态金刚那么傻可也好不到哪儿去除了最后的一场东京大战之外没什么像样的打戏怎么看怎么都该是大反派的邵氏公司却像某甜的妆容一样永远伟光正简直就像梦里进了只苍蝇一样烦人景甜再次拯救了世界剧情还可以吧最让人感到尴尬的是说中文的时候真的很没有气势啊导演小时候肯定没少看奥特曼同时也没少受变形金刚的影响这点并非臆测因为全片都挺杂糅的加一点赛博朋克来一点废土美学有点怪兽文化再致敬下陀螺最值得一提的是融合生命的设计不过喜欢拍白天的巨幕导演总是值得夸赞的景甜真的蛮适合拍这种高贵冷艳的无表情角色大就这么撞一下就死了史诗级烂片并衬托出第一部的伟大相比于有些找失望没有糟糕的地方也没有精彩的地方或许美片就图这点热闹特效堆砌而成的好莱坞大片景甜比想象中有存在感比差远了还没开始就结束了可是高潮迭起令人窒息麻子上部那么艰难活下来居然就这么憋屈被发了盒饭我真想拍死这智障导演上部男主也不知去哪了打怪兽时间短的不行结局敷衍期待了几年就拍成这样失望看得热血沸腾差点鸡冻地哭出来的第一部怎么到了第二部会这样震惊东京街头惊现三头怪兽奥特曼为什么迟迟不肯出现究竟是人性的丧失还是道德的沦丧欢迎走进今天的环太平洋电影剧情超级简单但毫无燃点跟第一部不在一个水平看完就是内心异常平静无法忽略大甜甜的存在简直是女主角般的存在跪拜告辞好久没在电影院看这么难看的电影了瞧瞧看还是纸老虎在背后捣鬼冰天雪地跪求第一部人马回归还不错比想象中好老是黑大甜甜这次老实讲挺好的英语也有进步有希望成为口语一线水平不知道为什么看到这种电影里的中国人我怎么感觉自己跟到了动物园看到猴子一样心情激动看得很爽为此片的评分打抱不平多给一星变形金刚的机甲看得审美疲劳了环太平洋的巨型机甲看起来还是很震撼他的笨重不灵活相对于变形金刚是加分项神经元结合的设定其实可以深挖提升内涵可惜了机甲嘛大就对了越大越帅特别是久违的响起来后简直不要太帅裹脚布文戏最具魅力的菊地凛子登场不到十分钟就领了盒饭然后是景甜阿姨带着几位小鲜肉挑大梁超凡战队的即视感首部将打斗场景安排在太平洋的雨夜真是明智之举这部把战场移到大都市亮堂堂的白天是要向变形金刚靠拢了可特效都没人家做得有质感口碑扑街合理你还记得有招从天而降的掌法么不对这是天马流星拳吧哈哈哈不过精日份子真的是可恨至极除了不像前作真的像好多影视作品打起来像变形金刚后面像进了城像哥斯拉整体又仿佛和独立日颇有激情连下集预告都像他们对迷影有种误解好像把各种机甲揍怪兽拼起来就是环太平洋了少了陀螺是不行对了为什么怪兽没开啊一个不错的剧本被稀烂的节奏粗糙而毫无厚重感的特效以及磕了大麻一般的疲软配乐拖累的乏味无力游戏般的机甲设计和场景酷炫十足却极度缺乏前作的细节和冲击力总的来讲只能归结于导演对节奏和分镜的把控差距太大顺便虽然景小姐的演出没那么糟糕但一边说中文一边说英文真的很尴尬啊我去你记不记得有一招从天而降的掌法莫非是那失传已久的如来神掌还不错机甲很帅最开始出场的拳击手感觉还挺帅的没想到和部队注册的机甲一对比像个玩具不过最后的胜利也少不了拳击手的相助机甲打机甲机甲打怪兽挺过瘾的额外给电影里的中国元素一颗星这种大科幻里有中国演员还说着中国话感觉还是很不错的太乱来了糊里糊涂毫无章法尴尬的文戏弱鸡的打斗屎一般的剧情还行吧景甜没那么尴尬了星文戏弱智打戏不爽当台词说你们父母是谁不重要时镜头给到了赵雅芝儿子本片还有伊斯特伍德的儿子千叶真一的儿子以及甜甜景甜扮相百变但开口就变国产剧终极怪兽死的窝囊没啥必杀技就是血厚几个小幽默小反转算亮点第三部要想在中国大卖只有一招复活暴风赤红法国抢先美国全球首映吐槽怪我咯但是确实和大锅炖的变一样烂得不相上下我记得环还是年前和小虎爸爸一起看的超级燃景甜姐姐光环让我想撕屏满屏的尬演青少年的确适合打入小学生消费群看得够爽就行了别无他求啊真的有点舍不得无论如何也想再见到你请答应我一定要再出现好嘛是的就是你已经分手的华人之光大甜甜还有辐射和用完啦下一次抄什么呢景甜并不是环太平洋最烂的存在还我第一部主题曲菊地凛子给景甜做配角东京惊现京东植入开菊兽三位一体如来神掌从天一击毙命你确定我看的不是环大西洋各方面不如第一部啊好怀念看第一部的夏天不过当爆米花也不难看毕竟影院里面看机甲片的机会也不多了五年了这个系列就这么结束了同组机甲战士可以训练跳镜子舞同步率百分百景甜霸道女总裁下基层完成社会主义改造上一集犹记得菊地凛子雨中撑伞等直升机芦田爱菜废墟奔跑等救星续集刚看完就失忆作为环太平洋的续作雷霆再起其实深知前作的短板力图在剧情上构建更为充沛的张力但实际上脸谱化的人物和空洞乏味的台词使耗费大量时间所做的剧情铺垫几乎成为了无用之功而在前作中那股昔日的赛博朋克风在这部续作里亦荡然无存感觉和第一部比差太远了不是演员的问题是剧本的问题最后送死的那个机甲完全是为了送死而送死啊还有想让新队员登场没必要非得弄死老队员吧失望改成低幼向了吧不成功简直烂到昏昏欲睡这剧本写的这景甜演的怎么能用一个烂字就形容得全真正的狗尾续貂略拖前半小时没怎么看也能跟上节奏不过打戏还是非常燃的激动较上部还是陀螺执导好一点剧情有进步新加的中国元素也并没有想象的那么尴尬大甜甜不适合走高冷路线寄生虫好像饕餮打一二星的以后别看机甲片了没必要环太平洋雷霆再起还算是有些干货的至少挨过一个小时的无聊会迎来半小时的酣畅一战只是矛盾有点多上一部是机甲斗怪兽这一部却成了怪兽开机甲那么牛逼的怪物机甲一撞就死对东方异域极度迷恋却仍难逃演员短命毁个片甲不留的好莱坞式定律这样就能讨好中国观众了要不是景甜的换装秀和东京街头的京东广告这么无聊的东西怎么可能看得下去啊和变形金刚独立日一个套路大棚电影剧情单薄逻辑不通就他妈拍来骗中国人钱的变形金刚奥特曼真的很不好看了比起差远了然而也并不是很好看哎一定要说又什么可以的大概是第一部到这部还在用的吧不用迟疑没第一部好就是了我觉得陀螺自己对这电影都是拒绝的当然要是有记者问他他肯定不会说出来等了五年意外觉得还不错男主尬演机甲浮夸缺少质感有的情节没展开但是整体故事讲的流畅节奏也得当情节有反转和惊喜怪兽特效不错以及大甜甜总算没那么出戏值七分吧如果你是变形金刚的粉丝你可能会喜欢本片无论是特效场面人物塑造都很类似变形金刚系列产品只不过机器人更大只而已如果你是环太平洋第一部的粉丝你会失望的怪兽的戏份还不如景甜多仅有怪兽出场的最后几分钟才让我觉得算有些欣慰星半决战富士山下景甜拯救世界刚开场最喜欢的麻子就跪了同观影小朋友的妈妈甚是惋惜小朋友却说她不是日本人么日本人不都是坏人么小朋友妈妈竟无言以对满场的亚洲面孔证明老外爱看变形金刚爱看怪兽电影可就是不爱看机器人打怪兽所以还有续集的话也别假惺惺找黑哥做主角了直接扶正大甜甜多好一路给你从长城打到骷髅岛再打到环太平洋最后打去外太空完美换了导演到了这第二部只能说各方面十分凑过整体勉强及格另外不吹不黑景甜不仅是电影里的关键角色而且表现居然相当可以比金刚骷髅岛里可有可无的面瘫路人进步了十万个长城堪称本片最大惊喜还行吧作为无脑爆米花电影我觉得可以的萝莉拯救世界是必须的了大甜甜存在度提高不少中国基地中国将军中国军火商三星可以有加一星孩子说好喜欢军刀掏钱人物动机太牵强附会了逻辑漏洞大到几乎无法自圆其说最后的大战简直潦草不堪以及从星战开始就无法忍受的男主的颜值直接重映行吗咱别拍了一场砸了很多钱但就是不起来的趴人物一个也立不起来比景甜还要没有观众缘好厉害竟然能上我只想安静的看变形金刚打小怪兽结果三分之二时间都在整那些无脑又蠢到爆的剧情结尾也很无语就跟一那样安静的从头打到尾不好吗生气又名景甜的换装游戏哈哈哈今天晚上看的剧情还行感觉那个权将军好惨啊就这样领了盒饭特效好看话说大甜甜知道不知道她刚出场的口红色号不适合她这片差不多了被黑的有点惨不知道是不是因为有景甜没有太多的亮点但是机甲战斗的戏份还是挺多的相比黑豹古墓是实打实的爆米花电影了没有的重金属感但看的绝对爽没那么不堪大甜甜演的还不错建议观看后半段机器人打小怪兽还是很热血的一个怪兽不够再来一个两个不够三个总该够了吧但是情怀不是这么卖的何况环太还没有到可以卖情怀的地步不要计较剧情漏洞小东木和之间毫无火花小女主比不上芦田爱菜中英台词切换生硬等等等要时时刻刻保护大甜甜关键时刻还要靠大甜甜一记神助攻着实惊艳如来神掌的
点子很妙怪兽形态和作战场面全面提升怪物生化机甲是亮点东京之战很燃亚德里亚霍纳是颜值担当几个大特写相当养眼了这次的机甲最爱复仇黑曜石喜欢这类型的电影热血故事情节什么的也挺好的可是评分不高是怎么回事再次感叹人家的特效真是棒编剧一定重温了吧量产机暴走很有当年号机的感觉嘛军刀雅典娜赛高不至于那么难看吧这不就是小时候看的奥特曼吗为啥成人还看这种低幼片为啥大家的理想总是拯救世界为啥我的男神张晋这么快就领盒饭了为啥直男喜欢看这种片陪男票看得我一脑子问号除了团战那几秒有点精彩外其他真的好一般最后我出戏到如来神掌了中国元素根本融合不进去一度感觉是在看国产片这批的机甲战士的人物形象没一个能立起来包括主角照理菊子的死应该能激发人物爆发但依然吊儿郎当到最后牛逼人设完全体现不出来小女主也无感基本上每个人都是打酱油的景田的角色更好笑全程令人出戏喜欢的朋友们答应我别看好么明明时长是标准的分钟就是给人一种内容不够的感觉多分钟的时候强化之前的还没登场整部结构显得头重脚轻前期一直在刻画女二号的成长看番位景甜女一然而女二的总体戏份又不多莫名其妙的编排本月最烂啦啦啦期待这部电影好久了好喜欢啊也好期待啊真的很棒希望你都去看下在第一部的版剧情前面加了一个剧场版的剧情看到那些无人机甲的时候瞬间想到量产机当然编剧还是花了心思做反转的新机甲新怪兽都很好看怪兽合体很棒打得也很爽但是机甲的动作都过于流畅失去了第一部真实的机械的笨重和凝滞感最后怀念一下天国的真人版怪兽摧毁城市的时候街道竟看不到一具死尸说明人类和怪兽还是可以和平共处的第一部我可是给了五星的啊看第二部这德性还不如去看奥特曼景甜张晋什么的真是太尬了中国资本能不能干点儿好事儿操纵机甲战士也是挺累的在里面跑啊踢的怪兽想用血液与富士山的稀有元素混合有点儿意思和第一辑的风格甚至是故事都已经相去甚远这次的便当发得相当不开心不过作为一部独立无脑的爆米花片还是有打有特效算是热闹景甜小姐没有阻碍观感但是看多她生活真人秀的样子就会发觉这个御姐形象是如此不合适最开心的是我的博士加戏了一大堆说教台词和尴尬玩笑勉强凑齐分钟就算了就放一下而且大战时间很短怪兽死得也很不可思议最开始就感觉陀螺不执导多半扑街果然缺乏第一部的那种燃传奇怪兽宇宙第五弹及景甜大战怪兽第三弹中国演员中国元素的大量植入让我们更加看到了好莱坞有多需要中国市场如派拉蒙老板所说没有中国市场好莱坞很可能就活不下去了还有就是景甜拯救了世界对就是她特效三颗星除开一点文戏外就是机甲打机甲和机甲打怪兽还是打的比较刺激的场面很大剧情比较弱智就为了打起来随便编的大甜甜浓妆的时候比较噶演暴发户企业家的节奏淡妆机械师的时候还挺好的男主就是星球大战的丑黑人女主的小机器人还挺好玩的绝对水平有三星但碍于前作太过耀眼相比之下本作是剧作美学配乐甚至动作场面的全方位溃败景甜仿佛是为了证明社会主义的优越性而存在的超人大小机体捉迷藏悉尼壮烈道别富士山团战万法朝宗如来佛掌虽然远没第一部的厚重质感赶鸭子囫囵吞枣的烂片节奏和灾难般的景甜但怪兽机甲化多功能组合往又进了一步以极低的期望去看感觉烂得还算彻底和开心导演你记得环大西洋吗中规中矩的剧情打怪兽还是很燃的比第一部好多了无视中国演员七分嘻嘻珍惜这个景甜吧换跑道了没几部好看了女文工团花穿越进各路好莱坞大片的的故事本身就很科幻以及雄霸的儿子比白娘子的儿子更俊美一些动作片打就完事了要是不看片名还以为你卖拷贝又拍了一部变形金刚虽然剧情稍微精彩一点点但依然全程是尿点不知道哪里该尬笑一下或者尬哭一下伊斯特伍德这儿子演技可以和大甜甜拼一拼好在大甜甜还是挺美的老美请别再拍中国特供片来骗钱了谢谢长不大的男孩这片适时描写了中美关系军事基地和邵氏的关系很像传奇和万达当初的关系一开始有层精神交战的意思美国对东方崛起的经济侵略的惧外心理值得玩味后来怪兽控制机甲如果能挑拨离间让双方开战坐收渔翁之利会比现在更有趣更现在还是流俗了不过导演本人还是深受华语武侠片和日本剑戟片的影响华裔阵容加一星景甜表现超乎预期加一星拳击手的短腿萌加一星最后一星因为青岛啤酒国外很多影评竟然说比第一部有提升真的是无语第一部陀螺个人风格影响下避免的很多大片通病在第二部基本全部都有漫威后大片的通病人物过多故事线过于繁琐没有必要的反转平庸的动作戏和糟糕的节奏等等感觉主创真的没有理解第一部的好可惜了在最后时刻她硬是趴在男女主角背上上了天零点场摄影和不如整部下来和剧情契合度不高前半部分比较无聊铺垫太多后半部分的战斗更像是机体武器展示中国元素多到爆炸总的来说这部电影向我们宣告环太平洋系列以后要继续圈钱啦因为它真的太承上启下了很难做为一部好的完整的电影来欣赏阿玛拉平时训练怎么都连接不好决战一下就进入状态了与特种部队独立日的续集一样先请上部老人领完便当然后让毛头小子们拯救世界很平庸几乎毫无亮点尤其丧失了机甲的厚重和质感搞成这样跟变形金刚有毛区别啊何况还没汽车人拍的好看连让人抖腿的都变奏的不燃了过分了啊大甜甜怒刷存在感还是有点尬张晋倒还是辣么帅整体蛮中国订制的这么对比来看陀螺拿奥斯卡果然是实至名归的如果要拍请让他导甜婊假双眼皮看得我尴尬的不行御台场为什么会有京东的广告为什么这么燃的只出现了一次这音乐真的差评看看景甜再看看凛子女王乡镇女企业家还我环太平洋原班人马景甜的植入还好还算自然这一部的打斗戏份没有上一部多没那么多钱可以烧了吧自动驾驶一出事这个又来高级黑一把怪不得陀螺不接这个本子和前作一比少了灵魂奥特曼打怪兽开头镜头晕的我闭着眼睛差点睡着后面剧情以为正义方会出现什么新的机器人结果啥都没出现最后我老公说来了个如来神掌撞死了简直不要太敷衍大甜甜表现不错终于在片中起了很大作用不然我要打星即使有大家都讨厌的景甜评分这么低也不科学啊跟翔一样的独立日和变形金刚相比这部真算得上爆米花中的良心作品了'
segment = jieba.lcut(cleaned_comments)
words_df = pd.DataFrame({'segment': segment})
# Remove common high-frequency (stop) words
stopwords = pd.read_csv("chineseStopWords.txt", index_col=False, quoting=3, sep="\t", names=['stopword'], encoding='utf-8')  # quoting=3 (QUOTE_NONE): never quote fields
words_df = words_df[~words_df.segment.isin(stopwords.stopword)]
# Word-frequency statistics
words_stat = words_df.groupby(by=['segment'])['segment'].agg({"计数":numpy.size})
words_stat = words_stat.reset_index().sort_values(by=["计数"], ascending=False)
print(words_stat.head())
# Word cloud
matplotlib.rcParams['figure.figsize'] = (10.0, 5.0)
wordcloud = WordCloud(font_path="simhei.ttf", background_color="white", max_font_size=80)  # specify the font face, maximum font size and background color
word_frequence = {x[0]: x[1] for x in words_stat.head(1000).values}
# word_frequence_list = []
# for key in word_frequence:
# temp = (key, word_frequence[key])
# word_frequence_list.append(temp)
# print(word_frequence_list)
wordcloud = wordcloud.fit_words(word_frequence)
plt.imshow(wordcloud)
plt.show() | mit |
gviejo/ThalamusPhysio | python/main_pop_corr.py | 1 | 6453 |
import numpy as np
import pandas as pd
# from matplotlib.pyplot import plot,show,draw
import scipy.io
from functions import *
import _pickle as cPickle
import time
import os, sys
import ipyparallel
import neuroseries as nts
import scipy.stats
from pylab import *
from multiprocessing import Pool
data_directory = '/mnt/DataGuillaume/MergedData/'
datasets = np.loadtxt(data_directory+'datasets_ThalHpc.list', delimiter = '\n', dtype = str, comments = '#')
# datasets = [s for s in datasets if 'Mouse17' in s]
# sys.exit()
def compute_population_correlation(session):
# import numpy as np
# import scipy.io
# import scipy.stats
# import _pickle as cPickle
# import time
# import os, sys
# import neuroseries as nts
# import pandas as pd
# for session in datasets:
start_time = time.clock()
print(session)
store = pd.HDFStore("/mnt/DataGuillaume/population_activity/"+session.split("/")[1]+".h5")
rip_pop = store['rip']
rem_pop = store['rem']
wak_pop = store['wake']
store.close()
###############################################################################################################
# POPULATION CORRELATION FOR EACH RIPPLES
###############################################################################################################
#matrix of distance between ripples in second
interval_mat = np.vstack(nts.TsdFrame(rip_pop).as_units('s').index.values) - nts.TsdFrame(rip_pop).as_units('s').index.values
rip_corr = np.ones(interval_mat.shape)*np.nan
# doing the upper part of the diagonal
# rip_corr = np.eye(interval_mat.shape[0])
# bad
tmp = np.zeros_like(rip_corr)
tmp[np.triu_indices(interval_mat.shape[0], 1)] += 1
tmp[np.tril_indices(interval_mat.shape[0], 300)] += 1
index = np.where(tmp == 2)
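	# Note on the mask above: adding 1 on the strict upper triangle and 1 on
	# tril_indices(n, 300), then keeping cells equal to 2, selects pairs (i, j)
	# with 0 < j - i <= 300. The correlation is therefore only computed for
	# events at most 300 ripples apart, which keeps the pairwise loop below
	# tractable; the same mask is rebuilt for the REM and wake epochs further down.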
for i, j in zip(index[0], index[1]):
rip_corr[i,j] = scipy.stats.pearsonr(rip_pop.iloc[i].values, rip_pop.iloc[j].values)[0]
rip_corr[j,i] = rip_corr[i,j]
# print(rip_corr[i,j])
allrip_corr = pd.DataFrame(index = interval_mat[index], data = rip_corr[index])
rip_corr = pd.DataFrame(index = rip_pop.index.values, data = rip_corr, columns = rip_pop.index.values)
np.fill_diagonal(rip_corr.values, 1.0)
rip_corr = rip_corr.fillna(0)
###############################################################################################################
# POPULATION CORRELATION FOR EACH THETA CYCLE OF REM
###############################################################################################################
# compute all time interval for each ep of theta
interval_mat = np.vstack(nts.TsdFrame(rem_pop).as_units('s').index.values) - nts.TsdFrame(rem_pop).as_units('s').index.values
rem_corr = np.ones(interval_mat.shape)*np.nan
# index = np.where(np.logical_and(interval_mat < 3.0, interval_mat >= 0.0))
# rem_corr = np.eye(interval_mat.shape[0])
# bad
tmp = np.zeros_like(rem_corr)
tmp[np.triu_indices(interval_mat.shape[0], 1)] += 1
tmp[np.tril_indices(interval_mat.shape[0], 300)] += 1
index = np.where(tmp == 2)
for i, j in zip(index[0], index[1]):
rem_corr[i,j] = scipy.stats.pearsonr(rem_pop.iloc[i].values, rem_pop.iloc[j].values)[0]
rem_corr[j,i] = rem_corr[i,j]
allrem_corr = pd.DataFrame(index = interval_mat[index], data = rem_corr[index])
rem_corr = pd.DataFrame(index = rem_pop.index.values, data = rem_corr, columns = rem_pop.index.values)
np.fill_diagonal(rem_corr.values, 1.0)
rem_corr = rem_corr.fillna(0)
###############################################################################################################
# POPULATION CORRELATION FOR EACH THETA CYCLE OF WAKE
###############################################################################################################
# compute all time interval for each ep of theta
interval_mat = np.vstack(nts.TsdFrame(wak_pop).as_units('s').index.values) - nts.TsdFrame(wak_pop).as_units('s').index.values
wak_corr = np.ones(interval_mat.shape)*np.nan
# index = np.where(np.logical_and(interval_mat < 3.0, interval_mat >= 0.0))
# wak_corr = np.eye(interval_mat.shape[0])
# bad
tmp = np.zeros_like(wak_corr)
tmp[np.triu_indices(interval_mat.shape[0], 1)] += 1
tmp[np.tril_indices(interval_mat.shape[0], 300)] += 1
index = np.where(tmp == 2)
for i, j in zip(index[0], index[1]):
wak_corr[i,j] = scipy.stats.pearsonr(wak_pop.iloc[i].values, wak_pop.iloc[j].values)[0]
wak_corr[j,i] = wak_corr[i,j]
allwak_corr = pd.DataFrame(index = interval_mat[index], data = wak_corr[index])
wak_corr = pd.DataFrame(index = wak_pop.index.values, data = wak_corr, columns = wak_pop.index.values)
np.fill_diagonal(wak_corr.values, 1.0)
wak_corr = wak_corr.fillna(0)
###############################################################################################################
# STORING
###############################################################################################################
store = pd.HDFStore("/mnt/DataGuillaume/corr_pop/"+session.split("/")[1]+".h5")
store.put('rip_corr', rip_corr)
store.put('allrip_corr', allrip_corr)
store.put('wak_corr', wak_corr)
store.put('allwak_corr', allwak_corr)
store.put('rem_corr', rem_corr)
store.put('allrem_corr', allrem_corr)
store.close()
print(time.clock() - start_time, "seconds")
return time.clock() - start_time
dview = Pool(8)
a = dview.map_async(compute_population_correlation, datasets)
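# Note: map_async() returns immediately with an AsyncResult; calling a.get()
# (or using dview.map instead) would block until every session is processed.
# Without it the script can reach the end and tear the worker pool down before
# all sessions have been written to disk.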
# a = compute_population_correlation(datasets[0])
# ###############################################################################################################
# # PLOT
# ###############################################################################################################
# last = np.max([np.max(allrip_corr[:,0]),np.max(alltheta_corr[:,0])])
# bins = np.arange(0.0, last, 0.2)
# # average rip corr
# index_rip = np.digitize(allrip_corr[:,0], bins)
# mean_ripcorr = np.array([np.mean(allrip_corr[index_rip == i,1]) for i in np.unique(index_rip)[0:30]])
# # average theta corr
# index_theta = np.digitize(alltheta_corr[:,0], bins)
# mean_thetacorr = np.array([np.mean(alltheta_corr[index_theta == i,1]) for i in np.unique(index_theta)[0:30]])
# xt = list(bins[0:30][::-1]*-1.0)+list(bins[0:30])
# ytheta = list(mean_thetacorr[0:30][::-1])+list(mean_thetacorr[0:30])
# yrip = list(mean_ripcorr[0:30][::-1])+list(mean_ripcorr[0:30])
# plot(xt, ytheta, 'o-', label = 'theta')
# plot(xt, yrip, 'o-', label = 'ripple')
# legend()
# xlabel('s')
# ylabel('r')
# show()
| gpl-3.0 |
datapythonista/pandas | pandas/tests/indexes/common.py | 1 | 29504 | from datetime import datetime
import gc
from typing import Type
import numpy as np
import pytest
from pandas._libs import iNaT
from pandas._libs.tslibs import Timestamp
from pandas.core.dtypes.common import is_datetime64tz_dtype
from pandas.core.dtypes.dtypes import CategoricalDtype
import pandas as pd
from pandas import (
CategoricalIndex,
DatetimeIndex,
Float64Index,
Index,
Int64Index,
IntervalIndex,
MultiIndex,
PeriodIndex,
RangeIndex,
Series,
TimedeltaIndex,
UInt64Index,
isna,
)
import pandas._testing as tm
from pandas.core.indexes.datetimelike import DatetimeIndexOpsMixin
class Base:
"""
Base class for index sub-class tests.
"""
_index_cls: Type[Index]
@pytest.fixture
def simple_index(self):
raise NotImplementedError("Method not implemented")
def create_index(self) -> Index:
raise NotImplementedError("Method not implemented")
def test_pickle_compat_construction(self):
# need an object to create with
msg = "|".join(
[
r"Index\(\.\.\.\) must be called with a collection of some "
r"kind, None was passed",
r"DatetimeIndex\(\) must be called with a collection of some "
r"kind, None was passed",
r"TimedeltaIndex\(\) must be called with a collection of some "
r"kind, None was passed",
r"__new__\(\) missing 1 required positional argument: 'data'",
r"__new__\(\) takes at least 2 arguments \(1 given\)",
]
)
with pytest.raises(TypeError, match=msg):
self._index_cls()
@pytest.mark.parametrize("name", [None, "new_name"])
def test_to_frame(self, name, simple_index):
# see GH-15230, GH-22580
idx = simple_index
if name:
idx_name = name
else:
idx_name = idx.name or 0
df = idx.to_frame(name=idx_name)
assert df.index is idx
assert len(df.columns) == 1
assert df.columns[0] == idx_name
assert df[idx_name].values is not idx.values
df = idx.to_frame(index=False, name=idx_name)
assert df.index is not idx
def test_shift(self, simple_index):
# GH8083 test the base class for shift
idx = simple_index
msg = (
f"This method is only implemented for DatetimeIndex, PeriodIndex and "
f"TimedeltaIndex; Got type {type(idx).__name__}"
)
with pytest.raises(NotImplementedError, match=msg):
idx.shift(1)
with pytest.raises(NotImplementedError, match=msg):
idx.shift(1, 2)
def test_constructor_name_unhashable(self, simple_index):
# GH#29069 check that name is hashable
# See also same-named test in tests.series.test_constructors
idx = simple_index
with pytest.raises(TypeError, match="Index.name must be a hashable type"):
type(idx)(idx, name=[])
def test_create_index_existing_name(self, simple_index):
# GH11193, when an existing index is passed, and a new name is not
# specified, the new index should inherit the previous object name
expected = simple_index
if not isinstance(expected, MultiIndex):
expected.name = "foo"
result = Index(expected)
tm.assert_index_equal(result, expected)
result = Index(expected, name="bar")
expected.name = "bar"
tm.assert_index_equal(result, expected)
else:
expected.names = ["foo", "bar"]
result = Index(expected)
tm.assert_index_equal(
result,
Index(
Index(
[
("foo", "one"),
("foo", "two"),
("bar", "one"),
("baz", "two"),
("qux", "one"),
("qux", "two"),
],
dtype="object",
),
names=["foo", "bar"],
),
)
result = Index(expected, names=["A", "B"])
tm.assert_index_equal(
result,
Index(
Index(
[
("foo", "one"),
("foo", "two"),
("bar", "one"),
("baz", "two"),
("qux", "one"),
("qux", "two"),
],
dtype="object",
),
names=["A", "B"],
),
)
def test_numeric_compat(self, simple_index):
idx = simple_index
# Check that this doesn't cover MultiIndex case, if/when it does,
# we can remove multi.test_compat.test_numeric_compat
assert not isinstance(idx, MultiIndex)
if type(idx) is Index:
return
typ = type(idx._data).__name__
lmsg = "|".join(
[
rf"unsupported operand type\(s\) for \*: '{typ}' and 'int'",
"cannot perform (__mul__|__truediv__|__floordiv__) with "
f"this index type: {typ}",
]
)
with pytest.raises(TypeError, match=lmsg):
idx * 1
rmsg = "|".join(
[
rf"unsupported operand type\(s\) for \*: 'int' and '{typ}'",
"cannot perform (__rmul__|__rtruediv__|__rfloordiv__) with "
f"this index type: {typ}",
]
)
with pytest.raises(TypeError, match=rmsg):
1 * idx
div_err = lmsg.replace("*", "/")
with pytest.raises(TypeError, match=div_err):
idx / 1
div_err = rmsg.replace("*", "/")
with pytest.raises(TypeError, match=div_err):
1 / idx
floordiv_err = lmsg.replace("*", "//")
with pytest.raises(TypeError, match=floordiv_err):
idx // 1
floordiv_err = rmsg.replace("*", "//")
with pytest.raises(TypeError, match=floordiv_err):
1 // idx
def test_logical_compat(self, simple_index):
idx = simple_index
with pytest.raises(TypeError, match="cannot perform all"):
idx.all()
with pytest.raises(TypeError, match="cannot perform any"):
idx.any()
def test_repr_roundtrip(self, simple_index):
idx = simple_index
tm.assert_index_equal(eval(repr(idx)), idx)
def test_repr_max_seq_item_setting(self, simple_index):
# GH10182
idx = simple_index
idx = idx.repeat(50)
with pd.option_context("display.max_seq_items", None):
repr(idx)
assert "..." not in str(idx)
def test_copy_name(self, index):
# gh-12309: Check that the "name" argument
# passed at initialization is honored.
if isinstance(index, MultiIndex):
return
first = type(index)(index, copy=True, name="mario")
second = type(first)(first, copy=False)
# Even though "copy=False", we want a new object.
assert first is not second
# Not using tm.assert_index_equal() since names differ.
assert index.equals(first)
assert first.name == "mario"
assert second.name == "mario"
s1 = Series(2, index=first)
s2 = Series(3, index=second[:-1])
if not isinstance(index, CategoricalIndex):
# See gh-13365
s3 = s1 * s2
assert s3.index.name == "mario"
def test_copy_name2(self, index):
# gh-35592
if isinstance(index, MultiIndex):
return
assert index.copy(name="mario").name == "mario"
with pytest.raises(ValueError, match="Length of new names must be 1, got 2"):
index.copy(name=["mario", "luigi"])
msg = f"{type(index).__name__}.name must be a hashable type"
with pytest.raises(TypeError, match=msg):
index.copy(name=[["mario"]])
def test_ensure_copied_data(self, index):
# Check the "copy" argument of each Index.__new__ is honoured
# GH12309
init_kwargs = {}
if isinstance(index, PeriodIndex):
# Needs "freq" specification:
init_kwargs["freq"] = index.freq
elif isinstance(index, (RangeIndex, MultiIndex, CategoricalIndex)):
# RangeIndex cannot be initialized from data
# MultiIndex and CategoricalIndex are tested separately
return
index_type = type(index)
result = index_type(index.values, copy=True, **init_kwargs)
if is_datetime64tz_dtype(index.dtype):
result = result.tz_localize("UTC").tz_convert(index.tz)
if isinstance(index, (DatetimeIndex, TimedeltaIndex)):
index = index._with_freq(None)
tm.assert_index_equal(index, result)
if isinstance(index, PeriodIndex):
# .values an object array of Period, thus copied
result = index_type(ordinal=index.asi8, copy=False, **init_kwargs)
tm.assert_numpy_array_equal(index.asi8, result.asi8, check_same="same")
elif isinstance(index, IntervalIndex):
# checked in test_interval.py
pass
else:
result = index_type(index.values, copy=False, **init_kwargs)
tm.assert_numpy_array_equal(index.values, result.values, check_same="same")
def test_memory_usage(self, index):
index._engine.clear_mapping()
result = index.memory_usage()
if index.empty:
# we report 0 for no-length
assert result == 0
return
# non-zero length
index.get_loc(index[0])
result2 = index.memory_usage()
result3 = index.memory_usage(deep=True)
# RangeIndex, IntervalIndex
# don't have engines
if not isinstance(index, (RangeIndex, IntervalIndex)):
assert result2 > result
if index.inferred_type == "object":
assert result3 > result2
def test_argsort(self, request, index):
# separately tested
if isinstance(index, CategoricalIndex):
return
result = index.argsort()
expected = np.array(index).argsort()
tm.assert_numpy_array_equal(result, expected, check_dtype=False)
def test_numpy_argsort(self, index):
result = np.argsort(index)
expected = index.argsort()
tm.assert_numpy_array_equal(result, expected)
# these are the only two types that perform
# pandas compatibility input validation - the
# rest already perform separate (or no) such
# validation via their 'values' attribute as
# defined in pandas.core.indexes/base.py - they
# cannot be changed at the moment due to
# backwards compatibility concerns
if isinstance(type(index), (CategoricalIndex, RangeIndex)):
# TODO: why type(index)?
msg = "the 'axis' parameter is not supported"
with pytest.raises(ValueError, match=msg):
np.argsort(index, axis=1)
msg = "the 'kind' parameter is not supported"
with pytest.raises(ValueError, match=msg):
np.argsort(index, kind="mergesort")
msg = "the 'order' parameter is not supported"
with pytest.raises(ValueError, match=msg):
np.argsort(index, order=("a", "b"))
def test_repeat(self, simple_index):
rep = 2
idx = simple_index.copy()
expected = Index(idx.values.repeat(rep), name=idx.name)
tm.assert_index_equal(idx.repeat(rep), expected)
idx = simple_index
rep = np.arange(len(idx))
expected = Index(idx.values.repeat(rep), name=idx.name)
tm.assert_index_equal(idx.repeat(rep), expected)
def test_numpy_repeat(self, simple_index):
rep = 2
idx = simple_index
expected = idx.repeat(rep)
tm.assert_index_equal(np.repeat(idx, rep), expected)
msg = "the 'axis' parameter is not supported"
with pytest.raises(ValueError, match=msg):
np.repeat(idx, rep, axis=0)
@pytest.mark.parametrize("klass", [list, tuple, np.array, Series])
def test_where(self, klass, simple_index):
idx = simple_index
if isinstance(idx, (DatetimeIndex, TimedeltaIndex)):
# where does not preserve freq
idx = idx._with_freq(None)
cond = [True] * len(idx)
result = idx.where(klass(cond))
expected = idx
tm.assert_index_equal(result, expected)
cond = [False] + [True] * len(idx[1:])
expected = Index([idx._na_value] + idx[1:].tolist(), dtype=idx.dtype)
result = idx.where(klass(cond))
tm.assert_index_equal(result, expected)
def test_insert_base(self, index):
result = index[1:4]
if not len(index):
return
# test 0th element
assert index[0:4].equals(result.insert(0, index[0]))
def test_delete_base(self, index):
if not len(index):
return
if isinstance(index, RangeIndex):
# tested in class
return
expected = index[1:]
result = index.delete(0)
assert result.equals(expected)
assert result.name == expected.name
expected = index[:-1]
result = index.delete(-1)
assert result.equals(expected)
assert result.name == expected.name
length = len(index)
msg = f"index {length} is out of bounds for axis 0 with size {length}"
with pytest.raises(IndexError, match=msg):
index.delete(length)
def test_equals(self, index):
if isinstance(index, IntervalIndex):
# IntervalIndex tested separately, the index.equals(index.astype(object))
# fails for IntervalIndex
return
assert index.equals(index)
assert index.equals(index.copy())
assert index.equals(index.astype(object))
assert not index.equals(list(index))
assert not index.equals(np.array(index))
# Cannot pass in non-int64 dtype to RangeIndex
if not isinstance(index, RangeIndex):
same_values = Index(index, dtype=object)
assert index.equals(same_values)
assert same_values.equals(index)
if index.nlevels == 1:
# do not test MultiIndex
assert not index.equals(Series(index))
def test_equals_op(self, simple_index):
# GH9947, GH10637
index_a = simple_index
n = len(index_a)
index_b = index_a[0:-1]
index_c = index_a[0:-1].append(index_a[-2:-1])
index_d = index_a[0:1]
msg = "Lengths must match|could not be broadcast"
with pytest.raises(ValueError, match=msg):
index_a == index_b
expected1 = np.array([True] * n)
expected2 = np.array([True] * (n - 1) + [False])
tm.assert_numpy_array_equal(index_a == index_a, expected1)
tm.assert_numpy_array_equal(index_a == index_c, expected2)
# test comparisons with numpy arrays
array_a = np.array(index_a)
array_b = np.array(index_a[0:-1])
array_c = np.array(index_a[0:-1].append(index_a[-2:-1]))
array_d = np.array(index_a[0:1])
with pytest.raises(ValueError, match=msg):
index_a == array_b
tm.assert_numpy_array_equal(index_a == array_a, expected1)
tm.assert_numpy_array_equal(index_a == array_c, expected2)
# test comparisons with Series
series_a = Series(array_a)
series_b = Series(array_b)
series_c = Series(array_c)
series_d = Series(array_d)
with pytest.raises(ValueError, match=msg):
index_a == series_b
tm.assert_numpy_array_equal(index_a == series_a, expected1)
tm.assert_numpy_array_equal(index_a == series_c, expected2)
# cases where length is 1 for one of them
with pytest.raises(ValueError, match="Lengths must match"):
index_a == index_d
with pytest.raises(ValueError, match="Lengths must match"):
index_a == series_d
with pytest.raises(ValueError, match="Lengths must match"):
index_a == array_d
msg = "Can only compare identically-labeled Series objects"
with pytest.raises(ValueError, match=msg):
series_a == series_d
with pytest.raises(ValueError, match="Lengths must match"):
series_a == array_d
# comparing with a scalar should broadcast; note that we are excluding
# MultiIndex because in this case each item in the index is a tuple of
# length 2, and therefore is considered an array of length 2 in the
# comparison instead of a scalar
if not isinstance(index_a, MultiIndex):
expected3 = np.array([False] * (len(index_a) - 2) + [True, False])
# assuming the 2nd to last item is unique in the data
item = index_a[-2]
tm.assert_numpy_array_equal(index_a == item, expected3)
# For RangeIndex we can convert to Int64Index
tm.assert_series_equal(series_a == item, Series(expected3))
def test_format(self, simple_index):
# GH35439
idx = simple_index
expected = [str(x) for x in idx]
assert idx.format() == expected
def test_format_empty(self):
# GH35712
empty_idx = self._index_cls([])
assert empty_idx.format() == []
assert empty_idx.format(name=True) == [""]
def test_hasnans_isnans(self, index_flat):
# GH 11343, added tests for hasnans / isnans
index = index_flat
# cases in indices doesn't include NaN
idx = index.copy(deep=True)
expected = np.array([False] * len(idx), dtype=bool)
tm.assert_numpy_array_equal(idx._isnan, expected)
assert idx.hasnans is False
idx = index.copy(deep=True)
values = np.asarray(idx.values)
if len(index) == 0:
return
elif isinstance(index, DatetimeIndexOpsMixin):
values[1] = iNaT
elif isinstance(index, (Int64Index, UInt64Index, RangeIndex)):
return
else:
values[1] = np.nan
if isinstance(index, PeriodIndex):
idx = type(index)(values, freq=index.freq)
else:
idx = type(index)(values)
expected = np.array([False] * len(idx), dtype=bool)
expected[1] = True
tm.assert_numpy_array_equal(idx._isnan, expected)
assert idx.hasnans is True
def test_fillna(self, index):
# GH 11343
if len(index) == 0:
pass
elif isinstance(index, MultiIndex):
idx = index.copy(deep=True)
msg = "isna is not defined for MultiIndex"
with pytest.raises(NotImplementedError, match=msg):
idx.fillna(idx[0])
else:
idx = index.copy(deep=True)
result = idx.fillna(idx[0])
tm.assert_index_equal(result, idx)
assert result is not idx
msg = "'value' must be a scalar, passed: "
with pytest.raises(TypeError, match=msg):
idx.fillna([idx[0]])
idx = index.copy(deep=True)
values = np.asarray(idx.values)
if isinstance(index, DatetimeIndexOpsMixin):
values[1] = iNaT
elif isinstance(index, (Int64Index, UInt64Index, RangeIndex)):
return
else:
values[1] = np.nan
if isinstance(index, PeriodIndex):
idx = type(index)(values, freq=index.freq)
else:
idx = type(index)(values)
expected = np.array([False] * len(idx), dtype=bool)
expected[1] = True
tm.assert_numpy_array_equal(idx._isnan, expected)
assert idx.hasnans is True
def test_nulls(self, index):
# this is really a smoke test for the methods
# as these are adequately tested for function elsewhere
if len(index) == 0:
tm.assert_numpy_array_equal(index.isna(), np.array([], dtype=bool))
elif isinstance(index, MultiIndex):
idx = index.copy()
msg = "isna is not defined for MultiIndex"
with pytest.raises(NotImplementedError, match=msg):
idx.isna()
elif not index.hasnans:
tm.assert_numpy_array_equal(index.isna(), np.zeros(len(index), dtype=bool))
tm.assert_numpy_array_equal(index.notna(), np.ones(len(index), dtype=bool))
else:
result = isna(index)
tm.assert_numpy_array_equal(index.isna(), result)
tm.assert_numpy_array_equal(index.notna(), ~result)
def test_empty(self, simple_index):
# GH 15270
idx = simple_index
assert not idx.empty
assert idx[:0].empty
def test_join_self_unique(self, join_type, simple_index):
idx = simple_index
if idx.is_unique:
joined = idx.join(idx, how=join_type)
assert (idx == joined).all()
def test_map(self, simple_index):
# callable
idx = simple_index
# we don't infer UInt64
if isinstance(idx, UInt64Index):
expected = idx.astype("int64")
else:
expected = idx
result = idx.map(lambda x: x)
# For RangeIndex we convert to Int64Index
tm.assert_index_equal(result, expected)
@pytest.mark.parametrize(
"mapper",
[
lambda values, index: {i: e for e, i in zip(values, index)},
lambda values, index: Series(values, index),
],
)
def test_map_dictlike(self, mapper, simple_index):
idx = simple_index
if isinstance(idx, CategoricalIndex):
pytest.skip(f"skipping tests for {type(idx)}")
identity = mapper(idx.values, idx)
# we don't infer to UInt64 for a dict
if isinstance(idx, UInt64Index) and isinstance(identity, dict):
expected = idx.astype("int64")
else:
expected = idx
result = idx.map(identity)
# For RangeIndex we convert to Int64Index
tm.assert_index_equal(result, expected)
# empty mappable
expected = Index([np.nan] * len(idx))
result = idx.map(mapper(expected, idx))
tm.assert_index_equal(result, expected)
def test_map_str(self, simple_index):
# GH 31202
idx = simple_index
result = idx.map(str)
expected = Index([str(x) for x in idx], dtype=object)
tm.assert_index_equal(result, expected)
@pytest.mark.parametrize("copy", [True, False])
@pytest.mark.parametrize("name", [None, "foo"])
@pytest.mark.parametrize("ordered", [True, False])
def test_astype_category(self, copy, name, ordered, simple_index):
# GH 18630
idx = simple_index
if name:
idx = idx.rename(name)
# standard categories
dtype = CategoricalDtype(ordered=ordered)
result = idx.astype(dtype, copy=copy)
expected = CategoricalIndex(idx, name=name, ordered=ordered)
tm.assert_index_equal(result, expected, exact=True)
# non-standard categories
dtype = CategoricalDtype(idx.unique().tolist()[:-1], ordered)
result = idx.astype(dtype, copy=copy)
expected = CategoricalIndex(idx, name=name, dtype=dtype)
tm.assert_index_equal(result, expected, exact=True)
if ordered is False:
# dtype='category' defaults to ordered=False, so only test once
result = idx.astype("category", copy=copy)
expected = CategoricalIndex(idx, name=name)
tm.assert_index_equal(result, expected, exact=True)
def test_is_unique(self, simple_index):
# initialize a unique index
index = simple_index.drop_duplicates()
assert index.is_unique is True
# empty index should be unique
index_empty = index[:0]
assert index_empty.is_unique is True
# test basic dupes
index_dup = index.insert(0, index[0])
assert index_dup.is_unique is False
# single NA should be unique
index_na = index.insert(0, np.nan)
assert index_na.is_unique is True
# multiple NA should not be unique
index_na_dup = index_na.insert(0, np.nan)
assert index_na_dup.is_unique is False
@pytest.mark.arm_slow
def test_engine_reference_cycle(self, simple_index):
# GH27585
index = simple_index
nrefs_pre = len(gc.get_referrers(index))
index._engine
assert len(gc.get_referrers(index)) == nrefs_pre
def test_getitem_2d_deprecated(self, simple_index):
# GH#30588
idx = simple_index
msg = "Support for multi-dimensional indexing"
check = not isinstance(idx, (RangeIndex, CategoricalIndex))
with tm.assert_produces_warning(
FutureWarning, match=msg, check_stacklevel=check
):
res = idx[:, None]
assert isinstance(res, np.ndarray), type(res)
def test_copy_shares_cache(self, simple_index):
# GH32898, GH36840
idx = simple_index
idx.get_loc(idx[0]) # populates the _cache.
copy = idx.copy()
assert copy._cache is idx._cache
def test_shallow_copy_shares_cache(self, simple_index):
# GH32669, GH36840
idx = simple_index
idx.get_loc(idx[0]) # populates the _cache.
shallow_copy = idx._view()
assert shallow_copy._cache is idx._cache
shallow_copy = idx._shallow_copy(idx._data)
assert shallow_copy._cache is not idx._cache
assert shallow_copy._cache == {}
def test_index_groupby(self, simple_index):
idx = simple_index[:5]
to_groupby = np.array([1, 2, np.nan, 2, 1])
tm.assert_dict_equal(
idx.groupby(to_groupby), {1.0: idx[[0, 4]], 2.0: idx[[1, 3]]}
)
to_groupby = DatetimeIndex(
[
datetime(2011, 11, 1),
datetime(2011, 12, 1),
pd.NaT,
datetime(2011, 12, 1),
datetime(2011, 11, 1),
],
tz="UTC",
).values
ex_keys = [Timestamp("2011-11-01"), Timestamp("2011-12-01")]
expected = {ex_keys[0]: idx[[0, 4]], ex_keys[1]: idx[[1, 3]]}
tm.assert_dict_equal(idx.groupby(to_groupby), expected)
class NumericBase(Base):
"""
Base class for numeric index (incl. RangeIndex) sub-class tests.
"""
def test_constructor_unwraps_index(self, dtype):
idx = Index([1, 2], dtype=dtype)
result = self._index_cls(idx)
expected = np.array([1, 2], dtype=dtype)
tm.assert_numpy_array_equal(result._data, expected)
def test_where(self):
# Tested in numeric.test_indexing
pass
def test_can_hold_identifiers(self, simple_index):
idx = simple_index
key = idx[0]
assert idx._can_hold_identifiers_and_holds_name(key) is False
def test_format(self, simple_index):
# GH35439
idx = simple_index
max_width = max(len(str(x)) for x in idx)
expected = [str(x).ljust(max_width) for x in idx]
assert idx.format() == expected
def test_numeric_compat(self):
pass # override Base method
def test_insert_na(self, nulls_fixture, simple_index):
# GH 18295 (test missing)
index = simple_index
na_val = nulls_fixture
if na_val is pd.NaT:
expected = Index([index[0], pd.NaT] + list(index[1:]), dtype=object)
else:
expected = Float64Index([index[0], np.nan] + list(index[1:]))
result = index.insert(1, na_val)
tm.assert_index_equal(result, expected)
def test_arithmetic_explicit_conversions(self):
# GH 8608
# add/sub are overridden explicitly for Float/Int Index
index_cls = self._index_cls
if index_cls is RangeIndex:
idx = RangeIndex(5)
else:
idx = index_cls(np.arange(5, dtype="int64"))
# float conversions
arr = np.arange(5, dtype="int64") * 3.2
expected = Float64Index(arr)
fidx = idx * 3.2
tm.assert_index_equal(fidx, expected)
fidx = 3.2 * idx
tm.assert_index_equal(fidx, expected)
# interops with numpy arrays
expected = Float64Index(arr)
a = np.zeros(5, dtype="float64")
result = fidx - a
tm.assert_index_equal(result, expected)
expected = Float64Index(-arr)
a = np.zeros(5, dtype="float64")
result = a - fidx
tm.assert_index_equal(result, expected)
def test_invalid_dtype(self, invalid_dtype):
# GH 29539
dtype = invalid_dtype
msg = fr"Incorrect `dtype` passed: expected \w+(?: \w+)?, received {dtype}"
with pytest.raises(ValueError, match=msg):
self._index_cls([1, 2, 3], dtype=dtype)
| bsd-3-clause |
MohammedWasim/scikit-learn | examples/decomposition/plot_pca_3d.py | 354 | 2432 | #!/usr/bin/python
# -*- coding: utf-8 -*-
"""
=========================================================
Principal components analysis (PCA)
=========================================================
These figures aid in illustrating how a point cloud
can be very flat in one direction--which is where PCA
comes in to choose a direction that is not flat.
"""
print(__doc__)
# Authors: Gael Varoquaux
# Jaques Grobler
# Kevin Hughes
# License: BSD 3 clause
from sklearn.decomposition import PCA
from mpl_toolkits.mplot3d import Axes3D
import numpy as np
import matplotlib.pyplot as plt
from scipy import stats
###############################################################################
# Create the data
e = np.exp(1)
np.random.seed(4)
def pdf(x):
return 0.5 * (stats.norm(scale=0.25 / e).pdf(x)
+ stats.norm(scale=4 / e).pdf(x))
y = np.random.normal(scale=0.5, size=(30000))
x = np.random.normal(scale=0.5, size=(30000))
z = np.random.normal(scale=0.1, size=len(x))
density = pdf(x) * pdf(y)
pdf_z = pdf(5 * z)
density *= pdf_z
a = x + y
b = 2 * y
c = a - b + z
norm = np.sqrt(a.var() + b.var())
a /= norm
b /= norm
###############################################################################
# Plot the figures
def plot_figs(fig_num, elev, azim):
fig = plt.figure(fig_num, figsize=(4, 3))
plt.clf()
ax = Axes3D(fig, rect=[0, 0, .95, 1], elev=elev, azim=azim)
ax.scatter(a[::10], b[::10], c[::10], c=density[::10], marker='+', alpha=.4)
Y = np.c_[a, b, c]
# Using SciPy's SVD, this would be:
# _, pca_score, V = scipy.linalg.svd(Y, full_matrices=False)
pca = PCA(n_components=3)
pca.fit(Y)
pca_score = pca.explained_variance_ratio_
V = pca.components_
x_pca_axis, y_pca_axis, z_pca_axis = V.T * pca_score / pca_score.min()
x_pca_axis, y_pca_axis, z_pca_axis = 3 * V.T
x_pca_plane = np.r_[x_pca_axis[:2], - x_pca_axis[1::-1]]
y_pca_plane = np.r_[y_pca_axis[:2], - y_pca_axis[1::-1]]
z_pca_plane = np.r_[z_pca_axis[:2], - z_pca_axis[1::-1]]
x_pca_plane.shape = (2, 2)
y_pca_plane.shape = (2, 2)
z_pca_plane.shape = (2, 2)
ax.plot_surface(x_pca_plane, y_pca_plane, z_pca_plane)
ax.w_xaxis.set_ticklabels([])
ax.w_yaxis.set_ticklabels([])
ax.w_zaxis.set_ticklabels([])
elev = -40
azim = -80
plot_figs(1, elev, azim)
elev = 30
azim = 20
plot_figs(2, elev, azim)
plt.show()
| bsd-3-clause |
ClimbsRocks/scikit-learn | sklearn/datasets/samples_generator.py | 26 | 56554 | """
Generate samples of synthetic data sets.
"""
# Authors: B. Thirion, G. Varoquaux, A. Gramfort, V. Michel, O. Grisel,
# G. Louppe, J. Nothman
# License: BSD 3 clause
import numbers
import array
import numpy as np
from scipy import linalg
import scipy.sparse as sp
from ..preprocessing import MultiLabelBinarizer
from ..utils import check_array, check_random_state
from ..utils import shuffle as util_shuffle
from ..utils.fixes import astype
from ..utils.random import sample_without_replacement
from ..externals import six
map = six.moves.map
zip = six.moves.zip
def _generate_hypercube(samples, dimensions, rng):
"""Returns distinct binary samples of length dimensions
"""
if dimensions > 30:
return np.hstack([_generate_hypercube(samples, dimensions - 30, rng),
_generate_hypercube(samples, 30, rng)])
out = astype(sample_without_replacement(2 ** dimensions, samples,
random_state=rng),
dtype='>u4', copy=False)
out = np.unpackbits(out.view('>u1')).reshape((-1, 32))[:, -dimensions:]
return out
def make_classification(n_samples=100, n_features=20, n_informative=2,
n_redundant=2, n_repeated=0, n_classes=2,
n_clusters_per_class=2, weights=None, flip_y=0.01,
class_sep=1.0, hypercube=True, shift=0.0, scale=1.0,
shuffle=True, random_state=None):
"""Generate a random n-class classification problem.
This initially creates clusters of points normally distributed (std=1)
about vertices of a `2 * class_sep`-sided hypercube, and assigns an equal
number of clusters to each class. It introduces interdependence between
these features and adds various types of further noise to the data.
Prior to shuffling, `X` stacks a number of these primary "informative"
features, "redundant" linear combinations of these, "repeated" duplicates
of sampled features, and arbitrary noise for any remaining features.
Read more in the :ref:`User Guide <sample_generators>`.
Parameters
----------
n_samples : int, optional (default=100)
The number of samples.
n_features : int, optional (default=20)
The total number of features. These comprise `n_informative`
informative features, `n_redundant` redundant features, `n_repeated`
duplicated features and `n_features-n_informative-n_redundant-
n_repeated` useless features drawn at random.
n_informative : int, optional (default=2)
The number of informative features. Each class is composed of a number
of gaussian clusters each located around the vertices of a hypercube
in a subspace of dimension `n_informative`. For each cluster,
informative features are drawn independently from N(0, 1) and then
randomly linearly combined within each cluster in order to add
covariance. The clusters are then placed on the vertices of the
hypercube.
n_redundant : int, optional (default=2)
The number of redundant features. These features are generated as
random linear combinations of the informative features.
n_repeated : int, optional (default=0)
The number of duplicated features, drawn randomly from the informative
and the redundant features.
n_classes : int, optional (default=2)
The number of classes (or labels) of the classification problem.
n_clusters_per_class : int, optional (default=2)
The number of clusters per class.
weights : list of floats or None (default=None)
The proportions of samples assigned to each class. If None, then
classes are balanced. Note that if `len(weights) == n_classes - 1`,
then the last class weight is automatically inferred.
More than `n_samples` samples may be returned if the sum of `weights`
exceeds 1.
flip_y : float, optional (default=0.01)
The fraction of samples whose class is randomly exchanged.
class_sep : float, optional (default=1.0)
The factor multiplying the hypercube dimension.
hypercube : boolean, optional (default=True)
If True, the clusters are put on the vertices of a hypercube. If
False, the clusters are put on the vertices of a random polytope.
shift : float, array of shape [n_features] or None, optional (default=0.0)
Shift features by the specified value. If None, then features
are shifted by a random value drawn in [-class_sep, class_sep].
scale : float, array of shape [n_features] or None, optional (default=1.0)
Multiply features by the specified value. If None, then features
are scaled by a random value drawn in [1, 100]. Note that scaling
happens after shifting.
shuffle : boolean, optional (default=True)
Shuffle the samples and the features.
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
Returns
-------
X : array of shape [n_samples, n_features]
The generated samples.
y : array of shape [n_samples]
The integer labels for class membership of each sample.
Notes
-----
The algorithm is adapted from Guyon [1] and was designed to generate
the "Madelon" dataset.
References
----------
.. [1] I. Guyon, "Design of experiments for the NIPS 2003 variable
selection benchmark", 2003.
See also
--------
make_blobs: simplified variant
make_multilabel_classification: unrelated generator for multilabel tasks
"""
generator = check_random_state(random_state)
# Count features, clusters and samples
if n_informative + n_redundant + n_repeated > n_features:
raise ValueError("Number of informative, redundant and repeated "
"features must sum to less than the number of total"
" features")
if 2 ** n_informative < n_classes * n_clusters_per_class:
raise ValueError("n_classes * n_clusters_per_class must"
" be smaller or equal 2 ** n_informative")
if weights and len(weights) not in [n_classes, n_classes - 1]:
raise ValueError("Weights specified but incompatible with number "
"of classes.")
n_useless = n_features - n_informative - n_redundant - n_repeated
n_clusters = n_classes * n_clusters_per_class
if weights and len(weights) == (n_classes - 1):
weights.append(1.0 - sum(weights))
if weights is None:
weights = [1.0 / n_classes] * n_classes
weights[-1] = 1.0 - sum(weights[:-1])
# Distribute samples among clusters by weight
n_samples_per_cluster = []
for k in range(n_clusters):
n_samples_per_cluster.append(int(n_samples * weights[k % n_classes]
/ n_clusters_per_class))
for i in range(n_samples - sum(n_samples_per_cluster)):
n_samples_per_cluster[i % n_clusters] += 1
# Initialize X and y
X = np.zeros((n_samples, n_features))
y = np.zeros(n_samples, dtype=np.int)
# Build the polytope whose vertices become cluster centroids
centroids = _generate_hypercube(n_clusters, n_informative,
generator).astype(float)
centroids *= 2 * class_sep
centroids -= class_sep
if not hypercube:
centroids *= generator.rand(n_clusters, 1)
centroids *= generator.rand(1, n_informative)
# Initially draw informative features from the standard normal
X[:, :n_informative] = generator.randn(n_samples, n_informative)
# Create each cluster; a variant of make_blobs
stop = 0
for k, centroid in enumerate(centroids):
start, stop = stop, stop + n_samples_per_cluster[k]
y[start:stop] = k % n_classes # assign labels
X_k = X[start:stop, :n_informative] # slice a view of the cluster
A = 2 * generator.rand(n_informative, n_informative) - 1
X_k[...] = np.dot(X_k, A) # introduce random covariance
X_k += centroid # shift the cluster to a vertex
# Create redundant features
if n_redundant > 0:
B = 2 * generator.rand(n_informative, n_redundant) - 1
X[:, n_informative:n_informative + n_redundant] = \
np.dot(X[:, :n_informative], B)
# Repeat some features
if n_repeated > 0:
n = n_informative + n_redundant
indices = ((n - 1) * generator.rand(n_repeated) + 0.5).astype(np.intp)
X[:, n:n + n_repeated] = X[:, indices]
# Fill useless features
if n_useless > 0:
X[:, -n_useless:] = generator.randn(n_samples, n_useless)
# Randomly replace labels
if flip_y >= 0.0:
flip_mask = generator.rand(n_samples) < flip_y
y[flip_mask] = generator.randint(n_classes, size=flip_mask.sum())
# Randomly shift and scale
if shift is None:
shift = (2 * generator.rand(n_features) - 1) * class_sep
X += shift
if scale is None:
scale = 1 + 100 * generator.rand(n_features)
X *= scale
if shuffle:
# Randomly permute samples
X, y = util_shuffle(X, y, random_state=generator)
# Randomly permute features
indices = np.arange(n_features)
generator.shuffle(indices)
X[:, :] = X[:, indices]
return X, y
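# A minimal, doctest-style usage sketch for make_classification above
# (parameter values are arbitrary examples; only shapes and labels are shown):
#
#     >>> X, y = make_classification(n_samples=200, n_features=10,
#     ...                            n_informative=4, random_state=0)
#     >>> X.shape, y.shape
#     ((200, 10), (200,))
#     >>> np.unique(y)
#     array([0, 1])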
def make_multilabel_classification(n_samples=100, n_features=20, n_classes=5,
n_labels=2, length=50, allow_unlabeled=True,
sparse=False, return_indicator='dense',
return_distributions=False,
random_state=None):
"""Generate a random multilabel classification problem.
For each sample, the generative process is:
- pick the number of labels: n ~ Poisson(n_labels)
- n times, choose a class c: c ~ Multinomial(theta)
- pick the document length: k ~ Poisson(length)
- k times, choose a word: w ~ Multinomial(theta_c)
In the above process, rejection sampling is used to make sure that
n is never zero or more than `n_classes`, and that the document length
is never zero. Likewise, we reject classes which have already been chosen.
Read more in the :ref:`User Guide <sample_generators>`.
Parameters
----------
n_samples : int, optional (default=100)
The number of samples.
n_features : int, optional (default=20)
The total number of features.
n_classes : int, optional (default=5)
The number of classes of the classification problem.
n_labels : int, optional (default=2)
The average number of labels per instance. More precisely, the number
of labels per sample is drawn from a Poisson distribution with
``n_labels`` as its expected value, but samples are bounded (using
rejection sampling) by ``n_classes``, and must be nonzero if
``allow_unlabeled`` is False.
length : int, optional (default=50)
The sum of the features (number of words if documents) is drawn from
a Poisson distribution with this expected value.
allow_unlabeled : bool, optional (default=True)
If ``True``, some instances might not belong to any class.
sparse : bool, optional (default=False)
If ``True``, return a sparse feature matrix
.. versionadded:: 0.17
parameter to allow *sparse* output.
return_indicator : 'dense' (default) | 'sparse' | False
If ``dense`` return ``Y`` in the dense binary indicator format. If
``'sparse'`` return ``Y`` in the sparse binary indicator format.
``False`` returns a list of lists of labels.
return_distributions : bool, optional (default=False)
If ``True``, return the prior class probability and conditional
probabilities of features given classes, from which the data was
drawn.
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
Returns
-------
X : array of shape [n_samples, n_features]
The generated samples.
Y : array or sparse CSR matrix of shape [n_samples, n_classes]
The label sets.
p_c : array, shape [n_classes]
The probability of each class being drawn. Only returned if
``return_distributions=True``.
p_w_c : array, shape [n_features, n_classes]
The probability of each feature being drawn given each class.
Only returned if ``return_distributions=True``.
"""
generator = check_random_state(random_state)
p_c = generator.rand(n_classes)
p_c /= p_c.sum()
cumulative_p_c = np.cumsum(p_c)
p_w_c = generator.rand(n_features, n_classes)
p_w_c /= np.sum(p_w_c, axis=0)
def sample_example():
_, n_classes = p_w_c.shape
# pick a nonzero number of labels per document by rejection sampling
y_size = n_classes + 1
while (not allow_unlabeled and y_size == 0) or y_size > n_classes:
y_size = generator.poisson(n_labels)
# pick n classes
y = set()
while len(y) != y_size:
# pick a class with probability P(c)
c = np.searchsorted(cumulative_p_c,
generator.rand(y_size - len(y)))
y.update(c)
y = list(y)
# pick a non-zero document length by rejection sampling
n_words = 0
while n_words == 0:
n_words = generator.poisson(length)
# generate a document of length n_words
if len(y) == 0:
# if sample does not belong to any class, generate noise word
words = generator.randint(n_features, size=n_words)
return words, y
# sample words with replacement from selected classes
cumulative_p_w_sample = p_w_c.take(y, axis=1).sum(axis=1).cumsum()
cumulative_p_w_sample /= cumulative_p_w_sample[-1]
words = np.searchsorted(cumulative_p_w_sample, generator.rand(n_words))
return words, y
X_indices = array.array('i')
X_indptr = array.array('i', [0])
Y = []
for i in range(n_samples):
words, y = sample_example()
X_indices.extend(words)
X_indptr.append(len(X_indices))
Y.append(y)
X_data = np.ones(len(X_indices), dtype=np.float64)
X = sp.csr_matrix((X_data, X_indices, X_indptr),
shape=(n_samples, n_features))
X.sum_duplicates()
if not sparse:
X = X.toarray()
# return_indicator can be True due to backward compatibility
if return_indicator in (True, 'sparse', 'dense'):
lb = MultiLabelBinarizer(sparse_output=(return_indicator == 'sparse'))
Y = lb.fit([range(n_classes)]).transform(Y)
elif return_indicator is not False:
raise ValueError("return_indicator must be either 'sparse', 'dense' "
'or False.')
if return_distributions:
return X, Y, p_c, p_w_c
return X, Y
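# A minimal sketch for make_multilabel_classification above; with the default
# return_indicator='dense', Y is an (n_samples, n_classes) binary indicator:
#
#     >>> X, Y = make_multilabel_classification(n_samples=50, n_features=20,
#     ...                                       n_classes=5, random_state=0)
#     >>> X.shape, Y.shape
#     ((50, 20), (50, 5))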
def make_hastie_10_2(n_samples=12000, random_state=None):
"""Generates data for binary classification used in
Hastie et al. 2009, Example 10.2.
The ten features are standard independent Gaussian and
the target ``y`` is defined by::
y[i] = 1 if np.sum(X[i] ** 2) > 9.34 else -1
Read more in the :ref:`User Guide <sample_generators>`.
Parameters
----------
n_samples : int, optional (default=12000)
The number of samples.
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
Returns
-------
X : array of shape [n_samples, 10]
The input samples.
y : array of shape [n_samples]
The output values.
References
----------
.. [1] T. Hastie, R. Tibshirani and J. Friedman, "Elements of Statistical
Learning Ed. 2", Springer, 2009.
See also
--------
make_gaussian_quantiles: a generalization of this dataset approach
"""
rs = check_random_state(random_state)
shape = (n_samples, 10)
X = rs.normal(size=shape).reshape(shape)
y = ((X ** 2.0).sum(axis=1) > 9.34).astype(np.float64)
y[y == 0.0] = -1.0
return X, y
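# A minimal sketch for make_hastie_10_2 above (targets are +1/-1):
#
#     >>> X, y = make_hastie_10_2(n_samples=1000, random_state=0)
#     >>> X.shape, y.shape
#     ((1000, 10), (1000,))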
def make_regression(n_samples=100, n_features=100, n_informative=10,
n_targets=1, bias=0.0, effective_rank=None,
tail_strength=0.5, noise=0.0, shuffle=True, coef=False,
random_state=None):
"""Generate a random regression problem.
The input set can either be well conditioned (by default) or have a low
rank-fat tail singular profile. See :func:`make_low_rank_matrix` for
more details.
The output is generated by applying a (potentially biased) random linear
regression model with `n_informative` nonzero regressors to the previously
generated input and some gaussian centered noise with some adjustable
scale.
Read more in the :ref:`User Guide <sample_generators>`.
Parameters
----------
n_samples : int, optional (default=100)
The number of samples.
n_features : int, optional (default=100)
The number of features.
n_informative : int, optional (default=10)
The number of informative features, i.e., the number of features used
to build the linear model used to generate the output.
n_targets : int, optional (default=1)
The number of regression targets, i.e., the dimension of the y output
vector associated with a sample. By default, the output is a scalar.
bias : float, optional (default=0.0)
The bias term in the underlying linear model.
effective_rank : int or None, optional (default=None)
if not None:
The approximate number of singular vectors required to explain most
of the input data by linear combinations. Using this kind of
singular spectrum in the input allows the generator to reproduce
the correlations often observed in practice.
if None:
The input set is well conditioned, centered and gaussian with
unit variance.
tail_strength : float between 0.0 and 1.0, optional (default=0.5)
The relative importance of the fat noisy tail of the singular values
profile if `effective_rank` is not None.
noise : float, optional (default=0.0)
The standard deviation of the gaussian noise applied to the output.
shuffle : boolean, optional (default=True)
Shuffle the samples and the features.
coef : boolean, optional (default=False)
If True, the coefficients of the underlying linear model are returned.
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
Returns
-------
X : array of shape [n_samples, n_features]
The input samples.
y : array of shape [n_samples] or [n_samples, n_targets]
The output values.
coef : array of shape [n_features] or [n_features, n_targets], optional
The coefficient of the underlying linear model. It is returned only if
coef is True.
"""
n_informative = min(n_features, n_informative)
generator = check_random_state(random_state)
if effective_rank is None:
# Randomly generate a well conditioned input set
X = generator.randn(n_samples, n_features)
else:
# Randomly generate a low rank, fat tail input set
X = make_low_rank_matrix(n_samples=n_samples,
n_features=n_features,
effective_rank=effective_rank,
tail_strength=tail_strength,
random_state=generator)
# Generate a ground truth model with only n_informative features being non
# zeros (the other features are not correlated to y and should be ignored
# by a sparsifying regularizers such as L1 or elastic net)
ground_truth = np.zeros((n_features, n_targets))
ground_truth[:n_informative, :] = 100 * generator.rand(n_informative,
n_targets)
y = np.dot(X, ground_truth) + bias
# Add noise
if noise > 0.0:
y += generator.normal(scale=noise, size=y.shape)
# Randomly permute samples and features
if shuffle:
X, y = util_shuffle(X, y, random_state=generator)
indices = np.arange(n_features)
generator.shuffle(indices)
X[:, :] = X[:, indices]
ground_truth = ground_truth[indices]
y = np.squeeze(y)
if coef:
return X, y, np.squeeze(ground_truth)
else:
return X, y
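# A minimal sketch for make_regression above; with coef=True the ground-truth
# weights of the underlying linear model are returned as well:
#
#     >>> X, y, w = make_regression(n_samples=150, n_features=20,
#     ...                           n_informative=5, noise=0.1, coef=True,
#     ...                           random_state=0)
#     >>> X.shape, y.shape, w.shape
#     ((150, 20), (150,), (20,))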
def make_circles(n_samples=100, shuffle=True, noise=None, random_state=None,
factor=.8):
"""Make a large circle containing a smaller circle in 2d.
A simple toy dataset to visualize clustering and classification
algorithms.
Read more in the :ref:`User Guide <sample_generators>`.
Parameters
----------
n_samples : int, optional (default=100)
The total number of points generated.
shuffle: bool, optional (default=True)
Whether to shuffle the samples.
noise : double or None (default=None)
Standard deviation of Gaussian noise added to the data.
factor : double < 1 (default=.8)
Scale factor between inner and outer circle.
Returns
-------
X : array of shape [n_samples, 2]
The generated samples.
y : array of shape [n_samples]
The integer labels (0 or 1) for class membership of each sample.
"""
if factor > 1 or factor < 0:
raise ValueError("'factor' has to be between 0 and 1.")
generator = check_random_state(random_state)
# so as not to have the first point = last point, we add one and then
# remove it.
linspace = np.linspace(0, 2 * np.pi, n_samples // 2 + 1)[:-1]
outer_circ_x = np.cos(linspace)
outer_circ_y = np.sin(linspace)
inner_circ_x = outer_circ_x * factor
inner_circ_y = outer_circ_y * factor
X = np.vstack((np.append(outer_circ_x, inner_circ_x),
np.append(outer_circ_y, inner_circ_y))).T
y = np.hstack([np.zeros(n_samples // 2, dtype=np.intp),
np.ones(n_samples // 2, dtype=np.intp)])
if shuffle:
X, y = util_shuffle(X, y, random_state=generator)
if noise is not None:
X += generator.normal(scale=noise, size=X.shape)
return X, y
def make_moons(n_samples=100, shuffle=True, noise=None, random_state=None):
"""Make two interleaving half circles
A simple toy dataset to visualize clustering and classification
algorithms.
Parameters
----------
n_samples : int, optional (default=100)
The total number of points generated.
shuffle : bool, optional (default=True)
Whether to shuffle the samples.
noise : double or None (default=None)
Standard deviation of Gaussian noise added to the data.
Read more in the :ref:`User Guide <sample_generators>`.
Returns
-------
X : array of shape [n_samples, 2]
The generated samples.
y : array of shape [n_samples]
The integer labels (0 or 1) for class membership of each sample.
"""
n_samples_out = n_samples // 2
n_samples_in = n_samples - n_samples_out
generator = check_random_state(random_state)
outer_circ_x = np.cos(np.linspace(0, np.pi, n_samples_out))
outer_circ_y = np.sin(np.linspace(0, np.pi, n_samples_out))
inner_circ_x = 1 - np.cos(np.linspace(0, np.pi, n_samples_in))
inner_circ_y = 1 - np.sin(np.linspace(0, np.pi, n_samples_in)) - .5
X = np.vstack((np.append(outer_circ_x, inner_circ_x),
np.append(outer_circ_y, inner_circ_y))).T
y = np.hstack([np.zeros(n_samples_in, dtype=np.intp),
np.ones(n_samples_out, dtype=np.intp)])
if shuffle:
X, y = util_shuffle(X, y, random_state=generator)
if noise is not None:
X += generator.normal(scale=noise, size=X.shape)
return X, y
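# A minimal sketch for the two 2-d toy generators above (make_circles and
# make_moons); both return points split between two classes:
#
#     >>> X_c, y_c = make_circles(n_samples=100, noise=0.05, random_state=0)
#     >>> X_m, y_m = make_moons(n_samples=100, noise=0.05, random_state=0)
#     >>> X_c.shape, X_m.shape
#     ((100, 2), (100, 2))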
def make_blobs(n_samples=100, n_features=2, centers=3, cluster_std=1.0,
center_box=(-10.0, 10.0), shuffle=True, random_state=None):
"""Generate isotropic Gaussian blobs for clustering.
Read more in the :ref:`User Guide <sample_generators>`.
Parameters
----------
n_samples : int, optional (default=100)
The total number of points equally divided among clusters.
n_features : int, optional (default=2)
The number of features for each sample.
centers : int or array of shape [n_centers, n_features], optional
(default=3)
The number of centers to generate, or the fixed center locations.
cluster_std: float or sequence of floats, optional (default=1.0)
The standard deviation of the clusters.
center_box: pair of floats (min, max), optional (default=(-10.0, 10.0))
The bounding box for each cluster center when centers are
generated at random.
shuffle : boolean, optional (default=True)
Shuffle the samples.
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
Returns
-------
X : array of shape [n_samples, n_features]
The generated samples.
y : array of shape [n_samples]
The integer labels for cluster membership of each sample.
Examples
--------
>>> from sklearn.datasets.samples_generator import make_blobs
>>> X, y = make_blobs(n_samples=10, centers=3, n_features=2,
... random_state=0)
>>> print(X.shape)
(10, 2)
>>> y
array([0, 0, 1, 0, 2, 2, 2, 1, 1, 0])
See also
--------
make_classification: a more intricate variant
"""
generator = check_random_state(random_state)
if isinstance(centers, numbers.Integral):
centers = generator.uniform(center_box[0], center_box[1],
size=(centers, n_features))
else:
centers = check_array(centers)
n_features = centers.shape[1]
if isinstance(cluster_std, numbers.Real):
cluster_std = np.ones(len(centers)) * cluster_std
X = []
y = []
n_centers = centers.shape[0]
n_samples_per_center = [int(n_samples // n_centers)] * n_centers
for i in range(n_samples % n_centers):
n_samples_per_center[i] += 1
for i, (n, std) in enumerate(zip(n_samples_per_center, cluster_std)):
X.append(centers[i] + generator.normal(scale=std,
size=(n, n_features)))
y += [i] * n
X = np.concatenate(X)
y = np.array(y)
if shuffle:
indices = np.arange(n_samples)
generator.shuffle(indices)
X = X[indices]
y = y[indices]
return X, y
def make_friedman1(n_samples=100, n_features=10, noise=0.0, random_state=None):
"""Generate the "Friedman \#1" regression problem
This dataset is described in Friedman [1] and Breiman [2].
Inputs `X` are independent features uniformly distributed on the interval
[0, 1]. The output `y` is created according to the formula::
y(X) = 10 * sin(pi * X[:, 0] * X[:, 1]) + 20 * (X[:, 2] - 0.5) ** 2 \
+ 10 * X[:, 3] + 5 * X[:, 4] + noise * N(0, 1).
Out of the `n_features` features, only 5 are actually used to compute
`y`. The remaining features are independent of `y`.
The number of features has to be >= 5.
Read more in the :ref:`User Guide <sample_generators>`.
Parameters
----------
n_samples : int, optional (default=100)
The number of samples.
n_features : int, optional (default=10)
The number of features. Should be at least 5.
noise : float, optional (default=0.0)
The standard deviation of the gaussian noise applied to the output.
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
Returns
-------
X : array of shape [n_samples, n_features]
The input samples.
y : array of shape [n_samples]
The output values.
References
----------
.. [1] J. Friedman, "Multivariate adaptive regression splines", The Annals
of Statistics 19 (1), pages 1-67, 1991.
.. [2] L. Breiman, "Bagging predictors", Machine Learning 24,
pages 123-140, 1996.
"""
if n_features < 5:
raise ValueError("n_features must be at least five.")
generator = check_random_state(random_state)
X = generator.rand(n_samples, n_features)
y = 10 * np.sin(np.pi * X[:, 0] * X[:, 1]) + 20 * (X[:, 2] - 0.5) ** 2 \
+ 10 * X[:, 3] + 5 * X[:, 4] + noise * generator.randn(n_samples)
return X, y
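# A minimal sketch for make_friedman1 above; only the first five columns of X
# influence y, the remaining features are pure noise:
#
#     >>> X, y = make_friedman1(n_samples=200, n_features=10, random_state=0)
#     >>> X.shape, y.shape
#     ((200, 10), (200,))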
def make_friedman2(n_samples=100, noise=0.0, random_state=None):
"""Generate the "Friedman \#2" regression problem
This dataset is described in Friedman [1] and Breiman [2].
Inputs `X` are 4 independent features uniformly distributed on the
intervals::
0 <= X[:, 0] <= 100,
40 * pi <= X[:, 1] <= 560 * pi,
0 <= X[:, 2] <= 1,
1 <= X[:, 3] <= 11.
The output `y` is created according to the formula::
y(X) = (X[:, 0] ** 2 + (X[:, 1] * X[:, 2] \
- 1 / (X[:, 1] * X[:, 3])) ** 2) ** 0.5 + noise * N(0, 1).
Read more in the :ref:`User Guide <sample_generators>`.
Parameters
----------
n_samples : int, optional (default=100)
The number of samples.
noise : float, optional (default=0.0)
The standard deviation of the gaussian noise applied to the output.
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
Returns
-------
X : array of shape [n_samples, 4]
The input samples.
y : array of shape [n_samples]
The output values.
References
----------
.. [1] J. Friedman, "Multivariate adaptive regression splines", The Annals
of Statistics 19 (1), pages 1-67, 1991.
.. [2] L. Breiman, "Bagging predictors", Machine Learning 24,
pages 123-140, 1996.
"""
generator = check_random_state(random_state)
X = generator.rand(n_samples, 4)
X[:, 0] *= 100
X[:, 1] *= 520 * np.pi
X[:, 1] += 40 * np.pi
X[:, 3] *= 10
X[:, 3] += 1
y = (X[:, 0] ** 2
+ (X[:, 1] * X[:, 2] - 1 / (X[:, 1] * X[:, 3])) ** 2) ** 0.5 \
+ noise * generator.randn(n_samples)
return X, y
def make_friedman3(n_samples=100, noise=0.0, random_state=None):
"""Generate the "Friedman \#3" regression problem
This dataset is described in Friedman [1] and Breiman [2].
Inputs `X` are 4 independent features uniformly distributed on the
intervals::
0 <= X[:, 0] <= 100,
40 * pi <= X[:, 1] <= 560 * pi,
0 <= X[:, 2] <= 1,
1 <= X[:, 3] <= 11.
The output `y` is created according to the formula::
y(X) = arctan((X[:, 1] * X[:, 2] - 1 / (X[:, 1] * X[:, 3])) \
/ X[:, 0]) + noise * N(0, 1).
Read more in the :ref:`User Guide <sample_generators>`.
Parameters
----------
n_samples : int, optional (default=100)
The number of samples.
noise : float, optional (default=0.0)
The standard deviation of the gaussian noise applied to the output.
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
Returns
-------
X : array of shape [n_samples, 4]
The input samples.
y : array of shape [n_samples]
The output values.
References
----------
.. [1] J. Friedman, "Multivariate adaptive regression splines", The Annals
of Statistics 19 (1), pages 1-67, 1991.
.. [2] L. Breiman, "Bagging predictors", Machine Learning 24,
pages 123-140, 1996.
"""
generator = check_random_state(random_state)
X = generator.rand(n_samples, 4)
X[:, 0] *= 100
X[:, 1] *= 520 * np.pi
X[:, 1] += 40 * np.pi
X[:, 3] *= 10
X[:, 3] += 1
y = np.arctan((X[:, 1] * X[:, 2] - 1 / (X[:, 1] * X[:, 3])) / X[:, 0]) \
+ noise * generator.randn(n_samples)
return X, y
def make_low_rank_matrix(n_samples=100, n_features=100, effective_rank=10,
tail_strength=0.5, random_state=None):
"""Generate a mostly low rank matrix with bell-shaped singular values
Most of the variance can be explained by a bell-shaped curve of width
effective_rank: the low rank part of the singular values profile is::
(1 - tail_strength) * exp(-1.0 * (i / effective_rank) ** 2)
The remaining singular values' tail is fat, decreasing as::
tail_strength * exp(-0.1 * i / effective_rank).
The low rank part of the profile can be considered the structured
signal part of the data while the tail can be considered the noisy
part of the data that cannot be summarized by a low number of linear
components (singular vectors).
This kind of singular profile is often seen in practice, for instance:
- gray level pictures of faces
- TF-IDF vectors of text documents crawled from the web
Read more in the :ref:`User Guide <sample_generators>`.
Parameters
----------
n_samples : int, optional (default=100)
The number of samples.
n_features : int, optional (default=100)
The number of features.
effective_rank : int, optional (default=10)
The approximate number of singular vectors required to explain most of
the data by linear combinations.
tail_strength : float between 0.0 and 1.0, optional (default=0.5)
The relative importance of the fat noisy tail of the singular values
profile.
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
Returns
-------
X : array of shape [n_samples, n_features]
The matrix.
"""
generator = check_random_state(random_state)
n = min(n_samples, n_features)
# Random (ortho normal) vectors
u, _ = linalg.qr(generator.randn(n_samples, n), mode='economic')
v, _ = linalg.qr(generator.randn(n_features, n), mode='economic')
# Index of the singular values
singular_ind = np.arange(n, dtype=np.float64)
# Build the singular profile by assembling signal and noise components
low_rank = ((1 - tail_strength) *
np.exp(-1.0 * (singular_ind / effective_rank) ** 2))
tail = tail_strength * np.exp(-0.1 * singular_ind / effective_rank)
s = np.identity(n) * (low_rank + tail)
return np.dot(np.dot(u, s), v.T)
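# A minimal sketch for make_low_rank_matrix above; most of the spectral energy
# is concentrated in roughly `effective_rank` singular values:
#
#     >>> X = make_low_rank_matrix(n_samples=50, n_features=25,
#     ...                          effective_rank=5, random_state=0)
#     >>> X.shape
#     (50, 25)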
def make_sparse_coded_signal(n_samples, n_components, n_features,
n_nonzero_coefs, random_state=None):
"""Generate a signal as a sparse combination of dictionary elements.
Returns a matrix Y = DX, such that D is (n_features, n_components),
X is (n_components, n_samples) and each column of X has exactly
n_nonzero_coefs non-zero elements.
Read more in the :ref:`User Guide <sample_generators>`.
Parameters
----------
n_samples : int
number of samples to generate
n_components: int,
number of components in the dictionary
n_features : int
number of features of the dataset to generate
n_nonzero_coefs : int
number of active (non-zero) coefficients in each sample
random_state: int or RandomState instance, optional (default=None)
seed used by the pseudo random number generator
Returns
-------
data: array of shape [n_features, n_samples]
The encoded signal (Y).
dictionary: array of shape [n_features, n_components]
The dictionary with normalized components (D).
code: array of shape [n_components, n_samples]
The sparse code such that each column of this matrix has exactly
n_nonzero_coefs non-zero items (X).
"""
generator = check_random_state(random_state)
# generate dictionary
D = generator.randn(n_features, n_components)
D /= np.sqrt(np.sum((D ** 2), axis=0))
# generate code
X = np.zeros((n_components, n_samples))
for i in range(n_samples):
idx = np.arange(n_components)
generator.shuffle(idx)
idx = idx[:n_nonzero_coefs]
X[idx, i] = generator.randn(n_nonzero_coefs)
# encode signal
Y = np.dot(D, X)
return map(np.squeeze, (Y, D, X))
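# A minimal sketch for make_sparse_coded_signal above; the signal Y is exactly
# the product of the dictionary D and the sparse code X:
#
#     >>> Y, D, X = make_sparse_coded_signal(n_samples=10, n_components=30,
#     ...                                    n_features=20, n_nonzero_coefs=5,
#     ...                                    random_state=0)
#     >>> Y.shape, D.shape, X.shape
#     ((20, 10), (20, 30), (30, 10))
#     >>> np.allclose(Y, np.dot(D, X))
#     True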
def make_sparse_uncorrelated(n_samples=100, n_features=10, random_state=None):
"""Generate a random regression problem with sparse uncorrelated design
This dataset is described in Celeux et al. [1] as::
X ~ N(0, 1)
y(X) = X[:, 0] + 2 * X[:, 1] - 2 * X[:, 2] - 1.5 * X[:, 3]
Only the first 4 features are informative. The remaining features are
useless.
Read more in the :ref:`User Guide <sample_generators>`.
Parameters
----------
n_samples : int, optional (default=100)
The number of samples.
n_features : int, optional (default=10)
The number of features.
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
Returns
-------
X : array of shape [n_samples, n_features]
The input samples.
y : array of shape [n_samples]
The output values.
References
----------
.. [1] G. Celeux, M. El Anbari, J.-M. Marin, C. P. Robert,
"Regularization in regression: comparing Bayesian and frequentist
methods in a poorly informative situation", 2009.
"""
generator = check_random_state(random_state)
X = generator.normal(loc=0, scale=1, size=(n_samples, n_features))
y = generator.normal(loc=(X[:, 0] +
2 * X[:, 1] -
2 * X[:, 2] -
1.5 * X[:, 3]), scale=np.ones(n_samples))
return X, y
def make_spd_matrix(n_dim, random_state=None):
"""Generate a random symmetric, positive-definite matrix.
Read more in the :ref:`User Guide <sample_generators>`.
Parameters
----------
n_dim : int
The matrix dimension.
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
Returns
-------
X : array of shape [n_dim, n_dim]
The random symmetric, positive-definite matrix.
See also
--------
make_sparse_spd_matrix
"""
generator = check_random_state(random_state)
A = generator.rand(n_dim, n_dim)
U, s, V = linalg.svd(np.dot(A.T, A))
X = np.dot(np.dot(U, 1.0 + np.diag(generator.rand(n_dim))), V)
return X
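# A minimal sketch for make_spd_matrix above; the result is symmetric with
# strictly positive eigenvalues:
#
#     >>> M = make_spd_matrix(n_dim=4, random_state=0)
#     >>> M.shape, bool(np.all(np.linalg.eigvalsh(M) > 0))
#     ((4, 4), True)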
def make_sparse_spd_matrix(dim=1, alpha=0.95, norm_diag=False,
smallest_coef=.1, largest_coef=.9,
random_state=None):
"""Generate a sparse symmetric definite positive matrix.
Read more in the :ref:`User Guide <sample_generators>`.
Parameters
----------
dim: integer, optional (default=1)
The size of the random matrix to generate.
alpha: float between 0 and 1, optional (default=0.95)
The probability that a coefficient is zero (see notes). Larger values
enforce more sparsity.
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
largest_coef : float between 0 and 1, optional (default=0.9)
The value of the largest coefficient.
smallest_coef : float between 0 and 1, optional (default=0.1)
The value of the smallest coefficient.
norm_diag : boolean, optional (default=False)
Whether to normalize the output matrix to make the leading diagonal
elements all 1
Returns
-------
prec : sparse matrix of shape (dim, dim)
The generated matrix.
Notes
-----
The sparsity is actually imposed on the cholesky factor of the matrix.
Thus alpha does not translate directly into the filling fraction of
the matrix itself.
See also
--------
make_spd_matrix
"""
random_state = check_random_state(random_state)
chol = -np.eye(dim)
aux = random_state.rand(dim, dim)
aux[aux < alpha] = 0
aux[aux > alpha] = (smallest_coef
+ (largest_coef - smallest_coef)
* random_state.rand(np.sum(aux > alpha)))
aux = np.tril(aux, k=-1)
# Permute the lines: we don't want to have asymmetries in the final
# SPD matrix
permutation = random_state.permutation(dim)
aux = aux[permutation].T[permutation]
chol += aux
prec = np.dot(chol.T, chol)
if norm_diag:
# Form the diagonal vector into a row matrix
d = np.diag(prec).reshape(1, prec.shape[0])
d = 1. / np.sqrt(d)
prec *= d
prec *= d.T
return prec
def make_swiss_roll(n_samples=100, noise=0.0, random_state=None):
"""Generate a swiss roll dataset.
Read more in the :ref:`User Guide <sample_generators>`.
Parameters
----------
n_samples : int, optional (default=100)
The number of sample points on the S curve.
noise : float, optional (default=0.0)
The standard deviation of the gaussian noise.
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
Returns
-------
X : array of shape [n_samples, 3]
The points.
t : array of shape [n_samples]
The univariate position of the sample according to the main dimension
of the points in the manifold.
Notes
-----
The algorithm is from Marsland [1].
References
----------
.. [1] S. Marsland, "Machine Learning: An Algorithmic Perspective",
Chapter 10, 2009.
http://seat.massey.ac.nz/personal/s.r.marsland/Code/10/lle.py
"""
generator = check_random_state(random_state)
t = 1.5 * np.pi * (1 + 2 * generator.rand(1, n_samples))
x = t * np.cos(t)
y = 21 * generator.rand(1, n_samples)
z = t * np.sin(t)
X = np.concatenate((x, y, z))
X += noise * generator.randn(3, n_samples)
X = X.T
t = np.squeeze(t)
return X, t
def make_s_curve(n_samples=100, noise=0.0, random_state=None):
"""Generate an S curve dataset.
Read more in the :ref:`User Guide <sample_generators>`.
Parameters
----------
n_samples : int, optional (default=100)
The number of sample points on the S curve.
noise : float, optional (default=0.0)
The standard deviation of the gaussian noise.
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
Returns
-------
X : array of shape [n_samples, 3]
The points.
t : array of shape [n_samples]
The univariate position of the sample according to the main dimension
of the points in the manifold.
"""
generator = check_random_state(random_state)
t = 3 * np.pi * (generator.rand(1, n_samples) - 0.5)
x = np.sin(t)
y = 2.0 * generator.rand(1, n_samples)
z = np.sign(t) * (np.cos(t) - 1)
X = np.concatenate((x, y, z))
X += noise * generator.randn(3, n_samples)
X = X.T
t = np.squeeze(t)
return X, t
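# A minimal sketch for the two manifold generators above (make_swiss_roll and
# make_s_curve); both return 3-d points plus a 1-d position on the manifold:
#
#     >>> X_roll, t_roll = make_swiss_roll(n_samples=100, random_state=0)
#     >>> X_s, t_s = make_s_curve(n_samples=100, random_state=0)
#     >>> X_roll.shape, t_roll.shape, X_s.shape
#     ((100, 3), (100,), (100, 3))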
def make_gaussian_quantiles(mean=None, cov=1., n_samples=100,
n_features=2, n_classes=3,
shuffle=True, random_state=None):
"""Generate isotropic Gaussian and label samples by quantile
This classification dataset is constructed by taking a multi-dimensional
standard normal distribution and defining classes separated by nested
concentric multi-dimensional spheres such that roughly equal numbers of
samples are in each class (quantiles of the :math:`\chi^2` distribution).
Read more in the :ref:`User Guide <sample_generators>`.
Parameters
----------
mean : array of shape [n_features], optional (default=None)
The mean of the multi-dimensional normal distribution.
If None then use the origin (0, 0, ...).
cov : float, optional (default=1.)
The covariance matrix will be this value times the unit matrix. This
dataset only produces symmetric normal distributions.
n_samples : int, optional (default=100)
The total number of points equally divided among classes.
n_features : int, optional (default=2)
The number of features for each sample.
n_classes : int, optional (default=3)
The number of classes
shuffle : boolean, optional (default=True)
Shuffle the samples.
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
Returns
-------
X : array of shape [n_samples, n_features]
The generated samples.
y : array of shape [n_samples]
The integer labels for quantile membership of each sample.
Notes
-----
The dataset is from Zhu et al [1].
References
----------
.. [1] J. Zhu, H. Zou, S. Rosset, T. Hastie, "Multi-class AdaBoost", 2009.
"""
if n_samples < n_classes:
raise ValueError("n_samples must be at least n_classes")
generator = check_random_state(random_state)
if mean is None:
mean = np.zeros(n_features)
else:
mean = np.array(mean)
# Build multivariate normal distribution
X = generator.multivariate_normal(mean, cov * np.identity(n_features),
(n_samples,))
# Sort by distance from origin
idx = np.argsort(np.sum((X - mean[np.newaxis, :]) ** 2, axis=1))
X = X[idx, :]
# Label by quantile
step = n_samples // n_classes
y = np.hstack([np.repeat(np.arange(n_classes), step),
np.repeat(n_classes - 1, n_samples - step * n_classes)])
if shuffle:
X, y = util_shuffle(X, y, random_state=generator)
return X, y
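# A minimal sketch for make_gaussian_quantiles above; 90 samples are split
# evenly across 3 concentric classes:
#
#     >>> X, y = make_gaussian_quantiles(n_samples=90, n_features=2,
#     ...                                n_classes=3, random_state=0)
#     >>> X.shape, np.bincount(y)
#     ((90, 2), array([30, 30, 30]))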
def _shuffle(data, random_state=None):
generator = check_random_state(random_state)
n_rows, n_cols = data.shape
row_idx = generator.permutation(n_rows)
col_idx = generator.permutation(n_cols)
result = data[row_idx][:, col_idx]
return result, row_idx, col_idx
def make_biclusters(shape, n_clusters, noise=0.0, minval=10,
maxval=100, shuffle=True, random_state=None):
"""Generate an array with constant block diagonal structure for
biclustering.
Read more in the :ref:`User Guide <sample_generators>`.
Parameters
----------
shape : iterable (n_rows, n_cols)
The shape of the result.
n_clusters : integer
The number of biclusters.
noise : float, optional (default=0.0)
The standard deviation of the gaussian noise.
minval : int, optional (default=10)
Minimum value of a bicluster.
maxval : int, optional (default=100)
Maximum value of a bicluster.
shuffle : boolean, optional (default=True)
Shuffle the samples.
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
Returns
-------
X : array of shape `shape`
The generated array.
rows : array of shape (n_clusters, X.shape[0],)
The indicators for cluster membership of each row.
cols : array of shape (n_clusters, X.shape[1],)
The indicators for cluster membership of each column.
References
----------
.. [1] Dhillon, I. S. (2001, August). Co-clustering documents and
words using bipartite spectral graph partitioning. In Proceedings
of the seventh ACM SIGKDD international conference on Knowledge
discovery and data mining (pp. 269-274). ACM.
See also
--------
make_checkerboard
"""
generator = check_random_state(random_state)
n_rows, n_cols = shape
consts = generator.uniform(minval, maxval, n_clusters)
# row and column clusters of approximately equal sizes
row_sizes = generator.multinomial(n_rows,
np.repeat(1.0 / n_clusters,
n_clusters))
col_sizes = generator.multinomial(n_cols,
np.repeat(1.0 / n_clusters,
n_clusters))
row_labels = np.hstack(list(np.repeat(val, rep) for val, rep in
zip(range(n_clusters), row_sizes)))
col_labels = np.hstack(list(np.repeat(val, rep) for val, rep in
zip(range(n_clusters), col_sizes)))
result = np.zeros(shape, dtype=np.float64)
for i in range(n_clusters):
selector = np.outer(row_labels == i, col_labels == i)
result[selector] += consts[i]
if noise > 0:
result += generator.normal(scale=noise, size=result.shape)
if shuffle:
result, row_idx, col_idx = _shuffle(result, random_state)
row_labels = row_labels[row_idx]
col_labels = col_labels[col_idx]
rows = np.vstack(row_labels == c for c in range(n_clusters))
cols = np.vstack(col_labels == c for c in range(n_clusters))
return result, rows, cols
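# A minimal sketch for make_biclusters above; `rows` and `cols` are boolean
# indicator arrays with one row per bicluster:
#
#     >>> data, rows, cols = make_biclusters(shape=(30, 20), n_clusters=3,
#     ...                                    random_state=0)
#     >>> data.shape, rows.shape, cols.shape
#     ((30, 20), (3, 30), (3, 20))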
def make_checkerboard(shape, n_clusters, noise=0.0, minval=10,
maxval=100, shuffle=True, random_state=None):
"""Generate an array with block checkerboard structure for
biclustering.
Read more in the :ref:`User Guide <sample_generators>`.
Parameters
----------
shape : iterable (n_rows, n_cols)
The shape of the result.
n_clusters : integer or iterable (n_row_clusters, n_column_clusters)
The number of row and column clusters.
noise : float, optional (default=0.0)
The standard deviation of the gaussian noise.
minval : int, optional (default=10)
Minimum value of a bicluster.
maxval : int, optional (default=100)
Maximum value of a bicluster.
shuffle : boolean, optional (default=True)
Shuffle the samples.
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
Returns
-------
X : array of shape `shape`
The generated array.
rows : array of shape (n_clusters, X.shape[0],)
The indicators for cluster membership of each row.
cols : array of shape (n_clusters, X.shape[1],)
The indicators for cluster membership of each column.
References
----------
.. [1] Kluger, Y., Basri, R., Chang, J. T., & Gerstein, M. (2003).
Spectral biclustering of microarray data: coclustering genes
and conditions. Genome research, 13(4), 703-716.
See also
--------
make_biclusters
"""
generator = check_random_state(random_state)
if hasattr(n_clusters, "__len__"):
n_row_clusters, n_col_clusters = n_clusters
else:
n_row_clusters = n_col_clusters = n_clusters
# row and column clusters of approximately equal sizes
n_rows, n_cols = shape
row_sizes = generator.multinomial(n_rows,
np.repeat(1.0 / n_row_clusters,
n_row_clusters))
col_sizes = generator.multinomial(n_cols,
np.repeat(1.0 / n_col_clusters,
n_col_clusters))
row_labels = np.hstack(list(np.repeat(val, rep) for val, rep in
zip(range(n_row_clusters), row_sizes)))
col_labels = np.hstack(list(np.repeat(val, rep) for val, rep in
zip(range(n_col_clusters), col_sizes)))
result = np.zeros(shape, dtype=np.float64)
for i in range(n_row_clusters):
for j in range(n_col_clusters):
selector = np.outer(row_labels == i, col_labels == j)
result[selector] += generator.uniform(minval, maxval)
if noise > 0:
result += generator.normal(scale=noise, size=result.shape)
if shuffle:
result, row_idx, col_idx = _shuffle(result, random_state)
row_labels = row_labels[row_idx]
col_labels = col_labels[col_idx]
rows = np.vstack(row_labels == label
for label in range(n_row_clusters)
for _ in range(n_col_clusters))
cols = np.vstack(col_labels == label
for _ in range(n_row_clusters)
for label in range(n_col_clusters))
return result, rows, cols
| bsd-3-clause |
LennonLab/Emergence | figure_code/MacroecologyPatterns/DiversityAbundanceScaling.py | 8 | 3887 | from __future__ import division
import matplotlib.pyplot as plt
import pandas as pd
import numpy as np
import os
import sys
import scipy as sc
from scipy import stats
import statsmodels.stats.api as sms
import statsmodels.api as sm
import statsmodels.formula.api as smf
from statsmodels.stats.outliers_influence import summary_table
mydir = os.path.expanduser('~/GitHub/Emergence')
sys.path.append(mydir+'/tools')
mydir2 = os.path.expanduser("~/")
def xfrm(X, _max): return -np.log10(_max - np.array(X))
def figplot(x, y, xlab, ylab, fig, n, binned = 1):
'''main figure plotting function'''
fig.add_subplot(2, 2, n)
y2 = list(y)
x2 = list(x)
if binned == 1:
X, Y = (np.array(t) for t in zip(*sorted(zip(x2, y2))))
Xi = xfrm(X, max(X)*1.05)
bins = np.linspace(np.min(Xi), np.max(Xi)+1, 100)
ii = np.digitize(Xi, bins)
y2 = np.array([np.mean(Y[ii==i]) for i in range(1, len(bins)) if len(Y[ii==i]) > 0])
x2 = np.array([np.mean(X[ii==i]) for i in range(1, len(bins)) if len(X[ii==i]) > 0])
d = pd.DataFrame({'x': list(x2)})
d['y'] = list(y2)
f = smf.ols('y ~ x', d).fit()
m, b, r, p, std_err = stats.linregress(x2, y2)
st, data, ss2 = summary_table(f, alpha=0.05)
fitted = data[:,2]
mean_ci_low, mean_ci_upp = data[:,4:6].T
ci_low, ci_upp = data[:,6:8].T
x2, y2, fitted, ci_low, ci_upp = zip(*sorted(zip(x2, y2, fitted, ci_low, ci_upp)))
if n == 1:
lbl = r'$rarity$'+ ' = '+str(round(10**b,2))+'*'+r'$N$'+'$^{'+str(round(m,2))+'}$'+'\n'+r'$r^2$' + '=' +str(round(r**2,2))
elif n == 2:
lbl = r'$Nmax$'+ ' = '+str(round(10**b,2))+'*'+r'$N$'+'$^{'+str(round(m,2))+'}$'+'\n'+r'$r^2$' + '=' +str(round(r**2,2))
elif n == 3:
lbl = r'$Ev$'+ ' = '+str(round(10**b,2))+'*'+r'$N$'+'$^{'+str(round(m,2))+'}$'+'\n'+ r'$r^2$' + '=' + str(round(r**2,2))
elif n == 4:
lbl = r'$S$'+ ' = '+str(round(10**b,2))+'*'+r'$N$'+'$^{'+str(round(m,2))+'}$'+'\n'+r'$r^2$' + '=' + str(round(r**2,2))
plt.scatter(x2, y2, color = 'SkyBlue', alpha= 1 , s = 12, linewidths=0.5, edgecolor='Steelblue', label=lbl)
if n == 3:
plt.legend(loc='best', fontsize=6, frameon=False)
else:
plt.legend(loc=2, fontsize=6, frameon=False)
plt.fill_between(x2, ci_upp, ci_low, color='b', lw=0.1, alpha=0.15)
plt.plot(x2, fitted, color='b', ls='--', lw=1.0, alpha=0.9)
plt.xlabel(xlab, fontsize=11)
plt.ylabel(ylab, fontsize=11)
plt.tick_params(axis='both', labelsize=6)
plt.xlim(0.9*min(x2), 1.1*max(x2))
plt.ylim(min(ci_low), max(ci_upp))
return fig
df = pd.read_csv(mydir + '/results/simulated_data/SimData.csv')
df2 = pd.DataFrame({'length' : df['length']})
df2['N'] = np.log10(df['total.abundance'].groupby(df['sim']).max())
df2['D'] = np.log10(df['N.max'].groupby(df['sim']).max())
df2['S'] = np.log10(df['species.richness'].groupby(df['sim']).max())
df2['E'] = np.log10(df['simpson.e'].groupby(df['sim']).min())
df2['R'] = np.log10(df['logmod.skew'].groupby(df['sim']).max())
df2 = df2.replace([np.inf, -np.inf], np.nan).dropna()
fig = plt.figure(figsize=(5, 4))
xlab = '$log$'+r'$_{10}$'+'($N$)'
ylab = 'Rarity, '+r'$log_{10}$'
fig = figplot(df2['N'], df2['R'], xlab, ylab, fig, 1)
xlab = '$log$'+r'$_{10}$'+'($N$)'
ylab = 'Dominance, '+r'$log_{10}$'
fig = figplot(df2['N'], df2['D'], xlab, ylab, fig, 2)
xlab = '$log$'+r'$_{10}$'+'($N$)'
ylab = 'Evenness, ' +r'$log_{10}$'
fig = figplot(df2['N'], df2['E'], xlab, ylab, fig, 3)
xlab = '$log$'+r'$_{10}$'+'($N$)'
ylab = 'Richness, ' +r'$log_{10}$'
fig = figplot(df2['N'], df2['S'], xlab, ylab, fig, 4)
#### Final Format and Save #####################################################
plt.subplots_adjust(wspace=0.4, hspace=0.4)
plt.savefig(mydir + '/results/figures/DiversityAbundanceScaling.png', dpi=600, bbox_inches = "tight")
plt.close()
| mit |
gerritgr/LumPy | ame.py | 1 | 10946 | #!/usr/bin/env python3
# -*- coding: UTF-8 -*-
"""
This script generates code which implements AME lumping for a given model.
The generated script (placed in the ./output directory by default) runs independently of the toolset.
The autorun flag allows it to call the generated code directly after creation.
If the number of clusters (i.e. bin_num) is set to auto, the code is generated and executed until the stopping criterion (in evaluation.py) is fulfilled.
Additional output is written into LumpingLog.log (see utilities.py for logging options).
Caution:
The code uses eval/exec, please use with sanitized input only.
Existing files are overwritten without warning.
Example usage and arguments:
python ame.py model/SIR.model # to generate a script of SIR.model
See the README.md for more optinos.
For more information we refer to:
Kyriakopoulos et al. "Lumping of Degree Based Mean Field and Pair Approximation Equations for Multi State Contact Processes"
Website:
https://mosi.uni-saarland.de/?page_id=lumpy
Tested with Python 3.5.2.
"""
__author__ = "Gerrit Grossmann"
__copyright__ = "Copyright 2016, Gerrit Grossmann, Group of Modeling and Simulation at Saarland University"
__license__ = "GNU GPLv3"
__version__ = "0.1"
__email__ = "[email protected]"
#------------------------------------------------------
# Code Starts Here
#------------------------------------------------------
import model_parser
import numpy as np
from utilities import *
import sys
import sympy
import time
import pandas as pd
import os
from LumpEngine import lump
from ClusterEngine import clustering, plot_clustering
from ExprGenerator import gen_ame, gen_beta
sys.dont_write_bytecode = True
import scipy
import matplotlib
matplotlib.use('agg') #run without an X-server
from stopping_heuristic import *
#------------------------------------------------------
# Generate ODE expressions
#------------------------------------------------------
def generate_line(line_def_tuple):
# helper function for generate_odes, generates a single equation
s, m, model = line_def_tuple
k = np.sum(m)
line = gen_ame(s,k,m,model['independent_rules'],model['contact_rules'],model['states'])
return [line, m, k, s, model['degree_distribution'][k]]
def generate_odes(model):
# generates equations and beta-expressions
import itertools
from multiprocessing import Pool, cpu_count
states = model['states']
pool = Pool(cpu_count())
state_combinatinos = list(itertools.product(states,states,states))
beta_exprs = pool.map(gen_beta, state_combinatinos)
pool.close()
pool.join()
pool = Pool(cpu_count())
degrees = range(model['k_max']+1)
m_vecs = list()
for k in degrees:
m_vecs += sorted(list(m_k_of(k, len(states))))
formula_combinations = list(itertools.product(states,m_vecs, [model]))
odes = pool.map(generate_line, formula_combinations)
pool.close()
pool.join()
return odes, beta_exprs
def generate_odes_old(model):
beta_exprs = list()
odes = list()
states = model['states']
for s in states:
for s1 in states:
for s2 in states:
line = gen_beta(s, s1, s2)
beta_exprs += [line]
for s in states:
for k in range(model['k_max']+1):
for m in sorted(list(m_k_of(k, len(states)))):
line = gen_ame(s,k,m,model['independent_rules'],model['contact_rules'],states)
odes.append([line, m, k, s, model['degree_distribution'][k]])
return odes, beta_exprs
#------------------------------------------------------
# Convert to pandas dataframe
#------------------------------------------------------
def to_dataframe(odes):
# genereate dataframe from line, probably unnecessary in future
# should probably be done directly in generate_odes
labels = 'ode,neighborhood,degree,state,weight'.split(',')
ode_frame = pd.DataFrame.from_records(odes, columns=labels)
return ode_frame
#------------------------------------------------------
# Normalize initial values
#------------------------------------------------------
# All values corresponding to one particular degree should add up to one.
# This block normalizes the fractions/probabilities accordingly.
# Either a multinomial distribution or a uniform distribution is used
# for different neighborhood vectors.
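# A small illustrative sketch of the degree-wise multinomial initialization
# (scipy.stats.multinomial is used here only as a stand-in for the project's
# multinomial_pmf helper from utilities; the state fractions and the degree
# below are made-up illustration values):
#
#     from scipy.stats import multinomial
#     init_dist = [0.9, 0.1]   # e.g. initial fractions of states S and I
#     k = 3                    # node degree
#     # probability of the neighborhood vector m = (2 S-neighbors, 1 I-neighbor)
#     p_m = multinomial.pmf([2, 1], n=k, p=init_dist)
#
# Summing p_m over all m with sum(m) == k equals one, which is why the raw
# values belonging to one degree are normalized degree-wise below.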
def normalize_init(ode_frame, model):
init_dist = create_normalized_np(model['initial_distribution'])
states = model['states']
if 'init' in model and model['init'].lower().strip() == 'uniform':
logger.info('use uniform init (degree-wise)')
else:
logger.info('use multinomial init (degree-wise)')
def get_init_prob(row):
m = row[0]
s = row[1]
density = multinomial_pmf(m,init_dist) if np.sum(m) > 0 else 1.0
state_scale = init_dist[model['states'].index(s)]
if 'init' in model and model['init'].lower().strip() == 'uniform':
result = state_scale
else:
result = density * state_scale
return result
ode_frame['init_raw'] = ode_frame[['neighborhood', 'state']].apply(get_init_prob , axis=1)
model['max_cluster'] = len(set(ode_frame['neighborhood'].tolist()))
logger.info('Number of equations per state is: '+str(model['max_cluster']))
degree_count = {k: 0 for k in range(model['k_max']+1)}
sum_dict = {k: 0.0 for k in range(model['k_max']+1)}
for _, row in ode_frame.iterrows():
sum_dict[row['degree']] += row['init_raw']
degree_count[row['degree']] += 1
ode_frame['initial_value'] = ode_frame.apply(lambda row: 0 if sum_dict[row['degree']] == 0 else row['init_raw']/sum_dict[row['degree']] , axis=1)
del ode_frame['init_raw']
ode_frame['degree_count'] = ode_frame.apply(lambda row: degree_count[row['degree']] , axis=1)
ode_frame.to_csv(model['output_dir']+'ame_frame_original_{}.csv'.format(model['name']), header='sep=,')
#------------------------------------------------------
# Cluster ODEs
#------------------------------------------------------
# Next, we apply the lumping from LumpEngine.py
# We fist define the clusters, during the lumping we substitute (and scale) variables
# After that we need to compute some characteristic values for each cluster, to make the
# aggregated equations mathematically sound (e.g. mixed_mom_matrix).
def apply_lumping(ode_frame, model):
if model['bin_num'] == -1:
model['bin_num'] = np.sum([elemsin_k_vec_with_sum_m(len(model['states']),k) for k in range(model['k_max']+1)]) #TODO unused?
cluster_dict = clustering(model)
model['actual_cluster_number'] = len(set(cluster_dict.values()))
logger.info('Actual cluster number is: '+str(model['actual_cluster_number']))
plot_clustering(cluster_dict, model['output_path'][:-3]+'_clustering.pdf')
ode_frame['cluster_indicator'] = ode_frame.apply(lambda row: '{}_#_{}'.format(cluster_dict[row['neighborhood']],row['state']), axis=1)
logger.info('Start lumping.')
if 'scale_during_substitution' in model and 'equal_weight' in model:
ode_lumpy = lump(ode_frame, scale_during_substitution = eval(model['scale_during_substitution']), equal_weight = eval(model['equal_weight']))
elif 'scale_during_substitution' in model:
ode_lumpy = lump(ode_frame, scale_during_substitution = eval(model['scale_during_substitution']))
elif 'equal_weight' in model:
ode_lumpy = lump(ode_frame, equal_weight = eval(model['equal_weight']))
else:
ode_lumpy = lump(ode_frame)
logger.info('Lumping done.')
def agg_mvec(line): #TODO make this part of lumping with sympy
weight = line['weight_normalized']
mvecs = line['neighborhood']
assert(len(weight) == len(mvecs))
base = np.zeros(len(mvecs[0]))
for i in range(len(mvecs)):
m = np.array(mvecs[i])*weight[i] *line['degree_count_avg']/line['degree_count'][i]
base += m
return tuple(base)
def agg_mixedmom(line):
weight = line['weight_normalized']
mvecs = line['neighborhood']
assert(len(weight) == len(mvecs))
base = np.zeros([len(mvecs[0]),len(mvecs[0])])
for i in range(len(mvecs)):
m = mvecs[i]
w = weight[i]
scale = line['degree_count_avg']/line['degree_count'][i]
for i1, s1 in enumerate(m):
for i2, s2 in enumerate(m):
v = m[i1]*m[i2]*scale *w
base[i1,i2] += v
return repr(base).replace('array(','').replace(')','')
ode_lumpy['m'] = ode_lumpy.apply(agg_mvec, axis=1)
ode_lumpy['mixed_mom_matrix'] = ode_lumpy.apply(agg_mixedmom, axis=1)
ode_lumpy['name'] = ode_lumpy.apply(lambda l: l['ode'].split('=')[0].replace('dt_x["','').replace('"]',''), axis=1)
ode_lumpy = ode_lumpy.rename(columns={'weight_sum':'degree_prob_sum'})
ode_lumpy['state'] = ode_lumpy.apply(lambda l: l['state'][0], axis=1)
return ode_lumpy
#------------------------------------------------------
# Write Data
#------------------------------------------------------
def write_data(ode_lumpy, beta_exprs, model):
ode_lumpy.to_csv(model['output_dir']+'ame_frame_lumped_{}.csv'.format(model['name']), header='sep=,')
ode_str = ''
for line in beta_exprs:
ode_str += '\t' + str(line) + '\n'
for _, ode in ode_lumpy.iterrows():
ode_str += '\t{ode}\n'.format(ode=ode['ode'])
model['ode_text'] = ode_str
return genrate_file_ame(model)
#------------------------------------------------------
# Solve ODE
#------------------------------------------------------
def solve_ode(model):
from time import sleep
logger.info('Start ODE solver.')
folderpath = model['output_dir']
filename = model['output_name']
sys.path.append(folderpath)
exec('import {} as odecode'.format(filename[:-3]), globals())
results, t, time_elapsed = odecode.plot()
model['trajectories'] = results
model['time'] = t
model['time_elapsed'] = time_elapsed
logger.info('ODE solver done.')
#------------------------------------------------------
# Main
#------------------------------------------------------
def generate_and_solve(model, autorun, unbinned):
model_parser.set_modelpaths(model, overwrite_dir=False) # to make paths consistent
if unbinned:
model['bin_num'] = -1
logger.info('Generate ODEs.')
odes, beta_exprs = generate_odes(model)
logger.info('Generate ODEs finished.')
ode_frame = to_dataframe(odes)
model['neighborhood'] = list(set(ode_frame['neighborhood']))
normalize_init(ode_frame, model)
ode_lumpy = apply_lumping(ode_frame, model)
logger.info('Write File.')
outpath = write_data(ode_lumpy, beta_exprs, model)
logger.info('Filepath:\t'+outpath)
if autorun:
sol = solve_ode(model)
model['sol'] = sol
logger.info('Done.')
return model
def main(modelpath, autorun, unbinned):
model = read_model(modelpath)
if model['bin_num'] == -1 and not unbinned:
return stopping_heuristic(modelpath)
return generate_and_solve(model, autorun, unbinned)
if __name__ == '__main__':
import argparse
parser = argparse.ArgumentParser()
parser.add_argument('model', help="path to modelfile")
parser.add_argument('--noautorun', action='store_true', help="generate code without executing it")
parser.add_argument('--nolumping', action='store_true', help="generate original equations without lumping")
args = parser.parse_args()
main(args.model, not args.noautorun, args.nolumping)
| gpl-3.0 |
vinodkc/spark | python/pyspark/pandas/typedef/typehints.py | 3 | 18999 | #
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""
Utilities to deal with types. This is mostly focused on python3.
"""
import datetime
import decimal
from inspect import getfullargspec, isclass
from typing import ( # noqa: F401
Any,
Callable,
Dict,
Generic,
List,
Optional,
Tuple,
TypeVar,
Union,
cast,
)
import numpy as np
import pandas as pd
from pandas.api.types import CategoricalDtype, pandas_dtype
from pandas.api.extensions import ExtensionDtype
try:
from pandas import Int8Dtype, Int16Dtype, Int32Dtype, Int64Dtype
extension_dtypes_available = True
extension_dtypes = (Int8Dtype, Int16Dtype, Int32Dtype, Int64Dtype) # type: Tuple
try:
from pandas import BooleanDtype, StringDtype
extension_object_dtypes_available = True
extension_dtypes += (BooleanDtype, StringDtype)
except ImportError:
extension_object_dtypes_available = False
try:
from pandas import Float32Dtype, Float64Dtype
extension_float_dtypes_available = True
extension_dtypes += (Float32Dtype, Float64Dtype)
except ImportError:
extension_float_dtypes_available = False
except ImportError:
extension_dtypes_available = False
extension_object_dtypes_available = False
extension_float_dtypes_available = False
extension_dtypes = ()
import pyarrow as pa
import pyspark.sql.types as types
from pyspark.sql.pandas.types import to_arrow_type, from_arrow_type
from pyspark import pandas as ps # For running doctests and reference resolution in PyCharm.
from pyspark.pandas.typedef.string_typehints import resolve_string_type_hint
T = TypeVar("T")
Scalar = Union[
int, float, bool, str, bytes, decimal.Decimal, datetime.date, datetime.datetime, None
]
Dtype = Union[np.dtype, ExtensionDtype]
# A column of data, with the data type.
class SeriesType(Generic[T]):
def __init__(self, dtype: Dtype, spark_type: types.DataType):
self.dtype = dtype
self.spark_type = spark_type
def __repr__(self) -> str:
return "SeriesType[{}]".format(self.spark_type)
class DataFrameType(object):
def __init__(
self, dtypes: List[Dtype], spark_types: List[types.DataType], names: List[Optional[str]]
):
from pyspark.pandas.internal import InternalField
from pyspark.pandas.utils import name_like_string
self.fields = [
InternalField(
dtype=dtype,
struct_field=types.StructField(
name=(name_like_string(name) if name is not None else ("c%s" % i)),
dataType=spark_type,
),
)
for i, (name, dtype, spark_type) in enumerate(zip(names, dtypes, spark_types))
]
@property
def dtypes(self) -> List[Dtype]:
return [field.dtype for field in self.fields]
@property
def spark_type(self) -> types.StructType:
return types.StructType([field.struct_field for field in self.fields])
def __repr__(self) -> str:
return "DataFrameType[{}]".format(self.spark_type)
# The type is a scalar type that is furthermore understood by Spark.
class ScalarType(object):
def __init__(self, dtype: Dtype, spark_type: types.DataType):
self.dtype = dtype
self.spark_type = spark_type
def __repr__(self) -> str:
return "ScalarType[{}]".format(self.spark_type)
# The type is left unspecified or we do not know about this type.
class UnknownType(object):
def __init__(self, tpe: Any):
self.tpe = tpe
def __repr__(self) -> str:
return "UnknownType[{}]".format(self.tpe)
class NameTypeHolder(object):
name = None
tpe = None
def as_spark_type(tpe: Union[str, type, Dtype], *, raise_error: bool = True) -> types.DataType:
"""
Given a Python type, returns the equivalent spark type.
Accepts:
- the built-in types in Python
- the built-in types in numpy
- list of pairs of (field_name, type)
- dictionaries of field_name -> type
- Python3's typing system
"""
if isinstance(tpe, np.dtype) and tpe == np.dtype("object"):
pass
# ArrayType
elif tpe in (np.ndarray,):
return types.ArrayType(types.StringType())
elif hasattr(tpe, "__origin__") and issubclass(tpe.__origin__, list): # type: ignore
element_type = as_spark_type(tpe.__args__[0], raise_error=raise_error) # type: ignore
if element_type is None:
return None
return types.ArrayType(element_type)
# BinaryType
elif tpe in (bytes, np.character, np.bytes_, np.string_):
return types.BinaryType()
# BooleanType
elif tpe in (bool, np.bool, "bool", "?"):
return types.BooleanType()
# DateType
elif tpe in (datetime.date,):
return types.DateType()
# NumericType
elif tpe in (np.int8, np.byte, "int8", "byte", "b"):
return types.ByteType()
elif tpe in (decimal.Decimal,):
# TODO: considering about the precision & scale for decimal type.
return types.DecimalType(38, 18)
elif tpe in (float, np.float, np.float64, "float", "float64", "double"):
return types.DoubleType()
elif tpe in (np.float32, "float32", "f"):
return types.FloatType()
elif tpe in (np.int32, "int32", "i"):
return types.IntegerType()
elif tpe in (int, np.int, np.int64, "int", "int64", "long"):
return types.LongType()
elif tpe in (np.int16, "int16", "short"):
return types.ShortType()
# StringType
elif tpe in (str, np.unicode_, "str", "U"):
return types.StringType()
# TimestampType
elif tpe in (datetime.datetime, np.datetime64, "datetime64[ns]", "M"):
return types.TimestampType()
# categorical types
    elif isinstance(tpe, CategoricalDtype) or (isinstance(tpe, str) and tpe == "category"):
return types.LongType()
# extension types
elif extension_dtypes_available:
# IntegralType
if isinstance(tpe, Int8Dtype) or (isinstance(tpe, str) and tpe == "Int8"):
return types.ByteType()
elif isinstance(tpe, Int16Dtype) or (isinstance(tpe, str) and tpe == "Int16"):
return types.ShortType()
elif isinstance(tpe, Int32Dtype) or (isinstance(tpe, str) and tpe == "Int32"):
return types.IntegerType()
elif isinstance(tpe, Int64Dtype) or (isinstance(tpe, str) and tpe == "Int64"):
return types.LongType()
if extension_object_dtypes_available:
# BooleanType
if isinstance(tpe, BooleanDtype) or (isinstance(tpe, str) and tpe == "boolean"):
return types.BooleanType()
# StringType
elif isinstance(tpe, StringDtype) or (isinstance(tpe, str) and tpe == "string"):
return types.StringType()
if extension_float_dtypes_available:
# FractionalType
if isinstance(tpe, Float32Dtype) or (isinstance(tpe, str) and tpe == "Float32"):
return types.FloatType()
elif isinstance(tpe, Float64Dtype) or (isinstance(tpe, str) and tpe == "Float64"):
return types.DoubleType()
if raise_error:
raise TypeError("Type %s was not understood." % tpe)
else:
return None
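# Illustrative sketch (not part of the upstream module): a few of the
# mappings implemented above, exercised directly via the branches defined
# in as_spark_type.
def _demo_as_spark_type():
    # int -> LongType, np.float32 -> FloatType, str -> StringType
    return as_spark_type(int), as_spark_type(np.float32), as_spark_type(str)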
def spark_type_to_pandas_dtype(
spark_type: types.DataType, *, use_extension_dtypes: bool = False
) -> Dtype:
"""Return the given Spark DataType to pandas dtype."""
if use_extension_dtypes and extension_dtypes_available:
# IntegralType
if isinstance(spark_type, types.ByteType):
return Int8Dtype()
elif isinstance(spark_type, types.ShortType):
return Int16Dtype()
elif isinstance(spark_type, types.IntegerType):
return Int32Dtype()
elif isinstance(spark_type, types.LongType):
return Int64Dtype()
if extension_object_dtypes_available:
# BooleanType
if isinstance(spark_type, types.BooleanType):
return BooleanDtype()
# StringType
elif isinstance(spark_type, types.StringType):
return StringDtype()
# FractionalType
if extension_float_dtypes_available:
if isinstance(spark_type, types.FloatType):
return Float32Dtype()
elif isinstance(spark_type, types.DoubleType):
return Float64Dtype()
if isinstance(
spark_type,
(
types.DateType,
types.NullType,
types.ArrayType,
types.MapType,
types.StructType,
types.UserDefinedType,
),
):
return np.dtype("object")
elif isinstance(spark_type, types.TimestampType):
return np.dtype("datetime64[ns]")
else:
return np.dtype(to_arrow_type(spark_type).to_pandas_dtype())
def pandas_on_spark_type(tpe: Union[str, type, Dtype]) -> Tuple[Dtype, types.DataType]:
"""
Convert input into a pandas only dtype object or a numpy dtype object,
and its corresponding Spark DataType.
Parameters
----------
tpe : object to be converted
Returns
-------
tuple of np.dtype or a pandas dtype, and Spark DataType
Raises
------
TypeError if not a dtype
Examples
--------
>>> pandas_on_spark_type(int)
(dtype('int64'), LongType)
>>> pandas_on_spark_type(str)
(dtype('<U'), StringType)
>>> pandas_on_spark_type(datetime.date)
(dtype('O'), DateType)
>>> pandas_on_spark_type(datetime.datetime)
(dtype('<M8[ns]'), TimestampType)
>>> pandas_on_spark_type(List[bool])
(dtype('O'), ArrayType(BooleanType,true))
"""
try:
dtype = pandas_dtype(tpe)
spark_type = as_spark_type(dtype)
except TypeError:
spark_type = as_spark_type(tpe)
dtype = spark_type_to_pandas_dtype(spark_type)
return dtype, spark_type
def infer_pd_series_spark_type(pser: pd.Series, dtype: Dtype) -> types.DataType:
"""Infer Spark DataType from pandas Series dtype.
:param pser: :class:`pandas.Series` to be inferred
:param dtype: the Series' dtype
:return: the inferred Spark data type
"""
if dtype == np.dtype("object"):
if len(pser) == 0 or pser.isnull().all():
return types.NullType()
elif hasattr(pser.iloc[0], "__UDT__"):
return pser.iloc[0].__UDT__
else:
return from_arrow_type(pa.Array.from_pandas(pser).type)
elif isinstance(dtype, CategoricalDtype):
if isinstance(pser.dtype, CategoricalDtype):
return as_spark_type(pser.cat.codes.dtype)
else:
# `pser` must already be converted to codes.
return as_spark_type(pser.dtype)
else:
return as_spark_type(dtype)
def infer_return_type(f: Callable) -> Union[SeriesType, DataFrameType, ScalarType, UnknownType]:
"""
Infer the return type from the return type annotation of the given function.
The returned type class indicates both dtypes (a pandas only dtype object
or a numpy dtype object) and its corresponding Spark DataType.
>>> def func() -> int:
... pass
>>> inferred = infer_return_type(func)
>>> inferred.dtype
dtype('int64')
>>> inferred.spark_type
LongType
>>> def func() -> ps.Series[int]:
... pass
>>> inferred = infer_return_type(func)
>>> inferred.dtype
dtype('int64')
>>> inferred.spark_type
LongType
>>> def func() -> ps.DataFrame[np.float, str]:
... pass
>>> inferred = infer_return_type(func)
>>> inferred.dtypes
[dtype('float64'), dtype('<U')]
>>> inferred.spark_type
StructType(List(StructField(c0,DoubleType,true),StructField(c1,StringType,true)))
>>> def func() -> ps.DataFrame[np.float]:
... pass
>>> inferred = infer_return_type(func)
>>> inferred.dtypes
[dtype('float64')]
>>> inferred.spark_type
StructType(List(StructField(c0,DoubleType,true)))
>>> def func() -> 'int':
... pass
>>> inferred = infer_return_type(func)
>>> inferred.dtype
dtype('int64')
>>> inferred.spark_type
LongType
>>> def func() -> 'ps.Series[int]':
... pass
>>> inferred = infer_return_type(func)
>>> inferred.dtype
dtype('int64')
>>> inferred.spark_type
LongType
>>> def func() -> 'ps.DataFrame[np.float, str]':
... pass
>>> inferred = infer_return_type(func)
>>> inferred.dtypes
[dtype('float64'), dtype('<U')]
>>> inferred.spark_type
StructType(List(StructField(c0,DoubleType,true),StructField(c1,StringType,true)))
>>> def func() -> 'ps.DataFrame[np.float]':
... pass
>>> inferred = infer_return_type(func)
>>> inferred.dtypes
[dtype('float64')]
>>> inferred.spark_type
StructType(List(StructField(c0,DoubleType,true)))
>>> def func() -> ps.DataFrame['a': np.float, 'b': int]:
... pass
>>> inferred = infer_return_type(func)
>>> inferred.dtypes
[dtype('float64'), dtype('int64')]
>>> inferred.spark_type
StructType(List(StructField(a,DoubleType,true),StructField(b,LongType,true)))
>>> def func() -> "ps.DataFrame['a': np.float, 'b': int]":
... pass
>>> inferred = infer_return_type(func)
>>> inferred.dtypes
[dtype('float64'), dtype('int64')]
>>> inferred.spark_type
StructType(List(StructField(a,DoubleType,true),StructField(b,LongType,true)))
>>> pdf = pd.DataFrame({"a": [1, 2, 3], "b": [3, 4, 5]})
>>> def func() -> ps.DataFrame[pdf.dtypes]:
... pass
>>> inferred = infer_return_type(func)
>>> inferred.dtypes
[dtype('int64'), dtype('int64')]
>>> inferred.spark_type
StructType(List(StructField(c0,LongType,true),StructField(c1,LongType,true)))
>>> pdf = pd.DataFrame({"a": [1, 2, 3], "b": [3, 4, 5]})
>>> def func() -> ps.DataFrame[zip(pdf.columns, pdf.dtypes)]:
... pass
>>> inferred = infer_return_type(func)
>>> inferred.dtypes
[dtype('int64'), dtype('int64')]
>>> inferred.spark_type
StructType(List(StructField(a,LongType,true),StructField(b,LongType,true)))
>>> pdf = pd.DataFrame({("x", "a"): [1, 2, 3], ("y", "b"): [3, 4, 5]})
>>> def func() -> ps.DataFrame[zip(pdf.columns, pdf.dtypes)]:
... pass
>>> inferred = infer_return_type(func)
>>> inferred.dtypes
[dtype('int64'), dtype('int64')]
>>> inferred.spark_type
StructType(List(StructField((x, a),LongType,true),StructField((y, b),LongType,true)))
>>> pdf = pd.DataFrame({"a": [1, 2, 3], "b": pd.Categorical([3, 4, 5])})
>>> def func() -> ps.DataFrame[pdf.dtypes]:
... pass
>>> inferred = infer_return_type(func)
>>> inferred.dtypes
[dtype('int64'), CategoricalDtype(categories=[3, 4, 5], ordered=False)]
>>> inferred.spark_type
StructType(List(StructField(c0,LongType,true),StructField(c1,LongType,true)))
>>> def func() -> ps.DataFrame[zip(pdf.columns, pdf.dtypes)]:
... pass
>>> inferred = infer_return_type(func)
>>> inferred.dtypes
[dtype('int64'), CategoricalDtype(categories=[3, 4, 5], ordered=False)]
>>> inferred.spark_type
StructType(List(StructField(a,LongType,true),StructField(b,LongType,true)))
>>> def func() -> ps.Series[pdf.b.dtype]:
... pass
>>> inferred = infer_return_type(func)
>>> inferred.dtype
CategoricalDtype(categories=[3, 4, 5], ordered=False)
>>> inferred.spark_type
LongType
"""
# We should re-import to make sure the class 'SeriesType' is not treated as a class
# within this module locally. See Series.__class_getitem__ which imports this class
# canonically.
from pyspark.pandas.typedef import SeriesType, NameTypeHolder
spec = getfullargspec(f)
tpe = spec.annotations.get("return", None)
if isinstance(tpe, str):
# This type hint can happen when given hints are string to avoid forward reference.
tpe = resolve_string_type_hint(tpe)
if hasattr(tpe, "__origin__") and (
tpe.__origin__ == ps.DataFrame or tpe.__origin__ == ps.Series
):
        # When the Python version is lower than 3.7, unwrap it to a Tuple/SeriesType type hint.
tpe = tpe.__args__[0]
if hasattr(tpe, "__origin__") and issubclass(tpe.__origin__, SeriesType):
tpe = tpe.__args__[0]
if issubclass(tpe, NameTypeHolder):
tpe = tpe.tpe
dtype, spark_type = pandas_on_spark_type(tpe)
return SeriesType(dtype, spark_type)
# Note that, DataFrame type hints will create a Tuple.
# Python 3.6 has `__name__`. Python 3.7 and 3.8 have `_name`.
# Check if the name is Tuple.
name = getattr(tpe, "_name", getattr(tpe, "__name__", None))
if name == "Tuple":
tuple_type = tpe
if hasattr(tuple_type, "__tuple_params__"):
# Python 3.5.0 to 3.5.2 has '__tuple_params__' instead.
# See https://github.com/python/cpython/blob/v3.5.2/Lib/typing.py
parameters = getattr(tuple_type, "__tuple_params__")
else:
parameters = getattr(tuple_type, "__args__")
dtypes, spark_types = zip(
*(
pandas_on_spark_type(p.tpe)
if isclass(p) and issubclass(p, NameTypeHolder)
else pandas_on_spark_type(p)
for p in parameters
)
)
names = [
p.name if isclass(p) and issubclass(p, NameTypeHolder) else None for p in parameters
]
return DataFrameType(list(dtypes), list(spark_types), names)
types = pandas_on_spark_type(tpe)
if types is None:
return UnknownType(tpe)
else:
return ScalarType(*types)
def _test() -> None:
import doctest
import sys
import pyspark.pandas.typedef.typehints
globs = pyspark.pandas.typedef.typehints.__dict__.copy()
(failure_count, test_count) = doctest.testmod(
pyspark.pandas.typedef.typehints,
globs=globs,
optionflags=doctest.ELLIPSIS | doctest.NORMALIZE_WHITESPACE,
)
if failure_count:
sys.exit(-1)
if __name__ == "__main__":
_test()
| apache-2.0 |
KrisCheng/ML-Learning | archive/MOOC/Deeplearning_AI/NeuralNetworksandDeepLearning/LogisticRegressionWithaNeuralNetworkMindset/Logistic+Regression+with+a+Neural+Network+mindset+v3.py | 1 | 29124 |
# coding: utf-8
# # Logistic Regression with a Neural Network mindset
#
# Welcome to your first (required) programming assignment! You will build a logistic regression classifier to recognize cats. This assignment will step you through how to do this with a Neural Network mindset, and so will also hone your intuitions about deep learning.
#
# **Instructions:**
# - Do not use loops (for/while) in your code, unless the instructions explicitly ask you to do so.
#
# **You will learn to:**
# - Build the general architecture of a learning algorithm, including:
# - Initializing parameters
# - Calculating the cost function and its gradient
# - Using an optimization algorithm (gradient descent)
# - Gather all three functions above into a main model function, in the right order.
# ## 1 - Packages ##
#
# First, let's run the cell below to import all the packages that you will need during this assignment.
# - [numpy](www.numpy.org) is the fundamental package for scientific computing with Python.
# - [h5py](http://www.h5py.org) is a common package to interact with a dataset that is stored on an H5 file.
# - [matplotlib](http://matplotlib.org) is a famous library to plot graphs in Python.
# - [PIL](http://www.pythonware.com/products/pil/) and [scipy](https://www.scipy.org/) are used here to test your model with your own picture at the end.
# In[62]:
import numpy as np
import matplotlib.pyplot as plt
import h5py
import scipy
from PIL import Image
from scipy import ndimage
from lr_utils import load_dataset
get_ipython().magic('matplotlib inline')
# ## 2 - Overview of the Problem set ##
#
# **Problem Statement**: You are given a dataset ("data.h5") containing:
# - a training set of m_train images labeled as cat (y=1) or non-cat (y=0)
# - a test set of m_test images labeled as cat or non-cat
# - each image is of shape (num_px, num_px, 3) where 3 is for the 3 channels (RGB). Thus, each image is square (height = num_px) and (width = num_px).
#
# You will build a simple image-recognition algorithm that can correctly classify pictures as cat or non-cat.
#
# Let's get more familiar with the dataset. Load the data by running the following code.
# In[63]:
# Loading the data (cat/non-cat)
train_set_x_orig, train_set_y, test_set_x_orig, test_set_y, classes = load_dataset()
# We added "_orig" at the end of image datasets (train and test) because we are going to preprocess them. After preprocessing, we will end up with train_set_x and test_set_x (the labels train_set_y and test_set_y don't need any preprocessing).
#
# Each line of your train_set_x_orig and test_set_x_orig is an array representing an image. You can visualize an example by running the following code. Feel free also to change the `index` value and re-run to see other images.
# In[83]:
# Example of a picture
index = 80
plt.imshow(train_set_x_orig[index])
print ("y = " + str(train_set_y[:, index]) + ", it's a '" + classes[np.squeeze(train_set_y[:, index])].decode("utf-8") + "' picture.")
# Many software bugs in deep learning come from having matrix/vector dimensions that don't fit. If you can keep your matrix/vector dimensions straight you will go a long way toward eliminating many bugs.
#
# **Exercise:** Find the values for:
# - m_train (number of training examples)
# - m_test (number of test examples)
# - num_px (= height = width of a training image)
# Remember that `train_set_x_orig` is a numpy-array of shape (m_train, num_px, num_px, 3). For instance, you can access `m_train` by writing `train_set_x_orig.shape[0]`.
# In[65]:
### START CODE HERE ### (≈ 3 lines of code)
m_train = train_set_x_orig.shape[0]
m_test = test_set_x_orig.shape[0]
num_px = train_set_x_orig.shape[1]
### END CODE HERE ###
print ("Number of training examples: m_train = " + str(m_train))
print ("Number of testing examples: m_test = " + str(m_test))
print ("Height/Width of each image: num_px = " + str(num_px))
print ("Each image is of size: (" + str(num_px) + ", " + str(num_px) + ", 3)")
print ("train_set_x shape: " + str(train_set_x_orig.shape))
print ("train_set_y shape: " + str(train_set_y.shape))
print ("test_set_x shape: " + str(test_set_x_orig.shape))
print ("test_set_y shape: " + str(test_set_y.shape))
# **Expected Output for m_train, m_test and num_px**:
# <table style="width:15%">
# <tr>
# <td>**m_train**</td>
# <td> 209 </td>
# </tr>
#
# <tr>
# <td>**m_test**</td>
# <td> 50 </td>
# </tr>
#
# <tr>
# <td>**num_px**</td>
# <td> 64 </td>
# </tr>
#
# </table>
#
# For convenience, you should now reshape images of shape (num_px, num_px, 3) into a numpy-array of shape (num_px $*$ num_px $*$ 3, 1). After this, our training (and test) dataset is a numpy-array where each column represents a flattened image. There should be m_train (respectively m_test) columns.
#
# **Exercise:** Reshape the training and test data sets so that images of size (num_px, num_px, 3) are flattened into single vectors of shape (num\_px $*$ num\_px $*$ 3, 1).
#
# A trick when you want to flatten a matrix X of shape (a,b,c,d) to a matrix X_flatten of shape (b$*$c$*$d, a) is to use:
# ```python
# X_flatten = X.reshape(X.shape[0], -1).T # X.T is the transpose of X
# ```
# In[66]:
# Reshape the training and test examples
### START CODE HERE ### (≈ 2 lines of code)
train_set_x_flatten = train_set_x_orig.reshape(train_set_x_orig.shape[0], -1).T
test_set_x_flatten = test_set_x_orig.reshape(test_set_x_orig.shape[0], -1).T
### END CODE HERE ###
print ("train_set_x_flatten shape: " + str(train_set_x_flatten.shape))
print ("train_set_y shape: " + str(train_set_y.shape))
print ("test_set_x_flatten shape: " + str(test_set_x_flatten.shape))
print ("test_set_y shape: " + str(test_set_y.shape))
print ("sanity check after reshaping: " + str(train_set_x_flatten[0:5,0]))
# **Expected Output**:
#
# <table style="width:35%">
# <tr>
# <td>**train_set_x_flatten shape**</td>
# <td> (12288, 209)</td>
# </tr>
# <tr>
# <td>**train_set_y shape**</td>
# <td>(1, 209)</td>
# </tr>
# <tr>
# <td>**test_set_x_flatten shape**</td>
# <td>(12288, 50)</td>
# </tr>
# <tr>
# <td>**test_set_y shape**</td>
# <td>(1, 50)</td>
# </tr>
# <tr>
# <td>**sanity check after reshaping**</td>
# <td>[17 31 56 22 33]</td>
# </tr>
# </table>
# To represent color images, the red, green and blue channels (RGB) must be specified for each pixel, and so the pixel value is actually a vector of three numbers ranging from 0 to 255.
#
# One common preprocessing step in machine learning is to center and standardize your dataset, meaning that you subtract the mean of the whole numpy array from each example, and then divide each example by the standard deviation of the whole numpy array. But for picture datasets, it is simpler and more convenient and works almost as well to just divide every row of the dataset by 255 (the maximum value of a pixel channel).
#
# <!-- During the training of your model, you're going to multiply weights and add biases to some initial inputs in order to observe neuron activations. Then you backpropogate with the gradients to train the model. But, it is extremely important for each feature to have a similar range such that our gradients don't explode. You will see that more in detail later in the lectures. !-->
#
# Let's standardize our dataset.
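# Illustrative sketch (not part of the graded assignment): the general
# "subtract the mean, divide by the standard deviation" preprocessing that
# the text above describes. The assignment itself uses the simpler
# divide-by-255 variant in the next cell.
def _standardize(x):
    # center the data, then scale it to unit standard deviation
    return (x - np.mean(x)) / np.std(x)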
# In[67]:
train_set_x = train_set_x_flatten/255.
test_set_x = test_set_x_flatten/255.
# <font color='blue'>
# **What you need to remember:**
#
# Common steps for pre-processing a new dataset are:
# - Figure out the dimensions and shapes of the problem (m_train, m_test, num_px, ...)
# - Reshape the datasets such that each example is now a vector of size (num_px \* num_px \* 3, 1)
# - "Standardize" the data
# ## 3 - General Architecture of the learning algorithm ##
#
# It's time to design a simple algorithm to distinguish cat images from non-cat images.
#
# You will build a Logistic Regression, using a Neural Network mindset. The following Figure explains why **Logistic Regression is actually a very simple Neural Network!**
#
# <img src="images/LogReg_kiank.png" style="width:650px;height:400px;">
#
# **Mathematical expression of the algorithm**:
#
# For one example $x^{(i)}$:
# $$z^{(i)} = w^T x^{(i)} + b \tag{1}$$
# $$\hat{y}^{(i)} = a^{(i)} = sigmoid(z^{(i)})\tag{2}$$
# $$ \mathcal{L}(a^{(i)}, y^{(i)}) = - y^{(i)} \log(a^{(i)}) - (1-y^{(i)} ) \log(1-a^{(i)})\tag{3}$$
#
# The cost is then computed by summing over all training examples:
# $$ J = \frac{1}{m} \sum_{i=1}^m \mathcal{L}(a^{(i)}, y^{(i)})\tag{6}$$
#
# **Key steps**:
# In this exercise, you will carry out the following steps:
# - Initialize the parameters of the model
# - Learn the parameters for the model by minimizing the cost
# - Use the learned parameters to make predictions (on the test set)
# - Analyse the results and conclude
# ## 4 - Building the parts of our algorithm ##
#
# The main steps for building a Neural Network are:
# 1. Define the model structure (such as number of input features)
# 2. Initialize the model's parameters
# 3. Loop:
# - Calculate current loss (forward propagation)
# - Calculate current gradient (backward propagation)
# - Update parameters (gradient descent)
#
# You often build 1-3 separately and integrate them into one function we call `model()`.
#
# ### 4.1 - Helper functions
#
# **Exercise**: Using your code from "Python Basics", implement `sigmoid()`. As you've seen in the figure above, you need to compute $sigmoid( w^T x + b) = \frac{1}{1 + e^{-(w^T x + b)}}$ to make predictions. Use np.exp().
# In[68]:
# GRADED FUNCTION: sigmoid
def sigmoid(z):
"""
Compute the sigmoid of z
Arguments:
z -- A scalar or numpy array of any size.
Return:
s -- sigmoid(z)
"""
### START CODE HERE ### (≈ 1 line of code)
s = 1/(1+np.exp(-z))
### END CODE HERE ###
return s
# In[69]:
print ("sigmoid([0, 2]) = " + str(sigmoid(np.array([0,2]))))
# **Expected Output**:
#
# <table>
# <tr>
# <td>**sigmoid([0, 2])**</td>
# <td> [ 0.5 0.88079708]</td>
# </tr>
# </table>
# ### 4.2 - Initializing parameters
#
# **Exercise:** Implement parameter initialization in the cell below. You have to initialize w as a vector of zeros. If you don't know what numpy function to use, look up np.zeros() in the Numpy library's documentation.
# In[70]:
# GRADED FUNCTION: initialize_with_zeros
def initialize_with_zeros(dim):
"""
This function creates a vector of zeros of shape (dim, 1) for w and initializes b to 0.
Argument:
dim -- size of the w vector we want (or number of parameters in this case)
Returns:
w -- initialized vector of shape (dim, 1)
b -- initialized scalar (corresponds to the bias)
"""
### START CODE HERE ### (≈ 1 line of code)
w = np.zeros([dim,1])
b = 0
### END CODE HERE ###
assert(w.shape == (dim, 1))
assert(isinstance(b, float) or isinstance(b, int))
return w, b
# In[71]:
dim = 2
w, b = initialize_with_zeros(dim)
print ("w = " + str(w))
print ("b = " + str(b))
# **Expected Output**:
#
#
# <table style="width:15%">
# <tr>
# <td> ** w ** </td>
# <td> [[ 0.]
# [ 0.]] </td>
# </tr>
# <tr>
# <td> ** b ** </td>
# <td> 0 </td>
# </tr>
# </table>
#
# For image inputs, w will be of shape (num_px $\times$ num_px $\times$ 3, 1).
# ### 4.3 - Forward and Backward propagation
#
# Now that your parameters are initialized, you can do the "forward" and "backward" propagation steps for learning the parameters.
#
# **Exercise:** Implement a function `propagate()` that computes the cost function and its gradient.
#
# **Hints**:
#
# Forward Propagation:
# - You get X
# - You compute $A = \sigma(w^T X + b) = (a^{(0)}, a^{(1)}, ..., a^{(m-1)}, a^{(m)})$
# - You calculate the cost function: $J = -\frac{1}{m}\sum_{i=1}^{m}y^{(i)}\log(a^{(i)})+(1-y^{(i)})\log(1-a^{(i)})$
#
# Here are the two formulas you will be using:
#
# $$ \frac{\partial J}{\partial w} = \frac{1}{m}X(A-Y)^T\tag{7}$$
# $$ \frac{\partial J}{\partial b} = \frac{1}{m} \sum_{i=1}^m (a^{(i)}-y^{(i)})\tag{8}$$
# In[73]:
# GRADED FUNCTION: propagate
def propagate(w, b, X, Y):
"""
Implement the cost function and its gradient for the propagation explained above
Arguments:
w -- weights, a numpy array of size (num_px * num_px * 3, 1)
b -- bias, a scalar
X -- data of size (num_px * num_px * 3, number of examples)
Y -- true "label" vector (containing 0 if non-cat, 1 if cat) of size (1, number of examples)
Return:
cost -- negative log-likelihood cost for logistic regression
dw -- gradient of the loss with respect to w, thus same shape as w
db -- gradient of the loss with respect to b, thus same shape as b
Tips:
- Write your code step by step for the propagation. np.log(), np.dot()
"""
m = X.shape[1]
# FORWARD PROPAGATION (FROM X TO COST)
### START CODE HERE ### (≈ 2 lines of code)
A = sigmoid(np.dot(w.T,X)+b) # compute activation
cost = -(1/m)*np.sum(Y*np.log(A)+(1-Y)*np.log(1-A),1) # compute cost
### END CODE HERE ###
# BACKWARD PROPAGATION (TO FIND GRAD)
### START CODE HERE ### (≈ 2 lines of code)
dw = (1/m) * np.dot(X,(A-Y).T)
    db = (1/m) * np.sum(A - Y)  # sum over all examples to get a scalar gradient
### END CODE HERE ###
assert(dw.shape == w.shape)
assert(db.dtype == float)
cost = np.squeeze(cost)
assert(cost.shape == ())
grads = {"dw": dw,
"db": db}
return grads, cost
# In[74]:
w, b, X, Y = np.array([[1],[2]]), 2, np.array([[1,2],[3,4]]), np.array([[1,0]])
grads, cost = propagate(w, b, X, Y)
print ("dw = " + str(grads["dw"]))
print ("db = " + str(grads["db"]))
print ("cost = " + str(cost))
# **Expected Output**:
#
# <table style="width:50%">
# <tr>
# <td> ** dw ** </td>
# <td> [[ 0.99993216]
# [ 1.99980262]]</td>
# </tr>
# <tr>
# <td> ** db ** </td>
# <td> 0.499935230625 </td>
# </tr>
# <tr>
# <td> ** cost ** </td>
# <td> 6.000064773192205</td>
# </tr>
#
# </table>
# ### d) Optimization
# - You have initialized your parameters.
# - You are also able to compute a cost function and its gradient.
# - Now, you want to update the parameters using gradient descent.
#
# **Exercise:** Write down the optimization function. The goal is to learn $w$ and $b$ by minimizing the cost function $J$. For a parameter $\theta$, the update rule is $ \theta = \theta - \alpha \text{ } d\theta$, where $\alpha$ is the learning rate.
# In[75]:
# GRADED FUNCTION: optimize
def optimize(w, b, X, Y, num_iterations, learning_rate, print_cost = False):
"""
This function optimizes w and b by running a gradient descent algorithm
Arguments:
w -- weights, a numpy array of size (num_px * num_px * 3, 1)
b -- bias, a scalar
X -- data of shape (num_px * num_px * 3, number of examples)
Y -- true "label" vector (containing 0 if non-cat, 1 if cat), of shape (1, number of examples)
num_iterations -- number of iterations of the optimization loop
learning_rate -- learning rate of the gradient descent update rule
print_cost -- True to print the loss every 100 steps
Returns:
params -- dictionary containing the weights w and bias b
grads -- dictionary containing the gradients of the weights and bias with respect to the cost function
costs -- list of all the costs computed during the optimization, this will be used to plot the learning curve.
Tips:
You basically need to write down two steps and iterate through them:
1) Calculate the cost and the gradient for the current parameters. Use propagate().
2) Update the parameters using gradient descent rule for w and b.
"""
costs = []
for i in range(num_iterations):
# Cost and gradient calculation (≈ 1-4 lines of code)
### START CODE HERE ###
grads, cost = propagate(w, b, X, Y)
### END CODE HERE ###
# Retrieve derivatives from grads
dw = grads["dw"]
db = grads["db"]
# update rule (≈ 2 lines of code)
### START CODE HERE ###
w = w - learning_rate * dw
b = b - learning_rate * db
### END CODE HERE ###
# Record the costs
if i % 100 == 0:
costs.append(cost)
# Print the cost every 100 training examples
if print_cost and i % 100 == 0:
print ("Cost after iteration %i: %f" %(i, cost))
params = {"w": w,
"b": b}
grads = {"dw": dw,
"db": db}
return params, grads, costs
# In[76]:
params, grads, costs = optimize(w, b, X, Y, num_iterations= 100, learning_rate = 0.009, print_cost = False)
print ("w = " + str(params["w"]))
print ("b = " + str(params["b"]))
print ("dw = " + str(grads["dw"]))
print ("db = " + str(grads["db"]))
# **Expected Output**:
#
# <table style="width:40%">
# <tr>
# <td> **w** </td>
# <td>[[ 0.1124579 ]
# [ 0.23106775]] </td>
# </tr>
#
# <tr>
# <td> **b** </td>
# <td> 1.55930492484 </td>
# </tr>
# <tr>
# <td> **dw** </td>
# <td> [[ 0.90158428]
# [ 1.76250842]] </td>
# </tr>
# <tr>
# <td> **db** </td>
# <td> 0.430462071679 </td>
# </tr>
#
# </table>
# **Exercise:** The previous function will output the learned w and b. We are able to use w and b to predict the labels for a dataset X. Implement the `predict()` function. There are two steps to computing predictions:
#
# 1. Calculate $\hat{Y} = A = \sigma(w^T X + b)$
#
# 2. Convert the entries of a into 0 (if activation <= 0.5) or 1 (if activation > 0.5), and store the predictions in a vector `Y_prediction`. If you wish, you can use an `if`/`else` statement in a `for` loop (though there is also a way to vectorize this; a minimal sketch follows below).
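# A minimal sketch of the fully vectorized alternative mentioned above (the
# graded cell below keeps the loop from the assignment template):
def _threshold_probabilities(A):
    # map probabilities to hard 0/1 predictions in a single vectorized step
    return (A > 0.5).astype(float)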
# In[77]:
# GRADED FUNCTION: predict
def predict(w, b, X):
'''
Predict whether the label is 0 or 1 using learned logistic regression parameters (w, b)
Arguments:
w -- weights, a numpy array of size (num_px * num_px * 3, 1)
b -- bias, a scalar
X -- data of size (num_px * num_px * 3, number of examples)
Returns:
Y_prediction -- a numpy array (vector) containing all predictions (0/1) for the examples in X
'''
m = X.shape[1]
Y_prediction = np.zeros((1,m))
w = w.reshape(X.shape[0], 1)
# Compute vector "A" predicting the probabilities of a cat being present in the picture
### START CODE HERE ### (≈ 1 line of code)
    A = sigmoid(np.dot(w.T, X) + b)
### END CODE HERE ###
for i in range(A.shape[1]):
# Convert probabilities A[0,i] to actual predictions p[0,i]
### START CODE HERE ### (≈ 4 lines of code)
        # vectorized thresholding: these two assignments already fill every
        # entry of Y_prediction, so the surrounding loop is redundant
        Y_prediction[A >= 0.5] = 1
        Y_prediction[A < 0.5] = 0
### END CODE HERE ###
assert(Y_prediction.shape == (1, m))
return Y_prediction
# In[78]:
print ("predictions = " + str(predict(w, b, X)))
# **Expected Output**:
#
# <table style="width:30%">
# <tr>
# <td>
# **predictions**
# </td>
# <td>
# [[ 1. 1.]]
# </td>
# </tr>
#
# </table>
#
# <font color='blue'>
# **What to remember:**
# You've implemented several functions that:
# - Initialize (w,b)
# - Optimize the loss iteratively to learn parameters (w,b):
# - computing the cost and its gradient
# - updating the parameters using gradient descent
# - Use the learned (w,b) to predict the labels for a given set of examples
# ## 5 - Merge all functions into a model ##
#
# You will now see how the overall model is structured by putting all the building blocks (functions implemented in the previous parts) together, in the right order.
#
# **Exercise:** Implement the model function. Use the following notation:
# - Y_prediction for your predictions on the test set
# - Y_prediction_train for your predictions on the train set
# - w, costs, grads for the outputs of optimize()
# In[79]:
# GRADED FUNCTION: model
def model(X_train, Y_train, X_test, Y_test, num_iterations = 2000, learning_rate = 0.5, print_cost = False):
"""
Builds the logistic regression model by calling the function you've implemented previously
Arguments:
X_train -- training set represented by a numpy array of shape (num_px * num_px * 3, m_train)
Y_train -- training labels represented by a numpy array (vector) of shape (1, m_train)
X_test -- test set represented by a numpy array of shape (num_px * num_px * 3, m_test)
Y_test -- test labels represented by a numpy array (vector) of shape (1, m_test)
num_iterations -- hyperparameter representing the number of iterations to optimize the parameters
learning_rate -- hyperparameter representing the learning rate used in the update rule of optimize()
print_cost -- Set to true to print the cost every 100 iterations
Returns:
d -- dictionary containing information about the model.
"""
### START CODE HERE ###
# initialize parameters with zeros (≈ 1 line of code)
w, b = initialize_with_zeros(X_train.shape[0])
# Gradient descent (≈ 1 line of code)
parameters, grads, costs = optimize(w, b, X_train, Y_train, num_iterations, learning_rate, print_cost)
# Retrieve parameters w and b from dictionary "parameters"
w = parameters["w"]
b = parameters["b"]
# Predict test/train set examples (≈ 2 lines of code)
Y_prediction_test = predict(w, b, X_test)
Y_prediction_train = predict(w, b, X_train)
### END CODE HERE ###
# Print train/test Errors
print("train accuracy: {} %".format(100 - np.mean(np.abs(Y_prediction_train - Y_train)) * 100))
print("test accuracy: {} %".format(100 - np.mean(np.abs(Y_prediction_test - Y_test)) * 100))
d = {"costs": costs,
"Y_prediction_test": Y_prediction_test,
"Y_prediction_train" : Y_prediction_train,
"w" : w,
"b" : b,
"learning_rate" : learning_rate,
"num_iterations": num_iterations}
return d
# Run the following cell to train your model.
# In[80]:
d = model(train_set_x, train_set_y, test_set_x, test_set_y, num_iterations = 2000, learning_rate = 0.005, print_cost = True)
# **Expected Output**:
#
# <table style="width:40%">
#
# <tr>
# <td> **Train Accuracy** </td>
# <td> 99.04306220095694 % </td>
# </tr>
#
# <tr>
# <td>**Test Accuracy** </td>
# <td> 70.0 % </td>
# </tr>
# </table>
#
#
#
# **Comment**: Training accuracy is close to 100%. This is a good sanity check: your model is working and has high enough capacity to fit the training data. Test accuracy is 70%, matching the expected output above. It is actually not bad for this simple model, given the small dataset we used and that logistic regression is a linear classifier. But no worries, you'll build an even better classifier next week!
#
# Also, you see that the model is clearly overfitting the training data. Later in this specialization you will learn how to reduce overfitting, for example by using regularization. Using the code below (and changing the `index` variable) you can look at predictions on pictures of the test set.
# In[60]:
# Example of a picture that was wrongly classified.
index = 2
plt.imshow(test_set_x[:,index].reshape((num_px, num_px, 3)))
print ("y = " + str(test_set_y[0,index]) + ", you predicted that it is a \"" + classes[d["Y_prediction_test"][0,index]].decode("utf-8") + "\" picture.")
# Let's also plot the cost function and the gradients.
# In[61]:
# Plot learning curve (with costs)
costs = np.squeeze(d['costs'])
plt.plot(costs)
plt.ylabel('cost')
plt.xlabel('iterations (per hundreds)')
plt.title("Learning rate =" + str(d["learning_rate"]))
plt.show()
# **Interpretation**:
# You can see the cost decreasing. It shows that the parameters are being learned. However, you see that you could train the model even more on the training set. Try to increase the number of iterations in the cell above and rerun the cells. You might see that the training set accuracy goes up, but the test set accuracy goes down. This is called overfitting.
# ## 6 - Further analysis (optional/ungraded exercise) ##
#
# Congratulations on building your first image classification model. Let's analyze it further, and examine possible choices for the learning rate $\alpha$.
# #### Choice of learning rate ####
#
# **Reminder**:
# In order for Gradient Descent to work you must choose the learning rate wisely. The learning rate $\alpha$ determines how rapidly we update the parameters. If the learning rate is too large we may "overshoot" the optimal value. Similarly, if it is too small we will need too many iterations to converge to the best values. That's why it is crucial to use a well-tuned learning rate.
#
# Let's compare the learning curve of our model with several choices of learning rates. Run the cell below. This should take about 1 minute. Feel free also to try different values than the three we have initialized the `learning_rates` variable to contain, and see what happens.
# In[ ]:
learning_rates = [0.01, 0.001, 0.0001]
models = {}
for i in learning_rates:
print ("learning rate is: " + str(i))
models[str(i)] = model(train_set_x, train_set_y, test_set_x, test_set_y, num_iterations = 1500, learning_rate = i, print_cost = False)
print ('\n' + "-------------------------------------------------------" + '\n')
for i in learning_rates:
plt.plot(np.squeeze(models[str(i)]["costs"]), label= str(models[str(i)]["learning_rate"]))
plt.ylabel('cost')
plt.xlabel('iterations')
legend = plt.legend(loc='upper center', shadow=True)
frame = legend.get_frame()
frame.set_facecolor('0.90')
plt.show()
# **Interpretation**:
# - Different learning rates give different costs and thus different predictions results.
# - If the learning rate is too large (0.01), the cost may oscillate up and down. It may even diverge (though in this example, using 0.01 still eventually ends up at a good value for the cost).
# - A lower cost doesn't mean a better model. You have to check if there is possibly overfitting. It happens when the training accuracy is a lot higher than the test accuracy.
# - In deep learning, we usually recommend that you:
# - Choose the learning rate that better minimizes the cost function.
# - If your model overfits, use other techniques to reduce overfitting. (We'll talk about this in later videos.)
#
# ## 7 - Test with your own image (optional/ungraded exercise) ##
#
# Congratulations on finishing this assignment. You can use your own image and see the output of your model. To do that:
# 1. Click on "File" in the upper bar of this notebook, then click "Open" to go on your Coursera Hub.
# 2. Add your image to this Jupyter Notebook's directory, in the "images" folder
# 3. Change your image's name in the following code
# 4. Run the code and check if the algorithm is right (1 = cat, 0 = non-cat)!
# In[86]:
## START CODE HERE ## (PUT YOUR IMAGE NAME)
my_image = "cat_in_iran.jpg" # change this to the name of your image file
## END CODE HERE ##
# We preprocess the image to fit your algorithm.
fname = "images/" + my_image
image = np.array(ndimage.imread(fname, flatten=False))
my_image = scipy.misc.imresize(image, size=(num_px,num_px)).reshape((1, num_px*num_px*3)).T
my_predicted_image = predict(d["w"], d["b"], my_image)
plt.imshow(image)
print("y = " + str(np.squeeze(my_predicted_image)) + ", your algorithm predicts a \"" + classes[int(np.squeeze(my_predicted_image)),].decode("utf-8") + "\" picture.")
# <font color='blue'>
# **What to remember from this assignment:**
# 1. Preprocessing the dataset is important.
# 2. You implemented each function separately: initialize(), propagate(), optimize(). Then you built a model().
# 3. Tuning the learning rate (which is an example of a "hyperparameter") can make a big difference to the algorithm. You will see more examples of this later in this course!
#
# Finally, if you'd like, we invite you to try different things on this Notebook. Make sure you submit before trying anything. Once you submit, things you can play with include:
# - Play with the learning rate and the number of iterations
# - Try different initialization methods and compare the results
# - Test other preprocessings (center the data, or divide each row by its standard deviation)
# Bibliography:
# - http://www.wildml.com/2015/09/implementing-a-neural-network-from-scratch/
# - https://stats.stackexchange.com/questions/211436/why-do-we-normalize-images-by-subtracting-the-datasets-image-mean-and-not-the-c
| mit |
djgagne/scikit-learn | sklearn/neighbors/tests/test_kd_tree.py | 159 | 7852 | import numpy as np
from numpy.testing import assert_array_almost_equal
from sklearn.neighbors.kd_tree import (KDTree, NeighborsHeap,
simultaneous_sort, kernel_norm,
nodeheap_sort, DTYPE, ITYPE)
from sklearn.neighbors.dist_metrics import DistanceMetric
from sklearn.utils.testing import SkipTest, assert_allclose
V = np.random.random((3, 3))
V = np.dot(V, V.T)
DIMENSION = 3
METRICS = {'euclidean': {},
'manhattan': {},
'chebyshev': {},
'minkowski': dict(p=3)}
def brute_force_neighbors(X, Y, k, metric, **kwargs):
D = DistanceMetric.get_metric(metric, **kwargs).pairwise(Y, X)
ind = np.argsort(D, axis=1)[:, :k]
dist = D[np.arange(Y.shape[0])[:, None], ind]
return dist, ind
def test_kd_tree_query():
np.random.seed(0)
X = np.random.random((40, DIMENSION))
Y = np.random.random((10, DIMENSION))
def check_neighbors(dualtree, breadth_first, k, metric, kwargs):
kdt = KDTree(X, leaf_size=1, metric=metric, **kwargs)
dist1, ind1 = kdt.query(Y, k, dualtree=dualtree,
breadth_first=breadth_first)
dist2, ind2 = brute_force_neighbors(X, Y, k, metric, **kwargs)
# don't check indices here: if there are any duplicate distances,
# the indices may not match. Distances should not have this problem.
assert_array_almost_equal(dist1, dist2)
for (metric, kwargs) in METRICS.items():
for k in (1, 3, 5):
for dualtree in (True, False):
for breadth_first in (True, False):
yield (check_neighbors,
dualtree, breadth_first,
k, metric, kwargs)
def test_kd_tree_query_radius(n_samples=100, n_features=10):
np.random.seed(0)
X = 2 * np.random.random(size=(n_samples, n_features)) - 1
query_pt = np.zeros(n_features, dtype=float)
eps = 1E-15 # roundoff error can cause test to fail
kdt = KDTree(X, leaf_size=5)
rad = np.sqrt(((X - query_pt) ** 2).sum(1))
for r in np.linspace(rad[0], rad[-1], 100):
ind = kdt.query_radius([query_pt], r + eps)[0]
i = np.where(rad <= r + eps)[0]
ind.sort()
i.sort()
assert_array_almost_equal(i, ind)
def test_kd_tree_query_radius_distance(n_samples=100, n_features=10):
np.random.seed(0)
X = 2 * np.random.random(size=(n_samples, n_features)) - 1
query_pt = np.zeros(n_features, dtype=float)
eps = 1E-15 # roundoff error can cause test to fail
kdt = KDTree(X, leaf_size=5)
rad = np.sqrt(((X - query_pt) ** 2).sum(1))
for r in np.linspace(rad[0], rad[-1], 100):
ind, dist = kdt.query_radius([query_pt], r + eps, return_distance=True)
ind = ind[0]
dist = dist[0]
d = np.sqrt(((query_pt - X[ind]) ** 2).sum(1))
assert_array_almost_equal(d, dist)
def compute_kernel_slow(Y, X, kernel, h):
d = np.sqrt(((Y[:, None, :] - X) ** 2).sum(-1))
norm = kernel_norm(h, X.shape[1], kernel)
if kernel == 'gaussian':
return norm * np.exp(-0.5 * (d * d) / (h * h)).sum(-1)
elif kernel == 'tophat':
return norm * (d < h).sum(-1)
elif kernel == 'epanechnikov':
return norm * ((1.0 - (d * d) / (h * h)) * (d < h)).sum(-1)
elif kernel == 'exponential':
return norm * (np.exp(-d / h)).sum(-1)
elif kernel == 'linear':
return norm * ((1 - d / h) * (d < h)).sum(-1)
elif kernel == 'cosine':
return norm * (np.cos(0.5 * np.pi * d / h) * (d < h)).sum(-1)
else:
raise ValueError('kernel not recognized')
def test_kd_tree_kde(n_samples=100, n_features=3):
np.random.seed(0)
X = np.random.random((n_samples, n_features))
Y = np.random.random((n_samples, n_features))
kdt = KDTree(X, leaf_size=10)
for kernel in ['gaussian', 'tophat', 'epanechnikov',
'exponential', 'linear', 'cosine']:
for h in [0.01, 0.1, 1]:
dens_true = compute_kernel_slow(Y, X, kernel, h)
def check_results(kernel, h, atol, rtol, breadth_first):
dens = kdt.kernel_density(Y, h, atol=atol, rtol=rtol,
kernel=kernel,
breadth_first=breadth_first)
assert_allclose(dens, dens_true, atol=atol,
rtol=max(rtol, 1e-7))
for rtol in [0, 1E-5]:
for atol in [1E-6, 1E-2]:
for breadth_first in (True, False):
yield (check_results, kernel, h, atol, rtol,
breadth_first)
def test_gaussian_kde(n_samples=1000):
# Compare gaussian KDE results to scipy.stats.gaussian_kde
from scipy.stats import gaussian_kde
np.random.seed(0)
x_in = np.random.normal(0, 1, n_samples)
x_out = np.linspace(-5, 5, 30)
for h in [0.01, 0.1, 1]:
kdt = KDTree(x_in[:, None])
try:
gkde = gaussian_kde(x_in, bw_method=h / np.std(x_in))
except TypeError:
raise SkipTest("Old scipy, does not accept explicit bandwidth.")
dens_kdt = kdt.kernel_density(x_out[:, None], h) / n_samples
dens_gkde = gkde.evaluate(x_out)
assert_array_almost_equal(dens_kdt, dens_gkde, decimal=3)
def test_kd_tree_two_point(n_samples=100, n_features=3):
np.random.seed(0)
X = np.random.random((n_samples, n_features))
Y = np.random.random((n_samples, n_features))
r = np.linspace(0, 1, 10)
kdt = KDTree(X, leaf_size=10)
D = DistanceMetric.get_metric("euclidean").pairwise(Y, X)
counts_true = [(D <= ri).sum() for ri in r]
def check_two_point(r, dualtree):
counts = kdt.two_point_correlation(Y, r=r, dualtree=dualtree)
assert_array_almost_equal(counts, counts_true)
for dualtree in (True, False):
yield check_two_point, r, dualtree
def test_kd_tree_pickle():
import pickle
np.random.seed(0)
X = np.random.random((10, 3))
kdt1 = KDTree(X, leaf_size=1)
ind1, dist1 = kdt1.query(X)
def check_pickle_protocol(protocol):
s = pickle.dumps(kdt1, protocol=protocol)
kdt2 = pickle.loads(s)
ind2, dist2 = kdt2.query(X)
assert_array_almost_equal(ind1, ind2)
assert_array_almost_equal(dist1, dist2)
for protocol in (0, 1, 2):
yield check_pickle_protocol, protocol
def test_neighbors_heap(n_pts=5, n_nbrs=10):
heap = NeighborsHeap(n_pts, n_nbrs)
for row in range(n_pts):
d_in = np.random.random(2 * n_nbrs).astype(DTYPE)
i_in = np.arange(2 * n_nbrs, dtype=ITYPE)
for d, i in zip(d_in, i_in):
heap.push(row, d, i)
ind = np.argsort(d_in)
d_in = d_in[ind]
i_in = i_in[ind]
d_heap, i_heap = heap.get_arrays(sort=True)
assert_array_almost_equal(d_in[:n_nbrs], d_heap[row])
assert_array_almost_equal(i_in[:n_nbrs], i_heap[row])
def test_node_heap(n_nodes=50):
vals = np.random.random(n_nodes).astype(DTYPE)
i1 = np.argsort(vals)
vals2, i2 = nodeheap_sort(vals)
assert_array_almost_equal(i1, i2)
assert_array_almost_equal(vals[i1], vals2)
def test_simultaneous_sort(n_rows=10, n_pts=201):
dist = np.random.random((n_rows, n_pts)).astype(DTYPE)
ind = (np.arange(n_pts) + np.zeros((n_rows, 1))).astype(ITYPE)
dist2 = dist.copy()
ind2 = ind.copy()
# simultaneous sort rows using function
simultaneous_sort(dist, ind)
# simultaneous sort rows using numpy
i = np.argsort(dist2, axis=1)
row_ind = np.arange(n_rows)[:, None]
dist2 = dist2[row_ind, i]
ind2 = ind2[row_ind, i]
assert_array_almost_equal(dist, dist2)
assert_array_almost_equal(ind, ind2)
| bsd-3-clause |
kruegg21/casino_analytics | src/visualizations.py | 1 | 4703 | # -*- coding: utf-8 -*-
# Copyright 2016 IBM Corp. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# from sqlalchemy import create_engine
# import json
from mpld3 import plugins
import matplotlib.pyplot as plt
import mpld3
import numpy as np
import seaborn
from translation_dictionaries import *
# # Read password from external file
# with open('passwords.json') as data_file:
# data = json.load(data_file)
#
# DATABASE_HOST = 'soft-feijoa.db.elephantsql.com'
# DATABASE_PORT = '5432'
# DATABASE_NAME = 'ohdimqey'
# DATABASE_USER = 'ohdimqey'
# DATABASE_PASSWORD = data['DATABASE_PASSWORD']
#
# # Connect to database
# database_string = 'postgres://{}:{}@{}:{}/{}'.format(DATABASE_USER,
# DATABASE_PASSWORD,
# DATABASE_HOST,
# DATABASE_PORT,
# DATABASE_NAME)
# engine = create_engine(database_string)
def makeplot(p_type, df, query_params, text):
'''
    INPUT: string (plot type), pandas dataframe, query params object, dict of annotation text
    OUTPUT: plot as html string
    Takes in a string naming the type of plot and a dataframe, and returns an
    html string for a plot of that dataframe
'''
plot = {"line": line_plot, "hbar": hbar_plot}
return mpld3.fig_to_html(plot[p_type](df, query_params, text))
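# Hypothetical usage sketch (not from this repo): the column names, the
# query-params shape, and the 'coin_in' metric key below are assumptions made
# for illustration only; the real key must exist in human_readable_translation
# for the axis-label lookup in line_plot/hbar_plot to succeed.
def _example_makeplot_usage():
    import pandas as pd
    from collections import namedtuple
    QueryParams = namedtuple('QueryParams', ['sql_metric'])
    df = pd.DataFrame({'tmstmp': pd.date_range('2016-01-01', periods=3),
                       'metric': [10.0, 12.5, 9.0]})
    # render a single-line time series to an html string
    return makeplot('line', df, QueryParams(sql_metric='coin_in'), {})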
def line_plot(df, query_params, text):
'''
INPUT: pandas dataframe
OUTPUT: matplotlib figure
'''
fig, ax = plt.subplots()
if 'factor' not in df.columns:
# Make plot for single-line graph
plt.plot(df.tmstmp, df.metric)
plt.xlabel('Time')
plt.ylabel(human_readable_translation[query_params.sql_metric])
# Shade under curve
min_y = ax.get_ylim()[0]
plt.fill_between(df.tmstmp.values, df.metric.values, min_y, alpha=0.5)
# Add text box
ctr = 1
for key, value in text.iteritems():
textstr = str(key)
textstr += ': '
textstr += str(value)
textstr += '\n'
plt.annotate(textstr, xy=(1,ctr * 12))
ctr += 1
else:
# Add text box
print "adding text box"
ctr = 1
for key, value in text.iteritems():
textstr = str(key)
textstr += ': '
textstr += str(value)
textstr += '\n'
plt.annotate(textstr, xy=(1,ctr * 12))
ctr += 1
# Plot multi-line graph
for unique_item in df['factor'].unique():
df_subgroup = df[df['factor'] == unique_item]
# Make plot
plt.plot(df_subgroup.tmstmp, df_subgroup.metric, label=unique_item)
plt.legend()
# Make interactive legend
# handles, labels = ax.get_legend_handles_labels()
# interactive_legend = plugins.InteractiveLegendPlugin(zip(
# handles, ax.collections), labels, alpha_unsel=0.5, alpha_over=1.5, start_visible=True)
# plugins.connect(fig, interactive_legend)
return fig
def hbar_plot(df, query_params, text):
"""
INPUT: dataframe with metric and factor columns
OUTPUT: matplotlib figure
Returns a horizontal bar plot
"""
fig, ax = plt.subplots()
# Add label for x-axis
plt.xlabel(human_readable_translation[query_params.sql_metric])
y_pos = range(df.shape[0])
# ax.barh(y_pos, df.metric, align="center", tick_label=df.factor)
ax.barh(y_pos, df.metric)
label_locations = [x + 0.4 for x in xrange(len(df))]
plt.yticks(label_locations, df.factor)
for i, bar in enumerate(ax.get_children()[:df.shape[0]]):
tooltip = mpld3.plugins.LineLabelTooltip(bar, label=df.metric[i])
mpld3.plugins.connect(fig, tooltip)
return fig
def hist_plot(df, query_params, text):
"""
INPUT: dataframe with metric and factor columns
OUTPUT: matplotlib figure
"""
fig, ax = plt.subplots()
bins = np.floor(
min(max(20, np.sqrt(df.metric.max() - df.metric.min())), 50))
ax.hist(df.metric, bins=bins)
return fig
if __name__ == "__main__":
pass
| apache-2.0 |
e-q/scipy | scipy/stats/kde.py | 1 | 21567 | #-------------------------------------------------------------------------------
#
# Define classes for (uni/multi)-variate kernel density estimation.
#
# Currently, only Gaussian kernels are implemented.
#
# Written by: Robert Kern
#
# Date: 2004-08-09
#
# Modified: 2005-02-10 by Robert Kern.
# Contributed to SciPy
# 2005-10-07 by Robert Kern.
# Some fixes to match the new scipy_core
#
# Copyright 2004-2005 by Enthought, Inc.
#
#-------------------------------------------------------------------------------
# Standard library imports.
import warnings
# SciPy imports.
from scipy import linalg, special
from scipy.special import logsumexp
from scipy._lib._util import check_random_state
from numpy import (asarray, atleast_2d, reshape, zeros, newaxis, dot, exp, pi,
sqrt, ravel, power, atleast_1d, squeeze, sum, transpose,
ones, cov)
import numpy as np
# Local imports.
from . import mvn
from ._stats import gaussian_kernel_estimate
__all__ = ['gaussian_kde']
class gaussian_kde(object):
"""Representation of a kernel-density estimate using Gaussian kernels.
Kernel density estimation is a way to estimate the probability density
function (PDF) of a random variable in a non-parametric way.
`gaussian_kde` works for both uni-variate and multi-variate data. It
includes automatic bandwidth determination. The estimation works best for
a unimodal distribution; bimodal or multi-modal distributions tend to be
oversmoothed.
Parameters
----------
dataset : array_like
Datapoints to estimate from. In case of univariate data this is a 1-D
array, otherwise a 2-D array with shape (# of dims, # of data).
bw_method : str, scalar or callable, optional
The method used to calculate the estimator bandwidth. This can be
'scott', 'silverman', a scalar constant or a callable. If a scalar,
this will be used directly as `kde.factor`. If a callable, it should
take a `gaussian_kde` instance as only parameter and return a scalar.
If None (default), 'scott' is used. See Notes for more details.
weights : array_like, optional
weights of datapoints. This must be the same shape as dataset.
If None (default), the samples are assumed to be equally weighted
Attributes
----------
dataset : ndarray
The dataset with which `gaussian_kde` was initialized.
d : int
Number of dimensions.
n : int
Number of datapoints.
neff : int
Effective number of datapoints.
.. versionadded:: 1.2.0
factor : float
The bandwidth factor, obtained from `kde.covariance_factor`, with which
the covariance matrix is multiplied.
covariance : ndarray
The covariance matrix of `dataset`, scaled by the calculated bandwidth
(`kde.factor`).
inv_cov : ndarray
The inverse of `covariance`.
Methods
-------
evaluate
__call__
integrate_gaussian
integrate_box_1d
integrate_box
integrate_kde
pdf
logpdf
resample
set_bandwidth
covariance_factor
Notes
-----
Bandwidth selection strongly influences the estimate obtained from the KDE
(much more so than the actual shape of the kernel). Bandwidth selection
can be done by a "rule of thumb", by cross-validation, by "plug-in
methods" or by other means; see [3]_, [4]_ for reviews. `gaussian_kde`
uses a rule of thumb, the default is Scott's Rule.
Scott's Rule [1]_, implemented as `scotts_factor`, is::
n**(-1./(d+4)),
with ``n`` the number of data points and ``d`` the number of dimensions.
In the case of unequally weighted points, `scotts_factor` becomes::
neff**(-1./(d+4)),
with ``neff`` the effective number of datapoints.
Silverman's Rule [2]_, implemented as `silverman_factor`, is::
(n * (d + 2) / 4.)**(-1. / (d + 4)).
or in the case of unequally weighted points::
(neff * (d + 2) / 4.)**(-1. / (d + 4)).
Good general descriptions of kernel density estimation can be found in [1]_
and [2]_, the mathematics for this multi-dimensional implementation can be
found in [1]_.
With a set of weighted samples, the effective number of datapoints ``neff``
is defined by::
neff = sum(weights)^2 / sum(weights^2)
as detailed in [5]_.
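    For example, three points with weights ``[0.5, 0.25, 0.25]`` give
    ``neff = 1.0**2 / 0.375 = 8/3`` (about 2.67).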
References
----------
.. [1] D.W. Scott, "Multivariate Density Estimation: Theory, Practice, and
Visualization", John Wiley & Sons, New York, Chicester, 1992.
.. [2] B.W. Silverman, "Density Estimation for Statistics and Data
Analysis", Vol. 26, Monographs on Statistics and Applied Probability,
Chapman and Hall, London, 1986.
.. [3] B.A. Turlach, "Bandwidth Selection in Kernel Density Estimation: A
Review", CORE and Institut de Statistique, Vol. 19, pp. 1-33, 1993.
.. [4] D.M. Bashtannyk and R.J. Hyndman, "Bandwidth selection for kernel
conditional density estimation", Computational Statistics & Data
Analysis, Vol. 36, pp. 279-298, 2001.
.. [5] Gray P. G., 1969, Journal of the Royal Statistical Society.
Series A (General), 132, 272
Examples
--------
Generate some random two-dimensional data:
>>> from scipy import stats
>>> def measure(n):
... "Measurement model, return two coupled measurements."
... m1 = np.random.normal(size=n)
... m2 = np.random.normal(scale=0.5, size=n)
... return m1+m2, m1-m2
>>> m1, m2 = measure(2000)
>>> xmin = m1.min()
>>> xmax = m1.max()
>>> ymin = m2.min()
>>> ymax = m2.max()
Perform a kernel density estimate on the data:
>>> X, Y = np.mgrid[xmin:xmax:100j, ymin:ymax:100j]
>>> positions = np.vstack([X.ravel(), Y.ravel()])
>>> values = np.vstack([m1, m2])
>>> kernel = stats.gaussian_kde(values)
>>> Z = np.reshape(kernel(positions).T, X.shape)
Plot the results:
>>> import matplotlib.pyplot as plt
>>> fig, ax = plt.subplots()
>>> ax.imshow(np.rot90(Z), cmap=plt.cm.gist_earth_r,
... extent=[xmin, xmax, ymin, ymax])
>>> ax.plot(m1, m2, 'k.', markersize=2)
>>> ax.set_xlim([xmin, xmax])
>>> ax.set_ylim([ymin, ymax])
>>> plt.show()
"""
def __init__(self, dataset, bw_method=None, weights=None):
self.dataset = atleast_2d(asarray(dataset))
if not self.dataset.size > 1:
raise ValueError("`dataset` input should have multiple elements.")
self.d, self.n = self.dataset.shape
if weights is not None:
self._weights = atleast_1d(weights).astype(float)
self._weights /= sum(self._weights)
if self.weights.ndim != 1:
raise ValueError("`weights` input should be one-dimensional.")
if len(self._weights) != self.n:
raise ValueError("`weights` input should be of length n")
self._neff = 1/sum(self._weights**2)
self.set_bandwidth(bw_method=bw_method)
def evaluate(self, points):
"""Evaluate the estimated pdf on a set of points.
Parameters
----------
points : (# of dimensions, # of points)-array
Alternatively, a (# of dimensions,) vector can be passed in and
treated as a single point.
Returns
-------
values : (# of points,)-array
The values at each point.
Raises
------
ValueError : if the dimensionality of the input points is different than
the dimensionality of the KDE.
"""
points = atleast_2d(asarray(points))
d, m = points.shape
if d != self.d:
if d == 1 and m == self.d:
# points was passed in as a row vector
points = reshape(points, (self.d, 1))
m = 1
else:
msg = "points have dimension %s, dataset has dimension %s" % (d,
self.d)
raise ValueError(msg)
output_dtype = np.common_type(self.covariance, points)
itemsize = np.dtype(output_dtype).itemsize
if itemsize == 4:
spec = 'float'
elif itemsize == 8:
spec = 'double'
elif itemsize in (12, 16):
spec = 'long double'
else:
raise TypeError('%s has unexpected item size %d' %
(output_dtype, itemsize))
result = gaussian_kernel_estimate[spec](self.dataset.T, self.weights[:, None],
points.T, self.inv_cov, output_dtype)
return result[:, 0]
__call__ = evaluate
def integrate_gaussian(self, mean, cov):
"""
Multiply estimated density by a multivariate Gaussian and integrate
over the whole space.
Parameters
----------
        mean : array_like
A 1-D array, specifying the mean of the Gaussian.
cov : array_like
A 2-D array, specifying the covariance matrix of the Gaussian.
Returns
-------
result : scalar
The value of the integral.
Raises
------
ValueError
If the mean or covariance of the input Gaussian differs from
the KDE's dimensionality.
"""
mean = atleast_1d(squeeze(mean))
cov = atleast_2d(cov)
if mean.shape != (self.d,):
raise ValueError("mean does not have dimension %s" % self.d)
if cov.shape != (self.d, self.d):
raise ValueError("covariance does not have dimension %s" % self.d)
# make mean a column vector
mean = mean[:, newaxis]
sum_cov = self.covariance + cov
# This will raise LinAlgError if the new cov matrix is not s.p.d
# cho_factor returns (ndarray, bool) where bool is a flag for whether
# or not ndarray is upper or lower triangular
sum_cov_chol = linalg.cho_factor(sum_cov)
diff = self.dataset - mean
tdiff = linalg.cho_solve(sum_cov_chol, diff)
sqrt_det = np.prod(np.diagonal(sum_cov_chol[0]))
norm_const = power(2 * pi, sum_cov.shape[0] / 2.0) * sqrt_det
energies = sum(diff * tdiff, axis=0) / 2.0
result = sum(exp(-energies)*self.weights, axis=0) / norm_const
return result
def integrate_box_1d(self, low, high):
"""
Computes the integral of a 1D pdf between two bounds.
Parameters
----------
low : scalar
Lower bound of integration.
high : scalar
Upper bound of integration.
Returns
-------
value : scalar
The result of the integral.
Raises
------
ValueError
If the KDE is over more than one dimension.
"""
if self.d != 1:
raise ValueError("integrate_box_1d() only handles 1D pdfs")
stdev = ravel(sqrt(self.covariance))[0]
normalized_low = ravel((low - self.dataset) / stdev)
normalized_high = ravel((high - self.dataset) / stdev)
value = np.sum(self.weights*(
special.ndtr(normalized_high) -
special.ndtr(normalized_low)))
return value
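    # A brief usage sketch (editor note, not part of the SciPy source); the quoted
    # value is only a rough expectation for approximately standard-normal data:
    #   >>> kde = gaussian_kde(np.random.randn(1000))
    #   >>> kde.integrate_box_1d(-1.0, 1.0)   # close to 0.68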
def integrate_box(self, low_bounds, high_bounds, maxpts=None):
"""Computes the integral of a pdf over a rectangular interval.
Parameters
----------
low_bounds : array_like
A 1-D array containing the lower bounds of integration.
high_bounds : array_like
A 1-D array containing the upper bounds of integration.
maxpts : int, optional
The maximum number of points to use for integration.
Returns
-------
value : scalar
The result of the integral.
"""
if maxpts is not None:
extra_kwds = {'maxpts': maxpts}
else:
extra_kwds = {}
value, inform = mvn.mvnun_weighted(low_bounds, high_bounds,
self.dataset, self.weights,
self.covariance, **extra_kwds)
if inform:
msg = ('An integral in mvn.mvnun requires more points than %s' %
(self.d * 1000))
warnings.warn(msg)
return value
def integrate_kde(self, other):
"""
Computes the integral of the product of this kernel density estimate
with another.
Parameters
----------
other : gaussian_kde instance
The other kde.
Returns
-------
value : scalar
The result of the integral.
Raises
------
ValueError
If the KDEs have different dimensionality.
"""
if other.d != self.d:
raise ValueError("KDEs are not the same dimensionality")
# we want to iterate over the smallest number of points
if other.n < self.n:
small = other
large = self
else:
small = self
large = other
sum_cov = small.covariance + large.covariance
sum_cov_chol = linalg.cho_factor(sum_cov)
result = 0.0
for i in range(small.n):
mean = small.dataset[:, i, newaxis]
diff = large.dataset - mean
tdiff = linalg.cho_solve(sum_cov_chol, diff)
energies = sum(diff * tdiff, axis=0) / 2.0
result += sum(exp(-energies)*large.weights, axis=0)*small.weights[i]
sqrt_det = np.prod(np.diagonal(sum_cov_chol[0]))
norm_const = power(2 * pi, sum_cov.shape[0] / 2.0) * sqrt_det
result /= norm_const
return result
def resample(self, size=None, seed=None):
"""
Randomly sample a dataset from the estimated pdf.
Parameters
----------
size : int, optional
The number of samples to draw. If not provided, then the size is
the same as the effective number of samples in the underlying
dataset.
seed : {None, int, `~np.random.RandomState`, `~np.random.Generator`}, optional
This parameter defines the object to use for drawing random
variates.
If `seed` is `None` the `~np.random.RandomState` singleton is used.
If `seed` is an int, a new ``RandomState`` instance is used, seeded
with seed.
If `seed` is already a ``RandomState`` or ``Generator`` instance,
then that object is used.
Default is None.
Specify `seed` for reproducible drawing of random variates.
Returns
-------
resample : (self.d, `size`) ndarray
The sampled dataset.
"""
if size is None:
size = int(self.neff)
random_state = check_random_state(seed)
norm = transpose(random_state.multivariate_normal(
zeros((self.d,), float), self.covariance, size=size
))
indices = random_state.choice(self.n, size=size, p=self.weights)
means = self.dataset[:, indices]
return means + norm
def scotts_factor(self):
"""Compute Scott's factor.
Returns
-------
s : float
Scott's factor.
"""
return power(self.neff, -1./(self.d+4))
def silverman_factor(self):
"""Compute the Silverman factor.
Returns
-------
s : float
The silverman factor.
"""
return power(self.neff*(self.d+2.0)/4.0, -1./(self.d+4))
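    # Editor's illustrative note: for d = 1 and neff = 100, the formulas above give
    # a Scott factor of 100**(-1/5) ~= 0.40 and a Silverman factor of
    # (100 * 3 / 4.)**(-1/5) ~= 0.42.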
# Default method to calculate bandwidth, can be overwritten by subclass
covariance_factor = scotts_factor
covariance_factor.__doc__ = """Computes the coefficient (`kde.factor`) that
multiplies the data covariance matrix to obtain the kernel covariance
matrix. The default is `scotts_factor`. A subclass can overwrite this
method to provide a different method, or set it through a call to
`kde.set_bandwidth`."""
def set_bandwidth(self, bw_method=None):
"""Compute the estimator bandwidth with given method.
The new bandwidth calculated after a call to `set_bandwidth` is used
for subsequent evaluations of the estimated density.
Parameters
----------
bw_method : str, scalar or callable, optional
The method used to calculate the estimator bandwidth. This can be
'scott', 'silverman', a scalar constant or a callable. If a
scalar, this will be used directly as `kde.factor`. If a callable,
it should take a `gaussian_kde` instance as only parameter and
return a scalar. If None (default), nothing happens; the current
`kde.covariance_factor` method is kept.
Notes
-----
.. versionadded:: 0.11
Examples
--------
>>> import scipy.stats as stats
>>> x1 = np.array([-7, -5, 1, 4, 5.])
>>> kde = stats.gaussian_kde(x1)
>>> xs = np.linspace(-10, 10, num=50)
>>> y1 = kde(xs)
>>> kde.set_bandwidth(bw_method='silverman')
>>> y2 = kde(xs)
>>> kde.set_bandwidth(bw_method=kde.factor / 3.)
>>> y3 = kde(xs)
>>> import matplotlib.pyplot as plt
>>> fig, ax = plt.subplots()
>>> ax.plot(x1, np.full(x1.shape, 1 / (4. * x1.size)), 'bo',
... label='Data points (rescaled)')
>>> ax.plot(xs, y1, label='Scott (default)')
>>> ax.plot(xs, y2, label='Silverman')
>>> ax.plot(xs, y3, label='Const (1/3 * Silverman)')
>>> ax.legend()
>>> plt.show()
"""
if bw_method is None:
pass
elif bw_method == 'scott':
self.covariance_factor = self.scotts_factor
elif bw_method == 'silverman':
self.covariance_factor = self.silverman_factor
elif np.isscalar(bw_method) and not isinstance(bw_method, str):
self._bw_method = 'use constant'
self.covariance_factor = lambda: bw_method
elif callable(bw_method):
self._bw_method = bw_method
self.covariance_factor = lambda: self._bw_method(self)
else:
msg = "`bw_method` should be 'scott', 'silverman', a scalar " \
"or a callable."
raise ValueError(msg)
self._compute_covariance()
def _compute_covariance(self):
"""Computes the covariance matrix for each Gaussian kernel using
covariance_factor().
"""
self.factor = self.covariance_factor()
# Cache covariance and inverse covariance of the data
if not hasattr(self, '_data_inv_cov'):
self._data_covariance = atleast_2d(cov(self.dataset, rowvar=1,
bias=False,
aweights=self.weights))
self._data_inv_cov = linalg.inv(self._data_covariance)
self.covariance = self._data_covariance * self.factor**2
self.inv_cov = self._data_inv_cov / self.factor**2
self._norm_factor = sqrt(linalg.det(2*pi*self.covariance))
def pdf(self, x):
"""
Evaluate the estimated pdf on a provided set of points.
Notes
-----
This is an alias for `gaussian_kde.evaluate`. See the ``evaluate``
docstring for more details.
"""
return self.evaluate(x)
def logpdf(self, x):
"""
Evaluate the log of the estimated pdf on a provided set of points.
"""
points = atleast_2d(x)
d, m = points.shape
if d != self.d:
if d == 1 and m == self.d:
# points was passed in as a row vector
points = reshape(points, (self.d, 1))
m = 1
else:
msg = "points have dimension %s, dataset has dimension %s" % (d,
self.d)
raise ValueError(msg)
if m >= self.n:
# there are more points than data, so loop over data
energy = np.empty((self.n, m), dtype=float)
for i in range(self.n):
diff = self.dataset[:, i, newaxis] - points
tdiff = dot(self.inv_cov, diff)
energy[i] = sum(diff*tdiff, axis=0) / 2.0
result = logsumexp(-energy.T,
b=self.weights / self._norm_factor, axis=1)
else:
# loop over points
result = np.empty((m,), dtype=float)
for i in range(m):
diff = self.dataset - points[:, i, newaxis]
tdiff = dot(self.inv_cov, diff)
energy = sum(diff * tdiff, axis=0) / 2.0
result[i] = logsumexp(-energy, b=self.weights /
self._norm_factor)
return result
@property
def weights(self):
try:
return self._weights
except AttributeError:
self._weights = ones(self.n)/self.n
return self._weights
@property
def neff(self):
try:
return self._neff
except AttributeError:
self._neff = 1/sum(self.weights**2)
return self._neff
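# A hedged usage sketch (editor addition, not part of the SciPy source), showing the
# weighted form of the estimator; data and weights are made up for illustration:
#   >>> import numpy as np
#   >>> from scipy.stats import gaussian_kde
#   >>> data = np.random.randn(500)
#   >>> weights = np.random.rand(500)          # normalized internally
#   >>> kde = gaussian_kde(data, weights=weights)
#   >>> density = kde(np.linspace(-3, 3, 61))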
| bsd-3-clause |
rendrom/rosreestr2coord | rosreestr2coord/parser.py | 1 | 14095 | # coding: utf-8
import copy
import json
import os
import re
import string
from rosreestr2coord.merge_tiles import PkkAreaMerger
from .export import coords2geojson
from .logger import logger
from .utils import xy2lonlat, make_request, TimeoutException
from rosreestr2coord.export import coords2kml
try:
import urllib.parse
from urllib.parse import urlencode
except ImportError: # For Python 3
import urllib.parse as urlparse
from urllib.parse import urlencode
##############
# SEARCH URL #
##############
# https://pkk.rosreestr.ru/api/features/1
# ?text=38:36:000021:1106
# &tolerance=4
# &limit=11
SEARCH_URL = 'https://pkk.rosreestr.ru/api/features/$area_type'
############################
# URL to get area metainfo #
############################
# https://pkk.rosreestr.ru/api/features/1/38:36:21:1106
FEATURE_INFO_URL = 'https://pkk.rosreestr.ru/api/features/$area_type/'
#########################
# URL to get area image #
#########################
# https://pkk.rosreestr.ru/arcgis/rest/services/PKK6/CadastreSelected/MapServer/export
# ?dpi=96
# &transparent=true
# &format=png32
# &layers=show%3A6%2C7
# &bbox=11612029.005008286%2C6849457.6834302815%2C11612888.921576614%2C6849789.706771941
# &bboxSR=102100
# &imageSR=102100
# &size=1440%2C556
# &layerDefs=%7B%226%22%3A%22ID%20%3D%20%2738%3A36%3A21%3A1106%27%22%2C%227%22%3A%22ID%20%3D%20%2738%3A36%3A21%3A1106%27%22%7D
# &f=image
# WHERE:
# 'layerDefs' decode to {'6':'ID = '38:36:21:1106'','7':'ID = '38:36:21:1106''}
# 'f' may be `json` or `html`
# set `&format=svg&f=json` to export image in svg !closed by rosreestr, now only PNG
TYPES = {
'Участки': 1,
'ОКС': 5,
'Кварталы': 2,
'Районы': 3,
'Округа': 4,
'Границы': 7,
'ЗОУИТ': 10,
'Тер. зоны': 6,
'Красные линии': 13,
'Лес': 12,
'СРЗУ': 15,
'ОЭЗ': 16,
'ГОК': 9,
}
class NoCoordinatesException(Exception):
pass
class Area:
code = ''
code_id = '' # from feature info attr id
buffer = 10
xy = [] # [[[area1], [hole1], [holeN]], [[area2]]]
image_xy_corner = [] # cartesian coord from image, for draw plot
width = 0
height = 0
image_path = ''
extent = {}
image_extent = {}
center = {'x': None, 'y': None}
attrs = {}
def __init__(self,
code='',
area_type=1,
epsilon=5,
media_path='',
with_log=True,
coord_out='EPSG:4326',
center_only=False,
with_proxy=False,
use_cache=True
):
self.with_log = with_log
self.area_type = area_type
self.media_path = media_path
self.center_only = center_only
self.epsilon = epsilon
self.code = code
self.file_name = self.code[:].replace(':', '_')
self.with_proxy = with_proxy
self.use_cache = use_cache
self.coord_out = coord_out
t = string.Template(SEARCH_URL)
self.search_url = t.substitute({'area_type': area_type})
t = string.Template(FEATURE_INFO_URL)
self.feature_info_url = t.substitute({'area_type': area_type})
if not code:
return
self.workspace = self.create_workspace()
feature_info = self.download_feature_info()
if feature_info:
self.get_geometry()
else:
self.log('Nothing found')
def create_workspace(self):
if not self.media_path:
self.media_path = os.getcwd()
area_path_name = self.clear_code(self.code).replace(':', '_')
workspace = os.path.join(
self.media_path, 'tmp', area_path_name)
if not os.path.isdir(workspace):
os.makedirs(workspace)
return workspace
def get_coord(self):
if self.xy:
return self.xy
center = self.get_center_xy()
if center:
return center
return []
def get_attrs(self):
return self.attrs
def _prepare_attrs(self):
if self.attrs:
for a in self.attrs:
attr = self.attrs[a]
if isinstance(attr, str):
try:
attr = attr.strip()
self.attrs[a] = attr
except Exception:
pass
return self.attrs
def to_geojson_poly(self, with_attrs=True, dumps=True):
return self.to_geojson('polygon', with_attrs, dumps)
def to_geojson_center(self, with_attrs=True, dumps=True):
current_center_status = self.center_only
self.center_only = True
to_return = self.to_geojson('point', with_attrs, dumps)
self.center_only = current_center_status
return to_return
def to_geojson(self, geom_type='point', with_attrs=True, dumps=True):
attrs = False
if with_attrs:
attrs = self._prepare_attrs()
xy = []
if self.center_only:
xy = self.get_center_xy()
geom_type = 'point'
else:
xy = self.xy
if xy and len(xy):
feature_collection = coords2geojson(
xy, geom_type, self.coord_out, attrs=attrs)
if feature_collection:
if dumps:
return json.dumps(feature_collection)
return feature_collection
return False
def to_kml(self):
return coords2kml(self.xy, self._prepare_attrs())
def get_center_xy(self):
center = self.attrs.get('center')
if center:
xy = [[[[center['x'], center['y']]]]]
return xy
return False
def make_request(self, url):
response = make_request(url, self.with_proxy)
return response
def download_feature_info(self):
feature_info_path = os.path.join(self.workspace, 'feature_info.json')
data = False
if self.use_cache:
try:
with open(feature_info_path, 'r') as data_file:
data = json.loads(data_file.read())
except Exception:
pass
try:
if not data:
search_url = self.feature_info_url + self.clear_code(self.code)
self.log('Start downloading area info: %s' % search_url)
resp = self.make_request(search_url)
data = json.loads(resp.decode('utf-8'))
self.log('Area info downloaded.')
with open(feature_info_path, 'w') as outfile:
json.dump(data, outfile)
else:
self.log(
'Area info loaded from file: {}'.format(feature_info_path))
if data:
feature = data.get('feature')
if feature:
attrs = feature.get('attrs')
if attrs:
self.attrs = attrs
self.code_id = attrs['id']
if feature.get('extent'):
self.extent = feature['extent']
if feature.get('center'):
x = feature['center']['x']
y = feature['center']['y']
if self.coord_out == 'EPSG:4326':
(x, y) = xy2lonlat(x, y)
self.center = {'x': x, 'y': y}
self.attrs['center'] = self.center
return feature
except TimeoutException:
raise TimeoutException()
except Exception as error:
self.error(error)
return False
@staticmethod
def clear_code(code):
        '''remove leading zeros from each part of the code, e.g. xxxx:00xx >> xxxx:xx'''
if re.match(r'^\d+(\:\d+)', code):
return ':'.join([str(int(x)) for x in code.split(':')])
return code
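    # Illustration (editor note), consistent with the example URLs in the header:
    #   >>> Area.clear_code('38:36:000021:1106')
    #   '38:36:21:1106'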
@staticmethod
def get_extent_list(extent):
        '''convert an extent dict to an ordered [xmin, ymin, xmax, ymax] array'''
return [extent['xmin'], extent['ymin'], extent['xmax'], extent['ymax']]
def get_buffer_extent_list(self):
'''add some buffer to ordered extent array'''
ex = self.extent
buf = self.buffer
if ex and ex['xmin']:
ex = [ex['xmin'] - buf, ex['ymin'] - buf,
ex['xmax'] + buf, ex['ymax'] + buf]
else:
self.log('Area has no coordinates')
# raise NoCoordinatesException()
return ex
def get_geometry(self):
if self.center_only:
return self.get_center_xy()
else:
return self.parse_geometry_from_image()
def parse_geometry_from_image(self):
formats = ['png']
for f in formats:
bbox = self.get_buffer_extent_list()
if bbox:
image = PkkAreaMerger(bbox=self.get_buffer_extent_list(),
output_format=f, with_log=self.with_log,
clear_code=self.clear_code(self.code_id),
output_dir=self.workspace,
requester=self.make_request,
use_cache=self.use_cache,
area_type=self.area_type
)
image.download()
self.image_path = image.merge_tiles()
self.width = image.real_width
self.height = image.real_height
self.image_extent = image.image_extent
if image:
return self.get_image_geometry()
def get_image_geometry(self):
'''
get corner geometry array from downloaded image
[area1],[area2] - may be multipolygon geometry
|
[self],[hole_1],[hole_N] - holes is optional
|
[coord1],[coord2],[coord3] - min 3 coord for polygon
|
[x,y] - coordinate pair
Example:
[[ [ [x,y],[x,y],[x,y] ], [ [x,y],[x,y],[x,y] ], ], [ [x,y],[x,y],[x,y] ], [ [x,y],[x,y],[x,y] ] ]
-----------------first polygon----------------- ----------------second polygon--------------
----outer contour--- --first hole contour-
'''
image_xy_corner = self.image_xy_corner = self.get_image_xy_corner()
if image_xy_corner:
self.xy = copy.deepcopy(image_xy_corner)
for geom in self.xy:
for p in range(len(geom)):
geom[p] = self.image_corners_to_coord(geom[p])
return self.xy
return []
def get_image_xy_corner(self):
        '''get Cartesian coordinates from the raster'''
import cv2
import numpy
if not self.image_path:
return False
image_xy_corners = []
try:
# img = cv2.imread(self.image_path, cv2.IMREAD_GRAYSCALE)
stream = open(self.image_path, "rb")
bytes = bytearray(stream.read())
numpyarray = numpy.asarray(bytes, dtype=numpy.uint8)
img = cv2.imdecode(numpyarray, cv2.IMREAD_GRAYSCALE)
imagem = (255 - img)
ret, thresh = cv2.threshold(imagem, 10, 128, cv2.THRESH_BINARY)
try:
contours, hierarchy = cv2.findContours(
thresh, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
except Exception:
im2, contours, hierarchy = cv2.findContours(
thresh, cv2.RETR_CCOMP, cv2.CHAIN_APPROX_SIMPLE)
hierarchy = hierarchy[0]
hierarchy_contours = [[] for _ in range(len(hierarchy))]
for fry in range(len(contours)):
currentContour = contours[fry]
currentHierarchy = hierarchy[fry]
cc = []
perimeter = cv2.arcLength(currentContour, True)
# epsilon = 0.001 * cv2.arcLength(currentContour, True)
# epsilon = epsilon * self.epsilon
epsilon = self.epsilon
approx = cv2.approxPolyDP(currentContour, epsilon, True)
if len(approx) > 2:
for c in approx:
cc.append([c[0][0], c[0][1]])
parent_index = currentHierarchy[3]
index = fry if parent_index < 0 else parent_index
hierarchy_contours[index].append(cc)
image_xy_corners = [c for c in hierarchy_contours if len(c) > 0]
return image_xy_corners
except Exception as ex:
self.error(ex)
return image_xy_corners
def image_corners_to_coord(self, image_xy_corners):
'''calculate spatial coordinates from cartesian'''
ex = self.get_extent_list(self.image_extent)
dx = ((ex[2] - ex[0]) / self.width)
dy = ((ex[3] - ex[1]) / self.height)
xy_corners = []
for im_x, im_y in image_xy_corners:
x = ex[0] + (im_x * dx)
y = ex[3] - (im_y * dy)
if self.coord_out == 'EPSG:4326':
(x, y) = xy2lonlat(x, y)
xy_corners.append([x, y])
return xy_corners
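    # Editor's worked example of the mapping above (made-up numbers): for an image
    # extent [xmin, ymin, xmax, ymax] = [0, 0, 100, 50] rendered at 200 x 100 px,
    # dx = dy = 0.5, so pixel (10, 20) maps to x = 0 + 10 * 0.5 = 5 and
    # y = 50 - 20 * 0.5 = 40 (y is flipped because image rows grow downwards).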
def show_plot(self):
'''Development tool'''
import cv2
try:
from matplotlib import pyplot as plt
except ImportError:
self.error('Matplotlib is not installed.')
raise ImportError('matplotlib is not installed.')
img = cv2.imread(self.image_path)
for polygones in self.image_xy_corner:
for corners in polygones:
for x, y in corners:
cv2.circle(img, (x, y), 3, 255, -1)
plt.imshow(img), plt.show()
def log(self, msg):
if self.with_log:
print(msg)
def error(self, msg):
if self.with_log:
logger.warning(msg)
| mit |
ricorx7/rti-python | tests/test_bokeh_wamp_df.py | 2 | 6298 | import json
import sys
from twisted.logger import Logger
from twisted.internet.defer import inlineCallbacks
from autobahn.twisted.wamp import ApplicationSession
from autobahn.twisted.wamp import ApplicationRunner
from bokeh.client import push_session
from bokeh.plotting import figure, curdoc
from bokeh.models.widgets import Panel, Tabs
from bokeh.models import Range1d
from Frontend.qt.test.browser import Browser
from PyQt5.QtWidgets import QApplication
from PyQt5.QtCore import QUrl
import numpy as np
import pandas as pd
class test_bokeh_wamp_df(ApplicationSession):
def __init__(self, config=None):
ApplicationSession.__init__(self, config)
@inlineCallbacks
def onJoin(self, details):
"""
Initialize the WAMP settings. This is called before everything is setup to ensure
the WAMP settings are initialized.
:return:
"""
self.log.info("WAMP connected")
yield self.subscribe(self.on_ens_json_data, u"com.rti.data.ens")
self.log.info("test Bokehs WAMP init")
def on_ens_json_data(self, data):
"""
Called when JSON Ensemble data is received from WAMP.
:param data: JSON object containing serial data.
:return:
"""
json_data = json.loads(data) # convert to JSON
self.amp = json_data['Amplitude'] # Get the amplitude data
amp_np = np.array(json_data['Amplitude']['Amplitude']) # Create a numpy array from the amplitude data
df = pd.DataFrame(columns=['AmpB0', 'AmpB1', 'AmpB2', 'AmpB3'], data=amp_np) # Create a description(name) for the columns
corr_np = np.array(json_data['Correlation']['Correlation']) # Get the correlation data
corr_df = pd.DataFrame(columns=['CorrB0', 'CorrB1', 'CorrB2', 'CorrB3'], data=corr_np) # Create a numpy array from the correlation data
corr_scale = lambda x: x*100 # Mulitply by 100 to make percent
corr_df = corr_df.applymap(corr_scale) # Scale from 0% to 100% # Apply lambda function
df = df.join(corr_df) # Combine the amplitude and correlation dataframe
#print(df.shape)
#print(df)
self.config.extra['ampB0'].data_source.data["y"] = df.index
self.config.extra['ampB0'].data_source.data["x"] = df.loc[:, 'AmpB0']
self.config.extra['ampB1'].data_source.data["y"] = df.index
self.config.extra['ampB1'].data_source.data["x"] = df.loc[:, 'AmpB1']
self.config.extra['ampB2'].data_source.data["y"] = df.index
self.config.extra['ampB2'].data_source.data["x"] = df.loc[:, 'AmpB2']
self.config.extra['ampB3'].data_source.data["y"] = df.index
self.config.extra['ampB3'].data_source.data["x"] = df.loc[:, 'AmpB3']
self.config.extra['corrB0'].data_source.data["y"] = df.index
self.config.extra['corrB0'].data_source.data["x"] = df.loc[:, 'CorrB0']
self.config.extra['corrB1'].data_source.data["y"] = df.index
self.config.extra['corrB1'].data_source.data["x"] = df.loc[:, 'CorrB1']
self.config.extra['corrB2'].data_source.data["y"] = df.index
self.config.extra['corrB2'].data_source.data["x"] = df.loc[:, 'CorrB2']
self.config.extra['corrB3'].data_source.data["y"] = df.index
self.config.extra['corrB3'].data_source.data["x"] = df.loc[:, 'CorrB3']
# self.config.extra['corrB3'].y_range = Range1d(df.index.max, df.index.min) # Invert axis
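        # Hedged sketch of the payload this handler assumes (key names are taken from
        # the code above; the number of rows, i.e. bins, is illustrative only):
        #   {"Amplitude":   {"Amplitude":   [[b0, b1, b2, b3], ...]},
        #    "Correlation": {"Correlation": [[b0, b1, b2, b3], ...]}}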
if __name__ == '__main__':
x = np.array([1])
y = np.array([1])
TOOLS = 'pan,box_zoom,wheel_zoom,box_select,crosshair,resize,reset,save,hover'
ampPlot = figure(plot_width=600, plot_height=800, tools=TOOLS, x_range=Range1d(0, 140))
ampPlot.legend.location = "top_left"
ampPlot.legend.click_policy = "hide"
ampPlot.xaxis[0].axis_label="dB"
ampPlot.yaxis[0].axis_label = "Bin"
ampB0 = ampPlot.line(x=x, y=y, line_width=2, alpha=.85, color='red', legend="B0")
ampB1 = ampPlot.line(x=x, y=y, line_width=2, alpha=.85, color='green', legend="B1")
ampB2 = ampPlot.line(x=x, y=y, line_width=2, alpha=.85, color='blue', legend="B2")
ampB3 = ampPlot.line(x=x, y=y, line_width=2, alpha=.85, color='orange', legend="B3")
tabAmp = Panel(child=ampPlot, title="Amplitude")
corrPlot = figure(plot_width=600, plot_height=800, tools=TOOLS, x_range=Range1d(0, 100))
corrPlot.legend.location = "top_left"
corrPlot.legend.click_policy = "hide"
corrPlot.xaxis[0].axis_label = "% (percent)"
corrPlot.yaxis[0].axis_label = "Bin"
corrB0 = corrPlot.line(x=x, y=y, line_width=2, alpha=.85, color='red', legend="B0")
corrB1 = corrPlot.line(x=x, y=y, line_width=2, alpha=.85, color='green', legend="B1")
corrB2 = corrPlot.line(x=x, y=y, line_width=2, alpha=.85, color='blue', legend="B2")
corrB3 = corrPlot.line(x=x, y=y, line_width=2, alpha=.85, color='orange', legend="B3")
tabCorr = Panel(child=corrPlot, title="Correlation")
tabs = Tabs(tabs=[tabAmp, tabCorr])
doc = curdoc()
doc.title = "Amplitude and Correlation Plot"
# open a session to keep our local document in sync with server
session = push_session(doc)
print("Session ID: ", session)
session.show(tabs) # open the document in a browser
app = QApplication(sys.argv)
brow = Browser()
brow.setUrl(QUrl("http://localhost:5006/?bokeh-session-id=" + str(session.id)))
brow.setMinimumHeight(900)
brow.show()
import qt5reactor
# Add PyQT5 to twisted reactor
qt5reactor.install()
# Start the WAMP connection
# Connect the main window to the WAMP connection
runner = ApplicationRunner(url=u"ws://localhost:55058/ws", realm=u"realm1",
extra={'ampB0': ampB0, 'ampB1': ampB1, 'ampB2': ampB2, 'ampB3': ampB3,
'corrB0': corrB0, 'corrB1': corrB1, 'corrB2': corrB2, 'corrB3': corrB3})
runner.run(test_bokeh_wamp_df)
session.loop_until_closed() # run forever | bsd-3-clause |
xuewei4d/scikit-learn | benchmarks/bench_lof.py | 23 | 3492 | """
============================
LocalOutlierFactor benchmark
============================
A test of LocalOutlierFactor on classical anomaly detection datasets.
Note that LocalOutlierFactor is not meant to predict on a test set and its
performance is assessed in an outlier detection context:
1. The model is trained on the whole dataset which is assumed to contain
outliers.
2. The ROC curve is computed on the same dataset using the knowledge of the
labels.
In this context there is no need to shuffle the dataset because the model
is trained and tested on the whole dataset. The randomness of this benchmark
is only caused by the random selection of anomalies in the SA dataset.
"""
from time import time
import numpy as np
import matplotlib.pyplot as plt
from sklearn.neighbors import LocalOutlierFactor
from sklearn.metrics import roc_curve, auc
from sklearn.datasets import fetch_kddcup99, fetch_covtype, fetch_openml
from sklearn.preprocessing import LabelBinarizer
print(__doc__)
random_state = 2 # to control the random selection of anomalies in SA
# datasets available: ['http', 'smtp', 'SA', 'SF', 'shuttle', 'forestcover']
datasets = ['http', 'smtp', 'SA', 'SF', 'shuttle', 'forestcover']
plt.figure()
for dataset_name in datasets:
# loading and vectorization
print('loading data')
if dataset_name in ['http', 'smtp', 'SA', 'SF']:
dataset = fetch_kddcup99(subset=dataset_name, percent10=True,
random_state=random_state)
X = dataset.data
y = dataset.target
if dataset_name == 'shuttle':
dataset = fetch_openml('shuttle')
X = dataset.data
y = dataset.target
# we remove data with label 4
# normal data are then those of class 1
s = (y != 4)
X = X[s, :]
y = y[s]
y = (y != 1).astype(int)
if dataset_name == 'forestcover':
dataset = fetch_covtype()
X = dataset.data
y = dataset.target
# normal data are those with attribute 2
# abnormal those with attribute 4
s = (y == 2) + (y == 4)
X = X[s, :]
y = y[s]
y = (y != 2).astype(int)
print('vectorizing data')
if dataset_name == 'SF':
lb = LabelBinarizer()
x1 = lb.fit_transform(X[:, 1].astype(str))
X = np.c_[X[:, :1], x1, X[:, 2:]]
y = (y != b'normal.').astype(int)
if dataset_name == 'SA':
lb = LabelBinarizer()
x1 = lb.fit_transform(X[:, 1].astype(str))
x2 = lb.fit_transform(X[:, 2].astype(str))
x3 = lb.fit_transform(X[:, 3].astype(str))
X = np.c_[X[:, :1], x1, x2, x3, X[:, 4:]]
y = (y != b'normal.').astype(int)
if dataset_name == 'http' or dataset_name == 'smtp':
y = (y != b'normal.').astype(int)
X = X.astype(float)
print('LocalOutlierFactor processing...')
model = LocalOutlierFactor(n_neighbors=20)
tstart = time()
model.fit(X)
fit_time = time() - tstart
scoring = -model.negative_outlier_factor_ # the lower, the more normal
fpr, tpr, thresholds = roc_curve(y, scoring)
AUC = auc(fpr, tpr)
plt.plot(fpr, tpr, lw=1,
label=('ROC for %s (area = %0.3f, train-time: %0.2fs)'
% (dataset_name, AUC, fit_time)))
plt.xlim([-0.05, 1.05])
plt.ylim([-0.05, 1.05])
plt.xlabel('False Positive Rate')
plt.ylabel('True Positive Rate')
plt.title('Receiver operating characteristic')
plt.legend(loc="lower right")
plt.show()
| bsd-3-clause |
johnmgregoire/PythonCompositionPlots | myternaryutility.py | 1 | 13257 | import pylab
import matplotlib.cm as cm
import numpy
from colorsys import hsv_to_rgb, rgb_to_hsv
class TernaryPlot:
def __init__(self, ax_subplottriplet, offset=.02, minlist=[0., 0., 0.], ellabels=['A', 'B', 'C'], allowoutofboundscomps=True, outline=True):
self.offset=offset
self.cartendpts=numpy.float32([[0, 0], [.5, numpy.sqrt(3.)/2.], [1, 0]])
self.ellabels=ellabels
if not ax_subplottriplet is None:
if isinstance(ax_subplottriplet, int):
self.ax=pylab.subplot(ax_subplottriplet)
elif isinstance(ax_subplottriplet, tuple):
a, b, c=ax_subplottriplet
self.ax=pylab.subplot(a, b, c)
else:
self.ax=ax_subplottriplet
self.allowoutofboundscomps=allowoutofboundscomps
minlist=numpy.float32(minlist)
self.rangelist=numpy.float32([[m, 1.-numpy.concatenate([minlist[:i], minlist[i+1:]]).sum()] for i, m in enumerate(minlist)])
self.prepax(outline=outline)
self.mappable=None
def prepax(self, outline=True):
self.ax.set_axis_off()
self.ax.set_aspect('equal')
self.ax.figure.hold('True')
self.ax.set_xlim(-.10, 1.10)
self.ax.set_ylim(-.10, 1.10)
if outline:
self.outline()
def processterncoord(self, terncoordlist, removepoints=True):
terncoordlist=numpy.float32(terncoordlist)
if len(terncoordlist.shape)==1:
terncoordlist=numpy.float32([terncoordlist])
if removepoints and not self.allowoutofboundscomps:
terncoordlist=numpy.float32([t for t in terncoordlist if (not removepoints) or numpy.all(t>=self.rangelist[:, 0]) and numpy.all(t<=self.rangelist[:, 1])])
return terncoordlist
def afftrans(self, terncoordlist):
terncoordlist=self.processterncoord(terncoordlist)
diff=self.rangelist[:, 1]-self.rangelist[:, 0]
mn=self.rangelist[:, 0]
return numpy.float32([(tc-mn)/diff for tc in terncoordlist])
def invafftrans(self, terncoordlist):
terncoordlist=self.processterncoord(terncoordlist, removepoints=False)
diff=self.rangelist[:, 1]-self.rangelist[:, 0]
mn=self.rangelist[:, 0]
return numpy.float32([c*diff+mn for c in terncoordlist])
def toCart(self, terncoordlist):
        'Given an array of composition triples, returns arrays of Cartesian x- and y- coords'
terncoordlist=self.processterncoord(terncoordlist)
aff_tcl=self.afftrans(terncoordlist)
cartxs = 1.-aff_tcl[:, 0]-aff_tcl[:, 1]/2.
cartys = numpy.sqrt(3) * aff_tcl[:, 1] / 2.0
return (cartxs, cartys)
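    # Editor's illustrative note: with the default minlist of zeros, the pure
    # compositions map onto the triangle corners, e.g. [1, 0, 0] -> (0, 0),
    # [0, 1, 0] -> (0.5, ~0.866) and [0, 0, 1] -> (1, 0), matching self.cartendpts.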
def toComp(self, xycoordlist, process=True):
        'Given an array of Cartesian x-y coordinate pairs, returns the corresponding composition triples'
# print '*', xycoordlist
xycoordlist=numpy.float32(xycoordlist)
if len(xycoordlist.shape)==1:
xycoordlist=numpy.float32([xycoordlist])
b=xycoordlist[:, 1]*2./numpy.sqrt(3.)
a=1.-xycoordlist[:, 0]-b/2.
c=1.-a-b
terncoordlist=self.invafftrans(numpy.float32([a, b, c]).T)
# print 'a', a
# print 'b', b
# print 'c', c
# print numpy.float32([a, b, c]).T
# print 'tcl', terncoordlist
if process:
terncoordlist=self.processterncoord(terncoordlist)
# print 'ptcl', terncoordlist
return terncoordlist
def scatter(self, terncoordlist, **kwargs):
'Scatterplots data given in triples, with the matplotlib keyword arguments'
(xs, ys) = self.toCart(terncoordlist)
self.mappable=self.ax.scatter(xs, ys, **kwargs)
# def plot(self, terncoordlist, descriptor, **kwargs):
# (xs, ys) = self.toCart(terncoordlist)
# self.ax.plot(xs, ys, descriptor, **kwargs)
def color_comp_calc(self, terncoordlist, rangelist=None):#could be made more general to allow for endpoint colors other than RGB
if rangelist is None:
rangelist=self.rangelist
return numpy.array([[(c-minc)/(maxc-minc) for c, (minc, maxc) in zip(tc, rangelist)] for tc in terncoordlist])
def colorcompplot(self, terncoordlist, descriptor, colors=None, hollow=False, **kwargs):
(xs, ys) = self.toCart(terncoordlist)
if colors is None:
colors=self.color_comp_calc(terncoordlist)
for col, x, y in zip(colors, xs, ys):
if hollow:
self.ax.plot([x], [y], descriptor, markeredgecolor=col, markerfacecolor='None', **kwargs)
else:
self.ax.plot([x], [y], descriptor, color=col, **kwargs)
def colorbar(self, label='', axrect=[0.86, 0.1, 0.04, 0.8], **kwargs):
'Draws the colorbar and labels it'
if self.mappable is None:
print 'no mappable to create colorbar'
return
else:
self.ax.figure.subplots_adjust(right=axrect[0]-.01)
self.cbax=self.ax.figure.add_axes(axrect)
f=self.ax.figure.colorbar
try:
cb=self.ax.figure.colorbar(self.mappable, cax=self.cbax, **kwargs)
except:
cb=self.ax.figure.colorbar(self.mappable, cax=self.cbax)
try:
cb.set_label(label, **kwargs)
except:
cb.set_label(label)
return cb
def compdist(self, c1, c2):
return ((c1-c2)**2).sum()/2.**.5
def compdist_cart(self, c1, c2):
return self.compdist(self.toCart([c1])[0], self.toCart([c2])[0])
def line(self, begin, end, fmt='k-', **kwargs):
(xs, ys) = self.toCart([begin, end])
self.ax.plot(xs, ys, fmt, **kwargs)
def outline(self):
for i, ep in enumerate(self.cartendpts):
for ep2 in self.cartendpts[i+1:]:
self.ax.plot([ep[0], ep2[0]], [ep[1], ep2[1]], 'k-')
def label(self, fmtstr='%.2f', takeabs=True, ternarylabels=False, hidezerocomp=False, **kwargs):#takeabs is to avoid a negative sign for ~0 negative compositions
hal=['right', 'center', 'left']
val=['top', 'bottom', 'top']
xdel=[-1.*self.offset, 0, self.offset]
ydel=[0, self.offset, 0]
for i, ((x, y), ha, va, t, xd, yd) in enumerate(zip(self.cartendpts, hal, val, self.ellabels, xdel, ydel)):
c=self.toComp([x, y], process=False)[0]
if takeabs:
c=numpy.abs(c)
cs=None
ternarylabels=ternarylabels or (c!=0).sum()>1
#print c, c!=0, (c!=0).sum()>1
if not ternarylabels:
cs=t
elif not self.ellabels is None:
f=fmtstr
cs=''.join([('%s$_{'+f+'}$') %t for t in zip(self.ellabels, c) if not (hidezerocomp and ((f %numpy.abs(t[1]))==(f %0.)))])
#cs=(r'%s$_{'+f+r'}$%s$_{'+f+r'}$%s$_{'+f+r'}$') %tuple([t[ind] for t in zip(self.ellabels, c) for ind in range(2)])
if not cs is None:
self.ax.text(x+xd, y+yd, cs, ha=ha, va=va, **kwargs)
def grid(self, nintervals=4, fmtstr='%0.2f', takeabs=True, ternarylabels=False, printticklabels=True, **kwargs):#takeabs is to avoid a negative sign for ~0 negative compositions
lstyle = {'color': '0.6',
#'dashes': (1, 1),
'linewidth': 1.}
rot=[60, 0, 300]
hal=['right', 'left', 'center']
val=['center', 'center', 'top']
xdel=[-1.*self.offset, self.offset, 0]
ydel=[0, 0, -1.*self.offset]
side=[1, 2, 0]
if isinstance(printticklabels, bool):
if printticklabels:
printticklabels=[True]*(nintervals-1)
else:
printticklabels=[False]*(nintervals-1)
elif isinstance(printticklabels, list) and not isinstance(printticklabels, bool):
printticklabels=[i in printticklabels for i in range(nintervals-1)]
n=nintervals
ep=self.cartendpts
for i, j, k, r, ha, va, xd, yd, s in zip([0, 1, 2], [1, 2, 0], [2, 0, 1], rot, hal, val, xdel, ydel, side):
for m, b in zip(range(1, n), printticklabels):
x, y=((n-m)*ep[i]+m*ep[j])/n
xe, ye=((n-m)*ep[k]+m*ep[j])/n
self.ax.plot([x, xe], [y, ye], **lstyle)
if not b:
continue
c=self.toComp([x, y], process=False)[0]
if takeabs:
c=numpy.abs(c)
cs=None
ternarylabels=ternarylabels or numpy.all(c>1.e-6)
#ternarylabels=ternarylabels or numpy.all(c!=0)
#print c, c!=0, numpy.all(c!=0)
if not ternarylabels:
cs=fmtstr %c[s]
elif not self.ellabels is None:
f=fmtstr
cs=(r'%s$_{'+f+r'}$%s$_{'+f+r'}$%s$_{'+f+r'}$') %tuple([t[ind] for t in zip(self.ellabels, c) for ind in range(2)])
if not cs is None:
self.ax.text(x+xd, y+yd, cs, ha=ha, va=va, **kwargs)
def patch(self,coords, limits=[], **kwargs):
'''Fill the area bounded by limits.
Limits format: [[bmin,bmax],[lmin,lmax],[rmin,rmax]]
Other arguments as for pylab.fill()'''
# coords = []
# bounds = [[1,-1,1],[1,0,-1],[-1,0,0],[1,-1,0],[1,1,-1],[-1,1,0],[0,-1,0],
# [0,1,-1],[-1,1,1],[0,-1,1],[0,0,-1],[-1,0,1]]
# for pt in bounds: #plug in values for these limits
# for i in [0,1,2]:
# if pt[i] == 1:
# pt[i] = limits[i][1]
# else:
# if pt[i] == 0:pt[i] = limits[i][0]
# for i in [0,1,2]:
# if pt[i] == -1: pt[i] = 99 - sum(pt)
# if self.satisfies_bounds(pt, limits): coords.append(pt)
# coords.append(coords[0]) #close the loop
xs, ys = self.toCart(coords)
self.ax.fill(xs, ys, **kwargs)
def text(self, loctriple, word, **kwargs):
(x, y) = self.toCart([loctriple])
self.ax.text(x[0], y[0], word, **kwargs)
def show(self):
self.ax.legend(loc=1)
self.ax.set_xlim(-.10, 1.10)
self.ax.set_ylim(-.10, 1.00)
def rgb_comp(self, terncoordlist, affine=True):
if affine:
aff_tcl=self.afftrans(terncoordlist)
else:
aff_tcl=terncoordlist
return aff_tcl
def plotpoints_rgb(self, terncoordlist, affine=True, **kwargs):
cols=self.rgb_comp(terncoordlist, affine)
for comp, c in zip(terncoordlist, cols):
self.scatter([comp], color=c, **kwargs)
return cols
def complex_to_rgb_grid(self, complex_data, invert=False):
from numpy import angle, max, pi, sin, zeros
phase = angle(complex_data)
amplitude = abs(complex_data)
amplitude = amplitude/max(max(amplitude))
A = zeros((complex_data.shape[0], complex_data.shape[1], 3))
A[:,:,0] = .5*(sin(phase)+1)*amplitude
A[:,:,1] = .5*(sin(phase+pi/2)+1)*amplitude
A[:,:,2] = .5*(-sin(phase)+1)*amplitude
if(invert):
return 1-A
else:
return A
def rgb_compdiff(self, compdiffarr, maxcompdist=None):
sat = ((compdiffarr**2).sum(axis=1)/2.)**.5
huelist=[0. if cd.sum()==0. else rgb_to_hsv(*(cd/cd.sum()))[0] for cd in numpy.abs(compdiffarr)]
if maxcompdist is None:
sat_norm=sat/max(sat)
else:
sat_norm=sat/maxcompdist
sat_norm[sat_norm>1.]=1.
rgbarr=numpy.array([hsv_to_rgb(h, s, 1) for h, s in zip(huelist, sat_norm)])
return rgbarr
def hsdiffplot(self, terncomps, terncomps2, descriptor='o', **kwargs):
comps=numpy.float64(terncomps)
comps2=numpy.float64(terncomps2)
compsdiff=comps2-comps
rgb_arr=self.rgb_compdiff(compsdiff)
compdist = ((compsdiff**2).sum(axis=1)/2.)**.5
self.colorcompplot(comps, descriptor=descriptor, colors=rgb_arr, hollow=False, markeredgecolor='none', **kwargs)
# color wheel axes
self.ax.figure.subplots_adjust(left=.05, right=.7)
self.cwax=self.ax.figure.add_axes([0.6, 0.45, 0.3, 0.45], projection='polar')
N = 1024
x = numpy.linspace(-1, 1, N)
y = numpy.linspace(-1, 1, N)
X,Y = numpy.meshgrid(x,y)
R = numpy.sqrt(X*X + Y*Y)
PHI = numpy.arctan2(Y, X) - numpy.pi/2
colorgrid=self.complex_to_rgb_grid(R*numpy.exp(-1j*PHI) * (R<1), invert=True)
self.cwax.imshow(colorgrid, extent=[0,2*numpy.pi, 0,1024])
self.cwax.set_rgrids([1,N/3,2*N/3], angle=45)
self.cwax.set_xticks([numpy.pi/2, 7*numpy.pi/6, 11*numpy.pi/6])
self.cwax.set_yticks([N/3, 2*N/3, N])
self.cwax.set_xticklabels(['%s' % ('G'),\
'%s' % ('R'),\
'%s' % ('B')])
self.cwax.set_yticklabels([\
'%.3f' % (max(compdist)/3.),\
'%.3f' % (2.*max(compdist)/3.),\
'%.3f' % (max(compdist))])
| bsd-3-clause |
astroML/astroML | examples/learning/plot_neighbors_photoz.py | 2 | 2066 | """
K-Neighbors for Photometric Redshifts
-------------------------------------
Estimate redshifts from the colors of sdss galaxies and quasars.
This uses colors from a sample of 50,000 objects with SDSS photometry
and ugriz magnitudes. The example shows how far one can get with an
extremely simple machine learning approach to the photometric redshift
problem.
The function :func:`fetch_sdss_galaxy_colors` used below actually queries
the SDSS CASjobs server for the colors of the 50,000 galaxies.
"""
# Author: Jake VanderPlas <[email protected]>
# License: BSD
# The figure is an example from astroML: see http://astroML.github.com
import numpy as np
from matplotlib import pyplot as plt
from sklearn.neighbors import KNeighborsRegressor
from astroML.datasets import fetch_sdss_galaxy_colors
from astroML.plotting import scatter_contour
n_neighbors = 1
data = fetch_sdss_galaxy_colors()
N = len(data)
# shuffle data
np.random.seed(0)
np.random.shuffle(data)
# put colors in a matrix
X = np.zeros((N, 4))
X[:, 0] = data['u'] - data['g']
X[:, 1] = data['g'] - data['r']
X[:, 2] = data['r'] - data['i']
X[:, 3] = data['i'] - data['z']
z = data['redshift']
# divide into training and testing data
Ntrain = N // 2
Xtrain = X[:Ntrain]
ztrain = z[:Ntrain]
Xtest = X[Ntrain:]
ztest = z[Ntrain:]
knn = KNeighborsRegressor(n_neighbors, weights='uniform')
zpred = knn.fit(Xtrain, ztrain).predict(Xtest)
axis_lim = np.array([-0.1, 2.5])
rms = np.sqrt(np.mean((ztest - zpred) ** 2))
print("RMS error = %.2g" % rms)
ax = plt.axes()
plt.scatter(ztest, zpred, c='k', lw=0, s=4)
plt.plot(axis_lim, axis_lim, '--k')
plt.plot(axis_lim, axis_lim + rms, ':k')
plt.plot(axis_lim, axis_lim - rms, ':k')
plt.xlim(axis_lim)
plt.ylim(axis_lim)
plt.text(0.98, 0.02, "RMS error = %.2g" % rms,
ha='right', va='bottom', transform=ax.transAxes,
bbox=dict(ec='w', fc='w'), fontsize=12)
plt.title('Photo-z: Nearest Neighbor Regression')
plt.xlabel(r'$\mathrm{z_{spec}}$', fontsize=14)
plt.ylabel(r'$\mathrm{z_{phot}}$', fontsize=14)
plt.show()
| bsd-2-clause |
andrewmchen/incubator-airflow | airflow/hooks/base_hook.py | 23 | 2895 | # -*- coding: utf-8 -*-
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
from builtins import object
import logging
import os
import random
from airflow import settings
from airflow.models import Connection
from airflow.exceptions import AirflowException
CONN_ENV_PREFIX = 'AIRFLOW_CONN_'
class BaseHook(object):
"""
    Abstract base class for hooks; hooks are meant as an interface to
    interact with external systems. MySqlHook, HiveHook and PigHook return
    objects that can handle the connection and interaction with specific
    instances of these systems, and expose consistent methods to interact
    with them.
"""
def __init__(self, source):
pass
@classmethod
def _get_connections_from_db(cls, conn_id):
session = settings.Session()
db = (
session.query(Connection)
.filter(Connection.conn_id == conn_id)
.all()
)
session.expunge_all()
session.close()
if not db:
raise AirflowException(
"The conn_id `{0}` isn't defined".format(conn_id))
return db
@classmethod
def _get_connection_from_env(cls, conn_id):
environment_uri = os.environ.get(CONN_ENV_PREFIX + conn_id.upper())
conn = None
if environment_uri:
conn = Connection(conn_id=conn_id, uri=environment_uri)
return conn
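    # Editor's hedged example: with CONN_ENV_PREFIX, a connection named 'my_db' can
    # be supplied through an environment variable holding a connection URI, e.g.
    #   AIRFLOW_CONN_MY_DB=postgres://user:pass@host:5432/schema
    # (the variable name and URI are illustrative, not taken from this file).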
@classmethod
def get_connections(cls, conn_id):
conn = cls._get_connection_from_env(conn_id)
if conn:
conns = [conn]
else:
conns = cls._get_connections_from_db(conn_id)
return conns
@classmethod
def get_connection(cls, conn_id):
conn = random.choice(cls.get_connections(conn_id))
if conn.host:
logging.info("Using connection to: " + conn.host)
return conn
@classmethod
def get_hook(cls, conn_id):
connection = cls.get_connection(conn_id)
return connection.get_hook()
def get_conn(self):
raise NotImplementedError()
def get_records(self, sql):
raise NotImplementedError()
def get_pandas_df(self, sql):
raise NotImplementedError()
def run(self, sql):
raise NotImplementedError()
| apache-2.0 |
fzalkow/scikit-learn | benchmarks/bench_plot_lasso_path.py | 301 | 4003 | """Benchmarks of Lasso regularization path computation using Lars and CD
The input data is mostly low rank but has a fat, infinite tail.
"""
from __future__ import print_function
from collections import defaultdict
import gc
import sys
from time import time
import numpy as np
from sklearn.linear_model import lars_path
from sklearn.linear_model import lasso_path
from sklearn.datasets.samples_generator import make_regression
def compute_bench(samples_range, features_range):
it = 0
results = defaultdict(lambda: [])
max_it = len(samples_range) * len(features_range)
for n_samples in samples_range:
for n_features in features_range:
it += 1
print('====================')
print('Iteration %03d of %03d' % (it, max_it))
print('====================')
dataset_kwargs = {
'n_samples': n_samples,
'n_features': n_features,
'n_informative': n_features / 10,
'effective_rank': min(n_samples, n_features) / 10,
#'effective_rank': None,
'bias': 0.0,
}
print("n_samples: %d" % n_samples)
print("n_features: %d" % n_features)
X, y = make_regression(**dataset_kwargs)
gc.collect()
print("benchmarking lars_path (with Gram):", end='')
sys.stdout.flush()
tstart = time()
G = np.dot(X.T, X) # precomputed Gram matrix
Xy = np.dot(X.T, y)
lars_path(X, y, Xy=Xy, Gram=G, method='lasso')
delta = time() - tstart
print("%0.3fs" % delta)
results['lars_path (with Gram)'].append(delta)
gc.collect()
print("benchmarking lars_path (without Gram):", end='')
sys.stdout.flush()
tstart = time()
lars_path(X, y, method='lasso')
delta = time() - tstart
print("%0.3fs" % delta)
results['lars_path (without Gram)'].append(delta)
gc.collect()
print("benchmarking lasso_path (with Gram):", end='')
sys.stdout.flush()
tstart = time()
lasso_path(X, y, precompute=True)
delta = time() - tstart
print("%0.3fs" % delta)
results['lasso_path (with Gram)'].append(delta)
gc.collect()
print("benchmarking lasso_path (without Gram):", end='')
sys.stdout.flush()
tstart = time()
lasso_path(X, y, precompute=False)
delta = time() - tstart
print("%0.3fs" % delta)
results['lasso_path (without Gram)'].append(delta)
return results
if __name__ == '__main__':
from mpl_toolkits.mplot3d import axes3d # register the 3d projection
import matplotlib.pyplot as plt
samples_range = np.linspace(10, 2000, 5).astype(np.int)
features_range = np.linspace(10, 2000, 5).astype(np.int)
results = compute_bench(samples_range, features_range)
max_time = max(max(t) for t in results.values())
fig = plt.figure('scikit-learn Lasso path benchmark results')
i = 1
for c, (label, timings) in zip('bcry', sorted(results.items())):
ax = fig.add_subplot(2, 2, i, projection='3d')
X, Y = np.meshgrid(samples_range, features_range)
Z = np.asarray(timings).reshape(samples_range.shape[0],
features_range.shape[0])
# plot the actual surface
ax.plot_surface(X, Y, Z.T, cstride=1, rstride=1, color=c, alpha=0.8)
# dummy point plot to stick the legend to since surface plot do not
# support legends (yet?)
#ax.plot([1], [1], [1], color=c, label=label)
ax.set_xlabel('n_samples')
ax.set_ylabel('n_features')
ax.set_zlabel('Time (s)')
ax.set_zlim3d(0.0, max_time * 1.1)
ax.set_title(label)
#ax.legend()
i += 1
plt.show()
| bsd-3-clause |
ChinaQuants/zipline | zipline/assets/futures.py | 1 | 5861 | #
# Copyright 2015 Quantopian, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from pandas import Timestamp, Timedelta
from pandas.tseries.tools import normalize_date
class FutureChain(object):
""" Allows users to look up future contracts.
Parameters
----------
asset_finder : AssetFinder
An AssetFinder for future contract lookups, in particular the
AssetFinder of the TradingAlgorithm instance.
get_datetime : function
A function that returns the simulation datetime, in particular
the get_datetime method of the TradingAlgorithm instance.
root_symbol : str
The root symbol of a future chain.
as_of_date : pandas.Timestamp, optional
Date at which the chain determination is rooted. I.e. the
existing contract whose notice date is first after this date is
the primary contract, etc. If not provided, the current
simulation date is used as the as_of_date.
Attributes
----------
root_symbol : str
The root symbol of the future chain.
as_of_date
The current as-of date of this future chain.
Methods
-------
as_of(dt)
offset(time_delta)
Raises
------
RootSymbolNotFound
Raised when the FutureChain is initialized with a root symbol for which
a future chain could not be found.
"""
def __init__(self, asset_finder, get_datetime, root_symbol,
as_of_date=None):
self.root_symbol = root_symbol
# Reference to the algo's AssetFinder for contract lookups
self._asset_finder = asset_finder
# Reference to the algo's get_datetime to know the current dt
self._algorithm_get_datetime = get_datetime
# If an as_of_date is provided, self._as_of_date uses that
# value, otherwise None. This attribute backs the as_of_date property.
if as_of_date:
self._as_of_date = normalize_date(as_of_date)
else:
self._as_of_date = None
# Attribute to cache the most up-to-date chain, and the dt when it was
# last updated.
self._current_chain = []
self._last_updated = None
# Get the initial chain, since self._last_updated is None.
self._maybe_update_current_chain()
def __repr__(self):
# NOTE: The string returned cannot be used to instantiate this
# exact FutureChain, since we don't want to display the asset
# finder and get_datetime function to the user.
if self._as_of_date:
return "FutureChain(root_symbol='%s', as_of_date='%s')" % (
self.root_symbol, self.as_of_date)
else:
return "FutureChain(root_symbol='%s')" % self.root_symbol
def _get_datetime(self):
"""
Returns the normalized simulation datetime.
Returns
-------
pandas.Timestamp
The normalized datetime of FutureChain's TradingAlgorithm.
"""
return normalize_date(
Timestamp(self._algorithm_get_datetime(), tz='UTC')
)
@property
def as_of_date(self):
"""
The current as-of date of this future chain.
Returns
-------
pandas.Timestamp
The user-provided as_of_date if given, otherwise the
current datetime of the simulation.
"""
if self._as_of_date is not None:
return self._as_of_date
else:
return self._get_datetime()
def _maybe_update_current_chain(self):
""" Updates the current chain if it's out of date, then returns
it.
Returns
-------
list
The up-to-date current chain, a list of Future objects.
"""
if (self._last_updated is None)\
or (self._last_updated != self.as_of_date):
self._current_chain = self._asset_finder.lookup_future_chain(
self.root_symbol,
self.as_of_date
)
self._last_updated = self.as_of_date
return self._current_chain
def __getitem__(self, key):
return self._maybe_update_current_chain()[key]
def __len__(self):
return len(self._maybe_update_current_chain())
def __iter__(self):
return iter(self._maybe_update_current_chain())
def as_of(self, dt):
""" Get the future chain for this root symbol as of a specific date.
Parameters
----------
        dt : datetime.datetime or pandas.Timestamp or str
The as_of_date for the new chain.
Returns
-------
FutureChain
"""
return FutureChain(
asset_finder=self._asset_finder,
get_datetime=self._algorithm_get_datetime,
root_symbol=self.root_symbol,
as_of_date=dt
)
def offset(self, time_delta):
""" Get the future chain for this root symbol with a given
offset from the current as_of_date.
Parameters
----------
time_delta : datetime.timedelta or pandas.Timedelta or str
The offset from the current as_of_date for the new chain.
Returns
-------
FutureChain
"""
return self.as_of(self.as_of_date + Timedelta(time_delta))
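# Usage sketch (hypothetical, for illustration only): inside a
# TradingAlgorithm an author would obtain a chain and then index or re-root
# it. The asset finder, get_datetime and the 'CL' root symbol below are
# assumed to be supplied by the algorithm context.
#
#     chain = FutureChain(context.asset_finder, get_datetime, root_symbol='CL')
#     front_contract = chain[0]                     # primary contract as of "now"
#     shifted = chain.offset('30 days')             # chain re-rooted 30 days ahead
#     fixed = chain.as_of(Timestamp('2015-01-02'))  # chain as of a fixed date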
| apache-2.0 |
evgchz/scikit-learn | examples/ensemble/plot_bias_variance.py | 357 | 7324 | """
============================================================
Single estimator versus bagging: bias-variance decomposition
============================================================
This example illustrates and compares the bias-variance decomposition of the
expected mean squared error of a single estimator against a bagging ensemble.
In regression, the expected mean squared error of an estimator can be
decomposed in terms of bias, variance and noise. On average over datasets of
the regression problem, the bias term measures the average amount by which the
predictions of the estimator differ from the predictions of the best possible
estimator for the problem (i.e., the Bayes model). The variance term measures
the variability of the predictions of the estimator when fit over different
instances LS of the problem. Finally, the noise measures the irreducible part
of the error which is due to the variability in the data.
The upper left figure illustrates the predictions (in dark red) of a single
decision tree trained over a random dataset LS (the blue dots) of a toy 1d
regression problem. It also illustrates the predictions (in light red) of other
single decision trees trained over other (and different) randomly drawn
instances LS of the problem. Intuitively, the variance term here corresponds to
the width of the beam of predictions (in light red) of the individual
estimators. The larger the variance, the more sensitive are the predictions for
`x` to small changes in the training set. The bias term corresponds to the
difference between the average prediction of the estimator (in cyan) and the
best possible model (in dark blue). On this problem, we can thus observe that
the bias is quite low (both the cyan and the blue curves are close to each
other) while the variance is large (the red beam is rather wide).
The lower left figure plots the pointwise decomposition of the expected mean
squared error of a single decision tree. It confirms that the bias term (in
blue) is low while the variance is large (in green). It also illustrates the
noise part of the error which, as expected, appears to be constant and around
`0.01`.
The right figures correspond to the same plots but using instead a bagging
ensemble of decision trees. In both figures, we can observe that the bias term
is larger than in the previous case. In the upper right figure, the difference
between the average prediction (in cyan) and the best possible model is larger
(e.g., notice the offset around `x=2`). In the lower right figure, the bias
curve is also slightly higher than in the lower left figure. In terms of
variance however, the beam of predictions is narrower, which suggests that the
variance is lower. Indeed, as the lower right figure confirms, the variance
term (in green) is lower than for single decision trees. Overall, the bias-
variance decomposition is therefore no longer the same. The tradeoff is better
for bagging: averaging several decision trees fit on bootstrap copies of the
dataset slightly increases the bias term but allows for a larger reduction of
the variance, which results in a lower overall mean squared error (compare the
red curves in the lower figures). The script output also confirms this
intuition. The total error of the bagging ensemble is lower than the total
error of a single decision tree, and this difference indeed mainly stems from a
reduced variance.
For further details on bias-variance decomposition, see section 7.3 of [1]_.
References
----------
.. [1] T. Hastie, R. Tibshirani and J. Friedman,
"Elements of Statistical Learning", Springer, 2009.
"""
print(__doc__)
# Author: Gilles Louppe <[email protected]>
# License: BSD 3 clause
import numpy as np
import matplotlib.pyplot as plt
from sklearn.ensemble import BaggingRegressor
from sklearn.tree import DecisionTreeRegressor
# Settings
n_repeat = 50 # Number of iterations for computing expectations
n_train = 50 # Size of the training set
n_test = 1000 # Size of the test set
noise = 0.1 # Standard deviation of the noise
np.random.seed(0)
# Change this for exploring the bias-variance decomposition of other
# estimators. This should work well for estimators with high variance (e.g.,
# decision trees or KNN), but poorly for estimators with low variance (e.g.,
# linear models).
estimators = [("Tree", DecisionTreeRegressor()),
("Bagging(Tree)", BaggingRegressor(DecisionTreeRegressor()))]
n_estimators = len(estimators)
# Generate data
def f(x):
x = x.ravel()
return np.exp(-x ** 2) + 1.5 * np.exp(-(x - 2) ** 2)
def generate(n_samples, noise, n_repeat=1):
X = np.random.rand(n_samples) * 10 - 5
X = np.sort(X)
if n_repeat == 1:
y = f(X) + np.random.normal(0.0, noise, n_samples)
else:
y = np.zeros((n_samples, n_repeat))
for i in range(n_repeat):
y[:, i] = f(X) + np.random.normal(0.0, noise, n_samples)
X = X.reshape((n_samples, 1))
return X, y
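# Note on generate(): X is drawn uniformly on [-5, 5); when n_repeat > 1 the
# function returns n_repeat independent noisy realisations of y for the same
# X, which is what later allows the noise term to be estimated as
# np.var(y_test, axis=1).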
X_train = []
y_train = []
for i in range(n_repeat):
X, y = generate(n_samples=n_train, noise=noise)
X_train.append(X)
y_train.append(y)
X_test, y_test = generate(n_samples=n_test, noise=noise, n_repeat=n_repeat)
# Loop over estimators to compare
for n, (name, estimator) in enumerate(estimators):
# Compute predictions
y_predict = np.zeros((n_test, n_repeat))
for i in range(n_repeat):
estimator.fit(X_train[i], y_train[i])
y_predict[:, i] = estimator.predict(X_test)
# Bias^2 + Variance + Noise decomposition of the mean squared error
y_error = np.zeros(n_test)
for i in range(n_repeat):
for j in range(n_repeat):
y_error += (y_test[:, j] - y_predict[:, i]) ** 2
y_error /= (n_repeat * n_repeat)
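    # The double loop above averages the squared error over all
    # n_repeat * n_repeat pairings of a noisy test realisation j with a model
    # trained on training set i, giving a Monte-Carlo estimate of the expected
    # mean squared error at each test point.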
y_noise = np.var(y_test, axis=1)
y_bias = (f(X_test) - np.mean(y_predict, axis=1)) ** 2
y_var = np.var(y_predict, axis=1)
print("{0}: {1:.4f} (error) = {2:.4f} (bias^2) "
" + {3:.4f} (var) + {4:.4f} (noise)".format(name,
np.mean(y_error),
np.mean(y_bias),
np.mean(y_var),
np.mean(y_noise)))
# Plot figures
plt.subplot(2, n_estimators, n + 1)
plt.plot(X_test, f(X_test), "b", label="$f(x)$")
plt.plot(X_train[0], y_train[0], ".b", label="LS ~ $y = f(x)+noise$")
for i in range(n_repeat):
if i == 0:
plt.plot(X_test, y_predict[:, i], "r", label="$\^y(x)$")
else:
plt.plot(X_test, y_predict[:, i], "r", alpha=0.05)
plt.plot(X_test, np.mean(y_predict, axis=1), "c",
label="$\mathbb{E}_{LS} \^y(x)$")
plt.xlim([-5, 5])
plt.title(name)
if n == 0:
plt.legend(loc="upper left", prop={"size": 11})
plt.subplot(2, n_estimators, n_estimators + n + 1)
plt.plot(X_test, y_error, "r", label="$error(x)$")
plt.plot(X_test, y_bias, "b", label="$bias^2(x)$"),
plt.plot(X_test, y_var, "g", label="$variance(x)$"),
plt.plot(X_test, y_noise, "c", label="$noise(x)$")
plt.xlim([-5, 5])
plt.ylim([0, 0.1])
if n == 0:
plt.legend(loc="upper left", prop={"size": 11})
plt.show()
| bsd-3-clause |
smcantab/pele | pele/transition_states/tests/test_transition_state_search.py | 5 | 5273 | import unittest
from itertools import izip
import numpy as np
import matplotlib.pyplot as plt
from pele.transition_states import FindTransitionState, findTransitionState
from pele.potentials import BasePotential
from pele.potentials.tests import _base_test
from pele.potentials.tests._base_test import assert_arrays_almost_equal
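# SimpleTSPot, defined below, is a sum of two inverted Gaussian wells centred
# at x = (-1, 0, ...) and x = (+1, 0, ...); the two wells are the minima and
# the origin is the saddle point, which is why the tests expect the
# transition-state search to converge to the zero vector.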
class SimpleTSPot(BasePotential):
nfev = 0
# def __init__(self):
# self.nfcalls = 0
def getEnergyGradient(self, x):
self.nfev += 1
grad = np.zeros(x.size)
dx = x.copy()
dx[0] += 1
e1 = -np.exp(-np.dot(dx, dx))
grad -= 2 * dx * e1
dx = x.copy()
dx[0] -= 1
e2 = -np.exp(-np.dot(dx, dx))
grad -= 2 * dx * e2
return e1 + e2, grad
def getEnergy(self, x):
return self.getEnergyGradient(x)[0]
class HarmonicPot(BasePotential):
def getEnergy(self, x):
return np.dot(x, x) + x.sum()
def getEnergyGradient(self, x):
e = self.getEnergy(x)
return e, 2 * x + 1
def plot_pot():
x, y = np.meshgrid(np.arange(-2, 2, .1), np.arange(-2, 2, .1))
pot = SimpleTSPot()
energies = [pot.getEnergy(np.array([xi, yi])) for xi, yi in izip(x.reshape(-1), y.reshape(-1))]
energies = np.array(energies).reshape(x.shape)
plt.contourf(x, y, energies)
plt.show()
class TestSimpleTSPot(_base_test._TestConfiguration):
def setUp(self):
self.pot = SimpleTSPot()
self.x0 = np.array([.1, 1])
self.e0 = -0.27335478531821539
class TestHarmonicPot(_base_test._TestConfiguration):
def setUp(self):
self.pot = HarmonicPot()
self.x0 = np.array([.1, 1])
self.e0 = 2.11
def print_event(coords=None, **kwargs):
print "coords", coords
class TestFindTransitionStateSimplePot(unittest.TestCase):
def setUp(self):
self.pot = SimpleTSPot()
self.x0 = np.array([.1, .1])
self.xts = np.zeros(self.x0.size)
self.ets = self.pot.getEnergy(self.xts)
def test1(self):
# plot_pot()
opt = FindTransitionState(self.x0, self.pot, orthogZeroEigs=None,
# iprint=1,
# verbosity=10, event=print_event,
# tol=1e-3,
# lowestEigenvectorQuenchParams=dict(iprint=1, events=[print_event])
)
ret = opt.run()
self.assertTrue(ret.success)
assert_arrays_almost_equal(self, ret.coords, self.xts, places=3)
self.assertAlmostEqual(ret.energy, self.ets, delta=1e-3)
self.assertLess(ret.rms, 1e-3)
self.assertEqual(ret.nfev + 1, self.pot.nfev)
def test_wrapper(self):
ret = findTransitionState(self.x0, self.pot, orthogZeroEigs=None)
self.assertTrue(ret.success)
assert_arrays_almost_equal(self, ret.coords, self.xts, places=3)
def test_2(self):
self.called = False
def event(**kwargs):
self.called = True
opt = FindTransitionState(self.x0, self.pot, orthogZeroEigs=None,
tangentSpaceQuenchParams=dict(maxstep=1.),
event=event)
ret = opt.run()
self.assertTrue(ret.success)
self.assertTrue(self.called)
def test_from_near_minimum(self):
print "\n\nstarting from a minimum"
x0 = np.array([.6, .1])
opt = FindTransitionState(x0, self.pot, orthogZeroEigs=None,
iprint=1,
verbosity=10, # event=print_event,
# tol=1e-3,
# lowestEigenvectorQuenchParams=dict(iprint=1, events=[print_event])
)
ret = opt.run()
print ret
self.assertTrue(ret.success)
assert_arrays_almost_equal(self, ret.coords, self.xts, places=3)
def test_from_near_minimum_demand_negative_eigenvalue(self):
print "\n\nstarting from a minimum demand"
# demand that the eigenvalue is negative initially.
# this should fail right away
x0 = np.array([.6, .1])
opt = FindTransitionState(x0, self.pot, orthogZeroEigs=None,
demand_initial_negative_vec=True,
iprint=1,
verbosity=10, # event=print_event,
# tol=1e-3,
# lowestEigenvectorQuenchParams=dict(iprint=1, events=[print_event])
)
ret = opt.run()
print ret
self.assertFalse(ret.success)
self.assertEqual(ret.nsteps, 0)
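# HarmonicPot is strictly convex (its Hessian is 2*I everywhere, with no
# negative eigenvalue), so there is no saddle point to find; the test below
# checks that the transition-state search reports failure on such a potential.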
class TestFindTS_BadPotential(unittest.TestCase):
def test1(self):
print "\n\ntesting find ts with harmonic potential"
pot = HarmonicPot()
x0 = np.array([.2, 0])
opt = FindTransitionState(x0, pot, orthogZeroEigs=None,
iprint=1,
verbosity=10, # event=print_event,
hessian_diagonalization=True
)
ret = opt.run()
self.assertFalse(ret.success)
print ret
if __name__ == "__main__":
unittest.main()
| gpl-3.0 |