repo_name (stringlengths 6-112) | path (stringlengths 4-204) | copies (stringlengths 1-3) | size (stringlengths 4-6) | content (stringlengths 714-810k) | license (stringclasses, 15 values)
---|---|---|---|---|---|
geomf/omf-fork | omf/models/cvrStatic.py | 1 | 26781 | # Portions Copyrights (C) 2015 Intel Corporation
''' Calculate CVR impacts using a targeted set of static loadflows. '''
import sys
import os
import math
import datetime
import json
import multiprocessing
from copy import copy
from jinja2 import Template
from matplotlib import pyplot as plt
from os.path import join as pJoin
import __metaModel__
from __metaModel__ import renderAndShow, getStatus as getStatusMeta, cancel
import logging
# OMF imports
import omf.feeder
from omf.solvers import gridlabd
from omf.common.plot import Plot
logger = logging.getLogger(__name__)
sys.path.append(__metaModel__._omfDir)
template = None
def renderTemplate(template, fs, modelDir="MyModel", absolutePaths=False, datastoreNames={}):
# Our HTML template for the interface:
with fs.open("models/cvrStatic.html") as tempFile:
template = Template(tempFile.read())
return __metaModel__.renderTemplate(template, fs, modelDir, absolutePaths, datastoreNames)
def _roundOne(x, direc):
''' Round x in direc (up/down) to 1 sig fig. '''
thou = 10.0**math.floor(math.log10(x))
decForm = x / thou
if direc == 'up':
return math.ceil(decForm) * thou
elif direc == 'down':
return math.floor(decForm) * thou
else:
raise Exception("direc must be 'up' or 'down'")
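# Illustrative examples (comment added for clarity, not in the original source):
# _roundOne(1234.0, 'up') returns 2000.0 and _roundOne(1234.0, 'down') returns 1000.0.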
def getStatus(modelDir, fs):
return getStatusMeta(modelDir, fs)
def run(modelDir, inputDict, fs):
''' Run the model in a separate process. web.py calls this to run the model.
This function will return fast, but results take a while to hit the file system.'''
logger.info("Running cvrStatic model... modelDir: %s; inputDict: %s", modelDir, inputDict)
if not os.path.isdir(modelDir):
os.makedirs(modelDir)
inputDict["created"] = str(datetime.datetime.now())
# MAYBEFIX: remove this data dump. Check showModel in web.py and
# renderTemplate()
fs.save(pJoin(modelDir, "allInputData.json"), json.dumps(inputDict, indent=4))
feederDir, feederName = inputDict["feederName"].split("___")
fs.export_from_fs_to_local(pJoin("data", "Feeder", feederDir, feederName + ".json"),
pJoin(modelDir, "feeder.json"))
# If we are re-running, remove output:
try:
fs.remove(pJoin(modelDir, "allOutputData.json"))
except:
pass
# Start the computation.
backProc = multiprocessing.Process(
target=runForeground, args=(modelDir, inputDict, fs))
backProc.start()
print "SENT TO BACKGROUND", modelDir
with open(pJoin(modelDir, "PPID.txt"), "w") as pPidFile:
pPidFile.write(str(backProc.pid))
def runForeground(modelDir, inputDict, fs):
''' Run the model in the foreground. WARNING: can take about a minute. '''
# Global vars, and load data from the model directory.
print "STARTING TO RUN", modelDir
try:
startTime = datetime.datetime.now()
feederJson = json.load(open(pJoin(modelDir, "feeder.json")))
tree = feederJson.get("tree", {})
attachments = feederJson.get("attachments", {})
allOutput = {}
''' Run CVR analysis. '''
# Reformat monthData and rates.
rates = {k: float(inputDict[k]) for k in ["capitalCost", "omCost", "wholesaleEnergyCostPerKwh",
"retailEnergyCostPerKwh", "peakDemandCostSpringPerKw", "peakDemandCostSummerPerKw",
"peakDemandCostFallPerKw", "peakDemandCostWinterPerKw"]}
# print "RATES", rates
monthNames = ["January", "February", "March", "April", "May", "June", "July", "August",
"September", "October", "November", "December"]
monthToSeason = {'January': 'Winter', 'February': 'Winter', 'March': 'Spring', 'April': 'Spring',
'May': 'Spring', 'June': 'Summer', 'July': 'Summer', 'August': 'Summer',
'September': 'Fall', 'October': 'Fall', 'November': 'Fall', 'December': 'Winter'}
monthData = []
for i, x in enumerate(monthNames):
monShort = x[0:3].lower()
season = monthToSeason[x]
histAvg = float(inputDict.get(monShort + "Avg", 0))
histPeak = float(inputDict.get(monShort + "Peak", 0))
monthData.append({"monthId": i, "monthName": x, "histAverage": histAvg,
"histPeak": histPeak, "season": season})
# for row in monthData:
# print row
# Graph the SCADA data.
fig = plt.figure(figsize=(10, 6))
indices = [r['monthName'] for r in monthData]
d1 = [r['histPeak'] / (10**3) for r in monthData]
d2 = [r['histAverage'] / (10**3) for r in monthData]
ticks = range(len(d1))
bar_peak = plt.bar(ticks, d1, color='gray')
bar_avg = plt.bar(ticks, d2, color='dimgray')
plt.legend([bar_peak[0], bar_avg[0]], ['histPeak', 'histAverage'], bbox_to_anchor=(0., 1.015, 1., .102), loc=3,
ncol=2, mode="expand", borderaxespad=0.1)
plt.xticks([t + 0.5 for t in ticks], indices)
plt.ylabel('Mean and peak historical power consumptions (kW)')
fig.autofmt_xdate()
Plot.save_fig(plt, pJoin(modelDir, "scadaChart.png"))
allOutput["histPeak"] = d1
allOutput["histAverage"] = d2
allOutput["monthName"] = [name[0:3] for name in monthNames]
# Graph feeder.
fig = plt.figure(figsize=(10, 10))
myGraph = omf.feeder.treeToNxGraph(tree)
omf.feeder.latLonNxGraph(myGraph, neatoLayout=False)
Plot.save_fig(plt, pJoin(modelDir, "feederChart.png"))
with open(pJoin(modelDir, "feederChart.png"), "rb") as inFile:
allOutput["feederChart"] = inFile.read().encode("base64")
# Get the load levels we need to test.
allLoadLevels = [x.get(
'histPeak', 0) for x in monthData] + [y.get('histAverage', 0) for y in monthData]
maxLev = _roundOne(max(allLoadLevels), 'up')
minLev = _roundOne(min(allLoadLevels), 'down')
tenLoadLevels = range(
int(minLev), int(maxLev), int((maxLev - minLev) / 10))
# Gather variables from the feeder.
for key in tree.keys():
# Set clock to single timestep.
if tree[key].get('clock', '') == 'clock':
tree[key] = {"timezone": "PST+8PDT",
"stoptime": "'2013-01-01 00:00:00'",
"starttime": "'2013-01-01 00:00:00'",
"clock": "clock"}
# Save swing node index.
if tree[key].get('bustype', '').lower() == 'swing':
swingIndex = key
swingName = tree[key].get('name')
# Remove all includes.
if tree[key].get('omftype', '') == '#include':
del tree[key]  # remove the include entry from the tree itself, not just the loop variable
# Find the substation regulator and config.
for key in tree:
if tree[key].get('object', '') == 'regulator' and tree[key].get('from', '') == swingName:
regIndex = key
regConfName = tree[key]['configuration']
for key in tree:
if tree[key].get('name', '') == regConfName:
regConfIndex = key
# Set substation regulator to manual operation.
# GLOBAL VARIABLE FOR DEFAULT TAP POSITION
baselineTap = int(inputDict.get("baselineTap"))
tree[regConfIndex] = {
'name': tree[regConfIndex]['name'],
'object': 'regulator_configuration',
'connect_type': '1',
'raise_taps': '10',
'lower_taps': '10',
'CT_phase': 'ABC',
'PT_phase': 'ABC',
# Yo, 0.10 means at tap_pos 10 we're 10% above 120V.
'regulation': '0.10',
'Control': 'MANUAL',
'control_level': 'INDIVIDUAL',
'Type': 'A',
'tap_pos_A': str(baselineTap),
'tap_pos_B': str(baselineTap),
'tap_pos_C': str(baselineTap)}
# Attach recorders relevant to CVR.
recorders = [
{'object': 'collector',
'file': 'ZlossesTransformer.csv',
'group': 'class=transformer',
'limit': '0',
'property': 'sum(power_losses_A.real),sum(power_losses_A.imag),sum(power_losses_B.real),sum(power_losses_B.imag),sum(power_losses_C.real),sum(power_losses_C.imag)'},
{'object': 'collector',
'file': 'ZlossesUnderground.csv',
'group': 'class=underground_line',
'limit': '0',
'property': 'sum(power_losses_A.real),sum(power_losses_A.imag),sum(power_losses_B.real),sum(power_losses_B.imag),sum(power_losses_C.real),sum(power_losses_C.imag)'},
{'object': 'collector',
'file': 'ZlossesOverhead.csv',
'group': 'class=overhead_line',
'limit': '0',
'property': 'sum(power_losses_A.real),sum(power_losses_A.imag),sum(power_losses_B.real),sum(power_losses_B.imag),sum(power_losses_C.real),sum(power_losses_C.imag)'},
{'object': 'recorder',
'file': 'Zregulator.csv',
'limit': '0',
'parent': tree[regIndex]['name'],
'property': 'tap_A,tap_B,tap_C,power_in.real,power_in.imag'},
{'object': 'collector',
'file': 'ZvoltageJiggle.csv',
'group': 'class=triplex_meter',
'limit': '0',
'property': 'min(voltage_12.mag),mean(voltage_12.mag),max(voltage_12.mag),std(voltage_12.mag)'},
{'object': 'recorder',
'file': 'ZsubstationTop.csv',
'limit': '0',
'parent': tree[swingIndex]['name'],
'property': 'voltage_A,voltage_B,voltage_C'},
{'object': 'recorder',
'file': 'ZsubstationBottom.csv',
'limit': '0',
'parent': tree[regIndex]['to'],
'property': 'voltage_A,voltage_B,voltage_C'}]
biggest = 1 + max([int(k) for k in tree.keys()])
for index, rec in enumerate(recorders):
tree[biggest + index] = rec
# Change constant PF loads to ZIP loads. (See evernote for rationale
# about 50/50 power/impedance mix.)
blankZipModel = {'object': 'triplex_load',
'name': 'NAMEVARIABLE',
'base_power_12': 'POWERVARIABLE',
'power_fraction_12': str(inputDict.get("p_percent")),
'impedance_fraction_12': str(inputDict.get("z_percent")),
'current_fraction_12': str(inputDict.get("i_percent")),
# MAYBEFIX: we can probably get this PF data from the
# Milsoft loads.
'power_pf_12': str(inputDict.get("power_factor")),
'impedance_pf_12': str(inputDict.get("power_factor")),
'current_pf_12': str(inputDict.get("power_factor")),
'nominal_voltage': '120',
'phases': 'PHASESVARIABLE',
'parent': 'PARENTVARIABLE'}
def powerClean(powerStr):
''' take 3339.39+1052.29j to 3339.39 '''
return powerStr[0:powerStr.find('+')]
for key in tree:
if tree[key].get('object', '') == 'triplex_node':
# Get existing variables.
name = tree[key].get('name', '')
power = tree[key].get('power_12', '')
parent = tree[key].get('parent', '')
phases = tree[key].get('phases', '')
# Replace object and reintroduce variables.
tree[key] = copy(blankZipModel)
tree[key]['name'] = name
tree[key]['base_power_12'] = powerClean(power)
tree[key]['parent'] = parent
tree[key]['phases'] = phases
# Function to determine how low we can tap down in the CVR case:
def loweringPotential(baseLine):
''' Given a baseline end of line voltage, how many more percent can we shave off the substation voltage? '''
''' testsWePass = [122.0,118.0,200.0,110.0] '''
lower = int(math.floor((baseLine / 114.0 - 1) * 100)) - 1
# If lower is negative, we can't return it because we'd be
# undervolting beyond what baseline already was!
if lower < 0:
return baselineTap
else:
return baselineTap - lower
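# Worked example (comment added for clarity, not in the original source): with
# baseLine=122.0 V the headroom is floor((122.0/114.0 - 1) * 100) - 1 = 6, so the
# CVR case taps down to baselineTap - 6; a baseLine of 110.0 V gives negative
# headroom, so the tap position is left at baselineTap.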
# Run all the powerflows.
powerflows = []
for doingCvr in [False, True]:
# For each load level in the tenLoadLevels, run a powerflow with
# the load objects scaled to the level.
for desiredLoad in tenLoadLevels:
# Find the total load that was defined in Milsoft:
loadList = []
for key in tree:
if tree[key].get('object', '') == 'triplex_load':
loadList.append(tree[key].get('base_power_12', ''))
totalLoad = sum([float(x) for x in loadList])
# Rescale each triplex load:
for key in tree:
if tree[key].get('object', '') == 'triplex_load':
currentPow = float(tree[key]['base_power_12'])
ratio = desiredLoad / totalLoad
tree[key]['base_power_12'] = str(currentPow * ratio)
# If we're doing CVR then lower the voltage.
if doingCvr:
# Find the minimum voltage we can tap down to:
newTapPos = baselineTap
for row in powerflows:
if row.get('loadLevel', '') == desiredLoad:
newTapPos = loweringPotential(
row.get('lowVoltage', 114))
# Tap it down to there.
# MAYBEFIX: do each phase separately because that's how
# it's done in the field... Oof.
tree[regConfIndex]['tap_pos_A'] = str(newTapPos)
tree[regConfIndex]['tap_pos_B'] = str(newTapPos)
tree[regConfIndex]['tap_pos_C'] = str(newTapPos)
# Run the model through gridlab and put outputs in the table.
output = gridlabd.runInFilesystem(tree, attachments=attachments,
keepFiles=True, workDir=modelDir)
os.remove(pJoin(modelDir, "PID.txt"))
p = output['Zregulator.csv']['power_in.real'][0]
q = output['Zregulator.csv']['power_in.imag'][0]
s = math.sqrt(p**2 + q**2)
lossTotal = 0.0
for device in ['ZlossesOverhead.csv', 'ZlossesTransformer.csv', 'ZlossesUnderground.csv']:
for letter in ['A', 'B', 'C']:
r = output[device][
'sum(power_losses_' + letter + '.real)'][0]
i = output[device][
'sum(power_losses_' + letter + '.imag)'][0]
lossTotal += math.sqrt(r**2 + i**2)
# Entire output:
powerflows.append({
'doingCvr': doingCvr,
'loadLevel': desiredLoad,
'realPower': p,
'powerFactor': p / s,
'losses': lossTotal,
'subVoltage': (
output['ZsubstationBottom.csv']['voltage_A'][0] +
output['ZsubstationBottom.csv']['voltage_B'][0] +
output['ZsubstationBottom.csv']['voltage_C'][0]) / 3 / 60,
'lowVoltage': output['ZvoltageJiggle.csv']['min(voltage_12.mag)'][0] / 2,
'highVoltage': output['ZvoltageJiggle.csv']['max(voltage_12.mag)'][0] / 2})
# For a given load level, find two points to interpolate on.
def getInterpPoints(t):
''' Find the two points we can interpolate from. '''
''' tests pass on [tenLoadLevels[0],tenLoadLevels[5]+499,tenLoadLevels[-1]-988] '''
loc = sorted(tenLoadLevels + [t]).index(t)
if loc == 0:
return (tenLoadLevels[0], tenLoadLevels[1])
elif loc > len(tenLoadLevels) - 2:
return (tenLoadLevels[-2], tenLoadLevels[-1])
else:
return (tenLoadLevels[loc - 1], tenLoadLevels[loc + 1])
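# Illustrative behaviour (comment added for clarity): a level at or below
# tenLoadLevels[0] interpolates from the first two levels, one at or above
# tenLoadLevels[-1] from the last two, and anything else from a pair of levels
# that brackets it.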
# Calculate peak reduction.
for row in monthData:
peak = row['histPeak']
peakPoints = getInterpPoints(peak)
peakTopBase = [x for x in powerflows if x.get(
'loadLevel', '') == peakPoints[-1] and x.get('doingCvr', '') == False][0]
peakTopCvr = [x for x in powerflows if x.get(
'loadLevel', '') == peakPoints[-1] and x.get('doingCvr', '') == True][0]
peakBottomBase = [x for x in powerflows if x.get(
'loadLevel', '') == peakPoints[0] and x.get('doingCvr', '') == False][0]
peakBottomCvr = [x for x in powerflows if x.get(
'loadLevel', '') == peakPoints[0] and x.get('doingCvr', '') == True][0]
# Linear interpolation so we aren't running umpteen million
# loadflows.
x = (peakPoints[0], peakPoints[1])
y = (peakTopBase['realPower'] - peakTopCvr['realPower'],
peakBottomBase['realPower'] - peakBottomCvr['realPower'])
peakRed = y[0] + (y[1] - y[0]) * (peak - x[0]) / (x[1] - x[0])
row['peakReduction'] = peakRed
# Calculate energy reduction and loss reduction based on average load.
for row in monthData:
avgEnergy = row['histAverage']
energyPoints = getInterpPoints(avgEnergy)
avgTopBase = [x for x in powerflows if x.get(
'loadLevel', '') == energyPoints[-1] and x.get('doingCvr', '') == False][0]
avgTopCvr = [x for x in powerflows if x.get(
'loadLevel', '') == energyPoints[-1] and x.get('doingCvr', '') == True][0]
avgBottomBase = [x for x in powerflows if x.get(
'loadLevel', '') == energyPoints[0] and x.get('doingCvr', '') == False][0]
avgBottomCvr = [x for x in powerflows if x.get(
'loadLevel', '') == energyPoints[0] and x.get('doingCvr', '') == True][0]
# Linear interpolation so we aren't running umpteen million
# loadflows.
x = (energyPoints[0], energyPoints[1])
y = (avgTopBase['realPower'] - avgTopCvr['realPower'],
avgBottomBase['realPower'] - avgBottomCvr['realPower'])
energyRed = y[0] + \
(y[1] - y[0]) * (avgEnergy - x[0]) / (x[1] - x[0])
row['energyReduction'] = energyRed
lossY = (avgTopBase['losses'] - avgTopCvr['losses'],
avgBottomBase['losses'] - avgBottomCvr['losses'])
lossRed = lossY[0] + (lossY[1] - lossY[0]) * \
(avgEnergy - x[0]) / (x[1] - x[0])
row['lossReduction'] = lossRed
# Multiply by dollars.
for row in monthData:
row['energyReductionDollars'] = row['energyReduction'] / 1000 * \
(rates['wholesaleEnergyCostPerKwh'] -
rates['retailEnergyCostPerKwh'])
row['peakReductionDollars'] = row['peakReduction'] / \
1000 * rates['peakDemandCost' + row['season'] + 'PerKw']
row['lossReductionDollars'] = row['lossReduction'] / \
1000 * rates['wholesaleEnergyCostPerKwh']
# Pretty output
def plotTable(inData):
fig = plt.figure(figsize=(10, 5))
plt.axis('off')
plt.tight_layout()
plt.table(cellText=[row for row in inData[1:]],
loc='center',
rowLabels=range(len(inData) - 1),
colLabels=inData[0])
def dictalToMatrix(dictList):
''' Take our dictal format to a matrix. '''
matrix = [dictList[0].keys()]
for row in dictList:
matrix.append(row.values())
return matrix
# Powerflow results.
plotTable(dictalToMatrix(powerflows))
Plot.save_fig(plt, pJoin(modelDir, "powerflowTable.png"))
# Monetary results.
# To print partial money table
monthDataMat = dictalToMatrix(monthData)
dimX = len(monthDataMat)
dimY = len(monthDataMat[0])
monthDataPart = []
for k in range(0, dimX):
monthDatatemp = []
for m in range(4, dimY):
monthDatatemp.append(monthDataMat[k][m])
monthDataPart.append(monthDatatemp)
plotTable(monthDataPart)
Plot.save_fig(plt, pJoin(modelDir, "moneyTable.png"))
allOutput["monthDataMat"] = dictalToMatrix(monthData)
allOutput["monthDataPart"] = monthDataPart
# Graph the money data.
fig = plt.figure(figsize=(10, 8))
indices = [r['monthName'] for r in monthData]
d1 = [r['energyReductionDollars'] for r in monthData]
d2 = [r['lossReductionDollars'] for r in monthData]
d3 = [r['peakReductionDollars'] for r in monthData]
ticks = range(len(d1))
bar_erd = plt.bar(ticks, d1, color='red')
bar_lrd = plt.bar(ticks, d2, color='green')
bar_prd = plt.bar(ticks, d3, color='blue', yerr=d2)
plt.legend([bar_prd[0], bar_lrd[0], bar_erd[0]], ['peakReductionDollars', 'lossReductionDollars', 'energyReductionDollars'], bbox_to_anchor=(0., 1.015, 1., .102), loc=3,
ncol=2, mode="expand", borderaxespad=0.1)
plt.xticks([t + 0.5 for t in ticks], indices)
plt.ylabel('Utility Savings ($)')
plt.tight_layout(5.5, 1.3, 1.2)
fig.autofmt_xdate()
Plot.save_fig(plt, pJoin(modelDir, "spendChart.png"))
allOutput["energyReductionDollars"] = d1
allOutput["lossReductionDollars"] = d2
allOutput["peakReductionDollars"] = d3
# Graph the cumulative savings.
fig = plt.figure(figsize=(10, 5))
annualSavings = sum(d1) + sum(d2) + sum(d3)
annualSave = lambda x: (
annualSavings - rates['omCost']) * x - rates['capitalCost']
simplePayback = rates['capitalCost'] / \
(annualSavings - rates['omCost'])
plt.xlabel('Year After Installation')
plt.xlim(0, 30)
plt.ylabel('Cumulative Savings ($)')
plt.plot([0 for x in range(31)], c='gray')
plt.axvline(x=simplePayback, ymin=0, ymax=1, c='gray', linestyle='--')
plt.plot([annualSave(x) for x in range(31)], c='green')
Plot.save_fig(plt, pJoin(modelDir, "savingsChart.png"))
allOutput["annualSave"] = [annualSave(x) for x in range(31)]
# Update the runTime in the input file.
endTime = datetime.datetime.now()
inputDict["runTime"] = str(
datetime.timedelta(seconds=int((endTime - startTime).total_seconds())))
fs.save(pJoin(modelDir, "allInputData.json"), json.dumps(inputDict, indent=4))
# Write output file.
fs.save(pJoin(modelDir, "allOutputData.json"), json.dumps(allOutput, indent=4))
# For autotest, there won't be such a file.
try:
os.remove(pJoin(modelDir, "PPID.txt"))
except:
pass
print "DONE RUNNING", modelDir
except Exception as e:
print "Oops, Model Crashed!!!"
print e
cancel(modelDir)
def _tests():
# Variables
import shutil
from .. import filesystem
fs = filesystem.Filesystem().fs
workDir = pJoin(__metaModel__._omfDir, "data", "Model")
friendshipTree = json.load(fs.open(pJoin(
__metaModel__._omfDir, "data", "Feeder", "public", "ABEC Frank LO.json")))["tree"]
colomaTree = json.load(fs.open(pJoin(
__metaModel__._omfDir, "data", "Feeder", "public", "ABEC Columbia.json")))["tree"]
colomaMonths = {"janAvg": 914000.0, "janPeak": 1290000.0,
"febAvg": 897000.00, "febPeak": 1110000.0,
"marAvg": 731000.00, "marPeak": 1030000.0,
"aprAvg": 864000.00, "aprPeak": 2170000.0,
"mayAvg": 1620000.0, "mayPeak": 4580000.0,
"junAvg": 2210000.0, "junPeak": 5550000.0,
"julAvg": 3570000.0, "julPeak": 6260000.0,
"augAvg": 3380000.0, "augPeak": 5610000.0,
"sepAvg": 1370000.0, "sepPeak": 3740000.0,
"octAvg": 1030000.0, "octPeak": 1940000.0,
"novAvg": 1020000.0, "novPeak": 1340000.0,
"decAvg": 1030000.0, "decPeak": 1280000.0}
# friendshipMonths = {"janAvg": 2740000.0, "janPeak": 4240000.0,
# "febAvg": 2480000.0, "febPeak": 3310000.0,
# "marAvg": 2030000.0, "marPeak": 2960000.0,
# "aprAvg": 2110000.0, "aprPeak": 3030000.0,
# "mayAvg": 2340000.0, "mayPeak": 4080000.0,
# "junAvg": 2770000.0, "junPeak": 5810000.0,
# "julAvg": 3970000.0, "julPeak": 6750000.0,
# "augAvg": 3270000.0, "augPeak": 5200000.0,
# "sepAvg": 2130000.0, "sepPeak": 4900000.0,
# "octAvg": 1750000.0, "octPeak": 2340000.0,
# "novAvg": 2210000.0, "novPeak": 3550000.0,
# "decAvg": 2480000.0, "decPeak": 3370000.0}
inData = {"modelType": "cvrStatic",
"feederName": "public___ABEC Columbia",
"runTime": "",
"capitalCost": 30000,
"omCost": 1000,
"wholesaleEnergyCostPerKwh": 0.06,
"retailEnergyCostPerKwh": 0.10,
"peakDemandCostSpringPerKw": 5.0,
"peakDemandCostSummerPerKw": 10.0,
"peakDemandCostFallPerKw": 6.0,
"peakDemandCostWinterPerKw": 8.0,
"baselineTap": 3.0,
"z_percent": 0.5,
"i_percent": 0.0,
"p_percent": 0.5,
"power_factor": 0.9}
for key in colomaMonths:
inData[key] = colomaMonths[key]
modelLoc = pJoin(workDir, "admin", "Automated staticCVR Testing")
# Blow away old test results if necessary.
try:
shutil.rmtree(modelLoc)
except:
pass
# No-input template.
renderAndShow(template, fs)
# Run the model.
run(modelLoc, inData, fs)
# # Show the output.
renderAndShow(template, fs, modelDir=modelLoc)
# # # Delete the model.
# # time.sleep(2)
# # shutil.rmtree(modelLoc)
if __name__ == '__main__':
_tests()
| gpl-2.0 |
cybernet14/scikit-learn | sklearn/feature_selection/tests/test_rfe.py | 209 | 11733 | """
Testing Recursive feature elimination
"""
import warnings
import numpy as np
from numpy.testing import assert_array_almost_equal, assert_array_equal
from nose.tools import assert_equal, assert_true
from scipy import sparse
from sklearn.feature_selection.rfe import RFE, RFECV
from sklearn.datasets import load_iris, make_friedman1
from sklearn.metrics import zero_one_loss
from sklearn.svm import SVC, SVR
from sklearn.ensemble import RandomForestClassifier
from sklearn.cross_validation import cross_val_score
from sklearn.utils import check_random_state
from sklearn.utils.testing import ignore_warnings
from sklearn.utils.testing import assert_warns_message
from sklearn.utils.testing import assert_greater
from sklearn.metrics import make_scorer
from sklearn.metrics import get_scorer
class MockClassifier(object):
"""
Dummy classifier to test recursive feature elimination
"""
def __init__(self, foo_param=0):
self.foo_param = foo_param
def fit(self, X, Y):
assert_true(len(X) == len(Y))
self.coef_ = np.ones(X.shape[1], dtype=np.float64)
return self
def predict(self, T):
return T.shape[0]
predict_proba = predict
decision_function = predict
transform = predict
def score(self, X=None, Y=None):
if self.foo_param > 1:
score = 1.
else:
score = 0.
return score
def get_params(self, deep=True):
return {'foo_param': self.foo_param}
def set_params(self, **params):
return self
def test_rfe_set_params():
generator = check_random_state(0)
iris = load_iris()
X = np.c_[iris.data, generator.normal(size=(len(iris.data), 6))]
y = iris.target
clf = SVC(kernel="linear")
rfe = RFE(estimator=clf, n_features_to_select=4, step=0.1)
y_pred = rfe.fit(X, y).predict(X)
clf = SVC()
with warnings.catch_warnings(record=True):
# estimator_params is deprecated
rfe = RFE(estimator=clf, n_features_to_select=4, step=0.1,
estimator_params={'kernel': 'linear'})
y_pred2 = rfe.fit(X, y).predict(X)
assert_array_equal(y_pred, y_pred2)
def test_rfe_features_importance():
generator = check_random_state(0)
iris = load_iris()
X = np.c_[iris.data, generator.normal(size=(len(iris.data), 6))]
y = iris.target
clf = RandomForestClassifier(n_estimators=20,
random_state=generator, max_depth=2)
rfe = RFE(estimator=clf, n_features_to_select=4, step=0.1)
rfe.fit(X, y)
assert_equal(len(rfe.ranking_), X.shape[1])
clf_svc = SVC(kernel="linear")
rfe_svc = RFE(estimator=clf_svc, n_features_to_select=4, step=0.1)
rfe_svc.fit(X, y)
# Check if the supports are equal
assert_array_equal(rfe.get_support(), rfe_svc.get_support())
def test_rfe_deprecation_estimator_params():
deprecation_message = ("The parameter 'estimator_params' is deprecated as "
"of version 0.16 and will be removed in 0.18. The "
"parameter is no longer necessary because the "
"value is set via the estimator initialisation or "
"set_params method.")
generator = check_random_state(0)
iris = load_iris()
X = np.c_[iris.data, generator.normal(size=(len(iris.data), 6))]
y = iris.target
assert_warns_message(DeprecationWarning, deprecation_message,
RFE(estimator=SVC(), n_features_to_select=4, step=0.1,
estimator_params={'kernel': 'linear'}).fit,
X=X,
y=y)
assert_warns_message(DeprecationWarning, deprecation_message,
RFECV(estimator=SVC(), step=1, cv=5,
estimator_params={'kernel': 'linear'}).fit,
X=X,
y=y)
def test_rfe():
generator = check_random_state(0)
iris = load_iris()
X = np.c_[iris.data, generator.normal(size=(len(iris.data), 6))]
X_sparse = sparse.csr_matrix(X)
y = iris.target
# dense model
clf = SVC(kernel="linear")
rfe = RFE(estimator=clf, n_features_to_select=4, step=0.1)
rfe.fit(X, y)
X_r = rfe.transform(X)
clf.fit(X_r, y)
assert_equal(len(rfe.ranking_), X.shape[1])
# sparse model
clf_sparse = SVC(kernel="linear")
rfe_sparse = RFE(estimator=clf_sparse, n_features_to_select=4, step=0.1)
rfe_sparse.fit(X_sparse, y)
X_r_sparse = rfe_sparse.transform(X_sparse)
assert_equal(X_r.shape, iris.data.shape)
assert_array_almost_equal(X_r[:10], iris.data[:10])
assert_array_almost_equal(rfe.predict(X), clf.predict(iris.data))
assert_equal(rfe.score(X, y), clf.score(iris.data, iris.target))
assert_array_almost_equal(X_r, X_r_sparse.toarray())
def test_rfe_mockclassifier():
generator = check_random_state(0)
iris = load_iris()
X = np.c_[iris.data, generator.normal(size=(len(iris.data), 6))]
y = iris.target
# dense model
clf = MockClassifier()
rfe = RFE(estimator=clf, n_features_to_select=4, step=0.1)
rfe.fit(X, y)
X_r = rfe.transform(X)
clf.fit(X_r, y)
assert_equal(len(rfe.ranking_), X.shape[1])
assert_equal(X_r.shape, iris.data.shape)
def test_rfecv():
generator = check_random_state(0)
iris = load_iris()
X = np.c_[iris.data, generator.normal(size=(len(iris.data), 6))]
y = list(iris.target) # regression test: list should be supported
# Test using the score function
rfecv = RFECV(estimator=SVC(kernel="linear"), step=1, cv=5)
rfecv.fit(X, y)
# non-regression test for missing worst feature:
assert_equal(len(rfecv.grid_scores_), X.shape[1])
assert_equal(len(rfecv.ranking_), X.shape[1])
X_r = rfecv.transform(X)
# All the noisy variable were filtered out
assert_array_equal(X_r, iris.data)
# same in sparse
rfecv_sparse = RFECV(estimator=SVC(kernel="linear"), step=1, cv=5)
X_sparse = sparse.csr_matrix(X)
rfecv_sparse.fit(X_sparse, y)
X_r_sparse = rfecv_sparse.transform(X_sparse)
assert_array_equal(X_r_sparse.toarray(), iris.data)
# Test using a customized loss function
scoring = make_scorer(zero_one_loss, greater_is_better=False)
rfecv = RFECV(estimator=SVC(kernel="linear"), step=1, cv=5,
scoring=scoring)
ignore_warnings(rfecv.fit)(X, y)
X_r = rfecv.transform(X)
assert_array_equal(X_r, iris.data)
# Test using a scorer
scorer = get_scorer('accuracy')
rfecv = RFECV(estimator=SVC(kernel="linear"), step=1, cv=5,
scoring=scorer)
rfecv.fit(X, y)
X_r = rfecv.transform(X)
assert_array_equal(X_r, iris.data)
# Test fix on grid_scores
def test_scorer(estimator, X, y):
return 1.0
rfecv = RFECV(estimator=SVC(kernel="linear"), step=1, cv=5,
scoring=test_scorer)
rfecv.fit(X, y)
assert_array_equal(rfecv.grid_scores_, np.ones(len(rfecv.grid_scores_)))
# Same as the first two tests, but with step=2
rfecv = RFECV(estimator=SVC(kernel="linear"), step=2, cv=5)
rfecv.fit(X, y)
assert_equal(len(rfecv.grid_scores_), 6)
assert_equal(len(rfecv.ranking_), X.shape[1])
X_r = rfecv.transform(X)
assert_array_equal(X_r, iris.data)
rfecv_sparse = RFECV(estimator=SVC(kernel="linear"), step=2, cv=5)
X_sparse = sparse.csr_matrix(X)
rfecv_sparse.fit(X_sparse, y)
X_r_sparse = rfecv_sparse.transform(X_sparse)
assert_array_equal(X_r_sparse.toarray(), iris.data)
def test_rfecv_mockclassifier():
generator = check_random_state(0)
iris = load_iris()
X = np.c_[iris.data, generator.normal(size=(len(iris.data), 6))]
y = list(iris.target) # regression test: list should be supported
# Test using the score function
rfecv = RFECV(estimator=MockClassifier(), step=1, cv=5)
rfecv.fit(X, y)
# non-regression test for missing worst feature:
assert_equal(len(rfecv.grid_scores_), X.shape[1])
assert_equal(len(rfecv.ranking_), X.shape[1])
def test_rfe_estimator_tags():
rfe = RFE(SVC(kernel='linear'))
assert_equal(rfe._estimator_type, "classifier")
# make sure that cross-validation is stratified
iris = load_iris()
score = cross_val_score(rfe, iris.data, iris.target)
assert_greater(score.min(), .7)
def test_rfe_min_step():
n_features = 10
X, y = make_friedman1(n_samples=50, n_features=n_features, random_state=0)
n_samples, n_features = X.shape
estimator = SVR(kernel="linear")
# Test when floor(step * n_features) <= 0
selector = RFE(estimator, step=0.01)
sel = selector.fit(X, y)
assert_equal(sel.support_.sum(), n_features // 2)
# Test when step is between (0,1) and floor(step * n_features) > 0
selector = RFE(estimator, step=0.20)
sel = selector.fit(X, y)
assert_equal(sel.support_.sum(), n_features // 2)
# Test when step is an integer
selector = RFE(estimator, step=5)
sel = selector.fit(X, y)
assert_equal(sel.support_.sum(), n_features // 2)
def test_number_of_subsets_of_features():
# In RFE, 'number_of_subsets_of_features'
# = the number of iterations in '_fit'
# = max(ranking_)
# = 1 + (n_features + step - n_features_to_select - 1) // step
# After optimization #4534, this number
# = 1 + np.ceil((n_features - n_features_to_select) / float(step))
# This test case is to test their equivalence, refer to #4534 and #3824
def formula1(n_features, n_features_to_select, step):
return 1 + ((n_features + step - n_features_to_select - 1) // step)
def formula2(n_features, n_features_to_select, step):
return 1 + np.ceil((n_features - n_features_to_select) / float(step))
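# Worked example (comment added for clarity, not in the original test): with
# n_features=11, n_features_to_select=3 and step=2 both formulas give
# 1 + (11 + 2 - 3 - 1) // 2 == 1 + ceil((11 - 3) / 2.0) == 5; with step=3 both give 4.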
# RFE
# Case 1, n_features - n_features_to_select is divisible by step
# Case 2, n_features - n_features_to_select is not divisible by step
n_features_list = [11, 11]
n_features_to_select_list = [3, 3]
step_list = [2, 3]
for n_features, n_features_to_select, step in zip(
n_features_list, n_features_to_select_list, step_list):
generator = check_random_state(43)
X = generator.normal(size=(100, n_features))
y = generator.rand(100).round()
rfe = RFE(estimator=SVC(kernel="linear"),
n_features_to_select=n_features_to_select, step=step)
rfe.fit(X, y)
# this number also equals to the maximum of ranking_
assert_equal(np.max(rfe.ranking_),
formula1(n_features, n_features_to_select, step))
assert_equal(np.max(rfe.ranking_),
formula2(n_features, n_features_to_select, step))
# In RFECV, 'fit' calls 'RFE._fit'
# 'number_of_subsets_of_features' of RFE
# = the size of 'grid_scores' of RFECV
# = the number of iterations of the for loop before optimization #4534
# RFECV, n_features_to_select = 1
# Case 1, n_features - 1 is divisible by step
# Case 2, n_features - 1 is not divisible by step
n_features_to_select = 1
n_features_list = [11, 10]
step_list = [2, 2]
for n_features, step in zip(n_features_list, step_list):
generator = check_random_state(43)
X = generator.normal(size=(100, n_features))
y = generator.rand(100).round()
rfecv = RFECV(estimator=SVC(kernel="linear"), step=step, cv=5)
rfecv.fit(X, y)
assert_equal(rfecv.grid_scores_.shape[0],
formula1(n_features, n_features_to_select, step))
assert_equal(rfecv.grid_scores_.shape[0],
formula2(n_features, n_features_to_select, step))
| bsd-3-clause |
JaviMerino/trappy | tests/test_baretrace.py | 2 | 3406 | # Copyright 2015-2016 ARM Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import pandas as pd
import trappy
import unittest
class TestBareTrace(unittest.TestCase):
def __init__(self, *args, **kwargs):
super(TestBareTrace, self).__init__(*args, **kwargs)
dfr0 = pd.DataFrame({"l1_misses": [24, 535, 41],
"l2_misses": [155, 11, 200],
"cpu": [ 0, 1, 0]},
index=pd.Series([1.020, 1.342, 1.451], name="Time"))
dfr1 = pd.DataFrame({"load": [ 35, 16, 21, 28],
"util": [279, 831, 554, 843]},
index=pd.Series([1.279, 1.718, 2.243, 2.465], name="Time"))
self.dfr = [dfr0, dfr1]
def test_bare_trace_accepts_name(self):
"""The BareTrace() accepts a name parameter"""
trace = trappy.BareTrace(name="foo")
self.assertEquals(trace.name, "foo")
def test_bare_trace_can_add_parsed_event(self):
"""The BareTrace() class can add parsed events to its collection of trace events"""
trace = trappy.BareTrace()
trace.add_parsed_event("pmu_counters", self.dfr[0])
self.assertEquals(len(trace.pmu_counters.data_frame), 3)
self.assertEquals(trace.pmu_counters.data_frame["l1_misses"].iloc[0], 24)
trace.add_parsed_event("pivoted_counters", self.dfr[0], pivot="cpu")
self.assertEquals(trace.pivoted_counters.pivot, "cpu")
def test_bare_trace_get_duration(self):
"""BareTrace.get_duration() works for a simple case"""
trace = trappy.BareTrace()
trace.add_parsed_event("pmu_counter", self.dfr[0])
trace.add_parsed_event("load_event", self.dfr[1])
self.assertEquals(trace.get_duration(), self.dfr[1].index[-1])
def test_bare_trace_get_duration_normalized(self):
"""BareTrace.get_duration() works if the trace has been normalized"""
trace = trappy.BareTrace()
trace.add_parsed_event("pmu_counter", self.dfr[0].copy())
trace.add_parsed_event("load_event", self.dfr[1].copy())
basetime = self.dfr[0].index[0]
trace.normalize_time(basetime)
expected_duration = self.dfr[1].index[-1] - basetime
self.assertEquals(trace.get_duration(), expected_duration)
def test_bare_trace_normalize_time_accepts_basetime(self):
"""BareTrace().normalize_time() accepts an arbitrary basetime"""
trace = trappy.BareTrace()
trace.add_parsed_event("pmu_counter", self.dfr[0].copy())
prev_first_time = trace.pmu_counter.data_frame.index[0]
basetime = 3
trace.normalize_time(basetime)
self.assertEquals(trace.basetime, basetime)
exp_first_time = prev_first_time - basetime
self.assertEquals(round(trace.pmu_counter.data_frame.index[0] - exp_first_time, 7), 0)
| apache-2.0 |
Funtimezzhou/TradeBuildTools | Document/szse/Quantitative Trading/sat-ebook-and-full-source-20150618/algo-ebook-full-source-code-20150618/chapter12/sharpe.py | 2 | 2780 | #!/usr/bin/python
# -*- coding: utf-8 -*-
# sharpe.py
from __future__ import print_function
import datetime
import numpy as np
import pandas as pd
import pandas.io.data as web
def annualised_sharpe(returns, N=252):
"""
Calculate the annualised Sharpe ratio of a returns stream
based on a number of trading periods, N. N defaults to 252,
which then assumes a stream of daily returns.
The function assumes that the returns are excess returns,
i.e. returns in excess of a benchmark or risk-free rate.
"""
return np.sqrt(N) * returns.mean() / returns.std()
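# Illustrative usage (comment added for clarity, not part of the original script),
# assuming `excess_daily_ret` is a pandas Series of daily excess returns:
# annualised_sharpe(excess_daily_ret) # daily data, N defaults to 252
# annualised_sharpe(weekly_excess_ret, N=52) # weekly data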
def equity_sharpe(ticker):
"""
Calculates the annualised Sharpe ratio based on the daily
returns of an equity ticker symbol listed in Google Finance.
The dates have been hardcoded here for brevity.
"""
start = datetime.datetime(2000,1,1)
end = datetime.datetime(2013,1,1)
# Obtain the equities daily historic data for the desired time period
# and add to a pandas DataFrame
pdf = web.DataReader(ticker, 'google', start, end)
# Use the percentage change method to easily calculate daily returns
pdf['daily_ret'] = pdf['Close'].pct_change()
# Assume an average annual risk-free rate over the period of 5%
pdf['excess_daily_ret'] = pdf['daily_ret'] - 0.05/252
# Return the annualised Sharpe ratio based on the excess daily returns
return annualised_sharpe(pdf['excess_daily_ret'])
def market_neutral_sharpe(ticker, benchmark):
"""
Calculates the annualised Sharpe ratio of a market
neutral long/short strategy involving a long position in 'ticker'
and a corresponding short position in the 'benchmark'.
"""
start = datetime.datetime(2000, 1, 1)
end = datetime.datetime(2013, 1, 1)
# Get historic data for both a symbol/ticker and a benchmark ticker
# The dates have been hardcoded, but you can modify them as you see fit!
tick = web.DataReader(ticker, 'google', start, end)
bench = web.DataReader(benchmark, 'google', start, end)
# Calculate the percentage returns on each of the time series
tick['daily_ret'] = tick['Close'].pct_change()
bench['daily_ret'] = bench['Close'].pct_change()
# Create a new DataFrame to store the strategy information
# The net returns are (long - short)/2, since there is twice
# the trading capital for this strategy
strat = pd.DataFrame(index=tick.index)
strat['net_ret'] = (tick['daily_ret'] - bench['daily_ret'])/2.0
# Return the annualised Sharpe ratio for this strategy
return annualised_sharpe(strat['net_ret'])
if __name__ == "__main__":
print(
"Google Sharpe Ratio: %s" %
equity_sharpe('GOOG')
)
print(
"Google Market Neutral Sharpe Ratio: %s" %
market_neutral_sharpe('GOOG', 'SPY')
) | gpl-3.0 |
runt18/nupic | external/linux32/lib/python2.6/site-packages/matplotlib/backends/backend_emf.py | 1 | 22723 | """
Enhanced Metafile backend. See http://pyemf.sourceforge.net for the EMF
driver library.
"""
from __future__ import division
try:
import pyemf
except ImportError:
raise ImportError('You must first install pyemf from http://pyemf.sf.net')
import os,sys,math,re
from matplotlib import verbose, __version__, rcParams
from matplotlib._pylab_helpers import Gcf
from matplotlib.backend_bases import RendererBase, GraphicsContextBase,\
FigureManagerBase, FigureCanvasBase
from matplotlib.figure import Figure
from matplotlib.transforms import Bbox
from matplotlib.font_manager import findfont, FontProperties
from matplotlib.ft2font import FT2Font, KERNING_UNFITTED, KERNING_DEFAULT, KERNING_UNSCALED
# Font handling stuff snarfed from backend_ps, but only using TTF fonts
_fontd = {}
# Debug print stuff
debugHandle = False
debugPrint = False
debugText = False
# Hashable font properties class. In EMF, angle of rotation is a part
# of the font properties, so a handle to a new font must be obtained
# if the rotation changes.
class EMFFontProperties(FontProperties):
def __init__(self,other,angle):
FontProperties.__init__(self,other.get_family(),
other.get_style(),
other.get_variant(),
other.get_weight(),
other.get_stretch(),
other.get_size())
self.__angle=angle
def __hash__(self):
return hash( (FontProperties.__hash__(self), self.__angle))
def __str__(self):
return str( (FontProperties.__str__(self), self.__angle))
def set_angle(self,angle):
self.__angle=angle
# Hashable pen (line style) properties.
class EMFPen:
def __init__(self,emf,gc):
self.emf=emf
self.gc=gc
r,g,b=gc.get_rgb()
self.r=int(r*255)
self.g=int(g*255)
self.b=int(b*255)
self.width=int(gc.get_linewidth())
self.style=0
self.set_linestyle()
if debugHandle: print "EMFPen: style={0:d} width={1:d} rgb=({2:d},{3:d},{4:d})".format(self.style, self.width, self.r, self.g, self.b)
def __hash__(self):
return hash((self.style,self.width,self.r,self.g,self.b))
def set_linestyle(self):
# Hack. Negative width lines will not get drawn.
if self.width<0:
self.style=pyemf.PS_NULL
else:
styles={'solid':pyemf.PS_SOLID, 'dashed':pyemf.PS_DASH,
'dashdot':pyemf.PS_DASHDOT, 'dotted':pyemf.PS_DOT}
#style=styles.get(self.gc.get_linestyle('solid'))
style=self.gc.get_linestyle('solid')
if debugHandle: print "EMFPen: style={0:d}".format(style)
if style in styles:
self.style=styles[style]
else:
self.style=pyemf.PS_SOLID
def get_handle(self):
handle=self.emf.CreatePen(self.style,self.width,(self.r,self.g,self.b))
return handle
# Hashable brush (fill style) properties.
class EMFBrush:
def __init__(self,emf,rgb):
self.emf=emf
r,g,b=rgb
self.r=int(r*255)
self.g=int(g*255)
self.b=int(b*255)
if debugHandle: print "EMFBrush: rgb=({0:d},{1:d},{2:d})".format(self.r, self.g, self.b)
def __hash__(self):
return hash((self.r,self.g,self.b))
def get_handle(self):
handle=self.emf.CreateSolidBrush((self.r,self.g,self.b))
return handle
class RendererEMF(RendererBase):
"""
The renderer handles drawing/rendering operations through a
pyemf.EMF instance.
"""
def __init__(self, outfile, width, height, dpi):
"Initialize the renderer with a gd image instance"
self.outfile = outfile
# a map from get_color args to colors
self._cached = {}
# dict of hashed properties to already created font handles
self._fontHandle = {}
self.lastHandle = {'font':-1, 'pen':-1, 'brush':-1}
self.emf=pyemf.EMF(width,height,dpi,'in')
self.width=int(width*dpi)
self.height=int(height*dpi)
self.dpi = dpi
self.pointstodpi = dpi/72.0
self.hackPointsForMathExponent = 2.0
# set background transparent for text
self.emf.SetBkMode(pyemf.TRANSPARENT)
# set baseline for text to be bottom left corner
self.emf.SetTextAlign( pyemf.TA_BOTTOM|pyemf.TA_LEFT)
if debugPrint: print "RendererEMF: ({0:f},{1:f}) {2!s} dpi={3:f}".format(self.width, self.height, outfile, dpi)
def save(self):
self.emf.save(self.outfile)
def draw_arc(self, gcEdge, rgbFace, x, y, width, height, angle1, angle2, rotation):
"""
Draw an arc using GraphicsContext instance gcEdge, centered at x,y,
with width and height and angles from 0.0 to 360.0
0 degrees is at 3-o'clock
positive angles are anti-clockwise
If the color rgbFace is not None, fill the arc with it.
"""
if debugPrint: print "draw_arc: ({0:f},{1:f}) angles=({2:f},{3:f}) w,h=({4:f},{5:f})".format(x, y, angle1, angle2, width, height)
pen=self.select_pen(gcEdge)
brush=self.select_brush(rgbFace)
# This algorithm doesn't work very well on small circles
# because of rounding error. This shows up most obviously on
# legends where the circles are small anyway, and it is
# compounded by the fact that it puts several circles right
# next to each other so the differences are obvious.
hw=width/2
hh=height/2
x1=int(x-width/2)
y1=int(y-height/2)
if brush:
self.emf.Pie(int(x-hw),int(self.height-(y-hh)),int(x+hw),int(self.height-(y+hh)),int(x+math.cos(angle1*math.pi/180.0)*hw),int(self.height-(y+math.sin(angle1*math.pi/180.0)*hh)),int(x+math.cos(angle2*math.pi/180.0)*hw),int(self.height-(y+math.sin(angle2*math.pi/180.0)*hh)))
else:
self.emf.Arc(int(x-hw),int(self.height-(y-hh)),int(x+hw),int(self.height-(y+hh)),int(x+math.cos(angle1*math.pi/180.0)*hw),int(self.height-(y+math.sin(angle1*math.pi/180.0)*hh)),int(x+math.cos(angle2*math.pi/180.0)*hw),int(self.height-(y+math.sin(angle2*math.pi/180.0)*hh)))
def draw_image(self, x, y, im, bbox):
"""
Draw the Image instance into the current axes; x is the
distance in pixels from the left hand side of the canvas. y is
the distance from the origin. That is, if origin is upper, y
is the distance from top. If origin is lower, y is the
distance from bottom
bbox is a matplotlib.transforms.BBox instance for clipping, or
None
"""
# pyemf2 currently doesn't support bitmaps.
pass
def draw_line(self, gc, x1, y1, x2, y2):
"""
Draw a single line from x1,y1 to x2,y2
"""
if debugPrint: print "draw_line: ({0:f},{1:f}) - ({2:f},{3:f})".format(x1, y1, x2, y2)
if self.select_pen(gc):
self.emf.Polyline([(long(x1),long(self.height-y1)),(long(x2),long(self.height-y2))])
else:
if debugPrint: print "draw_line: optimizing away ({0:f},{1:f}) - ({2:f},{3:f})".format(x1, y1, x2, y2)
def draw_lines(self, gc, x, y):
"""
x and y are equal length arrays, draw lines connecting each
point in x, y
"""
if debugPrint: print "draw_lines: {0:d} points".format(len(str(x)))
# optimize away anything that won't actually be drawn. Edge
# style must not be PS_NULL for it to appear on screen.
if self.select_pen(gc):
points = [(long(x[i]), long(self.height-y[i])) for i in range(len(x))]
self.emf.Polyline(points)
def draw_point(self, gc, x, y):
"""
Draw a single point at x,y
Where 'point' is a device-unit point (or pixel), not a matplotlib point
"""
if debugPrint: print "draw_point: ({0:f},{1:f})".format(x, y)
# don't cache this pen
pen=EMFPen(self.emf,gc)
self.emf.SetPixel(long(x),long(self.height-y),(pen.r,pen.g,pen.b))
def draw_polygon(self, gcEdge, rgbFace, points):
"""
Draw a polygon using the GraphicsContext instance gc.
points is a len vertices tuple, each element
giving the x,y coords a vertex
If the color rgbFace is not None, fill the polygon with it
"""
if debugPrint: print "draw_polygon: {0:d} points".format(len(points))
# optimize away anything that won't actually draw. Either a
# face color or edge style must be defined
pen=self.select_pen(gcEdge)
brush=self.select_brush(rgbFace)
if pen or brush:
points = [(long(x), long(self.height-y)) for x,y in points]
self.emf.Polygon(points)
else:
points = [(long(x), long(self.height-y)) for x,y in points]
if debugPrint: print "draw_polygon: optimizing away polygon: {0:d} points = {1!s}".format(len(points), str(points))
def draw_rectangle(self, gcEdge, rgbFace, x, y, width, height):
"""
Draw a non-filled rectangle using the GraphicsContext instance gcEdge,
with lower left at x,y with width and height.
If rgbFace is not None, fill the rectangle with it.
"""
if debugPrint: print "draw_rectangle: ({0:f},{1:f}) w={2:f},h={3:f}".format(x, y, width, height)
# optimize away anything that won't actually draw. Either a
# face color or edge style must be defined
pen=self.select_pen(gcEdge)
brush=self.select_brush(rgbFace)
if pen or brush:
self.emf.Rectangle(int(x),int(self.height-y),int(x)+int(width),int(self.height-y)-int(height))
else:
if debugPrint: print "draw_rectangle: optimizing away ({0:f},{1:f}) w={2:f},h={3:f}".format(x, y, width, height)
def draw_text(self, gc, x, y, s, prop, angle, ismath=False):
"""
Draw the text.Text instance s at x,y (display coords) with font
properties instance prop at angle in degrees, using GraphicsContext gc
**backend implementers note**
When you are trying to determine if you have gotten your bounding box
right (which is what enables the text layout/alignment to work
properly), it helps to change the line in text.py
if 0: bbox_artist(self, renderer)
to if 1, and then the actual bounding box will be blotted along with
your text.
"""
if debugText: print "draw_text: ({0:f},{1:f}) {2:d} degrees: '{3!s}'".format(x, y, angle, s)
if ismath:
self.draw_math_text(gc,x,y,s,prop,angle)
else:
self.draw_plain_text(gc,x,y,s,prop,angle)
def draw_plain_text(self, gc, x, y, s, prop, angle):
"""
Draw a text string verbatim; no conversion is done.
"""
if debugText: print "draw_plain_text: ({0:f},{1:f}) {2:d} degrees: '{3!s}'".format(x, y, angle, s)
if debugText: print " properties:\n"+str(prop)
self.select_font(prop,angle)
# haxor follows! The subtleties of text placement in EMF
# still elude me a bit. It always seems to be too high on the
# page, about 10 pixels too high on a 300dpi resolution image.
# So, I'm adding this hack for the moment:
hackoffsetper300dpi=10
xhack=math.sin(angle*math.pi/180.0)*hackoffsetper300dpi*self.dpi/300.0
yhack=math.cos(angle*math.pi/180.0)*hackoffsetper300dpi*self.dpi/300.0
self.emf.TextOut(long(x+xhack),long(y+yhack),s)
def draw_math_text(self, gc, x, y, s, prop, angle):
"""
Draw a subset of TeX, currently handles exponents only. Since
pyemf doesn't have any raster functionality yet, the
texmanager.get_rgba won't help.
"""
if debugText: print "draw_math_text: ({0:f},{1:f}) {2:d} degrees: '{3!s}'".format(x, y, angle, s)
s = s[1:-1] # strip the $ from front and back
match=re.match("10\^\{(.+)\}",s)
if match:
exp=match.group(1)
if debugText: print " exponent={0!s}".format(exp)
font = self._get_font_ttf(prop)
font.set_text("10", 0.0)
w, h = font.get_width_height()
w /= 64.0 # convert from subpixels
h /= 64.0
self.draw_plain_text(gc,x,y,"10",prop,angle)
propexp=prop.copy()
propexp.set_size(prop.get_size_in_points()*.8)
self.draw_plain_text(gc,x+w+self.points_to_pixels(self.hackPointsForMathExponent),y-(h/2),exp,propexp,angle)
else:
# if it isn't an exponent, then render the raw TeX string.
self.draw_plain_text(gc,x,y,s,prop,angle)
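# Example of the behaviour above (comment added for clarity): the math string
# '$10^{4}$' is drawn as '10' at the requested size, followed by '4' at 0.8x the
# size, shifted right and raised by half the height of the '10'.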
def get_math_text_width_height(self, s, prop):
"""
get the width and height in display coords of the string s
with FontPropertry prop, ripped right out of backend_ps. This
method must be kept in sync with draw_math_text.
"""
if debugText: print "get_math_text_width_height:"
s = s[1:-1] # strip the $ from front and back
match=re.match("10\^\{(.+)\}",s)
if match:
exp=match.group(1)
if debugText: print " exponent={0!s}".format(exp)
font = self._get_font_ttf(prop)
font.set_text("10", 0.0)
w1, h1 = font.get_width_height()
propexp=prop.copy()
propexp.set_size(prop.get_size_in_points()*.8)
fontexp=self._get_font_ttf(propexp)
fontexp.set_text(exp, 0.0)
w2, h2 = fontexp.get_width_height()
w=w1+w2
h=h1+(h2/2)
w /= 64.0 # convert from subpixels
h /= 64.0
w+=self.points_to_pixels(self.hackPointsForMathExponent)
if debugText: print " math string={0!s} w,h=({1:f},{2:f})".format(s, w, h)
else:
w,h=self.get_text_width_height(s,prop,False)
return w, h
def flipy(self):
"""return true if y small numbers are top for renderer
Is used for drawing text (text.py) and images (image.py) only
"""
return True
def get_canvas_width_height(self):
"""
return the canvas width and height in display coords
"""
return self.width,self.height
def set_handle(self,type,handle):
"""
Update the EMF file with the current handle, but only if it
isn't the same as the last one. Don't want to flood the file
with duplicate info.
"""
if self.lastHandle[type] != handle:
self.emf.SelectObject(handle)
self.lastHandle[type]=handle
def get_font_handle(self, prop, angle):
"""
Look up the handle for the font based on the dict of
properties *and* the rotation angle, since in EMF the font
rotation is a part of the font definition.
"""
prop=EMFFontProperties(prop,angle)
size=int(prop.get_size_in_points()*self.pointstodpi)
face=prop.get_name()
key = hash(prop)
handle = self._fontHandle.get(key)
if handle is None:
handle=self.emf.CreateFont(-size, 0, int(angle)*10, int(angle)*10,
pyemf.FW_NORMAL, 0, 0, 0,
pyemf.ANSI_CHARSET, pyemf.OUT_DEFAULT_PRECIS,
pyemf.CLIP_DEFAULT_PRECIS, pyemf.DEFAULT_QUALITY,
pyemf.DEFAULT_PITCH | pyemf.FF_DONTCARE, face);
if debugHandle: print "get_font_handle: creating handle={0:d} for face={1!s} size={2:d}".format(handle, face, size)
self._fontHandle[key]=handle
if debugHandle: print " found font handle {0:d} for face={1!s} size={2:d}".format(handle, face, size)
self.set_handle("font",handle)
return handle
def select_font(self,prop,angle):
handle=self.get_font_handle(prop,angle)
self.set_handle("font",handle)
def select_pen(self, gc):
"""
Select a pen that includes the color, line width and line
style. Return the pen if it will draw a line, or None if the
pen won't produce any output (i.e. the style is PS_NULL)
"""
pen=EMFPen(self.emf,gc)
key=hash(pen)
handle=self._fontHandle.get(key)
if handle is None:
handle=pen.get_handle()
self._fontHandle[key]=handle
if debugHandle: print " found pen handle {0:d}".format(handle)
self.set_handle("pen",handle)
if pen.style != pyemf.PS_NULL:
return pen
else:
return None
def select_brush(self, rgb):
"""
Select a fill color, and return the brush if the color is
valid or None if this won't produce a fill operation.
"""
if rgb is not None:
brush=EMFBrush(self.emf,rgb)
key=hash(brush)
handle=self._fontHandle.get(key)
if handle is None:
handle=brush.get_handle()
self._fontHandle[key]=handle
if debugHandle: print " found brush handle {0:d}".format(handle)
self.set_handle("brush",handle)
return brush
else:
return None
def _get_font_ttf(self, prop):
"""
get the true type font properties, used because EMFs on
windows will use true type fonts.
"""
key = hash(prop)
font = _fontd.get(key)
if font is None:
fname = findfont(prop)
if debugText: print "_get_font_ttf: name={0!s}".format(fname)
font = FT2Font(str(fname))
_fontd[key] = font
font.clear()
size = prop.get_size_in_points()
font.set_size(size, self.dpi)
return font
def get_text_width_height(self, s, prop, ismath):
"""
get the width and height in display coords of the string s
with FontPropertry prop, ripped right out of backend_ps
"""
if debugText: print "get_text_width_height: ismath={0!s} properties: {1!s}".format(str(ismath), str(prop))
if ismath:
if debugText: print " MATH TEXT! = {0!s}".format(str(ismath))
w,h = self.get_math_text_width_height(s, prop)
return w,h
font = self._get_font_ttf(prop)
font.set_text(s, 0.0)
w, h = font.get_width_height()
w /= 64.0 # convert from subpixels
h /= 64.0
if debugText: print " text string={0!s} w,h=({1:f},{2:f})".format(s, w, h)
return w, h
def new_gc(self):
return GraphicsContextEMF()
def points_to_pixels(self, points):
# if backend doesn't have dpi, eg, postscript or svg
#return points
# elif backend assumes a value for pixels_per_inch
#return points/72.0 * self.dpi.get() * pixels_per_inch/72.0
# else
return points/72.0 * self.dpi
class GraphicsContextEMF(GraphicsContextBase):
"""
The graphics context provides the color, line styles, etc... See the gtk
and postscript backends for examples of mapping the graphics context
attributes (cap styles, join styles, line widths, colors) to a particular
backend. In GTK this is done by wrapping a gtk.gdk.GC object and
forwarding the appropriate calls to it using a dictionary mapping styles
to gdk constants. In Postscript, all the work is done by the renderer,
mapping line styles to postscript calls.
If it's more appropriate to do the mapping at the renderer level (as in
the postscript backend), you don't need to override any of the GC methods.
If it's more appropriate to wrap an instance (as in the GTK backend) and
do the mapping here, you'll need to override several of the setter
methods.
The base GraphicsContext stores colors as a RGB tuple on the unit
interval, eg, (0.5, 0.0, 1.0). You may need to map this to colors
appropriate for your backend.
"""
pass
########################################################################
#
# The following functions and classes are for pylab and implement
# window/figure managers, etc...
#
########################################################################
def draw_if_interactive():
"""
For image backends - is not required
For GUI backends - this should be overriden if drawing should be done in
interactive python mode
"""
pass
def show():
"""
For image backends - is not required
For GUI backends - show() is usually the last line of a pylab script and
tells the backend that it is time to draw. In interactive mode, this may
be a do nothing func. See the GTK backend for an example of how to handle
interactive versus batch mode
"""
for manager in Gcf.get_all_fig_managers():
# do something to display the GUI
pass
def new_figure_manager(num, *args, **kwargs):
"""
Create a new figure manager instance
"""
# if a main-level app must be created, this is the usual place to
# do it -- see backend_wx, backend_wxagg and backend_tkagg for
# examples. Not all GUIs require explicit instantiation of a
# main-level app (e.g. backend_gtk, backend_gtkagg) for pylab
FigureClass = kwargs.pop('FigureClass', Figure)
thisFig = FigureClass(*args, **kwargs)
canvas = FigureCanvasEMF(thisFig)
manager = FigureManagerEMF(canvas, num)
return manager
class FigureCanvasEMF(FigureCanvasBase):
"""
The canvas the figure renders into. Calls the draw and print fig
methods, creates the renderers, etc...
Public attribute
figure - A Figure instance
"""
def draw(self):
"""
Draw the figure using the renderer
"""
pass
filetypes = {'emf': 'Enhanced Metafile'}
def print_emf(self, filename, dpi=300, **kwargs):
width, height = self.figure.get_size_inches()
renderer = RendererEMF(filename,width,height,dpi)
self.figure.draw(renderer)
renderer.save()
def get_default_filetype(self):
return 'emf'
class FigureManagerEMF(FigureManagerBase):
"""
Wrap everything up into a window for the pylab interface
For non interactive backends, the base class does all the work
"""
pass
########################################################################
#
# Now just provide the standard names that backend.__init__ is expecting
#
########################################################################
FigureManager = FigureManagerEMF
| agpl-3.0 |
Windy-Ground/scikit-learn | sklearn/tests/test_kernel_approximation.py | 244 | 7588 | import numpy as np
from scipy.sparse import csr_matrix
from sklearn.utils.testing import assert_array_equal, assert_equal, assert_true
from sklearn.utils.testing import assert_not_equal
from sklearn.utils.testing import assert_array_almost_equal, assert_raises
from sklearn.utils.testing import assert_less_equal
from sklearn.metrics.pairwise import kernel_metrics
from sklearn.kernel_approximation import RBFSampler
from sklearn.kernel_approximation import AdditiveChi2Sampler
from sklearn.kernel_approximation import SkewedChi2Sampler
from sklearn.kernel_approximation import Nystroem
from sklearn.metrics.pairwise import polynomial_kernel, rbf_kernel
# generate data
rng = np.random.RandomState(0)
X = rng.random_sample(size=(300, 50))
Y = rng.random_sample(size=(300, 50))
X /= X.sum(axis=1)[:, np.newaxis]
Y /= Y.sum(axis=1)[:, np.newaxis]
def test_additive_chi2_sampler():
# test that AdditiveChi2Sampler approximates kernel on random data
# compute exact kernel
# abbreviations for easier formulas
X_ = X[:, np.newaxis, :]
Y_ = Y[np.newaxis, :, :]
large_kernel = 2 * X_ * Y_ / (X_ + Y_)
# reduce to n_samples_x x n_samples_y by summing over features
kernel = (large_kernel.sum(axis=2))
# approximate kernel mapping
transform = AdditiveChi2Sampler(sample_steps=3)
X_trans = transform.fit_transform(X)
Y_trans = transform.transform(Y)
kernel_approx = np.dot(X_trans, Y_trans.T)
assert_array_almost_equal(kernel, kernel_approx, 1)
X_sp_trans = transform.fit_transform(csr_matrix(X))
Y_sp_trans = transform.transform(csr_matrix(Y))
assert_array_equal(X_trans, X_sp_trans.A)
assert_array_equal(Y_trans, Y_sp_trans.A)
# test error is raised on negative input
Y_neg = Y.copy()
Y_neg[0, 0] = -1
assert_raises(ValueError, transform.transform, Y_neg)
# test error on invalid sample_steps
transform = AdditiveChi2Sampler(sample_steps=4)
assert_raises(ValueError, transform.fit, X)
# test that the sample interval is set correctly
sample_steps_available = [1, 2, 3]
for sample_steps in sample_steps_available:
# test that the sample_interval is initialized correctly
transform = AdditiveChi2Sampler(sample_steps=sample_steps)
assert_equal(transform.sample_interval, None)
# test that the sample_interval is changed in the fit method
transform.fit(X)
assert_not_equal(transform.sample_interval_, None)
# test that the sample_interval is set correctly
sample_interval = 0.3
transform = AdditiveChi2Sampler(sample_steps=4,
sample_interval=sample_interval)
assert_equal(transform.sample_interval, sample_interval)
transform.fit(X)
assert_equal(transform.sample_interval_, sample_interval)
def test_skewed_chi2_sampler():
# test that RBFSampler approximates kernel on random data
# compute exact kernel
c = 0.03
    # abbreviations for an easier formula
X_c = (X + c)[:, np.newaxis, :]
Y_c = (Y + c)[np.newaxis, :, :]
# we do it in log-space in the hope that it's more stable
    # this array is n_samples_x x n_samples_y x n_features
log_kernel = ((np.log(X_c) / 2.) + (np.log(Y_c) / 2.) + np.log(2.) -
np.log(X_c + Y_c))
# reduce to n_samples_x x n_samples_y by summing over features in log-space
kernel = np.exp(log_kernel.sum(axis=2))
# approximate kernel mapping
transform = SkewedChi2Sampler(skewedness=c, n_components=1000,
random_state=42)
X_trans = transform.fit_transform(X)
Y_trans = transform.transform(Y)
kernel_approx = np.dot(X_trans, Y_trans.T)
assert_array_almost_equal(kernel, kernel_approx, 1)
# test error is raised on negative input
Y_neg = Y.copy()
Y_neg[0, 0] = -1
assert_raises(ValueError, transform.transform, Y_neg)
def test_rbf_sampler():
# test that RBFSampler approximates kernel on random data
# compute exact kernel
gamma = 10.
kernel = rbf_kernel(X, Y, gamma=gamma)
# approximate kernel mapping
rbf_transform = RBFSampler(gamma=gamma, n_components=1000, random_state=42)
X_trans = rbf_transform.fit_transform(X)
Y_trans = rbf_transform.transform(Y)
kernel_approx = np.dot(X_trans, Y_trans.T)
error = kernel - kernel_approx
assert_less_equal(np.abs(np.mean(error)), 0.01) # close to unbiased
np.abs(error, out=error)
assert_less_equal(np.max(error), 0.1) # nothing too far off
assert_less_equal(np.mean(error), 0.05) # mean is fairly close
def test_input_validation():
# Regression test: kernel approx. transformers should work on lists
# No assertions; the old versions would simply crash
X = [[1, 2], [3, 4], [5, 6]]
AdditiveChi2Sampler().fit(X).transform(X)
SkewedChi2Sampler().fit(X).transform(X)
RBFSampler().fit(X).transform(X)
X = csr_matrix(X)
RBFSampler().fit(X).transform(X)
def test_nystroem_approximation():
# some basic tests
rnd = np.random.RandomState(0)
X = rnd.uniform(size=(10, 4))
# With n_components = n_samples this is exact
X_transformed = Nystroem(n_components=X.shape[0]).fit_transform(X)
K = rbf_kernel(X)
assert_array_almost_equal(np.dot(X_transformed, X_transformed.T), K)
trans = Nystroem(n_components=2, random_state=rnd)
X_transformed = trans.fit(X).transform(X)
assert_equal(X_transformed.shape, (X.shape[0], 2))
# test callable kernel
linear_kernel = lambda X, Y: np.dot(X, Y.T)
trans = Nystroem(n_components=2, kernel=linear_kernel, random_state=rnd)
X_transformed = trans.fit(X).transform(X)
assert_equal(X_transformed.shape, (X.shape[0], 2))
# test that available kernels fit and transform
kernels_available = kernel_metrics()
for kern in kernels_available:
trans = Nystroem(n_components=2, kernel=kern, random_state=rnd)
X_transformed = trans.fit(X).transform(X)
assert_equal(X_transformed.shape, (X.shape[0], 2))
def test_nystroem_singular_kernel():
# test that nystroem works with singular kernel matrix
rng = np.random.RandomState(0)
X = rng.rand(10, 20)
X = np.vstack([X] * 2) # duplicate samples
gamma = 100
N = Nystroem(gamma=gamma, n_components=X.shape[0]).fit(X)
X_transformed = N.transform(X)
K = rbf_kernel(X, gamma=gamma)
assert_array_almost_equal(K, np.dot(X_transformed, X_transformed.T))
    assert_true(np.all(np.isfinite(X_transformed)))
def test_nystroem_poly_kernel_params():
# Non-regression: Nystroem should pass other parameters beside gamma.
rnd = np.random.RandomState(37)
X = rnd.uniform(size=(10, 4))
K = polynomial_kernel(X, degree=3.1, coef0=.1)
nystroem = Nystroem(kernel="polynomial", n_components=X.shape[0],
degree=3.1, coef0=.1)
X_transformed = nystroem.fit_transform(X)
assert_array_almost_equal(np.dot(X_transformed, X_transformed.T), K)
def test_nystroem_callable():
# Test Nystroem on a callable.
rnd = np.random.RandomState(42)
n_samples = 10
X = rnd.uniform(size=(n_samples, 4))
def logging_histogram_kernel(x, y, log):
"""Histogram kernel that writes to a log."""
log.append(1)
return np.minimum(x, y).sum()
kernel_log = []
X = list(X) # test input validation
Nystroem(kernel=logging_histogram_kernel,
n_components=(n_samples - 1),
kernel_params={'log': kernel_log}).fit(X)
assert_equal(len(kernel_log), n_samples * (n_samples - 1) / 2)
| bsd-3-clause |
nlhepler/freetype-py3 | examples/wordle.py | 1 | 4455 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# -----------------------------------------------------------------------------
#
# FreeType high-level python API - Copyright 2011 Nicolas P. Rougier
# Distributed under the terms of the new BSD license.
#
# -----------------------------------------------------------------------------
import math
import numpy as np
from freetype import *
import matplotlib.pyplot as plt
def make_label(text, filename, size=12, angle=0):
'''
Parameters:
-----------
text : string
Text to be displayed
filename : string
Path to a font
size : int
        Font size in points (converted to 1/64th points for set_char_size)
angle : float
Text angle in degrees
'''
face = Face(filename)
face.set_char_size( size*64 )
angle = (angle/180.0)*math.pi
matrix = FT_Matrix( (int)( math.cos( angle ) * 0x10000 ),
(int)(-math.sin( angle ) * 0x10000 ),
(int)( math.sin( angle ) * 0x10000 ),
(int)( math.cos( angle ) * 0x10000 ))
flags = FT_LOAD_RENDER
pen = FT_Vector(0,0)
FT_Set_Transform( face._FT_Face, byref(matrix), byref(pen) )
previous = 0
xmin, xmax = 0, 0
ymin, ymax = 0, 0
for c in text:
face.load_char(c, flags)
kerning = face.get_kerning(previous, c)
previous = c
bitmap = face.glyph.bitmap
pitch = face.glyph.bitmap.pitch
width = face.glyph.bitmap.width
rows = face.glyph.bitmap.rows
top = face.glyph.bitmap_top
left = face.glyph.bitmap_left
pen.x += kerning.x
x0 = (pen.x >> 6) + left
x1 = x0 + width
y0 = (pen.y >> 6) - (rows - top)
y1 = y0 + rows
xmin, xmax = min(xmin, x0), max(xmax, x1)
ymin, ymax = min(ymin, y0), max(ymax, y1)
pen.x += face.glyph.advance.x
pen.y += face.glyph.advance.y
L = np.zeros((ymax-ymin, xmax-xmin),dtype=np.ubyte)
previous = 0
pen.x, pen.y = 0, 0
for c in text:
face.load_char(c, flags)
kerning = face.get_kerning(previous, c)
previous = c
bitmap = face.glyph.bitmap
pitch = face.glyph.bitmap.pitch
width = face.glyph.bitmap.width
rows = face.glyph.bitmap.rows
top = face.glyph.bitmap_top
left = face.glyph.bitmap_left
pen.x += kerning.x
x = (pen.x >> 6) - xmin + left
y = (pen.y >> 6) - ymin - (rows - top)
data = []
for j in range(rows):
data.extend(bitmap.buffer[j*pitch:j*pitch+width])
if len(data):
Z = np.array(data,dtype=np.ubyte).reshape(rows, width)
L[y:y+rows,x:x+width] |= Z[::-1,::1]
pen.x += face.glyph.advance.x
pen.y += face.glyph.advance.y
return L
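# A minimal usage sketch (illustrative only): render a short string and display
# the returned alpha array. The font path './Vera.ttf' is an assumption borrowed
# from the demo below; any TrueType font file would do.
#
#     label = make_label('FreeType', './Vera.ttf', size=32, angle=10)
#     plt.imshow(label, interpolation='nearest', cmap=plt.cm.gray, origin='lower')
#     plt.show()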
if __name__ == '__main__':
    from PIL import Image
n_words = 100
H, W = 600, 800
I = np.zeros((H, W, 3), dtype=np.ubyte)
S = np.random.normal(0,1,n_words)
S = (S-S.min())/(S.max()-S.min())
S = np.sort(1-np.sqrt(S))[::-1]
sizes = (12 + S*48).astype(int).tolist()
def spiral():
eccentricity = 1.5
radius = 8
step = 0.1
t = 0
while True:
t += step
yield eccentricity*radius*t*math.cos(t), radius*t*math.sin(t)
fails = 0
for size in sizes:
angle = np.random.randint(-25,25)
L = make_label('Hello', './Vera.ttf', size, angle=angle)
h,w = L.shape
if h < H and w < W:
x0 = W//2 + (np.random.uniform()-.1)*50
y0 = H//2 + (np.random.uniform()-.1)*50
for dx,dy in spiral():
c = .25+.75*np.random.random()
x = int(x0+dx)
y = int(y0+dy)
if x <= w//2 or y <= h//2 or x >= (W-w//2) or y >= (H-h//2):
fails += 1
break
if (I[y-h//2:y-h//2+h, x-w//2:x-w//2+w,0] * L).sum() == 0:
I[y-h//2:y-h//2+h, x-w//2:x-w//2+w,0] |= (c * L).astype(int)
I[y-h//2:y-h//2+h, x-w//2:x-w//2+w,1] |= (c * L).astype(int)
I[y-h//2:y-h//2+h, x-w//2:x-w//2+w,2] |= (c * L).astype(int)
break
    print("Number of fails:", fails)
plt.imshow(I, interpolation='nearest', cmap=plt.cm.gray, origin='lower')
plt.show()
I = Image.fromarray(I[::-1,::1,::1], mode='RGB')
I.save('wordle.png')
| bsd-3-clause |
co-ncrn/co-ncrn.github.io | code/data_prep_master.py | 1 | 11142 | import pandas as pd
import numpy as np
import copy
import cenpy as cen
#import get_codes
data_path = 'C:/Users/Becky/Documents/acs_research/B_files/censusData/'
def get_data(scenario, fips, geo_name, geos='tract', api=True):
#general scenario variables
if scenario == 'gen_count':
cols = ['B01003_001']
elif scenario == 'gen_prop':
cols = ['B25002_002', 'B25002_001', 'B11001_003', 'B11001_001', 'B16010_041', 'B16010_001', 'B07003_004', 'B07003_001',
'B03002_003', 'B03002_001', 'B03002_004', 'B03002_001', 'B03002_012', 'B03002_001', 'B09001_001', 'B01003_001',
'B09020_001', 'B01003_001']
elif scenario == 'gen_ratio':
cols = ['B25019_001', 'B25001_001', 'B19025_001', 'B25002_002', 'B25008_001', 'B25002_002']
elif scenario == 'trans_count':
cols = ['B01003_001']
elif scenario == 'trans_prop':
cols = ['B08101_009', 'B08101_001', 'B08101_025', 'B08101_001']
elif scenario == 'trans_ratio':
cols = ['B25046_001', 'B01003_001', 'B08013_001', 'B08134_001']
elif scenario == 'hous_count':
cols = ['B01003_001']
elif scenario == 'hous_prop':
cols = ['B25002_002', 'B25002_001', 'B25003_002', 'B25003_001', 'B25003_003', 'B25003_001', 'B25024_002', 'B25024_001']
elif scenario == 'hous_ratio':
cols = ['B25019_001', 'B25001_001', 'B25082_001', 'B25003_002', 'B25065_001', 'B25003_003']
elif scenario == 'pov_count':
cols = ['B01003_001']
elif scenario == 'pov_prop':
cols = ['B17006_016', 'B17006_001', 'B17001_031', 'B17001_001', 'B23025_004', 'B23025_003']
elif scenario == 'pov_ratio':
cols = ['B25089_001', 'B25120_002', 'B25065_001', 'B25120_005']
elif scenario == 'sovi_count':
cols = []
elif scenario == 'sovi_ratio':
cols = ['B25008_001', 'B25002_002', 'B19025_001', 'B25008_001', 'B19001_017', 'B11001_001',
'B25079_001', 'B25080_001', 'B25082_001', 'B25108_001', 'B25110_001', 'B25003_002',
'B25065_001', 'B25066_001', 'B25067_001', 'B25112_001', 'B25114_001', 'B25070_001', 'B01003_001']
#'B01003_001']
#Final ratio pair variable is added from geographic data below
elif scenario == 'sovi_prop':
cols = ['B03002_004', 'B03002_001', 'B03002_005', 'B03002_001', 'B03002_006', 'B03002_001', 'B03002_012', 'B03002_001',
'B25003_003', 'B25002_001', 'B09020_021', 'B01003_001', 'B01001_026', 'B01003_001', 'B11001_006', 'B11001_001',
'B25002_003', 'B25002_001', 'B16010_002', 'B16010_001', 'C24050_002', 'C24050_001', 'C24050_029', 'C24050_001',
'B08201_002', 'B08201_001', 'B17021_002', 'B17021_001', 'B25024_010', 'B25024_001', 'C24010_038', 'C24010_001',
'B19055_002', 'B19055_001', 'B09002_002', 'B09002_001', 'B06001_002', 'B09020_001', 'B01003_001',
'B06007_005', 'B06007_008', 'B06007_001', 'B23022_025', 'B23022_049', 'B23022_001']
elif scenario == 'income':
cols = ['B06011_001']
base_file_name = data_path+geo_name+'/'+geo_name+'_'+geos+'_'+scenario
if api:
# sort out the column names
cols_ests = [i+'E' for i in cols]
cols_moes = [i+'M' for i in cols]
cols_all_data = cols_ests + cols_moes
cols_all_data.append('B11001_001E') # always pull total households
cols_all_data = list(set(cols_all_data)) # get the unique column names
cols_all = copy.copy(cols_all_data)
cols_all.extend(['NAME', 'GEOID'])
# API connection
api_key = '19809d94f2ffead8a50814ec118dce7c5a987bfd' # rebecca's census API key
#api_key = '2ea3c1b17b3c1b907849cfaa1a04c51f4d0a19bf' # folch's census API key
api_database = 'ACSSF5Y2012' # ACS 2008-2012
api_conn = cen.base.Connection(api_database)
api_conn.set_mapservice('tigerWMS_Census2010')
api_conn.mapservice
# pull column info from API
cols_detail = api_conn.variables.ix[cols_all].label.to_dict()
cols_detail = pd.DataFrame.from_dict(cols_detail, orient='index')
# pull the data from the API
data = pd.DataFrame()
geodata = pd.DataFrame()
for state, county in fips:
if county == 'all':
data = data.append(api_conn.query(cols_all, geo_unit=geos+':*', geo_filter={'state':state}, apikey = api_key))
if scenario == 'sovi_ratio':
geodata = geodata.append(api_conn.mapservice.query(layer = 14, where = 'STATE=%s' % state, pkg='geopandas', apikey=api_key))
else:
data = data.append(api_conn.query(cols_all, geo_unit=geos+':*', geo_filter={'state':state, 'county':county}, apikey = api_key))
if scenario == 'sovi_ratio':
geodata = geodata.append(api_conn.mapservice.query(layer = 14, where = 'STATE=%s' % state + ' and COUNTY=%s' % county, pkg='geopandas', apikey=api_key))
# convert dataframe index to the clean FIPS code prepended with 'g'
index = 'g' + data.GEOID
if geos=='tract':
index = index.str.replace('14000US','')
elif geos=='county':
index = index.str.replace('05000US','')
data.index = index
if scenario == 'sovi_ratio':
geodata = geodata[['AREALAND', 'GEOID']]
geodata.AREALAND = geodata.AREALAND / 2589988.11
geodata['GEOID'] = 'g' + geodata.GEOID
data['agg_hs_val'] = data['B25079_001E']
data['agg_hs_val'] = data['agg_hs_val'].fillna(data['B25080_001E'])
data['agg_hs_val'] = data['agg_hs_val'].fillna(data['B25082_001E'])
data['agg_hs_val'] = data['agg_hs_val'].fillna(data['B25108_001E'])
data['agg_hs_val'] = data['agg_hs_val'].fillna(data['B25110_001E'])
data['agg_rent'] = data['B25065_001E']
data['agg_rent'] = data['agg_rent'].fillna(data['B25066_001E'])
data['agg_rent'] = data['agg_rent'].fillna(data['B25067_001E'])
data['agg_rent'] = data['agg_rent'].fillna(data['B25112_001E'])
data['agg_rent'] = data['agg_rent'].fillna(data['B25114_001E'])
data['agg_hs_val'] = data['B25079_001M']
data['agg_hs_val'] = data['agg_hs_val'].fillna(data['B25080_001M'])
data['agg_hs_val'] = data['agg_hs_val'].fillna(data['B25082_001M'])
data['agg_hs_val'] = data['agg_hs_val'].fillna(data['B25108_001M'])
data['agg_hs_val'] = data['agg_hs_val'].fillna(data['B25110_001M'])
data['agg_rent'] = data['B25065_001M']
data['agg_rent'] = data['agg_rent'].fillna(data['B25066_001M'])
data['agg_rent'] = data['agg_rent'].fillna(data['B25067_001M'])
data['agg_rent'] = data['agg_rent'].fillna(data['B25112_001M'])
data['agg_rent'] = data['agg_rent'].fillna(data['B25114_001M'])
data[cols_all_data] = data[cols_all_data].apply(pd.to_numeric)
# organize output dataframes and add multiindex column headers
if scenario == 'sovi_prop':
data['B06001_002E'] = data['B06001_002E'] + data['B09020_001E']
data['B06007_005E'] = data['B06007_005E'] + data['B06007_008E']
data['B23022_025E'] = data['B23022_025E'] + data['B23022_049E']
data['B06001_002M'] = np.sqrt((data['B06001_002M'])**2 + (data['B09020_001M'])**2)
data['B06007_005M'] = np.sqrt((data['B06007_005M'])**2 + (data['B06007_008M'])**2)
data['B23022_025M'] = np.sqrt((data['B23022_025M'])**2 + (data['B23022_049M'])**2)
output_ests = data[cols_ests]
output_moes = data[cols_moes]
if scenario == 'sovi_prop':
output_ests = output_ests.rename(columns = {'B23022_025E':'B23022_025E_B23022_049E',
'B06007_005E':'B06007_005E_B06007_008E',
'B06001_002E':'B06001_002E_B09020_001E'})
output_moes = output_moes.rename(columns = {'B23022_025M':'B23022_025M_B23022_049M',
'B06007_005M':'B06007_005M_B06007_008M',
'B06001_002M':'B06001_002M_B09020_001M'})
output_ests = output_ests.drop(['B09020_001E', 'B06007_008E', 'B23022_049E'], axis=1)
output_moes = output_moes.drop(['B09020_001M', 'B06007_008M', 'B23022_049M'], axis=1)
#identify tracts without households
empty_tracts = data[data.B11001_001E == 0]
empty_tracts = empty_tracts.index.values
#adjust annual variables
if scenario == 'pov_ratio':
#multiply values by 12
output_ests['B25089_001E'] = output_ests['B25089_001E'].mul(12)
output_ests['B25065_001E'] = output_ests['B25065_001E'].mul(12)
output_moes['B25089_001M'] = output_moes['B25089_001M'].mul(12)
output_moes['B25065_001M'] = output_moes['B25065_001M'].mul(12)
#write results to hard drive
if scenario=='sovi_ratio':
output_ests = output_ests.merge(geodata, left_index=True, right_on='GEOID')
output_ests = output_ests.set_index('GEOID')
output_ests = output_ests.rename(columns={"AREALAND":"AREALANDE"})
output_moes['AREALANDM'] = 0
output_ests = output_ests.drop(['B25065_001E', 'B25066_001E', 'B25067_001E', 'B25112_001E', 'B25114_001E',
'B25079_001E', 'B25080_001E', 'B25082_001E', 'B25108_001E', 'B25110_001E'], axis=1)
output_moes = output_moes.drop(['B25079_001M', 'B25080_001M', 'B25082_001M', 'B25108_001M', 'B25110_001M',
'B25065_001M', 'B25066_001M', 'B25067_001M', 'B25112_001M', 'B25114_001M'], axis=1)
output_ests.to_csv(base_file_name+'_ests.csv')
output_moes.to_csv(base_file_name+'_moes.csv')
cols_detail.to_csv(base_file_name+'_columns.csv', header=False)
np.savetxt(base_file_name+'_empty.csv', empty_tracts, delimiter=',', fmt='%s')
# always return data read from hard drive
output_ests = pd.read_csv(base_file_name+'_ests.csv', index_col=0)
output_moes = pd.read_csv(base_file_name+'_moes.csv', index_col=0)
cols_detail = pd.read_csv(base_file_name+'_columns.csv', header=None, index_col=0)
empty_tracts = np.genfromtxt(base_file_name+'_empty.csv', delimiter=',', dtype=str)
empty_tracts = np.atleast_1d(empty_tracts)
empty_tracts = empty_tracts.tolist()
return output_ests, output_moes, cols_detail, empty_tracts
#manual version:
#input MSA code to get corresponding list of lists of state and county FIPS codes
#codes = get_codes.get_msa('38900')
#output
#output_ests, output_moes, cols_detail, empty_tracts = get_data('trans_prop',
# fips=codes,
# geo_name='38900',
# geos='tract', api=True)
| gpl-3.0 |
dtudares/hello-world | yardstick/setup.py | 1 | 2434 | from setuptools import setup, find_packages
setup(
name="yardstick",
version="0.1.dev0",
packages=find_packages(),
include_package_data=True,
package_data={
'yardstick': [
'benchmark/scenarios/availability/*.yaml',
'benchmark/scenarios/availability/attacker/*.yaml',
'benchmark/scenarios/availability/ha_tools/*.bash',
'benchmark/scenarios/availability/ha_tools/*/*.bash',
'benchmark/scenarios/availability/attacker/scripts/*.bash',
'benchmark/scenarios/availability/monitor/*.yaml',
'benchmark/scenarios/availability/monitor/script_tools/*.bash',
'benchmark/scenarios/compute/*.bash',
'benchmark/scenarios/networking/*.bash',
'benchmark/scenarios/networking/*.txt',
'benchmark/scenarios/parser/*.sh',
'benchmark/scenarios/storage/*.bash',
'resources/files/*',
'resources/scripts/install/*.bash',
'resources/scripts/remove/*.bash'
],
'etc': [
'yardstick/nodes/*/*.yaml'
],
'tests': [
'opnfv/*/*.yaml'
]
},
url="https://www.opnfv.org",
install_requires=["backport_ipaddress", # remove with python3
"coverage>=3.6",
"flake8",
"Jinja2>=2.6",
"lxml",
"PyYAML>=3.10",
"pbr<2.0,>=1.3",
"python-openstackclient>=2.1.0",
"python-glanceclient>=0.12.0",
"python-heatclient>=0.2.12",
"python-keystoneclient>=0.11.1",
"python-neutronclient>=2.3.9",
"python-novaclient>=2.24.1",
"mock>=1.0.1", # remove with python3
"paramiko",
"netifaces",
"scp",
"six",
"testrepository>=0.0.18",
"testtools>=1.4.0",
"nose"
],
extras_require={
'plot': ["matplotlib>=1.4.2"]
},
entry_points={
'console_scripts': [
'yardstick=yardstick.main:main',
'yardstick-plot=yardstick.plot.plotter:main [plot]'
],
},
scripts=['tools/yardstick-img-modify']
)
| apache-2.0 |
kjung/scikit-learn | sklearn/covariance/__init__.py | 389 | 1157 | """
The :mod:`sklearn.covariance` module includes methods and algorithms to
robustly estimate the covariance of features given a set of points. The
precision matrix defined as the inverse of the covariance is also estimated.
Covariance estimation is closely related to the theory of Gaussian Graphical
Models.
"""
from .empirical_covariance_ import empirical_covariance, EmpiricalCovariance, \
log_likelihood
from .shrunk_covariance_ import shrunk_covariance, ShrunkCovariance, \
ledoit_wolf, ledoit_wolf_shrinkage, \
LedoitWolf, oas, OAS
from .robust_covariance import fast_mcd, MinCovDet
from .graph_lasso_ import graph_lasso, GraphLasso, GraphLassoCV
from .outlier_detection import EllipticEnvelope
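# Usage sketch (illustrative only; the random toy data is an assumption):
#
#     import numpy as np
#     from sklearn.covariance import LedoitWolf
#     X = np.random.RandomState(0).randn(50, 5)
#     estimator = LedoitWolf().fit(X)
#     estimator.covariance_, estimator.precision_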
__all__ = ['EllipticEnvelope',
'EmpiricalCovariance',
'GraphLasso',
'GraphLassoCV',
'LedoitWolf',
'MinCovDet',
'OAS',
'ShrunkCovariance',
'empirical_covariance',
'fast_mcd',
'graph_lasso',
'ledoit_wolf',
'ledoit_wolf_shrinkage',
'log_likelihood',
'oas',
'shrunk_covariance']
| bsd-3-clause |
amolkahat/pandas | pandas/compat/numpy/__init__.py | 4 | 1982 | """ support numpy compatibility across versions """
import re
import numpy as np
from distutils.version import LooseVersion
from pandas.compat import string_types, string_and_binary_types
# numpy versioning
_np_version = np.__version__
_nlv = LooseVersion(_np_version)
_np_version_under1p13 = _nlv < LooseVersion('1.13')
_np_version_under1p14 = _nlv < LooseVersion('1.14')
_np_version_under1p15 = _nlv < LooseVersion('1.15')
if _nlv < '1.12':
raise ImportError('this version of pandas is incompatible with '
'numpy < 1.12.0\n'
'your numpy version is {0}.\n'
'Please upgrade numpy to >= 1.12.0 to use '
'this pandas version'.format(_np_version))
_tz_regex = re.compile('[+-]0000$')
def tz_replacer(s):
if isinstance(s, string_types):
if s.endswith('Z'):
s = s[:-1]
elif _tz_regex.search(s):
s = s[:-5]
return s
def np_datetime64_compat(s, *args, **kwargs):
"""
provide compat for construction of strings to numpy datetime64's with
tz-changes in 1.11 that make '2015-01-01 09:00:00Z' show a deprecation
warning, when need to pass '2015-01-01 09:00:00'
"""
s = tz_replacer(s)
return np.datetime64(s, *args, **kwargs)
def np_array_datetime64_compat(arr, *args, **kwargs):
"""
provide compat for construction of an array of strings to a
np.array(..., dtype=np.datetime64(..))
tz-changes in 1.11 that make '2015-01-01 09:00:00Z' show a deprecation
warning, when need to pass '2015-01-01 09:00:00'
"""
# is_list_like
if (hasattr(arr, '__iter__')
and not isinstance(arr, string_and_binary_types)):
arr = [tz_replacer(s) for s in arr]
else:
arr = tz_replacer(arr)
return np.array(arr, *args, **kwargs)
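# Usage sketch (illustrative only; the literal timestamps are assumptions):
# both helpers strip the trailing 'Z' / '+0000' so that numpy >= 1.11 does not
# emit a timezone deprecation warning for the string form.
#
#     np_datetime64_compat('2015-01-01 09:00:00Z')
#     np_array_datetime64_compat(['2015-01-01 09:00:00Z', '2015-01-02 09:00:00Z'],
#                                dtype='datetime64[ns]')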
__all__ = ['np',
'_np_version_under1p13',
'_np_version_under1p14',
'_np_version_under1p15'
]
| bsd-3-clause |
dsquareindia/scikit-learn | sklearn/decomposition/truncated_svd.py | 21 | 8337 | """Truncated SVD for sparse matrices, aka latent semantic analysis (LSA).
"""
# Author: Lars Buitinck
# Olivier Grisel <[email protected]>
# Michael Becker <[email protected]>
# License: 3-clause BSD.
import numpy as np
import scipy.sparse as sp
try:
from scipy.sparse.linalg import svds
except ImportError:
from ..utils.arpack import svds
from ..base import BaseEstimator, TransformerMixin
from ..utils import check_array, as_float_array, check_random_state
from ..utils.extmath import randomized_svd, safe_sparse_dot, svd_flip
from ..utils.sparsefuncs import mean_variance_axis
__all__ = ["TruncatedSVD"]
class TruncatedSVD(BaseEstimator, TransformerMixin):
"""Dimensionality reduction using truncated SVD (aka LSA).
This transformer performs linear dimensionality reduction by means of
truncated singular value decomposition (SVD). Contrary to PCA, this
estimator does not center the data before computing the singular value
decomposition. This means it can work with scipy.sparse matrices
efficiently.
In particular, truncated SVD works on term count/tf-idf matrices as
returned by the vectorizers in sklearn.feature_extraction.text. In that
context, it is known as latent semantic analysis (LSA).
This estimator supports two algorithms: a fast randomized SVD solver, and
a "naive" algorithm that uses ARPACK as an eigensolver on (X * X.T) or
(X.T * X), whichever is more efficient.
Read more in the :ref:`User Guide <LSA>`.
Parameters
----------
n_components : int, default = 2
Desired dimensionality of output data.
Must be strictly less than the number of features.
The default value is useful for visualisation. For LSA, a value of
100 is recommended.
algorithm : string, default = "randomized"
SVD solver to use. Either "arpack" for the ARPACK wrapper in SciPy
(scipy.sparse.linalg.svds), or "randomized" for the randomized
algorithm due to Halko (2009).
n_iter : int, optional (default 5)
Number of iterations for randomized SVD solver. Not used by ARPACK.
The default is larger than the default in `randomized_svd` to handle
sparse matrices that may have large slowly decaying spectrum.
random_state : int or RandomState, optional
(Seed for) pseudo-random number generator. If not given, the
numpy.random singleton is used.
tol : float, optional
Tolerance for ARPACK. 0 means machine precision. Ignored by randomized
SVD solver.
Attributes
----------
components_ : array, shape (n_components, n_features)
explained_variance_ : array, shape (n_components,)
The variance of the training samples transformed by a projection to
each component.
explained_variance_ratio_ : array, shape (n_components,)
Percentage of variance explained by each of the selected components.
singular_values_ : array, shape (n_components,)
The singular values corresponding to each of the selected components.
The singular values are equal to the 2-norms of the ``n_components``
variables in the lower-dimensional space.
Examples
--------
>>> from sklearn.decomposition import TruncatedSVD
>>> from sklearn.random_projection import sparse_random_matrix
>>> X = sparse_random_matrix(100, 100, density=0.01, random_state=42)
>>> svd = TruncatedSVD(n_components=5, n_iter=7, random_state=42)
>>> svd.fit(X) # doctest: +NORMALIZE_WHITESPACE
TruncatedSVD(algorithm='randomized', n_components=5, n_iter=7,
random_state=42, tol=0.0)
>>> print(svd.explained_variance_ratio_) # doctest: +ELLIPSIS
[ 0.0606... 0.0584... 0.0497... 0.0434... 0.0372...]
>>> print(svd.explained_variance_ratio_.sum()) # doctest: +ELLIPSIS
0.249...
>>> print(svd.singular_values_) # doctest: +ELLIPSIS
[ 2.5841... 2.5245... 2.3201... 2.1753... 2.0443...]
See also
--------
PCA
RandomizedPCA
References
----------
Finding structure with randomness: Stochastic algorithms for constructing
approximate matrix decompositions
    Halko, et al., 2009 (arXiv:0909.4061) http://arxiv.org/pdf/0909.4061
Notes
-----
    SVD suffers from a problem called "sign indeterminacy", which means the
sign of the ``components_`` and the output from transform depend on the
algorithm and random state. To work around this, fit instances of this
class to data once, then keep the instance around to do transformations.
"""
def __init__(self, n_components=2, algorithm="randomized", n_iter=5,
random_state=None, tol=0.):
self.algorithm = algorithm
self.n_components = n_components
self.n_iter = n_iter
self.random_state = random_state
self.tol = tol
def fit(self, X, y=None):
"""Fit LSI model on training data X.
Parameters
----------
X : {array-like, sparse matrix}, shape (n_samples, n_features)
Training data.
Returns
-------
self : object
Returns the transformer object.
"""
self.fit_transform(X)
return self
def fit_transform(self, X, y=None):
"""Fit LSI model to X and perform dimensionality reduction on X.
Parameters
----------
X : {array-like, sparse matrix}, shape (n_samples, n_features)
Training data.
Returns
-------
X_new : array, shape (n_samples, n_components)
Reduced version of X. This will always be a dense array.
"""
X = as_float_array(X, copy=False)
random_state = check_random_state(self.random_state)
# If sparse and not csr or csc, convert to csr
if sp.issparse(X) and X.getformat() not in ["csr", "csc"]:
X = X.tocsr()
if self.algorithm == "arpack":
U, Sigma, VT = svds(X, k=self.n_components, tol=self.tol)
# svds doesn't abide by scipy.linalg.svd/randomized_svd
# conventions, so reverse its outputs.
Sigma = Sigma[::-1]
U, VT = svd_flip(U[:, ::-1], VT[::-1])
elif self.algorithm == "randomized":
k = self.n_components
n_features = X.shape[1]
if k >= n_features:
raise ValueError("n_components must be < n_features;"
" got %d >= %d" % (k, n_features))
U, Sigma, VT = randomized_svd(X, self.n_components,
n_iter=self.n_iter,
random_state=random_state)
else:
raise ValueError("unknown algorithm %r" % self.algorithm)
self.components_ = VT
# Calculate explained variance & explained variance ratio
X_transformed = U * Sigma
self.explained_variance_ = exp_var = np.var(X_transformed, axis=0)
if sp.issparse(X):
_, full_var = mean_variance_axis(X, axis=0)
full_var = full_var.sum()
else:
full_var = np.var(X, axis=0).sum()
self.explained_variance_ratio_ = exp_var / full_var
self.singular_values_ = Sigma # Store the singular values.
return X_transformed
def transform(self, X):
"""Perform dimensionality reduction on X.
Parameters
----------
X : {array-like, sparse matrix}, shape (n_samples, n_features)
New data.
Returns
-------
X_new : array, shape (n_samples, n_components)
Reduced version of X. This will always be a dense array.
"""
X = check_array(X, accept_sparse='csr')
return safe_sparse_dot(X, self.components_.T)
def inverse_transform(self, X):
"""Transform X back to its original space.
Returns an array X_original whose transform would be X.
Parameters
----------
X : array-like, shape (n_samples, n_components)
New data.
Returns
-------
X_original : array, shape (n_samples, n_features)
Note that this is always a dense array.
"""
X = check_array(X)
return np.dot(X, self.components_)
| bsd-3-clause |
sarahgrogan/scikit-learn | benchmarks/bench_sample_without_replacement.py | 397 | 8008 | """
Benchmarks for sampling without replacement of integers.
"""
from __future__ import division
from __future__ import print_function
import gc
import sys
import optparse
from datetime import datetime
import operator
import matplotlib.pyplot as plt
import numpy as np
import random
from sklearn.externals.six.moves import xrange
from sklearn.utils.random import sample_without_replacement
def compute_time(t_start, delta):
mu_second = 0.0 + 10 ** 6 # number of microseconds in a second
return delta.seconds + delta.microseconds / mu_second
def bench_sample(sampling, n_population, n_samples):
gc.collect()
# start time
t_start = datetime.now()
sampling(n_population, n_samples)
delta = (datetime.now() - t_start)
# stop time
time = compute_time(t_start, delta)
return time
if __name__ == "__main__":
###########################################################################
# Option parser
###########################################################################
op = optparse.OptionParser()
op.add_option("--n-times",
dest="n_times", default=5, type=int,
help="Benchmark results are average over n_times experiments")
op.add_option("--n-population",
dest="n_population", default=100000, type=int,
help="Size of the population to sample from.")
op.add_option("--n-step",
dest="n_steps", default=5, type=int,
help="Number of step interval between 0 and n_population.")
default_algorithms = "custom-tracking-selection,custom-auto," \
"custom-reservoir-sampling,custom-pool,"\
"python-core-sample,numpy-permutation"
op.add_option("--algorithm",
dest="selected_algorithm",
default=default_algorithms,
type=str,
help="Comma-separated list of transformer to benchmark. "
"Default: %default. \nAvailable: %default")
# op.add_option("--random-seed",
# dest="random_seed", default=13, type=int,
# help="Seed used by the random number generators.")
(opts, args) = op.parse_args()
if len(args) > 0:
op.error("this script takes no arguments.")
sys.exit(1)
selected_algorithm = opts.selected_algorithm.split(',')
for key in selected_algorithm:
if key not in default_algorithms.split(','):
raise ValueError("Unknown sampling algorithm \"%s\" not in (%s)."
% (key, default_algorithms))
###########################################################################
# List sampling algorithm
###########################################################################
# We assume that sampling algorithm has the following signature:
# sample(n_population, n_sample)
#
sampling_algorithm = {}
###########################################################################
# Set Python core input
sampling_algorithm["python-core-sample"] = \
lambda n_population, n_sample: \
random.sample(xrange(n_population), n_sample)
###########################################################################
# Set custom automatic method selection
sampling_algorithm["custom-auto"] = \
lambda n_population, n_samples, random_state=None: \
sample_without_replacement(n_population,
n_samples,
method="auto",
random_state=random_state)
###########################################################################
# Set custom tracking based method
sampling_algorithm["custom-tracking-selection"] = \
lambda n_population, n_samples, random_state=None: \
sample_without_replacement(n_population,
n_samples,
method="tracking_selection",
random_state=random_state)
###########################################################################
# Set custom reservoir based method
sampling_algorithm["custom-reservoir-sampling"] = \
lambda n_population, n_samples, random_state=None: \
sample_without_replacement(n_population,
n_samples,
method="reservoir_sampling",
random_state=random_state)
###########################################################################
# Set custom reservoir based method
sampling_algorithm["custom-pool"] = \
lambda n_population, n_samples, random_state=None: \
sample_without_replacement(n_population,
n_samples,
method="pool",
random_state=random_state)
###########################################################################
# Numpy permutation based
sampling_algorithm["numpy-permutation"] = \
lambda n_population, n_sample: \
np.random.permutation(n_population)[:n_sample]
###########################################################################
# Remove unspecified algorithm
sampling_algorithm = dict((key, value)
for key, value in sampling_algorithm.items()
if key in selected_algorithm)
###########################################################################
# Perform benchmark
###########################################################################
time = {}
n_samples = np.linspace(start=0, stop=opts.n_population,
num=opts.n_steps).astype(np.int)
ratio = n_samples / opts.n_population
print('Benchmarks')
print("===========================")
for name in sorted(sampling_algorithm):
print("Perform benchmarks for %s..." % name, end="")
time[name] = np.zeros(shape=(opts.n_steps, opts.n_times))
for step in xrange(opts.n_steps):
for it in xrange(opts.n_times):
time[name][step, it] = bench_sample(sampling_algorithm[name],
opts.n_population,
n_samples[step])
print("done")
print("Averaging results...", end="")
for name in sampling_algorithm:
time[name] = np.mean(time[name], axis=1)
print("done\n")
# Print results
###########################################################################
print("Script arguments")
print("===========================")
arguments = vars(opts)
print("%s \t | %s " % ("Arguments".ljust(16),
"Value".center(12),))
print(25 * "-" + ("|" + "-" * 14) * 1)
for key, value in arguments.items():
print("%s \t | %s " % (str(key).ljust(16),
str(value).strip().center(12)))
print("")
print("Sampling algorithm performance:")
print("===============================")
print("Results are averaged over %s repetition(s)." % opts.n_times)
print("")
fig = plt.figure('scikit-learn sample w/o replacement benchmark results')
plt.title("n_population = %s, n_times = %s" %
(opts.n_population, opts.n_times))
ax = fig.add_subplot(111)
for name in sampling_algorithm:
ax.plot(ratio, time[name], label=name)
ax.set_xlabel('ratio of n_sample / n_population')
ax.set_ylabel('Time (s)')
ax.legend()
# Sort legend labels
handles, labels = ax.get_legend_handles_labels()
hl = sorted(zip(handles, labels), key=operator.itemgetter(1))
handles2, labels2 = zip(*hl)
ax.legend(handles2, labels2, loc=0)
plt.show()
| bsd-3-clause |
sherpaman/MolToolPy | bin/calc_transfer_info_mdtraj.py | 1 | 2413 | #!/usr/bin/env python
import ts
import matplotlib.pyplot as plt
import numpy as np
import MDAnalysis as md
from argparse import ArgumentParser
parser = ArgumentParser( description = 'Calculate Mutual Information')
#
# INPUT FILES
#
parser.add_argument("-f","--traj",dest="traj",action="store",type=str,default=None,help="Input Trajectory",required=True,metavar="TRAJ FILE")
parser.add_argument("-t","--top",dest="top",action="store",type=str,default=None,help="Input Topology",required=True,metavar="TOPOL FILE")
#
# OUTPUT FILES
#
parser.add_argument("-o","--out",dest="out",action="store",type=str,default=None,required=True,help="Output File Name",metavar="DAT FILE")
#
# VAR ARGUMENTS
#
parser.add_argument("-b","--begin",dest="begin",action="store",type=int,default=0,help="Start reading the trajectory at this frame", metavar="INTEGER")
parser.add_argument("-e","--end",dest="end",action="store",type=int,default=-1,help="Stop reading the trajectory at this frame", metavar="INTEGER")
parser.add_argument("-s","--stride",dest="stride",action="store",type=int,default=1,help="time", metavar="INTEGER")
parser.add_argument("-n","--nbins",dest="nbins",action="store",type=int ,default=10,help="number of bins", metavar="INTEGER")
parser.add_argument("--opt",dest="opt",action="store_true",default=False,help="toggle bins optimization")
#
options = parser.parse_args()
f_traj = options.traj
f_top = options.top
f_out = options.out
begin = options.begin
end = options.end
stride = options.stride
t = md.Universe(f_top,f_traj)
sel = 'name CA'
CA = t.select_atoms(sel)
aver_str = np.zeros((len(CA),3))
n_fr = (end - begin)/int(stride)+1
print ("Calculating average structure over {0:d} frames".format(n_fr))
for i in t.trajectory[begin:end:stride]:
CA = t.select_atoms(sel)
aver_str = aver_str + CA.atoms.coordinates()/n_fr
print ("Done!")
dat1 = np.zeros((len(CA),3,n_fr))
print("Start Calculating Fluctuations")
for i in t.trajectory[begin:end:stride]:
CA = t.select_atoms(sel)
fr=(i.frame - begin) / stride
dat1[:,:,fr] = CA.atoms.coordinates() - aver_str
print("Done!")
del(t)
DATA1= ts.TimeSer(dat1,n_fr,dim=3,nbins=options.nbins,reshape=False)
DATA1.calc_bins(opt=options.opt)
M1,E1 = DATA1.mutual_info_omp()
T1,D1 = DATA1.transfer_entropy_omp(time=2)
np.savetxt(f_out+".MI.RMSF.dat",M1)
np.savetxt(f_out+".TI.RMSF.dat",T1)
np.savetxt(f_out+".DI.RMSF.dat",D1)
quit()
| gpl-2.0 |
jstooks9/mpp | mesytec_process_3_4.py | 1 | 3755 | # Convert a list of hex values into decimal values
# Update from labview_parse_1
# - now includes the option to keep only the first n columns of the original data file,
# where n is the second user input
# Arguments:
# 1 - unparsed input file name
# 2 - number of columns to keep from the original data file
import sys
from time import time
import numpy as np
import matplotlib.pyplot as plt
import os
import warnings # DEBUGGING
warnings.simplefilter("error") # DEBUGGING
def file_end(l):
if l == '':
return True
else:
return False
def condense_file(filename,columnsKeep):
with open(filename) as f:
data = f.read()
data = data.split('\n') # parse based on newLines
newData = ''
for d in data:
i = 0
newLine = ''
currLine = d.split()
for curr in currLine:
i += 1
if i in columnsKeep:
newLine = newLine+curr+'\t'
newData += newLine+'\n'
with open(filename,'w') as f:
f.write(newData)
def mesytec_parse(filename,columns):
c = ','
FILEEXTENSIONLENGTH = 4
initialTime = time()
tempOutputs = []
for col in columns:
outfilename = filename[:-FILEEXTENSIONLENGTH]+str(col)+'_temp.txt'
tempOutputs.append(outfilename)
with open(filename) as f:
with open(outfilename,'w') as of:
previousLine = 'FFFF\n' # initialize previousLine
for line in f:
if line == '4040\n': # marks end of header
# convert last 2 bits to decimal,
# -1 because of end-header
numData = int(previousLine.split()[0][-2:],16) - 1
for i in range(numData):
try:
dataLine = next(f)
dataidLine = next(f)
except:
break
if file_end(dataLine) or file_end(dataidLine):
break
data = int(dataLine.split()[0],16)
dataid = int(dataidLine.split()[0][-2:],16)
if not dataid == col:
break
of.write(str(data)+c)
previousLine = line
outfilename = filename[:-FILEEXTENSIONLENGTH]+'_parsed.txt'
with open(outfilename,'w') as of:
for t in tempOutputs:
with open(t) as f:
of.write(f.read())
of.write('\n')
os.remove(t)
elapsedTime = time() - initialTime
print('File written to',outfilename)
print(round(elapsedTime,3),'seconds taken to parse.')
# input('check parsed file')
return outfilename
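# Illustrative call (the file name and Mesytec data-id columns are hypothetical):
#
#     parsed_file = mesytec_parse('run_0001.txt', columns=[0, 1])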
# -----------------------------------------------------------------
# create 2D histogram, where histogram height is displayed as color
def readParsedFile(filename,xcol,ycol):
numSTDs = 4
with open(filename) as f:
for i in range(xcol):
trash = f.readline()
xList = f.readline().split(',')[:-1]
yList = f.readline().split(',')[:-1]
xList = [int(x) for x in xList]
yList = [int(x) for x in yList]
return xList, yList
def histogram_2d(inputfilename,nbins,figureTitle,stds,xcol,ycol,detector):
FILEEXTENSIONLENGTH = 4
DEFAULTSTDS = 5
figureName = inputfilename[:-FILEEXTENSIONLENGTH]+'_'+detector+'_plot.png'
x, y = readParsedFile(inputfilename,xcol,ycol)
stdX = np.std(x)
meanX = np.mean(x)
maxX = meanX + (stdX * stds)
minX = meanX - (stdX * stds)
# maxX = 3000
# minX = 0
stdY = np.std(y)
meanY = np.mean(y)
maxY = meanY + (stdY * stds)
minY = meanY - (stdY * stds)
# maxY = 3000
# minY = 0
trimmedX = []
trimmedY = []
for i, j in zip(x,y):
if i < minX or i > maxX or j < minY or j > maxY:
continue
trimmedX.append(i)
trimmedY.append(j)
H, xedges, yedges = np.histogram2d(trimmedX, trimmedY, bins = nbins)
H = np.rot90(H)
H = np.flipud(H)
Hmasked = np.ma.masked_where(H==0,H)
fig = plt.figure()
plt.set_cmap("spectral")
plt.pcolormesh(xedges,yedges,Hmasked)
plt.ylabel('TAC')
plt.xlabel('Amplitude')
plt.title(figureTitle)
cbar = plt.colorbar()
plt.savefig(figureName)
plt.close(fig)
print('Figure saved as', figureName) | apache-2.0 |
neuroidss/nupic.research | projects/sequence_prediction/mackey_glass/nupic_output.py | 13 | 6035 | # ----------------------------------------------------------------------
# Numenta Platform for Intelligent Computing (NuPIC)
# Copyright (C) 2013, Numenta, Inc. Unless you have an agreement
# with Numenta, Inc., for a separate license for this software code, the
# following terms and conditions apply:
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero Public License version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU Affero Public License for more details.
#
# You should have received a copy of the GNU Affero Public License
# along with this program. If not, see http://www.gnu.org/licenses.
#
# http://numenta.org/licenses/
# ----------------------------------------------------------------------
"""
Provides two classes with the same signature for writing data out of NuPIC
models.
(This is a component of the One Hot Gym Prediction Tutorial.)
"""
import csv
from collections import deque
from abc import ABCMeta, abstractmethod
# Try to import matplotlib, but we don't have to.
try:
import matplotlib
matplotlib.use('TKAgg')
import matplotlib.pyplot as plt
import matplotlib.gridspec as gridspec
from matplotlib.dates import date2num
except ImportError:
pass
WINDOW = 100
class NuPICOutput(object):
__metaclass__ = ABCMeta
def __init__(self, names, showAnomalyScore=False):
self.names = names
self.showAnomalyScore = showAnomalyScore
@abstractmethod
def write(self, timestamps, actualValues, predictedValues,
predictionStep=1):
pass
@abstractmethod
def close(self):
pass
class NuPICFileOutput(NuPICOutput):
def __init__(self, *args, **kwargs):
super(NuPICFileOutput, self).__init__(*args, **kwargs)
self.outputFiles = []
self.outputWriters = []
self.lineCounts = []
headerRow = ['timestamp', 'y', 'prediction']
for name in self.names:
self.lineCounts.append(0)
outputFileName = "out_%s" % name
print "Preparing to output %s data to %s" % (name, outputFileName)
outputFile = open(outputFileName, "w")
self.outputFiles.append(outputFile)
outputWriter = csv.writer(outputFile)
self.outputWriters.append(outputWriter)
outputWriter.writerow(headerRow)
def write(self, timestamps, actualValues, predictedValues,
predictionStep=1):
assert len(timestamps) == len(actualValues) == len(predictedValues)
for index in range(len(self.names)):
timestamp = timestamps[index]
actual = actualValues[index]
prediction = predictedValues[index]
writer = self.outputWriters[index]
if timestamp is not None:
outputRow = [timestamp, actual, prediction]
writer.writerow(outputRow)
self.lineCounts[index] += 1
def close(self):
for index, name in enumerate(self.names):
self.outputFiles[index].close()
print "Done. Wrote %i data lines to %s." % (self.lineCounts[index], name)
class NuPICPlotOutput(NuPICOutput):
def __init__(self, *args, **kwargs):
super(NuPICPlotOutput, self).__init__(*args, **kwargs)
# Turn matplotlib interactive mode on.
plt.ion()
self.dates = []
self.convertedDates = []
self.actualValues = []
self.predictedValues = []
self.actualLines = []
self.predictedLines = []
self.linesInitialized = False
self.graphs = []
plotCount = len(self.names)
plotHeight = max(plotCount * 3, 6)
fig = plt.figure(figsize=(14, plotHeight))
gs = gridspec.GridSpec(plotCount, 1)
for index in range(len(self.names)):
self.graphs.append(fig.add_subplot(gs[index, 0]))
plt.title(self.names[index])
plt.ylabel('KW Energy Consumption')
plt.xlabel('Date')
plt.tight_layout()
def initializeLines(self, timestamps):
for index in range(len(self.names)):
print "initializing %s" % self.names[index]
# graph = self.graphs[index]
self.dates.append(deque([timestamps[index]] * WINDOW, maxlen=WINDOW))
self.convertedDates.append(deque(
[date2num(date) for date in self.dates[index]], maxlen=WINDOW
))
self.actualValues.append(deque([0.0] * WINDOW, maxlen=WINDOW))
self.predictedValues.append(deque([0.0] * WINDOW, maxlen=WINDOW))
actualPlot, = self.graphs[index].plot(
self.dates[index], self.actualValues[index]
)
self.actualLines.append(actualPlot)
predictedPlot, = self.graphs[index].plot(
self.dates[index], self.predictedValues[index]
)
self.predictedLines.append(predictedPlot)
self.linesInitialized = True
def write(self, timestamps, actualValues, predictedValues,
predictionStep=1):
assert len(timestamps) == len(actualValues) == len(predictedValues)
# We need the first timestamp to initialize the lines at the right X value,
# so do that check first.
if not self.linesInitialized:
self.initializeLines(timestamps)
for index in range(len(self.names)):
self.dates[index].append(timestamps[index])
self.convertedDates[index].append(date2num(timestamps[index]))
self.actualValues[index].append(actualValues[index])
self.predictedValues[index].append(predictedValues[index])
# Update data
self.actualLines[index].set_xdata(self.convertedDates[index])
self.actualLines[index].set_ydata(self.actualValues[index])
self.predictedLines[index].set_xdata(self.convertedDates[index])
self.predictedLines[index].set_ydata(self.predictedValues[index])
self.graphs[index].relim()
self.graphs[index].autoscale_view(True, True, True)
plt.draw()
plt.legend(('actual','predicted'), loc=3)
def close(self):
plt.ioff()
plt.show()
NuPICOutput.register(NuPICFileOutput)
NuPICOutput.register(NuPICPlotOutput)
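# Illustrative usage (hypothetical values, not part of the tutorial code):
#
#     output = NuPICFileOutput(["gym1"])
#     output.write([timestamp], [actual_kw], [predicted_kw], predictionStep=1)
#     output.close()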
| agpl-3.0 |
imaculate/scikit-learn | examples/neighbors/plot_species_kde.py | 16 | 4037 | """
================================================
Kernel Density Estimate of Species Distributions
================================================
This shows an example of a neighbors-based query (in particular a kernel
density estimate) on geospatial data, using a Ball Tree built upon the
Haversine distance metric -- i.e. distances over points in latitude/longitude.
The dataset is provided by Phillips et. al. (2006).
If available, the example uses
`basemap <http://matplotlib.org/basemap>`_
to plot the coast lines and national boundaries of South America.
This example does not perform any learning over the data
(see :ref:`example_applications_plot_species_distribution_modeling.py` for
an example of classification based on the attributes in this dataset). It
simply shows the kernel density estimate of observed data points in
geospatial coordinates.
The two species are:
- `"Bradypus variegatus"
<http://www.iucnredlist.org/apps/redlist/details/3038/0>`_ ,
the Brown-throated Sloth.
- `"Microryzomys minutus"
<http://www.iucnredlist.org/apps/redlist/details/13408/0>`_ ,
   also known as the Forest Small Rice Rat, a rodent that lives in Peru,
   Colombia, Ecuador, and Venezuela.
References
----------
* `"Maximum entropy modeling of species geographic distributions"
<http://www.cs.princeton.edu/~schapire/papers/ecolmod.pdf>`_
S. J. Phillips, R. P. Anderson, R. E. Schapire - Ecological Modelling,
190:231-259, 2006.
"""
# Author: Jake Vanderplas <[email protected]>
#
# License: BSD 3 clause
import numpy as np
import matplotlib.pyplot as plt
from sklearn.datasets import fetch_species_distributions
from sklearn.datasets.species_distributions import construct_grids
from sklearn.neighbors import KernelDensity
# if basemap is available, we'll use it.
# otherwise, we'll improvise later...
try:
from mpl_toolkits.basemap import Basemap
basemap = True
except ImportError:
basemap = False
# Get matrices/arrays of species IDs and locations
data = fetch_species_distributions()
species_names = ['Bradypus Variegatus', 'Microryzomys Minutus']
Xtrain = np.vstack([data['train']['dd lat'],
data['train']['dd long']]).T
ytrain = np.array([d.decode('ascii').startswith('micro')
for d in data['train']['species']], dtype='int')
Xtrain *= np.pi / 180. # Convert lat/long to radians
# Set up the data grid for the contour plot
xgrid, ygrid = construct_grids(data)
X, Y = np.meshgrid(xgrid[::5], ygrid[::5][::-1])
land_reference = data.coverages[6][::5, ::5]
land_mask = (land_reference > -9999).ravel()
xy = np.vstack([Y.ravel(), X.ravel()]).T
xy = xy[land_mask]
xy *= np.pi / 180.
# Plot map of South America with distributions of each species
fig = plt.figure()
fig.subplots_adjust(left=0.05, right=0.95, wspace=0.05)
for i in range(2):
plt.subplot(1, 2, i + 1)
# construct a kernel density estimate of the distribution
print(" - computing KDE in spherical coordinates")
kde = KernelDensity(bandwidth=0.04, metric='haversine',
kernel='gaussian', algorithm='ball_tree')
kde.fit(Xtrain[ytrain == i])
# evaluate only on the land: -9999 indicates ocean
Z = -9999 + np.zeros(land_mask.shape[0])
Z[land_mask] = np.exp(kde.score_samples(xy))
Z = Z.reshape(X.shape)
# plot contours of the density
levels = np.linspace(0, Z.max(), 25)
plt.contourf(X, Y, Z, levels=levels, cmap=plt.cm.Reds)
if basemap:
print(" - plot coastlines using basemap")
m = Basemap(projection='cyl', llcrnrlat=Y.min(),
urcrnrlat=Y.max(), llcrnrlon=X.min(),
urcrnrlon=X.max(), resolution='c')
m.drawcoastlines()
m.drawcountries()
else:
print(" - plot coastlines from coverage")
plt.contour(X, Y, land_reference,
levels=[-9999], colors="k",
linestyles="solid")
plt.xticks([])
plt.yticks([])
plt.title(species_names[i])
plt.show()
| bsd-3-clause |
ZenDevelopmentSystems/scikit-learn | examples/calibration/plot_calibration_curve.py | 225 | 5903 | """
==============================
Probability Calibration curves
==============================
When performing classification one often wants to predict not only the class
label, but also the associated probability. This probability gives some
kind of confidence on the prediction. This example demonstrates how to display
how well calibrated the predicted probabilities are and how to calibrate an
uncalibrated classifier.
The experiment is performed on an artificial dataset for binary classification
with 100.000 samples (1.000 of them are used for model fitting) with 20
features. Of the 20 features, only 2 are informative and 10 are redundant. The
first figure shows the estimated probabilities obtained with logistic
regression, Gaussian naive Bayes, and Gaussian naive Bayes with both isotonic
calibration and sigmoid calibration. The calibration performance is evaluated
with Brier score, reported in the legend (the smaller the better). One can
observe here that logistic regression is well calibrated while raw Gaussian
naive Bayes performs very badly. This is because of the redundant features
which violate the assumption of feature-independence and result in an overly
confident classifier, which is indicated by the typical transposed-sigmoid
curve.
Calibration of the probabilities of Gaussian naive Bayes with isotonic
regression can fix this issue as can be seen from the nearly diagonal
calibration curve. Sigmoid calibration also improves the brier score slightly,
albeit not as strongly as the non-parametric isotonic regression. This can be
attributed to the fact that we have plenty of calibration data such that the
greater flexibility of the non-parametric model can be exploited.
The second figure shows the calibration curve of a linear support-vector
classifier (LinearSVC). LinearSVC shows the opposite behavior as Gaussian
naive Bayes: the calibration curve has a sigmoid curve, which is typical for
an under-confident classifier. In the case of LinearSVC, this is caused by the
margin property of the hinge loss, which lets the model focus on hard samples
that are close to the decision boundary (the support vectors).
Both kinds of calibration can fix this issue and yield nearly identical
results. This shows that sigmoid calibration can deal with situations where
the calibration curve of the base classifier is sigmoid (e.g., for LinearSVC)
but not where it is transposed-sigmoid (e.g., Gaussian naive Bayes).
"""
print(__doc__)
# Author: Alexandre Gramfort <[email protected]>
# Jan Hendrik Metzen <[email protected]>
# License: BSD Style.
import matplotlib.pyplot as plt
from sklearn import datasets
from sklearn.naive_bayes import GaussianNB
from sklearn.svm import LinearSVC
from sklearn.linear_model import LogisticRegression
from sklearn.metrics import (brier_score_loss, precision_score, recall_score,
f1_score)
from sklearn.calibration import CalibratedClassifierCV, calibration_curve
from sklearn.cross_validation import train_test_split
# Create dataset of classification task with many redundant and few
# informative features
X, y = datasets.make_classification(n_samples=100000, n_features=20,
n_informative=2, n_redundant=10,
random_state=42)
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.99,
random_state=42)
def plot_calibration_curve(est, name, fig_index):
"""Plot calibration curve for est w/o and with calibration. """
# Calibrated with isotonic calibration
isotonic = CalibratedClassifierCV(est, cv=2, method='isotonic')
# Calibrated with sigmoid calibration
sigmoid = CalibratedClassifierCV(est, cv=2, method='sigmoid')
# Logistic regression with no calibration as baseline
lr = LogisticRegression(C=1., solver='lbfgs')
fig = plt.figure(fig_index, figsize=(10, 10))
ax1 = plt.subplot2grid((3, 1), (0, 0), rowspan=2)
ax2 = plt.subplot2grid((3, 1), (2, 0))
ax1.plot([0, 1], [0, 1], "k:", label="Perfectly calibrated")
for clf, name in [(lr, 'Logistic'),
(est, name),
(isotonic, name + ' + Isotonic'),
(sigmoid, name + ' + Sigmoid')]:
clf.fit(X_train, y_train)
y_pred = clf.predict(X_test)
if hasattr(clf, "predict_proba"):
prob_pos = clf.predict_proba(X_test)[:, 1]
else: # use decision function
prob_pos = clf.decision_function(X_test)
prob_pos = \
(prob_pos - prob_pos.min()) / (prob_pos.max() - prob_pos.min())
clf_score = brier_score_loss(y_test, prob_pos, pos_label=y.max())
print("%s:" % name)
print("\tBrier: %1.3f" % (clf_score))
print("\tPrecision: %1.3f" % precision_score(y_test, y_pred))
print("\tRecall: %1.3f" % recall_score(y_test, y_pred))
print("\tF1: %1.3f\n" % f1_score(y_test, y_pred))
fraction_of_positives, mean_predicted_value = \
calibration_curve(y_test, prob_pos, n_bins=10)
ax1.plot(mean_predicted_value, fraction_of_positives, "s-",
label="%s (%1.3f)" % (name, clf_score))
ax2.hist(prob_pos, range=(0, 1), bins=10, label=name,
histtype="step", lw=2)
ax1.set_ylabel("Fraction of positives")
ax1.set_ylim([-0.05, 1.05])
ax1.legend(loc="lower right")
ax1.set_title('Calibration plots (reliability curve)')
ax2.set_xlabel("Mean predicted value")
ax2.set_ylabel("Count")
ax2.legend(loc="upper center", ncol=2)
plt.tight_layout()
# Plot calibration curve for Gaussian Naive Bayes
plot_calibration_curve(GaussianNB(), "Naive Bayes", 1)
# Plot calibration curve for Linear SVC
plot_calibration_curve(LinearSVC(), "SVC", 2)
plt.show()
| bsd-3-clause |
geekcookiespodcast/geekcookiespodcast_scripts | archive_org_scraping.py | 1 | 1357 | #!/usr/bin/python
import json
import urllib2
import pandas as pd
import re
import time
import csv
### get the data as json
opener = urllib2.build_opener()
opener.addheaders = [('User-agent', 'Mozilla/5.0')]
resp = opener.open('https://archive.org/advancedsearch.php?q=creator%3A%22Geekcookiespodcast%22&fl%5B%5D=downloads&fl%5B%5D=publicdate&fl%5B%5D=title&sort%5B%5D=addeddate+asc&sort%5B%5D=&sort%5B%5D=&rows=50&page=1&indent=yes&output=json')
data = resp.read()
result = json.loads(data)
# extract the results
output = {}
title_regex = '(geekcookies_ep_[0-9]+)(.+)$'
for tmp in result['response']['docs']:
# take care of new version of episodes with an extra _string at the end
#if '_new' in tmp['title']:
if re.match(title_regex ,tmp['title']):
title = re.compile(title_regex).search(tmp['title']).group(1)
#title = '_'.join(tmp['title'].split('_')[:-1])
else:
title = tmp['title']
if title not in output:
output[title] = 0
output[title]=output[title]+tmp['downloads']
#print tmp['title'],tmp['downloads']
print title,output[title]
print '-'*40
# cycle over the output dictionary and append one row per episode to a CSV file
# (note: 'path' and the row contents were undefined in the original script; an
# empty output-directory prefix and a (title, downloads) row are assumed here)
path = ''
for name, val in output.iteritems():
    with open(path + name + ".csv", "ab") as f:
        writer = csv.writer(f, delimiter=',', quotechar='"', lineterminator="\n")
        writer.writerows([[name, val]])
| cc0-1.0 |
bataeves/kaggle | instacart/imba/lgbm_cv.py | 2 | 16532 | import gc
from concurrent.futures import ThreadPoolExecutor
import pandas as pd
import numpy as np
import os
import arboretum
import lightgbm as lgb
import json
import sklearn.metrics
from sklearn.metrics import f1_score, roc_auc_score
from sklearn.model_selection import train_test_split
from scipy.sparse import dok_matrix, coo_matrix
from sklearn.utils.multiclass import type_of_target
if __name__ == '__main__':
path = "data"
aisles = pd.read_csv(os.path.join(path, "aisles.csv"), dtype={'aisle_id': np.uint8, 'aisle': 'category'})
departments = pd.read_csv(os.path.join(path, "departments.csv"),
dtype={'department_id': np.uint8, 'department': 'category'})
order_prior = pd.read_csv(os.path.join(path, "order_products__prior.csv"), dtype={'order_id': np.uint32,
'product_id': np.uint16,
'add_to_cart_order': np.uint8,
'reordered': bool})
order_train = pd.read_csv(os.path.join(path, "order_products__train.csv"), dtype={'order_id': np.uint32,
'product_id': np.uint16,
'add_to_cart_order': np.uint8,
'reordered': bool})
orders = pd.read_csv(os.path.join(path, "orders.csv"), dtype={'order_id': np.uint32,
'user_id': np.uint32,
'eval_set': 'category',
'order_number': np.uint8,
'order_dow': np.uint8,
'order_hour_of_day': np.uint8
})
product_embeddings = pd.read_pickle('data/product_embeddings.pkl')
embedings = list(range(32))
product_embeddings = product_embeddings[embedings + ['product_id']]
order_prev = pd.merge(order_train, orders, on='order_id')
order_prev.order_number -= 1
order_prev = pd.merge(order_prev[
['user_id', 'order_number', 'product_id', 'reordered', 'add_to_cart_order', 'order_dow',
'order_hour_of_day']], orders[['user_id', 'order_number', 'order_id']],
on=['user_id', 'order_number'])
order_prev.drop(['order_number', 'user_id'], axis=1, inplace=True)
order_prev.rename(columns={
'reordered': 'reordered_prev',
'add_to_cart_order': 'add_to_cart_order_prev',
'order_dow': 'order_dow_prev',
'order_hour_of_day': 'order_hour_of_day_prev'
}, inplace=True)
products = pd.read_csv(os.path.join(path, "products.csv"), dtype={'product_id': np.uint16,
'aisle_id': np.uint8,
'department_id': np.uint8})
order_train = pd.read_pickle(os.path.join(path, 'chunk_0.pkl'))
order_train = order_train.loc[order_train.eval_set == "train", ['order_id', 'product_id', 'reordered']]
product_periods = pd.read_pickle(os.path.join(path, 'product_periods_stat.pkl')).fillna(9999)
# product_periods.prev1 = product_periods['last'] / product_periods.prev1
# product_periods.prev2 = product_periods['last'] / product_periods.prev2
# product_periods['mean'] = product_periods['last'] / product_periods['mean']
# product_periods['median'] = product_periods['last'] / product_periods['median']
print(order_train.columns)
###########################
weights = order_train.groupby('order_id')['reordered'].sum().to_frame('weights')
weights.reset_index(inplace=True)
prob = pd.merge(order_prior, orders, on='order_id')
print(prob.columns)
prob = prob.groupby(['product_id', 'user_id'])\
.agg({'reordered':'sum', 'user_id': 'size'})
print(prob.columns)
prob.rename(columns={'sum': 'reordered',
'user_id': 'total'}, inplace=True)
prob.reordered = (prob.reordered > 0).astype(np.float32)
prob.total = (prob.total > 0).astype(np.float32)
prob['reorder_prob'] = prob.reordered / prob.total
prob = prob.groupby('product_id').agg({'reorder_prob': 'mean'}).rename(columns={'mean': 'reorder_prob'})\
.reset_index()
prod_stat = order_prior.groupby('product_id').agg({'reordered': ['sum', 'size'],
'add_to_cart_order':'mean'})
prod_stat.columns = prod_stat.columns.levels[1]
prod_stat.rename(columns={'sum':'prod_reorders',
'size':'prod_orders',
'mean': 'prod_add_to_card_mean'}, inplace=True)
prod_stat.reset_index(inplace=True)
prod_stat['reorder_ration'] = prod_stat['prod_reorders'] / prod_stat['prod_orders']
prod_stat = pd.merge(prod_stat, prob, on='product_id')
# prod_stat.drop(['prod_reorders'], axis=1, inplace=True)
user_stat = orders.loc[orders.eval_set == 'prior', :].groupby('user_id').agg({'order_number': 'max',
'days_since_prior_order': ['sum',
'mean',
'median']})
user_stat.columns = user_stat.columns.droplevel(0)
user_stat.rename(columns={'max': 'user_orders',
'sum': 'user_order_starts_at',
'mean': 'user_mean_days_since_prior',
'median': 'user_median_days_since_prior'}, inplace=True)
user_stat.reset_index(inplace=True)
orders_products = pd.merge(orders, order_prior, on="order_id")
user_order_stat = orders_products.groupby('user_id').agg({'user_id': 'size',
'reordered': 'sum',
"product_id": lambda x: x.nunique()})
user_order_stat.rename(columns={'user_id': 'user_total_products',
'product_id': 'user_distinct_products',
'reordered': 'user_reorder_ratio'}, inplace=True)
user_order_stat.reset_index(inplace=True)
user_order_stat.user_reorder_ratio = user_order_stat.user_reorder_ratio / user_order_stat.user_total_products
user_stat = pd.merge(user_stat, user_order_stat, on='user_id')
user_stat['user_average_basket'] = user_stat.user_total_products / user_stat.user_orders
########################### products
prod_usr = orders_products.groupby(['product_id']).agg({'user_id': lambda x: x.nunique()})
prod_usr.rename(columns={'user_id':'prod_users_unq'}, inplace=True)
prod_usr.reset_index(inplace=True)
prod_usr_reordered = orders_products.loc[orders_products.reordered, :].groupby(['product_id']).agg({'user_id': lambda x: x.nunique()})
prod_usr_reordered.rename(columns={'user_id': 'prod_users_unq_reordered'}, inplace=True)
prod_usr_reordered.reset_index(inplace=True)
order_stat = orders_products.groupby('order_id').agg({'order_id': 'size'})\
.rename(columns = {'order_id': 'order_size'}).reset_index()
orders_products = pd.merge(orders_products, order_stat, on='order_id')
orders_products['add_to_cart_order_inverted'] = orders_products.order_size - orders_products.add_to_cart_order
orders_products['add_to_cart_order_relative'] = orders_products.add_to_cart_order / orders_products.order_size
data_dow = orders_products.groupby(['user_id', 'product_id', 'order_dow']).agg({
'reordered': ['sum', 'size']})
data_dow.columns = data_dow.columns.droplevel(0)
data_dow.columns = ['reordered_dow', 'reordered_dow_size']
data_dow['reordered_dow_ration'] = data_dow.reordered_dow / data_dow.reordered_dow_size
data_dow.reset_index(inplace=True)
data = orders_products.groupby(['user_id', 'product_id']).agg({'user_id': 'size',
'order_number': ['min', 'max'],
'add_to_cart_order': ['mean', 'median'],
'days_since_prior_order': ['mean', 'median'],
'order_dow': ['mean', 'median'],
'order_hour_of_day': ['mean', 'median'],
'add_to_cart_order_inverted': ['mean', 'median'],
'add_to_cart_order_relative': ['mean', 'median'],
'reordered':['sum']})
data.columns = data.columns.droplevel(0)
data.columns = ['up_orders', 'up_first_order', 'up_last_order', 'up_mean_cart_position', 'up_median_cart_position',
'days_since_prior_order_mean', 'days_since_prior_order_median', 'order_dow_mean', 'order_dow_median',
'order_hour_of_day_mean', 'order_hour_of_day_median',
'add_to_cart_order_inverted_mean', 'add_to_cart_order_inverted_median',
'add_to_cart_order_relative_mean', 'add_to_cart_order_relative_median',
'reordered_sum'
]
data['user_product_reordered_ratio'] = (data.reordered_sum + 1.0) / data.up_orders
# data['first_order'] = data['up_orders'] > 0
# data['second_order'] = data['up_orders'] > 1
#
# data.groupby('product_id')['']
data.reset_index(inplace=True)
data = pd.merge(data, prod_stat, on='product_id')
data = pd.merge(data, user_stat, on='user_id')
data['up_order_rate'] = data.up_orders / data.user_orders
data['up_orders_since_last_order'] = data.user_orders - data.up_last_order
data['up_order_rate_since_first_order'] = data.user_orders / (data.user_orders - data.up_first_order + 1)
############################
user_dep_stat = pd.read_pickle('data/user_department_products.pkl')
user_aisle_stat = pd.read_pickle('data/user_aisle_products.pkl')
order_train = pd.merge(order_train, products, on='product_id')
order_train = pd.merge(order_train, orders, on='order_id')
order_train = pd.merge(order_train, user_dep_stat, on=['user_id', 'department_id'])
order_train = pd.merge(order_train, user_aisle_stat, on=['user_id', 'aisle_id'])
order_train = pd.merge(order_train, prod_usr, on='product_id')
order_train = pd.merge(order_train, prod_usr_reordered, on='product_id', how='left')
order_train.prod_users_unq_reordered.fillna(0, inplace=True)
order_train = pd.merge(order_train, data, on=['product_id', 'user_id'])
order_train = pd.merge(order_train, data_dow, on=['product_id', 'user_id', 'order_dow'], how='left')
order_train['aisle_reordered_ratio'] = order_train.aisle_reordered / order_train.user_orders
order_train['dep_reordered_ratio'] = order_train.dep_reordered / order_train.user_orders
order_train = pd.merge(order_train, product_periods, on=['user_id', 'product_id'])
order_train = pd.merge(order_train, product_embeddings, on=['product_id'])
# order_train = pd.merge(order_train, weights, on='order_id')
# order_train = pd.merge(order_train, order_prev, on=['order_id', 'product_id'], how='left')
# order_train.reordered_prev = order_train.reordered_prev.astype(np.float32) + 1.
# order_train['reordered_prev'].fillna(0, inplace=True)
# order_train[['add_to_cart_order_prev', 'order_dow_prev', 'order_hour_of_day_prev']].fillna(255, inplace=True)
print('data is joined')
# order_train.days_since_prior_order_mean -= order_train.days_since_prior_order
# order_train.days_since_prior_order_median -= order_train.days_since_prior_order
#
# order_train.order_dow_mean -= order_train.order_dow
# order_train.order_dow_median -= order_train.order_dow
#
# order_train.order_hour_of_day_mean -= order_train.order_hour_of_day
# order_train.order_hour_of_day_median -= order_train.order_hour_of_day
unique_orders = np.unique(order_train.order_id)
orders_train, orders_test = train_test_split(unique_orders, test_size=0.25, random_state=2017)
order_test = order_train.loc[np.in1d(order_train.order_id, orders_test)]
order_train = order_train.loc[np.in1d(order_train.order_id, orders_train)]
features = [
# 'reordered_dow_ration', 'reordered_dow', 'reordered_dow_size',
# 'reordered_prev', 'add_to_cart_order_prev', 'order_dow_prev', 'order_hour_of_day_prev',
'user_product_reordered_ratio', 'reordered_sum',
'add_to_cart_order_inverted_mean', 'add_to_cart_order_relative_mean',
'reorder_prob',
'last', 'prev1', 'prev2', 'median', 'mean',
'dep_reordered_ratio', 'aisle_reordered_ratio',
'aisle_products',
'aisle_reordered',
'dep_products',
'dep_reordered',
'prod_users_unq', 'prod_users_unq_reordered',
'order_number', 'prod_add_to_card_mean',
'days_since_prior_order',
'order_dow', 'order_hour_of_day',
'reorder_ration',
'user_orders', 'user_order_starts_at', 'user_mean_days_since_prior',
# 'user_median_days_since_prior',
'user_average_basket', 'user_distinct_products', 'user_reorder_ratio', 'user_total_products',
'prod_orders', 'prod_reorders',
'up_order_rate', 'up_orders_since_last_order', 'up_order_rate_since_first_order',
'up_orders', 'up_first_order', 'up_last_order', 'up_mean_cart_position',
# 'up_median_cart_position',
'days_since_prior_order_mean',
# 'days_since_prior_order_median',
'order_dow_mean',
# 'order_dow_median',
# 'order_hour_of_day_mean',
# 'order_hour_of_day_median'
]
categories = ['product_id', 'aisle_id', 'department_id']
features.extend(embedings)
cat_features = ','.join(map(lambda x: str(x + len(features)), range(len(categories))))
features.extend(categories)
print('not included', set(order_train.columns.tolist()) - set(features))
data = order_train[features]
labels = order_train[['reordered']].values.astype(np.float32).flatten()
data_val = order_test[features]
labels_val = order_test[['reordered']].values.astype(np.float32).flatten()
lgb_train = lgb.Dataset(data, labels, categorical_feature=cat_features)
lgb_eval = lgb.Dataset(data_val, labels_val, reference=lgb_train, categorical_feature=cat_features)
# specify your configurations as a dict
params = {
'task': 'train',
'boosting_type': 'gbdt',
'objective': 'binary',
'metric': {'binary_logloss', 'auc'},
'num_leaves': 256,
'min_sum_hessian_in_leaf':20,
'max_depth': -12,
'learning_rate': 0.05,
'feature_fraction': 0.6,
# 'bagging_fraction': 0.9,
# 'bagging_freq': 3,
'verbose': 1
}
print('Start training...')
# train
gbm = lgb.train(params,
lgb_train,
num_boost_round=2000,
valid_sets=lgb_eval,
early_stopping_rounds=30)
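    # --- Editor's addition (hedged sketch): an explicit post-training check of
    # the held-out split, using the roc_auc_score already imported above;
    # best_iteration is set by early stopping. ---
    preds_val = gbm.predict(data_val, num_iteration=gbm.best_iteration)
    print('Validation AUC: %.4f' % roc_auc_score(labels_val, preds_val))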
print('Feature names:', gbm.feature_name())
print('Calculate feature importances...')
# feature importances
print('Feature importances:', list(gbm.feature_importance()))
df = pd.DataFrame({'feature':gbm.feature_name(), 'importances': gbm.feature_importance()})
print(df.sort_values('importances')) | unlicense |
huzq/scikit-learn | examples/cluster/plot_digits_linkage.py | 21 | 3092 | """
=============================================================================
Various Agglomerative Clustering on a 2D embedding of digits
=============================================================================
An illustration of various linkage options for agglomerative clustering on
a 2D embedding of the digits dataset.
The goal of this example is to show intuitively how the metrics behave, and
not to find good clusters for the digits. This is why the example works on a
2D embedding.
What this example shows us is the "rich get richer" behavior of
agglomerative clustering, which tends to create uneven cluster sizes.
This behavior is pronounced for the average linkage strategy, which ends up
with a couple of singleton clusters, while in the case of single linkage we
get a single central cluster with all other clusters being drawn from noise
points around the fringes.
"""
# Authors: Gael Varoquaux
# License: BSD 3 clause (C) INRIA 2014
print(__doc__)
from time import time
import numpy as np
from scipy import ndimage
from matplotlib import pyplot as plt
from sklearn import manifold, datasets
X, y = datasets.load_digits(return_X_y=True)
n_samples, n_features = X.shape
np.random.seed(0)
def nudge_images(X, y):
# Having a larger dataset shows more clearly the behavior of the
# methods, but we multiply the size of the dataset only by 2, as the
    # cost of the hierarchical clustering methods is strongly
# super-linear in n_samples
shift = lambda x: ndimage.shift(x.reshape((8, 8)),
.3 * np.random.normal(size=2),
mode='constant',
).ravel()
X = np.concatenate([X, np.apply_along_axis(shift, 1, X)])
Y = np.concatenate([y, y], axis=0)
return X, Y
X, y = nudge_images(X, y)
#----------------------------------------------------------------------
# Visualize the clustering
def plot_clustering(X_red, labels, title=None):
x_min, x_max = np.min(X_red, axis=0), np.max(X_red, axis=0)
X_red = (X_red - x_min) / (x_max - x_min)
plt.figure(figsize=(6, 4))
for i in range(X_red.shape[0]):
plt.text(X_red[i, 0], X_red[i, 1], str(y[i]),
color=plt.cm.nipy_spectral(labels[i] / 10.),
fontdict={'weight': 'bold', 'size': 9})
plt.xticks([])
plt.yticks([])
if title is not None:
plt.title(title, size=17)
plt.axis('off')
plt.tight_layout(rect=[0, 0.03, 1, 0.95])
#----------------------------------------------------------------------
# 2D embedding of the digits dataset
print("Computing embedding")
X_red = manifold.SpectralEmbedding(n_components=2).fit_transform(X)
print("Done.")
from sklearn.cluster import AgglomerativeClustering
for linkage in ('ward', 'average', 'complete', 'single'):
clustering = AgglomerativeClustering(linkage=linkage, n_clusters=10)
t0 = time()
clustering.fit(X_red)
print("%s :\t%.2fs" % (linkage, time() - t0))
plot_clustering(X_red, clustering.labels_, "%s linkage" % linkage)
plt.show()
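# Editor's addition (hedged): the uneven cluster sizes described in the
# docstring can also be inspected numerically; for the last fitted linkage
# this prints one member count per cluster label.
print("cluster sizes (%s linkage):" % linkage, np.bincount(clustering.labels_))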
| bsd-3-clause |
srowen/spark | python/pyspark/pandas/tests/test_spark_functions.py | 11 | 2127 | #
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import numpy as np
from pyspark.pandas.spark import functions as SF
from pyspark.pandas.utils import spark_column_equals
from pyspark.sql import functions as F
from pyspark.sql.types import (
ByteType,
FloatType,
IntegerType,
LongType,
)
from pyspark.testing.pandasutils import PandasOnSparkTestCase
class SparkFunctionsTests(PandasOnSparkTestCase):
def test_lit(self):
self.assertTrue(spark_column_equals(SF.lit(np.int64(1)), F.lit(1).astype(LongType())))
self.assertTrue(spark_column_equals(SF.lit(np.int32(1)), F.lit(1).astype(IntegerType())))
self.assertTrue(spark_column_equals(SF.lit(np.int8(1)), F.lit(1).astype(ByteType())))
self.assertTrue(spark_column_equals(SF.lit(np.byte(1)), F.lit(1).astype(ByteType())))
self.assertTrue(
spark_column_equals(SF.lit(np.float32(1)), F.lit(float(1)).astype(FloatType()))
)
self.assertTrue(spark_column_equals(SF.lit(1), F.lit(1)))
if __name__ == "__main__":
import unittest
from pyspark.pandas.tests.test_spark_functions import * # noqa: F401
try:
import xmlrunner # type: ignore[import]
testRunner = xmlrunner.XMLTestRunner(output="target/test-reports", verbosity=2)
except ImportError:
testRunner = None
unittest.main(testRunner=testRunner, verbosity=2)
| apache-2.0 |
caseyclements/blaze | blaze/compute/numpy.py | 7 | 11207 | from __future__ import absolute_import, division, print_function
import datetime
import numpy as np
from pandas import DataFrame, Series
from datashape import to_numpy, to_numpy_dtype
from numbers import Number
from ..expr import (
Reduction, Field, Projection, Broadcast, Selection, ndim,
Distinct, Sort, Tail, Head, Label, ReLabel, Expr, Slice, Join,
std, var, count, nunique, Summary, IsIn,
BinOp, UnaryOp, USub, Not, nelements, Repeat, Concat, Interp,
UTCFromTimestamp, DateTimeTruncate,
Transpose, TensorDot, Coerce, isnan
)
from ..utils import keywords
from .core import base, compute
from ..dispatch import dispatch
from odo import into
import pandas as pd
__all__ = ['np']
@dispatch(Field, np.ndarray)
def compute_up(c, x, **kwargs):
if x.dtype.names and c._name in x.dtype.names:
return x[c._name]
if not x.dtype.names and x.shape[1] == len(c._child.fields):
return x[:, c._child.fields.index(c._name)]
raise NotImplementedError() # pragma: no cover
@dispatch(Projection, np.ndarray)
def compute_up(t, x, **kwargs):
if x.dtype.names and all(col in x.dtype.names for col in t.fields):
return x[t.fields]
if not x.dtype.names and x.shape[1] == len(t._child.fields):
return x[:, [t._child.fields.index(col) for col in t.fields]]
raise NotImplementedError() # pragma: no cover
try:
from .numba import broadcast_numba as broadcast_ndarray
except ImportError:
def broadcast_ndarray(t, *data, **kwargs):
del kwargs['scope']
d = dict(zip(t._scalar_expr._leaves(), data))
return compute(t._scalar_expr, d, **kwargs)
compute_up.register(Broadcast, np.ndarray)(broadcast_ndarray)
for i in range(2, 6):
compute_up.register(Broadcast, *([(np.ndarray, Number)] * i))(broadcast_ndarray)
@dispatch(Repeat, np.ndarray)
def compute_up(t, data, _char_mul=np.char.multiply, **kwargs):
if isinstance(t.lhs, Expr):
return _char_mul(data, t.rhs)
else:
return _char_mul(t.lhs, data)
@compute_up.register(Repeat, np.ndarray, (np.ndarray, base))
@compute_up.register(Repeat, base, np.ndarray)
def compute_up_np_repeat(t, lhs, rhs, _char_mul=np.char.multiply, **kwargs):
return _char_mul(lhs, rhs)
def _interp(arr, v, _Series=pd.Series, _charmod=np.char.mod):
"""
Delegate to the most efficient string formatting technique based on
the length of the array.
"""
if len(arr) >= 145:
return _Series(arr) % v
return _charmod(arr, v)
@dispatch(Interp, np.ndarray)
def compute_up(t, data, **kwargs):
if isinstance(t.lhs, Expr):
return _interp(data, t.rhs)
else:
return _interp(t.lhs, data)
@compute_up.register(Interp, np.ndarray, (np.ndarray, base))
@compute_up.register(Interp, base, np.ndarray)
def compute_up_np_interp(t, lhs, rhs, **kwargs):
return _interp(lhs, rhs)
@dispatch(BinOp, np.ndarray, (np.ndarray, base))
def compute_up(t, lhs, rhs, **kwargs):
return t.op(lhs, rhs)
@dispatch(BinOp, np.ndarray)
def compute_up(t, data, **kwargs):
if isinstance(t.lhs, Expr):
return t.op(data, t.rhs)
else:
return t.op(t.lhs, data)
@dispatch(BinOp, base, np.ndarray)
def compute_up(t, lhs, rhs, **kwargs):
return t.op(lhs, rhs)
@dispatch(UnaryOp, np.ndarray)
def compute_up(t, x, **kwargs):
return getattr(np, t.symbol)(x)
@dispatch(Not, np.ndarray)
def compute_up(t, x, **kwargs):
return ~x
@dispatch(USub, np.ndarray)
def compute_up(t, x, **kwargs):
return -x
inat = np.datetime64('NaT').view('int64')
@dispatch(count, np.ndarray)
def compute_up(t, x, **kwargs):
result_dtype = to_numpy_dtype(t.dshape)
if issubclass(x.dtype.type, (np.floating, np.object_)):
return pd.notnull(x).sum(keepdims=t.keepdims, axis=t.axis,
dtype=result_dtype)
elif issubclass(x.dtype.type, np.datetime64):
return (x.view('int64') != inat).sum(keepdims=t.keepdims, axis=t.axis,
dtype=result_dtype)
else:
return np.ones(x.shape, dtype=result_dtype).sum(keepdims=t.keepdims,
axis=t.axis,
dtype=result_dtype)
@dispatch(nunique, np.ndarray)
def compute_up(t, x, **kwargs):
assert t.axis == tuple(range(ndim(t._child)))
result = len(np.unique(x))
if t.keepdims:
result = np.array([result])
return result
@dispatch(Reduction, np.ndarray)
def compute_up(t, x, **kwargs):
# can't use the method here, as they aren't Python functions
reducer = getattr(np, t.symbol)
if 'dtype' in keywords(reducer):
return reducer(x, axis=t.axis, keepdims=t.keepdims,
dtype=to_numpy_dtype(t.schema))
return reducer(x, axis=t.axis, keepdims=t.keepdims)
def axify(expr, axis, keepdims=False):
""" inject axis argument into expression
Helper function for compute_up(Summary, np.ndarray)
>>> from blaze import symbol
>>> s = symbol('s', '10 * 10 * int')
>>> expr = s.sum()
>>> axify(expr, axis=0)
sum(s, axis=(0,))
"""
return type(expr)(expr._child, axis=axis, keepdims=keepdims)
@dispatch(Summary, np.ndarray)
def compute_up(expr, data, **kwargs):
shape, dtype = to_numpy(expr.dshape)
if shape:
result = np.empty(shape=shape, dtype=dtype)
for n, v in zip(expr.names, expr.values):
result[n] = compute(axify(v, expr.axis, expr.keepdims), data)
return result
else:
return tuple(compute(axify(v, expr.axis), data) for v in expr.values)
@dispatch((std, var), np.ndarray)
def compute_up(t, x, **kwargs):
return getattr(x, t.symbol)(ddof=t.unbiased, axis=t.axis,
keepdims=t.keepdims)
@compute_up.register(Distinct, np.recarray)
def recarray_distinct(t, rec, **kwargs):
return pd.DataFrame.from_records(rec).drop_duplicates(
subset=t.on or None).to_records(index=False).astype(rec.dtype)
@dispatch(Distinct, np.ndarray)
def compute_up(t, arr, _recarray_distinct=recarray_distinct, **kwargs):
if t.on:
if getattr(arr.dtype, 'names', None) is not None:
return _recarray_distinct(t, arr, **kwargs).view(np.ndarray)
else:
raise ValueError('malformed expression: no columns to distinct on')
return np.unique(arr)
@dispatch(Sort, np.ndarray)
def compute_up(t, x, **kwargs):
if x.dtype.names is None: # not a struct array
result = np.sort(x)
elif (t.key in x.dtype.names or # struct array
isinstance(t.key, list) and all(k in x.dtype.names for k in t.key)):
result = np.sort(x, order=t.key)
elif t.key:
raise NotImplementedError("Sort key %s not supported" % t.key)
if not t.ascending:
result = result[::-1]
return result
@dispatch(Head, np.ndarray)
def compute_up(t, x, **kwargs):
return x[:t.n]
@dispatch(Tail, np.ndarray)
def compute_up(t, x, **kwargs):
return x[-t.n:]
@dispatch(Label, np.ndarray)
def compute_up(t, x, **kwargs):
return np.array(x, dtype=[(t.label, x.dtype.type)])
@dispatch(ReLabel, np.ndarray)
def compute_up(t, x, **kwargs):
types = [x.dtype[i] for i in range(len(x.dtype))]
return np.array(x, dtype=list(zip(t.fields, types)))
@dispatch(Selection, np.ndarray)
def compute_up(sel, x, **kwargs):
return x[compute(sel.predicate, {sel._child: x})]
@dispatch(UTCFromTimestamp, np.ndarray)
def compute_up(expr, data, **kwargs):
return (data * 1e6).astype('datetime64[us]')
@dispatch(Slice, np.ndarray)
def compute_up(expr, x, **kwargs):
return x[expr.index]
@dispatch(Expr, np.ndarray)
def compute_up(t, x, **kwargs):
ds = t._child.dshape
if x.ndim > 1 or isinstance(x, np.recarray) or x.dtype.fields is not None:
return compute_up(t, into(DataFrame, x, dshape=ds), **kwargs)
else:
return compute_up(t, into(Series, x, dshape=ds), **kwargs)
@dispatch(nelements, np.ndarray)
def compute_up(expr, data, **kwargs):
axis = expr.axis
if expr.keepdims:
shape = tuple(data.shape[i] if i not in axis else 1
for i in range(ndim(expr._child)))
else:
shape = tuple(data.shape[i] for i in range(ndim(expr._child))
if i not in axis)
value = np.prod([data.shape[i] for i in axis])
result = np.empty(shape)
result.fill(value)
result = result.astype('int64')
return result
# Note the use of 'week': 'M8[D]' here.
# We truncate week offsets "manually" in the compute_up implementation by first
# converting to days and then multiplying our measure by 7. This simplifies the
# code by only requiring us to calculate the week offset relative to the day of
# the week.
precision_map = {'year': 'M8[Y]',
'month': 'M8[M]',
'week': 'M8[D]',
'day': 'M8[D]',
'hour': 'M8[h]',
'minute': 'M8[m]',
'second': 'M8[s]',
'millisecond': 'M8[ms]',
'microsecond': 'M8[us]',
'nanosecond': 'M8[ns]'}
# these offsets are integers in units of their representation
epoch = datetime.datetime(1970, 1, 1)
offsets = {
'week': epoch.isoweekday(),
'day': epoch.toordinal() # number of days since *Python's* epoch (01/01/01)
}
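# Editor's worked example (added; the date is illustrative): truncating
# 1970-01-08 to 'week' with measure 1 uses np_dtype 'M8[D]', offset 4
# (epoch.isoweekday()) and an effective measure of 7, so
# ((7 + 4) // 7) * 7 - 4 = 3, i.e. day 3 after the epoch, 1970-01-04.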
@dispatch(DateTimeTruncate, (np.ndarray, np.datetime64))
def compute_up(expr, data, **kwargs):
np_dtype = precision_map[expr.unit]
offset = offsets.get(expr.unit, 0)
measure = expr.measure * 7 if expr.unit == 'week' else expr.measure
result = (((data.astype(np_dtype)
.view('int64')
+ offset)
// measure
* measure
- offset)
.astype(np_dtype))
return result
@dispatch(isnan, np.ndarray)
def compute_up(expr, data, **kwargs):
return np.isnan(data)
@dispatch(np.ndarray)
def chunks(x, chunksize=1024):
start = 0
n = len(x)
while start < n:
yield x[start:start + chunksize]
start += chunksize
@dispatch(Transpose, np.ndarray)
def compute_up(expr, x, **kwargs):
return np.transpose(x, axes=expr.axes)
@dispatch(TensorDot, np.ndarray, np.ndarray)
def compute_up(expr, lhs, rhs, **kwargs):
return np.tensordot(lhs, rhs, axes=[expr._left_axes, expr._right_axes])
@dispatch(IsIn, np.ndarray)
def compute_up(expr, data, **kwargs):
return np.in1d(data, tuple(expr._keys))
@compute_up.register(Join, DataFrame, np.ndarray)
@compute_up.register(Join, np.ndarray, DataFrame)
@compute_up.register(Join, np.ndarray, np.ndarray)
def join_ndarray(expr, lhs, rhs, **kwargs):
if isinstance(lhs, np.ndarray):
lhs = DataFrame(lhs)
if isinstance(rhs, np.ndarray):
rhs = DataFrame(rhs)
return compute_up(expr, lhs, rhs, **kwargs)
@dispatch(Coerce, np.ndarray)
def compute_up(expr, data, **kwargs):
return data.astype(to_numpy_dtype(expr.schema))
@dispatch(Concat, np.ndarray, np.ndarray)
def compute_up(expr, lhs, rhs, _concat=np.concatenate, **kwargs):
return _concat((lhs, rhs), axis=expr.axis)
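# --- Editor's usage sketch (comments only, since importing blaze's top level
# here would be circular; the dshape and data below are assumptions). These
# dispatch rules are normally reached through blaze's public compute():
#
#     from blaze import symbol, compute
#     import numpy as np
#     t = symbol('t', '3 * {x: int32, y: int32}')
#     arr = np.array([(1, 10), (2, 20), (3, 30)],
#                    dtype=[('x', 'i4'), ('y', 'i4')])
#     compute(t.x.sum(), arr)   # dispatches to compute_up(Reduction, np.ndarray)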
| bsd-3-clause |
brettavedisian/phys202-project | Project/S7retromoviemaker.py | 1 | 1471 | import matplotlib.pyplot as plt
import numpy as np
import seaborn as sns
import timeit
from scipy.integrate import odeint
from IPython.html.widgets import interact, fixed
from moviepy.video.io.bindings import mplfig_to_npimage
import moviepy.editor as mpy
from S7_icsandsolutions import *
gamma = 4.4983169634398597e4
tsteps = 1000
t = np.linspace(0,1.5,tsteps)
M = 10
S = 10
direct_r1, direct_r2, retro_r1, retro_r2, R1, R2, vR1, vR2 = S7_ode_solutions(t,tsteps,M,S,gamma)
fig_mpl_retro, ax_retro = plt.subplots(1,figsize=(5,5))
mr1=[retro_r1[k][0] for k in range(120)]
mr2=[retro_r2[k][0] for k in range(120)]
mR1=R1[0]
mR2=R2[0]
plt.sca(ax_retro)
plt.xlim(-75,75)
plt.ylim(-75,75)
plt.title('Retrograde Passage')
plt.tick_params(axis='x', labelbottom='off')
plt.tick_params(axis='y', labelleft='off')
scatr_retro = ax_retro.scatter(mr1,mr2,c='c',s=4,label='m')
scatR_retro = ax_retro.scatter(mR1,mR2,c='r',label='S')
scatM_retro = ax_retro.scatter(0,0,c='k',label='M')
def make_frame_mpl_S7retro(t):
newr1=[retro_r1[k][t*20] for k in range(120)]
newr2=[retro_r2[k][t*20] for k in range(120)]
newR1=R1[t*20]
newR2=R2[t*20]
# updates the data for each frame
# this creates Nx2 matrix
scatr_retro.set_offsets(np.transpose(np.vstack([newr1,newr2])))
scatR_retro.set_offsets(np.transpose(np.vstack([newR1,newR2])))
return mplfig_to_npimage(fig_mpl_retro)
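# Editor's note (hedged): the clip built below is never written out in this
# script; rendering it would look something like
#     S7retro_animation.write_gif('S7retro.gif', fps=20)
# (filename and fps are assumptions).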
S7retro_animation = mpy.VideoClip(make_frame_mpl_S7retro, duration=25) | mit |
CapOM/ChromiumGStreamerBackend | chrome/test/data/nacl/gdb_rsp.py | 42 | 2542 | # Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
# This file is based on gdb_rsp.py file from NaCl repository.
import re
import socket
import time
def RspChecksum(data):
checksum = 0
for char in data:
checksum = (checksum + ord(char)) % 0x100
return checksum
class EofOnReplyException(Exception):
pass
class GdbRspConnection(object):
def __init__(self, addr):
self._socket = self._Connect(addr)
def _Connect(self, addr):
# We have to poll because we do not know when sel_ldr has
# successfully done bind() on the TCP port. This is inherently
# unreliable.
# TODO(mseaborn): Add a more reliable connection mechanism to
# sel_ldr's debug stub.
timeout_in_seconds = 10
poll_time_in_seconds = 0.1
for i in xrange(int(timeout_in_seconds / poll_time_in_seconds)):
# On Mac OS X, we have to create a new socket FD for each retry.
sock = socket.socket()
try:
sock.connect(addr)
except socket.error:
# Retry after a delay.
time.sleep(poll_time_in_seconds)
else:
return sock
raise Exception('Could not connect to sel_ldr\'s debug stub in %i seconds'
% timeout_in_seconds)
def _GetReply(self):
reply = ''
while True:
data = self._socket.recv(1024)
if len(data) == 0:
if reply == '+':
raise EofOnReplyException()
raise AssertionError('EOF on socket reached with '
'incomplete reply message: %r' % reply)
reply += data
if '#' in data:
break
match = re.match('\+\$([^#]*)#([0-9a-fA-F]{2})$', reply)
if match is None:
raise AssertionError('Unexpected reply message: %r' % reply)
reply_body = match.group(1)
checksum = match.group(2)
expected_checksum = '%02x' % RspChecksum(reply_body)
if checksum != expected_checksum:
raise AssertionError('Bad RSP checksum: %r != %r' %
(checksum, expected_checksum))
# Send acknowledgement.
self._socket.send('+')
return reply_body
# Send an rsp message, but don't wait for or expect a reply.
def RspSendOnly(self, data):
msg = '$%s#%02x' % (data, RspChecksum(data))
return self._socket.send(msg)
def RspRequest(self, data):
self.RspSendOnly(data)
return self._GetReply()
def RspInterrupt(self):
self._socket.send('\x03')
return self._GetReply()
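# Editor's usage sketch (hypothetical address and packet; shown as comments
# because it needs a live debug stub to run):
#
#     connection = GdbRspConnection(('localhost', 4014))
#     reply = connection.RspRequest('qSupported')   # query stub features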
| bsd-3-clause |
bOOm-X/spark | python/pyspark/sql/context.py | 11 | 23848 | #
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from __future__ import print_function
import sys
import warnings
if sys.version >= '3':
basestring = unicode = str
from pyspark import since
from pyspark.rdd import ignore_unicode_prefix
from pyspark.sql.session import _monkey_patch_RDD, SparkSession
from pyspark.sql.dataframe import DataFrame
from pyspark.sql.readwriter import DataFrameReader
from pyspark.sql.streaming import DataStreamReader
from pyspark.sql.types import IntegerType, Row, StringType
from pyspark.sql.utils import install_exception_handler
__all__ = ["SQLContext", "HiveContext", "UDFRegistration"]
class SQLContext(object):
"""The entry point for working with structured data (rows and columns) in Spark, in Spark 1.x.
As of Spark 2.0, this is replaced by :class:`SparkSession`. However, we are keeping the class
here for backward compatibility.
A SQLContext can be used create :class:`DataFrame`, register :class:`DataFrame` as
tables, execute SQL over tables, cache tables, and read parquet files.
:param sparkContext: The :class:`SparkContext` backing this SQLContext.
:param sparkSession: The :class:`SparkSession` around which this SQLContext wraps.
:param jsqlContext: An optional JVM Scala SQLContext. If set, we do not instantiate a new
SQLContext in the JVM, instead we make all calls to this object.
"""
_instantiatedContext = None
@ignore_unicode_prefix
def __init__(self, sparkContext, sparkSession=None, jsqlContext=None):
"""Creates a new SQLContext.
>>> from datetime import datetime
>>> sqlContext = SQLContext(sc)
>>> allTypes = sc.parallelize([Row(i=1, s="string", d=1.0, l=1,
... b=True, list=[1, 2, 3], dict={"s": 0}, row=Row(a=1),
... time=datetime(2014, 8, 1, 14, 1, 5))])
>>> df = allTypes.toDF()
>>> df.createOrReplaceTempView("allTypes")
>>> sqlContext.sql('select i+1, d+1, not b, list[1], dict["s"], time, row.a '
... 'from allTypes where b and i > 0').collect()
[Row((i + CAST(1 AS BIGINT))=2, (d + CAST(1 AS DOUBLE))=2.0, (NOT b)=False, list[1]=2, \
dict[s]=0, time=datetime.datetime(2014, 8, 1, 14, 1, 5), a=1)]
>>> df.rdd.map(lambda x: (x.i, x.s, x.d, x.l, x.b, x.time, x.row.a, x.list)).collect()
[(1, u'string', 1.0, 1, True, datetime.datetime(2014, 8, 1, 14, 1, 5), 1, [1, 2, 3])]
"""
self._sc = sparkContext
self._jsc = self._sc._jsc
self._jvm = self._sc._jvm
if sparkSession is None:
sparkSession = SparkSession.builder.getOrCreate()
if jsqlContext is None:
jsqlContext = sparkSession._jwrapped
self.sparkSession = sparkSession
self._jsqlContext = jsqlContext
_monkey_patch_RDD(self.sparkSession)
install_exception_handler()
if SQLContext._instantiatedContext is None:
SQLContext._instantiatedContext = self
@property
def _ssql_ctx(self):
"""Accessor for the JVM Spark SQL context.
Subclasses can override this property to provide their own
JVM Contexts.
"""
return self._jsqlContext
@classmethod
@since(1.6)
def getOrCreate(cls, sc):
"""
Get the existing SQLContext or create a new one with given SparkContext.
:param sc: SparkContext
"""
if cls._instantiatedContext is None:
jsqlContext = sc._jvm.SQLContext.getOrCreate(sc._jsc.sc())
sparkSession = SparkSession(sc, jsqlContext.sparkSession())
cls(sc, sparkSession, jsqlContext)
return cls._instantiatedContext
@since(1.6)
def newSession(self):
"""
Returns a new SQLContext as new session, that has separate SQLConf,
registered temporary views and UDFs, but shared SparkContext and
table cache.
"""
return self.__class__(self._sc, self.sparkSession.newSession())
@since(1.3)
def setConf(self, key, value):
"""Sets the given Spark SQL configuration property.
"""
self.sparkSession.conf.set(key, value)
@ignore_unicode_prefix
@since(1.3)
def getConf(self, key, defaultValue=None):
"""Returns the value of Spark SQL configuration property for the given key.
If the key is not set and defaultValue is not None, return
defaultValue. If the key is not set and defaultValue is None, return
the system default value.
>>> sqlContext.getConf("spark.sql.shuffle.partitions")
u'200'
>>> sqlContext.getConf("spark.sql.shuffle.partitions", u"10")
u'10'
>>> sqlContext.setConf("spark.sql.shuffle.partitions", u"50")
>>> sqlContext.getConf("spark.sql.shuffle.partitions", u"10")
u'50'
"""
return self.sparkSession.conf.get(key, defaultValue)
@property
@since("1.3.1")
def udf(self):
"""Returns a :class:`UDFRegistration` for UDF registration.
:return: :class:`UDFRegistration`
"""
return UDFRegistration(self)
@since(1.4)
def range(self, start, end=None, step=1, numPartitions=None):
"""
Create a :class:`DataFrame` with single :class:`pyspark.sql.types.LongType` column named
``id``, containing elements in a range from ``start`` to ``end`` (exclusive) with
step value ``step``.
:param start: the start value
:param end: the end value (exclusive)
:param step: the incremental step (default: 1)
:param numPartitions: the number of partitions of the DataFrame
:return: :class:`DataFrame`
>>> sqlContext.range(1, 7, 2).collect()
[Row(id=1), Row(id=3), Row(id=5)]
If only one argument is specified, it will be used as the end value.
>>> sqlContext.range(3).collect()
[Row(id=0), Row(id=1), Row(id=2)]
"""
return self.sparkSession.range(start, end, step, numPartitions)
@ignore_unicode_prefix
@since(1.2)
def registerFunction(self, name, f, returnType=StringType()):
"""Registers a python function (including lambda function) as a UDF
so it can be used in SQL statements.
In addition to a name and the function itself, the return type can be optionally specified.
When the return type is not given it defaults to a string and conversion will automatically
be done. For any other return type, the produced object must match the specified type.
:param name: name of the UDF
:param f: python function
:param returnType: a :class:`pyspark.sql.types.DataType` object
:return: a wrapped :class:`UserDefinedFunction`
>>> strlen = sqlContext.registerFunction("stringLengthString", lambda x: len(x))
>>> sqlContext.sql("SELECT stringLengthString('test')").collect()
[Row(stringLengthString(test)=u'4')]
>>> sqlContext.sql("SELECT 'foo' AS text").select(strlen("text")).collect()
[Row(stringLengthString(text)=u'3')]
>>> from pyspark.sql.types import IntegerType
>>> _ = sqlContext.registerFunction("stringLengthInt", lambda x: len(x), IntegerType())
>>> sqlContext.sql("SELECT stringLengthInt('test')").collect()
[Row(stringLengthInt(test)=4)]
>>> from pyspark.sql.types import IntegerType
>>> _ = sqlContext.udf.register("stringLengthInt", lambda x: len(x), IntegerType())
>>> sqlContext.sql("SELECT stringLengthInt('test')").collect()
[Row(stringLengthInt(test)=4)]
"""
return self.sparkSession.catalog.registerFunction(name, f, returnType)
@ignore_unicode_prefix
@since(2.1)
def registerJavaFunction(self, name, javaClassName, returnType=None):
"""Register a java UDF so it can be used in SQL statements.
In addition to a name and the function itself, the return type can be optionally specified.
When the return type is not specified we would infer it via reflection.
:param name: name of the UDF
:param javaClassName: fully qualified name of java class
:param returnType: a :class:`pyspark.sql.types.DataType` object
>>> sqlContext.registerJavaFunction("javaStringLength",
... "test.org.apache.spark.sql.JavaStringLength", IntegerType())
>>> sqlContext.sql("SELECT javaStringLength('test')").collect()
[Row(UDF(test)=4)]
>>> sqlContext.registerJavaFunction("javaStringLength2",
... "test.org.apache.spark.sql.JavaStringLength")
>>> sqlContext.sql("SELECT javaStringLength2('test')").collect()
[Row(UDF(test)=4)]
"""
jdt = None
if returnType is not None:
jdt = self.sparkSession._jsparkSession.parseDataType(returnType.json())
self.sparkSession._jsparkSession.udf().registerJava(name, javaClassName, jdt)
# TODO(andrew): delete this once we refactor things to take in SparkSession
def _inferSchema(self, rdd, samplingRatio=None):
"""
Infer schema from an RDD of Row or tuple.
:param rdd: an RDD of Row or tuple
:param samplingRatio: sampling ratio, or no sampling (default)
:return: :class:`pyspark.sql.types.StructType`
"""
return self.sparkSession._inferSchema(rdd, samplingRatio)
@since(1.3)
@ignore_unicode_prefix
def createDataFrame(self, data, schema=None, samplingRatio=None, verifySchema=True):
"""
Creates a :class:`DataFrame` from an :class:`RDD`, a list or a :class:`pandas.DataFrame`.
When ``schema`` is a list of column names, the type of each column
will be inferred from ``data``.
When ``schema`` is ``None``, it will try to infer the schema (column names and types)
from ``data``, which should be an RDD of :class:`Row`,
or :class:`namedtuple`, or :class:`dict`.
When ``schema`` is :class:`pyspark.sql.types.DataType` or a datatype string it must match
the real data, or an exception will be thrown at runtime. If the given schema is not
:class:`pyspark.sql.types.StructType`, it will be wrapped into a
:class:`pyspark.sql.types.StructType` as its only field, and the field name will be "value",
each record will also be wrapped into a tuple, which can be converted to row later.
If schema inference is needed, ``samplingRatio`` is used to determine the ratio of
rows used for schema inference. The first row will be used if ``samplingRatio`` is ``None``.
:param data: an RDD of any kind of SQL data representation(e.g. :class:`Row`,
:class:`tuple`, ``int``, ``boolean``, etc.), or :class:`list`, or
:class:`pandas.DataFrame`.
:param schema: a :class:`pyspark.sql.types.DataType` or a datatype string or a list of
column names, default is None. The data type string format equals to
:class:`pyspark.sql.types.DataType.simpleString`, except that top level struct type can
omit the ``struct<>`` and atomic types use ``typeName()`` as their format, e.g. use
``byte`` instead of ``tinyint`` for :class:`pyspark.sql.types.ByteType`.
We can also use ``int`` as a short name for :class:`pyspark.sql.types.IntegerType`.
:param samplingRatio: the sample ratio of rows used for inferring
:param verifySchema: verify data types of every row against schema.
:return: :class:`DataFrame`
.. versionchanged:: 2.0
The ``schema`` parameter can be a :class:`pyspark.sql.types.DataType` or a
datatype string after 2.0.
If it's not a :class:`pyspark.sql.types.StructType`, it will be wrapped into a
:class:`pyspark.sql.types.StructType` and each record will also be wrapped into a tuple.
.. versionchanged:: 2.1
Added verifySchema.
>>> l = [('Alice', 1)]
>>> sqlContext.createDataFrame(l).collect()
[Row(_1=u'Alice', _2=1)]
>>> sqlContext.createDataFrame(l, ['name', 'age']).collect()
[Row(name=u'Alice', age=1)]
>>> d = [{'name': 'Alice', 'age': 1}]
>>> sqlContext.createDataFrame(d).collect()
[Row(age=1, name=u'Alice')]
>>> rdd = sc.parallelize(l)
>>> sqlContext.createDataFrame(rdd).collect()
[Row(_1=u'Alice', _2=1)]
>>> df = sqlContext.createDataFrame(rdd, ['name', 'age'])
>>> df.collect()
[Row(name=u'Alice', age=1)]
>>> from pyspark.sql import Row
>>> Person = Row('name', 'age')
>>> person = rdd.map(lambda r: Person(*r))
>>> df2 = sqlContext.createDataFrame(person)
>>> df2.collect()
[Row(name=u'Alice', age=1)]
>>> from pyspark.sql.types import *
>>> schema = StructType([
... StructField("name", StringType(), True),
... StructField("age", IntegerType(), True)])
>>> df3 = sqlContext.createDataFrame(rdd, schema)
>>> df3.collect()
[Row(name=u'Alice', age=1)]
>>> sqlContext.createDataFrame(df.toPandas()).collect() # doctest: +SKIP
[Row(name=u'Alice', age=1)]
>>> sqlContext.createDataFrame(pandas.DataFrame([[1, 2]])).collect() # doctest: +SKIP
[Row(0=1, 1=2)]
>>> sqlContext.createDataFrame(rdd, "a: string, b: int").collect()
[Row(a=u'Alice', b=1)]
>>> rdd = rdd.map(lambda row: row[1])
>>> sqlContext.createDataFrame(rdd, "int").collect()
[Row(value=1)]
>>> sqlContext.createDataFrame(rdd, "boolean").collect() # doctest: +IGNORE_EXCEPTION_DETAIL
Traceback (most recent call last):
...
Py4JJavaError: ...
"""
return self.sparkSession.createDataFrame(data, schema, samplingRatio, verifySchema)
@since(1.3)
def registerDataFrameAsTable(self, df, tableName):
"""Registers the given :class:`DataFrame` as a temporary table in the catalog.
Temporary tables exist only during the lifetime of this instance of :class:`SQLContext`.
>>> sqlContext.registerDataFrameAsTable(df, "table1")
"""
df.createOrReplaceTempView(tableName)
@since(1.6)
def dropTempTable(self, tableName):
""" Remove the temp table from catalog.
>>> sqlContext.registerDataFrameAsTable(df, "table1")
>>> sqlContext.dropTempTable("table1")
"""
self.sparkSession.catalog.dropTempView(tableName)
@since(1.3)
def createExternalTable(self, tableName, path=None, source=None, schema=None, **options):
"""Creates an external table based on the dataset in a data source.
It returns the DataFrame associated with the external table.
The data source is specified by the ``source`` and a set of ``options``.
If ``source`` is not specified, the default data source configured by
``spark.sql.sources.default`` will be used.
Optionally, a schema can be provided as the schema of the returned :class:`DataFrame` and
created external table.
:return: :class:`DataFrame`
"""
return self.sparkSession.catalog.createExternalTable(
tableName, path, source, schema, **options)
@ignore_unicode_prefix
@since(1.0)
def sql(self, sqlQuery):
"""Returns a :class:`DataFrame` representing the result of the given query.
:return: :class:`DataFrame`
>>> sqlContext.registerDataFrameAsTable(df, "table1")
>>> df2 = sqlContext.sql("SELECT field1 AS f1, field2 as f2 from table1")
>>> df2.collect()
[Row(f1=1, f2=u'row1'), Row(f1=2, f2=u'row2'), Row(f1=3, f2=u'row3')]
"""
return self.sparkSession.sql(sqlQuery)
@since(1.0)
def table(self, tableName):
"""Returns the specified table or view as a :class:`DataFrame`.
:return: :class:`DataFrame`
>>> sqlContext.registerDataFrameAsTable(df, "table1")
>>> df2 = sqlContext.table("table1")
>>> sorted(df.collect()) == sorted(df2.collect())
True
"""
return self.sparkSession.table(tableName)
@ignore_unicode_prefix
@since(1.3)
def tables(self, dbName=None):
"""Returns a :class:`DataFrame` containing names of tables in the given database.
If ``dbName`` is not specified, the current database will be used.
The returned DataFrame has three columns: ``database``, ``tableName`` and
``isTemporary`` (a column with :class:`BooleanType` indicating if a table is a
temporary one or not).
:param dbName: string, name of the database to use.
:return: :class:`DataFrame`
>>> sqlContext.registerDataFrameAsTable(df, "table1")
>>> df2 = sqlContext.tables()
>>> df2.filter("tableName = 'table1'").first()
Row(database=u'', tableName=u'table1', isTemporary=True)
"""
if dbName is None:
return DataFrame(self._ssql_ctx.tables(), self)
else:
return DataFrame(self._ssql_ctx.tables(dbName), self)
@since(1.3)
def tableNames(self, dbName=None):
"""Returns a list of names of tables in the database ``dbName``.
:param dbName: string, name of the database to use. Default to the current database.
:return: list of table names, in string
>>> sqlContext.registerDataFrameAsTable(df, "table1")
>>> "table1" in sqlContext.tableNames()
True
>>> "table1" in sqlContext.tableNames("default")
True
"""
if dbName is None:
return [name for name in self._ssql_ctx.tableNames()]
else:
return [name for name in self._ssql_ctx.tableNames(dbName)]
@since(1.0)
def cacheTable(self, tableName):
"""Caches the specified table in-memory."""
self._ssql_ctx.cacheTable(tableName)
@since(1.0)
def uncacheTable(self, tableName):
"""Removes the specified table from the in-memory cache."""
self._ssql_ctx.uncacheTable(tableName)
@since(1.3)
def clearCache(self):
"""Removes all cached tables from the in-memory cache. """
self._ssql_ctx.clearCache()
@property
@since(1.4)
def read(self):
"""
Returns a :class:`DataFrameReader` that can be used to read data
in as a :class:`DataFrame`.
:return: :class:`DataFrameReader`
"""
return DataFrameReader(self)
@property
@since(2.0)
def readStream(self):
"""
Returns a :class:`DataStreamReader` that can be used to read data streams
as a streaming :class:`DataFrame`.
.. note:: Evolving.
:return: :class:`DataStreamReader`
>>> text_sdf = sqlContext.readStream.text(tempfile.mkdtemp())
>>> text_sdf.isStreaming
True
"""
return DataStreamReader(self)
@property
@since(2.0)
def streams(self):
"""Returns a :class:`StreamingQueryManager` that allows managing all the
:class:`StreamingQuery` StreamingQueries active on `this` context.
.. note:: Evolving.
"""
from pyspark.sql.streaming import StreamingQueryManager
return StreamingQueryManager(self._ssql_ctx.streams())
class HiveContext(SQLContext):
"""A variant of Spark SQL that integrates with data stored in Hive.
Configuration for Hive is read from ``hive-site.xml`` on the classpath.
It supports running both SQL and HiveQL commands.
:param sparkContext: The SparkContext to wrap.
:param jhiveContext: An optional JVM Scala HiveContext. If set, we do not instantiate a new
:class:`HiveContext` in the JVM, instead we make all calls to this object.
.. note:: Deprecated in 2.0.0. Use SparkSession.builder.enableHiveSupport().getOrCreate().
"""
def __init__(self, sparkContext, jhiveContext=None):
warnings.warn(
"HiveContext is deprecated in Spark 2.0.0. Please use " +
"SparkSession.builder.enableHiveSupport().getOrCreate() instead.",
DeprecationWarning)
if jhiveContext is None:
sparkSession = SparkSession.builder.enableHiveSupport().getOrCreate()
else:
sparkSession = SparkSession(sparkContext, jhiveContext.sparkSession())
SQLContext.__init__(self, sparkContext, sparkSession, jhiveContext)
@classmethod
def _createForTesting(cls, sparkContext):
"""(Internal use only) Create a new HiveContext for testing.
All test code that touches HiveContext *must* go through this method. Otherwise,
you may end up launching multiple derby instances and encounter with incredibly
confusing error messages.
"""
jsc = sparkContext._jsc.sc()
jtestHive = sparkContext._jvm.org.apache.spark.sql.hive.test.TestHiveContext(jsc, False)
return cls(sparkContext, jtestHive)
def refreshTable(self, tableName):
"""Invalidate and refresh all the cached the metadata of the given
table. For performance reasons, Spark SQL or the external data source
library it uses might cache certain metadata about a table, such as the
location of blocks. When those change outside of Spark SQL, users should
call this function to invalidate the cache.
"""
self._ssql_ctx.refreshTable(tableName)
class UDFRegistration(object):
"""Wrapper for user-defined function registration."""
def __init__(self, sqlContext):
self.sqlContext = sqlContext
def register(self, name, f, returnType=StringType()):
return self.sqlContext.registerFunction(name, f, returnType)
register.__doc__ = SQLContext.registerFunction.__doc__
def _test():
import os
import doctest
import tempfile
from pyspark.context import SparkContext
from pyspark.sql import Row, SQLContext
import pyspark.sql.context
os.chdir(os.environ["SPARK_HOME"])
globs = pyspark.sql.context.__dict__.copy()
sc = SparkContext('local[4]', 'PythonTest')
globs['tempfile'] = tempfile
globs['os'] = os
globs['sc'] = sc
globs['sqlContext'] = SQLContext(sc)
globs['rdd'] = rdd = sc.parallelize(
[Row(field1=1, field2="row1"),
Row(field1=2, field2="row2"),
Row(field1=3, field2="row3")]
)
globs['df'] = rdd.toDF()
jsonStrings = [
'{"field1": 1, "field2": "row1", "field3":{"field4":11}}',
'{"field1" : 2, "field3":{"field4":22, "field5": [10, 11]},'
'"field6":[{"field7": "row2"}]}',
'{"field1" : null, "field2": "row3", '
'"field3":{"field4":33, "field5": []}}'
]
globs['jsonStrings'] = jsonStrings
globs['json'] = sc.parallelize(jsonStrings)
(failure_count, test_count) = doctest.testmod(
pyspark.sql.context, globs=globs,
optionflags=doctest.ELLIPSIS | doctest.NORMALIZE_WHITESPACE)
globs['sc'].stop()
if failure_count:
exit(-1)
if __name__ == "__main__":
_test()
| apache-2.0 |
JPFrancoia/scikit-learn | sklearn/feature_selection/tests/test_from_model.py | 5 | 6805 | import numpy as np
import scipy.sparse as sp
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_less
from sklearn.utils.testing import assert_greater
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_warns
from sklearn.utils.testing import skip_if_32bit
from sklearn import datasets
from sklearn.linear_model import LogisticRegression, SGDClassifier, Lasso
from sklearn.svm import LinearSVC
from sklearn.feature_selection import SelectFromModel
from sklearn.ensemble import RandomForestClassifier
from sklearn.linear_model import PassiveAggressiveClassifier
iris = datasets.load_iris()
data, y = iris.data, iris.target
rng = np.random.RandomState(0)
def test_transform_linear_model():
for clf in (LogisticRegression(C=0.1),
LinearSVC(C=0.01, dual=False),
SGDClassifier(alpha=0.001, n_iter=50, shuffle=True,
random_state=0)):
for thresh in (None, ".09*mean", "1e-5 * median"):
for func in (np.array, sp.csr_matrix):
X = func(data)
clf.set_params(penalty="l1")
clf.fit(X, y)
X_new = assert_warns(
DeprecationWarning, clf.transform, X, thresh)
if isinstance(clf, SGDClassifier):
assert_true(X_new.shape[1] <= X.shape[1])
else:
assert_less(X_new.shape[1], X.shape[1])
clf.set_params(penalty="l2")
clf.fit(X_new, y)
pred = clf.predict(X_new)
assert_greater(np.mean(pred == y), 0.7)
def test_invalid_input():
clf = SGDClassifier(alpha=0.1, n_iter=10, shuffle=True, random_state=None)
for threshold in ["gobbledigook", ".5 * gobbledigook"]:
model = SelectFromModel(clf, threshold=threshold)
model.fit(data, y)
assert_raises(ValueError, model.transform, data)
def test_input_estimator_unchanged():
"""
Test that SelectFromModel fits on a clone of the estimator.
"""
est = RandomForestClassifier()
transformer = SelectFromModel(estimator=est)
transformer.fit(data, y)
assert_true(transformer.estimator is est)
@skip_if_32bit
def test_feature_importances():
X, y = datasets.make_classification(
n_samples=1000, n_features=10, n_informative=3, n_redundant=0,
n_repeated=0, shuffle=False, random_state=0)
est = RandomForestClassifier(n_estimators=50, random_state=0)
for threshold, func in zip(["mean", "median"], [np.mean, np.median]):
transformer = SelectFromModel(estimator=est, threshold=threshold)
transformer.fit(X, y)
assert_true(hasattr(transformer.estimator_, 'feature_importances_'))
X_new = transformer.transform(X)
assert_less(X_new.shape[1], X.shape[1])
importances = transformer.estimator_.feature_importances_
feature_mask = np.abs(importances) > func(importances)
assert_array_almost_equal(X_new, X[:, feature_mask])
# Check with sample weights
sample_weight = np.ones(y.shape)
sample_weight[y == 1] *= 100
est = RandomForestClassifier(n_estimators=50, random_state=0)
transformer = SelectFromModel(estimator=est)
transformer.fit(X, y, sample_weight=sample_weight)
importances = transformer.estimator_.feature_importances_
transformer.fit(X, y, sample_weight=3 * sample_weight)
importances_bis = transformer.estimator_.feature_importances_
assert_almost_equal(importances, importances_bis)
# For the Lasso and related models, the threshold defaults to 1e-5
transformer = SelectFromModel(estimator=Lasso(alpha=0.1))
transformer.fit(X, y)
X_new = transformer.transform(X)
mask = np.abs(transformer.estimator_.coef_) > 1e-5
assert_array_equal(X_new, X[:, mask])
def test_partial_fit():
est = PassiveAggressiveClassifier(random_state=0, shuffle=False)
transformer = SelectFromModel(estimator=est)
transformer.partial_fit(data, y,
classes=np.unique(y))
old_model = transformer.estimator_
transformer.partial_fit(data, y,
classes=np.unique(y))
new_model = transformer.estimator_
assert_true(old_model is new_model)
X_transform = transformer.transform(data)
transformer.fit(np.vstack((data, data)), np.concatenate((y, y)))
assert_array_equal(X_transform, transformer.transform(data))
def test_warm_start():
est = PassiveAggressiveClassifier(warm_start=True, random_state=0)
transformer = SelectFromModel(estimator=est)
transformer.fit(data, y)
old_model = transformer.estimator_
transformer.fit(data, y)
new_model = transformer.estimator_
assert_true(old_model is new_model)
def test_prefit():
"""
Test all possible combinations of the prefit parameter.
"""
# Passing a prefit parameter with the selected model
# and fitting a unfit model with prefit=False should give same results.
clf = SGDClassifier(alpha=0.1, n_iter=10, shuffle=True, random_state=0)
model = SelectFromModel(clf)
model.fit(data, y)
X_transform = model.transform(data)
clf.fit(data, y)
model = SelectFromModel(clf, prefit=True)
assert_array_equal(model.transform(data), X_transform)
# Check that the model is rewritten if prefit=False and a fitted model is
# passed
model = SelectFromModel(clf, prefit=False)
model.fit(data, y)
assert_array_equal(model.transform(data), X_transform)
# Check that prefit=True and calling fit raises a ValueError
model = SelectFromModel(clf, prefit=True)
assert_raises(ValueError, model.fit, data, y)
def test_threshold_string():
est = RandomForestClassifier(n_estimators=50, random_state=0)
model = SelectFromModel(est, threshold="0.5*mean")
model.fit(data, y)
X_transform = model.transform(data)
# Calculate the threshold from the estimator directly.
est.fit(data, y)
threshold = 0.5 * np.mean(est.feature_importances_)
mask = est.feature_importances_ > threshold
assert_array_equal(X_transform, data[:, mask])
def test_threshold_without_refitting():
"""Test that the threshold can be set without refitting the model."""
clf = SGDClassifier(alpha=0.1, n_iter=10, shuffle=True, random_state=0)
model = SelectFromModel(clf, threshold=0.1)
model.fit(data, y)
X_transform = model.transform(data)
# Set a higher threshold to filter out more features.
model.threshold = 1.0
assert_greater(X_transform.shape[1], model.transform(data).shape[1])
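# Illustrative usage sketch (not part of the original test suite): the
# workflow exercised by the tests above -- wrap SelectFromModel around an
# estimator exposing coefficients/importances, then keep only the selected
# columns. The "mean" threshold string is one of the supported forms
# ("mean", "median", scaled variants such as "0.5*mean", or a float).
def _example_select_from_model_usage():
    clf = LogisticRegression(C=0.1)
    model = SelectFromModel(clf, threshold="mean")
    model.fit(data, y)  # fits a clone of clf internally (see test above)
    X_reduced = model.transform(data)  # columns whose importance exceeds the mean
    assert X_reduced.shape[1] <= data.shape[1]
    return X_reduced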
| bsd-3-clause |
slipguru/ignet | icing/inference.py | 2 | 11566 | """Compatibility module for sklearn-like ICING usage."""
from __future__ import print_function
import os
import logging
import numpy as np
import gzip
from functools import partial
from six.moves import cPickle as pkl
from sklearn.base import BaseEstimator
from sklearn.cluster import DBSCAN, MiniBatchKMeans
from sklearn.neighbors import BallTree
from icing.core.distances import distance_dataframe, StringDistance
from icing.similarity_ import compute_similarity_matrix
from icing.utils import extra
class DefineClones(BaseEstimator):
"""Clustering container for defining final clones."""
def __init__(
self, tag='debug', root=None, cluster='ap', igsimilarity=None,
threshold=0.05, compute_similarity=True, clustering=None):
"""Description of params."""
self.tag = tag
self.root = root
self.cluster = cluster
self.igsimilarity = igsimilarity
self.threshold = threshold
self.compute_similarity = compute_similarity
self.clustering = clustering
@property
def save_results(self):
return self.root is not None
def fit(self, records, db_name=None):
"""Run docstings."""
        if self.save_results and not os.path.exists(self.root):
            # self.root cannot be None here (save_results requires it), so
            # only the missing output folder needs to be created.
            os.makedirs(self.root)
            logging.warn("Root folder %s did not exist and was created",
                         os.path.abspath(self.root))
if self.save_results:
output_filename = self.tag
output_folder = os.path.join(self.root, output_filename)
# Create exp folder into the root folder
os.makedirs(output_folder)
if self.compute_similarity:
similarity_matrix = compute_similarity_matrix(
records, sparse_mode=True,
igsimilarity=self.igsimilarity)
if self.save_results:
sm_filename = output_filename + '_similarity_matrix.pkl.tz'
try:
pkl.dump(similarity_matrix, gzip.open(
os.path.join(output_folder, sm_filename), 'w+'))
logging.info("Dumped similarity matrix: %s",
os.path.join(output_folder, sm_filename))
except OverflowError:
logging.error("Cannot dump similarity matrix")
logging.info("Start define_clusts function ...")
from icing.core.cluster import define_clusts
labels = define_clusts(
similarity_matrix, threshold=self.threshold,
method=self.cluster)
else:
# use a method which does not require an explicit similarity_matrix
# first, encode the IgRecords into strings
X_string = [x.features for x in records]
X_string = np.array(X_string, dtype=object)
logging.info("Start clonal inference ...")
from icing.core.distances import is_distance
if not is_distance(self.igsimilarity):
raise ValueError("If not computing similarity matrix, "
"you need to use a distance metric. "
"See icing.core.distances")
if self.clustering is None:
raise ValueError("If not computing similarity matrix, "
"you need to pass a clustering method")
self.clustering.metric = partial(self.clustering.metric, X_string)
# Fit on a index array
# see https://github.com/scikit-learn/scikit-learn/issues/3737
# labels = self.clustering.fit_predict(X_string)
labels = self.clustering.fit_predict(
np.arange(X_string.shape[0]).reshape(-1, 1))
# Number of clusters in labels, ignoring noise if present.
n_clones = len(set(labels)) - (1 if -1 in labels else 0)
if self.cluster.lower() == 'ap':
# log only number of clones
logging.critical("Number of clones: %i", n_clones)
else:
logging.critical(
"Number of clones: %i, threshold %.3f", n_clones,
self.threshold)
if self.save_results:
with open(os.path.join(output_folder, 'summary.txt'), 'w') as f:
f.write("filename: %s\n" % db_name)
f.write("clones: %i\n" % n_clones)
cl_filename = output_filename + '_labels.pkl.tz'
pkl.dump([labels, self.threshold], gzip.open(
os.path.join(output_folder, cl_filename), 'w+'))
logging.info("Dumped labels and threshold: %s",
os.path.join(output_folder, cl_filename))
self.output_folder_ = output_folder
clone_dict = {k.id: v for k, v in zip(records, labels)}
self.clone_dict_ = clone_dict
return self
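# Usage sketch (illustrative, not from the original module). It assumes that
# `records` is a sequence of Ig record objects exposing `.id` and `.features`,
# and that `igsimilarity` is the similarity/distance object expected by fit():
#
#   model = DefineClones(tag='run1', root='/tmp/icing_out', cluster='ap',
#                        igsimilarity=igsimilarity, threshold=0.05)
#   model.fit(records, db_name='input_db.tab')
#   clone_of = model.clone_dict_  # maps record id -> clone label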
class ICINGTwoStep(BaseEstimator):
def __init__(self, eps=0.5, model='aa', kmeans_params=None,
dbscan_params=None, method='dbscan', hdbscan_params=None,
dbspark_params=None, verbose=False):
self.eps = eps
self.model = 'aa_' if model == 'aa' else ''
self.dbscan_params = dbscan_params or {}
self.kmeans_params = kmeans_params or dict(n_init=100, n_clusters=100)
self.method = method
self.hdbscan_params = hdbscan_params or {}
self.dbspark_params = dbspark_params or {}
self.verbose = verbose
def fit(self, X, y=None, sample_weight=None):
"""X is a dataframe."""
if self.method not in ("dbscan", "hdbscan", "spark"):
raise ValueError("Unsupported method '%s'" % self.method)
if not self.dbscan_params:
self.dbscan_params = dict(
min_samples=20, n_jobs=-1, algorithm='brute',
metric=partial(distance_dataframe, X, **dict(
junction_dist=StringDistance(),
correct=False, tol=0)))
if not self.hdbscan_params and self.method == 'hdbscan':
self.hdbscan_params = dict(
min_samples=20, n_jobs=-1,
metric=partial(distance_dataframe, X, **dict(
junction_dist=StringDistance(),
correct=False, tol=0)))
self.dbscan_params['eps'] = self.eps
# new part: group by junction and v genes
if self.method == 'hdbscan' and False:
# no grouping; unsupported sample_weight
groups_values = [[x] for x in np.arange(X.shape[0])]
else:
# list of lists
groups_values = X.groupby(
["v_gene_set_str", self.model + "junc"]).groups.values()
idxs = np.array([elem[0] for elem in groups_values]) # take one of them
sample_weight = np.array([len(elem) for elem in groups_values])
X_all = idxs.reshape(-1, 1)
if self.kmeans_params.get('n_clusters', True):
# ensure the number of clusters is higher than points
self.kmeans_params['n_clusters'] = min(
self.kmeans_params['n_clusters'], X_all.shape[0])
kmeans = MiniBatchKMeans(**self.kmeans_params)
lengths = X[self.model + 'junction_length'].values
kmeans.fit(lengths[idxs].reshape(-1, 1))
dbscan_labels = np.zeros_like(kmeans.labels_).ravel()
if self.method == 'hdbscan':
from hdbscan import HDBSCAN
from hdbscan.prediction import all_points_membership_vectors
dbscan_sk = HDBSCAN(**self.hdbscan_params)
else:
dbscan_sk = DBSCAN(**self.dbscan_params)
if self.method == 'spark':
from pyspark import SparkContext
from icing.externals.pypardis import dbscan as dbpard
sc = SparkContext.getOrCreate()
sample_weight_map = dict(zip(idxs, sample_weight))
# self.dbscan_params.pop('n_jobs', None)
dbscan = dbpard.DBSCAN(
dbscan_params=self.dbscan_params,
**self.dbspark_params)
# else:
for i, label in enumerate(np.unique(kmeans.labels_)):
idx_row = np.where(kmeans.labels_ == label)[0]
if self.verbose:
print("Iteration %d/%d" % (i, np.unique(kmeans.labels_).size),
"(%d seqs)" % idx_row.size, end='\r')
X_idx = idxs[idx_row].reshape(-1, 1).astype('float64')
weights = sample_weight[idx_row]
if idx_row.size == 1:
db_labels = np.array([0])
elif self.method == 'spark' and idx_row.size > 5000:
test_data = sc.parallelize(enumerate(X_idx))
dbscan.train(test_data, sample_weight=sample_weight_map)
db_labels = np.array(dbscan.assignments())[:, 1]
elif self.method == 'hdbscan':
db_labels = dbscan_sk.fit_predict(X_idx) # unsupported weights
# avoid noise samples
soft_clusters = all_points_membership_vectors(dbscan_sk)
db_labels = np.array([np.argmax(x) for x in soft_clusters])
else:
db_labels = dbscan_sk.fit_predict(
X_idx, sample_weight=weights)
if len(dbscan_sk.core_sample_indices_) < 1:
db_labels[:] = 0
if -1 in db_labels:
balltree = BallTree(
X_idx[dbscan_sk.core_sample_indices_],
metric=dbscan_sk.metric)
noise_labels = balltree.query(
X_idx[db_labels == -1], k=1, return_distance=False).ravel()
# get labels for core points, then assign to noise points based
# on balltree
dbscan_noise_labels = db_labels[
dbscan_sk.core_sample_indices_][noise_labels]
db_labels[db_labels == -1] = dbscan_noise_labels
# hopefully, there are no noisy samples at this time
db_labels[db_labels > -1] = db_labels[db_labels > -1] + np.max(dbscan_labels) + 1
dbscan_labels[idx_row] = db_labels # + np.max(dbscan_labels) + 1
if self.method == 'spark':
sc.stop()
labels = dbscan_labels
# new part: put together the labels
labels_ext = np.zeros(X.shape[0], dtype=int)
labels_ext[idxs] = labels
for i, list_ in enumerate(groups_values):
labels_ext[list_] = labels[i]
        self.labels_ = labels_ext
        return self
def fit_predict(self, X, y=None, sample_weight=None):
"""Perform clustering on X and returns cluster labels.
Parameters
----------
X : array or sparse (CSR) matrix of shape (n_samples, n_features), or
array of shape (n_samples, n_samples)
A feature array, or array of distances between samples if
``metric='precomputed'``.
sample_weight : array, shape (n_samples,), optional
Weight of each sample, such that a sample with a weight of at least
``min_samples`` is by itself a core sample; a sample with negative
weight may inhibit its eps-neighbor from being core.
Note that weights are absolute, and default to 1.
Returns
-------
y : ndarray, shape (n_samples,)
cluster labels
"""
self.fit(X, sample_weight=sample_weight)
return self.labels_
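# Usage sketch (illustrative, not from the original module). It assumes `df`
# is a pandas DataFrame carrying the columns read by fit() above, namely
# "v_gene_set_str", "aa_junc" and "aa_junction_length" (drop the "aa_" prefix
# when model != 'aa'):
#
#   two_step = ICINGTwoStep(eps=0.3, model='aa', method='dbscan',
#                           kmeans_params=dict(n_init=10, n_clusters=50))
#   labels = two_step.fit_predict(df)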
| bsd-2-clause |
upliftaero/MissionPlanner | Lib/site-packages/numpy/lib/npyio.py | 53 | 59490 | __all__ = ['savetxt', 'loadtxt', 'genfromtxt', 'ndfromtxt', 'mafromtxt',
'recfromtxt', 'recfromcsv', 'load', 'loads', 'save', 'savez',
'savez_compressed', 'packbits', 'unpackbits', 'fromregex', 'DataSource']
import numpy as np
import format
import sys
import os
import itertools
import warnings
from operator import itemgetter
from cPickle import load as _cload, loads
from _datasource import DataSource
if sys.platform != 'cli':
from _compiled_base import packbits, unpackbits
else:
def packbits(*args, **kw):
raise NotImplementedError()
def unpackbits(*args, **kw):
raise NotImplementedError()
from _iotools import LineSplitter, NameValidator, StringConverter, \
ConverterError, ConverterLockError, ConversionWarning, \
_is_string_like, has_nested_fields, flatten_dtype, \
easy_dtype, _bytes_to_name
from numpy.compat import asbytes, asstr, asbytes_nested, bytes
if sys.version_info[0] >= 3:
from io import BytesIO
else:
from cStringIO import StringIO as BytesIO
_string_like = _is_string_like
def seek_gzip_factory(f):
"""Use this factory to produce the class so that we can do a lazy
import on gzip.
"""
import gzip
class GzipFile(gzip.GzipFile):
def seek(self, offset, whence=0):
# figure out new position (we can only seek forwards)
if whence == 1:
offset = self.offset + offset
if whence not in [0, 1]:
raise IOError, "Illegal argument"
if offset < self.offset:
# for negative seek, rewind and do positive seek
self.rewind()
count = offset - self.offset
for i in range(count // 1024):
self.read(1024)
self.read(count % 1024)
def tell(self):
return self.offset
if isinstance(f, str):
f = GzipFile(f)
elif isinstance(f, gzip.GzipFile):
# cast to our GzipFile if its already a gzip.GzipFile
g = GzipFile(fileobj=f.fileobj)
g.name = f.name
g.mode = f.mode
f = g
return f
class BagObj(object):
"""
BagObj(obj)
Convert attribute look-ups to getitems on the object passed in.
Parameters
----------
obj : class instance
Object on which attribute look-up is performed.
Examples
--------
>>> from numpy.lib.npyio import BagObj as BO
>>> class BagDemo(object):
... def __getitem__(self, key): # An instance of BagObj(BagDemo)
... # will call this method when any
... # attribute look-up is required
... result = "Doesn't matter what you want, "
... return result + "you're gonna get this"
...
>>> demo_obj = BagDemo()
>>> bagobj = BO(demo_obj)
>>> bagobj.hello_there
"Doesn't matter what you want, you're gonna get this"
>>> bagobj.I_can_be_anything
"Doesn't matter what you want, you're gonna get this"
"""
def __init__(self, obj):
self._obj = obj
def __getattribute__(self, key):
try:
return object.__getattribute__(self, '_obj')[key]
except KeyError:
raise AttributeError, key
def zipfile_factory(*args, **kwargs):
import zipfile
if sys.version_info >= (2, 5):
kwargs['allowZip64'] = True
return zipfile.ZipFile(*args, **kwargs)
class NpzFile(object):
"""
NpzFile(fid)
A dictionary-like object with lazy-loading of files in the zipped
archive provided on construction.
`NpzFile` is used to load files in the NumPy ``.npz`` data archive
    format. It assumes that files in the archive have a ".npy" extension;
    other files are ignored.
The arrays and file strings are lazily loaded on either
getitem access using ``obj['key']`` or attribute lookup using
``obj.f.key``. A list of all files (without ".npy" extensions) can
be obtained with ``obj.files`` and the ZipFile object itself using
``obj.zip``.
Attributes
----------
files : list of str
List of all files in the archive with a ".npy" extension.
zip : ZipFile instance
The ZipFile object initialized with the zipped archive.
f : BagObj instance
An object on which attribute can be performed as an alternative
to getitem access on the `NpzFile` instance itself.
Parameters
----------
fid : file or str
The zipped archive to open. This is either a file-like object
or a string containing the path to the archive.
own_fid : bool, optional
Whether NpzFile should close the file handle.
Requires that `fid` is a file-like object.
Examples
--------
>>> from tempfile import TemporaryFile
>>> outfile = TemporaryFile()
>>> x = np.arange(10)
>>> y = np.sin(x)
>>> np.savez(outfile, x=x, y=y)
>>> outfile.seek(0)
>>> npz = np.load(outfile)
>>> isinstance(npz, np.lib.io.NpzFile)
True
>>> npz.files
['y', 'x']
>>> npz['x'] # getitem access
array([0, 1, 2, 3, 4, 5, 6, 7, 8, 9])
>>> npz.f.x # attribute lookup
array([0, 1, 2, 3, 4, 5, 6, 7, 8, 9])
"""
def __init__(self, fid, own_fid=False):
# Import is postponed to here since zipfile depends on gzip, an optional
# component of the so-called standard library.
_zip = zipfile_factory(fid)
self._files = _zip.namelist()
self.files = []
for x in self._files:
if x.endswith('.npy'):
self.files.append(x[:-4])
else:
self.files.append(x)
self.zip = _zip
self.f = BagObj(self)
if own_fid:
self.fid = fid
else:
self.fid = None
def close(self):
"""
Close the file.
"""
if self.zip is not None:
self.zip.close()
self.zip = None
if self.fid is not None:
self.fid.close()
self.fid = None
def __del__(self):
self.close()
def __getitem__(self, key):
# FIXME: This seems like it will copy strings around
# more than is strictly necessary. The zipfile
# will read the string and then
# the format.read_array will copy the string
# to another place in memory.
# It would be better if the zipfile could read
# (or at least uncompress) the data
# directly into the array memory.
member = 0
if key in self._files:
member = 1
elif key in self.files:
member = 1
key += '.npy'
if member:
bytes = self.zip.read(key)
if bytes.startswith(format.MAGIC_PREFIX):
value = BytesIO(bytes)
return format.read_array(value)
else:
return bytes
else:
raise KeyError, "%s is not a file in the archive" % key
def __iter__(self):
return iter(self.files)
def items(self):
"""
Return a list of tuples, with each tuple (filename, array in file).
"""
return [(f, self[f]) for f in self.files]
def iteritems(self):
"""Generator that returns tuples (filename, array in file)."""
for f in self.files:
yield (f, self[f])
def keys(self):
"""Return files in the archive with a ".npy" extension."""
return self.files
def iterkeys(self):
"""Return an iterator over the files in the archive."""
return self.__iter__()
def __contains__(self, key):
return self.files.__contains__(key)
def load(file, mmap_mode=None):
"""
Load a pickled, ``.npy``, or ``.npz`` binary file.
Parameters
----------
file : file-like object or string
The file to read. It must support ``seek()`` and ``read()`` methods.
If the filename extension is ``.gz``, the file is first decompressed.
mmap_mode: {None, 'r+', 'r', 'w+', 'c'}, optional
If not None, then memory-map the file, using the given mode
(see `numpy.memmap`). The mode has no effect for pickled or
zipped files.
A memory-mapped array is stored on disk, and not directly loaded
into memory. However, it can be accessed and sliced like any
ndarray. Memory mapping is especially useful for accessing
small fragments of large files without reading the entire file
into memory.
Returns
-------
result : array, tuple, dict, etc.
Data stored in the file.
Raises
------
IOError
If the input file does not exist or cannot be read.
See Also
--------
save, savez, loadtxt
memmap : Create a memory-map to an array stored in a file on disk.
Notes
-----
- If the file contains pickle data, then whatever is stored in the
pickle is returned.
- If the file is a ``.npy`` file, then an array is returned.
- If the file is a ``.npz`` file, then a dictionary-like object is
returned, containing ``{filename: array}`` key-value pairs, one for
each file in the archive.
Examples
--------
Store data to disk, and load it again:
>>> np.save('/tmp/123', np.array([[1, 2, 3], [4, 5, 6]]))
>>> np.load('/tmp/123.npy')
array([[1, 2, 3],
[4, 5, 6]])
Mem-map the stored array, and then access the second row
directly from disk:
>>> X = np.load('/tmp/123.npy', mmap_mode='r')
>>> X[1, :]
memmap([4, 5, 6])
"""
import gzip
own_fid = False
if isinstance(file, basestring):
fid = open(file, "rb")
own_fid = True
elif isinstance(file, gzip.GzipFile):
fid = seek_gzip_factory(file)
own_fid = True
else:
fid = file
try:
# Code to distinguish from NumPy binary files and pickles.
_ZIP_PREFIX = asbytes('PK\x03\x04')
N = len(format.MAGIC_PREFIX)
magic = fid.read(N)
fid.seek(-N, 1) # back-up
if magic.startswith(_ZIP_PREFIX): # zip-file (assume .npz)
own_fid = False
return NpzFile(fid, own_fid=True)
elif magic == format.MAGIC_PREFIX: # .npy file
if mmap_mode:
return format.open_memmap(file, mode=mmap_mode)
else:
return format.read_array(fid)
else: # Try a pickle
try:
return _cload(fid)
except:
raise IOError, \
"Failed to interpret file %s as a pickle" % repr(file)
finally:
if own_fid:
fid.close()
def save(file, arr):
"""
Save an array to a binary file in NumPy ``.npy`` format.
Parameters
----------
file : file or str
File or filename to which the data is saved. If file is a file-object,
then the filename is unchanged. If file is a string, a ``.npy``
extension will be appended to the file name if it does not already
have one.
arr : array_like
Array data to be saved.
See Also
--------
savez : Save several arrays into a ``.npz`` archive
savetxt, load
Notes
-----
For a description of the ``.npy`` format, see `format`.
Examples
--------
>>> from tempfile import TemporaryFile
>>> outfile = TemporaryFile()
>>> x = np.arange(10)
>>> np.save(outfile, x)
>>> outfile.seek(0) # Only needed here to simulate closing & reopening file
>>> np.load(outfile)
array([0, 1, 2, 3, 4, 5, 6, 7, 8, 9])
"""
own_fid = False
if isinstance(file, basestring):
if not file.endswith('.npy'):
file = file + '.npy'
fid = open(file, "wb")
own_fid = True
else:
fid = file
try:
arr = np.asanyarray(arr)
format.write_array(fid, arr)
finally:
if own_fid:
fid.close()
def savez(file, *args, **kwds):
"""
Save several arrays into a single file in uncompressed ``.npz`` format.
If arguments are passed in with no keywords, the corresponding variable
names, in the .npz file, are 'arr_0', 'arr_1', etc. If keyword arguments
are given, the corresponding variable names, in the ``.npz`` file will
match the keyword names.
Parameters
----------
file : str or file
Either the file name (string) or an open file (file-like object)
where the data will be saved. If file is a string, the ``.npz``
extension will be appended to the file name if it is not already there.
*args : Arguments, optional
Arrays to save to the file. Since it is not possible for Python to
know the names of the arrays outside `savez`, the arrays will be saved
with names "arr_0", "arr_1", and so on. These arguments can be any
expression.
**kwds : Keyword arguments, optional
Arrays to save to the file. Arrays will be saved in the file with the
keyword names.
Returns
-------
None
See Also
--------
save : Save a single array to a binary file in NumPy format.
savetxt : Save an array to a file as plain text.
Notes
-----
The ``.npz`` file format is a zipped archive of files named after the
variables they contain. The archive is not compressed and each file
in the archive contains one variable in ``.npy`` format. For a
description of the ``.npy`` format, see `format`.
When opening the saved ``.npz`` file with `load` a `NpzFile` object is
returned. This is a dictionary-like object which can be queried for
its list of arrays (with the ``.files`` attribute), and for the arrays
themselves.
Examples
--------
>>> from tempfile import TemporaryFile
>>> outfile = TemporaryFile()
>>> x = np.arange(10)
>>> y = np.sin(x)
Using `savez` with *args, the arrays are saved with default names.
>>> np.savez(outfile, x, y)
>>> outfile.seek(0) # Only needed here to simulate closing & reopening file
>>> npzfile = np.load(outfile)
>>> npzfile.files
['arr_1', 'arr_0']
>>> npzfile['arr_0']
array([0, 1, 2, 3, 4, 5, 6, 7, 8, 9])
Using `savez` with **kwds, the arrays are saved with the keyword names.
>>> outfile = TemporaryFile()
>>> np.savez(outfile, x=x, y=y)
>>> outfile.seek(0)
>>> npzfile = np.load(outfile)
>>> npzfile.files
['y', 'x']
>>> npzfile['x']
array([0, 1, 2, 3, 4, 5, 6, 7, 8, 9])
See Also
--------
numpy.savez_compressed : Save several arrays into a compressed .npz file format
"""
_savez(file, args, kwds, False)
def savez_compressed(file, *args, **kwds):
"""
Save several arrays into a single file in compressed ``.npz`` format.
If keyword arguments are given, then filenames are taken from the keywords.
If arguments are passed in with no keywords, then stored file names are
arr_0, arr_1, etc.
Parameters
----------
file : string
File name of .npz file.
args : Arguments
Function arguments.
kwds : Keyword arguments
Keywords.
See Also
--------
numpy.savez : Save several arrays into an uncompressed .npz file format
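    Examples
    --------
    Illustrative sketch (assumed to mirror the uncompressed `savez` case):
    >>> from tempfile import TemporaryFile
    >>> outfile = TemporaryFile()
    >>> x = np.arange(10)
    >>> y = np.sin(x)
    >>> np.savez_compressed(outfile, x=x, y=y)
    >>> outfile.seek(0) # Only needed here to simulate closing & reopening file
    >>> npzfile = np.load(outfile)
    >>> sorted(npzfile.files)
    ['x', 'y']
    >>> npzfile['x']
    array([0, 1, 2, 3, 4, 5, 6, 7, 8, 9])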
"""
_savez(file, args, kwds, True)
def _savez(file, args, kwds, compress):
# Import is postponed to here since zipfile depends on gzip, an optional
# component of the so-called standard library.
import zipfile
# Import deferred for startup time improvement
import tempfile
if isinstance(file, basestring):
if not file.endswith('.npz'):
file = file + '.npz'
namedict = kwds
for i, val in enumerate(args):
key = 'arr_%d' % i
if key in namedict.keys():
raise ValueError, "Cannot use un-named variables and keyword %s" % key
namedict[key] = val
if compress:
compression = zipfile.ZIP_DEFLATED
else:
compression = zipfile.ZIP_STORED
zip = zipfile_factory(file, mode="w", compression=compression)
# Stage arrays in a temporary file on disk, before writing to zip.
fd, tmpfile = tempfile.mkstemp(suffix='-numpy.npy')
os.close(fd)
try:
for key, val in namedict.iteritems():
fname = key + '.npy'
fid = open(tmpfile, 'wb')
try:
format.write_array(fid, np.asanyarray(val))
fid.close()
fid = None
zip.write(tmpfile, arcname=fname)
finally:
if fid:
fid.close()
finally:
os.remove(tmpfile)
zip.close()
# Adapted from matplotlib
def _getconv(dtype):
typ = dtype.type
if issubclass(typ, np.bool_):
return lambda x: bool(int(x))
if issubclass(typ, np.integer):
return lambda x: int(float(x))
elif issubclass(typ, np.floating):
return float
elif issubclass(typ, np.complex):
return complex
elif issubclass(typ, np.bytes_):
return bytes
else:
return str
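# For example (illustrative): _getconv(np.dtype(bool))("0") -> False and
# _getconv(np.dtype(int))("3.0") -> 3, so loadtxt can parse "3.0" into an
# integer column.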
def loadtxt(fname, dtype=float, comments='#', delimiter=None,
converters=None, skiprows=0, usecols=None, unpack=False):
"""
Load data from a text file.
Each row in the text file must have the same number of values.
Parameters
----------
fname : file or str
File or filename to read. If the filename extension is ``.gz`` or
``.bz2``, the file is first decompressed.
dtype : data-type, optional
Data-type of the resulting array; default: float. If this is a record
data-type, the resulting array will be 1-dimensional, and each row
will be interpreted as an element of the array. In this case, the
number of columns used must match the number of fields in the
data-type.
comments : str, optional
The character used to indicate the start of a comment; default: '#'.
delimiter : str, optional
The string used to separate values. By default, this is any
whitespace.
converters : dict, optional
A dictionary mapping column number to a function that will convert
that column to a float. E.g., if column 0 is a date string:
``converters = {0: datestr2num}``. Converters can also be used to
provide a default value for missing data:
``converters = {3: lambda s: float(s or 0)}``. Default: None.
skiprows : int, optional
Skip the first `skiprows` lines; default: 0.
usecols : sequence, optional
Which columns to read, with 0 being the first. For example,
``usecols = (1,4,5)`` will extract the 2nd, 5th and 6th columns.
The default, None, results in all columns being read.
unpack : bool, optional
If True, the returned array is transposed, so that arguments may be
unpacked using ``x, y, z = loadtxt(...)``. The default is False.
Returns
-------
out : ndarray
Data read from the text file.
See Also
--------
load, fromstring, fromregex
genfromtxt : Load data with missing values handled as specified.
scipy.io.loadmat : reads MATLAB data files
Notes
-----
This function aims to be a fast reader for simply formatted files. The
`genfromtxt` function provides more sophisticated handling of, e.g.,
lines with missing values.
Examples
--------
>>> from StringIO import StringIO # StringIO behaves like a file object
>>> c = StringIO("0 1\\n2 3")
>>> np.loadtxt(c)
array([[ 0., 1.],
[ 2., 3.]])
>>> d = StringIO("M 21 72\\nF 35 58")
>>> np.loadtxt(d, dtype={'names': ('gender', 'age', 'weight'),
... 'formats': ('S1', 'i4', 'f4')})
array([('M', 21, 72.0), ('F', 35, 58.0)],
dtype=[('gender', '|S1'), ('age', '<i4'), ('weight', '<f4')])
>>> c = StringIO("1,0,2\\n3,0,4")
>>> x, y = np.loadtxt(c, delimiter=',', usecols=(0, 2), unpack=True)
>>> x
array([ 1., 3.])
>>> y
array([ 2., 4.])
"""
# Type conversions for Py3 convenience
comments = asbytes(comments)
if delimiter is not None:
delimiter = asbytes(delimiter)
user_converters = converters
if usecols is not None:
usecols = list(usecols)
own_fh = False
if _is_string_like(fname):
own_fh = True
if fname.endswith('.gz'):
fh = seek_gzip_factory(fname)
elif fname.endswith('.bz2'):
import bz2
fh = bz2.BZ2File(fname)
else:
fh = open(fname, 'U')
elif hasattr(fname, 'readline'):
fh = fname
else:
raise ValueError('fname must be a string or file handle')
X = []
def flatten_dtype(dt):
"""Unpack a structured data-type."""
if dt.names is None:
# If the dtype is flattened, return.
# If the dtype has a shape, the dtype occurs
# in the list more than once.
return [dt.base] * int(np.prod(dt.shape))
else:
types = []
for field in dt.names:
tp, bytes = dt.fields[field]
flat_dt = flatten_dtype(tp)
types.extend(flat_dt)
return types
def split_line(line):
"""Chop off comments, strip, and split at delimiter."""
line = asbytes(line).split(comments)[0].strip()
if line:
return line.split(delimiter)
else:
return []
try:
# Make sure we're dealing with a proper dtype
dtype = np.dtype(dtype)
defconv = _getconv(dtype)
# Skip the first `skiprows` lines
for i in xrange(skiprows):
fh.readline()
# Read until we find a line with some values, and use
# it to estimate the number of columns, N.
first_vals = None
while not first_vals:
first_line = fh.readline()
if not first_line: # EOF reached
raise IOError('End-of-file reached before encountering data.')
first_vals = split_line(first_line)
N = len(usecols or first_vals)
dtype_types = flatten_dtype(dtype)
if len(dtype_types) > 1:
# We're dealing with a structured array, each field of
# the dtype matches a column
converters = [_getconv(dt) for dt in dtype_types]
else:
# All fields have the same dtype
converters = [defconv for i in xrange(N)]
# By preference, use the converters specified by the user
for i, conv in (user_converters or {}).iteritems():
if usecols:
try:
i = usecols.index(i)
except ValueError:
# Unused converter specified
continue
converters[i] = conv
# Parse each line, including the first
for i, line in enumerate(itertools.chain([first_line], fh)):
vals = split_line(line)
if len(vals) == 0:
continue
if usecols:
vals = [vals[i] for i in usecols]
# Convert each value according to its column and store
X.append(tuple([conv(val) for (conv, val) in zip(converters, vals)]))
finally:
if own_fh:
fh.close()
if len(dtype_types) > 1:
# We're dealing with a structured array, with a dtype such as
# [('x', int), ('y', [('s', int), ('t', float)])]
#
# First, create the array using a flattened dtype:
# [('x', int), ('s', int), ('t', float)]
#
# Then, view the array using the specified dtype.
try:
X = np.array(X, dtype=np.dtype([('', t) for t in dtype_types]))
X = X.view(dtype)
except TypeError:
# In the case we have an object dtype
X = np.array(X, dtype=dtype)
else:
X = np.array(X, dtype)
X = np.squeeze(X)
if unpack:
return X.T
else:
return X
def savetxt(fname, X, fmt='%.18e', delimiter=' ', newline='\n'):
"""
Save an array to a text file.
Parameters
----------
fname : filename or file handle
If the filename ends in ``.gz``, the file is automatically saved in
compressed gzip format. `loadtxt` understands gzipped files
transparently.
X : array_like
Data to be saved to a text file.
fmt : str or sequence of strs
A single format (%10.5f), a sequence of formats, or a
multi-format string, e.g. 'Iteration %d -- %10.5f', in which
case `delimiter` is ignored.
delimiter : str
Character separating columns.
newline : str
.. versionadded:: 1.5.0
Character separating lines.
See Also
--------
save : Save an array to a binary file in NumPy ``.npy`` format
savez : Save several arrays into a ``.npz`` compressed archive
Notes
-----
Further explanation of the `fmt` parameter
(``%[flag]width[.precision]specifier``):
flags:
``-`` : left justify
``+`` : Forces to preceed result with + or -.
``0`` : Left pad the number with zeros instead of space (see width).
width:
Minimum number of characters to be printed. The value is not truncated
if it has more characters.
precision:
- For integer specifiers (eg. ``d,i,o,x``), the minimum number of
digits.
- For ``e, E`` and ``f`` specifiers, the number of digits to print
after the decimal point.
- For ``g`` and ``G``, the maximum number of significant digits.
- For ``s``, the maximum number of characters.
specifiers:
``c`` : character
``d`` or ``i`` : signed decimal integer
``e`` or ``E`` : scientific notation with ``e`` or ``E``.
``f`` : decimal floating point
``g,G`` : use the shorter of ``e,E`` or ``f``
``o`` : signed octal
``s`` : string of characters
``u`` : unsigned decimal integer
``x,X`` : unsigned hexadecimal integer
This explanation of ``fmt`` is not complete, for an exhaustive
specification see [1]_.
References
----------
.. [1] `Format Specification Mini-Language
<http://docs.python.org/library/string.html#
format-specification-mini-language>`_, Python Documentation.
Examples
--------
>>> x = y = z = np.arange(0.0,5.0,1.0)
>>> np.savetxt('test.out', x, delimiter=',') # X is an array
>>> np.savetxt('test.out', (x,y,z)) # x,y,z equal sized 1D arrays
>>> np.savetxt('test.out', x, fmt='%1.4e') # use exponential notation
"""
# Py3 conversions first
if isinstance(fmt, bytes):
fmt = asstr(fmt)
delimiter = asstr(delimiter)
own_fh = False
if _is_string_like(fname):
own_fh = True
if fname.endswith('.gz'):
import gzip
fh = gzip.open(fname, 'wb')
else:
if sys.version_info[0] >= 3:
fh = open(fname, 'wb')
else:
fh = open(fname, 'w')
elif hasattr(fname, 'seek'):
fh = fname
else:
raise ValueError('fname must be a string or file handle')
try:
X = np.asarray(X)
# Handle 1-dimensional arrays
if X.ndim == 1:
# Common case -- 1d array of numbers
if X.dtype.names is None:
X = np.atleast_2d(X).T
ncol = 1
# Complex dtype -- each field indicates a separate column
else:
ncol = len(X.dtype.descr)
else:
ncol = X.shape[1]
# `fmt` can be a string with multiple insertion points or a
        # list of formats.  E.g. '%10.5f\t%10d' or ('%10.5f', '%10d')
if type(fmt) in (list, tuple):
if len(fmt) != ncol:
raise AttributeError('fmt has wrong shape. %s' % str(fmt))
format = asstr(delimiter).join(map(asstr, fmt))
elif type(fmt) is str:
if fmt.count('%') == 1:
fmt = [fmt, ]*ncol
format = delimiter.join(fmt)
elif fmt.count('%') != ncol:
raise AttributeError('fmt has wrong number of %% formats. %s'
% fmt)
else:
format = fmt
for row in X:
fh.write(asbytes(format % tuple(row) + newline))
finally:
if own_fh:
fh.close()
import re
def fromregex(file, regexp, dtype):
"""
Construct an array from a text file, using regular expression parsing.
The returned array is always a structured array, and is constructed from
all matches of the regular expression in the file. Groups in the regular
expression are converted to fields of the structured array.
Parameters
----------
file : str or file
File name or file object to read.
regexp : str or regexp
Regular expression used to parse the file.
Groups in the regular expression correspond to fields in the dtype.
dtype : dtype or list of dtypes
Dtype for the structured array.
Returns
-------
output : ndarray
The output array, containing the part of the content of `file` that
was matched by `regexp`. `output` is always a structured array.
Raises
------
TypeError
When `dtype` is not a valid dtype for a structured array.
See Also
--------
fromstring, loadtxt
Notes
-----
Dtypes for structured arrays can be specified in several forms, but all
forms specify at least the data type and field name. For details see
`doc.structured_arrays`.
Examples
--------
>>> f = open('test.dat', 'w')
>>> f.write("1312 foo\\n1534 bar\\n444 qux")
>>> f.close()
>>> regexp = r"(\\d+)\\s+(...)" # match [digits, whitespace, anything]
>>> output = np.fromregex('test.dat', regexp,
... [('num', np.int64), ('key', 'S3')])
>>> output
array([(1312L, 'foo'), (1534L, 'bar'), (444L, 'qux')],
dtype=[('num', '<i8'), ('key', '|S3')])
>>> output['num']
array([1312, 1534, 444], dtype=int64)
"""
own_fh = False
if not hasattr(file, "read"):
file = open(file, 'rb')
own_fh = True
try:
if not hasattr(regexp, 'match'):
regexp = re.compile(asbytes(regexp))
if not isinstance(dtype, np.dtype):
dtype = np.dtype(dtype)
seq = regexp.findall(file.read())
if seq and not isinstance(seq[0], tuple):
# Only one group is in the regexp.
# Create the new array as a single data-type and then
# re-interpret as a single-field structured array.
newdtype = np.dtype(dtype[dtype.names[0]])
output = np.array(seq, dtype=newdtype)
output.dtype = dtype
else:
output = np.array(seq, dtype=dtype)
return output
finally:
if own_fh:
            file.close()
#####--------------------------------------------------------------------------
#---- --- ASCII functions ---
#####--------------------------------------------------------------------------
def genfromtxt(fname, dtype=float, comments='#', delimiter=None,
skiprows=0, skip_header=0, skip_footer=0, converters=None,
missing='', missing_values=None, filling_values=None,
usecols=None, names=None,
excludelist=None, deletechars=None, replace_space='_',
autostrip=False, case_sensitive=True, defaultfmt="f%i",
unpack=None, usemask=False, loose=True, invalid_raise=True):
"""
Load data from a text file, with missing values handled as specified.
Each line past the first `skiprows` lines is split at the `delimiter`
character, and characters following the `comments` character are discarded.
Parameters
----------
fname : file or str
File or filename to read. If the filename extension is `.gz` or
`.bz2`, the file is first decompressed.
dtype : dtype, optional
Data type of the resulting array.
If None, the dtypes will be determined by the contents of each
column, individually.
comments : str, optional
The character used to indicate the start of a comment.
All the characters occurring on a line after a comment are discarded
delimiter : str, int, or sequence, optional
The string used to separate values. By default, any consecutive
whitespaces act as delimiter. An integer or sequence of integers
can also be provided as width(s) of each field.
skip_header : int, optional
The numbers of lines to skip at the beginning of the file.
skip_footer : int, optional
The numbers of lines to skip at the end of the file
converters : variable or None, optional
The set of functions that convert the data of a column to a value.
The converters can also be used to provide a default value
for missing data: ``converters = {3: lambda s: float(s or 0)}``.
missing_values : variable or None, optional
The set of strings corresponding to missing data.
filling_values : variable or None, optional
The set of values to be used as default when the data are missing.
usecols : sequence or None, optional
Which columns to read, with 0 being the first. For example,
``usecols = (1, 4, 5)`` will extract the 2nd, 5th and 6th columns.
names : {None, True, str, sequence}, optional
If `names` is True, the field names are read from the first valid line
after the first `skiprows` lines.
If `names` is a sequence or a single-string of comma-separated names,
the names will be used to define the field names in a structured dtype.
If `names` is None, the names of the dtype fields will be used, if any.
excludelist : sequence, optional
A list of names to exclude. This list is appended to the default list
['return','file','print']. Excluded names are appended an underscore:
for example, `file` would become `file_`.
deletechars : str, optional
A string combining invalid characters that must be deleted from the
names.
defaultfmt : str, optional
A format used to define default field names, such as "f%i" or "f_%02i".
autostrip : bool, optional
Whether to automatically strip white spaces from the variables.
replace_space : char, optional
Character(s) used in replacement of white spaces in the variables names.
By default, use a '_'.
case_sensitive : {True, False, 'upper', 'lower'}, optional
If True, field names are case sensitive.
If False or 'upper', field names are converted to upper case.
If 'lower', field names are converted to lower case.
unpack : bool, optional
If True, the returned array is transposed, so that arguments may be
unpacked using ``x, y, z = loadtxt(...)``
usemask : bool, optional
If True, return a masked array.
If False, return a regular array.
invalid_raise : bool, optional
If True, an exception is raised if an inconsistency is detected in the
number of columns.
If False, a warning is emitted and the offending lines are skipped.
Returns
-------
out : ndarray
Data read from the text file. If `usemask` is True, this is a
masked array.
See Also
--------
numpy.loadtxt : equivalent function when no data is missing.
Notes
-----
* When spaces are used as delimiters, or when no delimiter has been given
as input, there should not be any missing data between two fields.
    * When the variables are named (either by a flexible dtype or with `names`),
there must not be any header in the file (else a ValueError
exception is raised).
* Individual values are not stripped of spaces by default.
When using a custom converter, make sure the function does remove spaces.
Examples
---------
>>> from StringIO import StringIO
>>> import numpy as np
Comma delimited file with mixed dtype
>>> s = StringIO("1,1.3,abcde")
>>> data = np.genfromtxt(s, dtype=[('myint','i8'),('myfloat','f8'),
... ('mystring','S5')], delimiter=",")
>>> data
array((1, 1.3, 'abcde'),
dtype=[('myint', '<i8'), ('myfloat', '<f8'), ('mystring', '|S5')])
Using dtype = None
>>> s.seek(0) # needed for StringIO example only
>>> data = np.genfromtxt(s, dtype=None,
... names = ['myint','myfloat','mystring'], delimiter=",")
>>> data
array((1, 1.3, 'abcde'),
dtype=[('myint', '<i8'), ('myfloat', '<f8'), ('mystring', '|S5')])
Specifying dtype and names
>>> s.seek(0)
>>> data = np.genfromtxt(s, dtype="i8,f8,S5",
... names=['myint','myfloat','mystring'], delimiter=",")
>>> data
array((1, 1.3, 'abcde'),
dtype=[('myint', '<i8'), ('myfloat', '<f8'), ('mystring', '|S5')])
An example with fixed-width columns
>>> s = StringIO("11.3abcde")
>>> data = np.genfromtxt(s, dtype=None, names=['intvar','fltvar','strvar'],
... delimiter=[1,3,5])
>>> data
array((1, 1.3, 'abcde'),
dtype=[('intvar', '<i8'), ('fltvar', '<f8'), ('strvar', '|S5')])
"""
# Py3 data conversions to bytes, for convenience
comments = asbytes(comments)
if isinstance(delimiter, unicode):
delimiter = asbytes(delimiter)
if isinstance(missing, unicode):
missing = asbytes(missing)
if isinstance(missing_values, (unicode, list, tuple)):
missing_values = asbytes_nested(missing_values)
#
if usemask:
from numpy.ma import MaskedArray, make_mask_descr
# Check the input dictionary of converters
user_converters = converters or {}
if not isinstance(user_converters, dict):
errmsg = "The input argument 'converter' should be a valid dictionary "\
"(got '%s' instead)"
raise TypeError(errmsg % type(user_converters))
# Initialize the filehandle, the LineSplitter and the NameValidator
own_fhd = False
if isinstance(fname, basestring):
fhd = np.lib._datasource.open(fname, 'U')
own_fhd = True
elif not hasattr(fname, 'read'):
raise TypeError("The input should be a string or a filehandle. "\
"(got %s instead)" % type(fname))
else:
fhd = fname
split_line = LineSplitter(delimiter=delimiter, comments=comments,
autostrip=autostrip)._handyman
validate_names = NameValidator(excludelist=excludelist,
deletechars=deletechars,
case_sensitive=case_sensitive,
replace_space=replace_space)
# Get the first valid lines after the first skiprows ones ..
if skiprows:
warnings.warn("The use of `skiprows` is deprecated.\n"\
"Please use `skip_header` instead.",
DeprecationWarning)
skip_header = skiprows
# Skip the first `skip_header` rows
for i in xrange(skip_header):
fhd.readline()
# Keep on until we find the first valid values
first_values = None
while not first_values:
first_line = fhd.readline()
if not first_line:
raise IOError('End-of-file reached before encountering data.')
if names is True:
if comments in first_line:
first_line = asbytes('').join(first_line.split(comments)[1:])
first_values = split_line(first_line)
# Should we take the first values as names ?
if names is True:
fval = first_values[0].strip()
if fval in comments:
del first_values[0]
# Check the columns to use: make sure `usecols` is a list
if usecols is not None:
try:
usecols = [_.strip() for _ in usecols.split(",")]
except AttributeError:
try:
usecols = list(usecols)
except TypeError:
usecols = [usecols, ]
nbcols = len(usecols or first_values)
# Check the names and overwrite the dtype.names if needed
if names is True:
names = validate_names([_bytes_to_name(_.strip())
for _ in first_values])
first_line = asbytes('')
elif _is_string_like(names):
names = validate_names([_.strip() for _ in names.split(',')])
elif names:
names = validate_names(names)
# Get the dtype
if dtype is not None:
dtype = easy_dtype(dtype, defaultfmt=defaultfmt, names=names)
# Make sure the names is a list (for 2.5)
if names is not None:
names = list(names)
if usecols:
for (i, current) in enumerate(usecols):
# if usecols is a list of names, convert to a list of indices
if _is_string_like(current):
usecols[i] = names.index(current)
elif current < 0:
usecols[i] = current + len(first_values)
# If the dtype is not None, make sure we update it
if (dtype is not None) and (len(dtype) > nbcols):
descr = dtype.descr
dtype = np.dtype([descr[_] for _ in usecols])
names = list(dtype.names)
# If `names` is not None, update the names
elif (names is not None) and (len(names) > nbcols):
names = [names[_] for _ in usecols]
elif (names is not None) and (dtype is not None):
names = dtype.names
# Process the missing values ...............................
# Rename missing_values for convenience
user_missing_values = missing_values or ()
# Define the list of missing_values (one column: one list)
missing_values = [list([asbytes('')]) for _ in range(nbcols)]
# We have a dictionary: process it field by field
if isinstance(user_missing_values, dict):
# Loop on the items
for (key, val) in user_missing_values.items():
# Is the key a string ?
if _is_string_like(key):
try:
# Transform it into an integer
key = names.index(key)
except ValueError:
# We couldn't find it: the name must have been dropped, then
continue
# Redefine the key as needed if it's a column number
if usecols:
try:
key = usecols.index(key)
except ValueError:
pass
# Transform the value as a list of string
if isinstance(val, (list, tuple)):
val = [str(_) for _ in val]
else:
val = [str(val), ]
# Add the value(s) to the current list of missing
if key is None:
# None acts as default
for miss in missing_values:
miss.extend(val)
else:
missing_values[key].extend(val)
# We have a sequence : each item matches a column
elif isinstance(user_missing_values, (list, tuple)):
for (value, entry) in zip(user_missing_values, missing_values):
value = str(value)
if value not in entry:
entry.append(value)
# We have a string : apply it to all entries
elif isinstance(user_missing_values, bytes):
user_value = user_missing_values.split(asbytes(","))
for entry in missing_values:
entry.extend(user_value)
# We have something else: apply it to all entries
else:
for entry in missing_values:
entry.extend([str(user_missing_values)])
# Process the deprecated `missing`
if missing != asbytes(''):
warnings.warn("The use of `missing` is deprecated.\n"\
"Please use `missing_values` instead.",
DeprecationWarning)
values = [str(_) for _ in missing.split(asbytes(","))]
for entry in missing_values:
entry.extend(values)
# Process the filling_values ...............................
# Rename the input for convenience
user_filling_values = filling_values or []
# Define the default
filling_values = [None] * nbcols
# We have a dictionary : update each entry individually
if isinstance(user_filling_values, dict):
for (key, val) in user_filling_values.items():
if _is_string_like(key):
try:
# Transform it into an integer
key = names.index(key)
except ValueError:
# We couldn't find it: the name must have been dropped, then
continue
# Redefine the key if it's a column number and usecols is defined
if usecols:
try:
key = usecols.index(key)
except ValueError:
pass
# Add the value to the list
filling_values[key] = val
# We have a sequence : update on a one-to-one basis
elif isinstance(user_filling_values, (list, tuple)):
n = len(user_filling_values)
if (n <= nbcols):
filling_values[:n] = user_filling_values
else:
filling_values = user_filling_values[:nbcols]
# We have something else : use it for all entries
else:
filling_values = [user_filling_values] * nbcols
# Initialize the converters ................................
if dtype is None:
# Note: we can't use a [...]*nbcols, as we would have 3 times the same
# ... converter, instead of 3 different converters.
converters = [StringConverter(None, missing_values=miss, default=fill)
for (miss, fill) in zip(missing_values, filling_values)]
else:
dtype_flat = flatten_dtype(dtype, flatten_base=True)
# Initialize the converters
if len(dtype_flat) > 1:
# Flexible type : get a converter from each dtype
zipit = zip(dtype_flat, missing_values, filling_values)
converters = [StringConverter(dt, locked=True,
missing_values=miss, default=fill)
for (dt, miss, fill) in zipit]
else:
# Set to a default converter (but w/ different missing values)
zipit = zip(missing_values, filling_values)
converters = [StringConverter(dtype, locked=True,
missing_values=miss, default=fill)
for (miss, fill) in zipit]
# Update the converters to use the user-defined ones
uc_update = []
for (i, conv) in user_converters.items():
# If the converter is specified by column names, use the index instead
if _is_string_like(i):
try:
i = names.index(i)
except ValueError:
continue
elif usecols:
try:
i = usecols.index(i)
except ValueError:
# Unused converter specified
continue
# Find the value to test:
if len(first_line):
testing_value = first_values[i]
else:
testing_value = None
converters[i].update(conv, locked=True,
testing_value=testing_value,
default=filling_values[i],
missing_values=missing_values[i],)
uc_update.append((i, conv))
# Make sure we have the corrected keys in user_converters...
user_converters.update(uc_update)
miss_chars = [_.missing_values for _ in converters]
# Initialize the output lists ...
# ... rows
rows = []
append_to_rows = rows.append
# ... masks
if usemask:
masks = []
append_to_masks = masks.append
# ... invalid
invalid = []
append_to_invalid = invalid.append
# Parse each line
for (i, line) in enumerate(itertools.chain([first_line, ], fhd)):
values = split_line(line)
nbvalues = len(values)
# Skip an empty line
if nbvalues == 0:
continue
# Select only the columns we need
if usecols:
try:
values = [values[_] for _ in usecols]
except IndexError:
append_to_invalid((i + skip_header + 1, nbvalues))
continue
elif nbvalues != nbcols:
append_to_invalid((i + skip_header + 1, nbvalues))
continue
# Store the values
append_to_rows(tuple(values))
if usemask:
append_to_masks(tuple([v.strip() in m
for (v, m) in zip(values, missing_values)]))
if own_fhd:
fhd.close()
# Upgrade the converters (if needed)
if dtype is None:
for (i, converter) in enumerate(converters):
current_column = map(itemgetter(i), rows)
try:
converter.iterupgrade(current_column)
except ConverterLockError:
errmsg = "Converter #%i is locked and cannot be upgraded: " % i
current_column = itertools.imap(itemgetter(i), rows)
for (j, value) in enumerate(current_column):
try:
converter.upgrade(value)
except (ConverterError, ValueError):
errmsg += "(occurred line #%i for value '%s')"
errmsg %= (j + 1 + skip_header, value)
raise ConverterError(errmsg)
# Check that we don't have invalid values
nbinvalid = len(invalid)
if nbinvalid > 0:
nbrows = len(rows) + nbinvalid - skip_footer
# Construct the error message
template = " Line #%%i (got %%i columns instead of %i)" % nbcols
if skip_footer > 0:
nbinvalid_skipped = len([_ for _ in invalid
if _[0] > nbrows + skip_header])
invalid = invalid[:nbinvalid - nbinvalid_skipped]
skip_footer -= nbinvalid_skipped
#
# nbrows -= skip_footer
# errmsg = [template % (i, nb)
# for (i, nb) in invalid if i < nbrows]
# else:
errmsg = [template % (i, nb)
for (i, nb) in invalid]
if len(errmsg):
errmsg.insert(0, "Some errors were detected !")
errmsg = "\n".join(errmsg)
# Raise an exception ?
if invalid_raise:
raise ValueError(errmsg)
# Issue a warning ?
else:
warnings.warn(errmsg, ConversionWarning)
# Strip the last skip_footer data
if skip_footer > 0:
rows = rows[:-skip_footer]
if usemask:
masks = masks[:-skip_footer]
# Convert each value according to the converter:
# We want to modify the list in place to avoid creating a new one...
# if loose:
# conversionfuncs = [conv._loose_call for conv in converters]
# else:
# conversionfuncs = [conv._strict_call for conv in converters]
# for (i, vals) in enumerate(rows):
# rows[i] = tuple([convert(val)
# for (convert, val) in zip(conversionfuncs, vals)])
if loose:
rows = zip(*[map(converter._loose_call, map(itemgetter(i), rows))
for (i, converter) in enumerate(converters)])
else:
rows = zip(*[map(converter._strict_call, map(itemgetter(i), rows))
for (i, converter) in enumerate(converters)])
# Reset the dtype
data = rows
if dtype is None:
# Get the dtypes from the types of the converters
column_types = [conv.type for conv in converters]
# Find the columns with strings...
strcolidx = [i for (i, v) in enumerate(column_types)
if v in (type('S'), np.string_)]
# ... and take the largest number of chars.
for i in strcolidx:
column_types[i] = "|S%i" % max(len(row[i]) for row in data)
#
if names is None:
# If the dtype is uniform, don't define names, else use ''
base = set([c.type for c in converters if c._checked])
if len(base) == 1:
(ddtype, mdtype) = (list(base)[0], np.bool)
else:
ddtype = [(defaultfmt % i, dt)
for (i, dt) in enumerate(column_types)]
if usemask:
mdtype = [(defaultfmt % i, np.bool)
for (i, dt) in enumerate(column_types)]
else:
ddtype = zip(names, column_types)
mdtype = zip(names, [np.bool] * len(column_types))
output = np.array(data, dtype=ddtype)
if usemask:
outputmask = np.array(masks, dtype=mdtype)
else:
# Overwrite the initial dtype names if needed
if names and dtype.names:
dtype.names = names
# Case 1. We have a structured type
if len(dtype_flat) > 1:
# Nested dtype, eg [('a', int), ('b', [('b0', int), ('b1', 'f4')])]
# First, create the array using a flattened dtype:
# [('a', int), ('b1', int), ('b2', float)]
# Then, view the array using the specified dtype.
if 'O' in (_.char for _ in dtype_flat):
if has_nested_fields(dtype):
errmsg = "Nested fields involving objects "\
"are not supported..."
raise NotImplementedError(errmsg)
else:
output = np.array(data, dtype=dtype)
else:
rows = np.array(data, dtype=[('', _) for _ in dtype_flat])
output = rows.view(dtype)
# Now, process the rowmasks the same way
if usemask:
rowmasks = np.array(masks,
dtype=np.dtype([('', np.bool)
for t in dtype_flat]))
# Construct the new dtype
mdtype = make_mask_descr(dtype)
outputmask = rowmasks.view(mdtype)
# Case #2. We have a basic dtype
else:
# We used some user-defined converters
if user_converters:
ishomogeneous = True
descr = []
for (i, ttype) in enumerate([conv.type for conv in converters]):
# Keep the dtype of the current converter
if i in user_converters:
ishomogeneous &= (ttype == dtype.type)
if ttype == np.string_:
ttype = "|S%i" % max(len(row[i]) for row in data)
descr.append(('', ttype))
else:
descr.append(('', dtype))
# So we changed the dtype ?
if not ishomogeneous:
# We have more than one field
if len(descr) > 1:
dtype = np.dtype(descr)
# We have only one field: drop the name if not needed.
else:
dtype = np.dtype(ttype)
#
output = np.array(data, dtype)
if usemask:
if dtype.names:
mdtype = [(_, np.bool) for _ in dtype.names]
else:
mdtype = np.bool
outputmask = np.array(masks, dtype=mdtype)
# Try to take care of the missing data we missed
names = output.dtype.names
if usemask and names:
for (name, conv) in zip(names or (), converters):
missing_values = [conv(_) for _ in conv.missing_values
if _ != asbytes('')]
for mval in missing_values:
outputmask[name] |= (output[name] == mval)
# Construct the final array
if usemask:
output = output.view(MaskedArray)
output._mask = outputmask
if unpack:
return output.squeeze().T
return output.squeeze()
def ndfromtxt(fname, **kwargs):
"""
Load ASCII data stored in a file and return it as a single array.
Complete description of all the optional input parameters is available in
the docstring of the `genfromtxt` function.
See Also
--------
numpy.genfromtxt : generic function.
"""
kwargs['usemask'] = False
return genfromtxt(fname, **kwargs)
def mafromtxt(fname, **kwargs):
"""
Load ASCII data stored in a text file and return a masked array.
For a complete description of all the input parameters, see `genfromtxt`.
See Also
--------
numpy.genfromtxt : generic function to load ASCII data.
"""
kwargs['usemask'] = True
return genfromtxt(fname, **kwargs)
def recfromtxt(fname, **kwargs):
"""
Load ASCII data from a file and return it in a record array.
If ``usemask=False`` a standard `recarray` is returned,
if ``usemask=True`` a MaskedRecords array is returned.
Complete description of all the optional input parameters is available in
the docstring of the `genfromtxt` function.
See Also
--------
numpy.genfromtxt : generic function
Notes
-----
By default, `dtype` is None, which means that the data-type of the output
array will be determined from the data.
"""
kwargs.update(dtype=kwargs.get('dtype', None))
usemask = kwargs.get('usemask', False)
output = genfromtxt(fname, **kwargs)
if usemask:
from numpy.ma.mrecords import MaskedRecords
output = output.view(MaskedRecords)
else:
output = output.view(np.recarray)
return output
def recfromcsv(fname, **kwargs):
"""
Load ASCII data stored in a comma-separated file.
The returned array is a record array (if ``usemask=False``, see
`recarray`) or a masked record array (if ``usemask=True``,
see `ma.mrecords.MaskedRecords`).
For a complete description of all the input parameters, see `genfromtxt`.
See Also
--------
numpy.genfromtxt : generic function to load ASCII data.
"""
case_sensitive = kwargs.get('case_sensitive', "lower") or "lower"
names = kwargs.get('names', True)
if names is None:
names = True
    kwargs.update(dtype=kwargs.get('dtype', None),
delimiter=kwargs.get('delimiter', ",") or ",",
names=names,
case_sensitive=case_sensitive)
usemask = kwargs.get("usemask", False)
output = genfromtxt(fname, **kwargs)
if usemask:
from numpy.ma.mrecords import MaskedRecords
output = output.view(MaskedRecords)
else:
output = output.view(np.recarray)
return output
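# Illustrative usage sketch (not part of the original NumPy module): shows how
# recfromcsv infers field names from the header row and lower-cases them via its
# default case_sensitive="lower". Assumes a NumPy build whose genfromtxt accepts
# in-memory byte streams; the sample data below is hypothetical.
if __name__ == "__main__":
    from io import BytesIO
    _demo = BytesIO(b"A,B,C\n1,2.5,x\n3,4.5,y")
    _rec = recfromcsv(_demo)
    print(_rec['a'])   # integer column inferred from the data: [1 3]
    print(_rec['b'])   # float column: [2.5 4.5] (exact formatting varies by version)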
| gpl-3.0 |
herilalaina/scikit-learn | sklearn/cluster/dbscan_.py | 18 | 12859 | # -*- coding: utf-8 -*-
"""
DBSCAN: Density-Based Spatial Clustering of Applications with Noise
"""
# Author: Robert Layton <[email protected]>
# Joel Nothman <[email protected]>
# Lars Buitinck
#
# License: BSD 3 clause
import numpy as np
from scipy import sparse
from ..base import BaseEstimator, ClusterMixin
from ..utils import check_array, check_consistent_length
from ..neighbors import NearestNeighbors
from ._dbscan_inner import dbscan_inner
def dbscan(X, eps=0.5, min_samples=5, metric='minkowski', metric_params=None,
algorithm='auto', leaf_size=30, p=2, sample_weight=None, n_jobs=1):
"""Perform DBSCAN clustering from vector array or distance matrix.
Read more in the :ref:`User Guide <dbscan>`.
Parameters
----------
X : array or sparse (CSR) matrix of shape (n_samples, n_features), or \
array of shape (n_samples, n_samples)
A feature array, or array of distances between samples if
``metric='precomputed'``.
eps : float, optional
The maximum distance between two samples for them to be considered
as in the same neighborhood.
min_samples : int, optional
The number of samples (or total weight) in a neighborhood for a point
to be considered as a core point. This includes the point itself.
metric : string, or callable
The metric to use when calculating distance between instances in a
feature array. If metric is a string or callable, it must be one of
the options allowed by metrics.pairwise.pairwise_distances for its
metric parameter.
If metric is "precomputed", X is assumed to be a distance matrix and
must be square. X may be a sparse matrix, in which case only "nonzero"
elements may be considered neighbors for DBSCAN.
metric_params : dict, optional
Additional keyword arguments for the metric function.
.. versionadded:: 0.19
algorithm : {'auto', 'ball_tree', 'kd_tree', 'brute'}, optional
The algorithm to be used by the NearestNeighbors module
to compute pointwise distances and find nearest neighbors.
See NearestNeighbors module documentation for details.
leaf_size : int, optional (default = 30)
Leaf size passed to BallTree or cKDTree. This can affect the speed
of the construction and query, as well as the memory required
to store the tree. The optimal value depends
on the nature of the problem.
p : float, optional
The power of the Minkowski metric to be used to calculate distance
between points.
sample_weight : array, shape (n_samples,), optional
Weight of each sample, such that a sample with a weight of at least
``min_samples`` is by itself a core sample; a sample with negative
weight may inhibit its eps-neighbor from being core.
Note that weights are absolute, and default to 1.
n_jobs : int, optional (default = 1)
The number of parallel jobs to run for neighbors search.
If ``-1``, then the number of jobs is set to the number of CPU cores.
Returns
-------
core_samples : array [n_core_samples]
Indices of core samples.
labels : array [n_samples]
Cluster labels for each point. Noisy samples are given the label -1.
Notes
-----
For an example, see :ref:`examples/cluster/plot_dbscan.py
<sphx_glr_auto_examples_cluster_plot_dbscan.py>`.
This implementation bulk-computes all neighborhood queries, which increases
the memory complexity to O(n.d) where d is the average number of neighbors,
while original DBSCAN had memory complexity O(n).
Sparse neighborhoods can be precomputed using
:func:`NearestNeighbors.radius_neighbors_graph
<sklearn.neighbors.NearestNeighbors.radius_neighbors_graph>`
with ``mode='distance'``.
References
----------
Ester, M., H. P. Kriegel, J. Sander, and X. Xu, "A Density-Based
Algorithm for Discovering Clusters in Large Spatial Databases with Noise".
In: Proceedings of the 2nd International Conference on Knowledge Discovery
and Data Mining, Portland, OR, AAAI Press, pp. 226-231. 1996
"""
if not eps > 0.0:
raise ValueError("eps must be positive.")
X = check_array(X, accept_sparse='csr')
if sample_weight is not None:
sample_weight = np.asarray(sample_weight)
check_consistent_length(X, sample_weight)
# Calculate neighborhood for all samples. This leaves the original point
# in, which needs to be considered later (i.e. point i is in the
    # neighborhood of point i. While true, it's useless information)
if metric == 'precomputed' and sparse.issparse(X):
neighborhoods = np.empty(X.shape[0], dtype=object)
X.sum_duplicates() # XXX: modifies X's internals in-place
X_mask = X.data <= eps
masked_indices = X.indices.astype(np.intp, copy=False)[X_mask]
masked_indptr = np.concatenate(([0], np.cumsum(X_mask)))[X.indptr[1:]]
# insert the diagonal: a point is its own neighbor, but 0 distance
# means absence from sparse matrix data
masked_indices = np.insert(masked_indices, masked_indptr,
np.arange(X.shape[0]))
masked_indptr = masked_indptr[:-1] + np.arange(1, X.shape[0])
# split into rows
neighborhoods[:] = np.split(masked_indices, masked_indptr)
else:
neighbors_model = NearestNeighbors(radius=eps, algorithm=algorithm,
leaf_size=leaf_size,
metric=metric,
metric_params=metric_params, p=p,
n_jobs=n_jobs)
neighbors_model.fit(X)
# This has worst case O(n^2) memory complexity
neighborhoods = neighbors_model.radius_neighbors(X, eps,
return_distance=False)
if sample_weight is None:
n_neighbors = np.array([len(neighbors)
for neighbors in neighborhoods])
else:
n_neighbors = np.array([np.sum(sample_weight[neighbors])
for neighbors in neighborhoods])
# Initially, all samples are noise.
labels = -np.ones(X.shape[0], dtype=np.intp)
# A list of all core samples found.
core_samples = np.asarray(n_neighbors >= min_samples, dtype=np.uint8)
dbscan_inner(core_samples, neighborhoods, labels)
return np.where(core_samples)[0], labels
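# Illustrative call pattern (a sketch, not taken from the scikit-learn docs):
# the functional API returns the core-sample indices and per-point labels
# directly, e.g.  core_idx, labels = dbscan(X, eps=0.5, min_samples=5)
# Noise points receive the label -1; ``X`` here stands for any hypothetical
# (n_samples, n_features) array supplied by the caller.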
class DBSCAN(BaseEstimator, ClusterMixin):
"""Perform DBSCAN clustering from vector array or distance matrix.
DBSCAN - Density-Based Spatial Clustering of Applications with Noise.
Finds core samples of high density and expands clusters from them.
Good for data which contains clusters of similar density.
Read more in the :ref:`User Guide <dbscan>`.
Parameters
----------
eps : float, optional
The maximum distance between two samples for them to be considered
as in the same neighborhood.
min_samples : int, optional
The number of samples (or total weight) in a neighborhood for a point
to be considered as a core point. This includes the point itself.
metric : string, or callable
The metric to use when calculating distance between instances in a
feature array. If metric is a string or callable, it must be one of
        the options allowed by metrics.pairwise.pairwise_distances for its
metric parameter.
If metric is "precomputed", X is assumed to be a distance matrix and
must be square. X may be a sparse matrix, in which case only "nonzero"
elements may be considered neighbors for DBSCAN.
.. versionadded:: 0.17
metric *precomputed* to accept precomputed sparse matrix.
metric_params : dict, optional
Additional keyword arguments for the metric function.
.. versionadded:: 0.19
algorithm : {'auto', 'ball_tree', 'kd_tree', 'brute'}, optional
The algorithm to be used by the NearestNeighbors module
to compute pointwise distances and find nearest neighbors.
See NearestNeighbors module documentation for details.
leaf_size : int, optional (default = 30)
Leaf size passed to BallTree or cKDTree. This can affect the speed
of the construction and query, as well as the memory required
to store the tree. The optimal value depends
on the nature of the problem.
p : float, optional
The power of the Minkowski metric to be used to calculate distance
between points.
n_jobs : int, optional (default = 1)
The number of parallel jobs to run.
If ``-1``, then the number of jobs is set to the number of CPU cores.
Attributes
----------
core_sample_indices_ : array, shape = [n_core_samples]
Indices of core samples.
components_ : array, shape = [n_core_samples, n_features]
Copy of each core sample found by training.
labels_ : array, shape = [n_samples]
Cluster labels for each point in the dataset given to fit().
Noisy samples are given the label -1.
Notes
-----
For an example, see :ref:`examples/cluster/plot_dbscan.py
<sphx_glr_auto_examples_cluster_plot_dbscan.py>`.
This implementation bulk-computes all neighborhood queries, which increases
the memory complexity to O(n.d) where d is the average number of neighbors,
while original DBSCAN had memory complexity O(n).
Sparse neighborhoods can be precomputed using
:func:`NearestNeighbors.radius_neighbors_graph
<sklearn.neighbors.NearestNeighbors.radius_neighbors_graph>`
with ``mode='distance'``.
References
----------
Ester, M., H. P. Kriegel, J. Sander, and X. Xu, "A Density-Based
Algorithm for Discovering Clusters in Large Spatial Databases with Noise".
In: Proceedings of the 2nd International Conference on Knowledge Discovery
and Data Mining, Portland, OR, AAAI Press, pp. 226-231. 1996
"""
def __init__(self, eps=0.5, min_samples=5, metric='euclidean',
metric_params=None, algorithm='auto', leaf_size=30, p=None,
n_jobs=1):
self.eps = eps
self.min_samples = min_samples
self.metric = metric
self.metric_params = metric_params
self.algorithm = algorithm
self.leaf_size = leaf_size
self.p = p
self.n_jobs = n_jobs
def fit(self, X, y=None, sample_weight=None):
"""Perform DBSCAN clustering from features or distance matrix.
Parameters
----------
X : array or sparse (CSR) matrix of shape (n_samples, n_features), or \
array of shape (n_samples, n_samples)
A feature array, or array of distances between samples if
``metric='precomputed'``.
sample_weight : array, shape (n_samples,), optional
Weight of each sample, such that a sample with a weight of at least
``min_samples`` is by itself a core sample; a sample with negative
weight may inhibit its eps-neighbor from being core.
Note that weights are absolute, and default to 1.
y : Ignored
"""
X = check_array(X, accept_sparse='csr')
clust = dbscan(X, sample_weight=sample_weight,
**self.get_params())
self.core_sample_indices_, self.labels_ = clust
if len(self.core_sample_indices_):
# fix for scipy sparse indexing issue
self.components_ = X[self.core_sample_indices_].copy()
else:
# no core samples
self.components_ = np.empty((0, X.shape[1]))
return self
def fit_predict(self, X, y=None, sample_weight=None):
"""Performs clustering on X and returns cluster labels.
Parameters
----------
X : array or sparse (CSR) matrix of shape (n_samples, n_features), or \
array of shape (n_samples, n_samples)
A feature array, or array of distances between samples if
``metric='precomputed'``.
sample_weight : array, shape (n_samples,), optional
Weight of each sample, such that a sample with a weight of at least
``min_samples`` is by itself a core sample; a sample with negative
weight may inhibit its eps-neighbor from being core.
Note that weights are absolute, and default to 1.
y : Ignored
Returns
-------
y : ndarray, shape (n_samples,)
cluster labels
"""
self.fit(X, sample_weight=sample_weight)
return self.labels_
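# Minimal usage sketch (illustrative, not part of scikit-learn itself): cluster
# a tiny hypothetical 2-D toy set. Points within eps=3 of at least
# min_samples=2 neighbours (including themselves) become core samples.
if __name__ == "__main__":
    _X_demo = np.array([[1, 2], [2, 2], [2, 3],
                        [8, 7], [8, 8], [25, 80]])
    _model = DBSCAN(eps=3, min_samples=2).fit(_X_demo)
    print(_model.labels_)                # [ 0  0  0  1  1 -1]; -1 marks noise
    print(_model.core_sample_indices_)   # [0 1 2 3 4]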
| bsd-3-clause |
mfjb/scikit-learn | sklearn/utils/graph.py | 289 | 6239 | """
Graph utilities and algorithms
Graphs are represented with their adjacency matrices, preferably using
sparse matrices.
"""
# Authors: Aric Hagberg <[email protected]>
# Gael Varoquaux <[email protected]>
# Jake Vanderplas <[email protected]>
# License: BSD 3 clause
import numpy as np
from scipy import sparse
from .validation import check_array
from .graph_shortest_path import graph_shortest_path
###############################################################################
# Path and connected component analysis.
# Code adapted from networkx
def single_source_shortest_path_length(graph, source, cutoff=None):
"""Return the shortest path length from source to all reachable nodes.
Returns a dictionary of shortest path lengths keyed by target.
Parameters
----------
graph: sparse matrix or 2D array (preferably LIL matrix)
Adjacency matrix of the graph
source : node label
Starting node for path
cutoff : integer, optional
Depth to stop the search - only
paths of length <= cutoff are returned.
Examples
--------
>>> from sklearn.utils.graph import single_source_shortest_path_length
>>> import numpy as np
>>> graph = np.array([[ 0, 1, 0, 0],
... [ 1, 0, 1, 0],
... [ 0, 1, 0, 1],
... [ 0, 0, 1, 0]])
>>> single_source_shortest_path_length(graph, 0)
{0: 0, 1: 1, 2: 2, 3: 3}
>>> single_source_shortest_path_length(np.ones((6, 6)), 2)
{0: 1, 1: 1, 2: 0, 3: 1, 4: 1, 5: 1}
"""
if sparse.isspmatrix(graph):
graph = graph.tolil()
else:
graph = sparse.lil_matrix(graph)
seen = {} # level (number of hops) when seen in BFS
level = 0 # the current level
next_level = [source] # dict of nodes to check at next level
while next_level:
this_level = next_level # advance to next level
next_level = set() # and start a new list (fringe)
for v in this_level:
if v not in seen:
seen[v] = level # set the level of vertex v
next_level.update(graph.rows[v])
if cutoff is not None and cutoff <= level:
break
level += 1
return seen # return all path lengths as dictionary
if hasattr(sparse, 'connected_components'):
connected_components = sparse.connected_components
else:
from .sparsetools import connected_components
###############################################################################
# Graph laplacian
def graph_laplacian(csgraph, normed=False, return_diag=False):
""" Return the Laplacian matrix of a directed graph.
For non-symmetric graphs the out-degree is used in the computation.
Parameters
----------
csgraph : array_like or sparse matrix, 2 dimensions
compressed-sparse graph, with shape (N, N).
normed : bool, optional
If True, then compute normalized Laplacian.
return_diag : bool, optional
If True, then return diagonal as well as laplacian.
Returns
-------
lap : ndarray
The N x N laplacian matrix of graph.
diag : ndarray
The length-N diagonal of the laplacian matrix.
diag is returned only if return_diag is True.
Notes
-----
    The Laplacian matrix of a graph is sometimes referred to as the
    "Kirchhoff matrix" or the "admittance matrix", and is useful in many
parts of spectral graph theory. In particular, the eigen-decomposition
of the laplacian matrix can give insight into many properties of the graph.
For non-symmetric directed graphs, the laplacian is computed using the
out-degree of each node.
"""
if csgraph.ndim != 2 or csgraph.shape[0] != csgraph.shape[1]:
raise ValueError('csgraph must be a square matrix or array')
if normed and (np.issubdtype(csgraph.dtype, np.int)
or np.issubdtype(csgraph.dtype, np.uint)):
csgraph = check_array(csgraph, dtype=np.float64, accept_sparse=True)
if sparse.isspmatrix(csgraph):
return _laplacian_sparse(csgraph, normed=normed,
return_diag=return_diag)
else:
return _laplacian_dense(csgraph, normed=normed,
return_diag=return_diag)
def _laplacian_sparse(graph, normed=False, return_diag=False):
n_nodes = graph.shape[0]
if not graph.format == 'coo':
lap = (-graph).tocoo()
else:
lap = -graph.copy()
diag_mask = (lap.row == lap.col)
if not diag_mask.sum() == n_nodes:
# The sparsity pattern of the matrix has holes on the diagonal,
# we need to fix that
diag_idx = lap.row[diag_mask]
diagonal_holes = list(set(range(n_nodes)).difference(diag_idx))
new_data = np.concatenate([lap.data, np.ones(len(diagonal_holes))])
new_row = np.concatenate([lap.row, diagonal_holes])
new_col = np.concatenate([lap.col, diagonal_holes])
lap = sparse.coo_matrix((new_data, (new_row, new_col)),
shape=lap.shape)
diag_mask = (lap.row == lap.col)
lap.data[diag_mask] = 0
w = -np.asarray(lap.sum(axis=1)).squeeze()
if normed:
w = np.sqrt(w)
w_zeros = (w == 0)
w[w_zeros] = 1
lap.data /= w[lap.row]
lap.data /= w[lap.col]
lap.data[diag_mask] = (1 - w_zeros[lap.row[diag_mask]]).astype(
lap.data.dtype)
else:
lap.data[diag_mask] = w[lap.row[diag_mask]]
if return_diag:
return lap, w
return lap
def _laplacian_dense(graph, normed=False, return_diag=False):
n_nodes = graph.shape[0]
lap = -np.asarray(graph) # minus sign leads to a copy
# set diagonal to zero
lap.flat[::n_nodes + 1] = 0
w = -lap.sum(axis=0)
if normed:
w = np.sqrt(w)
w_zeros = (w == 0)
w[w_zeros] = 1
lap /= w
lap /= w[:, np.newaxis]
lap.flat[::n_nodes + 1] = (1 - w_zeros).astype(lap.dtype)
else:
lap.flat[::n_nodes + 1] = w.astype(lap.dtype)
if return_diag:
return lap, w
return lap
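# Minimal usage sketch (illustrative, not part of the original module): the
# unnormalized Laplacian of a 3-node path graph 0-1-2. The diagonal holds the
# node degrees and each edge contributes a -1 off the diagonal.
if __name__ == "__main__":
    _path_graph = np.array([[0., 1., 0.],
                            [1., 0., 1.],
                            [0., 1., 0.]])
    print(graph_laplacian(_path_graph))
    # [[ 1. -1.  0.]
    #  [-1.  2. -1.]
    #  [ 0. -1.  1.]]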
| bsd-3-clause |
linebp/pandas | pandas/tests/scalar/test_interval.py | 7 | 3606 | from __future__ import division
import pytest
from pandas import Interval
import pandas.util.testing as tm
class TestInterval(object):
def setup_method(self, method):
self.interval = Interval(0, 1)
def test_properties(self):
assert self.interval.closed == 'right'
assert self.interval.left == 0
assert self.interval.right == 1
assert self.interval.mid == 0.5
def test_repr(self):
assert repr(self.interval) == "Interval(0, 1, closed='right')"
assert str(self.interval) == "(0, 1]"
interval_left = Interval(0, 1, closed='left')
assert repr(interval_left) == "Interval(0, 1, closed='left')"
assert str(interval_left) == "[0, 1)"
def test_contains(self):
assert 0.5 in self.interval
assert 1 in self.interval
assert 0 not in self.interval
pytest.raises(TypeError, lambda: self.interval in self.interval)
interval = Interval(0, 1, closed='both')
assert 0 in interval
assert 1 in interval
interval = Interval(0, 1, closed='neither')
assert 0 not in interval
assert 0.5 in interval
assert 1 not in interval
def test_equal(self):
assert Interval(0, 1) == Interval(0, 1, closed='right')
assert Interval(0, 1) != Interval(0, 1, closed='left')
assert Interval(0, 1) != 0
def test_comparison(self):
with tm.assert_raises_regex(TypeError, 'unorderable types'):
Interval(0, 1) < 2
assert Interval(0, 1) < Interval(1, 2)
assert Interval(0, 1) < Interval(0, 2)
assert Interval(0, 1) < Interval(0.5, 1.5)
assert Interval(0, 1) <= Interval(0, 1)
assert Interval(0, 1) > Interval(-1, 2)
assert Interval(0, 1) >= Interval(0, 1)
def test_hash(self):
# should not raise
hash(self.interval)
def test_math_add(self):
expected = Interval(1, 2)
actual = self.interval + 1
assert expected == actual
expected = Interval(1, 2)
actual = 1 + self.interval
assert expected == actual
actual = self.interval
actual += 1
assert expected == actual
with pytest.raises(TypeError):
self.interval + Interval(1, 2)
with pytest.raises(TypeError):
self.interval + 'foo'
def test_math_sub(self):
expected = Interval(-1, 0)
actual = self.interval - 1
assert expected == actual
actual = self.interval
actual -= 1
assert expected == actual
with pytest.raises(TypeError):
self.interval - Interval(1, 2)
with pytest.raises(TypeError):
self.interval - 'foo'
def test_math_mult(self):
expected = Interval(0, 2)
actual = self.interval * 2
assert expected == actual
expected = Interval(0, 2)
actual = 2 * self.interval
assert expected == actual
actual = self.interval
actual *= 2
assert expected == actual
with pytest.raises(TypeError):
self.interval * Interval(1, 2)
with pytest.raises(TypeError):
self.interval * 'foo'
def test_math_div(self):
expected = Interval(0, 0.5)
actual = self.interval / 2.0
assert expected == actual
actual = self.interval
actual /= 2.0
assert expected == actual
with pytest.raises(TypeError):
self.interval / Interval(1, 2)
with pytest.raises(TypeError):
self.interval / 'foo'
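# Quick illustrative sketch (not part of the original test module): the basic
# Interval semantics exercised by the tests above, runnable on its own.
if __name__ == "__main__":
    _iv = Interval(0, 1)          # closed='right' by default
    print(0.5 in _iv, 0 in _iv)   # True False
    print(_iv + 1)                # (1, 2]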
| bsd-3-clause |
sys-bio/tellurium | spyder_mod/Spyder 3.3.0/spyder/app/mainwindow.py | 2 | 133996 | # -*- coding: utf-8 -*-
#
# Copyright © Spyder Project Contributors
# Licensed under the terms of the MIT License
# (see spyder/__init__.py for details)
"""
Spyder, the Scientific PYthon Development EnviRonment
=====================================================
Developed and maintained by the Spyder Project Contributors
Copyright © Spyder Project Contributors
Licensed under the terms of the MIT License
(see spyder/__init__.py for details)
"""
# =============================================================================
# Stdlib imports
# =============================================================================
from __future__ import print_function
import atexit
import errno
import gc
import os
import os.path as osp
import re
import shutil
import signal
import socket
import subprocess
import sys
import threading
import traceback
#==============================================================================
# Keeping a reference to the original sys.exit before patching it
#==============================================================================
ORIGINAL_SYS_EXIT = sys.exit
#==============================================================================
# Check requirements
#==============================================================================
from spyder import requirements
requirements.check_path()
requirements.check_qt()
requirements.check_spyder_kernels()
#==============================================================================
# Windows only: support for hiding console window when started with python.exe
#==============================================================================
set_attached_console_visible = None
is_attached_console_visible = None
set_windows_appusermodelid = None
if os.name == 'nt':
from spyder.utils.windows import (set_attached_console_visible,
is_attached_console_visible,
set_windows_appusermodelid)
#==============================================================================
# Workaround: importing rope.base.project here, otherwise this module can't
# be imported if Spyder was executed from another folder than spyder
#==============================================================================
try:
import rope.base.project # analysis:ignore
except ImportError:
pass
#==============================================================================
# Qt imports
#==============================================================================
from qtpy import API, PYQT5
from qtpy.compat import from_qvariant
from qtpy.QtCore import (QByteArray, QCoreApplication, QPoint, QSize, Qt,
QThread, QTimer, QUrl, Signal, Slot)
from qtpy.QtGui import QColor, QDesktopServices, QIcon, QKeySequence, QPixmap
from qtpy.QtWidgets import (QAction, QApplication, QDockWidget, QMainWindow,
QMenu, QMessageBox, QShortcut, QSplashScreen,
QStyleFactory)
# Avoid a "Cannot mix incompatible Qt library" error on Windows platforms
from qtpy import QtSvg # analysis:ignore
# Avoid a bug in Qt: https://bugreports.qt.io/browse/QTBUG-46720
from qtpy import QtWebEngineWidgets # analysis:ignore
# To catch font errors in QtAwesome
from qtawesome.iconic_font import FontError
#==============================================================================
# Proper high DPI scaling is available in Qt >= 5.6.0. This attribute must
# be set before creating the application.
#==============================================================================
from spyder.config.main import CONF
if hasattr(Qt, 'AA_EnableHighDpiScaling'):
QCoreApplication.setAttribute(Qt.AA_EnableHighDpiScaling, CONF.get('main', 'high_dpi_scaling'))
#==============================================================================
# Create our QApplication instance here because it's needed to render the
# splash screen created below
#==============================================================================
from spyder.utils.qthelpers import qapplication, MENU_SEPARATOR
from spyder.config.base import get_image_path
MAIN_APP = qapplication()
if PYQT5:
APP_ICON = QIcon(get_image_path("spyder.svg"))
else:
APP_ICON = QIcon(get_image_path("spyder.png"))
MAIN_APP.setWindowIcon(APP_ICON)
#==============================================================================
# Create splash screen out of MainWindow to reduce perceived startup time.
#==============================================================================
from spyder.config.base import _, get_image_path, DEV, running_under_pytest
if not running_under_pytest():
SPLASH = QSplashScreen(QPixmap(get_image_path('Tellurium_splash.png'), 'png'))
SPLASH_FONT = SPLASH.font()
SPLASH_FONT.setPixelSize(10)
SPLASH.setFont(SPLASH_FONT)
SPLASH.show()
SPLASH.showMessage(_("Initializing..."), Qt.AlignBottom | Qt.AlignCenter |
Qt.AlignAbsolute, QColor(Qt.black))
QApplication.processEvents()
else:
SPLASH = None
#==============================================================================
# Local utility imports
#==============================================================================
from spyder import (__version__, __project_url__, __forum_url__,
__trouble_url__, __trouble_url_short__, get_versions)
from spyder.config.base import (get_conf_path, get_module_source_path, STDERR,
DEBUG, debug_print, MAC_APP_NAME, get_home_dir,
running_in_mac_app, get_module_path,
reset_config_files)
from spyder.config.main import OPEN_FILES_PORT
from spyder.config.utils import IMPORT_EXT, is_gtk_desktop
from spyder.app.cli_options import get_options
from spyder import dependencies
from spyder.py3compat import (is_text_string, to_text_string,
PY3, qbytearray_to_str, configparser as cp)
from spyder.utils import encoding, programs
from spyder.utils import icon_manager as ima
from spyder.utils.introspection import module_completion
from spyder.utils.programs import is_module_installed
from spyder.utils.misc import select_port, getcwd_or_home, get_python_executable
from spyder.widgets.fileswitcher import FileSwitcher
#==============================================================================
# Local gui imports
#==============================================================================
# NOTE: Move (if possible) import's of widgets and plugins exactly where they
# are needed in MainWindow to speed up perceived startup time (i.e. the time
# from clicking the Spyder icon to showing the splash screen).
try:
from spyder.utils.environ import WinUserEnvDialog
except ImportError:
WinUserEnvDialog = None # analysis:ignore
from spyder.utils.qthelpers import (create_action, add_actions, get_icon,
add_shortcut_to_tooltip,
create_module_bookmark_actions,
create_program_action, DialogManager,
create_python_script_action, file_uri)
from spyder.config.gui import get_shortcut
from spyder.otherplugins import get_spyderplugins_mods
from spyder.app import tour
#==============================================================================
# Get the cwd before initializing WorkingDirectory, which sets it to the one
# used in the last session
#==============================================================================
CWD = getcwd_or_home()
#==============================================================================
# Spyder's main window widgets utilities
#==============================================================================
def get_python_doc_path():
"""
Return Python documentation path
(Windows: return the PythonXX.chm path if available)
"""
if os.name == 'nt':
doc_path = osp.join(sys.prefix, "Doc")
if not osp.isdir(doc_path):
return
python_chm = [path for path in os.listdir(doc_path)
if re.match(r"(?i)Python[0-9]{3,6}.chm", path)]
if python_chm:
return file_uri(osp.join(doc_path, python_chm[0]))
else:
vinf = sys.version_info
doc_path = '/usr/share/doc/python%d.%d/html' % (vinf[0], vinf[1])
python_doc = osp.join(doc_path, "index.html")
if osp.isfile(python_doc):
return file_uri(python_doc)
#==============================================================================
# Main Window
#==============================================================================
class MainWindow(QMainWindow):
"""Spyder main window"""
DOCKOPTIONS = QMainWindow.AllowTabbedDocks|QMainWindow.AllowNestedDocks
CURSORBLINK_OSDEFAULT = QApplication.cursorFlashTime()
SPYDER_PATH = get_conf_path('path')
SPYDER_NOT_ACTIVE_PATH = get_conf_path('not_active_path')
BOOKMARKS = (
('Python2', "https://docs.python.org/2/index.html",
_("Python2 documentation")),
('Python3', "https://docs.python.org/3/index.html",
_("Python3 documentation")),
('numpy', "http://docs.scipy.org/doc/",
_("Numpy and Scipy documentation")),
('matplotlib', "http://matplotlib.sourceforge.net/contents.html",
_("Matplotlib documentation")),
('PyQt5',
"http://pyqt.sourceforge.net/Docs/PyQt5/",
_("PyQt5 Reference Guide")),
('PyQt5',
"http://pyqt.sourceforge.net/Docs/PyQt5/class_reference.html",
_("PyQt5 API Reference")),
('winpython', "https://winpython.github.io/",
_("WinPython"))
)
DEFAULT_LAYOUTS = 4
# Signals
restore_scrollbar_position = Signal()
all_actions_defined = Signal()
sig_pythonpath_changed = Signal()
sig_open_external_file = Signal(str)
sig_resized = Signal("QResizeEvent") # related to interactive tour
sig_moved = Signal("QMoveEvent") # related to interactive tour
def __init__(self, options=None):
QMainWindow.__init__(self)
qapp = QApplication.instance()
if PYQT5:
# Enabling scaling for high dpi
qapp.setAttribute(Qt.AA_UseHighDpiPixmaps)
self.default_style = str(qapp.style().objectName())
self.dialog_manager = DialogManager()
self.init_workdir = options.working_directory
self.profile = options.profile
self.multithreaded = options.multithreaded
self.new_instance = options.new_instance
self.open_project = options.open_project
self.window_title = options.window_title
self.debug_print("Start of MainWindow constructor")
def signal_handler(signum, frame=None):
"""Handler for signals."""
sys.stdout.write('Handling signal: %s\n' % signum)
sys.stdout.flush()
QApplication.quit()
if os.name == "nt":
try:
import win32api
win32api.SetConsoleCtrlHandler(signal_handler, True)
except ImportError:
pass
else:
signal.signal(signal.SIGTERM, signal_handler)
if not DEV:
                # Make Spyder quit when pressing Ctrl+C in the console
# In DEV Ctrl+C doesn't quit, because it helps to
# capture the traceback when spyder freezes
signal.signal(signal.SIGINT, signal_handler)
# Use a custom Qt stylesheet
if sys.platform == 'darwin':
spy_path = get_module_source_path('spyder')
img_path = osp.join(spy_path, 'images')
mac_style = open(osp.join(spy_path, 'app', 'mac_stylesheet.qss')).read()
mac_style = mac_style.replace('$IMAGE_PATH', img_path)
self.setStyleSheet(mac_style)
# Create our TEMPDIR
if not osp.isdir(programs.TEMPDIR):
os.mkdir(programs.TEMPDIR)
# Shortcut management data
self.shortcut_data = []
# Loading Spyder path
self.path = []
self.not_active_path = []
self.project_path = []
if osp.isfile(self.SPYDER_PATH):
self.path, _x = encoding.readlines(self.SPYDER_PATH)
self.path = [name for name in self.path if osp.isdir(name)]
if osp.isfile(self.SPYDER_NOT_ACTIVE_PATH):
self.not_active_path, _x = \
encoding.readlines(self.SPYDER_NOT_ACTIVE_PATH)
self.not_active_path = \
[name for name in self.not_active_path if osp.isdir(name)]
self.remove_path_from_sys_path()
self.add_path_to_sys_path()
# Plugins
self.console = None
self.workingdirectory = None
self.editor = None
self.explorer = None
self.help = None
self.onlinehelp = None
self.projects = None
self.outlineexplorer = None
self.historylog = None
self.extconsole = None
self.ipyconsole = None
self.variableexplorer = None
self.findinfiles = None
self.thirdparty_plugins = []
# Tour # TODO: Should I consider it a plugin?? or?
self.tour = None
self.tours_available = None
# File switcher
self.fileswitcher = None
        # Check for updates Thread and Worker; references needed to prevent
# segfaulting
self.check_updates_action = None
self.thread_updates = None
self.worker_updates = None
self.give_updates_feedback = True
# Preferences
from spyder.plugins.configdialog import (MainConfigPage,
ColorSchemeConfigPage)
from spyder.plugins.shortcuts import ShortcutsConfigPage
from spyder.plugins.runconfig import RunConfigPage
from spyder.plugins.maininterpreter import MainInterpreterConfigPage
self.general_prefs = [MainConfigPage, ShortcutsConfigPage,
ColorSchemeConfigPage, MainInterpreterConfigPage,
RunConfigPage]
self.prefs_index = None
self.prefs_dialog_size = None
# Quick Layouts and Dialogs
from spyder.plugins.layoutdialog import (LayoutSaveDialog,
LayoutSettingsDialog)
self.dialog_layout_save = LayoutSaveDialog
self.dialog_layout_settings = LayoutSettingsDialog
# Actions
self.lock_dockwidgets_action = None
self.show_toolbars_action = None
self.close_dockwidget_action = None
self.undo_action = None
self.redo_action = None
self.copy_action = None
self.cut_action = None
self.paste_action = None
self.selectall_action = None
self.maximize_action = None
self.fullscreen_action = None
# Menu bars
self.file_menu = None
self.file_menu_actions = []
self.edit_menu = None
self.edit_menu_actions = []
self.search_menu = None
self.search_menu_actions = []
self.source_menu = None
self.source_menu_actions = []
self.run_menu = None
self.run_menu_actions = []
self.debug_menu = None
self.debug_menu_actions = []
self.consoles_menu = None
self.consoles_menu_actions = []
self.projects_menu = None
self.projects_menu_actions = []
self.tools_menu = None
self.tools_menu_actions = []
self.external_tools_menu = None # We must keep a reference to this,
# otherwise the external tools menu is lost after leaving setup method
self.external_tools_menu_actions = []
self.view_menu = None
self.plugins_menu = None
self.plugins_menu_actions = []
self.toolbars_menu = None
self.help_menu = None
self.help_menu_actions = []
# Status bar widgets
self.mem_status = None
self.cpu_status = None
# Toolbars
self.visible_toolbars = []
self.toolbarslist = []
self.main_toolbar = None
self.main_toolbar_actions = []
self.file_toolbar = None
self.file_toolbar_actions = []
self.edit_toolbar = None
self.edit_toolbar_actions = []
self.search_toolbar = None
self.search_toolbar_actions = []
self.source_toolbar = None
self.source_toolbar_actions = []
self.run_toolbar = None
self.run_toolbar_actions = []
self.debug_toolbar = None
self.debug_toolbar_actions = []
self.layout_toolbar = None
self.layout_toolbar_actions = []
if running_under_pytest():
# Show errors in internal console when testing.
CONF.set('main', 'show_internal_errors', False)
# Set window title
self.set_window_title()
if set_windows_appusermodelid != None:
res = set_windows_appusermodelid()
debug_print("appusermodelid: " + str(res))
# Setting QTimer if running in travis
test_travis = os.environ.get('TEST_CI_APP', None)
if test_travis is not None:
global MAIN_APP
timer_shutdown_time = 30000
self.timer_shutdown = QTimer(self)
self.timer_shutdown.timeout.connect(MAIN_APP.quit)
self.timer_shutdown.start(timer_shutdown_time)
# Showing splash screen
self.splash = SPLASH
if CONF.get('main', 'current_version', '') != __version__:
CONF.set('main', 'current_version', __version__)
# Execute here the actions to be performed only once after
# each update (there is nothing there for now, but it could
# be useful some day...)
# List of satellite widgets (registered in add_dockwidget):
self.widgetlist = []
# Flags used if closing() is called by the exit() shell command
self.already_closed = False
self.is_starting_up = True
self.is_setting_up = True
self.dockwidgets_locked = CONF.get('main', 'panes_locked')
self.floating_dockwidgets = []
self.window_size = None
self.window_position = None
self.state_before_maximizing = None
self.current_quick_layout = None
self.previous_layout_settings = None # TODO: related to quick layouts
self.last_plugin = None
self.fullscreen_flag = None # isFullscreen does not work as expected
# The following flag remember the maximized state even when
# the window is in fullscreen mode:
self.maximized_flag = None
# To keep track of the last focused widget
self.last_focused_widget = None
self.previous_focused_widget = None
# Server to open external files on a single instance
# This is needed in order to handle socket creation problems.
# See issue 4132
if os.name == 'nt':
try:
self.open_files_server = socket.socket(socket.AF_INET,
socket.SOCK_STREAM,
socket.IPPROTO_TCP)
except OSError as e:
self.open_files_server = None
QMessageBox.warning(None, "Spyder",
_("An error occurred while creating a socket needed "
"by Spyder. Please, try to run as an Administrator "
"from cmd.exe the following command and then "
"restart your computer: <br><br><span "
"style=\'color: #555555\'><b>netsh winsock reset"
"</b></span><br>"))
else:
self.open_files_server = socket.socket(socket.AF_INET,
socket.SOCK_STREAM,
socket.IPPROTO_TCP)
self.apply_settings()
self.debug_print("End of MainWindow constructor")
def debug_print(self, message):
"""Debug prints"""
debug_print(message)
#---- Window setup
def create_toolbar(self, title, object_name, iconsize=24):
"""Create and return toolbar with *title* and *object_name*"""
toolbar = self.addToolBar(title)
toolbar.setObjectName(object_name)
toolbar.setIconSize(QSize(iconsize, iconsize))
self.toolbarslist.append(toolbar)
return toolbar
def setup(self):
"""Setup main window"""
self.debug_print("*** Start of MainWindow setup ***")
self.debug_print(" ..core actions")
self.close_dockwidget_action = create_action(self,
icon=ima.icon('DialogCloseButton'),
text=_("Close current pane"),
triggered=self.close_current_dockwidget,
context=Qt.ApplicationShortcut)
self.register_shortcut(self.close_dockwidget_action, "_",
"Close pane")
self.lock_dockwidgets_action = create_action(self, _("Lock panes"),
toggled=self.toggle_lock_dockwidgets,
context=Qt.ApplicationShortcut)
self.register_shortcut(self.lock_dockwidgets_action, "_",
"Lock unlock panes")
# custom layouts shortcuts
self.toggle_next_layout_action = create_action(self,
_("Use next layout"),
triggered=self.toggle_next_layout,
context=Qt.ApplicationShortcut)
self.toggle_previous_layout_action = create_action(self,
_("Use previous layout"),
triggered=self.toggle_previous_layout,
context=Qt.ApplicationShortcut)
self.register_shortcut(self.toggle_next_layout_action, "_",
"Use next layout")
self.register_shortcut(self.toggle_previous_layout_action, "_",
"Use previous layout")
# File switcher shortcuts
self.file_switcher_action = create_action(
self,
_('File switcher...'),
icon=ima.icon('filelist'),
tip=_('Fast switch between files'),
triggered=self.open_fileswitcher,
context=Qt.ApplicationShortcut)
self.register_shortcut(self.file_switcher_action, context="_",
name="File switcher")
self.symbol_finder_action = create_action(
self, _('Symbol finder...'),
icon=ima.icon('symbol_find'),
tip=_('Fast symbol search in file'),
triggered=self.open_symbolfinder,
context=Qt.ApplicationShortcut)
self.register_shortcut(self.symbol_finder_action, context="_",
name="symbol finder", add_sc_to_tip=True)
self.file_toolbar_actions = [self.file_switcher_action,
self.symbol_finder_action]
def create_edit_action(text, tr_text, icon):
textseq = text.split(' ')
method_name = textseq[0].lower()+"".join(textseq[1:])
action = create_action(self, tr_text,
icon=icon,
triggered=self.global_callback,
data=method_name,
context=Qt.WidgetShortcut)
self.register_shortcut(action, "Editor", text)
return action
self.undo_action = create_edit_action('Undo', _('Undo'),
ima.icon('undo'))
self.redo_action = create_edit_action('Redo', _('Redo'),
ima.icon('redo'))
self.copy_action = create_edit_action('Copy', _('Copy'),
ima.icon('editcopy'))
self.cut_action = create_edit_action('Cut', _('Cut'),
ima.icon('editcut'))
self.paste_action = create_edit_action('Paste', _('Paste'),
ima.icon('editpaste'))
self.selectall_action = create_edit_action("Select All",
_("Select All"),
ima.icon('selectall'))
self.edit_menu_actions = [self.undo_action, self.redo_action,
None, self.cut_action, self.copy_action,
self.paste_action, self.selectall_action]
namespace = None
self.debug_print(" ..toolbars")
# File menu/toolbar
self.file_menu = self.menuBar().addMenu(_("&File"))
self.file_toolbar = self.create_toolbar(_("File toolbar"),
"file_toolbar")
# Edit menu/toolbar
self.edit_menu = self.menuBar().addMenu(_("&Edit"))
self.edit_toolbar = self.create_toolbar(_("Edit toolbar"),
"edit_toolbar")
# Search menu/toolbar
self.search_menu = self.menuBar().addMenu(_("&Search"))
self.search_toolbar = self.create_toolbar(_("Search toolbar"),
"search_toolbar")
# Source menu/toolbar
self.source_menu = self.menuBar().addMenu(_("Sour&ce"))
self.source_toolbar = self.create_toolbar(_("Source toolbar"),
"source_toolbar")
# Run menu/toolbar
self.run_menu = self.menuBar().addMenu(_("&Run"))
self.run_toolbar = self.create_toolbar(_("Run toolbar"),
"run_toolbar")
# Debug menu/toolbar
self.debug_menu = self.menuBar().addMenu(_("&Debug"))
self.debug_toolbar = self.create_toolbar(_("Debug toolbar"),
"debug_toolbar")
# Consoles menu/toolbar
self.consoles_menu = self.menuBar().addMenu(_("C&onsoles"))
# Projects menu
self.projects_menu = self.menuBar().addMenu(_("&Projects"))
self.projects_menu.aboutToShow.connect(self.valid_project)
# Tools menu
self.tools_menu = self.menuBar().addMenu(_("&Tools"))
# View menu
self.view_menu = self.menuBar().addMenu(_("&View"))
# Help menu
self.help_menu = self.menuBar().addMenu(_("&Help"))
# Status bar
status = self.statusBar()
status.setObjectName("StatusBar")
status.showMessage(_("Welcome to Spyder!"), 5000)
self.debug_print(" ..tools")
# Tools + External Tools
prefs_action = create_action(self, _("Pre&ferences"),
icon=ima.icon('configure'),
triggered=self.edit_preferences,
context=Qt.ApplicationShortcut)
self.register_shortcut(prefs_action, "_", "Preferences",
add_sc_to_tip=True)
spyder_path_action = create_action(self,
_("PYTHONPATH manager"),
None, icon=ima.icon('pythonpath'),
triggered=self.path_manager_callback,
tip=_("Python Path Manager"),
menurole=QAction.ApplicationSpecificRole)
update_modules_action = create_action(self,
_("Update module names list"),
triggered=lambda:
module_completion.reset(),
tip=_("Refresh list of module names "
"available in PYTHONPATH"))
reset_spyder_action = create_action(
self, _("Reset Spyder to factory defaults"),
triggered=self.reset_spyder)
self.tools_menu_actions = [prefs_action, spyder_path_action]
if WinUserEnvDialog is not None:
winenv_action = create_action(self,
_("Current user environment variables..."),
icon='win_env.png',
tip=_("Show and edit current user environment "
"variables in Windows registry "
"(i.e. for all sessions)"),
triggered=self.win_env)
self.tools_menu_actions.append(winenv_action)
self.tools_menu_actions += [reset_spyder_action, MENU_SEPARATOR,
update_modules_action]
# External Tools submenu
self.external_tools_menu = QMenu(_("External Tools"))
self.external_tools_menu_actions = []
# WinPython control panel
self.wp_action = create_action(self, _("WinPython control panel"),
icon=get_icon('winpython.svg'),
triggered=lambda:
programs.run_python_script('winpython', 'controlpanel'))
if os.name == 'nt' and is_module_installed('winpython'):
self.external_tools_menu_actions.append(self.wp_action)
# Qt-related tools
additact = []
for name in ("designer-qt4", "designer"):
qtdact = create_program_action(self, _("Qt Designer"),
name, 'qtdesigner.png')
if qtdact:
break
for name in ("linguist-qt4", "linguist"):
qtlact = create_program_action(self, _("Qt Linguist"),
"linguist", 'qtlinguist.png')
if qtlact:
break
args = ['-no-opengl'] if os.name == 'nt' else []
for act in (qtdact, qtlact):
if act:
additact.append(act)
if additact and is_module_installed('winpython'):
self.external_tools_menu_actions += [None] + additact
# Guidata and Sift
self.debug_print(" ..sift?")
gdgq_act = []
# Guidata and Guiqwt don't support PyQt5 yet and they fail
# with an AssertionError when imported using those bindings
# (see issue 2274)
try:
from guidata import configtools
from guidata import config # analysis:ignore
guidata_icon = configtools.get_icon('guidata.svg')
guidata_act = create_python_script_action(self,
_("guidata examples"), guidata_icon,
"guidata",
osp.join("tests", "__init__"))
gdgq_act += [guidata_act]
except:
pass
try:
from guidata import configtools
from guiqwt import config # analysis:ignore
guiqwt_icon = configtools.get_icon('guiqwt.svg')
guiqwt_act = create_python_script_action(self,
_("guiqwt examples"), guiqwt_icon, "guiqwt",
osp.join("tests", "__init__"))
if guiqwt_act:
gdgq_act += [guiqwt_act]
sift_icon = configtools.get_icon('sift.svg')
sift_act = create_python_script_action(self, _("Sift"),
sift_icon, "guiqwt", osp.join("tests", "sift"))
if sift_act:
gdgq_act += [sift_act]
except:
pass
if gdgq_act:
self.external_tools_menu_actions += [None] + gdgq_act
# ViTables
vitables_act = create_program_action(self, _("ViTables"),
"vitables", 'vitables.png')
if vitables_act:
self.external_tools_menu_actions += [None, vitables_act]
# Maximize current plugin
self.maximize_action = create_action(self, '',
triggered=self.maximize_dockwidget,
context=Qt.ApplicationShortcut)
self.register_shortcut(self.maximize_action, "_", "Maximize pane")
self.__update_maximize_action()
# Fullscreen mode
self.fullscreen_action = create_action(self,
_("Fullscreen mode"),
triggered=self.toggle_fullscreen,
context=Qt.ApplicationShortcut)
self.register_shortcut(self.fullscreen_action, "_",
"Fullscreen mode", add_sc_to_tip=True)
# Main toolbar
self.main_toolbar_actions = [self.maximize_action,
self.fullscreen_action,
None,
prefs_action, spyder_path_action]
self.main_toolbar = self.create_toolbar(_("Main toolbar"),
"main_toolbar")
# Internal console plugin
self.debug_print(" ..plugin: internal console")
from spyder.plugins.console import Console
self.console = Console(self, namespace, exitfunc=self.closing,
profile=self.profile,
multithreaded=self.multithreaded,
message=_("Spyder Internal Console\n\n"
"This console is used to report application\n"
"internal errors and to inspect Spyder\n"
"internals with the following commands:\n"
" spy.app, spy.window, dir(spy)\n\n"
"Please don't use it to run your code\n\n"))
self.console.register_plugin()
# Working directory plugin
self.debug_print(" ..plugin: working directory")
from spyder.plugins.workingdirectory import WorkingDirectory
self.workingdirectory = WorkingDirectory(self, self.init_workdir, main=self)
self.workingdirectory.register_plugin()
self.toolbarslist.append(self.workingdirectory)
# Help plugin
if CONF.get('help', 'enable'):
self.set_splash(_("Loading help..."))
from spyder.plugins.help import Help
self.help = Help(self)
self.help.register_plugin()
# Outline explorer widget
if CONF.get('outline_explorer', 'enable'):
self.set_splash(_("Loading outline explorer..."))
from spyder.plugins.outlineexplorer import OutlineExplorer
self.outlineexplorer = OutlineExplorer(self)
self.outlineexplorer.register_plugin()
# Editor plugin
self.set_splash(_("Loading editor..."))
from spyder.plugins.editor import Editor
self.editor = Editor(self)
self.editor.register_plugin()
# Populating file menu entries
quit_action = create_action(self, _("&Quit"),
icon=ima.icon('exit'),
tip=_("Quit"),
triggered=self.console.quit,
context=Qt.ApplicationShortcut)
self.register_shortcut(quit_action, "_", "Quit")
restart_action = create_action(self, _("&Restart"),
icon=ima.icon('restart'),
tip=_("Restart"),
triggered=self.restart,
context=Qt.ApplicationShortcut)
self.register_shortcut(restart_action, "_", "Restart")
self.file_menu_actions += [self.file_switcher_action,
self.symbol_finder_action, None,
restart_action, quit_action]
self.set_splash("")
self.debug_print(" ..widgets")
# Explorer
if CONF.get('explorer', 'enable'):
self.set_splash(_("Loading file explorer..."))
from spyder.plugins.explorer import Explorer
self.explorer = Explorer(self)
self.explorer.register_plugin()
# History log widget
if CONF.get('historylog', 'enable'):
self.set_splash(_("Loading history plugin..."))
from spyder.plugins.history import HistoryLog
self.historylog = HistoryLog(self)
self.historylog.register_plugin()
# Online help widget
try: # Qt >= v4.4
from spyder.plugins.onlinehelp import OnlineHelp
except ImportError: # Qt < v4.4
OnlineHelp = None # analysis:ignore
if CONF.get('onlinehelp', 'enable') and OnlineHelp is not None:
self.set_splash(_("Loading online help..."))
self.onlinehelp = OnlineHelp(self)
self.onlinehelp.register_plugin()
# Project explorer widget
self.set_splash(_("Loading project explorer..."))
from spyder.plugins.projects import Projects
self.projects = Projects(self)
self.projects.register_plugin()
self.project_path = self.projects.get_pythonpath(at_start=True)
# Find in files
if CONF.get('find_in_files', 'enable'):
from spyder.plugins.findinfiles import FindInFiles
self.findinfiles = FindInFiles(self)
self.findinfiles.register_plugin()
# Namespace browser
self.set_splash(_("Loading namespace browser..."))
from spyder.plugins.variableexplorer import VariableExplorer
self.variableexplorer = VariableExplorer(self)
self.variableexplorer.register_plugin()
# IPython console
self.set_splash(_("Loading IPython console..."))
from spyder.plugins.ipythonconsole import IPythonConsole
self.ipyconsole = IPythonConsole(self)
self.ipyconsole.register_plugin()
self.set_splash(_("Setting up main window..."))
# Help menu
trouble_action = create_action(self,
_("Troubleshooting..."),
triggered=self.trouble_guide)
dep_action = create_action(self, _("Dependencies..."),
triggered=self.show_dependencies,
icon=ima.icon('advanced'))
report_action = create_action(self,
_("Report issue..."),
icon=ima.icon('bug'),
triggered=self.report_issue)
support_action = create_action(self,
_("Spyder support..."),
triggered=self.google_group)
self.check_updates_action = create_action(self,
_("Check for updates..."),
triggered=self.check_updates)
# Spyder documentation
spyder_doc = 'https://docs.spyder-ide.org/'
doc_action = create_action(self, _("Spyder documentation"),
icon=ima.icon('DialogHelpButton'),
triggered=lambda:
programs.start_file(spyder_doc))
self.register_shortcut(doc_action, "_",
"spyder documentation")
if self.help is not None:
tut_action = create_action(self, _("Spyder tutorial"),
triggered=self.help.show_tutorial)
else:
tut_action = None
shortcuts_action = create_action(self, _("Shortcuts Summary"),
shortcut="Meta+F1",
triggered=self.show_shortcuts_dialog)
#----- Tours
self.tour = tour.AnimatedTour(self)
self.tours_menu = QMenu(_("Interactive tours"))
self.tour_menu_actions = []
# TODO: Only show intro tour for now. When we are close to finish
# 3.0, we will finish and show the other tour
self.tours_available = tour.get_tours(0)
for i, tour_available in enumerate(self.tours_available):
self.tours_available[i]['last'] = 0
tour_name = tour_available['name']
def trigger(i=i, self=self): # closure needed!
return lambda: self.show_tour(i)
temp_action = create_action(self, tour_name, tip="",
triggered=trigger())
self.tour_menu_actions += [temp_action]
self.tours_menu.addActions(self.tour_menu_actions)
self.help_menu_actions = [doc_action, tut_action, shortcuts_action,
self.tours_menu,
MENU_SEPARATOR, trouble_action,
report_action, dep_action,
self.check_updates_action, support_action,
MENU_SEPARATOR]
# Python documentation
if get_python_doc_path() is not None:
pydoc_act = create_action(self, _("Python documentation"),
triggered=lambda:
programs.start_file(get_python_doc_path()))
self.help_menu_actions.append(pydoc_act)
# IPython documentation
if self.help is not None:
ipython_menu = QMenu(_("IPython documentation"), self)
intro_action = create_action(self, _("Intro to IPython"),
triggered=self.ipyconsole.show_intro)
quickref_action = create_action(self, _("Quick reference"),
triggered=self.ipyconsole.show_quickref)
guiref_action = create_action(self, _("Console help"),
triggered=self.ipyconsole.show_guiref)
add_actions(ipython_menu, (intro_action, guiref_action,
quickref_action))
self.help_menu_actions.append(ipython_menu)
# Windows-only: documentation located in sys.prefix/Doc
ipm_actions = []
def add_ipm_action(text, path):
"""Add installed Python module doc action to help submenu"""
# QAction.triggered works differently for PySide and PyQt
path = file_uri(path)
if not API == 'pyside':
slot=lambda _checked, path=path: programs.start_file(path)
else:
slot=lambda path=path: programs.start_file(path)
action = create_action(self, text,
icon='%s.png' % osp.splitext(path)[1][1:],
triggered=slot)
ipm_actions.append(action)
sysdocpth = osp.join(sys.prefix, 'Doc')
if osp.isdir(sysdocpth): # exists on Windows, except frozen dist.
for docfn in os.listdir(sysdocpth):
                pt = r'([a-zA-Z\_]*)(doc)?(-dev)?(-ref)?(-user)?\.(chm|pdf)'
match = re.match(pt, docfn)
if match is not None:
pname = match.groups()[0]
if pname not in ('Python', ):
add_ipm_action(pname, osp.join(sysdocpth, docfn))
# Installed Python modules submenu (Windows only)
if ipm_actions:
pymods_menu = QMenu(_("Installed Python modules"), self)
add_actions(pymods_menu, ipm_actions)
self.help_menu_actions.append(pymods_menu)
# Online documentation
web_resources = QMenu(_("Online documentation"))
webres_actions = create_module_bookmark_actions(self,
self.BOOKMARKS)
webres_actions.insert(2, None)
webres_actions.insert(5, None)
webres_actions.insert(8, None)
add_actions(web_resources, webres_actions)
self.help_menu_actions.append(web_resources)
# Qt assistant link
if sys.platform.startswith('linux') and not PYQT5:
qta_exe = "assistant-qt4"
else:
qta_exe = "assistant"
qta_act = create_program_action(self, _("Qt documentation"),
qta_exe)
if qta_act:
self.help_menu_actions += [qta_act, None]
# About Spyder
about_action = create_action(self,
_("About %s...") % "Spyder",
icon=ima.icon('MessageBoxInformation'),
triggered=self.about)
self.help_menu_actions += [MENU_SEPARATOR, about_action]
# Status bar widgets
from spyder.widgets.status import MemoryStatus, CPUStatus
self.mem_status = MemoryStatus(self, status)
self.cpu_status = CPUStatus(self, status)
self.apply_statusbar_settings()
# Third-party plugins
for mod in get_spyderplugins_mods():
try:
plugin = mod.PLUGIN_CLASS(self)
try:
# Not all the plugins have the check_compatibility method
# i.e Breakpoints, Profiler, Pylint
check = plugin.check_compatibility()[0]
except AttributeError:
check = True
if check:
self.thirdparty_plugins.append(plugin)
plugin.register_plugin()
except Exception as error:
print("%s: %s" % (mod, str(error)), file=STDERR)
traceback.print_exc(file=STDERR)
#----- View
# View menu
self.plugins_menu = QMenu(_("Panes"), self)
self.toolbars_menu = QMenu(_("Toolbars"), self)
self.quick_layout_menu = QMenu(_("Window layouts"), self)
self.quick_layout_set_menu()
self.view_menu.addMenu(self.plugins_menu) # Panes
add_actions(self.view_menu, (self.lock_dockwidgets_action,
self.close_dockwidget_action,
self.maximize_action,
MENU_SEPARATOR))
self.show_toolbars_action = create_action(self,
_("Show toolbars"),
triggered=self.show_toolbars,
context=Qt.ApplicationShortcut)
self.register_shortcut(self.show_toolbars_action, "_",
"Show toolbars")
self.view_menu.addMenu(self.toolbars_menu)
self.view_menu.addAction(self.show_toolbars_action)
add_actions(self.view_menu, (MENU_SEPARATOR,
self.quick_layout_menu,
self.toggle_previous_layout_action,
self.toggle_next_layout_action,
MENU_SEPARATOR,
self.fullscreen_action))
if set_attached_console_visible is not None:
cmd_act = create_action(self,
_("Attached console window (debugging)"),
toggled=set_attached_console_visible)
cmd_act.setChecked(is_attached_console_visible())
add_actions(self.view_menu, (MENU_SEPARATOR, cmd_act))
# Adding external tools action to "Tools" menu
if self.external_tools_menu_actions:
external_tools_act = create_action(self, _("External Tools"))
external_tools_act.setMenu(self.external_tools_menu)
self.tools_menu_actions += [None, external_tools_act]
# Filling out menu/toolbar entries:
add_actions(self.file_menu, self.file_menu_actions)
add_actions(self.edit_menu, self.edit_menu_actions)
add_actions(self.search_menu, self.search_menu_actions)
add_actions(self.source_menu, self.source_menu_actions)
add_actions(self.run_menu, self.run_menu_actions)
add_actions(self.debug_menu, self.debug_menu_actions)
add_actions(self.consoles_menu, self.consoles_menu_actions)
add_actions(self.projects_menu, self.projects_menu_actions)
add_actions(self.tools_menu, self.tools_menu_actions)
add_actions(self.external_tools_menu,
self.external_tools_menu_actions)
add_actions(self.help_menu, self.help_menu_actions)
add_actions(self.main_toolbar, self.main_toolbar_actions)
add_actions(self.file_toolbar, self.file_toolbar_actions)
add_actions(self.edit_toolbar, self.edit_toolbar_actions)
add_actions(self.search_toolbar, self.search_toolbar_actions)
add_actions(self.source_toolbar, self.source_toolbar_actions)
add_actions(self.debug_toolbar, self.debug_toolbar_actions)
add_actions(self.run_toolbar, self.run_toolbar_actions)
# Apply all defined shortcuts (plugins + 3rd-party plugins)
self.apply_shortcuts()
# Emitting the signal notifying plugins that main window menu and
# toolbar actions are all defined:
self.all_actions_defined.emit()
# Window set-up
self.debug_print("Setting up window...")
self.setup_layout(default=False)
# Show and hide shortcuts in menus for Mac.
# This is a workaround because we can't disable shortcuts
# by setting context=Qt.WidgetShortcut there
if sys.platform == 'darwin':
for name in ['file', 'edit', 'search', 'source', 'run', 'debug',
'projects', 'tools', 'plugins']:
menu_object = getattr(self, name + '_menu')
menu_object.aboutToShow.connect(
lambda name=name: self.show_shortcuts(name))
menu_object.aboutToHide.connect(
lambda name=name: self.hide_shortcuts(name))
if self.splash is not None:
self.splash.hide()
# Enabling tear off for all menus except help menu
if CONF.get('main', 'tear_off_menus'):
for child in self.menuBar().children():
if isinstance(child, QMenu) and child != self.help_menu:
child.setTearOffEnabled(True)
# Menu about to show
for child in self.menuBar().children():
if isinstance(child, QMenu):
try:
child.aboutToShow.connect(self.update_edit_menu)
except TypeError:
pass
self.debug_print("*** End of MainWindow setup ***")
self.is_starting_up = False
def post_visible_setup(self):
"""Actions to be performed only after the main window's `show` method
was triggered"""
self.restore_scrollbar_position.emit()
# Remove our temporary dir
atexit.register(self.remove_tmpdir)
# [Workaround for Issue 880]
# QDockWidget objects are not painted if restored as floating
# windows, so we must dock them before showing the mainwindow,
# then set them again as floating windows here.
for widget in self.floating_dockwidgets:
widget.setFloating(True)
        # In Mac OS X 10.7 our app is not displayed after it is initialized (I don't
# know why because this doesn't happen when started from the terminal),
# so we need to resort to this hack to make it appear.
if running_in_mac_app():
idx = __file__.index(MAC_APP_NAME)
app_path = __file__[:idx]
subprocess.call(['open', app_path + MAC_APP_NAME])
# Server to maintain just one Spyder instance and open files in it if
# the user tries to start other instances with
# $ spyder foo.py
if (CONF.get('main', 'single_instance') and not self.new_instance
and self.open_files_server):
t = threading.Thread(target=self.start_open_files_server)
t.setDaemon(True)
t.start()
        # Connect the window to the signal emitted by the previous server
# when it gets a client connected to it
self.sig_open_external_file.connect(self.open_external_file)
# Create Plugins and toolbars submenus
self.create_plugins_menu()
self.create_toolbars_menu()
# Update toolbar visibility status
self.toolbars_visible = CONF.get('main', 'toolbars_visible')
self.load_last_visible_toolbars()
        # Update the lock status of dockwidgets (panes)
self.lock_dockwidgets_action.setChecked(self.dockwidgets_locked)
self.apply_panes_settings()
# Hide Internal Console so that people don't use it instead of
# the External or IPython ones
if self.console.dockwidget.isVisible() and DEV is None:
self.console.toggle_view_action.setChecked(False)
self.console.dockwidget.hide()
# Show Help and Consoles by default
plugins_to_show = [self.ipyconsole]
if self.help is not None:
plugins_to_show.append(self.help)
for plugin in plugins_to_show:
if plugin.dockwidget.isVisible():
plugin.dockwidget.raise_()
# Show history file if no console is visible
if not self.ipyconsole.isvisible:
self.historylog.add_history(get_conf_path('history.py'))
if self.open_project:
self.projects.open_project(self.open_project)
else:
# Load last project if a project was active when Spyder
# was closed
self.projects.reopen_last_project()
# If no project is active, load last session
if self.projects.get_active_project() is None:
self.editor.setup_open_files()
# Check for spyder updates
if DEV is None and CONF.get('main', 'check_updates_on_startup'):
self.give_updates_feedback = False
self.check_updates(startup=True)
# Show dialog with missing dependencies
self.report_missing_dependencies()
self.is_setting_up = False
def set_window_title(self):
"""Set window title."""
if DEV is not None:
title = u"Spyder %s (Python %s.%s)" % (__version__,
sys.version_info[0],
sys.version_info[1])
else:
title = u"Spyder (Python %s.%s)" % (sys.version_info[0],
sys.version_info[1])
if DEBUG:
title += u" [DEBUG MODE %d]" % DEBUG
if self.window_title is not None:
title += u' -- ' + to_text_string(self.window_title)
if self.projects is not None:
path = self.projects.get_active_project_path()
if path:
path = path.replace(get_home_dir(), u'~')
title = u'{0} - {1}'.format(path, title)
self.base_title = title
self.setWindowTitle(self.base_title)
def report_missing_dependencies(self):
"""Show a QMessageBox with a list of missing hard dependencies"""
missing_deps = dependencies.missing_dependencies()
if missing_deps:
QMessageBox.critical(self, _('Error'),
_("<b>You have missing dependencies!</b>"
"<br><br><tt>%s</tt><br><br>"
"<b>Please install them to avoid this message.</b>"
"<br><br>"
"<i>Note</i>: Spyder could work without some of these "
"dependencies, however to have a smooth experience when "
"using Spyder we <i>strongly</i> recommend you to install "
"all the listed missing dependencies.<br><br>"
"Failing to install these dependencies might result in bugs. "
"Please be sure that any found bugs are not the direct "
"result of missing dependencies, prior to reporting a new "
"issue."
) % missing_deps, QMessageBox.Ok)
def load_window_settings(self, prefix, default=False, section='main'):
"""Load window layout settings from userconfig-based configuration
with *prefix*, under *section*
default: if True, do not restore inner layout"""
get_func = CONF.get_default if default else CONF.get
window_size = get_func(section, prefix+'size')
prefs_dialog_size = get_func(section, prefix+'prefs_dialog_size')
if default:
hexstate = None
else:
hexstate = get_func(section, prefix+'state', None)
pos = get_func(section, prefix+'position')
        # It's necessary to verify that the saved window position is valid for
        # the current screen. See issue 3748
width = pos[0]
height = pos[1]
screen_shape = QApplication.desktop().geometry()
current_width = screen_shape.width()
current_height = screen_shape.height()
if current_width < width or current_height < height:
pos = CONF.get_default(section, prefix+'position')
is_maximized = get_func(section, prefix+'is_maximized')
is_fullscreen = get_func(section, prefix+'is_fullscreen')
return hexstate, window_size, prefs_dialog_size, pos, is_maximized, \
is_fullscreen
def get_window_settings(self):
"""Return current window settings
        Symmetric to the 'set_window_settings' setter"""
window_size = (self.window_size.width(), self.window_size.height())
is_fullscreen = self.isFullScreen()
if is_fullscreen:
is_maximized = self.maximized_flag
else:
is_maximized = self.isMaximized()
pos = (self.window_position.x(), self.window_position.y())
prefs_dialog_size = (self.prefs_dialog_size.width(),
self.prefs_dialog_size.height())
hexstate = qbytearray_to_str(self.saveState())
return (hexstate, window_size, prefs_dialog_size, pos, is_maximized,
is_fullscreen)
def set_window_settings(self, hexstate, window_size, prefs_dialog_size,
pos, is_maximized, is_fullscreen):
"""Set window settings
        Symmetric to the 'get_window_settings' accessor"""
self.setUpdatesEnabled(False)
self.window_size = QSize(window_size[0], window_size[1]) # width,height
self.prefs_dialog_size = QSize(prefs_dialog_size[0],
prefs_dialog_size[1]) # width,height
self.window_position = QPoint(pos[0], pos[1]) # x,y
self.setWindowState(Qt.WindowNoState)
self.resize(self.window_size)
self.move(self.window_position)
# Window layout
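        # 'hexstate' is the hex-encoded result of QMainWindow.saveState();
        # restoring it rebuilds the dockwidget and toolbar arrangement.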
if hexstate:
self.restoreState( QByteArray().fromHex(
str(hexstate).encode('utf-8')) )
# [Workaround for Issue 880]
# QDockWidget objects are not painted if restored as floating
# windows, so we must dock them before showing the mainwindow.
for widget in self.children():
if isinstance(widget, QDockWidget) and widget.isFloating():
self.floating_dockwidgets.append(widget)
widget.setFloating(False)
# Is fullscreen?
if is_fullscreen:
self.setWindowState(Qt.WindowFullScreen)
self.__update_fullscreen_action()
# Is maximized?
if is_fullscreen:
self.maximized_flag = is_maximized
elif is_maximized:
self.setWindowState(Qt.WindowMaximized)
self.setUpdatesEnabled(True)
def save_current_window_settings(self, prefix, section='main',
none_state=False):
"""Save current window settings with *prefix* in
the userconfig-based configuration, under *section*"""
win_size = self.window_size
prefs_size = self.prefs_dialog_size
CONF.set(section, prefix+'size', (win_size.width(), win_size.height()))
CONF.set(section, prefix+'prefs_dialog_size',
(prefs_size.width(), prefs_size.height()))
CONF.set(section, prefix+'is_maximized', self.isMaximized())
CONF.set(section, prefix+'is_fullscreen', self.isFullScreen())
pos = self.window_position
CONF.set(section, prefix+'position', (pos.x(), pos.y()))
        self.maximize_dockwidget(restore=True)  # Restore non-maximized layout
if none_state:
CONF.set(section, prefix + 'state', None)
else:
qba = self.saveState()
CONF.set(section, prefix + 'state', qbytearray_to_str(qba))
CONF.set(section, prefix+'statusbar',
not self.statusBar().isHidden())
def tabify_plugins(self, first, second):
"""Tabify plugin dockwigdets"""
self.tabifyDockWidget(first.dockwidget, second.dockwidget)
# --- Layouts
def setup_layout(self, default=False):
"""Setup window layout"""
prefix = 'window' + '/'
settings = self.load_window_settings(prefix, default)
hexstate = settings[0]
self.first_spyder_run = False
if hexstate is None:
# First Spyder execution:
self.setWindowState(Qt.WindowMaximized)
self.first_spyder_run = True
self.setup_default_layouts('default', settings)
# Now that the initial setup is done, copy the window settings,
# except for the hexstate in the quick layouts sections for the
# default layouts.
            # The order and names of the default layouts are found in config.py
section = 'quick_layouts'
get_func = CONF.get_default if default else CONF.get
order = get_func(section, 'order')
# restore the original defaults if reset layouts is called
if default:
CONF.set(section, 'active', order)
CONF.set(section, 'order', order)
CONF.set(section, 'names', order)
            for index, name in enumerate(order):
prefix = 'layout_{0}/'.format(index)
self.save_current_window_settings(prefix, section,
none_state=True)
# store the initial layout as the default in spyder
prefix = 'layout_default/'
section = 'quick_layouts'
self.save_current_window_settings(prefix, section, none_state=True)
self.current_quick_layout = 'default'
# Regenerate menu
self.quick_layout_set_menu()
self.set_window_settings(*settings)
for plugin in self.widgetlist:
try:
plugin.initialize_plugin_in_mainwindow_layout()
except Exception as error:
print("%s: %s" % (plugin, str(error)), file=STDERR)
traceback.print_exc(file=STDERR)
def setup_default_layouts(self, index, settings):
"""Setup default layouts when run for the first time"""
self.set_window_settings(*settings)
self.setUpdatesEnabled(False)
# IMPORTANT: order has to be the same as defined in the config file
MATLAB, RSTUDIO, VERTICAL, HORIZONTAL = range(self.DEFAULT_LAYOUTS)
# define widgets locally
editor = self.editor
console_ipy = self.ipyconsole
console_int = self.console
outline = self.outlineexplorer
explorer_project = self.projects
explorer_file = self.explorer
explorer_variable = self.variableexplorer
history = self.historylog
finder = self.findinfiles
help_plugin = self.help
helper = self.onlinehelp
plugins = self.thirdparty_plugins
global_hidden_widgets = [finder, console_int, explorer_project,
helper] + plugins
global_hidden_toolbars = [self.source_toolbar, self.edit_toolbar,
self.search_toolbar]
# Layout definition
        # Layouts are organized by columns; each column is organized by rows.
        # Column widths have to add up to 1.0, and the row heights within each
        # column have to add up to 1.0.
# Spyder Default Initial Layout
s_layout = {'widgets': [
# column 0
[[explorer_project]],
# column 1
[[editor]],
# column 2
[[outline]],
# column 3
[[help_plugin, explorer_variable, helper, explorer_file,
finder] + plugins,
[console_int, console_ipy, history]]
],
'width fraction': [0.0, # column 0 width
0.55, # column 1 width
0.0, # column 2 width
0.45], # column 3 width
'height fraction': [[1.0], # column 0, row heights
[1.0], # column 1, row heights
[1.0], # column 2, row heights
[0.46, 0.54]], # column 3, row heights
'hidden widgets': [outline],
'hidden toolbars': [],
}
r_layout = {'widgets': [
# column 0
[[editor],
[console_ipy, console_int]],
# column 1
[[explorer_variable, history, outline, finder] + plugins,
[explorer_file, explorer_project, help_plugin, helper]]
],
'width fraction': [0.55, # column 0 width
0.45], # column 1 width
'height fraction': [[0.55, 0.45], # column 0, row heights
[0.55, 0.45]], # column 1, row heights
'hidden widgets': [outline],
'hidden toolbars': [],
}
# Matlab
m_layout = {'widgets': [
# column 0
[[explorer_file, explorer_project],
[outline]],
# column 1
[[editor],
[console_ipy, console_int]],
# column 2
[[explorer_variable, finder] + plugins,
[history, help_plugin, helper]]
],
'width fraction': [0.20, # column 0 width
0.40, # column 1 width
0.40], # column 2 width
'height fraction': [[0.55, 0.45], # column 0, row heights
[0.55, 0.45], # column 1, row heights
[0.55, 0.45]], # column 2, row heights
'hidden widgets': [],
'hidden toolbars': [],
}
# Vertically split
v_layout = {'widgets': [
# column 0
[[editor],
[console_ipy, console_int, explorer_file,
explorer_project, help_plugin, explorer_variable,
history, outline, finder, helper] + plugins]
],
'width fraction': [1.0], # column 0 width
'height fraction': [[0.55, 0.45]], # column 0, row heights
'hidden widgets': [outline],
'hidden toolbars': [],
}
# Horizontally split
h_layout = {'widgets': [
# column 0
[[editor]],
# column 1
[[console_ipy, console_int, explorer_file,
explorer_project, help_plugin, explorer_variable,
history, outline, finder, helper] + plugins]
],
'width fraction': [0.55, # column 0 width
0.45], # column 1 width
'height fraction': [[1.0], # column 0, row heights
[1.0]], # column 1, row heights
'hidden widgets': [outline],
'hidden toolbars': []
}
# Layout selection
layouts = {'default': s_layout,
RSTUDIO: r_layout,
MATLAB: m_layout,
VERTICAL: v_layout,
HORIZONTAL: h_layout}
layout = layouts[index]
widgets_layout = layout['widgets']
widgets = []
        for column in widgets_layout:
for row in column:
for widget in row:
if widget is not None:
widgets.append(widget)
# Make every widget visible
for widget in widgets:
widget.toggle_view(True)
action = widget.toggle_view_action
try:
action.setChecked(widget.dockwidget.isVisible())
except:
pass
# Set the widgets horizontally
for i in range(len(widgets) - 1):
first, second = widgets[i], widgets[i+1]
if first is not None and second is not None:
self.splitDockWidget(first.dockwidget, second.dockwidget,
Qt.Horizontal)
# Arrange rows vertically
        for column in widgets_layout:
for i in range(len(column) - 1):
first_row, second_row = column[i], column[i+1]
if first_row is not None and second_row is not None:
self.splitDockWidget(first_row[0].dockwidget,
second_row[0].dockwidget,
Qt.Vertical)
# Tabify
        for column in widgets_layout:
for row in column:
for i in range(len(row) - 1):
first, second = row[i], row[i+1]
if first is not None and second is not None:
self.tabify_plugins(first, second)
# Raise front widget per row
row[0].dockwidget.show()
row[0].dockwidget.raise_()
# Hide toolbars
hidden_toolbars = global_hidden_toolbars + layout['hidden toolbars']
for toolbar in hidden_toolbars:
if toolbar is not None:
toolbar.close()
# Hide widgets
hidden_widgets = global_hidden_widgets + layout['hidden widgets']
for widget in hidden_widgets:
if widget is not None:
widget.dockwidget.close()
# set the width and height
self._layout_widget_info = []
width, height = self.window_size.width(), self.window_size.height()
# fix column width
# for c in range(len(widgets_layout)):
# widget = widgets_layout[c][0][0].dockwidget
# min_width, max_width = widget.minimumWidth(), widget.maximumWidth()
# info = {'widget': widget,
# 'min width': min_width,
# 'max width': max_width}
# self._layout_widget_info.append(info)
# new_width = int(layout['width fraction'][c] * width * 0.95)
# widget.setMinimumWidth(new_width)
# widget.setMaximumWidth(new_width)
# widget.updateGeometry()
# fix column height
for c, column in enumerate(widgets_layout):
for r in range(len(column) - 1):
widget = column[r][0]
dockwidget = widget.dockwidget
dock_min_h = dockwidget.minimumHeight()
dock_max_h = dockwidget.maximumHeight()
info = {'widget': widget,
'dock min height': dock_min_h,
'dock max height': dock_max_h}
self._layout_widget_info.append(info)
                # The 0.95 factor adjusts the height to the useful
                # (estimated) area of the window
new_height = int(layout['height fraction'][c][r]*height*0.95)
dockwidget.setMinimumHeight(new_height)
dockwidget.setMaximumHeight(new_height)
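        # The temporary min/max heights force Qt to honour the requested
        # fractions; a single-shot timer removes these constraints afterwards
        # (see layout_fix_timer).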
self._custom_layout_timer = QTimer(self)
self._custom_layout_timer.timeout.connect(self.layout_fix_timer)
self._custom_layout_timer.setSingleShot(True)
self._custom_layout_timer.start(5000)
def layout_fix_timer(self):
"""Fixes the height of docks after a new layout is set."""
info = self._layout_widget_info
for i in info:
dockwidget = i['widget'].dockwidget
if 'dock min width' in i:
dockwidget.setMinimumWidth(i['dock min width'])
dockwidget.setMaximumWidth(i['dock max width'])
if 'dock min height' in i:
dockwidget.setMinimumHeight(i['dock min height'])
dockwidget.setMaximumHeight(i['dock max height'])
dockwidget.updateGeometry()
self.setUpdatesEnabled(True)
@Slot()
def toggle_previous_layout(self):
""" """
self.toggle_layout('previous')
@Slot()
def toggle_next_layout(self):
""" """
self.toggle_layout('next')
def toggle_layout(self, direction='next'):
""" """
get = CONF.get
names = get('quick_layouts', 'names')
order = get('quick_layouts', 'order')
active = get('quick_layouts', 'active')
if len(active) == 0:
return
layout_index = ['default']
for name in order:
if name in active:
layout_index.append(names.index(name))
current_layout = self.current_quick_layout
dic = {'next': 1, 'previous': -1}
if current_layout is None:
# Start from default
current_layout = 'default'
if current_layout in layout_index:
current_index = layout_index.index(current_layout)
else:
current_index = 0
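        # Wrap around the list of active layouts using modular arithmetic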
new_index = (current_index + dic[direction]) % len(layout_index)
self.quick_layout_switch(layout_index[new_index])
def quick_layout_set_menu(self):
""" """
get = CONF.get
names = get('quick_layouts', 'names')
order = get('quick_layouts', 'order')
active = get('quick_layouts', 'active')
ql_actions = []
ql_actions = [create_action(self, _('Spyder Default Layout'),
triggered=lambda:
self.quick_layout_switch('default'))]
for name in order:
if name in active:
index = names.index(name)
# closure required so lambda works with the default parameter
def trigger(i=index, self=self):
return lambda: self.quick_layout_switch(i)
qli_act = create_action(self, name, triggered=trigger())
# closure above replaces the following which stopped working
# qli_act = create_action(self, name, triggered=lambda i=index:
# self.quick_layout_switch(i)
ql_actions += [qli_act]
self.ql_save = create_action(self, _("Save current layout"),
triggered=lambda:
self.quick_layout_save(),
context=Qt.ApplicationShortcut)
self.ql_preferences = create_action(self, _("Layout preferences"),
triggered=lambda:
self.quick_layout_settings(),
context=Qt.ApplicationShortcut)
self.ql_reset = create_action(self, _('Reset to spyder default'),
triggered=self.reset_window_layout)
self.register_shortcut(self.ql_save, "_", "Save current layout")
self.register_shortcut(self.ql_preferences, "_", "Layout preferences")
ql_actions += [None]
ql_actions += [self.ql_save, self.ql_preferences, self.ql_reset]
self.quick_layout_menu.clear()
add_actions(self.quick_layout_menu, ql_actions)
if len(order) == 0:
self.ql_preferences.setEnabled(False)
else:
self.ql_preferences.setEnabled(True)
@Slot()
def reset_window_layout(self):
"""Reset window layout to default"""
answer = QMessageBox.warning(self, _("Warning"),
_("Window layout will be reset to default settings: "
"this affects window position, size and dockwidgets.\n"
"Do you want to continue?"),
QMessageBox.Yes | QMessageBox.No)
if answer == QMessageBox.Yes:
self.setup_layout(default=True)
def quick_layout_save(self):
"""Save layout dialog"""
get = CONF.get
set_ = CONF.set
names = get('quick_layouts', 'names')
order = get('quick_layouts', 'order')
active = get('quick_layouts', 'active')
dlg = self.dialog_layout_save(self, names)
if dlg.exec_():
name = dlg.combo_box.currentText()
if name in names:
answer = QMessageBox.warning(self, _("Warning"),
_("Layout <b>%s</b> will be \
overwritten. Do you want to \
continue?") % name,
QMessageBox.Yes | QMessageBox.No)
index = order.index(name)
else:
answer = True
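                # New layout name: reuse a free (None) slot if one exists,
                # otherwise append it to the known names and order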
if None in names:
index = names.index(None)
names[index] = name
else:
index = len(names)
names.append(name)
order.append(name)
            # Always make a new layout active, even if it overwrites an
            # inactive layout
if name not in active:
active.append(name)
if answer:
self.save_current_window_settings('layout_{}/'.format(index),
section='quick_layouts')
set_('quick_layouts', 'names', names)
set_('quick_layouts', 'order', order)
set_('quick_layouts', 'active', active)
self.quick_layout_set_menu()
def quick_layout_settings(self):
"""Layout settings dialog"""
get = CONF.get
set_ = CONF.set
section = 'quick_layouts'
names = get(section, 'names')
order = get(section, 'order')
active = get(section, 'active')
dlg = self.dialog_layout_settings(self, names, order, active)
if dlg.exec_():
set_(section, 'names', dlg.names)
set_(section, 'order', dlg.order)
set_(section, 'active', dlg.active)
self.quick_layout_set_menu()
def quick_layout_switch(self, index):
"""Switch to quick layout number *index*"""
section = 'quick_layouts'
try:
settings = self.load_window_settings('layout_{}/'.format(index),
section=section)
(hexstate, window_size, prefs_dialog_size, pos, is_maximized,
is_fullscreen) = settings
            # The default layouts will always be regenerated unless they were
            # overwritten, either by rewriting with the same name or by
            # deleting and then creating a new one
if hexstate is None:
# The value for hexstate shouldn't be None for a custom saved
# layout (ie, where the index is greater than the number of
# defaults). See issue 6202.
if index != 'default' and index >= self.DEFAULT_LAYOUTS:
QMessageBox.critical(
self, _("Warning"),
_("Error opening the custom layout. Please close"
" Spyder and try again. If the issue persists,"
" then you must use 'Reset to Spyder default' "
"from the layout menu."))
return
self.setup_default_layouts(index, settings)
except cp.NoOptionError:
QMessageBox.critical(self, _("Warning"),
_("Quick switch layout #%s has not yet "
"been defined.") % str(index))
return
# TODO: is there any real use in calling the previous layout
# setting?
# self.previous_layout_settings = self.get_window_settings()
self.set_window_settings(*settings)
self.current_quick_layout = index
# make sure the flags are correctly set for visible panes
for plugin in self.widgetlist:
action = plugin.toggle_view_action
action.setChecked(plugin.dockwidget.isVisible())
# --- Show/Hide toolbars
def _update_show_toolbars_action(self):
"""Update the text displayed in the menu entry."""
if self.toolbars_visible:
text = _("Hide toolbars")
tip = _("Hide toolbars")
else:
text = _("Show toolbars")
tip = _("Show toolbars")
self.show_toolbars_action.setText(text)
self.show_toolbars_action.setToolTip(tip)
def save_visible_toolbars(self):
"""Saves the name of the visible toolbars in the .ini file."""
toolbars = []
for toolbar in self.visible_toolbars:
toolbars.append(toolbar.objectName())
CONF.set('main', 'last_visible_toolbars', toolbars)
def get_visible_toolbars(self):
"""Collects the visible toolbars."""
toolbars = []
for toolbar in self.toolbarslist:
if toolbar.toggleViewAction().isChecked():
toolbars.append(toolbar)
self.visible_toolbars = toolbars
def load_last_visible_toolbars(self):
"""Loads the last visible toolbars from the .ini file."""
toolbars_names = CONF.get('main', 'last_visible_toolbars', default=[])
if toolbars_names:
dic = {}
for toolbar in self.toolbarslist:
dic[toolbar.objectName()] = toolbar
toolbars = []
for name in toolbars_names:
if name in dic:
toolbars.append(dic[name])
self.visible_toolbars = toolbars
else:
self.get_visible_toolbars()
self._update_show_toolbars_action()
@Slot()
def show_toolbars(self):
"""Show/Hides toolbars."""
value = not self.toolbars_visible
CONF.set('main', 'toolbars_visible', value)
if value:
self.save_visible_toolbars()
else:
self.get_visible_toolbars()
for toolbar in self.visible_toolbars:
toolbar.toggleViewAction().setChecked(value)
toolbar.setVisible(value)
self.toolbars_visible = value
self._update_show_toolbars_action()
# --- Other
def valid_project(self):
"""Handle an invalid active project."""
if bool(self.projects.get_active_project_path()):
path = self.projects.get_active_project_path()
if not self.projects.is_valid_project(path):
if path:
QMessageBox.critical(
self,
_('Error'),
_("<b>{}</b> is no longer a valid Spyder project! "
"Since it is the current active project, it will "
"be closed automatically.").format(path))
self.projects.close_project()
def free_memory(self):
"""Free memory after event."""
gc.collect()
def plugin_focus_changed(self):
"""Focus has changed from one plugin to another"""
self.update_edit_menu()
self.update_search_menu()
def show_shortcuts(self, menu):
"""Show action shortcuts in menu"""
for element in getattr(self, menu + '_menu_actions'):
if element and isinstance(element, QAction):
if element._shown_shortcut is not None:
element.setShortcut(element._shown_shortcut)
def hide_shortcuts(self, menu):
"""Hide action shortcuts in menu"""
for element in getattr(self, menu + '_menu_actions'):
if element and isinstance(element, QAction):
if element._shown_shortcut is not None:
element.setShortcut(QKeySequence())
def get_focus_widget_properties(self):
"""Get properties of focus widget
Returns tuple (widget, properties) where properties is a tuple of
booleans: (is_console, not_readonly, readwrite_editor)"""
widget = QApplication.focusWidget()
from spyder.widgets.shell import ShellBaseWidget
from spyder.widgets.editor import TextEditBaseWidget
from spyder.widgets.ipythonconsole import ControlWidget
        # If the focused widget isn't valid, try the last focused one
if not isinstance(widget, (ShellBaseWidget, TextEditBaseWidget,
ControlWidget)):
widget = self.previous_focused_widget
textedit_properties = None
if isinstance(widget, (ShellBaseWidget, TextEditBaseWidget,
ControlWidget)):
console = isinstance(widget, (ShellBaseWidget, ControlWidget))
not_readonly = not widget.isReadOnly()
readwrite_editor = not_readonly and not console
textedit_properties = (console, not_readonly, readwrite_editor)
return widget, textedit_properties
def update_edit_menu(self):
"""Update edit menu"""
widget, textedit_properties = self.get_focus_widget_properties()
if textedit_properties is None: # widget is not an editor/console
return
#!!! Below this line, widget is expected to be a QPlainTextEdit instance
console, not_readonly, readwrite_editor = textedit_properties
# Editor has focus and there is no file opened in it
if not console and not_readonly and not self.editor.is_file_opened():
return
# Disabling all actions to begin with
for child in self.edit_menu.actions():
child.setEnabled(False)
self.selectall_action.setEnabled(True)
# Undo, redo
self.undo_action.setEnabled( readwrite_editor \
and widget.document().isUndoAvailable() )
self.redo_action.setEnabled( readwrite_editor \
and widget.document().isRedoAvailable() )
# Copy, cut, paste, delete
has_selection = widget.has_selected_text()
self.copy_action.setEnabled(has_selection)
self.cut_action.setEnabled(has_selection and not_readonly)
self.paste_action.setEnabled(not_readonly)
# Comment, uncomment, indent, unindent...
if not console and not_readonly:
# This is the editor and current file is writable
for action in self.editor.edit_menu_actions:
action.setEnabled(True)
def update_search_menu(self):
"""Update search menu"""
if self.menuBar().hasFocus():
return
widget, textedit_properties = self.get_focus_widget_properties()
for action in self.editor.search_menu_actions:
try:
action.setEnabled(self.editor.isAncestorOf(widget))
except RuntimeError:
pass
if textedit_properties is None: # widget is not an editor/console
return
#!!! Below this line, widget is expected to be a QPlainTextEdit instance
_x, _y, readwrite_editor = textedit_properties
# Disable the replace action for read-only files
self.search_menu_actions[3].setEnabled(readwrite_editor)
def create_plugins_menu(self):
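        """Create the 'Panes' menu, keeping plugin toggle actions in a fixed
        order."""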
order = ['editor', 'console', 'ipython_console', 'variable_explorer',
'help', None, 'explorer', 'outline_explorer',
'project_explorer', 'find_in_files', None, 'historylog',
'profiler', 'breakpoints', 'pylint', None,
'onlinehelp', 'internal_console']
for plugin in self.widgetlist:
action = plugin.toggle_view_action
action.setChecked(plugin.dockwidget.isVisible())
try:
name = plugin.CONF_SECTION
pos = order.index(name)
except ValueError:
pos = None
if pos is not None:
order[pos] = action
else:
order.append(action)
actions = order[:]
for action in order:
if type(action) is str:
actions.remove(action)
self.plugins_menu_actions = actions
add_actions(self.plugins_menu, actions)
def create_toolbars_menu(self):
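        """Create the 'Toolbars' menu, keeping toolbar toggle actions in a
        fixed order."""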
order = ['file_toolbar', 'run_toolbar', 'debug_toolbar',
'main_toolbar', 'Global working directory', None,
'search_toolbar', 'edit_toolbar', 'source_toolbar']
for toolbar in self.toolbarslist:
action = toolbar.toggleViewAction()
name = toolbar.objectName()
try:
pos = order.index(name)
except ValueError:
pos = None
if pos is not None:
order[pos] = action
else:
order.append(action)
add_actions(self.toolbars_menu, order)
def createPopupMenu(self):
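        """Reimplement Qt method: return a minimal popup menu built from the
        first help actions plus the About entry."""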
menu = QMenu('', self)
actions = self.help_menu_actions[:3] + \
[None, self.help_menu_actions[-1]]
add_actions(menu, actions)
return menu
def set_splash(self, message):
"""Set splash message"""
if self.splash is None:
return
if message:
self.debug_print(message)
self.splash.show()
self.splash.showMessage(message, Qt.AlignBottom | Qt.AlignCenter |
Qt.AlignAbsolute, QColor(Qt.black))
QApplication.processEvents()
def remove_tmpdir(self):
"""Remove Spyder temporary directory"""
if CONF.get('main', 'single_instance') and not self.new_instance:
shutil.rmtree(programs.TEMPDIR, ignore_errors=True)
def closeEvent(self, event):
"""closeEvent reimplementation"""
if self.closing(True):
event.accept()
else:
event.ignore()
def resizeEvent(self, event):
"""Reimplement Qt method"""
if not self.isMaximized() and not self.fullscreen_flag:
self.window_size = self.size()
QMainWindow.resizeEvent(self, event)
# To be used by the tour to be able to resize
self.sig_resized.emit(event)
def moveEvent(self, event):
"""Reimplement Qt method"""
if not self.isMaximized() and not self.fullscreen_flag:
self.window_position = self.pos()
QMainWindow.moveEvent(self, event)
# To be used by the tour to be able to move
self.sig_moved.emit(event)
def hideEvent(self, event):
"""Reimplement Qt method"""
try:
for plugin in self.widgetlist:
if plugin.isAncestorOf(self.last_focused_widget):
plugin.visibility_changed(True)
QMainWindow.hideEvent(self, event)
except RuntimeError:
QMainWindow.hideEvent(self, event)
def change_last_focused_widget(self, old, now):
"""To keep track of to the last focused widget"""
if (now is None and QApplication.activeWindow() is not None):
QApplication.activeWindow().setFocus()
self.last_focused_widget = QApplication.focusWidget()
elif now is not None:
self.last_focused_widget = now
self.previous_focused_widget = old
def closing(self, cancelable=False):
"""Exit tasks"""
if self.already_closed or self.is_starting_up:
return True
if cancelable and CONF.get('main', 'prompt_on_exit'):
reply = QMessageBox.critical(self, 'Spyder',
'Do you really want to exit?',
QMessageBox.Yes, QMessageBox.No)
if reply == QMessageBox.No:
return False
prefix = 'window' + '/'
self.save_current_window_settings(prefix)
if CONF.get('main', 'single_instance') and self.open_files_server:
self.open_files_server.close()
for plugin in self.thirdparty_plugins:
if not plugin.closing_plugin(cancelable):
return False
for widget in self.widgetlist:
if not widget.closing_plugin(cancelable):
return False
self.dialog_manager.close_all()
if self.toolbars_visible:
self.save_visible_toolbars()
self.already_closed = True
return True
def add_dockwidget(self, child):
"""Add QDockWidget and toggleViewAction"""
dockwidget, location = child.create_dockwidget()
if CONF.get('main', 'vertical_dockwidget_titlebars'):
dockwidget.setFeatures(dockwidget.features()|
QDockWidget.DockWidgetVerticalTitleBar)
self.addDockWidget(location, dockwidget)
self.widgetlist.append(child)
@Slot()
def close_current_dockwidget(self):
widget = QApplication.focusWidget()
for plugin in self.widgetlist:
if plugin.isAncestorOf(widget):
plugin.dockwidget.hide()
break
def toggle_lock_dockwidgets(self, value):
"""Lock/Unlock dockwidgets"""
self.dockwidgets_locked = value
self.apply_panes_settings()
CONF.set('main', 'panes_locked', value)
def __update_maximize_action(self):
if self.state_before_maximizing is None:
text = _("Maximize current pane")
tip = _("Maximize current pane")
icon = ima.icon('maximize')
else:
text = _("Restore current pane")
tip = _("Restore pane to its original size")
icon = ima.icon('unmaximize')
self.maximize_action.setText(text)
self.maximize_action.setIcon(icon)
self.maximize_action.setToolTip(tip)
@Slot()
@Slot(bool)
def maximize_dockwidget(self, restore=False):
"""Shortcut: Ctrl+Alt+Shift+M
First call: maximize current dockwidget
Second call (or restore=True): restore original window layout"""
if self.state_before_maximizing is None:
if restore:
return
# Select plugin to maximize
self.state_before_maximizing = self.saveState()
focus_widget = QApplication.focusWidget()
for plugin in self.widgetlist:
plugin.dockwidget.hide()
if plugin.isAncestorOf(focus_widget):
self.last_plugin = plugin
            # Only plugins that have a dockwidget are part of widgetlist,
            # so last_plugin can be None after the above for loop.
# For example, this happens if, after Spyder has started, focus
# is set to the Working directory toolbar (which doesn't have
# a dockwidget) and then you press the Maximize button
if self.last_plugin is None:
# Using the Editor as default plugin to maximize
self.last_plugin = self.editor
# Maximize last_plugin
self.last_plugin.dockwidget.toggleViewAction().setDisabled(True)
self.setCentralWidget(self.last_plugin)
self.last_plugin.ismaximized = True
# Workaround to solve an issue with editor's outline explorer:
# (otherwise the whole plugin is hidden and so is the outline explorer
# and the latter won't be refreshed if not visible)
self.last_plugin.show()
self.last_plugin.visibility_changed(True)
if self.last_plugin is self.editor:
# Automatically show the outline if the editor was maximized:
self.addDockWidget(Qt.RightDockWidgetArea,
self.outlineexplorer.dockwidget)
self.outlineexplorer.dockwidget.show()
else:
# Restore original layout (before maximizing current dockwidget)
self.last_plugin.dockwidget.setWidget(self.last_plugin)
self.last_plugin.dockwidget.toggleViewAction().setEnabled(True)
self.setCentralWidget(None)
self.last_plugin.ismaximized = False
self.restoreState(self.state_before_maximizing)
self.state_before_maximizing = None
self.last_plugin.get_focus_widget().setFocus()
self.__update_maximize_action()
def __update_fullscreen_action(self):
if self.isFullScreen():
icon = ima.icon('window_nofullscreen')
else:
icon = ima.icon('window_fullscreen')
if is_text_string(icon):
icon = get_icon(icon)
self.fullscreen_action.setIcon(icon)
@Slot()
def toggle_fullscreen(self):
if self.isFullScreen():
self.fullscreen_flag = False
self.showNormal()
if self.maximized_flag:
self.showMaximized()
else:
self.maximized_flag = self.isMaximized()
self.fullscreen_flag = True
self.showFullScreen()
self.__update_fullscreen_action()
def add_to_toolbar(self, toolbar, widget):
"""Add widget actions to toolbar"""
actions = widget.toolbar_actions
if actions is not None:
add_actions(toolbar, actions)
@Slot()
def about(self):
"""About Spyder"""
versions = get_versions()
# Show Mercurial revision for development version
revlink = ''
if versions['revision']:
rev = versions['revision']
revlink = " (<a href='https://github.com/spyder-ide/spyder/"\
"commit/%s'>Commit: %s</a>)" % (rev, rev)
QMessageBox.about(self,
_("About %s") % "Spyder",
"""<b>Spyder %s</b> %s
<br>The Scientific PYthon Development EnviRonment
<br>Copyright © The Spyder Project Contributors
<br>Licensed under the terms of the MIT License
<p>Created by Pierre Raybaut.
<br>Developed and maintained by the
<a href="%s/blob/master/AUTHORS">Spyder Project Contributors</a>.
<br>Many thanks to all the Spyder beta testers and regular users.
<p>For help with Spyder errors and crashes, please read our
<a href="%s">Troubleshooting page</a>, and for bug reports and
feature requests, visit our <a href="%s">Github website</a>.
For project discussion, see our <a href="%s">Google Group</a>.
<p>This project is part of a larger effort to promote and
facilitate the use of Python for scientific and engineering
software development. The popular Python distributions
<a href="http://continuum.io/downloads">Anaconda</a>,
<a href="https://winpython.github.io/">WinPython</a> and
<a href="http://python-xy.github.io/">Python(x,y)</a>
also contribute to this plan.
<p>Python %s %dbits, Qt %s, %s %s on %s
<p><small>Most of the icons for the Spyder 2 theme come from the Crystal
Project (© 2006-2007 Everaldo Coelho). Other icons for that
theme come from <a href="http://p.yusukekamiyamane.com/"> Yusuke
Kamiyamane</a> (all rights reserved) and from
<a href="http://www.oxygen-icons.org/">
The Oxygen icon theme</a></small>.
"""
% (versions['spyder'], revlink, __project_url__, __trouble_url__,
__project_url__, __forum_url__, versions['python'],
versions['bitness'], versions['qt'], versions['qt_api'],
versions['qt_api_ver'], versions['system']))
@Slot()
def show_dependencies(self):
"""Show Spyder's Dependencies dialog box"""
from spyder.widgets.dependencies import DependenciesDialog
dlg = DependenciesDialog(None)
dlg.set_data(dependencies.DEPENDENCIES)
dlg.exec_()
def render_issue(self, description='', traceback=''):
"""Render issue before sending it to Github"""
# Get component versions
versions = get_versions()
# Get git revision for development version
revision = ''
if versions['revision']:
revision = versions['revision']
# Make a description header in case no description is supplied
if not description:
description = "### What steps reproduce the problem?"
# Make error section from traceback and add appropriate reminder header
if traceback:
error_section = ("### Traceback\n"
"```python-traceback\n"
"{}\n"
"```".format(traceback))
else:
error_section = ''
issue_template = """\
## Description
{description}
{error_section}
## Versions
* Spyder version: {spyder_version} {commit}
* Python version: {python_version}
* Qt version: {qt_version}
* {qt_api_name} version: {qt_api_version}
* Operating System: {os_name} {os_version}
### Dependencies
```
{dependencies}
```
""".format(description=description,
error_section=error_section,
spyder_version=versions['spyder'],
commit=revision,
python_version=versions['python'],
qt_version=versions['qt'],
qt_api_name=versions['qt_api'],
qt_api_version=versions['qt_api_ver'],
os_name=versions['system'],
os_version=versions['release'],
dependencies=dependencies.status())
return issue_template
@Slot()
def report_issue(self, body=None, title=None, open_webpage=False):
"""Report a Spyder issue to github, generating body text if needed."""
if body is None:
from spyder.widgets.reporterror import SpyderErrorDialog
report_dlg = SpyderErrorDialog(self, is_report=True)
report_dlg.show()
else:
if open_webpage:
if PY3:
from urllib.parse import quote
else:
from urllib import quote # analysis:ignore
from qtpy.QtCore import QUrlQuery
url = QUrl(__project_url__ + '/issues/new')
query = QUrlQuery()
query.addQueryItem("body", quote(body))
if title:
query.addQueryItem("title", quote(title))
url.setQuery(query)
QDesktopServices.openUrl(url)
@Slot()
def trouble_guide(self):
"""Open Spyder troubleshooting guide in a web browser."""
url = QUrl(__trouble_url__)
QDesktopServices.openUrl(url)
@Slot()
def google_group(self):
"""Open Spyder Google Group in a web browser."""
url = QUrl(__forum_url__)
QDesktopServices.openUrl(url)
@Slot()
def global_callback(self):
"""Global callback"""
widget = QApplication.focusWidget()
action = self.sender()
callback = from_qvariant(action.data(), to_text_string)
from spyder.widgets.editor import TextEditBaseWidget
        # If the focused widget isn't valid, try the last focused one
if not isinstance(widget, TextEditBaseWidget):
widget = self.previous_focused_widget
if isinstance(widget, TextEditBaseWidget):
getattr(widget, callback)()
def redirect_internalshell_stdio(self, state):
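        """Redirect (or restore) the Internal Console interpreter's stdio."""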
if state:
self.console.shell.interpreter.redirect_stds()
else:
self.console.shell.interpreter.restore_stds()
def open_external_console(self, fname, wdir, args, interact, debug, python,
python_args, systerm, post_mortem=False):
"""Open external console"""
if systerm:
# Running script in an external system terminal
try:
if CONF.get('main_interpreter', 'default'):
executable = get_python_executable()
else:
executable = CONF.get('main_interpreter', 'executable')
programs.run_python_script_in_terminal(
fname, wdir, args, interact, debug, python_args,
executable)
except NotImplementedError:
QMessageBox.critical(self, _("Run"),
_("Running an external system terminal "
"is not supported on platform %s."
) % os.name)
def execute_in_external_console(self, lines, focus_to_editor):
"""
        Execute lines in the IPython console and optionally set focus
to the Editor.
"""
console = self.ipyconsole
console.visibility_changed(True)
console.raise_()
console.execute_code(lines)
if focus_to_editor:
self.editor.visibility_changed(True)
def open_file(self, fname, external=False):
"""
Open filename with the appropriate application
Redirect to the right widget (txt -> editor, spydata -> workspace, ...)
or open file outside Spyder (if extension is not supported)
"""
fname = to_text_string(fname)
ext = osp.splitext(fname)[1]
if encoding.is_text_file(fname):
self.editor.load(fname)
elif self.variableexplorer is not None and ext in IMPORT_EXT:
self.variableexplorer.import_data(fname)
elif not external:
fname = file_uri(fname)
programs.start_file(fname)
def open_external_file(self, fname):
"""
Open external files that can be handled either by the Editor or the
variable explorer inside Spyder.
"""
fname = encoding.to_unicode_from_fs(fname)
if osp.isfile(fname):
self.open_file(fname, external=True)
elif osp.isfile(osp.join(CWD, fname)):
self.open_file(osp.join(CWD, fname), external=True)
# ---- PYTHONPATH management, etc.
def get_spyder_pythonpath(self):
"""Return Spyder PYTHONPATH"""
active_path = [p for p in self.path if p not in self.not_active_path]
return active_path + self.project_path
def add_path_to_sys_path(self):
"""Add Spyder path to sys.path"""
for path in reversed(self.get_spyder_pythonpath()):
sys.path.insert(1, path)
def remove_path_from_sys_path(self):
"""Remove Spyder path from sys.path"""
for path in self.path + self.project_path:
while path in sys.path:
sys.path.remove(path)
@Slot()
def path_manager_callback(self):
"""Spyder path manager"""
from spyder.widgets.pathmanager import PathManager
self.remove_path_from_sys_path()
project_path = self.projects.get_pythonpath()
dialog = PathManager(self, self.path, project_path,
self.not_active_path, sync=True)
dialog.redirect_stdio.connect(self.redirect_internalshell_stdio)
dialog.exec_()
self.add_path_to_sys_path()
try:
encoding.writelines(self.path, self.SPYDER_PATH) # Saving path
encoding.writelines(self.not_active_path,
self.SPYDER_NOT_ACTIVE_PATH)
except EnvironmentError:
pass
self.sig_pythonpath_changed.emit()
def pythonpath_changed(self):
"""Projects PYTHONPATH contribution has changed"""
self.remove_path_from_sys_path()
self.project_path = self.projects.get_pythonpath()
self.add_path_to_sys_path()
self.sig_pythonpath_changed.emit()
@Slot()
def win_env(self):
"""Show Windows current user environment variables"""
self.dialog_manager.show(WinUserEnvDialog(self))
#---- Preferences
def apply_settings(self):
"""Apply settings changed in 'Preferences' dialog box"""
qapp = QApplication.instance()
# Set 'gtk+' as the default theme in Gtk-based desktops
# Fixes Issue 2036
if is_gtk_desktop() and ('GTK+' in QStyleFactory.keys()):
try:
qapp.setStyle('gtk+')
except:
pass
else:
style_name = CONF.get('main', 'windows_style',
self.default_style)
style = QStyleFactory.create(style_name)
if style is not None:
style.setProperty('name', style_name)
qapp.setStyle(style)
default = self.DOCKOPTIONS
if CONF.get('main', 'vertical_tabs'):
default = default|QMainWindow.VerticalTabs
if CONF.get('main', 'animated_docks'):
default = default|QMainWindow.AnimatedDocks
self.setDockOptions(default)
self.apply_panes_settings()
self.apply_statusbar_settings()
if CONF.get('main', 'use_custom_cursor_blinking'):
qapp.setCursorFlashTime(CONF.get('main', 'custom_cursor_blinking'))
else:
qapp.setCursorFlashTime(self.CURSORBLINK_OSDEFAULT)
def apply_panes_settings(self):
"""Update dockwidgets features settings"""
# Update toggle action on menu
for child in self.widgetlist:
features = child.FEATURES
if CONF.get('main', 'vertical_dockwidget_titlebars'):
features = features | QDockWidget.DockWidgetVerticalTitleBar
if not self.dockwidgets_locked:
features = features | QDockWidget.DockWidgetMovable
child.dockwidget.setFeatures(features)
child.update_margins()
def apply_statusbar_settings(self):
"""Update status bar widgets settings"""
show_status_bar = CONF.get('main', 'show_status_bar')
self.statusBar().setVisible(show_status_bar)
if show_status_bar:
for widget, name in ((self.mem_status, 'memory_usage'),
(self.cpu_status, 'cpu_usage')):
if widget is not None:
widget.setVisible(CONF.get('main', '%s/enable' % name))
widget.set_interval(CONF.get('main', '%s/timeout' % name))
else:
return
@Slot()
def edit_preferences(self):
"""Edit Spyder preferences"""
from spyder.plugins.configdialog import ConfigDialog
dlg = ConfigDialog(self)
dlg.size_change.connect(self.set_prefs_size)
if self.prefs_dialog_size is not None:
dlg.resize(self.prefs_dialog_size)
for PrefPageClass in self.general_prefs:
widget = PrefPageClass(dlg, main=self)
widget.initialize()
dlg.add_page(widget)
for plugin in [self.workingdirectory, self.editor,
self.projects, self.ipyconsole,
self.historylog, self.help, self.variableexplorer,
self.onlinehelp, self.explorer, self.findinfiles
]+self.thirdparty_plugins:
if plugin is not None:
try:
widget = plugin.create_configwidget(dlg)
if widget is not None:
dlg.add_page(widget)
except Exception:
traceback.print_exc(file=sys.stderr)
if self.prefs_index is not None:
dlg.set_current_index(self.prefs_index)
dlg.show()
dlg.check_all_settings()
dlg.pages_widget.currentChanged.connect(self.__preference_page_changed)
dlg.exec_()
def __preference_page_changed(self, index):
"""Preference page index has changed"""
self.prefs_index = index
def set_prefs_size(self, size):
"""Save preferences dialog size"""
self.prefs_dialog_size = size
#---- Shortcuts
def register_shortcut(self, qaction_or_qshortcut, context, name,
add_sc_to_tip=False):
"""
Register QAction or QShortcut to Spyder main application,
with shortcut (context, name, default)
"""
self.shortcut_data.append( (qaction_or_qshortcut, context,
name, add_sc_to_tip) )
def apply_shortcuts(self):
"""Apply shortcuts settings to all widgets/plugins"""
toberemoved = []
for index, (qobject, context, name,
add_sc_to_tip) in enumerate(self.shortcut_data):
keyseq = QKeySequence( get_shortcut(context, name) )
try:
if isinstance(qobject, QAction):
if sys.platform == 'darwin' and \
qobject._shown_shortcut == 'missing':
qobject._shown_shortcut = keyseq
else:
qobject.setShortcut(keyseq)
if add_sc_to_tip:
add_shortcut_to_tooltip(qobject, context, name)
elif isinstance(qobject, QShortcut):
qobject.setKey(keyseq)
except RuntimeError:
# Object has been deleted
toberemoved.append(index)
for index in sorted(toberemoved, reverse=True):
self.shortcut_data.pop(index)
@Slot()
def show_shortcuts_dialog(self):
from spyder.widgets.shortcutssummary import ShortcutsSummaryDialog
dlg = ShortcutsSummaryDialog(None)
dlg.exec_()
# -- Open files server
def start_open_files_server(self):
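        """Listen on a local socket for filenames sent by other Spyder
        instances and forward them to the GUI thread through
        sig_open_external_file."""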
self.open_files_server.setsockopt(socket.SOL_SOCKET,
socket.SO_REUSEADDR, 1)
port = select_port(default_port=OPEN_FILES_PORT)
CONF.set('main', 'open_files_port', port)
self.open_files_server.bind(('127.0.0.1', port))
self.open_files_server.listen(20)
while 1: # 1 is faster than True
try:
req, dummy = self.open_files_server.accept()
except socket.error as e:
# See Issue 1275 for details on why errno EINTR is
# silently ignored here.
eintr = errno.WSAEINTR if os.name == 'nt' else errno.EINTR
# To avoid a traceback after closing on Windows
if e.args[0] == eintr:
continue
# handle a connection abort on close error
enotsock = (errno.WSAENOTSOCK if os.name == 'nt'
else errno.ENOTSOCK)
if e.args[0] in [errno.ECONNABORTED, enotsock]:
return
raise
fname = req.recv(1024)
fname = fname.decode('utf-8')
self.sig_open_external_file.emit(fname)
req.sendall(b' ')
# ---- Quit and restart, and reset spyder defaults
@Slot()
def reset_spyder(self):
"""
Quit and reset Spyder and then Restart application.
"""
answer = QMessageBox.warning(self, _("Warning"),
_("Spyder will restart and reset to default settings: <br><br>"
"Do you want to continue?"),
QMessageBox.Yes | QMessageBox.No)
if answer == QMessageBox.Yes:
self.restart(reset=True)
@Slot()
def restart(self, reset=False):
"""
Quit and Restart Spyder application.
        If reset is True, Spyder is reset to its default settings on restart.
"""
# Get start path to use in restart script
spyder_start_directory = get_module_path('spyder')
restart_script = osp.join(spyder_start_directory, 'app', 'restart.py')
# Get any initial argument passed when spyder was started
# Note: Variables defined in bootstrap.py and spyder/app/start.py
env = os.environ.copy()
bootstrap_args = env.pop('SPYDER_BOOTSTRAP_ARGS', None)
spyder_args = env.pop('SPYDER_ARGS')
# Get current process and python running spyder
pid = os.getpid()
python = sys.executable
# Check if started with bootstrap.py
if bootstrap_args is not None:
spyder_args = bootstrap_args
is_bootstrap = True
else:
is_bootstrap = False
# Pass variables as environment variables (str) to restarter subprocess
env['SPYDER_ARGS'] = spyder_args
env['SPYDER_PID'] = str(pid)
env['SPYDER_IS_BOOTSTRAP'] = str(is_bootstrap)
env['SPYDER_RESET'] = str(reset)
if DEV:
if os.name == 'nt':
env['PYTHONPATH'] = ';'.join(sys.path)
else:
env['PYTHONPATH'] = ':'.join(sys.path)
# Build the command and popen arguments depending on the OS
if os.name == 'nt':
# Hide flashing command prompt
startupinfo = subprocess.STARTUPINFO()
startupinfo.dwFlags |= subprocess.STARTF_USESHOWWINDOW
shell = False
else:
startupinfo = None
shell = True
command = '"{0}" "{1}"'
command = command.format(python, restart_script)
try:
if self.closing(True):
subprocess.Popen(command, shell=shell, env=env,
startupinfo=startupinfo)
self.console.quit()
except Exception as error:
# If there is an error with subprocess, Spyder should not quit and
# the error can be inspected in the internal console
print(error) # spyder: test-skip
print(command) # spyder: test-skip
# ---- Interactive Tours
def show_tour(self, index):
""" """
frames = self.tours_available[index]
self.tour.set_tour(index, frames, self)
self.tour.start_tour()
# ---- Global File Switcher
def open_fileswitcher(self, symbol=False):
"""Open file list management dialog box."""
if self.fileswitcher is not None and \
self.fileswitcher.is_visible:
self.fileswitcher.hide()
self.fileswitcher.is_visible = False
return
if symbol:
self.fileswitcher.plugin = self.editor
self.fileswitcher.set_search_text('@')
else:
self.fileswitcher.set_search_text('')
self.fileswitcher.show()
self.fileswitcher.is_visible = True
def open_symbolfinder(self):
"""Open symbol list management dialog box."""
self.open_fileswitcher(symbol=True)
def add_to_fileswitcher(self, plugin, tabs, data, icon):
"""Add a plugin to the File Switcher."""
if self.fileswitcher is None:
self.fileswitcher = FileSwitcher(self, plugin, tabs, data, icon)
else:
self.fileswitcher.add_plugin(plugin, tabs, data, icon)
self.fileswitcher.sig_goto_file.connect(
plugin.get_current_tab_manager().set_stack_index)
# ---- Check for Spyder Updates
def _check_updates_ready(self):
"""Called by WorkerUpdates when ready"""
from spyder.widgets.helperwidgets import MessageCheckBox
        # `feedback = False` is used on startup, so only positive feedback is
        # given. `feedback = True` is used after startup (when using the menu
        # action), and gives feedback whether updates are found or not.
feedback = self.give_updates_feedback
# Get results from worker
update_available = self.worker_updates.update_available
latest_release = self.worker_updates.latest_release
error_msg = self.worker_updates.error
url_r = __project_url__ + '/releases'
url_i = 'https://docs.spyder-ide.org/installation.html'
# Define the custom QMessageBox
box = MessageCheckBox(icon=QMessageBox.Information,
parent=self)
box.setWindowTitle(_("Spyder updates"))
box.set_checkbox_text(_("Check for updates on startup"))
box.setStandardButtons(QMessageBox.Ok)
box.setDefaultButton(QMessageBox.Ok)
# Adjust the checkbox depending on the stored configuration
section, option = 'main', 'check_updates_on_startup'
check_updates = CONF.get(section, option)
box.set_checked(check_updates)
if error_msg is not None:
msg = error_msg
box.setText(msg)
box.set_check_visible(False)
box.exec_()
check_updates = box.is_checked()
else:
if update_available:
anaconda_msg = ''
if 'Anaconda' in sys.version or 'conda-forge' in sys.version:
anaconda_msg = _("<hr><b>IMPORTANT NOTE:</b> It seems "
"that you are using Spyder with "
"<b>Anaconda/Miniconda</b>. Please "
"<b>don't</b> use <code>pip</code> to "
"update it as that will probably break "
"your installation.<br><br>"
"Instead, please wait until new conda "
"packages are available and use "
"<code>conda</code> to perform the "
"update.<hr>")
msg = _("<b>Spyder %s is available!</b> <br><br>Please use "
"your package manager to update Spyder or go to our "
"<a href=\"%s\">Releases</a> page to download this "
"new version. <br><br>If you are not sure how to "
"proceed to update Spyder please refer to our "
" <a href=\"%s\">Installation</a> instructions."
"") % (latest_release, url_r, url_i)
msg += '<br>' + anaconda_msg
box.setText(msg)
box.set_check_visible(True)
box.exec_()
check_updates = box.is_checked()
elif feedback:
msg = _("Spyder is up to date.")
box.setText(msg)
box.set_check_visible(False)
box.exec_()
check_updates = box.is_checked()
# Update checkbox based on user interaction
CONF.set(section, option, check_updates)
# Enable check_updates_action after the thread has finished
self.check_updates_action.setDisabled(False)
        # Provide feedback when clicking the menu if check on startup is on
self.give_updates_feedback = True
@Slot()
def check_updates(self, startup=False):
"""
Check for spyder updates on github releases using a QThread.
"""
from spyder.workers.updates import WorkerUpdates
# Disable check_updates_action while the thread is working
self.check_updates_action.setDisabled(True)
if self.thread_updates is not None:
self.thread_updates.terminate()
self.thread_updates = QThread(self)
self.worker_updates = WorkerUpdates(self, startup=startup)
self.worker_updates.sig_ready.connect(self._check_updates_ready)
self.worker_updates.sig_ready.connect(self.thread_updates.quit)
self.worker_updates.moveToThread(self.thread_updates)
self.thread_updates.started.connect(self.worker_updates.start)
self.thread_updates.start()
#==============================================================================
# Utilities to create the 'main' function
#==============================================================================
def initialize():
"""Initialize Qt, patching sys.exit and eventually setting up ETS"""
# This doesn't create our QApplication, just holds a reference to
# MAIN_APP, created above to show our splash screen as early as
# possible
app = qapplication()
# --- Set application icon
app.setWindowIcon(APP_ICON)
    # ----Monkey patching QApplication
class FakeQApplication(QApplication):
"""Spyder's fake QApplication"""
def __init__(self, args):
self = app # analysis:ignore
@staticmethod
def exec_():
"""Do nothing because the Qt mainloop is already running"""
pass
from qtpy import QtWidgets
QtWidgets.QApplication = FakeQApplication
# ----Monkey patching sys.exit
def fake_sys_exit(arg=[]):
pass
sys.exit = fake_sys_exit
# ----Monkey patching sys.excepthook to avoid crashes in PyQt 5.5+
if PYQT5:
def spy_excepthook(type_, value, tback):
sys.__excepthook__(type_, value, tback)
sys.excepthook = spy_excepthook
# Removing arguments from sys.argv as in standard Python interpreter
sys.argv = ['']
# Selecting Qt4 backend for Enthought Tool Suite (if installed)
try:
from enthought.etsconfig.api import ETSConfig
ETSConfig.toolkit = 'qt4'
except ImportError:
pass
return app
class Spy(object):
"""
Inspect Spyder internals
Attributes:
app Reference to main QApplication object
window Reference to spyder.MainWindow widget
"""
def __init__(self, app, window):
self.app = app
self.window = window
def __dir__(self):
return list(self.__dict__.keys()) +\
[x for x in dir(self.__class__) if x[0] != '_']
def versions(self):
return get_versions()
def run_spyder(app, options, args):
"""
Create and show Spyder's main window
Start QApplication event loop
"""
#TODO: insert here
# Main window
main = MainWindow(options)
try:
main.setup()
except BaseException:
if main.console is not None:
try:
main.console.shell.exit_interpreter()
except BaseException:
pass
raise
main.show()
main.post_visible_setup()
if main.console:
main.console.shell.interpreter.namespace['spy'] = \
Spy(app=app, window=main)
# Open external files passed as args
if args:
for a in args:
main.open_external_file(a)
# Don't show icons in menus for Mac
if sys.platform == 'darwin':
QCoreApplication.setAttribute(Qt.AA_DontShowIconsInMenus, True)
# Open external files with our Mac app
if running_in_mac_app():
app.sig_open_external_file.connect(main.open_external_file)
# To give focus again to the last focused widget after restoring
# the window
app.focusChanged.connect(main.change_last_focused_widget)
if not running_under_pytest():
app.exec_()
return main
#==============================================================================
# Main
#==============================================================================
def main():
"""Main function"""
if running_under_pytest():
try:
from unittest.mock import Mock
except ImportError:
from mock import Mock # Python 2
options = Mock()
options.working_directory = None
options.profile = False
options.multithreaded = False
options.new_instance = False
options.open_project = None
options.window_title = None
app = initialize()
window = run_spyder(app, options, None)
return window
# **** Collect command line options ****
# Note regarding Options:
# It's important to collect options before monkey patching sys.exit,
# otherwise, optparse won't be able to exit if --help option is passed
options, args = get_options()
if options.show_console:
print("(Deprecated) --show console does nothing, now the default "
" behavior is to show the console, use --hide-console if you "
"want to hide it")
if set_attached_console_visible is not None:
set_attached_console_visible(not options.hide_console
or options.reset_config_files
or options.reset_to_defaults
or options.optimize or bool(DEBUG))
app = initialize()
if options.reset_config_files:
# <!> Remove all configuration files!
reset_config_files()
return
elif options.reset_to_defaults:
# Reset Spyder settings to defaults
CONF.reset_to_defaults(save=True)
return
elif options.optimize:
# Optimize the whole Spyder's source code directory
import spyder
programs.run_python_script(module="compileall",
args=[spyder.__path__[0]], p_args=['-O'])
return
# Show crash dialog
if CONF.get('main', 'crash', False) and not DEV:
CONF.set('main', 'crash', False)
if SPLASH is not None:
SPLASH.hide()
QMessageBox.information(
None, "Spyder",
"Spyder crashed during last session.<br><br>"
"If Spyder does not start at all and <u>before submitting a "
"bug report</u>, please try to reset settings to defaults by "
"running Spyder with the command line option '--reset':<br>"
"<span style=\'color: #555555\'><b>spyder --reset</b></span>"
"<br><br>"
"<span style=\'color: #ff5555\'><b>Warning:</b></span> "
"this command will remove all your Spyder configuration files "
"located in '%s').<br><br>"
"If Spyder still fails to launch, you should consult our "
"comprehensive <b><a href=\"%s\">Troubleshooting Guide</a></b>, "
"which when followed carefully solves the vast majority of "
"crashes; also, take "
"the time to search for <a href=\"%s\">known bugs</a> or "
"<a href=\"%s\">discussions</a> matching your situation before "
"submitting a report to our <a href=\"%s\">issue tracker</a>. "
"Your feedback will always be greatly appreciated."
"" % (get_conf_path(), __trouble_url__, __project_url__,
__forum_url__, __project_url__))
# Create main window
mainwindow = None
try:
mainwindow = run_spyder(app, options, args)
except FontError as fontError:
QMessageBox.information(None, "Spyder",
"Spyder was unable to load the <i>Spyder 3</i> "
"icon theme. That's why it's going to fallback to the "
"theme used in Spyder 2.<br><br>"
"For that, please close this window and start Spyder again.")
CONF.set('main', 'icon_theme', 'spyder 2')
except BaseException:
CONF.set('main', 'crash', True)
import traceback
traceback.print_exc(file=STDERR)
traceback.print_exc(file=open('spyder_crash.log', 'w'))
if mainwindow is None:
        # An exception occurred
if SPLASH is not None:
SPLASH.hide()
return
ORIGINAL_SYS_EXIT()
if __name__ == "__main__":
main()
| apache-2.0 |
pylayers/pylayers | pylayers/antprop/loss.py | 1 | 55258 | # -*- coding:Utf-8 -*-
#from numpy import *
from __future__ import print_function
"""
.. currentmodule:: pylayers.antprop.loss
.. autosummary::
:members:
"""
import doctest
import os
import glob
import doctest
import logging
import numpy as np
from scipy import io
import matplotlib.pylab as plt
import pylayers.gis.gisutil as gu
from pylayers.util.project import logger
import numpy.linalg as la
import pdb
import time
from numba import jit
def PL0(fGHz,GtdB=0,GrdB=0,R=1):
""" Path Loss at frequency fGHZ @ R
Parameters
----------
fGHz: float
frequency GHz
GtdB: float
transmitting antenna gain dB (default 0 dB)
GrdB: float
receiving antenna gain dB (default 0 dB)
R : float
distance in m
Returns
-------
    PL0 : float
        path loss at distance R (dB)
Notes
-----
.. math:: PL_0 = -20 log_{10}(\\frac{\\lambda}{4\\pi}) - GtdB -GrdB
Examples
--------
>>> fGHz = 2.4
>>> PL = PL0(fGHz)
>>> assert (PL<41)&(PL>40),"something wrong"
"""
if not isinstance(fGHz,np.ndarray):
fGHz=np.array([fGHz])
ld = 0.3/fGHz
PL0 = -20*np.log10(ld/(4.0*np.pi*R))-GtdB-GrdB
return PL0
def Dgrid_points(points,Px):
""" distance point to grid
Parameters
----------
points : np.array
grid Np x 2 array
Px : np.array
point 2 x 1 array
Returns
-------
    D : Euclidean distance matrix
"""
Dx = points[:,0] - Px[0]
Dy = points[:,1] - Px[1]
D = np.sqrt( Dx*Dx + Dy*Dy )
return(D)
def FMetisShad2(fGHz,r,D,sign=1):
""" F Metis shadowing function
Parameters
----------
fGHz : np.array(Nf)
frequency GHz
r : np.array(Nseg,)
distance between Tx and Rx
D : np.array(Nseg,Nscreen)
indirect distance between Tx and Rx (screen effect)
sign : np.array(Nseg,Nscreen)
== 1 : Shadowing NLOS situation
==-1 : No shadowing LOS situation
Returns
-------
F : np.array(Nseg,Nscreen,Nf)
Notes
-----
Provides an implementation of formula (6.6) in D1.4 of METIS project
See Also
--------
LossMetisShadowing
    The arctan approximates the envelope of the Fresnel integral.
"""
lamda = 0.3/fGHz[None,None,:]
F = np.arctan(sign[:,:,None]*np.pi/2.*(np.sqrt((np.pi/lamda)*(D[:,:,None]-r[:,None,None])))) / np.pi
return(F)
def FMetisShad(fGHz,r,D,sign=1):
""" F Metis shadowing function
Parameters
----------
fGHz : float
frequency GHz
r : float
distance between Tx and Rx
D : float
indirect distance between Tx and Rx (screen effect)
sign : int
== 1 : Shadowing NLOS situation
==-1 : No shadowing LOS situation
Notes
-----
Provides an implementation of formula (6.6) in D1.4 of METIS project
See Also
--------
LossMetisShadowing
"""
lamda = 0.3/fGHz
F = np.arctan(sign*np.pi/2.*(np.sqrt((np.pi/lamda)*(D-r)))) / np.pi
return(F)
def LossMetisShadowing(fGHz,tx,rx,pg,uw,uh,w,h):
""" Calculate the Loss from
Parameters
----------
fGHz : float
tx : np.array (,3) of floats
transmiter coordinates
rx : np.array (,3) of floats
receiver coordinates
pg : np.array (,3) of floats
center of gravity of the screen
uw : np.array (,3) of floats
unitary vector along width dimension
uh : np.array (,3) of floats
unitary vector along height dimension
w : float
width in meters
h : float
height in meters
Returns
-------
Lsh : float
Loss in dB to add to the FS path Loss
Notes
-----
This function provides an implementation of formula 6.5 of D1.4 deliverable of METIS project
    [Metis D1.4](https://www.metis2020.com/wp-content/uploads/METIS_D1.4_v3.pdf)
# geometry parametric issue : find M in [tx-rx] defined as M = alpha*rx + (1-alpha)tx where alpha in [0-1].
# if alpha = 0 then M = tx ; if alpha = 1 then M = rx.
# Besides, M is defined as M = pg + beta*uw + gamma*uh then alpha*rx + (1-alpha)tx = pg + beta*uw + gamma*uh
# [rx-tx , -uw, -uh]*[alpha,beta,gamma].T = pg - tx <==> Ax = b solved by la.solve ; x[0]=alpha, x[1]=beta and
TODO To be vectorized
"""
rxtx = rx - tx # LOS distance
# x[2]=gamma.
A = np.vstack((rxtx,-uw,-uh)).T
b = pg - tx
x = la.solve(A,b)
# condition of shadowing
condseg = ((x[0]>1) or (x[0]<0))
condw = ((x[1]>w/2.) or (x[1]<-w/2.))
condh = ((x[2]>h/2.) or (x[2]<-h/2.))
visi = condseg or condw or condh
if visi:
shad = -1
else:
shad = 1
r = np.dot(rxtx,rxtx)**0.5
w1 = pg + uw*w/2.
w2 = pg - uw*w/2.
h1 = pg + uh*h/2.
h2 = pg - uh*h/2.
Dtw1 = np.dot(tx-w1,tx-w1)**0.5
Drw1 = np.dot(rx-w1,rx-w1)**0.5
Dtw2 = np.dot(tx-w2,tx-w2)**0.5
Drw2 = np.dot(rx-w2,rx-w2)**0.5
Dth1 = np.dot(tx-h1,tx-h1)**0.5
Drh1 = np.dot(rx-h1,rx-h1)**0.5
Dth2 = np.dot(tx-h2,tx-h2)**0.5
Drh2 = np.dot(rx-h2,rx-h2)**0.5
D1w = Dtw1+Drw1
D1h = Dth1+Drh1
D2w = Dtw2+Drw2
D2h = Dth2+Drh2
if shad == 1:
signw1 = 1
signw2 = 1
signh1 = 1
signh2 = 1
else:
if condw:
if D1w>D2w:
signw1=1
signw2=-1
else:
signw1=-1
signw2=1
else:
signw1 = 1
signw2 = 1
if condh:
if D1h>D2h:
signh1=1
signh2=-1
else:
signh1=-1
signh2=1
else:
signh1 = 1
signh2 = 1
Fw1 = FMetisShad(fGHz,r,D1w,sign=signw1)
Fh1 = FMetisShad(fGHz,r,D1h,sign=signh1)
Fw2 = FMetisShad(fGHz,r,D2w,sign=signw2)
Fh2 = FMetisShad(fGHz,r,D2h,sign=signh2)
tmp = (Fh1+Fh2)*(Fw1+Fw2)
Lsh = -20*np.log10(1-tmp)
#return(Lsh,shad,tmp,Fw1,Fh1,Fw2,Fh2,condh,condw)
return(Lsh)
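# Usage sketch (added for illustration, not in the original source; all
# geometry values below are assumed): shadowing by a 2 m x 3 m screen centred
# between a transmitter and a receiver that are 10 m apart, at 26 GHz.
#
# tx = np.array([0., 0., 1.5])
# rx = np.array([10., 0., 1.5])
# pg = np.array([5., 0., 1.5])    # centre of gravity of the screen
# uw = np.array([0., 1., 0.])     # unit vector along the screen width
# uh = np.array([0., 0., 1.])     # unit vector along the screen height
# Lsh = LossMetisShadowing(26.0, tx, rx, pg, uw, uh, w=2.0, h=3.0)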
def LossMetisShadowing2(fGHz,tx,rx,pg,uw,uh,w,h):
""" Calculate the Loss from
Parameters
----------
fGHz : np.array(,Nf)
tx : np.array (3,Nseg) of floats
transmiter coordinates
rx : np.array (3,Nseg) of floats
receiver coordinates
pg : np.array (3,Nscreen) of floats
center of gravity of the screen
uw : np.array (3,Nscreen) of floats
unitary vector along width dimension
uh : np.array (3,Nscreen) of floats
unitary vector along height dimension
w : np.array (,Nscreen)
width in meters
h : np.array (,Nscreen)
height in meters
Returns
-------
Lsh : np.array (Nseg,Nscreen,Nf)
Loss in dB to add to the FS path Loss
Notes
-----
This function provides an implementation of formula 6.5 of D1.4 deliverable of METIS project
    [Metis D1.4](https://www.metis2020.com/wp-content/uploads/METIS_D1.4_v3.pdf)
# geometry parametric issue : find M in [tx-rx] defined as M = alpha*rx + (1-alpha)tx where alpha in [0-1].
# if alpha = 0 then M = tx ; if alpha = 1 then M = rx.
# Besides, M is defined as M = pg + beta*uw + gamma*uh then alpha*rx + (1-alpha)tx = pg + beta*uw + gamma*uh
# [rx-tx , -uw, -uh]*[alpha,beta,gamma].T = pg - tx <==> Ax = b solved by la.solve ; x[0]=alpha, x[1]=beta and
"""
Nseg = tx.shape[1]
Nscreen = uw.shape[1]
rxtx = rx - tx # (3,Nseg) LOS distance
# A : (Nseg,Nscreen,3,3)
# b : (Nseg,Nscreen,3)
# rxtx.T (Nseg,3)
# uw.T (Nscreen, 3)
# uh.T (Nscreen, 3)
U = rxtx.T[:,None,:,None]
W = uw.T[None,:,:,None]
H = uh.T[None,:,:,None]
We = W + np.zeros(U.shape)
He = H + np.zeros(U.shape)
Ue = U + np.zeros(He.shape)
A = np.concatenate((Ue,-We,-He),axis=3)
#A = np.vstack((rxtx,-uw,-uh)).T
# pg.T Nscreen, 3
# tx.T Nseg,3
b = pg.T[None,:,:]-tx.T[:,None,:]
#b = pg - tx
x = la.solve(A,b)
# condition of shadowing
condseg = ((x[:,:,0]>1) + (x[:,:,0]<0))
condw = ((x[:,:,1]>w[None,:]/2.) + (x[:,:,1]<-w[None,:]/2.))
condh = ((x[:,:,2]>h[None,:]/2.) + (x[:,:,2]<-h[None,:]/2.))
visi = (condseg + condw + condh)%2
# if visi:
# shad = -1
# else:
# shad = 1
#shad = - visi
r = np.sum(rxtx*rxtx,axis=0)**0.5
w1 = pg + uw*w[None,:]/2.
w2 = pg - uw*w[None,:]/2.
h1 = pg + uh*h[None,:]/2.
h2 = pg - uh*h[None,:]/2.
Dtw1 = np.sum((tx[...,None]-w1[:,None,:])*(tx[...,None]-w1[:,None,:]),axis=0)**0.5
Drw1 = np.sum((rx[...,None]-w1[:,None,:])*(rx[...,None]-w1[:,None,:]),axis=0)**0.5
Dtw2 = np.sum((tx[...,None]-w2[:,None,:])*(tx[...,None]-w2[:,None,:]),axis=0)**0.5
Drw2 = np.sum((rx[...,None]-w2[:,None,:])*(rx[...,None]-w2[:,None,:]),axis=0)**0.5
Dth1 = np.sum((tx[...,None]-h1[:,None,:])*(tx[...,None]-h1[:,None,:]),axis=0)**0.5
Drh1 = np.sum((rx[...,None]-h1[:,None,:])*(rx[...,None]-h1[:,None,:]),axis=0)**0.5
Dth2 = np.sum((tx[...,None]-h2[:,None,:])*(tx[...,None]-h2[:,None,:]),axis=0)**0.5
Drh2 = np.sum((rx[...,None]-h2[:,None,:])*(rx[...,None]-h2[:,None,:]),axis=0)**0.5
# Drw1 = np.dot(rx-w1,rx-w1)**0.5
# Dtw2 = np.dot(tx-w2,tx-w2)**0.5
# Drw2 = np.dot(rx-w2,rx-w2)**0.5
# Dth1 = np.dot(tx-h1,tx-h1)**0.5
# Drh1 = np.dot(rx-h1,rx-h1)**0.5
# Dth2 = np.dot(tx-h2,tx-h2)**0.5
# Drh2 = np.dot(rx-h2,rx-h2)**0.5
D1w = Dtw1+Drw1
D1h = Dth1+Drh1
D2w = Dtw2+Drw2
D2h = Dth2+Drh2
signw1 = np.ones((Nseg,Nscreen))
signw2 = np.ones((Nseg,Nscreen))
signh1 = np.ones((Nseg,Nscreen))
signh2 = np.ones((Nseg,Nscreen))
condw1 = (visi*condw*(D1w<=D2w)).astype(bool)
condw2 = (visi*condw*(D1w>D2w)).astype(bool)
signw1[condw1]=-1
signw2[condw2]=-1
condh1 = (visi*condh*(D1h<=D2h)).astype(bool)
condh2 = (visi*condh*(D1h>D2h)).astype(bool)
signh1[condh1]=-1
signh2[condh2]=-1
Fw1 = FMetisShad2(fGHz,r,D1w,sign=signw1)
Fh1 = FMetisShad2(fGHz,r,D1h,sign=signh1)
Fw2 = FMetisShad2(fGHz,r,D2w,sign=signw2)
Fh2 = FMetisShad2(fGHz,r,D2h,sign=signh2)
tmp = (Fh1+Fh2)*(Fw1+Fw2)
Lsh = -20*np.log10(1-tmp)
#return(Lsh,shad,tmp,Fw1,Fh1,Fw2,Fh2,condh,condw)
return(Lsh)
def Dgrid_zone(zone,Px):
""" Distance point to zone
A zone is a quadrilateral zone.
Parameters
----------
    zone : dictionary
xmin xmax Nx
ymin ymax Ny
Px : np.array
point
Returns
-------
D : np.array Nx x Ny
Euclidian distance matrix
Notes
-----
Build the distance matrix between Tx and points in the zone
use broadcasting instead
"""
rx = np.linspace(zone['xmin'],zone['xmax'],zone['Nx'])
ry = np.linspace(zone['ymin'],zone['ymax'],zone['Ny'])
R_x = np.outer(np.ones(len(ry)),rx)
R_y = np.outer(ry,np.ones(len(rx)))
Dx = R_x - Px[0]
Dy = R_y - Px[1]
D = np.sqrt(Dx*Dx+Dy*Dy)
return (D)
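# Usage sketch (added for illustration, not in the original source; the grid
# and transmitter position are assumed): distance from a point at (0, 0) to
# every node of a 50 x 40 grid covering [0, 10] x [0, 8] meters.
#
# zone = {'xmin': 0., 'xmax': 10., 'Nx': 50, 'ymin': 0., 'ymax': 8., 'Ny': 40}
# D = Dgrid_zone(zone, np.array([0., 0.]))   # shape (Ny, Nx)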
def OneSlopeMdl(D,n,fGHz):
""" one slope model
Parameters
----------
D : np.array
distance array
n : float
path loss exponent
fGHz : np.array
frequency GHz
Returns
-------
PL : np.array
path loss as a function of distance
"""
PL = PL0(fGHz) + 10*n*np.log10(D)
return(PL)
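# Usage sketch (added for illustration, not in the original source; the path
# loss exponent and distances are assumed): one-slope loss at 2.4 GHz.
#
# D = np.linspace(1, 100, 50)
# PLdB = OneSlopeMdl(D, 2.2, np.array([2.4]))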
def cost231(pBS,pMS,hroof,phir,wr,fMHz,wb=20,dB=True,city='medium'):
""" Walfish Ikegami model (COST 231)
Parameters
----------
pBS : np.array (3xNlink)
pMS : np.array (3xNlink)
hroof : np.array (1xNlink)
phir : np.array (1xNlink)
degrees
wr : np.array (1xNlink)
fMHz : np.array (1xNf)
wb : float
average building separation
dB : boolean
Returns
-------
PathLoss (Nlink,Nf)
References
----------
http://morse.colorado.edu/~tlen5510/text/classwebch3.html
Examples
--------
.. plot::
:include-source:
>>> from pylayers.antprop.loss import *
>>> import matplotlib.pyplot as plt
>>> import numpy as np
>>> # Number of links and BS and MS heights
>>> Nlink = 100
>>> hBS = 300
>>> hMS = 1.5
    >>> # hroof and phir are drawn uniformly at random
>>> hroof = 40*np.random.rand(Nlink)
>>> wr = 10*np.ones(Nlink)
>>> phir = 90*np.random.rand(Nlink)
>>> pMS = np.vstack((np.linspace(10,2500,Nlink),np.zeros(Nlink),hMS*np.ones(Nlink)))
>>> pBS = np.vstack((np.zeros(Nlink),np.zeros(Nlink),hBS*np.ones(Nlink)))
>>> # frequency range
>>> fMHz = np.linspace(700,1900,120)
>>> pl = cost231(pBS,pMS,hroof,phir,wr,fMHz)
>>> im = plt.imshow(pl,extent=(0,100,0.7,1.9))
>>> cb = plt.colorbar()
>>> cb.set_label('Loss (dB)')
>>> plt.axis('tight')
>>> plt.xlabel('Frequency (GHz)')
>>> plt.ylabel('Link Number')
>>> plt.title('100 WI Path Loss realizations ')
>>> plt.show()
"""
hBS = pBS[2,:][:,np.newaxis]
hMS = pMS[2,:][:,np.newaxis]
wr = wr[:,np.newaxis]
hroof = hroof[:,np.newaxis]
phir = phir[:,np.newaxis]
fMHz = fMHz[np.newaxis,:]
dm = np.sqrt(np.sum((pBS-pMS)*(pBS-pMS),axis=0))[:,np.newaxis]
dkm = dm/1000.
Nlink = len(dm)
pl0 = 32.4 + 20*np.log10(dkm) + 20*np.log10(fMHz)
delta_base = hBS-hroof
u035 = np.where((phir>=0) & (phir<35))
u3555 = np.where((phir>=35) & (phir<55))
u5590 = np.where((phir>=55) & (phir<90))
plori = np.zeros(Nlink)[:,np.newaxis]
# path loss due to orientation w.r.t road
plori[u035] = -10+0.354*phir[u035]
plori[u3555] = 2.5+0.075*phir[u3555]
plori[u5590] = 4.0-0.114*(phir[u5590]-55)
# rooftop to street
plrts = -16.9-10*np.log10(wr)+10*np.log10(fMHz)+20*np.log10(hroof-hMS)+plori
uroofsupBS = np.where(hBS>hroof)
uroofinfBS = np.where(hBS<=hroof)
udistsup500 = np.where((hBS<=hroof)&(dkm>0.5))
udistinf500 = np.where((hBS<=hroof)&(dkm<0.5))
plbsh = np.zeros((Nlink,1))
plbsh[uroofsupBS] = -18*np.log10(1+delta_base[uroofsupBS])
ka = 54*np.ones((Nlink,1))
ka[udistsup500] = ka[udistsup500]-0.8*delta_base[udistsup500]
ka[udistinf500] = ka[udistinf500]-0.8*delta_base[udistinf500]*dkm[udistinf500]/0.5
kd = 18*np.ones((Nlink,1))
kd[uroofinfBS] = kd[uroofinfBS]-15*delta_base[uroofinfBS]/hroof[uroofinfBS]
if city=='medium':
kf = -4+0.7*(fMHz/925.-1)
else:
kf = -4+1.5*(fMHz/925.-1)
plmsd = plbsh+ka+kd*np.log10(dkm)+kf*np.log10(fMHz)-9*np.log10(wb)
pl = pl0
padd = plmsd + plrts
ulosspos = np.where(padd>0)[0]
pl[ulosspos]=pl[ulosspos]+padd[ulosspos]
if not dB:
pl = 10**(-pl/20.)
return(pl)
def cost259(pMS,pBS,fMHz,dB=True):
""" cost259 model
Parameters
----------
pMS : np.array (position of Mobile Station)
pBS : np.array (position of Base station)
fMHz : float
"""
dm = np.sqrt((pBS-pMS)*(pBS-pMS))
lmbd = 300/fMHz
    pl = 10*2.6*np.log10(dm)+20*np.log10(4*np.pi/lmbd)
if not dB:
        pl = 10**(-pl/20.)
return(pl)
def hata(pMS,pBS,fGHz,typ):
""" Hata Path loss model
Parameters
----------
pMS : np.array | (3,N)
Mobile position (meters)
pBS : np.array | (3,N)
Base station position (meters)
fGHz : np.array
hMS : height mobile station (m)
hBS : height base station (m)
typ : 'small' or 'big'
Returns
-------
L : Attenuation (dB)
Examples
--------
>>> Npt = 120
>>> d = np.linspace(100,5000,Npt)
>>> hBS = 30
>>> hMS = 1.5
>>> fGHz = 0.9
>>> pMS = np.zeros((3,Npt))
>>> pMS[2,:] = hMS
>>> pBS = np.c_[d,[1]*Npt,[hBS]*Npt].T
>>> L = hata(pMS,pBS,fGHz,'small')
Notes
-----
This model is valid until 1.5GHz, for higher frequency see
COST231-Hata model
References
----------
OKUMURA (Y.), OHMORI (E.), KAWANO (T.)
et FUKUA (K.). Field strength and its varia-
bility in UHF and VHF land-mobile radio ser-
vice. Rev. Elec. Commun. Lab., vol. 16, n o 9,
1968.
HATA (M.). Empirical formula for propaga-
tion loss in land mobile radio services. IEEE
Trans. Veh. Technol., vol. 29, pp. 317-325,
Aug. 1980
"""
assert pMS.shape[0] == 3
assert pBS.shape[0] == 3
dm = np.sqrt(np.sum((pBS-pMS)**2,axis=0))
hMS = pMS[2]
hBS = pBS[2]
if (typ=='small'):
CH = (1.1*np.log10(fGHz*1000)-0.7)*hMS-(1.56*np.log10(fGHz*1000)-0.8)
if (typ=='big'):
if fGHz<0.2:
CH = 8.29*(np.log10(1.54*hMS)**2)-1.1
else:# valid until 1.5GHz
CH = 3.2*(np.log10(11.75*hMS)**2)-4.97
    # import ipdb
    # ipdb.set_trace()
L = 69.55+26.16*np.log10(fGHz*1000)-13.82*np.log10(hBS)+(44.9-6.55*np.log10(hBS))*np.log10(dm/1000.)-CH
return(L)
def cost2100(pMS,pBS,fGHz,nfloor=1,dB=True):
""" cost 2100 model
Parameters
----------
pMS :
pBS :
fGHz : float
nfloor : int
dB : boolean
"""
# distance (meters)
dm = np.sqrt((pBS-pMS)*(pBS-pMS))
    pl0 = 32.4+20*np.log10(dm)+20*np.log10(fGHz)
pld = nfloor*30
pl = pl0+pld
if not dB:
pl = 10**(-pl/20.)
return(pl)
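# Usage sketch (added for illustration, not in the original source; values
# assumed): indoor loss through two floors at 3.6 GHz for a 15 m separation,
# with positions given as scalar 1-D coordinates as the distance computation
# above expects.
#
# pl = cost2100(0., 15., 3.6, nfloor=2)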
def PL(fGHz,pts,p,n=2.0,dB=True,d0=1):
""" calculate Free Space Path Loss
Parameters
----------
fGHz : float
frequency (GHz)
pts : np.array (2xNp)
points
p : np.array (2x1) or (2xNp)
n : float
path loss exponent (default = 2)
dB : : boolean
return result in dB
Returns
-------
PL : np.array
path loss w.r.t distance and frequency
"""
shp = np.shape(p)
# assert(shp[0]==2)
D = np.sqrt(np.sum((pts-p)**2,axis=0))
# f x grid x ap
#PL = np.array([PL0(fGHz)])[:,np.newaxis] + 10*n*np.log10(D)[np.newaxis,:]
    # d0 is the reference distance; pass it as R (it is not an antenna gain)
    PL = PL0(fGHz,R=d0)[:,np.newaxis] + 10*n*np.log10(D/d0)[np.newaxis,:]
if not dB:
PL=10**(-PL/10)
return(PL)
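# Usage sketch (added for illustration, not in the original source; points and
# frequency are assumed): free-space loss from an access point at the origin
# to three points on the x axis.
#
# pts = np.array([[1., 5., 10.], [0., 0., 0.]])   # (2 x Np)
# p = np.array([[0.], [0.]])
# Lfs = PL(np.array([2.4]), pts, p, n=2.0)        # shape (Nf, Np), in dB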
def Lconcrete(fGHz):
""" 3GPP above 6GHz concrete loss
From Table 7.4.3.1 of 3GPP TR38.900
Study on channel model for frequency spectrum above 6 GHz
"""
return(5+4*fGHz)
def Lglass(fGHz):
""" 3GPP above 6GHz glass loss
From Table 7.4.3.1 of 3GPP TR38.900
Study on channel model for frequency spectrum above 6 GHz
"""
return(2 + 0.2* fGHz)
def LIRRglass(fGHz):
""" 3GPP above 6GHz IRR (Infra Red Reflecting) glass loss
From Table 7.4.3.1 of 3GPP TR38.900
Study on channel model for frequency spectrum above 6 GHz
"""
return(23 + 0.3* fGHz)
def Lwood(fGHz):
""" 3GPP above 6GHz wood loss
From Table 7.4.3.1 of 3GPP TR38.900
Study on channel model for frequency spectrum above 6 GHz
"""
return(4.85 + 0.12* fGHz)
def LossPenetration(fGHz, alpha = 0.3, typ='low'):
    """ 3GPP TR38.900 O2I building penetration loss (low-loss or high-loss model) """
    if typ=='low':
        PLTW = 5 - 10*np.log10(alpha*10**(-Lglass(fGHz)/10)+(1-alpha)*10**(-Lconcrete(fGHz)/10))
    if typ=='high':
        PLTW = 5 - 10*np.log10((1-alpha)*10**(-LIRRglass(fGHz)/10)+alpha*10**(-Lconcrete(fGHz)/10))
return(PLTW)
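# Usage sketch (added for illustration, not in the original source; frequency
# and glass ratio are assumed): low-loss and high-loss O2I penetration at 28 GHz.
#
# fGHz = np.array([28.0])
# PLTW_low = LossPenetration(fGHz, alpha=0.3, typ='low')
# PLTW_high = LossPenetration(fGHz, alpha=0.3, typ='high')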
def Losst(L,fGHz,p1,p2,dB=True,bceilfloor=False):
""" calculate Losses between links p1-p2
Parameters
----------
L : Layout object
fGHz : np.array
frequency GHz
p1 : source points
(3 x Np1) array or (3,) array
p2 : observation points
(3 x Np2) array or (3,) array
dB : boolean
bceilfloor : boolean
Examples
--------
.. plot::
:include-source:
>>> import matplotlib.pyplot as plt
>>> from pylayers.measures.mesuwb import *
>>> from pylayers.antprop.loss import *
>>> S = Simul()
>>> S.layout('WHERE1.lay')
>>> fGHz = 4
>>> Tx,Rx = ptw1()
>>> Lwo,Lwp,Edo,Edp = Losst(S.L,fGHz,Tx.T,Rx[1,0:2],dB=True)
>>> fig=plt.figure(figsize=(20,10))
>>> fig,ax = S.L.showGs(fig=fig)
>>> tit = plt.title('test Losst')
>>> sc2 = ax.scatter(Rx[1,0],Rx[1,1],s=20,marker='x',c='k')
>>> sc1 = ax.scatter(Tx[:,0],Tx[:,1],s=20,c=Lwo,linewidth=0)
>>> cb = plt.colorbar(sc1)
>>> cb.set_label('dB')
>>> plt.show()
See Also
--------
pylayers.antprop.coverage
pylayers.slab.Interface.losst
"""
if (type(fGHz)==float) | (type(fGHz)==int):
fGHz=np.array([fGHz],dtype=float)
sh1 = np.shape(p1)
sh2 = np.shape(p2)
if (len(sh1)>1) & (len(sh2)>1):
Nlink = max(sh1[1],sh2[1])
if (len(sh1)>1) & (len(sh2)<2):
Nlink = sh1[1]
if (len(sh1)<2) & (len(sh2)>1):
Nlink = sh2[1]
if (len(sh1)<2) & (len(sh2)<2):
Nlink = 1
# determine incidence angles on segment crossing p1-p2 segment
logger.debug('losst before angleonlink3')
data = L.angleonlink3(p1,p2)
# as many slabs as segments and subsegments
us = data['s']
slabs = np.array([ L.Gs.node[x]['name'] for x in us ])
#slabs = L.sla[us]
check = np.where(slabs=='')
#
# As segment numbering is not necessarily contiguous
# there exist void string '' in slabs
cslab = list(np.unique(slabs))
if '' in cslab:
cslab.remove('')
if 'AIR' in cslab:
cslab.remove('AIR')
if '_AIR' in cslab:
cslab.remove('_AIR')
LossWallo = np.zeros((len(fGHz),Nlink))
LossWallp = np.zeros((len(fGHz),Nlink))
EdWallo = np.zeros((len(fGHz),Nlink))
EdWallp = np.zeros((len(fGHz),Nlink))
for slname in cslab:
# u index of slabs of name slname
# data['a'][u] angle
# data['s'][u] segment number including subsegment
u = np.nonzero(np.array(slabs)==slname)[0]
#
# calculate Loss for slab slname
#
lko,lkp = L.sl[slname].losst(fGHz,data['a'][u])
#
# calculate Excess delay for slab slname
#
do , dp = L.sl[slname].excess_grdelay(theta=data['a'][u])
# data['i'][u] links number
indexu = data['i'][u]
# reduce to involved links
involved_links, indices = np.unique(indexu,return_index=True)
indicep = np.hstack((indices[1:],np.array([len(indexu)])))
# range on involved links
irange = np.arange(len(involved_links))
#
# sum contribution of slab of a same link
#
Wallo = np.array([ np.sum(lko[:,indices[x]:indicep[x]],axis=1) for x in irange ] ).T
Wallp = np.array([ np.sum(lkp[:,indices[x]:indicep[x]],axis=1) for x in irange ] ).T
Edo = np.array([np.sum(do[indices[x]:indicep[x]]) for x in irange]).T
Edp = np.array([np.sum(dp[indices[x]:indicep[x]]) for x in irange]).T
LossWallo[:,involved_links] = LossWallo[:,involved_links] + Wallo
LossWallp[:,involved_links] = LossWallp[:,involved_links] + Wallp
EdWallo[:,involved_links] = EdWallo[:,involved_links] + Edo
EdWallp[:,involved_links] = EdWallp[:,involved_links] + Edp
if bceilfloor:
# Managing Ceil / Floor transmission
# At that point there is only a single type of ceil and floor
# it should be defined ideally as a specific entity
#
# TODO fix shape error p2 is not always (3 x N)
#
if (p1[2,:]> L.zceil).any() or (p2[2,:]> L.zceil).any():
# WARNING : this test should be done individually
if (p1[2]>p2[2]).all():
v0 = p1
v1 = p2
else:
v0 = p2
v1 = p1
uu = v0 - v1
# 1 x N
nu = np.sqrt(np.sum(uu * uu, axis=0))
# 3 x N
un = uu / nu[np.newaxis, :]
dotp = np.einsum('ij,i->j',un,np.array([0,0,1]))
alphas = np.arccos(dotp)
#
# calculate Loss for slab CEIL
#
lkco,lkcp = L.sl['CEIL'].losst(fGHz,alphas)
#
# calculate Excess delay for slab CEIL
#
dco , dcp = L.sl['CEIL'].excess_grdelay(theta=alphas)
LossWallo = LossWallo + lkco
LossWallp = LossWallp + lkcp
EdWallo = EdWallo + dco
EdWallp = EdWallp + dcp
# check crossing floor
if (p1[2,:]< L.zfloor).any() or (p2[2,:]< L.zfloor).any():
# WARNING : this test should be done individually
if (p1[2]>p2[2]).all():
v0 = p1
v1 = p2
else:
v0 = p2
v1 = p1
uu = v0 - v1
# 1 x N
nu = np.sqrt(np.sum(uu * uu, axis=0))
# 3 x N
un = uu / nu[np.newaxis, :]
dotp = np.einsum('ij,i->j',un,np.array([0,0,1]))
alphas = np.arccos(dotp)
#
            # calculate Loss for slab FLOOR
#
lkfo,lkfp = L.sl['FLOOR'].losst(fGHz,alphas)
#
            # calculate Excess delay for slab FLOOR
#
dfo , dfp = L.sl['FLOOR'].excess_grdelay(theta=alphas)
LossWallo = LossWallo + lkfo
LossWallp = LossWallp + lkfp
EdWallo = EdWallo + dfo
EdWallp = EdWallp + dfp
if not dB:
LossWallo = 10**(-LossWallo/10)
LossWallp = 10**(-LossWallp/10)
    #print(LossWallo)
return(LossWallo,LossWallp,EdWallo,EdWallp)
def gaspl(d,fGHz,T,PhPa,wvden):
""" attenuation due to atmospheric gases
Parameters
----------
d : np.array
range (meters)
fGHz : np.array
frequency (GHz)
T : float
Temprature in degree Celcius
PhPa : float
Pressure in hPa
wvden : float
Water vapor density (g/m**3)
Examples
--------
>>> import numpy as np
>>> import matplotlib.pyplot as plt
>>> T = 15
>>> PhPa = 1013
>>> wvden = 7.5
>>> d = 1000
>>> fGHz = np.linspace(1,1000,100)
>>> L = gaspl(d,fGHz,T,PhPa,wvden)
>>> plt.plot(fGHz,L)
Notes
-----
    This function implements ITU-R Recommendation P.676-10
"""
affO2=np.array([
[50.474214,0.975,9.651,6.690,0.0,2.566,6.850],
[50.987745,2.529,8.653,7.170,0.0,2.246,6.800],
[51.503360,6.193,7.709,7.640,0.0,1.947,6.729],
[52.021429,14.320,6.819,8.110,0.0,1.667,6.640],
[52.542418,31.240,5.983,8.580,0.0,1.388,6.526],
[53.066934,64.290,5.201,9.060,0.0,1.349,6.206],
[53.595775,124.600,4.474,9.550,0.0,2.227,5.085],
[54.130025,227.300,3.800,9.960,0.0,3.170,3.750],
[54.671180,389.700,3.182,10.370,0.0,3.558,2.654],
[55.221384,627.100,2.618,10.890,0.0,2.560,2.952],
[55.783815,945.300,2.109,11.340,0.0,-1.172,6.135],
[56.264774,543.400,0.014,17.030,0.0,3.525,-0.978],
[56.363399,1331.800,1.654,11.890,0.0,-2.378,6.547],
[56.968211,1746.600,1.255,12.230,0.0,-3.545,6.451],
[57.612486,2120.100,0.910,12.620,0.0,-5.416,6.056],
[58.323877,2363.700,0.621,12.950,0.0,-1.932,0.436],
[58.446588,1442.100,0.083,14.910,0.0,6.768,-1.273],
[59.164204,2379.900,0.387,13.530,0.0,-6.561,2.309],
[59.590983,2090.700,0.207,14.080,0.0,6.957,-0.776],
[60.306056,2103.400,0.207,14.150,0.0,-6.395,0.699],
[60.434778,2438.000,0.386,13.390,0.0,6.342,-2.825],
[61.150562,2479.500,0.621,12.920,0.0,1.014,-0.584],
[61.800158,2275.900,0.910,12.630,0.0,5.014,-6.619],
[62.411220,1915.400,1.255,12.170,0.0,3.029,-6.759],
[62.486253,1503.000,0.083,15.130,0.0,-4.499,0.844],
[62.997984,1490.200,1.654,11.740,0.0,1.856,-6.675],
[63.568526,1078.000,2.108,11.340,0.0,0.658,-6.139],
[64.127775,728.700,2.617,10.880,0.0,-3.036,-2.895],
[64.678910,461.300,3.181,10.380,0.0,-3.968,-2.590],
[65.224078,274.000,3.800,9.960,0.0,-3.528,-3.680],
[65.764779,153.000,4.473,9.550,0.0,-2.548,-5.002],
[66.302096,80.400,5.200,9.060,0.0,-1.660,-6.091],
[66.836834,39.800,5.982,8.580,0.0,-1.680,-6.393],
[67.369601,18.560,6.818,8.110,0.0,-1.956,-6.475],
[67.900868,8.172,7.708,7.640,0.0,-2.216,-6.545],
[68.431006,3.397,8.652,7.170,0.0,-2.492,-6.600],
[68.960312,1.334,9.650,6.690,0.0,-2.773,-6.650],
[118.750334,940.300,0.010,16.640,0.0,-0.439,0.079],
[368.498246,67.400,0.048,16.400,0.0,0.000,0.000],
[424.763020,637.700,0.044,16.400,0.0,0.000,0.000],
[487.249273,237.400,0.049,16.000,0.0,0.000,0.000],
[715.392902,98.100,0.145,16.000,0.0,0.000,0.000],
[773.839490,572.300,0.141,16.200,0.0,0.000,0.000],
[834.145546,183.100,0.145,14.700,0.0,0.000,0.000]])
## spectroscopic data for H20
## f0 b1 b2 b3 b4 b5 b6
affH2O=np.array([
[22.235080,0.1130,2.143,28.11,0.69,4.800,1.00],
[67.803960,0.0012,8.735,28.58,0.69,4.930,0.82],
[119.995940,0.0008,8.356,29.48,0.70,4.780,0.79],
[183.310091,2.4200,0.668,30.50,0.64,5.300,0.85,],
[321.225644,0.0483,6.181,23.03,0.67,4.690,0.54],
[325.152919,1.4990,1.540,27.83,0.68,4.850,0.74],
[336.222601,0.0011,9.829,26.93,0.69,4.740,0.61],
[380.197372,11.5200,1.048,28.73,0.54,5.380,0.89,],
[390.134508,0.0046,7.350,21.52,0.63,4.810,0.55],
[437.346667,0.0650,5.050,18.45,0.60,4.230,0.48,],
[439.150812,0.9218,3.596,21.00,0.63,4.290,0.52,],
[443.018295,0.1976,5.050,18.60,0.60,4.230,0.50],
[448.001075,10.3200,1.405,26.32,0.66,4.840,0.67],
[470.888947,0.3297,3.599,21.52,0.66,4.570,0.65,],
[474.689127,1.2620,2.381,23.55,0.65,4.650,0.64],
[488.491133,0.2520,2.853,26.02,0.69,5.040,0.72],
[503.568532,0.0390,6.733,16.12,0.61,3.980,0.43],
[504.482692,0.0130,6.733,16.12,0.61,4.010,0.45],
[547.676440,9.7010,0.114,26.00,0.70,4.500,1.00],
[552.020960,14.7700,0.114,26.00,0.70,4.500,1.00],
[556.936002,487.4000,0.159,32.10,0.69,4.110,1.00],
[620.700807,5.0120,2.200,24.38,0.71,4.680,0.68],
[645.866155,0.0713,8.580,18.00,0.60,4.000,0.50],
[658.005280,0.3022,7.820,32.10,0.69,4.140,1.00],
[752.033227,239.6000,0.396,30.60,0.68,4.090,0.84],
[841.053973,0.0140,8.180,15.90,0.33,5.760,0.45],
[859.962313,0.1472,7.989,30.60,0.68,4.090,0.84],
[899.306675,0.0605,7.917,29.85,0.68,4.530,0.90],
[902.616173,0.0426,8.432,28.65,0.70,5.100,0.95],
[906.207325,0.1876,5.111,24.08,0.70,4.700,0.53],
[916.171582,8.3400,1.442,26.70,0.70,4.780,0.78],
[923.118427,0.0869,10.220,29.00,0.70,5.000,0.80],
[970.315022,8.9720,1.920,25.50,0.64,4.940,0.67],
[987.926764,132.1000,0.258,29.85,0.68,4.550,0.90],
    [1780.000000,22300.0000,0.952,176.20,0.50,30.500,5.00]])  # b1 and b6 restored (assumed from ITU-R P.676-10 Table 2)
dkm = d/1000.
TK = T + 273.15
theta = 300./TK #3
fO2 = affO2[:,0]
a1 = affO2[:,1]
a2 = affO2[:,2]
a3 = affO2[:,3]
a4 = affO2[:,4]
a5 = affO2[:,5]
a6 = affO2[:,6]
fH2O = affH2O[:,0]
b1 = affH2O[:,1]
b2 = affH2O[:,2]
b3 = affH2O[:,3]
b4 = affH2O[:,4]
b5 = affH2O[:,5]
b6 = affH2O[:,6]
e = wvden*TK/216.7 # 4
SO2 = a1*1e-7*PhPa*(theta**3)*np.exp(a2*(1-theta)) # 3
DO2 = a3*1e-4*(PhPa*(theta**(0.8-a4))+1.1*e*theta) # 6a
SH2O = b1*1e-1*e*(theta**(3.5))*np.exp(b2*(1-theta)) # 3
DH2O = b3*1e-4*(PhPa*theta**b4+b5*e*theta**b6) # 6a
DO2_m = np.sqrt(DO2**2+2.25e-6) # 6b
DH2O_m = 0.535*DH2O+np.sqrt(0.217*DH2O**2+(2.1316*1e-12*fH2O**2)/theta)
deltaO2 = (a5+a6*theta)*1e-4*(PhPa+e)*theta**(0.8)
#
# O2
#
uO2 = fO2[:,None]-fGHz[None,:]
vO2 = fO2[:,None]+fGHz[None,:]
n1O2 = DO2_m[:,None]-deltaO2[:,None]*uO2
n2O2 = DO2_m[:,None]-deltaO2[:,None]*vO2
d1O2 = uO2**2 + DO2_m[:,None]**2
d2O2 = vO2**2 + DO2_m[:,None]**2
FO2 = (fGHz[None,:]/fO2[:,None])*(n1O2/d1O2+n2O2/d2O2)
UO2 = SO2[:,None]*FO2
#
# H2O
#
uH2O = fH2O[:,None]-fGHz[None,:]
vH2O = fH2O[:,None]+fGHz[None,:]
nH2O = DH2O_m[:,None]
d1H2O = uH2O**2 + DH2O_m[:,None]**2
d2H2O = vH2O**2 + DH2O_m[:,None]**2
FH2O = (fGHz[None,:]/fH2O[:,None])*(nH2O/d1H2O+nH2O/d2H2O)
UH2O = SH2O[:,None]*FH2O
# Nsec (8)
dD = 5.6e-4*(PhPa+e)*theta**(0.8)
t1 = 6.14e-5/(dD*(1.+(fGHz/dD)**2))
t2 = 1.4e-12*PhPa*(theta**(1.5))/(1+1.9e-5*fGHz**(1.5))
Nsec = fGHz*PhPa*(theta**2)*(t1+t2) # 9
ulow = np.where(fGHz<118.750343)[0]
uhigh = np.where(fGHz>=118.750343)[0]
UO2low = UO2[:,ulow]
UO2high = UO2[:,uhigh]
SO2low = np.sum(UO2low,axis=0)
SO2high = np.sum(UO2high[38:,:],axis=0)
sSO2 = np.hstack((SO2low,SO2high))
Npp = sSO2 + np.sum(UH2O,axis=0)+Nsec
Npp = np.sum(UO2,axis=0) + np.sum(UH2O,axis=0)+Nsec
gamma = 0.1820*fGHz*Npp
LgasdB = gamma*dkm
return(LgasdB)
def Loss0(S,rx,ry,f,p):
""" calculate Loss through Layers for theta=0 deg
Parameters
----------
S : Simulation object
rx : extremity of link
ry : extremity of link
fGHz : float
frequency GHz
p :
"""
    L = S.L  # the Layout is taken from the Simul object (assumed)
    Nx = len(rx)
Ny = len(ry)
Lw = np.zeros((Nx,Ny))
    print(np.shape(Lw))
i = 0
for x in rx:
j = 0
for y in ry:
Loss = 0
pxy = np.array([x,y])
seglist,theta = L.angleonlinkold(p,pxy)
for k in seglist:
name = L.name[k]
lk = L.sl[name].loss0(f)
Loss = Loss + lk[0]
Lw[i,j] = Loss
j = j+1
i = i+1
return(Lw)
def Loss_diff(u):
""" calculate Path Loss of the diffraction
"""
if u < -0.7:
Ld = 0
elif u > 1.5:
Ld = 13 + 20*np.log10(u)
else:
Ld = 6.9 + 20*np.log10(np.sqrt((u-0.1)**2+1)+u-0.1)
return(Ld)
def calnu(h,d1,d2,fGHz):
r""" Calculate the diffraction Fresnel parameter
Parameters
----------
h : signed height w.r.t LOS (meter)
d1 : distance 1 (meter)
d2 : distance 2 (meter)
fGHz : frequency GHz
Notes
-----
.. math:: \nu = h \sqrt{\frac{2}{\lambda} \frac{d_1+d_2}{d_1 d_2}}
"""
ld = 0.3/fGHz
nu = h*np.sqrt(2*(d1+d2)/(ld*d1*d2))
return(nu)
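# Usage sketch (added for illustration, not in the original source; geometry
# is assumed): Fresnel parameter of a knife edge 10 m above the line of sight,
# 1 km from each terminal, at 2.4 GHz, fed to the diffraction loss helper above.
#
# nu = calnu(h=10., d1=1000., d2=1000., fGHz=2.4)
# Ld = Loss_diff(nu)   # diffraction loss in dB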
def route(X, Y, Z, Ha, Hb, fGHz, K, method='deygout'):
""" diffraction loss along a route
Parameters
----------
X : np.array (Nphi,Nr)
cartesian coordinate grid
Y : np.array (Nphi,Nr)
cartesian coordinate grid
Z : np.array (Nphi,Nr)
height (meters)
Ha : float
Hb : float
fGHz : np.array (,Nf)
frequency in GHz
method : 'deygout' | 'bullington'
Returns
-------
L : Losses (dB)
"""
Nphi, Nr = Z.shape
if (type(fGHz) == float):
fGHz = np.array([fGHz])
Nf = len(fGHz)
L = np.zeros((Nphi, Nf))
L0 = np.zeros(Nf)
# loop over azimut
for ip in range(Nphi):
x = X[ip, :]
y = Y[ip, :]
z = Z[ip, :]
d = np.sqrt((x-x[0])**2+(y-y[0])**2)
# effect of refraction in equivalent earth curvature
dh = d*(d[::-1])/(2*K*6375e3)
z = z + dh
LOS = 32.4 + 20*np.log10(fGHz) + 20*np.log10(d[-1])
z[0] = z[0] + Ha
z[-1] = z[-1] + Hb
if method == 'deygout':
LDiff = deygout(d, z, fGHz, L0, 0)
if method == 'bullington':
LDiff, deq, heq = bullington(d, z, fGHz)
L[ip, :] = LDiff+LOS
return(L)
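# Usage sketch (added for illustration, not in the original source; terrain and
# link parameters are assumed): diffraction loss along a single azimuth cut
# over flat terrain, 100 range bins out to 5 km, antennas at 30 m and 2 m,
# 1 GHz, K = 4/3.
#
# r = np.linspace(1., 5000., 100)
# X = r[None, :]
# Y = np.zeros((1, 100))
# Z = np.zeros((1, 100))
# Lroute = route(X, Y, Z, Ha=30., Hb=2., fGHz=np.array([1.0]), K=4/3.)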
def cover(X, Y, Z, Ha, Hb, fGHz, K, method='deygout'):
""" outdoor coverage on a region
Parameters
----------
X : np.array (Nphi,Nr)
cartesian coordinate grid
Y : np.array (Nphi,Nr)
cartesian coordinate grid
Z : np.array (Nphi,Nr)
height (meters)
Ha : float
Hb : float
fGHz : np.array (,Nf)
frequency in GHz
method : 'deygout' | 'bullington'
Returns
-------
L : Losses (dB)
"""
Nphi, Nr = Z.shape
if (type(fGHz) == float):
fGHz = np.array([fGHz])
Nf = len(fGHz)
L = np.zeros((Nphi, Nr, Nf))
L0 = np.zeros(Nf)
# loop over azimut
for ip in range(Nphi):
# loop over range
# il : 2 ... Nr-2
# uk : 0 ....Nr-1
for il in np.arange(2, Nr-1):
uk = np.arange(0, il+1)
z = np.empty(len(uk))
x = X[ip, uk]
y = Y[ip, uk]
z[uk] = Z[ip, uk]
d = np.sqrt((x-x[0])**2+(y-y[0])**2)
# effect of refraction in equivalent earth curvature
dh = d*(d[::-1])/(2*K*6375e3)
z = z + dh
LOS = 32.4 + 20*np.log10(fGHz) + 20*np.log10(d[-1])
z[0] = z[0] + Ha
z[-1] = z[-1] + Hb
if method == 'deygout':
LDiff = deygout(d, z, fGHz, L0, 0)
if method == 'bullington':
LDiff, deq, heq = bullington(d, z, fGHz)
L[ip, il, :] = LDiff[None, :]+LOS[None,:]
return(L)
def deygout(d, height, fGHz, L, depth):
""" Deygout attenuation
Parameters
----------
d : np.array (,N)
horizontal distance
height : np.array (,N)
height profile
fGHz : np.array (,Nf)
frequency GHz
L : np.array (,Nf)
Additional Loss
depth : recursive depth
Notes
-----
This function is recursive
"""
lmbda = 0.3/fGHz
L0 = np.zeros(len(fGHz))
depth = depth+1
N = len(height)
if depth < 3:
if N > 3:
u = np.arange(N)/(N-1.0) # float
# l : straight line between termination (LOS)
l = (height[0])*(1-u)+(height[-1])*u
# h excludes termination points
h = height[1:-1] - l[1:-1]
# Fresnel parameter (engagement)
nu = h[:, None] * np.sqrt((2/lmbda[None, :]) *
(1/d[1:-1, None]+1/(d[-1]-d[1:-1, None])))
imax = np.unique(np.nanargmax(nu, axis=0))[0]
numax = nu[imax, :]
else:
numax = -10*np.ones(len(fGHz))
if (numax > -0.78).any():
w = numax - 0.1
L = L + np.maximum(6.9 + 20*np.log10(np.sqrt(w**2+1)+w), 0)
# left link
height1 = height[0:imax+2]
d1 = d[0:imax+2]
Ll = deygout(d1, height1, fGHz, L0, depth)
# right link
height2 = height[imax+1:]
d2 = d[imax+1:]
Lr = deygout(d2, height2, fGHz, L0, depth)
# add losses
L = L + Lr + Ll
return(L)
def bullington(d, height, fGHz):
""" edges attenuation with Bullington method
Parameters
----------
d : np.array
height : np.array
antenna height is includes in height[0] and height[-1]
fGHz : np.array
Returns
-------
L : np.array
total loss
"""
def recl(d, height):
""" determine left interception point
Parameters
----------
d : np.array
height : np.array
"""
N = len(height)
u = np.arange(N)/(N-1.)
# l : straight line between termination (LOS)
l = height[0]*(1-u)+(height[-1])*u
h = height - l
# imax : index of the maximum height offset
imax = np.argmax(h)
if imax>0:
# hmax : maximum height offset
hmax = h[imax]
# parameterization from 0 to imax
ul = np.arange(imax)/(imax-1.)
# straight line
dhl = h[0]*(1-ul) + hmax*ul
# el : offset if <0 split again
el = dhl - h[0:imax]
if np.min(el) < 0:
u, v = recl(d[0:imax+1], height[0:imax+1])
else:
u = d[0:imax+1]
v = h[0:imax+1]
else:
u = d[0:1]
v = d[0:1]
return(u, v)
#if min(er)<0:
# u,v = rec(d[imax-1:-1],dhl)
#else:
#er = dhr - h[imax-1:-1]
def recr(d, height):
""" determine the right interception point
"""
N = len(height)
u = np.arange(N)/(N-1.)
l = height[0]*(1-u)+(height[-1])*u
h = height - l
imax = np.argmax(h)
hmax = h[imax]
ur = np.arange(N-imax)/(N-imax-1.)
dhr = hmax*(1-ur) + h[-1]*ur
er = dhr - h[imax:]
if np.min(er) < 0:
u, v = recr(d[imax:],h[imax:])
else:
u = d[imax:]
v = h[imax:]
return(u,v)
#if min(er)<0:
# u,v = rec(d[imax-1:-1],dhl)
#else:
#er = dhr - h[imax-1:-1]
lmbda = 0.3/fGHz
u = np.arange(len(height))/(len(height)-1.)
l = (height[0])*(1-u)+(height[-1])*u
h = height - l
if (h>0).any():
ul, vl = recl(d, height)
ur, vr = recr(d, height)
idtx = len(ul)
idrx = len(h) - len(ur)
dtx = d[idtx]
drx = d[-1]-d[idrx]
htx = h[idtx-1]
hrx = h[idrx]
deq = (dtx*hrx)*d[-1]/(drx*htx+dtx*hrx)
heq = deq*(htx/dtx)
else:
heq = -np.min(np.abs(h[1:-1]))
ieq = np.where(h==heq)[0][0]
deq = d[ieq]
nu = heq*np.sqrt((2/lmbda)*(1/deq+1/(d[-1]-deq)))
w = nu - 0.1
L = np.maximum(6.9 + 20*np.log10(np.sqrt(w**2+1)+w), 0)
return(L, deq, heq)
def two_rays_flatearth(fGHz, **kwargs):
"""
Parameters
----------
p0 : transmitter position
(3 x Np1) array or (2,) array
p1 : receiver position
(3 x Np2) array or (2,) array
OR :
d : distance between Tx and Rx
(Np1,)
ht : Tx height
hr : Rx height
(Np1)
GtdB : float (0)
Transmitter Antenna Gain (dB)
GrdB : float(0)
Receiver Antenna Gain (dB)
fGHz : float (2.4)
frequency (GHz)
gamma : complex (-1.+0.j)
        Reflection coeff
dB : boolean (True)
return result in d
Returns
-------
P :
received power
Examples
--------
.. plot::
:include-source:
>>> from pylayers.antprop.loss import *
>>> NPT=10000
>>> x=np.array([0,0,8])
>>> x=x.reshape(3,1)
>>> y = np.ones((3,NPT))
>>> y[0,:]=0
>>> y[1,:]=np.arange(NPT)
>>> y[2,:]=2
>>> g0=1
>>> g1=1
>>> fGHz=2.4
>>> PL2R=two_rays_flatearth(p0=x,p1=y,fGHz=fGHz,GtdB=g0,GrdB=g1)
>>> PL1R = PL(fGHz,x,y,2)
>>> plt.semilogx(PL2R,label='two-ray model')
>>> plt.semilogx(-PL1R[0,:],label='one slope model')
>>> plt.axis([10,NPT,-150,-50])
>>> plt.title('Loss 2-rays model vs one slope model')
>>> plt.xlabel('distance (m)')
>>> plt.ylabel('Loss Pr/Pt (dB)')
>>> plt.legend()
>>> plt.show()
>>> d=np.arange(1,1000)
>>> PL2Rd = two_rays_flatearth(d=d,ht=np.array([5]),hr=np.array([10]),fGHz=fGHz,GtdB=g0,GrdB=g1)
>>> plt.semilogx(PL2Rd,label='two-ray model')
>>> plt.semilogx(-PL1R[0,:],label='one slope model')
>>> plt.axis([10,NPT,-150,-50])
>>> plt.title('Loss 2-rays model vs one slope model')
>>> plt.xlabel('distance (m)')
>>> plt.ylabel('Loss Pr/Pt (dB)')
>>> plt.legend()
>>> plt.show()
References
----------
https://en.wikipedia.org/wiki/Two-ray_ground-reflection_model#As_a_case_of_log_distance_path_loss_model
http://morse.colorado.edu/~tlen5510/text/classwebch3.html#x15-590003.3.3
"""
defaults = { 'p0':np.array((0,0,10)),
'p1':np.array((0,10,10)),
'd':[],
'ht':10,
'hr':10,
'GtdB':0.,
'GrdB':0.,
'gamma': -1.+0.j,
'pol':'v',
'eps' :[],
'sig':0.,
'dB':True
}
for k in defaults:
if k not in kwargs:
kwargs[k]=defaults[k]
GtdB=kwargs.pop('GtdB')
GrdB=kwargs.pop('GrdB')
Gt = 10**((1.*GtdB)/10.)
Gr = 10**((1.*GrdB)/10.)
gamma=kwargs.pop('gamma')
pol=kwargs.pop('pol')
eps=kwargs.pop('eps')
sig=kwargs.pop('sig')
if kwargs['d'] == []:
p0=kwargs['p0']
p1=kwargs['p1']
assert p0.shape[0] == 3, 'p0 is not 3D'
assert p1.shape[0] == 3, 'p1 is not 3D'
if len(p0.shape) == 1:
p0=p0.reshape(p0.shape[0],1)
if len(p1.shape) == 1:
p1=p1.reshape(p1.shape[0],1)
p0=1.*p0
p1=1.*p1
ht = p0[2,:]
hr = p1[2,:]
dloss = np.sqrt(np.sum((p0-p1)**2,axis=0)) #l0
else:
dloss=kwargs['d']
ht=kwargs['ht']
hr=kwargs['hr']
        # Gt and Gr have already been converted to linear scale above
d0 = np.sqrt( dloss**2 - 1.*(ht-hr)**2 ) # d0
dref = np.sqrt(d0**2+1.*(ht+hr)**2) #l0'
if eps != []:
psy = np.arcsin((ht+hr)/dref)
er = eps - 60.j*sig*0.3/fGHz
if pol == 'v':
Z = (1./er)* np.sqrt(er-np.cos(psy)**2)
elif pol == 'h':
Z = np.sqrt(er-np.cos(psy)**2)
gamma = (np.sin(psy)-Z)/((np.sin(psy)+Z))
deltad = dref-dloss
deltaphi = (2*np.pi*fGHz*deltad)/0.3
E= (0.3/(4*np.pi*fGHz)) * (np.sqrt(Gt*Gr)/dloss + gamma * np.sqrt(Gr*Gr)*(np.exp(-1.j*deltaphi))/dref)
P = abs(E)**2
# import ipdb
# ipdb.set_trace()
if kwargs['dB'] :
return 10*np.log10(P)
else:
return P
def lossref_compute(P,h0,h1,k=4/3.) :
"""
compute loss and reflection rays on curved earth
Parameters
----------
P : float |list
if len(P) == 1 => P is a distance
if len(P) == 4 => P is a list of [lon0,lat0,lon1,lat1]
where :
lat0 : float |string
latitude first point (decimal |deg min sec Direction)
lat1 : float |string
latitude second point (decimal |deg min sec Direction)
lon0 : float |string
longitude first point (decimal |deg min sec Direction)
lon1 : float |string
longitude second point (decimal |deg min sec Direction)
h0 : float:
height of 1st point
h1 : float:
height of 2nd point
k : electromagnetic earth factor
Returns
-------
dloss : float
length of direct path (meter)
dref : float
length of reflective path (meter)
psy : float
Reflection angle
References
----------
B. R. Mahafza, Radar systems analysis and design using MATLAB, Third edition. Boca Raton; London: CRC/Taylor & Francis, chapter 8, 2013.
"""
if isinstance(P,float) or isinstance(P,int) :
#P is a distance
r=P
mode = 'dist'
elif isinstance(P,np.ndarray) or isinstance(P,list):
if len(P) == 1:
#P is a distance
r=P
mode = 'dist'
elif len(P) == 4:
#P is a lonlat
lat0=P[0]
lon0=P[1]
lat1=P[2]
            lon1=P[3]
mode = 'lonlat'
else :
raise AttributeError('P must be a list [lat0,lon0,lat1,lon0] or a distance')
else :
raise AttributeError('Invalid P format ( list |ndarray )')
# if h0<h1:
# h1,h0 = h0,h1
r0 = 6371.e3 # earth radius
re = k*r0 # telecom earth radius
if mode == 'lonlat':
        # r = curvilinear (geodesic) distance between TX and RX
r = gu.distance_on_earth(lat0, lon0, lat1, lon1)
else :
r=P
r=1.*r
# import ipdb
# ipdb.set_trace()
p = 2/(np.sqrt(3))*np.sqrt(re*(h0+h1)+(r**2/4.)) #eq 8.45
eps = np.arcsin(2*re*r*(h1-h0)/p**3) # eq 8.46
#distance of reflection on curved earth
r1 = r/2 - p*np.sin(eps/3) #eq 8.44
r2 = r -r1
phi1 = r1/re #8.47
phi2 = r2/re # 8.48
R1 = np.sqrt(h0**2+4*re*(re+h0)*(np.sin(phi1/2))**2) # 8.51
R2 = np.sqrt(h1**2+4*re*(re+h1)*(np.sin(phi2/2))**2) #8.52
Rd = np.sqrt((h1-h0)**2+4*(re+h1)*(re+h0)*np.sin((phi1+phi2)/2.)**2) # 8.53
# tangente angle on earth
psy = np.arcsin((h1/R1)-R1/(2.*re)) #eq 8.55
deltaR = 4*R1*R2*np.sin(psy)**2/(R1+R2+Rd)
dloss = Rd
dref = R1+R2
return psy,dloss,dref
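# Usage sketch (added for illustration, not in the original source; link
# parameters are assumed): reflection geometry of a 20 km link over a curved
# earth with antenna heights of 30 m and 5 m.
#
# psy, dloss, dref = lossref_compute(20000., 30., 5.)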
def two_rays_curvedearthold(P,h0,h1,fGHz=2.4,**kwargs):
"""
Parameters
----------
P : float |list
if len(P) == 1 => P is a distance
if len(P) == 4 => P is a list of [lon0,lat0,lon1,lat1]
where :
lat0 : float |string
latitude first point (decimal |deg min sec Direction)
lat1 : float |string
latitude second point (decimal |deg min sec Direction)
lon0 : float |string
longitude first point (decimal |deg min sec Direction)
lon1 : float |string
longitude second point (decimal |deg min sec Direction)
h0 : float:
height of 1st point
h1 : float:
height of 2nd point
fGHz : float
frequency (GHz)
k : float
electromagnetic earth factor
GtdB : float
Transmitter Antenna Gain (dB)
GrdB : float
Receiver Antenna Gain (dB)
gamma : complex (-1.+0.j)
        Reflection coeff if eps and sig are not specified
'pol': string ('v')
polarization ('v'|'h')
'eps' : float ([])
lossless relative permittivity [],
'sig': float (0.)
conductivity
dB : boolean (True)
return result in dB
Returns
-------
P :
received power
Examples
--------
.. plot::
:include-source:
>>> from pylayers.antprop.loss import *
>>> import matplotlib.pyplot as plt
>>> fGHz=2.4
>>> p0=np.array(([0,0,20]))
>>> p1=np.array(([0,1,20]))
>>> p0=p0.reshape(3,1)
>>> p1=p1.reshape(3,1)
>>> TRF = [] #Two Ray model on flat earth
>>> TRC = [] #Two Ray model on curved earth
>>> PLoss=[]
>>> for d in np.arange(1,10000,1):
>>> p1[1,:]=d
>>> TRF.append(two_rays_flatearth(p0[:,0],p1[:,0],fGHz,GtdB=0.,GrdB=0.,))
>>> TRC.append(two_rays_curvedearth(d,p0[2,:],p1[2,:],fGHz))
>>> PLoss.append(PL(fGHz, p0[:,0],p1[:,0], n=2.0, dB=True, d0=np.array([1])))
>>> PLoss=np.array(PLoss)[:,0,0]
>>> plt.semilogx(TRF,label='two-rays model flat earth')
>>> plt.semilogx(TRC,label='two-rays model curved earth')
>>> plt.semilogx(-PLoss,label='Path Loss')
>>> plt.legend()
>>> plt.show()
"""
defaults = { 'GtdB':0.,
'GrdB':0.,
'k':4/3.,
'gamma': -1.+0.j,
'pol':'v',
'eps' :[],
'sig':0.,
'mode':'PL',
'dB':True
}
for k in defaults:
if k not in kwargs:
kwargs[k]=defaults[k]
GtdB=kwargs.pop('GtdB')
GrdB=kwargs.pop('GrdB')
Gt = 10**((1.*GtdB)/10.)
Gr = 10**((1.*GrdB)/10.)
k=kwargs.pop('k')
gamma=kwargs.pop('gamma')
pol=kwargs.pop('pol')
eps=kwargs.pop('eps')
sig=kwargs.pop('sig')
h0=1.*h0
h1=1.*h1
psy,dloss,dref = lossref_compute(P,h0,h1,k)
if eps != []:
er = eps - 60.j*sig*0.3/fGHz
if pol == 'v':
Z= (1./er)* np.sqrt(er-np.cos(psy)**2)
elif pol == 'h':
Z= np.sqrt(er-np.cos(psy)**2)
gamma = (np.sin(psy)-Z)/((np.sin(psy)+Z))
deltad= dref-dloss
deltaphi = (2*np.pi*fGHz*deltad)/0.3
E= (0.3/(4*np.pi*fGHz) ) *(np.sqrt(Gt*Gr)/dloss + gamma * np.sqrt(Gr*Gr)*(np.exp(-1.j*deltaphi))/dref)
P = abs(E)**2
# import ipdb
# ipdb.set_trace()
if kwargs['dB'] :
return 10*np.log10(P)
else:
return P
def visuPts(S,nu,nd,Pts,Values,fig=[],sp=[],vmin=0,vmax=-1,label=' ',tit='',size=25,colbar=True,xticks=False):
""" visuPt : Visualization of values a given points
Parameters
----------
S : Simul
nu : useful Points
nd : Points deleted
Pts : Points coordinates
Value
"""
vx = Pts[nu,0]
vy = Pts[nu,1]
vxd = Pts[nd,0]
vyd = Pts[nd,1]
if vmax<vmin:
#vmin = min(Values)
vmin = -150
vmax = max(Values)
S.L.showGs()
if xticks:
        for loc, spine in sp.spines.items():
if loc in ['left','bottom']:
spine.set_position(('outward',10)) # outward by 10 points
sp.yaxis.set_ticks_position('left')
sp.xaxis.set_ticks_position('bottom')
elif loc in ['right','top']:
spine.set_color('none') # don't draw spine
else:
raise ValueError('unknown spine location: %s'%loc)
else:
        for loc, spine in sp.spines.items():
if loc in ['left']:
spine.set_position(('outward',10)) # outward by 10 points
sp.yaxis.set_ticks_position('left')
elif loc in ['right','top','bottom']:
spine.set_color('none') # don't draw spine
sp.xaxis.set_ticks([])
else:
raise ValueError('unknown spine location: %s'%loc)
# no xaxis ticks
#ax.xaxis.set_ticks([])
#sp.spines['left'].set_position('center')
#sp.spines['left'].set_color('none')
#sp.spines['right'].set_position('center')
#sp.spines['right'].set_color('none')
#sp.spines['bottom'].set_position('center')
#sp.xaxis.set_ticks_position('bottom')
#sp.yaxis.set_ticks_position('left')
#sp.spines['bottom'].set_color('none')
#sp.spines['top'].set_position('center')
#sp.spines['top'].set_color('none')
#
# Rooms annotation
#
    plt.annotate('R 8',xy=(-19,14.1))
    plt.annotate('R 9',xy=(-24.5,6.5))
    plt.annotate('R 14',xy=(-20,6.5))
    plt.annotate('R 10',xy=(-16.5,6.5))
    plt.annotate('R 7',xy=(-10.5,14.1))
    plt.annotate('R 11',xy=(-2.5,13.5))
    plt.annotate('R 12',xy=(-8.7,6.5))
    plt.annotate('R 13',xy=(-5.2,14.5))
    plt.annotate('R 1',xy=(3.5,8))
    plt.annotate('R 2',xy=(1.5,13.8))
    plt.annotate('R 6',xy=(-3.6,6.5))
    n = plt.scatter(vx,vy,marker='o',c=Values,s=size,vmin=vmin,vmax=vmax)
    n.set_edgecolor('face')
    # m = plt.scatter(vxd,vyd,marker='o',c='k',s=size)
    # m.set_edgecolor('face')
    plt.axis('scaled')
    plt.title(tit)
    plt.ylabel('meters')
    if xticks:
        plt.xlabel('meters')
    if colbar:
        cbar = plt.colorbar(orientation='vertical')
        cbar.set_label(label)
def cdf(x,colsym="",lab="",lw=4):
""" plot the cumulative density function
Parameters
----------
x : np.array()
colsym : string
lab : string
lw : int
linewidth
Examples
--------
.. plot::
:include-source:
>>> import numpy as np
>>> import matplotlib.pyplot as plt
>>> x = np.random.randn(10000)
>>> cdf(x)
>>> plt.show()
"""
    plt.rcParams['legend.fontsize']=20
    plt.rcParams['font.size']=20
x = np.sort(x)
n = len(x)
x2 = np.repeat(x, 2)
    y2 = np.hstack([0.0, np.repeat(np.arange(1,n) / float(n), 2), 1.0])
plt.plot(x2,y2,colsym,label=lab,linewidth=lw)
plt.grid('on')
plt.legend(loc=2)
plt.xlabel('Ranging Error[m]')
plt.ylabel('Cumulative Probability')
if __name__=="__main__":
doctest.testmod()
| mit |
Mega-DatA-Lab/mxnet | docs/conf.py | 20 | 6941 | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# -*- coding: utf-8 -*-
import sys, os, re, subprocess
import mock
from recommonmark import parser
curr_path = os.path.dirname(os.path.abspath(os.path.expanduser(__file__)))
libpath = os.path.join(curr_path, '../python/')
sys.path.insert(0, libpath)
sys.path.insert(0, curr_path)
# -- mock out modules
MOCK_MODULES = ['scipy', 'scipy.sparse', 'sklearn']
for mod_name in MOCK_MODULES:
sys.modules[mod_name] = mock.Mock()
# -- General configuration -----------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
needs_sphinx = '1.2'
# General information about the project.
project = u'mxnet'
author = u'%s developers' % project
copyright = u'2015-2017, %s' % author
github_doc_root = 'https://github.com/dmlc/mxnet/tree/master/docs/'
doc_root = 'http://mxnet.io/'
# add markdown parser
source_parsers = {
'.md': parser.CommonMarkParser,
'.Rmd': parser.CommonMarkParser
}
# Version information.
# from mxnet import libinfo
# version = libinfo.__version__
# release = libinfo.__version__
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom ones
extensions = [
'sphinx.ext.autodoc',
'sphinx.ext.autosummary',
'sphinx.ext.napoleon',
'sphinx.ext.mathjax',
'breathe',
'mxdoc'
]
# Use breathe to include doxygen documents
breathe_projects = {'mxnet' : 'doxygen/xml/'}
breathe_default_project = 'mxnet'
autodoc_member_order = 'bysource'
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
# source_suffix = '.rst'
source_suffix = ['.rst', '.md', '.Rmd']
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
# Version and release are passed from CMake.
#version = None
# The full version, including alpha/beta/rc tags.
#release = version
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['virtualenv']
# The reST default role (used for this markup: `text`) to use for all documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
suppress_warnings = [
'image.nonlocal_uri',
]
# -- Options for HTML output ---------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'mxnet-theme'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
html_theme_path = ['_static']
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
html_sidebars = {
'**': 'relations.html'
}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'formatdoc'
| apache-2.0 |
ElaineHuang/elite-app | machine_learning/simulation.py | 1 | 28815 | import numpy as np
import tensorflow as tf
import pandas as pd
import matplotlib.pyplot as plt
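# Overview: this script trains a small feed-forward TensorFlow network that
# maps cumulative error-code counts (53 codes) to a shutdown-risk score in
# [0, 1], evaluates it on held-out, simulated and current-state data, and
# writes the predictions to CSV files.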
# layer with bias
def add_layer(inputs, in_size, out_size, n_layer,activation_function = None):
layer_name = 'layer%s' %n_layer
with tf.name_scope("layer_name"):
with tf.name_scope("weights"):
Weights = tf.Variable(tf.random_normal([in_size,out_size]),name='W')
tf.summary.histogram(layer_name+'/weights',Weights)
with tf.name_scope("biases"):
biases = tf.Variable(tf.zeros([1, out_size])+ 0.1,name='b')
tf.summary.histogram(layer_name+'/biases',biases)
with tf.name_scope("Wx_plus_b"):
Wx_plus_b = tf.add(tf.matmul(inputs, Weights), biases)
if activation_function is None:
outputs = Wx_plus_b
else:
outputs = activation_function(Wx_plus_b)
tf.summary.histogram(layer_name+'/outputs',outputs)
return outputs
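# Example usage of add_layer (a sketch only; the real network for this
# project is built inside simulation() below, and the 53-feature placeholder
# mirrors that code rather than being required by add_layer itself):
#
#   x = tf.placeholder(tf.float32, [None, 53])
#   h = add_layer(x, 53, 9, n_layer=1, activation_function=tf.nn.relu)
#   y = add_layer(h, 9, 1, n_layer=2, activation_function=None)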
# layer without bias (commented-out variant of add_layer)
"""
def add_layer(inputs, in_size, out_size, n_layer,activation_function = None):
layer_name = 'layer%s' %n_layer
with tf.name_scope("layer_name"):
with tf.name_scope("weights"):
Weights = tf.Variable(tf.random_normal([in_size,out_size]),name='W')
tf.summary.histogram(layer_name+'/weights',Weights)
with tf.name_scope("Wx_plus_b"):
Wx_plus_b = tf.matmul(inputs, Weights)
if activation_function is None:
outputs = Wx_plus_b
else:
outputs = activation_function(Wx_plus_b)
tf.summary.histogram(layer_name+'/outputs',outputs)
return outputs
"""
#print(newdata)
def simulation():
#df=pd.DataFrame(pd.read_excel('M023shutdownErr_300.xlsx'))
df=pd.DataFrame(pd.read_excel('M023shutdownErr_200.xlsx'))
df_simulation=pd.DataFrame(pd.read_excel('simulationErr.xlsx'))
print(df)
print(df.shape)
    print(len(df)) # show how many rows there are
    print(df['ErrCode']) # prints the whole ErrCode column
for i in range(len(df)):
if df['ErrCode'][i] == 'E620002L':
df['ErrCode'][i] = 54
elif df['ErrCode'][i] == 'E620002R':
df['ErrCode'][i] = 55
elif df['ErrCode'][i] == 'E620003L':
df['ErrCode'][i] = 56
elif df['ErrCode'][i] == 'E620003R':
df['ErrCode'][i] = 57
elif df['ErrCode'][i] == 'E551010L':
df['ErrCode'][i] = 58
elif df['ErrCode'][i] == 'E551010R':
df['ErrCode'][i] = 59
        # codes 54-59 above are the records to be excluded
elif df['ErrCode'][i] == 'E632237L': #0.996812
df['ErrCode'][i] = 1
elif df['ErrCode'][i] == 'E632233L': #0.999894
df['ErrCode'][i] = 2
elif df['ErrCode'][i] == 'E632501R': #0.625135
df['ErrCode'][i] = 3
elif df['ErrCode'][i] == 'E632234L': #0.769584
df['ErrCode'][i] = 4
elif df['ErrCode'][i] == 'E551502R': #0.527601
df['ErrCode'][i] = 5
elif df['ErrCode'][i] == 'E632104L': #0.989905
df['ErrCode'][i] = 6
elif df['ErrCode'][i] == 'E632434R': #0.596713
df['ErrCode'][i] = 7
elif df['ErrCode'][i] == 'E632201L': #0.956136
df['ErrCode'][i] = 8
elif df['ErrCode'][i] == 'E632102L': #0.334927
df['ErrCode'][i] = 9
elif df['ErrCode'][i] == 'E551092L': #0.68981
df['ErrCode'][i] = 10
elif df['ErrCode'][i] == 'E632307R': #0.186271
df['ErrCode'][i] = 11
elif df['ErrCode'][i] == 'E530020L': #0.51157
df['ErrCode'][i] = 12
elif df['ErrCode'][i] == 'E632107L': #0.967166
df['ErrCode'][i] = 13
elif df['ErrCode'][i] == 'E632108L': #0.00011
df['ErrCode'][i] = 14
elif df['ErrCode'][i] == 'E610045L': #0.27513
df['ErrCode'][i] = 15
elif df['ErrCode'][i] == 'E632405R': #0.630326
df['ErrCode'][i] = 16
elif df['ErrCode'][i] == 'E620005L':
df['ErrCode'][i] = 17
elif df['ErrCode'][i] == 'E620007L':
df['ErrCode'][i] = 18
elif df['ErrCode'][i] == 'E551250L':
df['ErrCode'][i] = 19
elif df['ErrCode'][i] == 'E610056R':
df['ErrCode'][i] = 20
elif df['ErrCode'][i] == 'E600003R':
df['ErrCode'][i] = 21
elif df['ErrCode'][i] == 'E600003L':
df['ErrCode'][i] = 22
elif df['ErrCode'][i] == 'E632236L':
df['ErrCode'][i] = 23
elif df['ErrCode'][i] == 'E551092R':
df['ErrCode'][i] = 24
elif df['ErrCode'][i] == 'E632436R':
df['ErrCode'][i] = 25
elif df['ErrCode'][i] == 'E551502L':
df['ErrCode'][i] = 26
elif df['ErrCode'][i] == 'E551506R':
df['ErrCode'][i] = 27
elif df['ErrCode'][i] == 'E632103L':
df['ErrCode'][i] = 28
elif df['ErrCode'][i] == 'E530020R':
df['ErrCode'][i] = 29
elif df['ErrCode'][i] == 'E530030L':
df['ErrCode'][i] = 30
elif df['ErrCode'][i] == 'E605179R':
df['ErrCode'][i] = 31
elif df['ErrCode'][i] == 'E551506L':
df['ErrCode'][i] = 32
elif df['ErrCode'][i] == 'E632301R':
df['ErrCode'][i] = 33
elif df['ErrCode'][i] == 'E530024R':
df['ErrCode'][i] = 34
elif df['ErrCode'][i] == 'E530025R':
df['ErrCode'][i] = 35
elif df['ErrCode'][i] == 'E610054L':
df['ErrCode'][i] = 36
elif df['ErrCode'][i] == 'E600102R':
df['ErrCode'][i] = 37
elif df['ErrCode'][i] == 'E632304R':
df['ErrCode'][i] = 38
elif df['ErrCode'][i] == 'E600102L':
df['ErrCode'][i] = 39
elif df['ErrCode'][i] == 'E632504R':
df['ErrCode'][i] = 40
elif df['ErrCode'][i] == 'E610045R':
df['ErrCode'][i] = 41
elif df['ErrCode'][i] == 'E632205L':
df['ErrCode'][i] = 42
elif df['ErrCode'][i] == 'E632433R':
df['ErrCode'][i] = 43
elif df['ErrCode'][i] == 'E530041R':
df['ErrCode'][i] = 44
elif df['ErrCode'][i] == 'E530014R':
df['ErrCode'][i] = 45
elif df['ErrCode'][i] == 'E632507R':
df['ErrCode'][i] = 46
elif df['ErrCode'][i] == 'E530041L':
df['ErrCode'][i] = 47
elif df['ErrCode'][i] == 'E601101R':
df['ErrCode'][i] = 48
elif df['ErrCode'][i] == 'E551001L':
df['ErrCode'][i] = 49
elif df['ErrCode'][i] == 'E551001R':
df['ErrCode'][i] = 50
elif df['ErrCode'][i] == 'E632101L':
df['ErrCode'][i] = 51
elif df['ErrCode'][i] == 'E620004L':
df['ErrCode'][i] = 52
elif df['ErrCode'][i] == 'E620007R':
df['ErrCode'][i] = 53
    # simulation data
for i in range(len(df_simulation)):
if df_simulation['ErrCode'][i] == 'E620002L':
df_simulation['ErrCode'][i] = 54
elif df_simulation['ErrCode'][i] == 'E620002R':
df_simulation['ErrCode'][i] = 55
elif df_simulation['ErrCode'][i] == 'E620003L':
df_simulation['ErrCode'][i] = 56
elif df_simulation['ErrCode'][i] == 'E620003R':
df_simulation['ErrCode'][i] = 57
elif df_simulation['ErrCode'][i] == 'E551010L':
df_simulation['ErrCode'][i] = 58
elif df_simulation['ErrCode'][i] == 'E551010R':
df_simulation['ErrCode'][i] = 59
        # codes 54-59 above are the records to be excluded
elif df_simulation['ErrCode'][i] == 'E632237L': #0.996812
df_simulation['ErrCode'][i] = 1
elif df_simulation['ErrCode'][i] == 'E632233L': #0.999894
df_simulation['ErrCode'][i] = 2
elif df_simulation['ErrCode'][i] == 'E632501R': #0.625135
df_simulation['ErrCode'][i] = 3
elif df_simulation['ErrCode'][i] == 'E632234L': #0.769584
df_simulation['ErrCode'][i] = 4
elif df_simulation['ErrCode'][i] == 'E551502R': #0.527601
df_simulation['ErrCode'][i] = 5
elif df_simulation['ErrCode'][i] == 'E632104L': #0.989905
df_simulation['ErrCode'][i] = 6
elif df_simulation['ErrCode'][i] == 'E632434R': #0.596713
df_simulation['ErrCode'][i] = 7
elif df_simulation['ErrCode'][i] == 'E632201L': #0.956136
df_simulation['ErrCode'][i] = 8
elif df_simulation['ErrCode'][i] == 'E632102L': #0.334927
df_simulation['ErrCode'][i] = 9
elif df_simulation['ErrCode'][i] == 'E551092L': #0.68981
df_simulation['ErrCode'][i] = 10
elif df_simulation['ErrCode'][i] == 'E632307R': #0.186271
df_simulation['ErrCode'][i] = 11
elif df_simulation['ErrCode'][i] == 'E530020L': #0.51157
df_simulation['ErrCode'][i] = 12
elif df_simulation['ErrCode'][i] == 'E632107L': #0.967166
df_simulation['ErrCode'][i] = 13
elif df_simulation['ErrCode'][i] == 'E632108L': #0.00011
df_simulation['ErrCode'][i] = 14
elif df_simulation['ErrCode'][i] == 'E610045L': #0.27513
df_simulation['ErrCode'][i] = 15
elif df_simulation['ErrCode'][i] == 'E632405R': #0.630326
df_simulation['ErrCode'][i] = 16
elif df_simulation['ErrCode'][i] == 'E620005L':
df_simulation['ErrCode'][i] = 17
elif df_simulation['ErrCode'][i] == 'E620007L':
df_simulation['ErrCode'][i] = 18
elif df_simulation['ErrCode'][i] == 'E551250L':
df_simulation['ErrCode'][i] = 19
elif df_simulation['ErrCode'][i] == 'E610056R':
df_simulation['ErrCode'][i] = 20
elif df_simulation['ErrCode'][i] == 'E600003R':
df_simulation['ErrCode'][i] = 21
elif df_simulation['ErrCode'][i] == 'E600003L':
df_simulation['ErrCode'][i] = 22
elif df_simulation['ErrCode'][i] == 'E632236L':
df_simulation['ErrCode'][i] = 23
elif df_simulation['ErrCode'][i] == 'E551092R':
df_simulation['ErrCode'][i] = 24
elif df_simulation['ErrCode'][i] == 'E632436R':
df_simulation['ErrCode'][i] = 25
elif df_simulation['ErrCode'][i] == 'E551502L':
df_simulation['ErrCode'][i] = 26
elif df_simulation['ErrCode'][i] == 'E551506R':
df_simulation['ErrCode'][i] = 27
elif df_simulation['ErrCode'][i] == 'E632103L':
df_simulation['ErrCode'][i] = 28
elif df_simulation['ErrCode'][i] == 'E530020R':
df_simulation['ErrCode'][i] = 29
elif df_simulation['ErrCode'][i] == 'E530030L':
df_simulation['ErrCode'][i] = 30
elif df_simulation['ErrCode'][i] == 'E605179R':
df_simulation['ErrCode'][i] = 31
elif df_simulation['ErrCode'][i] == 'E551506L':
df_simulation['ErrCode'][i] = 32
elif df_simulation['ErrCode'][i] == 'E632301R':
df_simulation['ErrCode'][i] = 33
elif df_simulation['ErrCode'][i] == 'E530024R':
df_simulation['ErrCode'][i] = 34
elif df_simulation['ErrCode'][i] == 'E530025R':
df_simulation['ErrCode'][i] = 35
elif df_simulation['ErrCode'][i] == 'E610054L':
df_simulation['ErrCode'][i] = 36
elif df_simulation['ErrCode'][i] == 'E600102R':
df_simulation['ErrCode'][i] = 37
elif df_simulation['ErrCode'][i] == 'E632304R':
df_simulation['ErrCode'][i] = 38
elif df_simulation['ErrCode'][i] == 'E600102L':
df_simulation['ErrCode'][i] = 39
elif df_simulation['ErrCode'][i] == 'E632504R':
df_simulation['ErrCode'][i] = 40
elif df_simulation['ErrCode'][i] == 'E610045R':
df_simulation['ErrCode'][i] = 41
elif df_simulation['ErrCode'][i] == 'E632205L':
df_simulation['ErrCode'][i] = 42
elif df_simulation['ErrCode'][i] == 'E632433R':
df_simulation['ErrCode'][i] = 43
elif df_simulation['ErrCode'][i] == 'E530041R':
df_simulation['ErrCode'][i] = 44
elif df_simulation['ErrCode'][i] == 'E530014R':
df_simulation['ErrCode'][i] = 45
elif df_simulation['ErrCode'][i] == 'E632507R':
df_simulation['ErrCode'][i] = 46
elif df_simulation['ErrCode'][i] == 'E530041L':
df_simulation['ErrCode'][i] = 47
elif df_simulation['ErrCode'][i] == 'E601101R':
df_simulation['ErrCode'][i] = 48
elif df_simulation['ErrCode'][i] == 'E551001L':
df_simulation['ErrCode'][i] = 49
elif df_simulation['ErrCode'][i] == 'E551001R':
df_simulation['ErrCode'][i] = 50
elif df_simulation['ErrCode'][i] == 'E632101L':
df_simulation['ErrCode'][i] = 51
elif df_simulation['ErrCode'][i] == 'E620004L':
df_simulation['ErrCode'][i] = 52
elif df_simulation['ErrCode'][i] == 'E620007R':
df_simulation['ErrCode'][i] = 53
    newdata=df[(df.ErrCode < 54)] # extract the rows with ErrCode below 54
    print(newdata)
    newdata=newdata.pop('ErrCode') # pop the ErrCode column out of newdata into a new Series
    print(newdata)
    #print(df['ErrCode'][1]) # can print the value of a single cell
    print(len(newdata))
    #print(newdata.loc[481]) # a value can be fetched via its index
    # reset the index so it runs consecutively http://sofasofa.24xi.org/forum_main_post.php?postid=1000649
    newdata.index = range(len(newdata))
    print(newdata)
input_data = pd.DataFrame()
#row_input_data= pd.DataFrame()
#input_data = pd.DataFrame([0]*53)
temp = pd.DataFrame([0]*53)
print(input_data)
    stop=[] # record which indices correspond to a shutdown
    # split the data at every shutdown event
    for i in range(len(newdata)):
        if newdata.loc[i]==-1: # look the value up by index
            stop.append(i) # collect the indices whose value is -1
print(stop)
print(stop[0])
print(len(stop))
g=0
select_num=0
end=0
    # stop lists the index of every shutdown
    # build the input data we will use
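    # For every shutdown segment, each intermediate step becomes one training
    # sample: the 53 features are the cumulative counts of each error code
    # seen so far in the segment, and row 53 holds the target, i.e. the
    # fraction of the segment already elapsed (how close the next shutdown is).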
for m in range(len(stop)):
if m >= 1:
end = stop[m-1]+1
for select_num in range(0,stop[m]-end):
#newdata.loc[k]
for k in range(g,g+select_num+1):
                for count in range(1,54): # compare against the numbered error codes and count how many of each appear
if newdata.loc[k] == count:
                        temp.loc[count-1]=temp.loc[count-1]+1 # codes are numbered 1-53 but the dataframe index runs 0-52
break
            temp.loc[53] = (select_num+1)/(stop[m]-end) # compute the output probability (fraction of the segment elapsed)
            #row_input_data=row_input_data.append(temp[0:53],ignore_index=True) # flatten the input data into a single row for later use
            input_data=pd.concat([input_data,temp],axis=1,ignore_index=True) # axis=1 appends to the right https://tieba.baidu.com/p/3773675591
temp = pd.DataFrame([0]*53)
g=stop[m]+1
    # summarize the current (latest) state so the present risk index can be predicted
temp2 = pd.DataFrame([0]*53)
now_status = pd.DataFrame()
for select_num in range(0,len(newdata)-(stop[len(stop)-1]+1)):
for mm in range(stop[len(stop)-1]+1,select_num+1+stop[len(stop)-1]+1):
for count in range(1,54):
if newdata.loc[mm] == count:
temp2.loc[count-1] = temp2.loc[count-1]+1
break
now_status = pd.concat([now_status,temp2],axis=1,ignore_index=True)
temp2 = pd.DataFrame([0]*53)
now_status = now_status.T
print(input_data)
    output_data = input_data.loc[53] # separate out the target (output) data
    input_data = input_data.drop([53]) # keep only the input features
    output_data = output_data.T
    input_data = input_data.T # one row per sample, 53 features per row
    # min-max normalize every feature of input_data, and normalize the current state the same way
print(input_data)
input_data_normal = (input_data) / (input_data.max() - input_data.min())
    input_data_normal = input_data_normal.fillna(0) # some features never occur, so the normalization denominator is zero and the result becomes NaN
now_status_normalize = now_status / (input_data.max() - input_data.min())
now_status_normalize = now_status_normalize.fillna(0)
    upbound = input_data.max() # keep the upper/lower bounds; they are reused for the test data later
lowbound = input_data.min()
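    # Note: the scaling above is value / (max - min) per feature rather than
    # the conventional (value - min) / (max - min); the same saved bounds are
    # reused for the current-state and per-error probes further below.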
print(input_data_normal)
    #print(row_input_data) # input data flattened into a single row, not used
    print(output_data) # output data
    # current-state prediction input
    now_status_in = np.array(now_status_normalize[:])
    # simulation data
simulation_data=df_simulation[(df_simulation.ErrCode < 54)]
simulation_data=simulation_data.pop('ErrCode')
simulation_data.index = range(len(simulation_data))
simulation_input_data = pd.DataFrame()
simulation_temp = pd.DataFrame([0]*53)
simulation_stop=[]
for i in range(len(simulation_data)):
        if simulation_data.loc[i]==-1: # look the value up by index
            simulation_stop.append(i) # collect the indices whose value is -1
simulation_g=0
simulation_select_num=0
simulation_end=0
for m in range(len(simulation_stop)):
if m >= 1:
simulation_end = simulation_stop[m-1]+1
for simulation_select_num in range(0,simulation_stop[m]-simulation_end):
#newdata.loc[k]
            for k in range(simulation_g,simulation_g+simulation_select_num+1): # walk the current simulation segment from its start index
                for count in range(1,54): # compare against the numbered error codes and count how many of each appear
if simulation_data.loc[k] == count:
                        simulation_temp.loc[count-1]=simulation_temp.loc[count-1]+1 # codes are numbered 1-53 but the dataframe index runs 0-52
            simulation_temp.loc[53] = (simulation_select_num+1)/(simulation_stop[m]-simulation_end) # compute the output probability (fraction of the segment elapsed)
            #row_input_data=row_input_data.append(temp[0:53],ignore_index=True) # flatten the input data into a single row for later use
            simulation_input_data=pd.concat([simulation_input_data,simulation_temp],axis=1,ignore_index=True) # axis=1 appends to the right https://tieba.baidu.com/p/3773675591
simulation_temp = pd.DataFrame([0]*53)
        simulation_g=simulation_stop[m]+1 # advance to the start of the next simulation segment
    simulation_output_data = simulation_input_data.loc[53] # separate out the target (output) data
    simulation_input_data = simulation_input_data.drop([53]) # keep only the input features
    simulation_output_data = simulation_output_data.T
    simulation_input_data = simulation_input_data.T # one row per sample, 53 features per row
    simulation_input_data_normal = (simulation_input_data) / (simulation_input_data.max() - simulation_input_data.min())
    simulation_input_data_normal = simulation_input_data_normal.fillna(0) # some features never occur, so the normalization denominator is zero and the result becomes NaN
    simulation_upbound = simulation_input_data.max() # keep the upper/lower bounds for later use
simulation_lowbound = simulation_input_data.min()
simulation_batch_size = len(simulation_input_data_normal)
simulation_in = np.array(simulation_input_data_normal[:])
simulation_out = np.array(simulation_output_data[:]).astype(np.float32)
simulation_out=np.reshape(simulation_out,(simulation_batch_size,1))
    # tunable parameters
    batch_size = len(input_data_normal) # total number of samples (69 for the ERR300 data)
    train_size = 50 # number of training samples
    train_begin = 0 # training start index
    test_size = batch_size - train_size # number of test samples
train_input = np.array(input_data_normal[train_begin:train_size])
#train_input = np.array(input_data[train_begin:train_size])
train_output=np.array(output_data[train_begin:train_size]).astype(np.float32)
train_output=np.reshape(train_output,(train_size-train_begin,1))
test_input = np.array(input_data_normal[train_begin+train_size:batch_size][:])
#test_input = np.array(input_data[train_begin+train_size:batch_size][:])
test_output = np.array(output_data[train_begin+train_size:batch_size]).astype(np.float32)
test_output = np.reshape(test_output,(test_size,1))
print(test_input)
#print(test_output)
#print(train_input)
#train_input=np.reshape(train_input,[batch_size,53])[:,np.newaxis]
#np.split(train_output, 1, axis=0)
#print(train_output)
"""
print(input_data.T)
label=np.transpose(input_data)
print(label)
label1=np.array(label)
print(label1)
"""
#print(label1[1][53])
#print(input_data)
    #print(input_data[53][53]) # first column completed
#print(input_data.shape[1])
#--------------------------------------------------
    # the seven machine-learning steps
#--------------------------------------------------
    # step 1: define the input placeholders
    with tf.name_scope("inputs"):
        xs = tf.placeholder(tf.float32,[None,53],name='x_input') # None: any number of samples; 53 features per sample
        ys = tf.placeholder(tf.float32,[None,1],name='y_input') # one target per sample; with 69 samples the labels have shape (69, 1)
    # step 2: build the layers
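    # Network layout as coded below: 53 inputs -> 9 (relu) -> 9 (relu6)
    # -> 9 (softmax) -> 1 linear output, read as a risk score.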
input_layer=add_layer(xs, 53, 9, n_layer = 1, activation_function = tf.nn.relu)
hidden_layer1=add_layer(input_layer, 9, 9, n_layer = 2, activation_function = tf.nn.relu6)
hidden_layer2=add_layer(hidden_layer1, 9, 9, n_layer = 3, activation_function = tf.nn.softmax)
output_layer=add_layer(hidden_layer2, 9, 1, n_layer=4, activation_function = None)
    # step 3: loss function (mean squared error)
with tf.name_scope("loss"):
loss = tf.reduce_mean(tf.reduce_sum(tf.square(ys-output_layer),reduction_indices=[1]))
tf.summary.scalar('loss',loss)
    # step 3 (alternative): cross-entropy loss
#http://ithelp.ithome.com.tw/articles/10187002
#http://studyai.site/page/3
"""
with tf.name_scope("cross_entropy"):
#cross_entropy = tf.reduce_mean(-tf.reduce_sum(ys * tf.log(output_layer), reduction_indices=[1]))
#cross_entropy = tf.cost.cross_entropy(output_layer,ys,'myloss')
print(output_layer)
print(ys)
cross_entropy = tf.reduce_mean(tf.nn.sparse_softmax_cross_entropy_with_logits(logits=output_layer, labels=ys, name='myloss'))
#cross_entropy = tf.reduce_mean(tf.nn.sparse_softmax_cross_entropy_with_logits(labels=ys, logits=output_layer, name='entropy'),'entropy')
tf.summary.scalar('cross_entropy',cross_entropy)
"""
    # step 4: training step (optimizer)
with tf.name_scope("train"):
#train_step=tf.train.GradientDescentOptimizer(0.1).minimize(loss) #
train_step=tf.train.AdamOptimizer(0.1).minimize(loss) #loss
#train_step=tf.train.GradientDescentOptimizer(0.05).minimize(cross_entropy) #cross_entropy
#train_step=tf.train.AdamOptimizer(0.001).minimize(cross_entropy) #cross_entropy
    # step 5: initialization (variables, session, summaries)
init = tf.global_variables_initializer()
sess = tf.Session()
sess.run(init)
merged = tf.summary.merge_all()
writer = tf.summary.FileWriter("logss/", sess.graph)
    # step 5.5: data splitting (left commented out below)
"""
DATA = np.array([], dtype=np.float32)
for ii in range(0,53):
inputdata = label1[ii,:-1]
DATA=np.hstack(inputdata)
print(DATA)
"""
#print(np.random.rand(1024,1024))
#ax = fig.add_subplot(1,1,1)
#
    #plt.show() # showing the figure here would pause the program
    #plt.ion() # interactive mode keeps running after plotting
    # step 6: start training
for i in range(20000):
sess.run(train_step,feed_dict={xs:train_input,ys:train_output})
if i % 50 == 0:
result = sess.run(merged,feed_dict={xs:train_input,ys:train_output})
writer.add_summary(result,i)
output_ans=sess.run(output_layer,feed_dict={xs:train_input})
#print(i,sess.run(cross_entropy,feed_dict={xs:train_input,ys:train_output}))
see_loss = sess.run(loss,feed_dict={xs:train_input,ys:train_output})
print(i,see_loss)
#plt.plot([output_ans])
    # step 7: save the model
saver = tf.train.Saver()
save_path = saver.save(sess,'modelsave/save.ckpt')
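    # To reload the saved weights later (a sketch; it assumes the same graph
    # has been rebuilt first):
    #   restorer = tf.train.Saver()
    #   restorer.restore(sess, 'modelsave/save.ckpt')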
#--------------------------------------------------
    # produce and plot the result of the final training pass
#--------------------------------------------------
fig = plt.figure()
t5=np.linspace(1,train_size,train_size)
t6=np.linspace(1,train_size,train_size)
plt.plot(t5, output_ans,'g',linewidth=2)
plt.plot(t6, train_output,'r',linewidth=2)
plt.title("forecast shutdown system(Red:true,G:forecast)")
plt.show()
#print(output_ans)
#print(train_output)
#--------------------------------------------------
    # produce and plot the results on the test data
#--------------------------------------------------
fig = plt.figure()
test_predict=sess.run(output_layer,feed_dict={xs:test_input})
#plt.scatter(test_predict, test_output)
print(test_input)
    # clip test_predict into [0, 1]: values above 1 become 1, values below 0 become 0
test_predict=np.fmin(1.0, np.fmax(0.0, test_predict)) #http://blog.mwsoft.jp/article/176188532.html
print(test_predict)
    # convert to a percentage
    # bring in the hand-assigned (expert) weights
artificial = np.array([0.996812, 0.999894, 0.625135, 0.769584, 0.527601, 0.989905,
0.596713, 0.956136, 0.334927, 0.68981, 0.186271, 0.51157, 0.967166, 0.00011,
0.27513, 0.630326,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0])
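    # the 16 non-zero entries are hand-assigned weights for error codes 1-16
    # (matching the probabilities noted beside the code mapping above);
    # codes 17-53 get weight 0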
arti_weight = artificial * test_input
    arti_sum = np.sum(arti_weight,axis=1) # axis=1 sums across each row
arti_sum = np.reshape(arti_sum,(len(arti_sum),1))
print(arti_weight)
print(test_predict)
arti_sum=np.fmin(1.0, np.fmax(0.0, arti_sum))
#test_predict = 50 * test_predict + 50 * artificial
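    # NOTE: with the 100/0 blend below the hand-assigned weights contribute
    # nothing, so the reported percentage is purely the network prediction.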
test_predict = 100 * test_predict + 0 * arti_sum
test_output = 100 * test_output
#print(test_output)
#t1=[1 2 3 4 5 6 7 8 9 10 11 12 13 14 15]
#t2=[1 2 3 4 5 6 7 8 9 10 11 12 13 14 15]
t1=np.linspace(1,test_size,test_size)
t2=np.linspace(1,test_size,test_size)
plt.plot(t1, test_predict,'g',linewidth=2)
plt.plot(t2, test_output,'r',linewidth=2)
plt.xlabel('status')
plt.ylabel('dangerous percentage(%)')
plt.title("forecast shutdown system(Red:true,G:forecast)")
plt.show()
#--------------------------------------------------
    # produce the risk index for the current state
#--------------------------------------------------
now_status_predict=sess.run(output_layer,feed_dict={xs:now_status_in})
print('status')
print(now_status_predict)
#--------------------------------------------------
    # feed each error code in on its own to find the most influential one
#--------------------------------------------------
    # build a 53x53 identity matrix: each row activates exactly one error code
    find_important_error = pd.DataFrame(np.eye(53))
#print(find_important_error)
    # normalize with the bounds from the training data
find_important_error = find_important_error/(upbound-lowbound)
print('haha')
print(upbound)
find_important_error = find_important_error.fillna(0)
find_important_error = find_important_error.replace(np.inf,0)
    # convert to a numpy array
find_important_error = np.array(find_important_error)
    # convert into the form TensorFlow expects as input
fig = plt.figure()
individual_error = sess.run(output_layer,feed_dict={xs:find_important_error})
    individual_error=np.fmin(1.0, np.fmax(0.0, individual_error)) # clip to [0, 1]
individual_error = individual_error * 100
print(individual_error)
    # an error code never seen in training should output zero; the open issue is that an all-zero input still produces a non-zero risk value
    # upbound lists, for codes 1-53, the maximum count observed across all shutdown segments
for i in range(53):
if upbound.loc[i] == 0:
individual_error[i]=0
print(individual_error)
t3 = np.linspace(1,53,53)
new_ticks = np.linspace(1,53,53)
plt.plot(t3, individual_error,'g',linewidth=2)
plt.xlim((1, 53))
plt.ylim((0, 100))
plt.xlabel('error code')
plt.ylabel('damage percentage')
plt.xticks(new_ticks)
plt.show()
#--------------------------------------------------
    # produce and plot the results on the simulated data
#--------------------------------------------------
fig = plt.figure()
simulation_predict=sess.run(output_layer,feed_dict={xs:simulation_in})
#plt.scatter(test_predict, test_output)
print(simulation_in)
    # clip simulation_predict into [0, 1]: values above 1 become 1, values below 0 become 0
simulation_predict=np.fmin(1.0, np.fmax(0.0, simulation_predict)) #http://blog.mwsoft.jp/article/176188532.html
print(simulation_predict)
    # convert to a percentage
    # bring in the hand-assigned (expert) weights
artificial = np.array([0.996812, 0.999894, 0.625135, 0.769584, 0.527601, 0.989905,
0.596713, 0.956136, 0.334927, 0.68981, 0.186271, 0.51157, 0.967166, 0.00011,
0.27513, 0.630326,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0])
arti_weight = artificial * simulation_in
    arti_sum = np.sum(arti_weight,axis=1) # axis=1 sums across each row
arti_sum = np.reshape(arti_sum,(len(arti_sum),1))
print(arti_weight)
print(simulation_predict)
arti_sum=np.fmin(1.0, np.fmax(0.0, arti_sum))
#test_predict = 50 * test_predict + 50 * artificial
simulation_predict = 100 * simulation_predict + 0 * arti_sum
simulation_out = 100 * simulation_out
#print(test_output)
#t1=[1 2 3 4 5 6 7 8 9 10 11 12 13 14 15]
#t2=[1 2 3 4 5 6 7 8 9 10 11 12 13 14 15]
t8=np.linspace(1,simulation_batch_size,simulation_batch_size)
t9=np.linspace(1,simulation_batch_size,simulation_batch_size)
plt.plot(t8, simulation_predict,'g',linewidth=2)
plt.plot(t9, simulation_out,'r',linewidth=2)
plt.xlabel('status')
plt.ylabel('dangerous percentage(%)')
plt.title("forecast shutdown system(Red:true,G:forecast)")
plt.show()
#--------------------------------------------------
    # save the numpy arrays to csv files
#--------------------------------------------------
#c=np.concatenate((individual_error,test_predict,test_output,simulation_predict,simulation_out),axis=1)
s1=pd.DataFrame(individual_error,columns=['individual_error'])
s2=pd.DataFrame(test_predict,columns=['test_predict'])
s3=pd.DataFrame(test_output,columns=['test_output'])
s4=pd.DataFrame(simulation_predict,columns=['simulation_predict'])
s5=pd.DataFrame(simulation_out,columns=['simulation_out'])
s6=pd.DataFrame(now_status_predict,columns=['now_status_predict'])
s1.to_csv('errcode.csv')
s_test = pd.concat([s2,s3],axis=1)
s_test.to_csv('test.csv')
s_simulation = pd.concat([s4,s5],axis=1)
s_simulation.to_csv('simulation.csv')
s6.to_csv('now_status_predict.csv')
#s_all = pd.concat([s1,s2,s3,s4,s5],axis=1)
#s_all.to_csv('generate.csv')
#np.savetxt("foo1.csv", zip(individual_error,test_predict,test_output,simulation_predict,simulation_out), delimiter=',', fmt='%f')
""" 小用法
#for j in range(k[0])
#print(df.columns)
#print(df.values)
#print(df['Situation'].isnull())
#print(df['Situation'].isnull().value_counts())
#df['Situation'].fillna(value=0)
#df['Duration'].replace('NaN','1')
    #print(newdata.head(30)) # list the first thirty rows
""" | apache-2.0 |
alephu5/Soundbyte | environment/lib/python3.3/site-packages/pandas/io/tests/test_pytables.py | 1 | 153844 | import nose
import sys
import os
import warnings
import tempfile
from contextlib import contextmanager
import datetime
import numpy as np
import pandas
from pandas import (Series, DataFrame, Panel, MultiIndex, bdate_range,
date_range, Index, DatetimeIndex, isnull)
from pandas.io.pytables import (HDFStore, get_store, Term, read_hdf,
IncompatibilityWarning, PerformanceWarning,
AttributeConflictWarning, DuplicateWarning,
PossibleDataLossError, ClosedFileError)
from pandas.io import pytables as pytables
import pandas.util.testing as tm
from pandas.util.testing import (assert_panel4d_equal,
assert_panel_equal,
assert_frame_equal,
assert_series_equal)
from pandas import concat, Timestamp
from pandas import compat, _np_version_under1p7
from pandas.compat import range, lrange, u
from pandas.util.testing import assert_produces_warning
try:
import tables
except ImportError:
raise nose.SkipTest('no pytables')
from distutils.version import LooseVersion
_default_compressor = LooseVersion(tables.__version__) >= '2.2' \
and 'blosc' or 'zlib'
_multiprocess_can_split_ = False
# contextmanager to ensure the file cleanup
def safe_remove(path):
if path is not None:
try:
os.remove(path)
except:
pass
def safe_close(store):
try:
if store is not None:
store.close()
except:
pass
def create_tempfile(path):
""" create an unopened named temporary file """
return os.path.join(tempfile.gettempdir(),path)
@contextmanager
def ensure_clean_store(path, mode='a', complevel=None, complib=None,
fletcher32=False):
try:
# put in the temporary path if we don't have one already
if not len(os.path.dirname(path)):
path = create_tempfile(path)
store = HDFStore(path, mode=mode, complevel=complevel,
complib=complib, fletcher32=False)
yield store
finally:
safe_close(store)
if mode == 'w' or mode == 'a':
safe_remove(path)
@contextmanager
def ensure_clean_path(path):
"""
return essentially a named temporary file that is not opened
    and deleted on exiting; if path is a list, then create and
return list of filenames
"""
try:
if isinstance(path, list):
filenames = [ create_tempfile(p) for p in path ]
yield filenames
else:
filenames = [ create_tempfile(path) ]
yield filenames[0]
finally:
for f in filenames:
safe_remove(f)
# set these parameters so we don't have file sharing
tables.parameters.MAX_NUMEXPR_THREADS = 1
tables.parameters.MAX_BLOSC_THREADS = 1
tables.parameters.MAX_THREADS = 1
def _maybe_remove(store, key):
"""For tests using tables, try removing the table to be sure there is
no content from previous tests using the same table name."""
try:
store.remove(key)
except:
pass
def compat_assert_produces_warning(w,f):
""" don't produce a warning under PY3 """
if compat.PY3:
f()
else:
with tm.assert_produces_warning(expected_warning=w):
f()
class TestHDFStore(tm.TestCase):
def setUp(self):
warnings.filterwarnings(action='ignore', category=FutureWarning)
self.path = 'tmp.__%s__.h5' % tm.rands(10)
def tearDown(self):
pass
def test_factory_fun(self):
try:
with get_store(self.path) as tbl:
raise ValueError('blah')
except ValueError:
pass
finally:
safe_remove(self.path)
try:
with get_store(self.path) as tbl:
tbl['a'] = tm.makeDataFrame()
with get_store(self.path) as tbl:
self.assertEquals(len(tbl), 1)
self.assertEquals(type(tbl['a']), DataFrame)
finally:
safe_remove(self.path)
def test_conv_read_write(self):
try:
def roundtrip(key, obj,**kwargs):
obj.to_hdf(self.path, key,**kwargs)
return read_hdf(self.path, key)
o = tm.makeTimeSeries()
assert_series_equal(o, roundtrip('series',o))
o = tm.makeStringSeries()
assert_series_equal(o, roundtrip('string_series',o))
o = tm.makeDataFrame()
assert_frame_equal(o, roundtrip('frame',o))
o = tm.makePanel()
assert_panel_equal(o, roundtrip('panel',o))
# table
df = DataFrame(dict(A=lrange(5), B=lrange(5)))
df.to_hdf(self.path,'table',append=True)
result = read_hdf(self.path, 'table', where = ['index>2'])
assert_frame_equal(df[df.index>2],result)
finally:
safe_remove(self.path)
def test_api(self):
# GH4584
        # API issue when to_hdf doesn't accept append AND format args
with ensure_clean_path(self.path) as path:
df = tm.makeDataFrame()
df.iloc[:10].to_hdf(path,'df',append=True,format='table')
df.iloc[10:].to_hdf(path,'df',append=True,format='table')
assert_frame_equal(read_hdf(path,'df'),df)
# append to False
df.iloc[:10].to_hdf(path,'df',append=False,format='table')
df.iloc[10:].to_hdf(path,'df',append=True,format='table')
assert_frame_equal(read_hdf(path,'df'),df)
with ensure_clean_path(self.path) as path:
df = tm.makeDataFrame()
df.iloc[:10].to_hdf(path,'df',append=True)
df.iloc[10:].to_hdf(path,'df',append=True,format='table')
assert_frame_equal(read_hdf(path,'df'),df)
# append to False
df.iloc[:10].to_hdf(path,'df',append=False,format='table')
df.iloc[10:].to_hdf(path,'df',append=True)
assert_frame_equal(read_hdf(path,'df'),df)
with ensure_clean_path(self.path) as path:
df = tm.makeDataFrame()
df.to_hdf(path,'df',append=False,format='fixed')
assert_frame_equal(read_hdf(path,'df'),df)
df.to_hdf(path,'df',append=False,format='f')
assert_frame_equal(read_hdf(path,'df'),df)
df.to_hdf(path,'df',append=False)
assert_frame_equal(read_hdf(path,'df'),df)
df.to_hdf(path,'df')
assert_frame_equal(read_hdf(path,'df'),df)
with ensure_clean_store(self.path) as store:
path = store._path
df = tm.makeDataFrame()
_maybe_remove(store,'df')
store.append('df',df.iloc[:10],append=True,format='table')
store.append('df',df.iloc[10:],append=True,format='table')
assert_frame_equal(store.select('df'),df)
# append to False
_maybe_remove(store,'df')
store.append('df',df.iloc[:10],append=False,format='table')
store.append('df',df.iloc[10:],append=True,format='table')
assert_frame_equal(store.select('df'),df)
# formats
_maybe_remove(store,'df')
store.append('df',df.iloc[:10],append=False,format='table')
store.append('df',df.iloc[10:],append=True,format='table')
assert_frame_equal(store.select('df'),df)
_maybe_remove(store,'df')
store.append('df',df.iloc[:10],append=False,format='table')
store.append('df',df.iloc[10:],append=True,format=None)
assert_frame_equal(store.select('df'),df)
with ensure_clean_path(self.path) as path:
# invalid
df = tm.makeDataFrame()
self.assertRaises(ValueError, df.to_hdf, path,'df',append=True,format='f')
self.assertRaises(ValueError, df.to_hdf, path,'df',append=True,format='fixed')
self.assertRaises(TypeError, df.to_hdf, path,'df',append=True,format='foo')
self.assertRaises(TypeError, df.to_hdf, path,'df',append=False,format='bar')
def test_api_default_format(self):
# default_format option
with ensure_clean_store(self.path) as store:
df = tm.makeDataFrame()
pandas.set_option('io.hdf.default_format','fixed')
_maybe_remove(store,'df')
store.put('df',df)
self.assert_(not store.get_storer('df').is_table)
self.assertRaises(ValueError, store.append, 'df2',df)
pandas.set_option('io.hdf.default_format','table')
_maybe_remove(store,'df')
store.put('df',df)
self.assert_(store.get_storer('df').is_table)
_maybe_remove(store,'df2')
store.append('df2',df)
self.assert_(store.get_storer('df').is_table)
pandas.set_option('io.hdf.default_format',None)
with ensure_clean_path(self.path) as path:
df = tm.makeDataFrame()
pandas.set_option('io.hdf.default_format','fixed')
df.to_hdf(path,'df')
with get_store(path) as store:
self.assert_(not store.get_storer('df').is_table)
self.assertRaises(ValueError, df.to_hdf, path,'df2', append=True)
pandas.set_option('io.hdf.default_format','table')
df.to_hdf(path,'df3')
with get_store(path) as store:
self.assert_(store.get_storer('df3').is_table)
df.to_hdf(path,'df4',append=True)
with get_store(path) as store:
self.assert_(store.get_storer('df4').is_table)
pandas.set_option('io.hdf.default_format',None)
def test_keys(self):
with ensure_clean_store(self.path) as store:
store['a'] = tm.makeTimeSeries()
store['b'] = tm.makeStringSeries()
store['c'] = tm.makeDataFrame()
store['d'] = tm.makePanel()
store['foo/bar'] = tm.makePanel()
self.assertEquals(len(store), 5)
self.assert_(set(
store.keys()) == set(['/a', '/b', '/c', '/d', '/foo/bar']))
def test_repr(self):
with ensure_clean_store(self.path) as store:
repr(store)
store['a'] = tm.makeTimeSeries()
store['b'] = tm.makeStringSeries()
store['c'] = tm.makeDataFrame()
store['d'] = tm.makePanel()
store['foo/bar'] = tm.makePanel()
store.append('e', tm.makePanel())
df = tm.makeDataFrame()
df['obj1'] = 'foo'
df['obj2'] = 'bar'
df['bool1'] = df['A'] > 0
df['bool2'] = df['B'] > 0
df['bool3'] = True
df['int1'] = 1
df['int2'] = 2
df['timestamp1'] = Timestamp('20010102')
df['timestamp2'] = Timestamp('20010103')
df['datetime1'] = datetime.datetime(2001,1,2,0,0)
df['datetime2'] = datetime.datetime(2001,1,3,0,0)
df.ix[3:6,['obj1']] = np.nan
df = df.consolidate().convert_objects()
warnings.filterwarnings('ignore', category=PerformanceWarning)
store['df'] = df
warnings.filterwarnings('always', category=PerformanceWarning)
# make a random group in hdf space
store._handle.createGroup(store._handle.root,'bah')
repr(store)
str(store)
# storers
with ensure_clean_store(self.path) as store:
df = tm.makeDataFrame()
store.append('df',df)
s = store.get_storer('df')
repr(s)
str(s)
def test_contains(self):
with ensure_clean_store(self.path) as store:
store['a'] = tm.makeTimeSeries()
store['b'] = tm.makeDataFrame()
store['foo/bar'] = tm.makeDataFrame()
self.assert_('a' in store)
self.assert_('b' in store)
self.assert_('c' not in store)
self.assert_('foo/bar' in store)
self.assert_('/foo/bar' in store)
self.assert_('/foo/b' not in store)
self.assert_('bar' not in store)
# GH 2694
warnings.filterwarnings('ignore', category=tables.NaturalNameWarning)
store['node())'] = tm.makeDataFrame()
self.assert_('node())' in store)
def test_versioning(self):
with ensure_clean_store(self.path) as store:
store['a'] = tm.makeTimeSeries()
store['b'] = tm.makeDataFrame()
df = tm.makeTimeDataFrame()
_maybe_remove(store, 'df1')
store.append('df1', df[:10])
store.append('df1', df[10:])
self.assert_(store.root.a._v_attrs.pandas_version == '0.10.1')
self.assert_(store.root.b._v_attrs.pandas_version == '0.10.1')
self.assert_(store.root.df1._v_attrs.pandas_version == '0.10.1')
# write a file and wipe its versioning
_maybe_remove(store, 'df2')
store.append('df2', df)
# this is an error because its table_type is appendable, but no version
# info
store.get_node('df2')._v_attrs.pandas_version = None
self.assertRaises(Exception, store.select, 'df2')
def test_mode(self):
df = tm.makeTimeDataFrame()
def check(mode):
with ensure_clean_path(self.path) as path:
# constructor
if mode in ['r','r+']:
self.assertRaises(IOError, HDFStore, path, mode=mode)
else:
store = HDFStore(path,mode=mode)
self.assert_(store._handle.mode == mode)
store.close()
with ensure_clean_path(self.path) as path:
# context
if mode in ['r','r+']:
def f():
with get_store(path,mode=mode) as store:
pass
self.assertRaises(IOError, f)
else:
with get_store(path,mode=mode) as store:
self.assert_(store._handle.mode == mode)
with ensure_clean_path(self.path) as path:
# conv write
if mode in ['r','r+']:
self.assertRaises(IOError, df.to_hdf, path, 'df', mode=mode)
df.to_hdf(path,'df',mode='w')
else:
df.to_hdf(path,'df',mode=mode)
# conv read
if mode in ['w']:
self.assertRaises(KeyError, read_hdf, path, 'df', mode=mode)
else:
result = read_hdf(path,'df',mode=mode)
assert_frame_equal(result,df)
check('r')
check('r+')
check('a')
check('w')
def test_reopen_handle(self):
with ensure_clean_path(self.path) as path:
store = HDFStore(path,mode='a')
store['a'] = tm.makeTimeSeries()
# invalid mode change
self.assertRaises(PossibleDataLossError, store.open, 'w')
store.close()
self.assert_(not store.is_open)
# truncation ok here
store.open('w')
self.assert_(store.is_open)
self.assertEquals(len(store), 0)
store.close()
self.assert_(not store.is_open)
store = HDFStore(path,mode='a')
store['a'] = tm.makeTimeSeries()
# reopen as read
store.open('r')
self.assert_(store.is_open)
self.assertEquals(len(store), 1)
self.assert_(store._mode == 'r')
store.close()
self.assert_(not store.is_open)
# reopen as append
store.open('a')
self.assert_(store.is_open)
self.assertEquals(len(store), 1)
self.assert_(store._mode == 'a')
store.close()
self.assert_(not store.is_open)
# reopen as append (again)
store.open('a')
self.assert_(store.is_open)
self.assertEquals(len(store), 1)
self.assert_(store._mode == 'a')
store.close()
self.assert_(not store.is_open)
def test_open_args(self):
with ensure_clean_path(self.path) as path:
df = tm.makeDataFrame()
# create an in memory store
store = HDFStore(path,mode='a',driver='H5FD_CORE',driver_core_backing_store=0)
store['df'] = df
store.append('df2',df)
tm.assert_frame_equal(store['df'],df)
tm.assert_frame_equal(store['df2'],df)
store.close()
# only supported on pytable >= 3.0.0
if LooseVersion(tables.__version__) >= '3.0.0':
# the file should not have actually been written
self.assert_(os.path.exists(path) is False)
def test_flush(self):
with ensure_clean_store(self.path) as store:
store['a'] = tm.makeTimeSeries()
store.flush()
store.flush(fsync=True)
def test_get(self):
with ensure_clean_store(self.path) as store:
store['a'] = tm.makeTimeSeries()
left = store.get('a')
right = store['a']
tm.assert_series_equal(left, right)
left = store.get('/a')
right = store['/a']
tm.assert_series_equal(left, right)
self.assertRaises(KeyError, store.get, 'b')
def test_getattr(self):
with ensure_clean_store(self.path) as store:
s = tm.makeTimeSeries()
store['a'] = s
# test attribute access
result = store.a
tm.assert_series_equal(result, s)
result = getattr(store,'a')
tm.assert_series_equal(result, s)
df = tm.makeTimeDataFrame()
store['df'] = df
result = store.df
tm.assert_frame_equal(result, df)
# errors
self.assertRaises(AttributeError, getattr, store, 'd')
for x in ['mode','path','handle','complib']:
self.assertRaises(AttributeError, getattr, store, x)
# not stores
for x in ['mode','path','handle','complib']:
getattr(store,"_%s" % x)
def test_put(self):
with ensure_clean_store(self.path) as store:
ts = tm.makeTimeSeries()
df = tm.makeTimeDataFrame()
store['a'] = ts
store['b'] = df[:10]
store['foo/bar/bah'] = df[:10]
store['foo'] = df[:10]
store['/foo'] = df[:10]
store.put('c', df[:10], format='table')
# not OK, not a table
self.assertRaises(
ValueError, store.put, 'b', df[10:], append=True)
# node does not currently exist, test _is_table_type returns False in
# this case
# _maybe_remove(store, 'f')
# self.assertRaises(ValueError, store.put, 'f', df[10:], append=True)
# can't put to a table (use append instead)
self.assertRaises(ValueError, store.put, 'c', df[10:], append=True)
# overwrite table
store.put('c', df[:10], format='table', append=False)
tm.assert_frame_equal(df[:10], store['c'])
def test_put_string_index(self):
with ensure_clean_store(self.path) as store:
index = Index(
["I am a very long string index: %s" % i for i in range(20)])
s = Series(np.arange(20), index=index)
df = DataFrame({'A': s, 'B': s})
store['a'] = s
tm.assert_series_equal(store['a'], s)
store['b'] = df
tm.assert_frame_equal(store['b'], df)
# mixed length
index = Index(['abcdefghijklmnopqrstuvwxyz1234567890'] + ["I am a very long string index: %s" % i for i in range(20)])
s = Series(np.arange(21), index=index)
df = DataFrame({'A': s, 'B': s})
store['a'] = s
tm.assert_series_equal(store['a'], s)
store['b'] = df
tm.assert_frame_equal(store['b'], df)
def test_put_compression(self):
with ensure_clean_store(self.path) as store:
df = tm.makeTimeDataFrame()
store.put('c', df, format='table', complib='zlib')
tm.assert_frame_equal(store['c'], df)
# can't compress if format='fixed'
self.assertRaises(ValueError, store.put, 'b', df,
format='fixed', complib='zlib')
def test_put_compression_blosc(self):
tm.skip_if_no_package('tables', '2.2', app='blosc support')
df = tm.makeTimeDataFrame()
with ensure_clean_store(self.path) as store:
# can't compress if format='fixed'
self.assertRaises(ValueError, store.put, 'b', df,
format='fixed', complib='blosc')
store.put('c', df, format='table', complib='blosc')
tm.assert_frame_equal(store['c'], df)
def test_put_integer(self):
# non-date, non-string index
df = DataFrame(np.random.randn(50, 100))
self._check_roundtrip(df, tm.assert_frame_equal)
def test_put_mixed_type(self):
df = tm.makeTimeDataFrame()
df['obj1'] = 'foo'
df['obj2'] = 'bar'
df['bool1'] = df['A'] > 0
df['bool2'] = df['B'] > 0
df['bool3'] = True
df['int1'] = 1
df['int2'] = 2
df['timestamp1'] = Timestamp('20010102')
df['timestamp2'] = Timestamp('20010103')
df['datetime1'] = datetime.datetime(2001, 1, 2, 0, 0)
df['datetime2'] = datetime.datetime(2001, 1, 3, 0, 0)
df.ix[3:6, ['obj1']] = np.nan
df = df.consolidate().convert_objects()
with ensure_clean_store(self.path) as store:
_maybe_remove(store, 'df')
# cannot use assert_produces_warning here for some reason
# a PendingDeprecationWarning is also raised?
warnings.filterwarnings('ignore', category=PerformanceWarning)
store.put('df',df)
warnings.filterwarnings('always', category=PerformanceWarning)
expected = store.get('df')
tm.assert_frame_equal(expected,df)
def test_append(self):
with ensure_clean_store(self.path) as store:
df = tm.makeTimeDataFrame()
_maybe_remove(store, 'df1')
store.append('df1', df[:10])
store.append('df1', df[10:])
tm.assert_frame_equal(store['df1'], df)
_maybe_remove(store, 'df2')
store.put('df2', df[:10], format='table')
store.append('df2', df[10:])
tm.assert_frame_equal(store['df2'], df)
_maybe_remove(store, 'df3')
store.append('/df3', df[:10])
store.append('/df3', df[10:])
tm.assert_frame_equal(store['df3'], df)
            # this is allowed but you almost always don't want to do it
with tm.assert_produces_warning(expected_warning=tables.NaturalNameWarning):
_maybe_remove(store, '/df3 foo')
store.append('/df3 foo', df[:10])
store.append('/df3 foo', df[10:])
tm.assert_frame_equal(store['df3 foo'], df)
# panel
wp = tm.makePanel()
_maybe_remove(store, 'wp1')
store.append('wp1', wp.ix[:, :10, :])
store.append('wp1', wp.ix[:, 10:, :])
assert_panel_equal(store['wp1'], wp)
# ndim
p4d = tm.makePanel4D()
_maybe_remove(store, 'p4d')
store.append('p4d', p4d.ix[:, :, :10, :])
store.append('p4d', p4d.ix[:, :, 10:, :])
assert_panel4d_equal(store['p4d'], p4d)
# test using axis labels
_maybe_remove(store, 'p4d')
store.append('p4d', p4d.ix[:, :, :10, :], axes=[
'items', 'major_axis', 'minor_axis'])
store.append('p4d', p4d.ix[:, :, 10:, :], axes=[
'items', 'major_axis', 'minor_axis'])
assert_panel4d_equal(store['p4d'], p4d)
            # test using different number of items on each axis
p4d2 = p4d.copy()
p4d2['l4'] = p4d['l1']
p4d2['l5'] = p4d['l1']
_maybe_remove(store, 'p4d2')
store.append(
'p4d2', p4d2, axes=['items', 'major_axis', 'minor_axis'])
assert_panel4d_equal(store['p4d2'], p4d2)
            # test using a different order of items on the non-index axes
_maybe_remove(store, 'wp1')
wp_append1 = wp.ix[:, :10, :]
store.append('wp1', wp_append1)
wp_append2 = wp.ix[:, 10:, :].reindex(items=wp.items[::-1])
store.append('wp1', wp_append2)
assert_panel_equal(store['wp1'], wp)
            # dtype issues - mixed type in a single object column
df = DataFrame(data=[[1, 2], [0, 1], [1, 2], [0, 0]])
df['mixed_column'] = 'testing'
df.ix[2, 'mixed_column'] = np.nan
_maybe_remove(store, 'df')
store.append('df', df)
tm.assert_frame_equal(store['df'], df)
# uints - test storage of uints
uint_data = DataFrame({'u08' : Series(np.random.random_integers(0, high=255, size=5), dtype=np.uint8),
'u16' : Series(np.random.random_integers(0, high=65535, size=5), dtype=np.uint16),
'u32' : Series(np.random.random_integers(0, high=2**30, size=5), dtype=np.uint32),
'u64' : Series([2**58, 2**59, 2**60, 2**61, 2**62], dtype=np.uint64)},
index=np.arange(5))
_maybe_remove(store, 'uints')
store.append('uints', uint_data)
tm.assert_frame_equal(store['uints'], uint_data)
# uints - test storage of uints in indexable columns
_maybe_remove(store, 'uints')
store.append('uints', uint_data, data_columns=['u08','u16','u32']) # 64-bit indices not yet supported
tm.assert_frame_equal(store['uints'], uint_data)
def test_append_series(self):
with ensure_clean_store(self.path) as store:
# basic
ss = tm.makeStringSeries()
ts = tm.makeTimeSeries()
ns = Series(np.arange(100))
store.append('ss', ss)
result = store['ss']
tm.assert_series_equal(result, ss)
self.assert_(result.name is None)
store.append('ts', ts)
result = store['ts']
tm.assert_series_equal(result, ts)
self.assert_(result.name is None)
ns.name = 'foo'
store.append('ns', ns)
result = store['ns']
tm.assert_series_equal(result, ns)
self.assert_(result.name == ns.name)
# select on the values
expected = ns[ns>60]
result = store.select('ns',Term('foo>60'))
tm.assert_series_equal(result,expected)
# select on the index and values
expected = ns[(ns>70) & (ns.index<90)]
result = store.select('ns',[Term('foo>70'), Term('index<90')])
tm.assert_series_equal(result,expected)
# multi-index
mi = DataFrame(np.random.randn(5,1),columns=['A'])
mi['B'] = np.arange(len(mi))
mi['C'] = 'foo'
mi.loc[3:5,'C'] = 'bar'
mi.set_index(['C','B'],inplace=True)
s = mi.stack()
s.index = s.index.droplevel(2)
store.append('mi', s)
tm.assert_series_equal(store['mi'], s)
def test_store_index_types(self):
# GH5386
# test storing various index types
with ensure_clean_store(self.path) as store:
def check(format,index):
df = DataFrame(np.random.randn(10,2),columns=list('AB'))
df.index = index(len(df))
_maybe_remove(store, 'df')
store.put('df',df,format=format)
assert_frame_equal(df,store['df'])
for index in [ tm.makeFloatIndex, tm.makeStringIndex, tm.makeIntIndex,
tm.makeDateIndex, tm.makePeriodIndex ]:
check('table',index)
check('fixed',index)
# unicode
index = tm.makeUnicodeIndex
if compat.PY3:
check('table',index)
check('fixed',index)
else:
# only support for fixed types (and they have a perf warning)
self.assertRaises(TypeError, check, 'table', index)
with tm.assert_produces_warning(expected_warning=PerformanceWarning):
check('fixed',index)
def test_encoding(self):
if LooseVersion(tables.__version__) < '3.0.0':
raise nose.SkipTest('tables version does not support proper encoding')
if sys.byteorder != 'little':
raise nose.SkipTest('system byteorder is not little')
with ensure_clean_store(self.path) as store:
df = DataFrame(dict(A='foo',B='bar'),index=range(5))
df.loc[2,'A'] = np.nan
df.loc[3,'B'] = np.nan
_maybe_remove(store, 'df')
store.append('df', df, encoding='ascii')
tm.assert_frame_equal(store['df'], df)
expected = df.reindex(columns=['A'])
result = store.select('df',Term('columns=A',encoding='ascii'))
tm.assert_frame_equal(result,expected)
def test_append_some_nans(self):
with ensure_clean_store(self.path) as store:
df = DataFrame({'A' : Series(np.random.randn(20)).astype('int32'),
'A1' : np.random.randn(20),
'A2' : np.random.randn(20),
'B' : 'foo', 'C' : 'bar', 'D' : Timestamp("20010101"), 'E' : datetime.datetime(2001,1,2,0,0) },
index=np.arange(20))
# some nans
_maybe_remove(store, 'df1')
df.ix[0:15,['A1','B','D','E']] = np.nan
store.append('df1', df[:10])
store.append('df1', df[10:])
tm.assert_frame_equal(store['df1'], df)
# first column
df1 = df.copy()
df1.ix[:,'A1'] = np.nan
_maybe_remove(store, 'df1')
store.append('df1', df1[:10])
store.append('df1', df1[10:])
tm.assert_frame_equal(store['df1'], df1)
# 2nd column
df2 = df.copy()
df2.ix[:,'A2'] = np.nan
_maybe_remove(store, 'df2')
store.append('df2', df2[:10])
store.append('df2', df2[10:])
tm.assert_frame_equal(store['df2'], df2)
# datetimes
df3 = df.copy()
df3.ix[:,'E'] = np.nan
_maybe_remove(store, 'df3')
store.append('df3', df3[:10])
store.append('df3', df3[10:])
tm.assert_frame_equal(store['df3'], df3)
def test_append_all_nans(self):
with ensure_clean_store(self.path) as store:
df = DataFrame({'A1' : np.random.randn(20),
'A2' : np.random.randn(20)},
index=np.arange(20))
df.ix[0:15,:] = np.nan
# nan some entire rows (dropna=True)
_maybe_remove(store, 'df')
store.append('df', df[:10], dropna=True)
store.append('df', df[10:], dropna=True)
tm.assert_frame_equal(store['df'], df[-4:])
# nan some entire rows (dropna=False)
_maybe_remove(store, 'df2')
store.append('df2', df[:10], dropna=False)
store.append('df2', df[10:], dropna=False)
tm.assert_frame_equal(store['df2'], df)
# tests the option io.hdf.dropna_table
pandas.set_option('io.hdf.dropna_table',False)
_maybe_remove(store, 'df3')
store.append('df3', df[:10])
store.append('df3', df[10:])
tm.assert_frame_equal(store['df3'], df)
pandas.set_option('io.hdf.dropna_table',True)
_maybe_remove(store, 'df4')
store.append('df4', df[:10])
store.append('df4', df[10:])
tm.assert_frame_equal(store['df4'], df[-4:])
# nan some entire rows (string are still written!)
df = DataFrame({'A1' : np.random.randn(20),
'A2' : np.random.randn(20),
'B' : 'foo', 'C' : 'bar'},
index=np.arange(20))
df.ix[0:15,:] = np.nan
_maybe_remove(store, 'df')
store.append('df', df[:10], dropna=True)
store.append('df', df[10:], dropna=True)
tm.assert_frame_equal(store['df'], df)
_maybe_remove(store, 'df2')
store.append('df2', df[:10], dropna=False)
store.append('df2', df[10:], dropna=False)
tm.assert_frame_equal(store['df2'], df)
# nan some entire rows (but since we have dates they are still written!)
df = DataFrame({'A1' : np.random.randn(20),
'A2' : np.random.randn(20),
'B' : 'foo', 'C' : 'bar', 'D' : Timestamp("20010101"), 'E' : datetime.datetime(2001,1,2,0,0) },
index=np.arange(20))
df.ix[0:15,:] = np.nan
_maybe_remove(store, 'df')
store.append('df', df[:10], dropna=True)
store.append('df', df[10:], dropna=True)
tm.assert_frame_equal(store['df'], df)
_maybe_remove(store, 'df2')
store.append('df2', df[:10], dropna=False)
store.append('df2', df[10:], dropna=False)
tm.assert_frame_equal(store['df2'], df)
def test_append_frame_column_oriented(self):
with ensure_clean_store(self.path) as store:
# column oriented
df = tm.makeTimeDataFrame()
_maybe_remove(store, 'df1')
store.append('df1', df.ix[:, :2], axes=['columns'])
store.append('df1', df.ix[:, 2:])
tm.assert_frame_equal(store['df1'], df)
result = store.select('df1', 'columns=A')
expected = df.reindex(columns=['A'])
tm.assert_frame_equal(expected, result)
# selection on the non-indexable
result = store.select(
'df1', ('columns=A', Term('index=df.index[0:4]')))
expected = df.reindex(columns=['A'], index=df.index[0:4])
tm.assert_frame_equal(expected, result)
# this isn't supported
self.assertRaises(TypeError, store.select, 'df1', (
'columns=A', Term('index>df.index[4]')))
def test_append_with_different_block_ordering(self):
#GH 4096; using same frames, but different block orderings
with ensure_clean_store(self.path) as store:
for i in range(10):
df = DataFrame(np.random.randn(10,2),columns=list('AB'))
df['index'] = range(10)
df['index'] += i*10
df['int64'] = Series([1]*len(df),dtype='int64')
df['int16'] = Series([1]*len(df),dtype='int16')
if i % 2 == 0:
del df['int64']
df['int64'] = Series([1]*len(df),dtype='int64')
if i % 3 == 0:
a = df.pop('A')
df['A'] = a
df.set_index('index',inplace=True)
store.append('df',df)
        # test a different ordering but with more fields (like an invalid combination)
with ensure_clean_store(self.path) as store:
df = DataFrame(np.random.randn(10,2),columns=list('AB'), dtype='float64')
df['int64'] = Series([1]*len(df),dtype='int64')
df['int16'] = Series([1]*len(df),dtype='int16')
store.append('df',df)
            # store additional fields in different blocks
df['int16_2'] = Series([1]*len(df),dtype='int16')
self.assertRaises(ValueError, store.append, 'df', df)
            # store multiple additional fields in different blocks
df['float_3'] = Series([1.]*len(df),dtype='float64')
self.assertRaises(ValueError, store.append, 'df', df)
def test_ndim_indexables(self):
""" test using ndim tables in new ways"""
with ensure_clean_store(self.path) as store:
p4d = tm.makePanel4D()
def check_indexers(key, indexers):
for i, idx in enumerate(indexers):
self.assert_(getattr(getattr(
store.root, key).table.description, idx)._v_pos == i)
# append then change (will take existing schema)
indexers = ['items', 'major_axis', 'minor_axis']
_maybe_remove(store, 'p4d')
store.append('p4d', p4d.ix[:, :, :10, :], axes=indexers)
store.append('p4d', p4d.ix[:, :, 10:, :])
assert_panel4d_equal(store.select('p4d'), p4d)
check_indexers('p4d', indexers)
            # same as above, but try to append with different axes
_maybe_remove(store, 'p4d')
store.append('p4d', p4d.ix[:, :, :10, :], axes=indexers)
store.append('p4d', p4d.ix[:, :, 10:, :], axes=[
'labels', 'items', 'major_axis'])
assert_panel4d_equal(store.select('p4d'), p4d)
check_indexers('p4d', indexers)
# pass incorrect number of axes
_maybe_remove(store, 'p4d')
self.assertRaises(ValueError, store.append, 'p4d', p4d.ix[
:, :, :10, :], axes=['major_axis', 'minor_axis'])
# different than default indexables #1
indexers = ['labels', 'major_axis', 'minor_axis']
_maybe_remove(store, 'p4d')
store.append('p4d', p4d.ix[:, :, :10, :], axes=indexers)
store.append('p4d', p4d.ix[:, :, 10:, :])
assert_panel4d_equal(store['p4d'], p4d)
check_indexers('p4d', indexers)
# different than default indexables #2
indexers = ['major_axis', 'labels', 'minor_axis']
_maybe_remove(store, 'p4d')
store.append('p4d', p4d.ix[:, :, :10, :], axes=indexers)
store.append('p4d', p4d.ix[:, :, 10:, :])
assert_panel4d_equal(store['p4d'], p4d)
check_indexers('p4d', indexers)
# partial selection
result = store.select('p4d', ['labels=l1'])
expected = p4d.reindex(labels=['l1'])
assert_panel4d_equal(result, expected)
# partial selection2
result = store.select('p4d', [Term(
'labels=l1'), Term('items=ItemA'), Term('minor_axis=B')])
expected = p4d.reindex(
labels=['l1'], items=['ItemA'], minor_axis=['B'])
assert_panel4d_equal(result, expected)
            # non-existent partial selection
result = store.select('p4d', [Term(
'labels=l1'), Term('items=Item1'), Term('minor_axis=B')])
expected = p4d.reindex(labels=['l1'], items=[], minor_axis=['B'])
assert_panel4d_equal(result, expected)
def test_append_with_strings(self):
with ensure_clean_store(self.path) as store:
wp = tm.makePanel()
wp2 = wp.rename_axis(
dict([(x, "%s_extra" % x) for x in wp.minor_axis]), axis=2)
def check_col(key,name,size):
self.assert_(getattr(store.get_storer(key).table.description,name).itemsize == size)
store.append('s1', wp, min_itemsize=20)
store.append('s1', wp2)
expected = concat([wp, wp2], axis=2)
expected = expected.reindex(minor_axis=sorted(expected.minor_axis))
assert_panel_equal(store['s1'], expected)
check_col('s1', 'minor_axis', 20)
# test dict format
store.append('s2', wp, min_itemsize={'minor_axis': 20})
store.append('s2', wp2)
expected = concat([wp, wp2], axis=2)
expected = expected.reindex(minor_axis=sorted(expected.minor_axis))
assert_panel_equal(store['s2'], expected)
check_col('s2', 'minor_axis', 20)
# apply the wrong field (similar to #1)
store.append('s3', wp, min_itemsize={'major_axis': 20})
self.assertRaises(ValueError, store.append, 's3', wp2)
# test truncation of bigger strings
store.append('s4', wp)
self.assertRaises(ValueError, store.append, 's4', wp2)
# avoid truncation on elements
df = DataFrame([[123, 'asdqwerty'], [345, 'dggnhebbsdfbdfb']])
store.append('df_big', df)
tm.assert_frame_equal(store.select('df_big'), df)
check_col('df_big', 'values_block_1', 15)
# appending smaller string ok
df2 = DataFrame([[124, 'asdqy'], [346, 'dggnhefbdfb']])
store.append('df_big', df2)
expected = concat([df, df2])
tm.assert_frame_equal(store.select('df_big'), expected)
check_col('df_big', 'values_block_1', 15)
            # explicitly set min_itemsize on values to avoid truncation
df = DataFrame([[123, 'asdqwerty'], [345, 'dggnhebbsdfbdfb']])
store.append('df_big2', df, min_itemsize={'values': 50})
tm.assert_frame_equal(store.select('df_big2'), df)
check_col('df_big2', 'values_block_1', 50)
# bigger string on next append
store.append('df_new', df)
df_new = DataFrame(
[[124, 'abcdefqhij'], [346, 'abcdefghijklmnopqrtsuvwxyz']])
self.assertRaises(ValueError, store.append, 'df_new', df_new)
# with nans
_maybe_remove(store, 'df')
df = tm.makeTimeDataFrame()
df['string'] = 'foo'
df.ix[1:4, 'string'] = np.nan
df['string2'] = 'bar'
df.ix[4:8, 'string2'] = np.nan
df['string3'] = 'bah'
df.ix[1:, 'string3'] = np.nan
store.append('df', df)
result = store.select('df')
tm.assert_frame_equal(result, df)
with ensure_clean_store(self.path) as store:
def check_col(key,name,size):
self.assert_(getattr(store.get_storer(key).table.description,name).itemsize == size)
df = DataFrame(dict(A = 'foo', B = 'bar'),index=range(10))
# a min_itemsize that creates a data_column
_maybe_remove(store, 'df')
store.append('df', df, min_itemsize={'A' : 200 })
check_col('df', 'A', 200)
self.assert_(store.get_storer('df').data_columns == ['A'])
# a min_itemsize that creates a data_column2
_maybe_remove(store, 'df')
store.append('df', df, data_columns = ['B'], min_itemsize={'A' : 200 })
check_col('df', 'A', 200)
self.assert_(store.get_storer('df').data_columns == ['B','A'])
            # a min_itemsize on 'values' sizes both data columns and value blocks
_maybe_remove(store, 'df')
store.append('df', df, data_columns = ['B'], min_itemsize={'values' : 200 })
check_col('df', 'B', 200)
check_col('df', 'values_block_0', 200)
self.assert_(store.get_storer('df').data_columns == ['B'])
# infer the .typ on subsequent appends
_maybe_remove(store, 'df')
store.append('df', df[:5], min_itemsize=200)
store.append('df', df[5:], min_itemsize=200)
tm.assert_frame_equal(store['df'], df)
# invalid min_itemsize keys
df = DataFrame(['foo','foo','foo','barh','barh','barh'],columns=['A'])
_maybe_remove(store, 'df')
self.assertRaises(ValueError, store.append, 'df', df, min_itemsize={'foo' : 20, 'foobar' : 20})
def test_append_with_data_columns(self):
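        """ test that data_columns are stored as individually indexed, queryable columns """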
with ensure_clean_store(self.path) as store:
df = tm.makeTimeDataFrame()
df.loc[:,'B'].iloc[0] = 1.
_maybe_remove(store, 'df')
store.append('df', df[:2], data_columns=['B'])
store.append('df', df[2:])
tm.assert_frame_equal(store['df'], df)
            # check that we have indices created
assert(store._handle.root.df.table.cols.index.is_indexed is True)
assert(store._handle.root.df.table.cols.B.is_indexed is True)
# data column searching
result = store.select('df', [Term('B>0')])
expected = df[df.B > 0]
tm.assert_frame_equal(result, expected)
# data column searching (with an indexable and a data_columns)
result = store.select(
'df', [Term('B>0'), Term('index>df.index[3]')])
df_new = df.reindex(index=df.index[4:])
expected = df_new[df_new.B > 0]
tm.assert_frame_equal(result, expected)
# data column selection with a string data_column
df_new = df.copy()
df_new['string'] = 'foo'
df_new['string'][1:4] = np.nan
df_new['string'][5:6] = 'bar'
_maybe_remove(store, 'df')
store.append('df', df_new, data_columns=['string'])
result = store.select('df', [Term('string=foo')])
expected = df_new[df_new.string == 'foo']
tm.assert_frame_equal(result, expected)
# using min_itemsize and a data column
def check_col(key,name,size):
self.assert_(getattr(store.get_storer(key).table.description,name).itemsize == size)
with ensure_clean_store(self.path) as store:
_maybe_remove(store, 'df')
store.append('df', df_new, data_columns=['string'],
min_itemsize={'string': 30})
check_col('df', 'string', 30)
_maybe_remove(store, 'df')
store.append(
'df', df_new, data_columns=['string'], min_itemsize=30)
check_col('df', 'string', 30)
_maybe_remove(store, 'df')
store.append('df', df_new, data_columns=['string'],
min_itemsize={'values': 30})
check_col('df', 'string', 30)
with ensure_clean_store(self.path) as store:
df_new['string2'] = 'foobarbah'
df_new['string_block1'] = 'foobarbah1'
df_new['string_block2'] = 'foobarbah2'
_maybe_remove(store, 'df')
store.append('df', df_new, data_columns=['string', 'string2'], min_itemsize={'string': 30, 'string2': 40, 'values': 50})
check_col('df', 'string', 30)
check_col('df', 'string2', 40)
check_col('df', 'values_block_1', 50)
with ensure_clean_store(self.path) as store:
# multiple data columns
df_new = df.copy()
df_new.loc[:,'A'].iloc[0] = 1.
df_new.loc[:,'B'].iloc[0] = -1.
df_new['string'] = 'foo'
df_new['string'][1:4] = np.nan
df_new['string'][5:6] = 'bar'
df_new['string2'] = 'foo'
df_new['string2'][2:5] = np.nan
df_new['string2'][7:8] = 'bar'
_maybe_remove(store, 'df')
store.append(
'df', df_new, data_columns=['A', 'B', 'string', 'string2'])
result = store.select('df', [Term('string=foo'), Term(
'string2=foo'), Term('A>0'), Term('B<0')])
expected = df_new[(df_new.string == 'foo') & (
df_new.string2 == 'foo') & (df_new.A > 0) & (df_new.B < 0)]
tm.assert_frame_equal(result, expected, check_index_type=False)
# yield an empty frame
result = store.select('df', [Term('string=foo'), Term(
'string2=cool')])
expected = df_new[(df_new.string == 'foo') & (
df_new.string2 == 'cool')]
tm.assert_frame_equal(result, expected, check_index_type=False)
with ensure_clean_store(self.path) as store:
# doc example
df_dc = df.copy()
df_dc['string'] = 'foo'
df_dc.ix[4:6, 'string'] = np.nan
df_dc.ix[7:9, 'string'] = 'bar'
df_dc['string2'] = 'cool'
df_dc['datetime'] = Timestamp('20010102')
df_dc = df_dc.convert_objects()
df_dc.ix[3:5, ['A', 'B', 'datetime']] = np.nan
_maybe_remove(store, 'df_dc')
store.append('df_dc', df_dc, data_columns=['B', 'C',
'string', 'string2', 'datetime'])
result = store.select('df_dc', [Term('B>0')])
expected = df_dc[df_dc.B > 0]
tm.assert_frame_equal(result, expected, check_index_type=False)
result = store.select(
'df_dc', ['B > 0', 'C > 0', 'string == foo'])
expected = df_dc[(df_dc.B > 0) & (df_dc.C > 0) & (
df_dc.string == 'foo')]
tm.assert_frame_equal(result, expected, check_index_type=False)
with ensure_clean_store(self.path) as store:
# doc example part 2
np.random.seed(1234)
index = date_range('1/1/2000', periods=8)
df_dc = DataFrame(np.random.randn(8, 3), index=index,
columns=['A', 'B', 'C'])
df_dc['string'] = 'foo'
df_dc.ix[4:6,'string'] = np.nan
df_dc.ix[7:9,'string'] = 'bar'
df_dc.ix[:,['B','C']] = df_dc.ix[:,['B','C']].abs()
df_dc['string2'] = 'cool'
# on-disk operations
store.append('df_dc', df_dc, data_columns = ['B', 'C', 'string', 'string2'])
result = store.select('df_dc', [ Term('B>0') ])
expected = df_dc[df_dc.B>0]
tm.assert_frame_equal(result,expected)
result = store.select('df_dc', ['B > 0', 'C > 0', 'string == "foo"'])
expected = df_dc[(df_dc.B > 0) & (df_dc.C > 0) & (df_dc.string == 'foo')]
tm.assert_frame_equal(result,expected)
with ensure_clean_store(self.path) as store:
# panel
# GH5717 not handling data_columns
np.random.seed(1234)
p = tm.makePanel()
store.append('p1',p)
tm.assert_panel_equal(store.select('p1'),p)
store.append('p2',p,data_columns=True)
tm.assert_panel_equal(store.select('p2'),p)
result = store.select('p2',where='ItemA>0')
expected = p.to_frame()
expected = expected[expected['ItemA']>0]
tm.assert_frame_equal(result.to_frame(),expected)
result = store.select('p2',where='ItemA>0 & minor_axis=["A","B"]')
expected = p.to_frame()
expected = expected[expected['ItemA']>0]
expected = expected[expected.reset_index(level=['major']).index.isin(['A','B'])]
tm.assert_frame_equal(result.to_frame(),expected)
def test_create_table_index(self):
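        """ test create_table_index: index=True/False on append, optlevel/kind options, and data column indexes """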
with ensure_clean_store(self.path) as store:
def col(t,column):
return getattr(store.get_storer(t).table.cols,column)
# index=False
wp = tm.makePanel()
store.append('p5', wp, index=False)
store.create_table_index('p5', columns=['major_axis'])
assert(col('p5', 'major_axis').is_indexed is True)
assert(col('p5', 'minor_axis').is_indexed is False)
# index=True
store.append('p5i', wp, index=True)
assert(col('p5i', 'major_axis').is_indexed is True)
assert(col('p5i', 'minor_axis').is_indexed is True)
# default optlevels
store.get_storer('p5').create_index()
assert(col('p5', 'major_axis').index.optlevel == 6)
assert(col('p5', 'minor_axis').index.kind == 'medium')
# let's change the indexing scheme
store.create_table_index('p5')
assert(col('p5', 'major_axis').index.optlevel == 6)
assert(col('p5', 'minor_axis').index.kind == 'medium')
store.create_table_index('p5', optlevel=9)
assert(col('p5', 'major_axis').index.optlevel == 9)
assert(col('p5', 'minor_axis').index.kind == 'medium')
store.create_table_index('p5', kind='full')
assert(col('p5', 'major_axis').index.optlevel == 9)
assert(col('p5', 'minor_axis').index.kind == 'full')
store.create_table_index('p5', optlevel=1, kind='light')
assert(col('p5', 'major_axis').index.optlevel == 1)
assert(col('p5', 'minor_axis').index.kind == 'light')
# data columns
df = tm.makeTimeDataFrame()
df['string'] = 'foo'
df['string2'] = 'bar'
store.append('f', df, data_columns=['string', 'string2'])
assert(col('f', 'index').is_indexed is True)
assert(col('f', 'string').is_indexed is True)
assert(col('f', 'string2').is_indexed is True)
            # specify index= as a list of columns to index
store.append(
'f2', df, index=['string'], data_columns=['string', 'string2'])
assert(col('f2', 'index').is_indexed is False)
assert(col('f2', 'string').is_indexed is True)
assert(col('f2', 'string2').is_indexed is False)
# try to index a non-table
_maybe_remove(store, 'f2')
store.put('f2', df)
self.assertRaises(TypeError, store.create_table_index, 'f2')
# try to change the version supports flag
from pandas.io import pytables
pytables._table_supports_index = False
self.assertRaises(Exception, store.create_table_index, 'f')
# test out some versions
original = tables.__version__
for v in ['2.2', '2.2b']:
pytables._table_mod = None
pytables._table_supports_index = False
tables.__version__ = v
self.assertRaises(Exception, store.create_table_index, 'f')
for v in ['2.3.1', '2.3.1b', '2.4dev', '2.4', original]:
pytables._table_mod = None
pytables._table_supports_index = False
tables.__version__ = v
store.create_table_index('f')
pytables._table_mod = None
pytables._table_supports_index = False
tables.__version__ = original
def test_big_table_frame(self):
raise nose.SkipTest('no big table frame')
# create and write a big table
df = DataFrame(np.random.randn(2000 * 100, 100), index=range(
2000 * 100), columns=['E%03d' % i for i in range(100)])
for x in range(20):
df['String%03d' % x] = 'string%03d' % x
import time
x = time.time()
with ensure_clean_store(self.path,mode='w') as store:
store.append('df', df)
rows = store.root.df.table.nrows
recons = store.select('df')
assert isinstance(recons, DataFrame)
print("\nbig_table frame [%s] -> %5.2f" % (rows, time.time() - x))
def test_big_table2_frame(self):
# this is a really big table: 1m rows x 60 float columns, 20 string, 20 datetime
# columns
raise nose.SkipTest('no big table2 frame')
# create and write a big table
print("\nbig_table2 start")
import time
start_time = time.time()
df = DataFrame(np.random.randn(1000 * 1000, 60), index=range(int(
1000 * 1000)), columns=['E%03d' % i for i in range(60)])
for x in range(20):
df['String%03d' % x] = 'string%03d' % x
for x in range(20):
df['datetime%03d' % x] = datetime.datetime(2001, 1, 2, 0, 0)
print("\nbig_table2 frame (creation of df) [rows->%s] -> %5.2f"
% (len(df.index), time.time() - start_time))
def f(chunksize):
with ensure_clean_store(self.path,mode='w') as store:
store.append('df', df, chunksize=chunksize)
r = store.root.df.table.nrows
return r
for c in [10000, 50000, 250000]:
start_time = time.time()
print("big_table2 frame [chunk->%s]" % c)
rows = f(c)
print("big_table2 frame [rows->%s,chunk->%s] -> %5.2f"
% (rows, c, time.time() - start_time))
def test_big_put_frame(self):
raise nose.SkipTest('no big put frame')
print("\nbig_put start")
import time
start_time = time.time()
df = DataFrame(np.random.randn(1000 * 1000, 60), index=range(int(
1000 * 1000)), columns=['E%03d' % i for i in range(60)])
for x in range(20):
df['String%03d' % x] = 'string%03d' % x
for x in range(20):
df['datetime%03d' % x] = datetime.datetime(2001, 1, 2, 0, 0)
print("\nbig_put frame (creation of df) [rows->%s] -> %5.2f"
% (len(df.index), time.time() - start_time))
with ensure_clean_store(self.path, mode='w') as store:
start_time = time.time()
store = HDFStore(self.path, mode='w')
store.put('df', df)
print(df.get_dtype_counts())
print("big_put frame [shape->%s] -> %5.2f"
% (df.shape, time.time() - start_time))
def test_big_table_panel(self):
raise nose.SkipTest('no big table panel')
# create and write a big table
wp = Panel(
np.random.randn(20, 1000, 1000), items=['Item%03d' % i for i in range(20)],
major_axis=date_range('1/1/2000', periods=1000), minor_axis=['E%03d' % i for i in range(1000)])
wp.ix[:, 100:200, 300:400] = np.nan
for x in range(100):
            wp['String%03d' % x] = 'string%03d' % x
import time
x = time.time()
with ensure_clean_store(self.path, mode='w') as store:
store.append('wp', wp)
rows = store.root.wp.table.nrows
recons = store.select('wp')
assert isinstance(recons, Panel)
print("\nbig_table panel [%s] -> %5.2f" % (rows, time.time() - x))
def test_append_diff_item_order(self):
wp = tm.makePanel()
wp1 = wp.ix[:, :10, :]
wp2 = wp.ix[['ItemC', 'ItemB', 'ItemA'], 10:, :]
with ensure_clean_store(self.path) as store:
store.put('panel', wp1, format='table')
self.assertRaises(ValueError, store.put, 'panel', wp2,
append=True)
def test_append_hierarchical(self):
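        """ test that a MultiIndexed frame round-trips through table format, including column subsetting (GH 3748) """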
index = MultiIndex(levels=[['foo', 'bar', 'baz', 'qux'],
['one', 'two', 'three']],
labels=[[0, 0, 0, 1, 1, 2, 2, 3, 3, 3],
[0, 1, 2, 0, 1, 1, 2, 0, 1, 2]],
names=['foo', 'bar'])
df = DataFrame(np.random.randn(10, 3), index=index,
columns=['A', 'B', 'C'])
with ensure_clean_store(self.path) as store:
store.append('mi', df)
result = store.select('mi')
tm.assert_frame_equal(result, df)
# GH 3748
result = store.select('mi',columns=['A','B'])
expected = df.reindex(columns=['A','B'])
tm.assert_frame_equal(result,expected)
with ensure_clean_path('test.hdf') as path:
df.to_hdf(path,'df',format='table')
result = read_hdf(path,'df',columns=['A','B'])
expected = df.reindex(columns=['A','B'])
tm.assert_frame_equal(result,expected)
def test_column_multiindex(self):
# GH 4710
# recreate multi-indexes properly
index = MultiIndex.from_tuples([('A','a'), ('A','b'), ('B','a'), ('B','b')], names=['first','second'])
df = DataFrame(np.arange(12).reshape(3,4), columns=index)
with ensure_clean_store(self.path) as store:
store.put('df',df)
tm.assert_frame_equal(store['df'],df,check_index_type=True,check_column_type=True)
store.put('df1',df,format='table')
tm.assert_frame_equal(store['df1'],df,check_index_type=True,check_column_type=True)
self.assertRaises(ValueError, store.put, 'df2',df,format='table',data_columns=['A'])
self.assertRaises(ValueError, store.put, 'df3',df,format='table',data_columns=True)
# appending multi-column on existing table (see GH 6167)
with ensure_clean_store(self.path) as store:
store.append('df2', df)
store.append('df2', df)
tm.assert_frame_equal(store['df2'], concat((df,df)))
# non_index_axes name
df = DataFrame(np.arange(12).reshape(3,4), columns=Index(list('ABCD'),name='foo'))
with ensure_clean_store(self.path) as store:
store.put('df1',df,format='table')
tm.assert_frame_equal(store['df1'],df,check_index_type=True,check_column_type=True)
def test_store_multiindex(self):
# validate multi-index names
# GH 5527
with ensure_clean_store(self.path) as store:
def make_index(names=None):
return MultiIndex.from_tuples([( datetime.datetime(2013,12,d), s, t) for d in range(1,3) for s in range(2) for t in range(3)],
names=names)
# no names
_maybe_remove(store, 'df')
df = DataFrame(np.zeros((12,2)), columns=['a','b'], index=make_index())
store.append('df',df)
tm.assert_frame_equal(store.select('df'),df)
# partial names
_maybe_remove(store, 'df')
df = DataFrame(np.zeros((12,2)), columns=['a','b'], index=make_index(['date',None,None]))
store.append('df',df)
tm.assert_frame_equal(store.select('df'),df)
# series
_maybe_remove(store, 's')
s = Series(np.zeros(12), index=make_index(['date',None,None]))
store.append('s',s)
tm.assert_series_equal(store.select('s'),s)
# dup with column
_maybe_remove(store, 'df')
df = DataFrame(np.zeros((12,2)), columns=['a','b'], index=make_index(['date','a','t']))
self.assertRaises(ValueError, store.append, 'df',df)
            # duplicated names across index levels
_maybe_remove(store, 'df')
df = DataFrame(np.zeros((12,2)), columns=['a','b'], index=make_index(['date','date','date']))
self.assertRaises(ValueError, store.append, 'df',df)
            # fully named
_maybe_remove(store, 'df')
df = DataFrame(np.zeros((12,2)), columns=['a','b'], index=make_index(['date','s','t']))
store.append('df',df)
tm.assert_frame_equal(store.select('df'),df)
def test_select_columns_in_where(self):
# GH 6169
# recreate multi-indexes when columns is passed
# in the `where` argument
index = MultiIndex(levels=[['foo', 'bar', 'baz', 'qux'],
['one', 'two', 'three']],
labels=[[0, 0, 0, 1, 1, 2, 2, 3, 3, 3],
[0, 1, 2, 0, 1, 1, 2, 0, 1, 2]],
names=['foo_name', 'bar_name'])
# With a DataFrame
df = DataFrame(np.random.randn(10, 3), index=index,
columns=['A', 'B', 'C'])
with ensure_clean_store(self.path) as store:
store.put('df', df, format='table')
expected = df[['A']]
tm.assert_frame_equal(store.select('df', columns=['A']), expected)
tm.assert_frame_equal(store.select('df', where="columns=['A']"), expected)
# With a Series
s = Series(np.random.randn(10), index=index,
name='A')
with ensure_clean_store(self.path) as store:
store.put('s', s, format='table')
tm.assert_series_equal(store.select('s', where="columns=['A']"),s)
def test_pass_spec_to_storer(self):
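        """ test that fixed (non-table) storers reject table-only selection arguments """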
df = tm.makeDataFrame()
with ensure_clean_store(self.path) as store:
store.put('df',df)
self.assertRaises(TypeError, store.select, 'df', columns=['A'])
self.assertRaises(TypeError, store.select, 'df',where=[('columns=A')])
def test_append_misc(self):
with ensure_clean_store(self.path) as store:
            # unsupported data types for non-tables
p4d = tm.makePanel4D()
self.assertRaises(TypeError, store.put,'p4d',p4d)
            # unsupported data types
self.assertRaises(TypeError, store.put,'abc',None)
self.assertRaises(TypeError, store.put,'abc','123')
self.assertRaises(TypeError, store.put,'abc',123)
self.assertRaises(TypeError, store.put,'abc',np.arange(5))
df = tm.makeDataFrame()
store.append('df', df, chunksize=1)
result = store.select('df')
tm.assert_frame_equal(result, df)
store.append('df1', df, expectedrows=10)
result = store.select('df1')
tm.assert_frame_equal(result, df)
# more chunksize in append tests
def check(obj, comparator):
for c in [10, 200, 1000]:
with ensure_clean_store(self.path,mode='w') as store:
store.append('obj', obj, chunksize=c)
result = store.select('obj')
comparator(result,obj)
df = tm.makeDataFrame()
df['string'] = 'foo'
df['float322'] = 1.
df['float322'] = df['float322'].astype('float32')
df['bool'] = df['float322'] > 0
df['time1'] = Timestamp('20130101')
df['time2'] = Timestamp('20130102')
check(df, tm.assert_frame_equal)
p = tm.makePanel()
check(p, assert_panel_equal)
p4d = tm.makePanel4D()
check(p4d, assert_panel4d_equal)
# empty frame, GH4273
with ensure_clean_store(self.path) as store:
# 0 len
df_empty = DataFrame(columns=list('ABC'))
store.append('df',df_empty)
self.assertRaises(KeyError,store.select, 'df')
# repeated append of 0/non-zero frames
df = DataFrame(np.random.rand(10,3),columns=list('ABC'))
store.append('df',df)
assert_frame_equal(store.select('df'),df)
store.append('df',df_empty)
assert_frame_equal(store.select('df'),df)
# store
df = DataFrame(columns=list('ABC'))
store.put('df2',df)
assert_frame_equal(store.select('df2'),df)
# 0 len
p_empty = Panel(items=list('ABC'))
store.append('p',p_empty)
self.assertRaises(KeyError,store.select, 'p')
# repeated append of 0/non-zero frames
p = Panel(np.random.randn(3,4,5),items=list('ABC'))
store.append('p',p)
assert_panel_equal(store.select('p'),p)
store.append('p',p_empty)
assert_panel_equal(store.select('p'),p)
# store
store.put('p2',p_empty)
assert_panel_equal(store.select('p2'),p_empty)
def test_append_raise(self):
with ensure_clean_store(self.path) as store:
# test append with invalid input to get good error messages
# list in column
df = tm.makeDataFrame()
df['invalid'] = [['a']] * len(df)
self.assert_(df.dtypes['invalid'] == np.object_)
self.assertRaises(TypeError, store.append,'df',df)
# multiple invalid columns
df['invalid2'] = [['a']] * len(df)
df['invalid3'] = [['a']] * len(df)
self.assertRaises(TypeError, store.append,'df',df)
# datetime with embedded nans as object
df = tm.makeDataFrame()
s = Series(datetime.datetime(2001,1,2),index=df.index)
s = s.astype(object)
s[0:5] = np.nan
df['invalid'] = s
self.assert_(df.dtypes['invalid'] == np.object_)
self.assertRaises(TypeError, store.append,'df', df)
            # directly passing an ndarray
self.assertRaises(TypeError, store.append,'df',np.arange(10))
# series directly
self.assertRaises(TypeError, store.append,'df',Series(np.arange(10)))
            # appending an incompatible table
df = tm.makeDataFrame()
store.append('df',df)
df['foo'] = 'foo'
self.assertRaises(ValueError, store.append,'df',df)
def test_table_index_incompatible_dtypes(self):
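        """ test that appending a frame with an incompatible index dtype raises """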
df1 = DataFrame({'a': [1, 2, 3]})
df2 = DataFrame({'a': [4, 5, 6]},
index=date_range('1/1/2000', periods=3))
with ensure_clean_store(self.path) as store:
store.put('frame', df1, format='table')
self.assertRaises(TypeError, store.put, 'frame', df2,
format='table', append=True)
def test_table_values_dtypes_roundtrip(self):
with ensure_clean_store(self.path) as store:
df1 = DataFrame({'a': [1, 2, 3]}, dtype='f8')
store.append('df_f8', df1)
assert_series_equal(df1.dtypes,store['df_f8'].dtypes)
df2 = DataFrame({'a': [1, 2, 3]}, dtype='i8')
store.append('df_i8', df2)
assert_series_equal(df2.dtypes,store['df_i8'].dtypes)
# incompatible dtype
self.assertRaises(ValueError, store.append, 'df_i8', df1)
            # check creation/storage/retrieval of float32 (a bit hacky to actually create them though)
df1 = DataFrame(np.array([[1],[2],[3]],dtype='f4'),columns = ['A'])
store.append('df_f4', df1)
assert_series_equal(df1.dtypes,store['df_f4'].dtypes)
assert df1.dtypes[0] == 'float32'
# check with mixed dtypes
df1 = DataFrame(dict([ (c,Series(np.random.randn(5),dtype=c)) for c in
['float32','float64','int32','int64','int16','int8'] ]))
df1['string'] = 'foo'
df1['float322'] = 1.
df1['float322'] = df1['float322'].astype('float32')
df1['bool'] = df1['float32'] > 0
df1['time1'] = Timestamp('20130101')
df1['time2'] = Timestamp('20130102')
store.append('df_mixed_dtypes1', df1)
result = store.select('df_mixed_dtypes1').get_dtype_counts()
expected = Series({ 'float32' : 2, 'float64' : 1,'int32' : 1, 'bool' : 1,
'int16' : 1, 'int8' : 1, 'int64' : 1, 'object' : 1,
'datetime64[ns]' : 2})
result.sort()
expected.sort()
tm.assert_series_equal(result,expected)
def test_table_mixed_dtypes(self):
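        """ test round-trip of mixed-dtype frames, panels and 4d panels through table format """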
# frame
df = tm.makeDataFrame()
df['obj1'] = 'foo'
df['obj2'] = 'bar'
df['bool1'] = df['A'] > 0
df['bool2'] = df['B'] > 0
df['bool3'] = True
df['int1'] = 1
df['int2'] = 2
df['timestamp1'] = Timestamp('20010102')
df['timestamp2'] = Timestamp('20010103')
df['datetime1'] = datetime.datetime(2001, 1, 2, 0, 0)
df['datetime2'] = datetime.datetime(2001, 1, 3, 0, 0)
df.ix[3:6, ['obj1']] = np.nan
df = df.consolidate().convert_objects()
with ensure_clean_store(self.path) as store:
store.append('df1_mixed', df)
tm.assert_frame_equal(store.select('df1_mixed'), df)
# panel
wp = tm.makePanel()
wp['obj1'] = 'foo'
wp['obj2'] = 'bar'
wp['bool1'] = wp['ItemA'] > 0
wp['bool2'] = wp['ItemB'] > 0
wp['int1'] = 1
wp['int2'] = 2
wp = wp.consolidate()
with ensure_clean_store(self.path) as store:
store.append('p1_mixed', wp)
assert_panel_equal(store.select('p1_mixed'), wp)
# ndim
wp = tm.makePanel4D()
wp['obj1'] = 'foo'
wp['obj2'] = 'bar'
wp['bool1'] = wp['l1'] > 0
wp['bool2'] = wp['l2'] > 0
wp['int1'] = 1
wp['int2'] = 2
wp = wp.consolidate()
with ensure_clean_store(self.path) as store:
store.append('p4d_mixed', wp)
assert_panel4d_equal(store.select('p4d_mixed'), wp)
def test_unimplemented_dtypes_table_columns(self):
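        """ test that unsupported column dtypes (datetime.date; unicode on py2) raise on append """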
with ensure_clean_store(self.path) as store:
l = [('date', datetime.date(2001, 1, 2))]
# py3 ok for unicode
if not compat.PY3:
l.append(('unicode', u('\\u03c3')))
### currently not supported dtypes ####
for n, f in l:
df = tm.makeDataFrame()
df[n] = f
self.assertRaises(
TypeError, store.append, 'df1_%s' % n, df)
# frame
df = tm.makeDataFrame()
df['obj1'] = 'foo'
df['obj2'] = 'bar'
df['datetime1'] = datetime.date(2001, 1, 2)
df = df.consolidate().convert_objects()
with ensure_clean_store(self.path) as store:
            # this fails because we have a date in the object block
self.assertRaises(TypeError, store.append, 'df_unimplemented', df)
def test_append_with_timezones(self):
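        """ test storing timezone-aware columns and indexes; mixing zones across appends raises """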
from datetime import timedelta
def compare(a,b):
tm.assert_frame_equal(a,b)
# compare the zones on each element
for c in a.columns:
for i in a.index:
a_e = a[c][i]
b_e = b[c][i]
if not (a_e == b_e and a_e.tz == b_e.tz):
raise AssertionError("invalid tz comparsion [%s] [%s]" % (a_e,b_e))
# as columns
with ensure_clean_store(self.path) as store:
_maybe_remove(store, 'df_tz')
df = DataFrame(dict(A = [ Timestamp('20130102 2:00:00',tz='US/Eastern') + timedelta(hours=1)*i for i in range(5) ]))
store.append('df_tz',df,data_columns=['A'])
result = store['df_tz']
compare(result,df)
assert_frame_equal(result,df)
# select with tz aware
compare(store.select('df_tz',where=Term('A>=df.A[3]')),df[df.A>=df.A[3]])
_maybe_remove(store, 'df_tz')
df = DataFrame(dict(A = Timestamp('20130102',tz='US/Eastern'), B = Timestamp('20130103',tz='US/Eastern')),index=range(5))
store.append('df_tz',df)
result = store['df_tz']
compare(result,df)
assert_frame_equal(result,df)
_maybe_remove(store, 'df_tz')
df = DataFrame(dict(A = Timestamp('20130102',tz='US/Eastern'), B = Timestamp('20130102',tz='EET')),index=range(5))
self.assertRaises(TypeError, store.append, 'df_tz', df)
# this is ok
_maybe_remove(store, 'df_tz')
store.append('df_tz',df,data_columns=['A','B'])
result = store['df_tz']
compare(result,df)
assert_frame_equal(result,df)
# can't append with diff timezone
df = DataFrame(dict(A = Timestamp('20130102',tz='US/Eastern'), B = Timestamp('20130102',tz='CET')),index=range(5))
self.assertRaises(ValueError, store.append, 'df_tz', df)
# as index
with ensure_clean_store(self.path) as store:
# GH 4098 example
df = DataFrame(dict(A = Series(lrange(3), index=date_range('2000-1-1',periods=3,freq='H', tz='US/Eastern'))))
_maybe_remove(store, 'df')
store.put('df',df)
result = store.select('df')
assert_frame_equal(result,df)
_maybe_remove(store, 'df')
store.append('df',df)
result = store.select('df')
assert_frame_equal(result,df)
def test_store_timezone(self):
# GH2852
# issue storing datetime.date with a timezone as it resets when read back in a new timezone
import platform
if platform.system() == "Windows":
raise nose.SkipTest("timezone setting not supported on windows")
import datetime
import time
import os
# original method
with ensure_clean_store(self.path) as store:
today = datetime.date(2013,9,10)
df = DataFrame([1,2,3], index = [today, today, today])
store['obj1'] = df
result = store['obj1']
assert_frame_equal(result, df)
# with tz setting
orig_tz = os.environ.get('TZ')
def setTZ(tz):
if tz is None:
try:
del os.environ['TZ']
except:
pass
else:
os.environ['TZ']=tz
time.tzset()
try:
with ensure_clean_store(self.path) as store:
setTZ('EST5EDT')
today = datetime.date(2013,9,10)
df = DataFrame([1,2,3], index = [today, today, today])
store['obj1'] = df
setTZ('CST6CDT')
result = store['obj1']
assert_frame_equal(result, df)
finally:
setTZ(orig_tz)
def test_append_with_timedelta(self):
if _np_version_under1p7:
raise nose.SkipTest("requires numpy >= 1.7")
# GH 3577
# append timedelta
from datetime import timedelta
df = DataFrame(dict(A = Timestamp('20130101'), B = [ Timestamp('20130101') + timedelta(days=i,seconds=10) for i in range(10) ]))
df['C'] = df['A']-df['B']
df.ix[3:5,'C'] = np.nan
with ensure_clean_store(self.path) as store:
# table
_maybe_remove(store, 'df')
store.append('df',df,data_columns=True)
result = store.select('df')
assert_frame_equal(result,df)
result = store.select('df',Term("C<100000"))
assert_frame_equal(result,df)
result = store.select('df',Term("C","<",-3*86400))
assert_frame_equal(result,df.iloc[3:])
result = store.select('df',"C<'-3D'")
assert_frame_equal(result,df.iloc[3:])
# a bit hacky here as we don't really deal with the NaT properly
result = store.select('df',"C<'-500000s'")
result = result.dropna(subset=['C'])
assert_frame_equal(result,df.iloc[6:])
result = store.select('df',"C<'-3.5D'")
result = result.iloc[1:]
assert_frame_equal(result,df.iloc[4:])
# fixed
_maybe_remove(store, 'df2')
store.put('df2',df)
result = store.select('df2')
assert_frame_equal(result,df)
def test_remove(self):
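        """ test removing stored objects by key, including pathed keys and __delitem__ """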
with ensure_clean_store(self.path) as store:
ts = tm.makeTimeSeries()
df = tm.makeDataFrame()
store['a'] = ts
store['b'] = df
_maybe_remove(store, 'a')
self.assertEquals(len(store), 1)
tm.assert_frame_equal(df, store['b'])
_maybe_remove(store, 'b')
self.assertEquals(len(store), 0)
# nonexistence
self.assertRaises(KeyError, store.remove, 'a_nonexistent_store')
# pathing
store['a'] = ts
store['b/foo'] = df
_maybe_remove(store, 'foo')
_maybe_remove(store, 'b/foo')
self.assertEquals(len(store), 1)
store['a'] = ts
store['b/foo'] = df
_maybe_remove(store, 'b')
self.assertEquals(len(store), 1)
# __delitem__
store['a'] = ts
store['b'] = df
del store['a']
del store['b']
self.assertEquals(len(store), 0)
def test_remove_where(self):
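        """ test remove with where criteria: missing keys raise, an empty where drops the whole table, bad criteria raise """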
with ensure_clean_store(self.path) as store:
            # non-existence
crit1 = Term('index>foo')
self.assertRaises(KeyError, store.remove, 'a', [crit1])
# try to remove non-table (with crit)
# non-table ok (where = None)
wp = tm.makePanel()
store.put('wp', wp, format='table')
store.remove('wp', ["minor_axis=['A', 'D']"])
rs = store.select('wp')
expected = wp.reindex(minor_axis=['B', 'C'])
assert_panel_equal(rs, expected)
# empty where
_maybe_remove(store, 'wp')
store.put('wp', wp, format='table')
# deleted number (entire table)
n = store.remove('wp', [])
assert(n == 120)
# non - empty where
_maybe_remove(store, 'wp')
store.put('wp', wp, format='table')
self.assertRaises(ValueError, store.remove,
'wp', ['foo'])
            # selecting a non-table with a where
# store.put('wp2', wp, format='f')
# self.assertRaises(ValueError, store.remove,
# 'wp2', [('column', ['A', 'D'])])
def test_remove_crit(self):
with ensure_clean_store(self.path) as store:
wp = tm.makePanel()
# group row removal
date4 = wp.major_axis.take([0, 1, 2, 4, 5, 6, 8, 9, 10])
crit4 = Term('major_axis=date4')
store.put('wp3', wp, format='t')
n = store.remove('wp3', where=[crit4])
assert(n == 36)
result = store.select('wp3')
expected = wp.reindex(major_axis=wp.major_axis - date4)
assert_panel_equal(result, expected)
# upper half
store.put('wp', wp, format='table')
date = wp.major_axis[len(wp.major_axis) // 2]
crit1 = Term('major_axis>date')
crit2 = Term("minor_axis=['A', 'D']")
n = store.remove('wp', where=[crit1])
assert(n == 56)
n = store.remove('wp', where=[crit2])
assert(n == 32)
result = store['wp']
expected = wp.truncate(after=date).reindex(minor=['B', 'C'])
assert_panel_equal(result, expected)
# individual row elements
store.put('wp2', wp, format='table')
date1 = wp.major_axis[1:3]
crit1 = Term('major_axis=date1')
store.remove('wp2', where=[crit1])
result = store.select('wp2')
expected = wp.reindex(major_axis=wp.major_axis - date1)
assert_panel_equal(result, expected)
date2 = wp.major_axis[5]
crit2 = Term('major_axis=date2')
store.remove('wp2', where=[crit2])
result = store['wp2']
expected = wp.reindex(
major_axis=wp.major_axis - date1 - Index([date2]))
assert_panel_equal(result, expected)
date3 = [wp.major_axis[7], wp.major_axis[9]]
crit3 = Term('major_axis=date3')
store.remove('wp2', where=[crit3])
result = store['wp2']
expected = wp.reindex(
major_axis=wp.major_axis - date1 - Index([date2]) - Index(date3))
assert_panel_equal(result, expected)
# corners
store.put('wp4', wp, format='table')
n = store.remove(
'wp4', where=[Term('major_axis>wp.major_axis[-1]')])
result = store.select('wp4')
assert_panel_equal(result, wp)
def test_invalid_terms(self):
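        """ test that invalid where terms raise informative errors, both via the store and read_hdf """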
with ensure_clean_store(self.path) as store:
df = tm.makeTimeDataFrame()
df['string'] = 'foo'
df.ix[0:4,'string'] = 'bar'
wp = tm.makePanel()
p4d = tm.makePanel4D()
store.put('df', df, format='table')
store.put('wp', wp, format='table')
store.put('p4d', p4d, format='table')
# some invalid terms
self.assertRaises(ValueError, store.select, 'wp', "minor=['A', 'B']")
self.assertRaises(ValueError, store.select, 'wp', ["index=['20121114']"])
self.assertRaises(ValueError, store.select, 'wp', ["index=['20121114', '20121114']"])
self.assertRaises(TypeError, Term)
# more invalid
self.assertRaises(ValueError, store.select, 'df','df.index[3]')
self.assertRaises(SyntaxError, store.select, 'df','index>')
self.assertRaises(ValueError, store.select, 'wp', "major_axis<'20000108' & minor_axis['A', 'B']")
# from the docs
with ensure_clean_path(self.path) as path:
dfq = DataFrame(np.random.randn(10,4),columns=list('ABCD'),index=date_range('20130101',periods=10))
dfq.to_hdf(path,'dfq',format='table',data_columns=True)
# check ok
read_hdf(path,'dfq',where="index>Timestamp('20130104') & columns=['A', 'B']")
read_hdf(path,'dfq',where="A>0 or C>0")
# catch the invalid reference
with ensure_clean_path(self.path) as path:
dfq = DataFrame(np.random.randn(10,4),columns=list('ABCD'),index=date_range('20130101',periods=10))
dfq.to_hdf(path,'dfq',format='table')
self.assertRaises(ValueError, read_hdf, path,'dfq',where="A>0 or C>0")
def test_terms(self):
with ensure_clean_store(self.path) as store:
wp = tm.makePanel()
p4d = tm.makePanel4D()
store.put('wp', wp, table=True)
store.put('p4d', p4d, table=True)
# panel
result = store.select('wp', [Term(
'major_axis<"20000108"'), Term("minor_axis=['A', 'B']")])
expected = wp.truncate(after='20000108').reindex(minor=['A', 'B'])
assert_panel_equal(result, expected)
# with deprecation
result = store.select('wp', [Term(
'major_axis','<',"20000108"), Term("minor_axis=['A', 'B']")])
expected = wp.truncate(after='20000108').reindex(minor=['A', 'B'])
tm.assert_panel_equal(result, expected)
# p4d
result = store.select('p4d', [Term('major_axis<"20000108"'),
Term("minor_axis=['A', 'B']"),
Term("items=['ItemA', 'ItemB']")])
expected = p4d.truncate(after='20000108').reindex(
minor=['A', 'B'], items=['ItemA', 'ItemB'])
assert_panel4d_equal(result, expected)
# back compat invalid terms
terms = [
dict(field='major_axis', op='>', value='20121114'),
[ dict(field='major_axis', op='>', value='20121114') ],
[ "minor_axis=['A','B']", dict(field='major_axis', op='>', value='20121114') ]
]
for t in terms:
with tm.assert_produces_warning(expected_warning=DeprecationWarning):
Term(t)
# valid terms
terms = [
('major_axis=20121114'),
('major_axis>20121114'),
(("major_axis=['20121114', '20121114']"),),
('major_axis=datetime.datetime(2012, 11, 14)'),
'major_axis> 20121114',
'major_axis >20121114',
'major_axis > 20121114',
(("minor_axis=['A', 'B']"),),
(("minor_axis=['A', 'B']"),),
((("minor_axis==['A', 'B']"),),),
(("items=['ItemA', 'ItemB']"),),
('items=ItemA'),
]
for t in terms:
store.select('wp', t)
store.select('p4d', t)
# valid for p4d only
terms = [
(("labels=['l1', 'l2']"),),
Term("labels=['l1', 'l2']"),
]
for t in terms:
store.select('p4d', t)
def test_term_compat(self):
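        """ test back-compat Term(field, op, value) syntax for select and remove """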
with ensure_clean_store(self.path) as store:
wp = Panel(np.random.randn(2, 5, 4), items=['Item1', 'Item2'],
major_axis=date_range('1/1/2000', periods=5),
minor_axis=['A', 'B', 'C', 'D'])
store.append('wp',wp)
result = store.select('wp', [Term('major_axis>20000102'),
Term('minor_axis', '=', ['A','B']) ])
expected = wp.loc[:,wp.major_axis>Timestamp('20000102'),['A','B']]
assert_panel_equal(result, expected)
store.remove('wp', Term('major_axis>20000103'))
result = store.select('wp')
expected = wp.loc[:,wp.major_axis<=Timestamp('20000103'),:]
assert_panel_equal(result, expected)
with ensure_clean_store(self.path) as store:
wp = Panel(np.random.randn(2, 5, 4), items=['Item1', 'Item2'],
major_axis=date_range('1/1/2000', periods=5),
minor_axis=['A', 'B', 'C', 'D'])
store.append('wp',wp)
            # datetime values in back-compat Term form
result = store.select('wp', [Term('major_axis','>',datetime.datetime(2000,1,2))])
expected = wp.loc[:,wp.major_axis>Timestamp('20000102')]
assert_panel_equal(result, expected)
result = store.select('wp', [Term('major_axis','>',datetime.datetime(2000,1,2,0,0))])
expected = wp.loc[:,wp.major_axis>Timestamp('20000102')]
assert_panel_equal(result, expected)
result = store.select('wp', [Term('major_axis','=',[datetime.datetime(2000,1,2,0,0),datetime.datetime(2000,1,3,0,0)])])
expected = wp.loc[:,[Timestamp('20000102'),Timestamp('20000103')]]
assert_panel_equal(result, expected)
result = store.select('wp', [Term('minor_axis','=',['A','B'])])
expected = wp.loc[:,:,['A','B']]
assert_panel_equal(result, expected)
def test_same_name_scoping(self):
with ensure_clean_store(self.path) as store:
import pandas as pd
df = DataFrame(np.random.randn(20, 2),index=pd.date_range('20130101',periods=20))
store.put('df', df, table=True)
expected = df[df.index>pd.Timestamp('20130105')]
import datetime
result = store.select('df','index>datetime.datetime(2013,1,5)')
assert_frame_equal(result,expected)
from datetime import datetime
# technically an error, but allow it
result = store.select('df','index>datetime.datetime(2013,1,5)')
assert_frame_equal(result,expected)
result = store.select('df','index>datetime(2013,1,5)')
assert_frame_equal(result,expected)
def test_series(self):
s = tm.makeStringSeries()
self._check_roundtrip(s, tm.assert_series_equal)
ts = tm.makeTimeSeries()
self._check_roundtrip(ts, tm.assert_series_equal)
ts2 = Series(ts.index, Index(ts.index, dtype=object))
self._check_roundtrip(ts2, tm.assert_series_equal)
ts3 = Series(ts.values, Index(np.asarray(ts.index, dtype=object),
dtype=object))
self._check_roundtrip(ts3, tm.assert_series_equal)
def test_sparse_series(self):
s = tm.makeStringSeries()
s[3:5] = np.nan
ss = s.to_sparse()
self._check_roundtrip(ss, tm.assert_series_equal,
check_series_type=True)
ss2 = s.to_sparse(kind='integer')
self._check_roundtrip(ss2, tm.assert_series_equal,
check_series_type=True)
ss3 = s.to_sparse(fill_value=0)
self._check_roundtrip(ss3, tm.assert_series_equal,
check_series_type=True)
def test_sparse_frame(self):
s = tm.makeDataFrame()
s.ix[3:5, 1:3] = np.nan
s.ix[8:10, -2] = np.nan
ss = s.to_sparse()
self._check_double_roundtrip(ss, tm.assert_frame_equal,
check_frame_type=True)
ss2 = s.to_sparse(kind='integer')
self._check_double_roundtrip(ss2, tm.assert_frame_equal,
check_frame_type=True)
ss3 = s.to_sparse(fill_value=0)
self._check_double_roundtrip(ss3, tm.assert_frame_equal,
check_frame_type=True)
def test_sparse_panel(self):
items = ['x', 'y', 'z']
p = Panel(dict((i, tm.makeDataFrame().ix[:2, :2]) for i in items))
sp = p.to_sparse()
self._check_double_roundtrip(sp, assert_panel_equal,
check_panel_type=True)
sp2 = p.to_sparse(kind='integer')
self._check_double_roundtrip(sp2, assert_panel_equal,
check_panel_type=True)
sp3 = p.to_sparse(fill_value=0)
self._check_double_roundtrip(sp3, assert_panel_equal,
check_panel_type=True)
def test_float_index(self):
# GH #454
index = np.random.randn(10)
s = Series(np.random.randn(10), index=index)
self._check_roundtrip(s, tm.assert_series_equal)
def test_tuple_index(self):
# GH #492
col = np.arange(10)
idx = [(0., 1.), (2., 3.), (4., 5.)]
data = np.random.randn(30).reshape((3, 10))
DF = DataFrame(data, index=idx, columns=col)
with tm.assert_produces_warning(expected_warning=PerformanceWarning):
self._check_roundtrip(DF, tm.assert_frame_equal)
def test_index_types(self):
values = np.random.randn(2)
func = lambda l, r: tm.assert_series_equal(l, r,
check_dtype=True,
check_index_type=True,
check_series_type=True)
with tm.assert_produces_warning(expected_warning=PerformanceWarning):
ser = Series(values, [0, 'y'])
self._check_roundtrip(ser, func)
with tm.assert_produces_warning(expected_warning=PerformanceWarning):
ser = Series(values, [datetime.datetime.today(), 0])
self._check_roundtrip(ser, func)
with tm.assert_produces_warning(expected_warning=PerformanceWarning):
ser = Series(values, ['y', 0])
self._check_roundtrip(ser, func)
with tm.assert_produces_warning(expected_warning=PerformanceWarning):
ser = Series(values, [datetime.date.today(), 'a'])
self._check_roundtrip(ser, func)
with tm.assert_produces_warning(expected_warning=PerformanceWarning):
ser = Series(values, [1.23, 'b'])
self._check_roundtrip(ser, func)
ser = Series(values, [1, 1.53])
self._check_roundtrip(ser, func)
ser = Series(values, [1, 5])
self._check_roundtrip(ser, func)
ser = Series(values, [datetime.datetime(
2012, 1, 1), datetime.datetime(2012, 1, 2)])
self._check_roundtrip(ser, func)
def test_timeseries_preepoch(self):
if sys.version_info[0] == 2 and sys.version_info[1] < 7:
raise nose.SkipTest("won't work on Python < 2.7")
dr = bdate_range('1/1/1940', '1/1/1960')
ts = Series(np.random.randn(len(dr)), index=dr)
try:
self._check_roundtrip(ts, tm.assert_series_equal)
except OverflowError:
            raise nose.SkipTest('known failure on some windows platforms')
def test_frame(self):
df = tm.makeDataFrame()
# put in some random NAs
df.values[0, 0] = np.nan
df.values[5, 3] = np.nan
self._check_roundtrip_table(df, tm.assert_frame_equal)
self._check_roundtrip(df, tm.assert_frame_equal)
self._check_roundtrip_table(df, tm.assert_frame_equal,
compression=True)
self._check_roundtrip(df, tm.assert_frame_equal,
compression=True)
tdf = tm.makeTimeDataFrame()
self._check_roundtrip(tdf, tm.assert_frame_equal)
self._check_roundtrip(tdf, tm.assert_frame_equal,
compression=True)
with ensure_clean_store(self.path) as store:
# not consolidated
df['foo'] = np.random.randn(len(df))
store['df'] = df
recons = store['df']
self.assert_(recons._data.is_consolidated())
# empty
self._check_roundtrip(df[:0], tm.assert_frame_equal)
def test_empty_series_frame(self):
s0 = Series()
s1 = Series(name='myseries')
df0 = DataFrame()
df1 = DataFrame(index=['a', 'b', 'c'])
df2 = DataFrame(columns=['d', 'e', 'f'])
self._check_roundtrip(s0, tm.assert_series_equal)
self._check_roundtrip(s1, tm.assert_series_equal)
self._check_roundtrip(df0, tm.assert_frame_equal)
self._check_roundtrip(df1, tm.assert_frame_equal)
self._check_roundtrip(df2, tm.assert_frame_equal)
def test_empty_series(self):
for dtype in [np.int64, np.float64, np.object, 'm8[ns]', 'M8[ns]']:
s = Series(dtype=dtype)
self._check_roundtrip(s, tm.assert_series_equal)
def test_can_serialize_dates(self):
rng = [x.date() for x in bdate_range('1/1/2000', '1/30/2000')]
frame = DataFrame(np.random.randn(len(rng), 4), index=rng)
self._check_roundtrip(frame, tm.assert_frame_equal)
def test_timezones(self):
rng = date_range('1/1/2000', '1/30/2000', tz='US/Eastern')
frame = DataFrame(np.random.randn(len(rng), 4), index=rng)
with ensure_clean_store(self.path) as store:
store['frame'] = frame
recons = store['frame']
self.assert_(recons.index.equals(rng))
self.assertEquals(rng.tz, recons.index.tz)
def test_fixed_offset_tz(self):
rng = date_range('1/1/2000 00:00:00-07:00', '1/30/2000 00:00:00-07:00')
frame = DataFrame(np.random.randn(len(rng), 4), index=rng)
with ensure_clean_store(self.path) as store:
store['frame'] = frame
recons = store['frame']
self.assert_(recons.index.equals(rng))
self.assertEquals(rng.tz, recons.index.tz)
def test_store_hierarchical(self):
index = MultiIndex(levels=[['foo', 'bar', 'baz', 'qux'],
['one', 'two', 'three']],
labels=[[0, 0, 0, 1, 1, 2, 2, 3, 3, 3],
[0, 1, 2, 0, 1, 1, 2, 0, 1, 2]],
names=['foo', 'bar'])
frame = DataFrame(np.random.randn(10, 3), index=index,
columns=['A', 'B', 'C'])
self._check_roundtrip(frame, tm.assert_frame_equal)
self._check_roundtrip(frame.T, tm.assert_frame_equal)
self._check_roundtrip(frame['A'], tm.assert_series_equal)
# check that the names are stored
with ensure_clean_store(self.path) as store:
store['frame'] = frame
recons = store['frame']
assert(recons.index.names == ('foo', 'bar'))
def test_store_index_name(self):
df = tm.makeDataFrame()
df.index.name = 'foo'
with ensure_clean_store(self.path) as store:
store['frame'] = df
recons = store['frame']
assert(recons.index.name == 'foo')
def test_store_series_name(self):
df = tm.makeDataFrame()
series = df['A']
with ensure_clean_store(self.path) as store:
store['series'] = series
recons = store['series']
assert(recons.name == 'A')
def test_store_mixed(self):
def _make_one():
df = tm.makeDataFrame()
df['obj1'] = 'foo'
df['obj2'] = 'bar'
df['bool1'] = df['A'] > 0
df['bool2'] = df['B'] > 0
df['int1'] = 1
df['int2'] = 2
return df.consolidate()
df1 = _make_one()
df2 = _make_one()
self._check_roundtrip(df1, tm.assert_frame_equal)
self._check_roundtrip(df2, tm.assert_frame_equal)
with ensure_clean_store(self.path) as store:
store['obj'] = df1
tm.assert_frame_equal(store['obj'], df1)
store['obj'] = df2
tm.assert_frame_equal(store['obj'], df2)
# check that can store Series of all of these types
self._check_roundtrip(df1['obj1'], tm.assert_series_equal)
self._check_roundtrip(df1['bool1'], tm.assert_series_equal)
self._check_roundtrip(df1['int1'], tm.assert_series_equal)
# try with compression
self._check_roundtrip(df1['obj1'], tm.assert_series_equal,
compression=True)
self._check_roundtrip(df1['bool1'], tm.assert_series_equal,
compression=True)
self._check_roundtrip(df1['int1'], tm.assert_series_equal,
compression=True)
self._check_roundtrip(df1, tm.assert_frame_equal,
compression=True)
def test_wide(self):
wp = tm.makePanel()
self._check_roundtrip(wp, assert_panel_equal)
def test_wide_table(self):
wp = tm.makePanel()
self._check_roundtrip_table(wp, assert_panel_equal)
def test_select_with_dups(self):
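        """ test selection with duplicate column names, including dups across dtypes """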
# single dtypes
df = DataFrame(np.random.randn(10,4),columns=['A','A','B','B'])
df.index = date_range('20130101 9:30',periods=10,freq='T')
with ensure_clean_store(self.path) as store:
store.append('df',df)
result = store.select('df')
expected = df
assert_frame_equal(result,expected,by_blocks=True)
result = store.select('df',columns=df.columns)
expected = df
assert_frame_equal(result,expected,by_blocks=True)
result = store.select('df',columns=['A'])
expected = df.loc[:,['A']]
assert_frame_equal(result,expected)
        # dups across dtypes
df = concat([DataFrame(np.random.randn(10,4),columns=['A','A','B','B']),
DataFrame(np.random.randint(0,10,size=20).reshape(10,2),columns=['A','C'])],
axis=1)
df.index = date_range('20130101 9:30',periods=10,freq='T')
with ensure_clean_store(self.path) as store:
store.append('df',df)
result = store.select('df')
expected = df
assert_frame_equal(result,expected,by_blocks=True)
result = store.select('df',columns=df.columns)
expected = df
assert_frame_equal(result,expected,by_blocks=True)
expected = df.loc[:,['A']]
result = store.select('df',columns=['A'])
assert_frame_equal(result,expected,by_blocks=True)
expected = df.loc[:,['B','A']]
result = store.select('df',columns=['B','A'])
assert_frame_equal(result,expected,by_blocks=True)
# duplicates on both index and columns
with ensure_clean_store(self.path) as store:
store.append('df',df)
store.append('df',df)
expected = df.loc[:,['B','A']]
expected = concat([expected, expected])
result = store.select('df',columns=['B','A'])
assert_frame_equal(result,expected,by_blocks=True)
def test_wide_table_dups(self):
wp = tm.makePanel()
with ensure_clean_store(self.path) as store:
store.put('panel', wp, format='table')
store.put('panel', wp, format='table', append=True)
with tm.assert_produces_warning(expected_warning=DuplicateWarning):
recons = store['panel']
assert_panel_equal(recons, wp)
def test_long(self):
def _check(left, right):
assert_panel_equal(left.to_panel(), right.to_panel())
wp = tm.makePanel()
self._check_roundtrip(wp.to_frame(), _check)
# empty
# self._check_roundtrip(wp.to_frame()[:0], _check)
def test_longpanel(self):
pass
def test_overwrite_node(self):
with ensure_clean_store(self.path) as store:
store['a'] = tm.makeTimeDataFrame()
ts = tm.makeTimeSeries()
store['a'] = ts
tm.assert_series_equal(store['a'], ts)
def test_sparse_with_compression(self):
# GH 2931
# make sparse dataframe
df = DataFrame(np.random.binomial(n=1, p=.01, size=(1e3, 10))).to_sparse(fill_value=0)
# case 1: store uncompressed
self._check_double_roundtrip(df, tm.assert_frame_equal,
compression = False,
check_frame_type=True)
# case 2: store compressed (works)
self._check_double_roundtrip(df, tm.assert_frame_equal,
compression = 'zlib',
check_frame_type=True)
# set one series to be completely sparse
df[0] = np.zeros(1e3)
# case 3: store df with completely sparse series uncompressed
self._check_double_roundtrip(df, tm.assert_frame_equal,
compression = False,
check_frame_type=True)
# case 4: try storing df with completely sparse series compressed (fails)
self._check_double_roundtrip(df, tm.assert_frame_equal,
compression = 'zlib',
check_frame_type=True)
def test_select(self):
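        """ test basic put/append and select, including the columns= keyword and data column queries """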
wp = tm.makePanel()
with ensure_clean_store(self.path) as store:
# put/select ok
_maybe_remove(store, 'wp')
store.put('wp', wp, format='table')
store.select('wp')
# non-table ok (where = None)
_maybe_remove(store, 'wp')
store.put('wp2', wp)
store.select('wp2')
# selection on the non-indexable with a large number of columns
wp = Panel(
np.random.randn(100, 100, 100), items=['Item%03d' % i for i in range(100)],
major_axis=date_range('1/1/2000', periods=100), minor_axis=['E%03d' % i for i in range(100)])
_maybe_remove(store, 'wp')
store.append('wp', wp)
items = ['Item%03d' % i for i in range(80)]
result = store.select('wp', Term('items=items'))
expected = wp.reindex(items=items)
assert_panel_equal(expected, result)
            # selecting a non-table with a where
# self.assertRaises(ValueError, store.select,
# 'wp2', ('column', ['A', 'D']))
# select with columns=
df = tm.makeTimeDataFrame()
_maybe_remove(store, 'df')
store.append('df', df)
result = store.select('df', columns=['A', 'B'])
expected = df.reindex(columns=['A', 'B'])
tm.assert_frame_equal(expected, result)
            # equivalently
result = store.select('df', [("columns=['A', 'B']")])
expected = df.reindex(columns=['A', 'B'])
tm.assert_frame_equal(expected, result)
# with a data column
_maybe_remove(store, 'df')
store.append('df', df, data_columns=['A'])
result = store.select('df', ['A > 0'], columns=['A', 'B'])
expected = df[df.A > 0].reindex(columns=['A', 'B'])
tm.assert_frame_equal(expected, result)
            # all as data columns
_maybe_remove(store, 'df')
store.append('df', df, data_columns=True)
result = store.select('df', ['A > 0'], columns=['A', 'B'])
expected = df[df.A > 0].reindex(columns=['A', 'B'])
tm.assert_frame_equal(expected, result)
# with a data column, but different columns
_maybe_remove(store, 'df')
store.append('df', df, data_columns=['A'])
result = store.select('df', ['A > 0'], columns=['C', 'D'])
expected = df[df.A > 0].reindex(columns=['C', 'D'])
tm.assert_frame_equal(expected, result)
def test_select_dtypes(self):
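        """ test selection on Timestamp, boolean, integer- and float-valued data columns, including nan handling """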
with ensure_clean_store(self.path) as store:
# with a Timestamp data column (GH #2637)
df = DataFrame(dict(ts=bdate_range('2012-01-01', periods=300), A=np.random.randn(300)))
_maybe_remove(store, 'df')
store.append('df', df, data_columns=['ts', 'A'])
result = store.select('df', [Term("ts>=Timestamp('2012-02-01')")])
expected = df[df.ts >= Timestamp('2012-02-01')]
tm.assert_frame_equal(expected, result)
# bool columns (GH #2849)
df = DataFrame(np.random.randn(5,2), columns =['A','B'])
df['object'] = 'foo'
df.ix[4:5,'object'] = 'bar'
df['boolv'] = df['A'] > 0
_maybe_remove(store, 'df')
store.append('df', df, data_columns = True)
expected = df[df.boolv == True].reindex(columns=['A','boolv'])
for v in [True,'true',1]:
result = store.select('df', Term('boolv == %s' % str(v)), columns = ['A','boolv'])
tm.assert_frame_equal(expected, result)
expected = df[df.boolv == False ].reindex(columns=['A','boolv'])
for v in [False,'false',0]:
result = store.select('df', Term('boolv == %s' % str(v)), columns = ['A','boolv'])
tm.assert_frame_equal(expected, result)
# integer index
df = DataFrame(dict(A=np.random.rand(20), B=np.random.rand(20)))
_maybe_remove(store, 'df_int')
store.append('df_int', df)
result = store.select(
'df_int', [Term("index<10"), Term("columns=['A']")])
expected = df.reindex(index=list(df.index)[0:10],columns=['A'])
tm.assert_frame_equal(expected, result)
# float index
df = DataFrame(dict(A=np.random.rand(
20), B=np.random.rand(20), index=np.arange(20, dtype='f8')))
_maybe_remove(store, 'df_float')
store.append('df_float', df)
result = store.select(
'df_float', [Term("index<10.0"), Term("columns=['A']")])
expected = df.reindex(index=list(df.index)[0:10],columns=['A'])
tm.assert_frame_equal(expected, result)
with ensure_clean_store(self.path) as store:
# floats w/o NaN
df = DataFrame(dict(cols = range(11), values = range(11)),dtype='float64')
df['cols'] = (df['cols']+10).apply(str)
store.append('df1',df,data_columns=True)
result = store.select(
'df1', where='values>2.0')
expected = df[df['values']>2.0]
tm.assert_frame_equal(expected, result)
# floats with NaN
df.iloc[0] = np.nan
expected = df[df['values']>2.0]
store.append('df2',df,data_columns=True,index=False)
result = store.select(
'df2', where='values>2.0')
tm.assert_frame_equal(expected, result)
# https://github.com/PyTables/PyTables/issues/282
# bug in selection when 0th row has a np.nan and an index
#store.append('df3',df,data_columns=True)
#result = store.select(
# 'df3', where='values>2.0')
#tm.assert_frame_equal(expected, result)
            # a nan not in the first position is ok too
df = DataFrame(dict(cols = range(11), values = range(11)),dtype='float64')
df['cols'] = (df['cols']+10).apply(str)
df.iloc[1] = np.nan
expected = df[df['values']>2.0]
store.append('df4',df,data_columns=True)
result = store.select(
'df4', where='values>2.0')
tm.assert_frame_equal(expected, result)
def test_select_with_many_inputs(self):
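        """ test selection with large value lists (isin-style selectors) on data columns and the index """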
with ensure_clean_store(self.path) as store:
df = DataFrame(dict(ts=bdate_range('2012-01-01', periods=300),
A=np.random.randn(300),
B=range(300),
users = ['a']*50 + ['b']*50 + ['c']*100 + ['a%03d' % i for i in range(100)]))
_maybe_remove(store, 'df')
store.append('df', df, data_columns=['ts', 'A', 'B', 'users'])
# regular select
result = store.select('df', [Term("ts>=Timestamp('2012-02-01')")])
expected = df[df.ts >= Timestamp('2012-02-01')]
tm.assert_frame_equal(expected, result)
# small selector
result = store.select('df', [Term("ts>=Timestamp('2012-02-01') & users=['a','b','c']")])
expected = df[ (df.ts >= Timestamp('2012-02-01')) & df.users.isin(['a','b','c']) ]
tm.assert_frame_equal(expected, result)
# big selector along the columns
selector = [ 'a','b','c' ] + [ 'a%03d' % i for i in range(60) ]
result = store.select('df', [Term("ts>=Timestamp('2012-02-01')"),Term('users=selector')])
expected = df[ (df.ts >= Timestamp('2012-02-01')) & df.users.isin(selector) ]
tm.assert_frame_equal(expected, result)
selector = range(100,200)
result = store.select('df', [Term('B=selector')])
expected = df[ df.B.isin(selector) ]
tm.assert_frame_equal(expected, result)
self.assert_(len(result) == 100)
# big selector along the index
selector = Index(df.ts[0:100].values)
result = store.select('df', [Term('ts=selector')])
expected = df[ df.ts.isin(selector.values) ]
tm.assert_frame_equal(expected, result)
self.assert_(len(result) == 100)
def test_select_iterator(self):
# single table
with ensure_clean_store(self.path) as store:
df = tm.makeTimeDataFrame(500)
_maybe_remove(store, 'df')
store.append('df', df)
expected = store.select('df')
results = []
for s in store.select('df',iterator=True):
results.append(s)
result = concat(results)
tm.assert_frame_equal(expected, result)
results = []
for s in store.select('df',chunksize=100):
results.append(s)
self.assert_(len(results) == 5)
result = concat(results)
tm.assert_frame_equal(expected, result)
results = []
for s in store.select('df',chunksize=150):
results.append(s)
result = concat(results)
tm.assert_frame_equal(result, expected)
with ensure_clean_path(self.path) as path:
df = tm.makeTimeDataFrame(500)
df.to_hdf(path,'df_non_table')
self.assertRaises(TypeError, read_hdf, path,'df_non_table',chunksize=100)
self.assertRaises(TypeError, read_hdf, path,'df_non_table',iterator=True)
with ensure_clean_path(self.path) as path:
df = tm.makeTimeDataFrame(500)
df.to_hdf(path,'df',format='table')
results = []
for x in read_hdf(path,'df',chunksize=100):
results.append(x)
self.assert_(len(results) == 5)
result = concat(results)
tm.assert_frame_equal(result, df)
tm.assert_frame_equal(result, read_hdf(path,'df'))
# multiple
with ensure_clean_store(self.path) as store:
df1 = tm.makeTimeDataFrame(500)
store.append('df1',df1,data_columns=True)
df2 = tm.makeTimeDataFrame(500).rename(columns=lambda x: "%s_2" % x)
df2['foo'] = 'bar'
store.append('df2',df2)
df = concat([df1, df2], axis=1)
# full selection
expected = store.select_as_multiple(
['df1', 'df2'], selector='df1')
results = []
for s in store.select_as_multiple(
['df1', 'df2'], selector='df1', chunksize=150):
results.append(s)
result = concat(results)
tm.assert_frame_equal(expected, result)
# where selection
#expected = store.select_as_multiple(
# ['df1', 'df2'], where= Term('A>0'), selector='df1')
#results = []
#for s in store.select_as_multiple(
# ['df1', 'df2'], where= Term('A>0'), selector='df1', chunksize=25):
# results.append(s)
#result = concat(results)
#tm.assert_frame_equal(expected, result)
def test_retain_index_attributes(self):
# GH 3499, losing frequency info on index recreation
df = DataFrame(dict(A = Series(lrange(3),
index=date_range('2000-1-1',periods=3,freq='H'))))
with ensure_clean_store(self.path) as store:
_maybe_remove(store,'data')
store.put('data', df, format='table')
result = store.get('data')
tm.assert_frame_equal(df,result)
for attr in ['freq','tz','name']:
for idx in ['index','columns']:
self.assert_(getattr(getattr(df,idx),attr,None) == getattr(getattr(result,idx),attr,None))
# try to append a table with a different frequency
with tm.assert_produces_warning(expected_warning=AttributeConflictWarning):
df2 = DataFrame(dict(A = Series(lrange(3),
index=date_range('2002-1-1',periods=3,freq='D'))))
store.append('data',df2)
self.assert_(store.get_storer('data').info['index']['freq'] is None)
# this is ok
_maybe_remove(store,'df2')
df2 = DataFrame(dict(A = Series(lrange(3),
index=[Timestamp('20010101'),Timestamp('20010102'),Timestamp('20020101')])))
store.append('df2',df2)
df3 = DataFrame(dict(A = Series(lrange(3),index=date_range('2002-1-1',periods=3,freq='D'))))
store.append('df2',df3)
def test_retain_index_attributes2(self):
with ensure_clean_path(self.path) as path:
with tm.assert_produces_warning(expected_warning=AttributeConflictWarning):
df = DataFrame(dict(A = Series(lrange(3), index=date_range('2000-1-1',periods=3,freq='H'))))
df.to_hdf(path,'data',mode='w',append=True)
df2 = DataFrame(dict(A = Series(lrange(3), index=date_range('2002-1-1',periods=3,freq='D'))))
df2.to_hdf(path,'data',append=True)
idx = date_range('2000-1-1',periods=3,freq='H')
idx.name = 'foo'
df = DataFrame(dict(A = Series(lrange(3), index=idx)))
df.to_hdf(path,'data',mode='w',append=True)
self.assert_(read_hdf(path,'data').index.name == 'foo')
with tm.assert_produces_warning(expected_warning=AttributeConflictWarning):
idx2 = date_range('2001-1-1',periods=3,freq='H')
idx2.name = 'bar'
df2 = DataFrame(dict(A = Series(lrange(3), index=idx2)))
df2.to_hdf(path,'data',append=True)
self.assert_(read_hdf(path,'data').index.name is None)
def test_panel_select(self):
wp = tm.makePanel()
with ensure_clean_store(self.path) as store:
store.put('wp', wp, format='table')
date = wp.major_axis[len(wp.major_axis) // 2]
crit1 = ('major_axis>=date')
crit2 = ("minor_axis=['A', 'D']")
result = store.select('wp', [crit1, crit2])
expected = wp.truncate(before=date).reindex(minor=['A', 'D'])
assert_panel_equal(result, expected)
result = store.select(
'wp', ['major_axis>="20000124"', ("minor_axis=['A', 'B']")])
expected = wp.truncate(before='20000124').reindex(minor=['A', 'B'])
assert_panel_equal(result, expected)
def test_frame_select(self):
df = tm.makeTimeDataFrame()
with ensure_clean_store(self.path) as store:
store.put('frame', df,format='table')
date = df.index[len(df) // 2]
crit1 = Term('index>=date')
crit2 = ("columns=['A', 'D']")
crit3 = ('columns=A')
result = store.select('frame', [crit1, crit2])
expected = df.ix[date:, ['A', 'D']]
tm.assert_frame_equal(result, expected)
result = store.select('frame', [crit3])
expected = df.ix[:, ['A']]
tm.assert_frame_equal(result, expected)
# invalid terms
df = tm.makeTimeDataFrame()
store.append('df_time', df)
self.assertRaises(
ValueError, store.select, 'df_time', [Term("index>0")])
# can't select if not written as table
# store['frame'] = df
# self.assertRaises(ValueError, store.select,
# 'frame', [crit1, crit2])
def test_frame_select_complex(self):
# select via complex criteria
df = tm.makeTimeDataFrame()
df['string'] = 'foo'
df.loc[df.index[0:4],'string'] = 'bar'
with ensure_clean_store(self.path) as store:
store.put('df', df, table=True, data_columns=['string'])
# empty
result = store.select('df', 'index>df.index[3] & string="bar"')
expected = df.loc[(df.index>df.index[3]) & (df.string=='bar')]
tm.assert_frame_equal(result, expected)
result = store.select('df', 'index>df.index[3] & string="foo"')
expected = df.loc[(df.index>df.index[3]) & (df.string=='foo')]
tm.assert_frame_equal(result, expected)
# or
result = store.select('df', 'index>df.index[3] | string="bar"')
expected = df.loc[(df.index>df.index[3]) | (df.string=='bar')]
tm.assert_frame_equal(result, expected)
result = store.select('df', '(index>df.index[3] & index<=df.index[6]) | string="bar"')
expected = df.loc[((df.index>df.index[3]) & (df.index<=df.index[6])) | (df.string=='bar')]
tm.assert_frame_equal(result, expected)
# invert
result = store.select('df', 'string!="bar"')
expected = df.loc[df.string!='bar']
tm.assert_frame_equal(result, expected)
# invert not implemented in numexpr :(
self.assertRaises(NotImplementedError, store.select, 'df', '~(string="bar")')
# invert ok for filters
result = store.select('df', "~(columns=['A','B'])")
expected = df.loc[:,df.columns-['A','B']]
tm.assert_frame_equal(result, expected)
# in
result = store.select('df', "index>df.index[3] & columns in ['A','B']")
expected = df.loc[df.index>df.index[3]].reindex(columns=['A','B'])
tm.assert_frame_equal(result, expected)
def test_frame_select_complex2(self):
with ensure_clean_path(['parms.hdf','hist.hdf']) as paths:
pp, hh = paths
# use non-trivial selection criteria
parms = DataFrame({ 'A' : [1,1,2,2,3] })
parms.to_hdf(pp,'df',mode='w',format='table',data_columns=['A'])
selection = read_hdf(pp,'df',where='A=[2,3]')
hist = DataFrame(np.random.randn(25,1),columns=['data'],
index=MultiIndex.from_tuples([ (i,j) for i in range(5) for j in range(5) ],
names=['l1','l2']))
hist.to_hdf(hh,'df',mode='w',format='table')
expected = read_hdf(hh,'df',where=Term('l1','=',[2,3,4]))
# list like
result = read_hdf(hh,'df',where=Term('l1','=',selection.index.tolist()))
assert_frame_equal(result, expected)
l = selection.index.tolist()
# scope with list-like
store = HDFStore(hh)
result = store.select('df',where='l1=l')
assert_frame_equal(result, expected)
store.close()
result = read_hdf(hh,'df',where='l1=l')
assert_frame_equal(result, expected)
# index
index = selection.index
result = read_hdf(hh,'df',where='l1=index')
assert_frame_equal(result, expected)
result = read_hdf(hh,'df',where='l1=selection.index')
assert_frame_equal(result, expected)
result = read_hdf(hh,'df',where='l1=selection.index.tolist()')
assert_frame_equal(result, expected)
result = read_hdf(hh,'df',where='l1=list(selection.index)')
assert_frame_equal(result, expected)
# scope with index
store = HDFStore(hh)
result = store.select('df',where='l1=index')
assert_frame_equal(result, expected)
result = store.select('df',where='l1=selection.index')
assert_frame_equal(result, expected)
result = store.select('df',where='l1=selection.index.tolist()')
assert_frame_equal(result, expected)
result = store.select('df',where='l1=list(selection.index)')
assert_frame_equal(result, expected)
store.close()
def test_invalid_filtering(self):
# can't use more than one filter (atm)
df = tm.makeTimeDataFrame()
with ensure_clean_store(self.path) as store:
store.put('df', df, table=True)
# not implemented
self.assertRaises(NotImplementedError, store.select, 'df', "columns=['A'] | columns=['B']")
# in theory we could deal with this
self.assertRaises(NotImplementedError, store.select, 'df', "columns=['A','B'] & columns=['C']")
def test_string_select(self):
# GH 2973
with ensure_clean_store(self.path) as store:
df = tm.makeTimeDataFrame()
# test string ==/!=
df['x'] = 'none'
df.ix[2:7,'x'] = ''
store.append('df',df,data_columns=['x'])
result = store.select('df',Term('x=none'))
expected = df[df.x == 'none']
assert_frame_equal(result,expected)
try:
result = store.select('df',Term('x!=none'))
expected = df[df.x != 'none']
assert_frame_equal(result,expected)
except Exception as detail:
print("[{0}]".format(detail))
print(store)
print(expected)
df2 = df.copy()
df2.loc[df2.x=='','x'] = np.nan
store.append('df2',df2,data_columns=['x'])
result = store.select('df2',Term('x!=none'))
expected = df2[isnull(df2.x)]
assert_frame_equal(result,expected)
# int ==/!=
df['int'] = 1
df.ix[2:7,'int'] = 2
store.append('df3',df,data_columns=['int'])
result = store.select('df3',Term('int=2'))
expected = df[df.int==2]
assert_frame_equal(result,expected)
result = store.select('df3',Term('int!=2'))
expected = df[df.int!=2]
assert_frame_equal(result,expected)
def test_read_column(self):
df = tm.makeTimeDataFrame()
with ensure_clean_store(self.path) as store:
_maybe_remove(store, 'df')
store.append('df', df)
# error
self.assertRaises(KeyError, store.select_column, 'df', 'foo')
def f():
store.select_column('df', 'index', where = ['index>5'])
self.assertRaises(Exception, f)
# valid
result = store.select_column('df', 'index')
tm.assert_almost_equal(result.values, Series(df.index).values)
self.assert_(isinstance(result,Series))
# not a data indexable column
self.assertRaises(
ValueError, store.select_column, 'df', 'values_block_0')
# a data column
df2 = df.copy()
df2['string'] = 'foo'
store.append('df2', df2, data_columns=['string'])
result = store.select_column('df2', 'string')
tm.assert_almost_equal(result.values, df2['string'].values)
# a data column with NaNs, result excludes the NaNs
df3 = df.copy()
df3['string'] = 'foo'
df3.ix[4:6, 'string'] = np.nan
store.append('df3', df3, data_columns=['string'])
result = store.select_column('df3', 'string')
tm.assert_almost_equal(result.values, df3['string'].values)
def test_coordinates(self):
df = tm.makeTimeDataFrame()
with ensure_clean_store(self.path) as store:
_maybe_remove(store, 'df')
store.append('df', df)
# all
c = store.select_as_coordinates('df')
assert((c.values == np.arange(len(df.index))).all() == True)
# get coordinates back & test vs frame
_maybe_remove(store, 'df')
df = DataFrame(dict(A=lrange(5), B=lrange(5)))
store.append('df', df)
c = store.select_as_coordinates('df', ['index<3'])
assert((c.values == np.arange(3)).all() == True)
result = store.select('df', where=c)
expected = df.ix[0:2, :]
tm.assert_frame_equal(result, expected)
c = store.select_as_coordinates('df', ['index>=3', 'index<=4'])
assert((c.values == np.arange(2) + 3).all() == True)
result = store.select('df', where=c)
expected = df.ix[3:4, :]
tm.assert_frame_equal(result, expected)
self.assert_(isinstance(c, Index))
# multiple tables
_maybe_remove(store, 'df1')
_maybe_remove(store, 'df2')
df1 = tm.makeTimeDataFrame()
df2 = tm.makeTimeDataFrame().rename(columns=lambda x: "%s_2" % x)
store.append('df1', df1, data_columns=['A', 'B'])
store.append('df2', df2)
c = store.select_as_coordinates('df1', ['A>0', 'B>0'])
df1_result = store.select('df1', c)
df2_result = store.select('df2', c)
result = concat([df1_result, df2_result], axis=1)
expected = concat([df1, df2], axis=1)
expected = expected[(expected.A > 0) & (expected.B > 0)]
tm.assert_frame_equal(result, expected)
# pass array/mask as the coordinates
with ensure_clean_store(self.path) as store:
df = DataFrame(np.random.randn(1000,2),index=date_range('20000101',periods=1000))
store.append('df',df)
c = store.select_column('df','index')
where = c[DatetimeIndex(c).month==5].index
expected = df.iloc[where]
# locations
result = store.select('df',where=where)
tm.assert_frame_equal(result,expected)
# boolean
result = store.select('df',where=where)
tm.assert_frame_equal(result,expected)
# invalid
self.assertRaises(ValueError, store.select, 'df',where=np.arange(len(df),dtype='float64'))
self.assertRaises(ValueError, store.select, 'df',where=np.arange(len(df)+1))
self.assertRaises(ValueError, store.select, 'df',where=np.arange(len(df)),start=5)
self.assertRaises(ValueError, store.select, 'df',where=np.arange(len(df)),start=5,stop=10)
# list
df = DataFrame(np.random.randn(10,2))
store.append('df2',df)
result = store.select('df2',where=[0,3,5])
expected = df.iloc[[0,3,5]]
tm.assert_frame_equal(result,expected)
# boolean
where = [True] * 10
where[-2] = False
result = store.select('df2',where=where)
expected = df.loc[where]
tm.assert_frame_equal(result,expected)
def test_append_to_multiple(self):
df1 = tm.makeTimeDataFrame()
df2 = tm.makeTimeDataFrame().rename(columns=lambda x: "%s_2" % x)
df2['foo'] = 'bar'
df = concat([df1, df2], axis=1)
with ensure_clean_store(self.path) as store:
# exceptions
self.assertRaises(ValueError, store.append_to_multiple,
{'df1': ['A', 'B'], 'df2': None}, df, selector='df3')
self.assertRaises(ValueError, store.append_to_multiple,
{'df1': None, 'df2': None}, df, selector='df3')
self.assertRaises(
ValueError, store.append_to_multiple, 'df1', df, 'df1')
# regular operation
store.append_to_multiple(
{'df1': ['A', 'B'], 'df2': None}, df, selector='df1')
result = store.select_as_multiple(
['df1', 'df2'], where=['A>0', 'B>0'], selector='df1')
expected = df[(df.A > 0) & (df.B > 0)]
tm.assert_frame_equal(result, expected)
def test_append_to_multiple_dropna(self):
df1 = tm.makeTimeDataFrame()
df2 = tm.makeTimeDataFrame().rename(columns=lambda x: "%s_2" % x)
df1.ix[1, ['A', 'B']] = np.nan
df = concat([df1, df2], axis=1)
with ensure_clean_store(self.path) as store:
# dropna=True should guarantee rows are synchronized
store.append_to_multiple(
{'df1': ['A', 'B'], 'df2': None}, df, selector='df1',
dropna=True)
result = store.select_as_multiple(['df1', 'df2'])
expected = df.dropna()
tm.assert_frame_equal(result, expected)
tm.assert_index_equal(store.select('df1').index,
store.select('df2').index)
# dropna=False shouldn't synchronize row indexes
store.append_to_multiple(
{'df1': ['A', 'B'], 'df2': None}, df, selector='df1',
dropna=False)
self.assertRaises(
ValueError, store.select_as_multiple, ['df1', 'df2'])
assert not store.select('df1').index.equals(
store.select('df2').index)
def test_select_as_multiple(self):
df1 = tm.makeTimeDataFrame()
df2 = tm.makeTimeDataFrame().rename(columns=lambda x: "%s_2" % x)
df2['foo'] = 'bar'
with ensure_clean_store(self.path) as store:
# no tables stored
self.assertRaises(Exception, store.select_as_multiple,
None, where=['A>0', 'B>0'], selector='df1')
store.append('df1', df1, data_columns=['A', 'B'])
store.append('df2', df2)
# exceptions
self.assertRaises(Exception, store.select_as_multiple,
None, where=['A>0', 'B>0'], selector='df1')
self.assertRaises(Exception, store.select_as_multiple,
[None], where=['A>0', 'B>0'], selector='df1')
self.assertRaises(TypeError, store.select_as_multiple,
['df1','df3'], where=['A>0', 'B>0'], selector='df1')
self.assertRaises(KeyError, store.select_as_multiple,
['df3'], where=['A>0', 'B>0'], selector='df1')
self.assertRaises(ValueError, store.select_as_multiple,
['df1','df2'], where=['A>0', 'B>0'], selector='df4')
# default select
result = store.select('df1', ['A>0', 'B>0'])
expected = store.select_as_multiple(
['df1'], where=['A>0', 'B>0'], selector='df1')
tm.assert_frame_equal(result, expected)
expected = store.select_as_multiple(
'df1', where=['A>0', 'B>0'], selector='df1')
tm.assert_frame_equal(result, expected)
# multiple
result = store.select_as_multiple(
['df1', 'df2'], where=['A>0', 'B>0'], selector='df1')
expected = concat([df1, df2], axis=1)
expected = expected[(expected.A > 0) & (expected.B > 0)]
tm.assert_frame_equal(result, expected)
# multiple (diff selector)
result = store.select_as_multiple(['df1', 'df2'], where=[Term(
'index>df2.index[4]')], selector='df2')
expected = concat([df1, df2], axis=1)
expected = expected[5:]
tm.assert_frame_equal(result, expected)
# test exception for diff rows
store.append('df3', tm.makeTimeDataFrame(nper=50))
self.assertRaises(ValueError, store.select_as_multiple,
['df1','df3'], where=['A>0', 'B>0'], selector='df1')
def test_start_stop(self):
with ensure_clean_store(self.path) as store:
df = DataFrame(dict(A=np.random.rand(20), B=np.random.rand(20)))
store.append('df', df)
result = store.select(
'df', [Term("columns=['A']")], start=0, stop=5)
expected = df.ix[0:4, ['A']]
tm.assert_frame_equal(result, expected)
# out of range
result = store.select(
'df', [Term("columns=['A']")], start=30, stop=40)
assert(len(result) == 0)
assert(type(result) == DataFrame)
def test_select_filter_corner(self):
df = DataFrame(np.random.randn(50, 100))
df.index = ['%.3d' % c for c in df.index]
df.columns = ['%.3d' % c for c in df.columns]
with ensure_clean_store(self.path) as store:
store.put('frame', df, format='table')
crit = Term('columns=df.columns[:75]')
result = store.select('frame', [crit])
tm.assert_frame_equal(result, df.ix[:, df.columns[:75]])
def _check_roundtrip(self, obj, comparator, compression=False, **kwargs):
options = {}
if compression:
options['complib'] = _default_compressor
with ensure_clean_store(self.path, 'w', **options) as store:
store['obj'] = obj
retrieved = store['obj']
comparator(retrieved, obj, **kwargs)
def _check_double_roundtrip(self, obj, comparator, compression=False,
**kwargs):
options = {}
if compression:
options['complib'] = compression or _default_compressor
with ensure_clean_store(self.path, 'w', **options) as store:
store['obj'] = obj
retrieved = store['obj']
comparator(retrieved, obj, **kwargs)
store['obj'] = retrieved
again = store['obj']
comparator(again, obj, **kwargs)
def _check_roundtrip_table(self, obj, comparator, compression=False):
options = {}
if compression:
options['complib'] = _default_compressor
with ensure_clean_store(self.path, 'w', **options) as store:
store.put('obj', obj, format='table')
retrieved = store['obj']
# sorted_obj = _test_sort(obj)
comparator(retrieved, obj)
def test_multiple_open_close(self):
# GH 4409, open & close multiple times
with ensure_clean_path(self.path) as path:
df = tm.makeDataFrame()
df.to_hdf(path,'df',mode='w',format='table')
# single
store = HDFStore(path)
self.assert_('CLOSED' not in str(store))
self.assert_(store.is_open)
store.close()
self.assert_('CLOSED' in str(store))
self.assert_(not store.is_open)
with ensure_clean_path(self.path) as path:
if pytables._table_file_open_policy_is_strict:
# multiples
store1 = HDFStore(path)
def f():
HDFStore(path)
self.assertRaises(ValueError, f)
store1.close()
else:
# multiples
store1 = HDFStore(path)
store2 = HDFStore(path)
self.assert_('CLOSED' not in str(store1))
self.assert_('CLOSED' not in str(store2))
self.assert_(store1.is_open)
self.assert_(store2.is_open)
store1.close()
self.assert_('CLOSED' in str(store1))
self.assert_(not store1.is_open)
self.assert_('CLOSED' not in str(store2))
self.assert_(store2.is_open)
store2.close()
self.assert_('CLOSED' in str(store1))
self.assert_('CLOSED' in str(store2))
self.assert_(not store1.is_open)
self.assert_(not store2.is_open)
# nested close
store = HDFStore(path,mode='w')
store.append('df',df)
store2 = HDFStore(path)
store2.append('df2',df)
store2.close()
self.assert_('CLOSED' in str(store2))
self.assert_(not store2.is_open)
store.close()
self.assert_('CLOSED' in str(store))
self.assert_(not store.is_open)
# double closing
store = HDFStore(path,mode='w')
store.append('df', df)
store2 = HDFStore(path)
store.close()
self.assert_('CLOSED' in str(store))
self.assert_(not store.is_open)
store2.close()
self.assert_('CLOSED' in str(store2))
self.assert_(not store2.is_open)
# ops on a closed store
with ensure_clean_path(self.path) as path:
df = tm.makeDataFrame()
df.to_hdf(path,'df',mode='w',format='table')
store = HDFStore(path)
store.close()
self.assertRaises(ClosedFileError, store.keys)
self.assertRaises(ClosedFileError, lambda : 'df' in store)
self.assertRaises(ClosedFileError, lambda : len(store))
self.assertRaises(ClosedFileError, lambda : store['df'])
self.assertRaises(ClosedFileError, lambda : store.df)
self.assertRaises(ClosedFileError, store.select, 'df')
self.assertRaises(ClosedFileError, store.get, 'df')
self.assertRaises(ClosedFileError, store.append, 'df2', df)
self.assertRaises(ClosedFileError, store.put, 'df3', df)
self.assertRaises(ClosedFileError, store.get_storer, 'df2')
self.assertRaises(ClosedFileError, store.remove, 'df2')
def f():
store.select('df')
tm.assertRaisesRegexp(ClosedFileError, 'file is not open', f)
def test_pytables_native_read(self):
try:
store = HDFStore(tm.get_data_path('legacy_hdf/pytables_native.h5'), 'r')
d2 = store['detector/readout']
assert isinstance(d2, DataFrame)
finally:
safe_close(store)
try:
store = HDFStore(tm.get_data_path('legacy_hdf/pytables_native2.h5'), 'r')
str(store)
d1 = store['detector']
assert isinstance(d1, DataFrame)
finally:
safe_close(store)
def test_legacy_read(self):
try:
store = HDFStore(tm.get_data_path('legacy_hdf/legacy.h5'), 'r')
store['a']
store['b']
store['c']
store['d']
finally:
safe_close(store)
def test_legacy_table_read(self):
# legacy table types
try:
store = HDFStore(tm.get_data_path('legacy_hdf/legacy_table.h5'), 'r')
store.select('df1')
store.select('df2')
store.select('wp1')
# force the frame
store.select('df2', typ='legacy_frame')
# old version warning
with tm.assert_produces_warning(expected_warning=IncompatibilityWarning):
self.assertRaises(
Exception, store.select, 'wp1', Term('minor_axis=B'))
df2 = store.select('df2')
result = store.select('df2', Term('index>df2.index[2]'))
expected = df2[df2.index > df2.index[2]]
assert_frame_equal(expected, result)
finally:
safe_close(store)
def test_legacy_0_10_read(self):
# legacy from 0.10
try:
store = HDFStore(tm.get_data_path('legacy_hdf/legacy_0.10.h5'), 'r')
str(store)
for k in store.keys():
store.select(k)
finally:
safe_close(store)
def test_legacy_0_11_read(self):
# legacy from 0.11
try:
path = os.path.join('legacy_hdf', 'legacy_table_0.11.h5')
store = HDFStore(tm.get_data_path(path), 'r')
str(store)
assert 'df' in store
assert 'df1' in store
assert 'mi' in store
df = store.select('df')
df1 = store.select('df1')
mi = store.select('mi')
assert isinstance(df, DataFrame)
assert isinstance(df1, DataFrame)
assert isinstance(mi, DataFrame)
finally:
safe_close(store)
def test_copy(self):
def do_copy(f = None, new_f = None, keys = None, propindexes = True, **kwargs):
try:
if f is None:
f = tm.get_data_path(os.path.join('legacy_hdf',
'legacy_0.10.h5'))
store = HDFStore(f, 'r')
if new_f is None:
import tempfile
fd, new_f = tempfile.mkstemp()
tstore = store.copy(new_f, keys = keys, propindexes = propindexes, **kwargs)
# check keys
if keys is None:
keys = store.keys()
self.assert_(set(keys) == set(tstore.keys()))
# check indices & nrows
for k in tstore.keys():
if tstore.get_storer(k).is_table:
new_t = tstore.get_storer(k)
orig_t = store.get_storer(k)
self.assert_(orig_t.nrows == new_t.nrows)
# check propindexes
if propindexes:
for a in orig_t.axes:
if a.is_indexed:
self.assert_(new_t[a.name].is_indexed == True)
finally:
safe_close(store)
safe_close(tstore)
try:
os.close(fd)
except:
pass
safe_remove(new_f)
do_copy()
do_copy(keys = ['/a','/b','/df1_mixed'])
do_copy(propindexes = False)
# new table
df = tm.makeDataFrame()
try:
st = HDFStore(self.path)
st.append('df', df, data_columns = ['A'])
st.close()
do_copy(f = self.path)
do_copy(f = self.path, propindexes = False)
finally:
safe_remove(self.path)
def test_legacy_table_write(self):
raise nose.SkipTest("skipping for now")
store = HDFStore(tm.get_data_path('legacy_hdf/legacy_table_%s.h5' % pandas.__version__), 'a')
df = tm.makeDataFrame()
wp = tm.makePanel()
index = MultiIndex(levels=[['foo', 'bar', 'baz', 'qux'],
['one', 'two', 'three']],
labels=[[0, 0, 0, 1, 1, 2, 2, 3, 3, 3],
[0, 1, 2, 0, 1, 1, 2, 0, 1, 2]],
names=['foo', 'bar'])
df = DataFrame(np.random.randn(10, 3), index=index,
columns=['A', 'B', 'C'])
store.append('mi', df)
df = DataFrame(dict(A = 'foo', B = 'bar'),index=lrange(10))
store.append('df', df, data_columns = ['B'], min_itemsize={'A' : 200 })
store.append('wp', wp)
store.close()
def test_store_datetime_fractional_secs(self):
with ensure_clean_store(self.path) as store:
dt = datetime.datetime(2012, 1, 2, 3, 4, 5, 123456)
series = Series([0], [dt])
store['a'] = series
self.assertEquals(store['a'].index[0], dt)
def test_tseries_indices_series(self):
with ensure_clean_store(self.path) as store:
idx = tm.makeDateIndex(10)
ser = Series(np.random.randn(len(idx)), idx)
store['a'] = ser
result = store['a']
assert_series_equal(result, ser)
self.assertEquals(type(result.index), type(ser.index))
self.assertEquals(result.index.freq, ser.index.freq)
idx = tm.makePeriodIndex(10)
ser = Series(np.random.randn(len(idx)), idx)
store['a'] = ser
result = store['a']
assert_series_equal(result, ser)
self.assertEquals(type(result.index), type(ser.index))
self.assertEquals(result.index.freq, ser.index.freq)
def test_tseries_indices_frame(self):
with ensure_clean_store(self.path) as store:
idx = tm.makeDateIndex(10)
df = DataFrame(np.random.randn(len(idx), 3), index=idx)
store['a'] = df
result = store['a']
assert_frame_equal(result, df)
self.assertEquals(type(result.index), type(df.index))
self.assertEquals(result.index.freq, df.index.freq)
idx = tm.makePeriodIndex(10)
df = DataFrame(np.random.randn(len(idx), 3), idx)
store['a'] = df
result = store['a']
assert_frame_equal(result, df)
self.assertEquals(type(result.index), type(df.index))
self.assertEquals(result.index.freq, df.index.freq)
def test_unicode_index(self):
unicode_values = [u('\u03c3'), u('\u03c3\u03c3')]
def f():
s = Series(np.random.randn(len(unicode_values)), unicode_values)
self._check_roundtrip(s, tm.assert_series_equal)
compat_assert_produces_warning(PerformanceWarning,f)
def test_store_datetime_mixed(self):
df = DataFrame(
{'a': [1, 2, 3], 'b': [1., 2., 3.], 'c': ['a', 'b', 'c']})
ts = tm.makeTimeSeries()
df['d'] = ts.index[:3]
self._check_roundtrip(df, tm.assert_frame_equal)
# def test_cant_write_multiindex_table(self):
# # for now, #1848
# df = DataFrame(np.random.randn(10, 4),
# index=[np.arange(5).repeat(2),
# np.tile(np.arange(2), 5)])
# self.assertRaises(Exception, store.put, 'foo', df, format='table')
def test_append_with_diff_col_name_types_raises_value_error(self):
df = DataFrame(np.random.randn(10, 1))
df2 = DataFrame({'a': np.random.randn(10)})
df3 = DataFrame({(1, 2): np.random.randn(10)})
df4 = DataFrame({('1', 2): np.random.randn(10)})
df5 = DataFrame({('1', 2, object): np.random.randn(10)})
with ensure_clean_store(self.path) as store:
name = 'df_%s' % tm.rands(10)
store.append(name, df)
for d in (df2, df3, df4, df5):
with tm.assertRaises(ValueError):
store.append(name, d)
def _test_sort(obj):
if isinstance(obj, DataFrame):
return obj.reindex(sorted(obj.index))
elif isinstance(obj, Panel):
return obj.reindex(major=sorted(obj.major_axis))
else:
raise ValueError('type not supported here')
if __name__ == '__main__':
import nose
nose.runmodule(argv=[__file__, '-vvs', '-x', '--pdb', '--pdb-failure'],
exit=False)
| gpl-3.0 |
paulromano/openmc | tests/regression_tests/surface_tally/test.py | 9 | 7315 | import numpy as np
import openmc
import pandas as pd
from tests.testing_harness import PyAPITestHarness
class SurfaceTallyTestHarness(PyAPITestHarness):
def _build_inputs(self):
# Instantiate some Materials and register the appropriate Nuclides
uo2 = openmc.Material(name='UO2 fuel at 2.4% wt enrichment')
uo2.set_density('g/cc', 10.0)
uo2.add_nuclide('U238', 1.0)
uo2.add_nuclide('U235', 0.02)
uo2.add_nuclide('O16', 2.0)
borated_water = openmc.Material(name='Borated water')
borated_water.set_density('g/cm3', 1)
borated_water.add_nuclide('B10', 10e-5)
borated_water.add_nuclide('H1', 2.0)
borated_water.add_nuclide('O16', 1.0)
# Instantiate a Materials collection and export to XML
materials_file = openmc.Materials([uo2, borated_water])
materials_file.export_to_xml()
# Instantiate ZCylinder surfaces
fuel_or = openmc.ZCylinder(surface_id=1, x0=0, y0=0, r=1,
name='Fuel OR')
left = openmc.XPlane(surface_id=2, x0=-2, name='left')
right = openmc.XPlane(surface_id=3, x0=2, name='right')
bottom = openmc.YPlane(y0=-2, name='bottom')
top = openmc.YPlane(y0=2, name='top')
left.boundary_type = 'vacuum'
right.boundary_type = 'reflective'
top.boundary_type = 'reflective'
bottom.boundary_type = 'reflective'
# Instantiate Cells
fuel = openmc.Cell(name='fuel')
water = openmc.Cell(name='water')
# Use surface half-spaces to define regions
fuel.region = -fuel_or
water.region = +fuel_or & -right & +bottom & -top
# Register Materials with Cells
fuel.fill = uo2
water.fill = borated_water
# Instantiate pin cell Universe
pin_cell = openmc.Universe(name='pin cell')
pin_cell.add_cells([fuel, water])
# Instantiate root Cell and Universe
root_cell = openmc.Cell(name='root cell')
root_cell.region = +left & -right & +bottom & -top
root_cell.fill = pin_cell
root_univ = openmc.Universe(universe_id=0, name='root universe')
root_univ.add_cell(root_cell)
# Instantiate a Geometry, register the root Universe
geometry = openmc.Geometry(root_univ)
geometry.export_to_xml()
# Instantiate a Settings object, set all runtime parameters
settings_file = openmc.Settings()
settings_file.batches = 10
settings_file.inactive = 0
settings_file.particles = 1000
#settings_file.output = {'tallies': True}
# Create an initial uniform spatial source distribution
bounds = [-0.62992, -0.62992, -1, 0.62992, 0.62992, 1]
uniform_dist = openmc.stats.Box(bounds[:3], bounds[3:],\
only_fissionable=True)
settings_file.source = openmc.source.Source(space=uniform_dist)
settings_file.export_to_xml()
# Tallies file
tallies_file = openmc.Tallies()
# Create partial current tallies from fuel to water
# Filters
two_groups = [0., 4e6, 20e6]
energy_filter = openmc.EnergyFilter(two_groups)
polar_filter = openmc.PolarFilter([0, np.pi / 4, np.pi])
azimuthal_filter = openmc.AzimuthalFilter([0, np.pi / 4, np.pi])
surface_filter = openmc.SurfaceFilter([1])
cell_from_filter = openmc.CellFromFilter(fuel)
cell_filter = openmc.CellFilter(water)
# Use Cell to cell filters for partial current
cell_to_cell_tally = openmc.Tally(name=str('fuel_to_water_1'))
cell_to_cell_tally.filters = [cell_from_filter, cell_filter, \
energy_filter, polar_filter, azimuthal_filter]
cell_to_cell_tally.scores = ['current']
tallies_file.append(cell_to_cell_tally)
# Use a Cell from + surface filters for partial current
cell_to_cell_tally = openmc.Tally(name=str('fuel_to_water_2'))
cell_to_cell_tally.filters = [cell_from_filter, surface_filter, \
energy_filter, polar_filter, azimuthal_filter]
cell_to_cell_tally.scores = ['current']
tallies_file.append(cell_to_cell_tally)
# Create partial current tallies from water to fuel
# Filters
cell_from_filter = openmc.CellFromFilter(water)
cell_filter = openmc.CellFilter(fuel)
# Cell to cell filters for partial current
cell_to_cell_tally = openmc.Tally(name=str('water_to_fuel_1'))
cell_to_cell_tally.filters = [cell_from_filter, cell_filter, \
energy_filter, polar_filter, azimuthal_filter]
cell_to_cell_tally.scores = ['current']
tallies_file.append(cell_to_cell_tally)
# Cell from + surface filters for partial current
cell_to_cell_tally = openmc.Tally(name=str('water_to_fuel_2'))
cell_to_cell_tally.filters = [cell_from_filter, surface_filter, \
energy_filter, polar_filter, azimuthal_filter]
cell_to_cell_tally.scores = ['current']
tallies_file.append(cell_to_cell_tally)
# Create a net current tally on inner surface using a surface filter
surface_filter = openmc.SurfaceFilter([1])
surf_tally1 = openmc.Tally(name='net_cylinder')
surf_tally1.filters = [surface_filter, energy_filter, polar_filter, \
azimuthal_filter]
surf_tally1.scores = ['current']
tallies_file.append(surf_tally1)
# Create a net current tally on left surface using a surface filter
# This surface has a vacuum boundary condition, so leakage is tallied
surface_filter = openmc.SurfaceFilter([2])
surf_tally2 = openmc.Tally(name='leakage_left')
surf_tally2.filters = [surface_filter, energy_filter, polar_filter, \
azimuthal_filter]
surf_tally2.scores = ['current']
tallies_file.append(surf_tally2)
# Create a net current tally on right surface using a surface filter
# This surface has a reflective boundary condition, so the net current
# should be zero.
surface_filter = openmc.SurfaceFilter([3])
surf_tally3 = openmc.Tally(name='net_right')
surf_tally3.filters = [surface_filter, energy_filter]
surf_tally3.scores = ['current']
tallies_file.append(surf_tally3)
surface_filter = openmc.SurfaceFilter([3])
surf_tally3 = openmc.Tally(name='net_right')
surf_tally3.filters = [surface_filter, energy_filter]
surf_tally3.scores = ['current']
tallies_file.append(surf_tally3)
tallies_file.export_to_xml()
def _get_results(self):
"""Digest info in the statepoint and return as a string."""
# Read the statepoint file.
sp = openmc.StatePoint(self._sp_name)
# Extract the tally data as a Pandas DataFrame.
df = pd.DataFrame()
for t in sp.tallies.values():
df = df.append(t.get_pandas_dataframe(), ignore_index=True)
# Extract the relevant data as a CSV string.
cols = ('mean', 'std. dev.')
return df.to_csv(None, columns=cols, index=False, float_format='%.7e')
def test_surface_tally():
harness = SurfaceTallyTestHarness('statepoint.10.h5')
harness.main()
| mit |
kenshay/ImageScript | ProgramData/SystemFiles/Python/Lib/site-packages/matplotlib/backends/backend_gdk.py | 10 | 17086 | from __future__ import (absolute_import, division, print_function,
unicode_literals)
import six
import math
import os
import sys
import warnings
def fn_name(): return sys._getframe(1).f_code.co_name
import gobject
import gtk; gdk = gtk.gdk
import pango
pygtk_version_required = (2,2,0)
if gtk.pygtk_version < pygtk_version_required:
raise ImportError ("PyGTK %d.%d.%d is installed\n"
"PyGTK %d.%d.%d or later is required"
% (gtk.pygtk_version + pygtk_version_required))
del pygtk_version_required
import numpy as np
import matplotlib
from matplotlib import rcParams
from matplotlib._pylab_helpers import Gcf
from matplotlib.backend_bases import RendererBase, GraphicsContextBase, \
FigureManagerBase, FigureCanvasBase
from matplotlib.cbook import is_string_like, restrict_dict, warn_deprecated
from matplotlib.figure import Figure
from matplotlib.mathtext import MathTextParser
from matplotlib.transforms import Affine2D
from matplotlib.backends._backend_gdk import pixbuf_get_pixels_array
backend_version = "%d.%d.%d" % gtk.pygtk_version
_debug = False
# Image formats that this backend supports - for FileChooser and print_figure()
IMAGE_FORMAT = ['eps', 'jpg', 'png', 'ps', 'svg'] + ['bmp'] # , 'raw', 'rgb']
IMAGE_FORMAT.sort()
IMAGE_FORMAT_DEFAULT = 'png'
class RendererGDK(RendererBase):
fontweights = {
100 : pango.WEIGHT_ULTRALIGHT,
200 : pango.WEIGHT_LIGHT,
300 : pango.WEIGHT_LIGHT,
400 : pango.WEIGHT_NORMAL,
500 : pango.WEIGHT_NORMAL,
600 : pango.WEIGHT_BOLD,
700 : pango.WEIGHT_BOLD,
800 : pango.WEIGHT_HEAVY,
900 : pango.WEIGHT_ULTRABOLD,
'ultralight' : pango.WEIGHT_ULTRALIGHT,
'light' : pango.WEIGHT_LIGHT,
'normal' : pango.WEIGHT_NORMAL,
'medium' : pango.WEIGHT_NORMAL,
'semibold' : pango.WEIGHT_BOLD,
'bold' : pango.WEIGHT_BOLD,
'heavy' : pango.WEIGHT_HEAVY,
'ultrabold' : pango.WEIGHT_ULTRABOLD,
'black' : pango.WEIGHT_ULTRABOLD,
}
# cache for efficiency, these must be at class, not instance level
layoutd = {} # a map from text prop tups to pango layouts
rotated = {} # a map from text prop tups to rotated text pixbufs
def __init__(self, gtkDA, dpi):
# widget gtkDA is used for:
# '<widget>.create_pango_layout(s)'
# the colormap (cmap) line below
self.gtkDA = gtkDA
self.dpi = dpi
self._cmap = gtkDA.get_colormap()
self.mathtext_parser = MathTextParser("Agg")
def set_pixmap (self, pixmap):
self.gdkDrawable = pixmap
def set_width_height (self, width, height):
"""w,h is the figure w,h not the pixmap w,h
"""
self.width, self.height = width, height
def draw_path(self, gc, path, transform, rgbFace=None):
transform = transform + Affine2D(). \
scale(1.0, -1.0).translate(0, self.height)
polygons = path.to_polygons(transform, self.width, self.height)
for polygon in polygons:
# draw_polygon won't take an arbitrary sequence -- it must be a list
# of tuples
polygon = [(int(np.round(x)), int(np.round(y))) for x, y in polygon]
if rgbFace is not None:
saveColor = gc.gdkGC.foreground
gc.gdkGC.foreground = gc.rgb_to_gdk_color(rgbFace)
self.gdkDrawable.draw_polygon(gc.gdkGC, True, polygon)
gc.gdkGC.foreground = saveColor
if gc.gdkGC.line_width > 0:
self.gdkDrawable.draw_lines(gc.gdkGC, polygon)
def draw_image(self, gc, x, y, im):
bbox = gc.get_clip_rectangle()
if bbox != None:
l,b,w,h = bbox.bounds
#rectangle = (int(l), self.height-int(b+h),
# int(w), int(h))
# set clip rect?
rows, cols = im.shape[:2]
pixbuf = gtk.gdk.Pixbuf(gtk.gdk.COLORSPACE_RGB,
has_alpha=True, bits_per_sample=8,
width=cols, height=rows)
array = pixbuf_get_pixels_array(pixbuf)
array[:, :, :] = im[::-1]
gc = self.new_gc()
y = self.height-y-rows
try: # new in 2.2
# can use None instead of gc.gdkGC, if don't need clipping
self.gdkDrawable.draw_pixbuf (gc.gdkGC, pixbuf, 0, 0,
int(x), int(y), cols, rows,
gdk.RGB_DITHER_NONE, 0, 0)
except AttributeError:
# deprecated in 2.2
pixbuf.render_to_drawable(self.gdkDrawable, gc.gdkGC, 0, 0,
int(x), int(y), cols, rows,
gdk.RGB_DITHER_NONE, 0, 0)
def draw_text(self, gc, x, y, s, prop, angle, ismath=False, mtext=None):
x, y = int(x), int(y)
if x < 0 or y < 0: # window has shrunk and text is off the edge
return
if angle not in (0,90):
warnings.warn('backend_gdk: unable to draw text at angles ' +
'other than 0 or 90')
elif ismath:
self._draw_mathtext(gc, x, y, s, prop, angle)
elif angle==90:
self._draw_rotated_text(gc, x, y, s, prop, angle)
else:
layout, inkRect, logicalRect = self._get_pango_layout(s, prop)
l, b, w, h = inkRect
if (x + w > self.width or y + h > self.height):
return
self.gdkDrawable.draw_layout(gc.gdkGC, x, y-h-b, layout)
def _draw_mathtext(self, gc, x, y, s, prop, angle):
ox, oy, width, height, descent, font_image, used_characters = \
self.mathtext_parser.parse(s, self.dpi, prop)
if angle==90:
width, height = height, width
x -= width
y -= height
imw = font_image.get_width()
imh = font_image.get_height()
N = imw * imh
# a numpixels by num fonts array
Xall = np.zeros((N,1), np.uint8)
image_str = font_image.as_str()
Xall[:,0] = np.fromstring(image_str, np.uint8)
# get the max alpha at each pixel
Xs = np.amax(Xall,axis=1)
# convert it to its proper shape
Xs.shape = imh, imw
pixbuf = gtk.gdk.Pixbuf(gtk.gdk.COLORSPACE_RGB, has_alpha=True,
bits_per_sample=8, width=imw, height=imh)
array = pixbuf_get_pixels_array(pixbuf)
rgb = gc.get_rgb()
array[:,:,0]=int(rgb[0]*255)
array[:,:,1]=int(rgb[1]*255)
array[:,:,2]=int(rgb[2]*255)
array[:,:,3]=Xs
try: # new in 2.2
# can use None instead of gc.gdkGC, if don't need clipping
self.gdkDrawable.draw_pixbuf (gc.gdkGC, pixbuf, 0, 0,
int(x), int(y), imw, imh,
gdk.RGB_DITHER_NONE, 0, 0)
except AttributeError:
# deprecated in 2.2
pixbuf.render_to_drawable(self.gdkDrawable, gc.gdkGC, 0, 0,
int(x), int(y), imw, imh,
gdk.RGB_DITHER_NONE, 0, 0)
def _draw_rotated_text(self, gc, x, y, s, prop, angle):
"""
Draw the text rotated 90 degrees, other angles are not supported
"""
# this function (and its called functions) is a bottleneck
# Pango 1.6 supports rotated text, but pygtk 2.4.0 does not yet have
# wrapper functions
# GTK+ 2.6 pixbufs support rotation
gdrawable = self.gdkDrawable
ggc = gc.gdkGC
layout, inkRect, logicalRect = self._get_pango_layout(s, prop)
l, b, w, h = inkRect
x = int(x-h)
y = int(y-w)
if (x < 0 or y < 0 or # window has shrunk and text is off the edge
x + w > self.width or y + h > self.height):
return
key = (x,y,s,angle,hash(prop))
imageVert = self.rotated.get(key)
if imageVert != None:
gdrawable.draw_image(ggc, imageVert, 0, 0, x, y, h, w)
return
imageBack = gdrawable.get_image(x, y, w, h)
imageVert = gdrawable.get_image(x, y, h, w)
imageFlip = gtk.gdk.Image(type=gdk.IMAGE_FASTEST,
visual=gdrawable.get_visual(),
width=w, height=h)
if imageFlip == None or imageBack == None or imageVert == None:
warnings.warn("Could not renderer vertical text")
return
imageFlip.set_colormap(self._cmap)
for i in range(w):
for j in range(h):
imageFlip.put_pixel(i, j, imageVert.get_pixel(j,w-i-1) )
gdrawable.draw_image(ggc, imageFlip, 0, 0, x, y, w, h)
gdrawable.draw_layout(ggc, x, y-b, layout)
imageIn = gdrawable.get_image(x, y, w, h)
for i in range(w):
for j in range(h):
imageVert.put_pixel(j, i, imageIn.get_pixel(w-i-1,j) )
gdrawable.draw_image(ggc, imageBack, 0, 0, x, y, w, h)
gdrawable.draw_image(ggc, imageVert, 0, 0, x, y, h, w)
self.rotated[key] = imageVert
def _get_pango_layout(self, s, prop):
"""
Create a pango layout instance for Text 's' with properties 'prop'.
Return - pango layout (from cache if already exists)
Note that pango assumes a logical DPI of 96
Ref: pango/fonts.c/pango_font_description_set_size() manual page
"""
# problem? - cache gets bigger and bigger, is never cleared out
# two (not one) layouts are created for every text item s (then they
# are cached) - why?
key = self.dpi, s, hash(prop)
value = self.layoutd.get(key)
if value != None:
return value
size = prop.get_size_in_points() * self.dpi / 96.0
size = np.round(size)
font_str = '%s, %s %i' % (prop.get_name(), prop.get_style(), size,)
font = pango.FontDescription(font_str)
# later - add fontweight to font_str
font.set_weight(self.fontweights[prop.get_weight()])
layout = self.gtkDA.create_pango_layout(s)
layout.set_font_description(font)
inkRect, logicalRect = layout.get_pixel_extents()
self.layoutd[key] = layout, inkRect, logicalRect
return layout, inkRect, logicalRect
def flipy(self):
return True
def get_canvas_width_height(self):
return self.width, self.height
def get_text_width_height_descent(self, s, prop, ismath):
if ismath:
ox, oy, width, height, descent, font_image, used_characters = \
self.mathtext_parser.parse(s, self.dpi, prop)
return width, height, descent
layout, inkRect, logicalRect = self._get_pango_layout(s, prop)
l, b, w, h = inkRect
ll, lb, lw, lh = logicalRect
return w, h + 1, h - lh
def new_gc(self):
return GraphicsContextGDK(renderer=self)
def points_to_pixels(self, points):
return points/72.0 * self.dpi
class GraphicsContextGDK(GraphicsContextBase):
# a cache shared by all class instances
_cached = {} # map: rgb color -> gdk.Color
_joind = {
'bevel' : gdk.JOIN_BEVEL,
'miter' : gdk.JOIN_MITER,
'round' : gdk.JOIN_ROUND,
}
_capd = {
'butt' : gdk.CAP_BUTT,
'projecting' : gdk.CAP_PROJECTING,
'round' : gdk.CAP_ROUND,
}
def __init__(self, renderer):
GraphicsContextBase.__init__(self)
self.renderer = renderer
self.gdkGC = gtk.gdk.GC(renderer.gdkDrawable)
self._cmap = renderer._cmap
def rgb_to_gdk_color(self, rgb):
"""
rgb - an RGB tuple (three 0.0-1.0 values)
return an allocated gtk.gdk.Color
"""
try:
return self._cached[tuple(rgb)]
except KeyError:
color = self._cached[tuple(rgb)] = \
self._cmap.alloc_color(
int(rgb[0]*65535),int(rgb[1]*65535),int(rgb[2]*65535))
return color
#def set_antialiased(self, b):
# anti-aliasing is not supported by GDK
def set_capstyle(self, cs):
GraphicsContextBase.set_capstyle(self, cs)
self.gdkGC.cap_style = self._capd[self._capstyle]
def set_clip_rectangle(self, rectangle):
GraphicsContextBase.set_clip_rectangle(self, rectangle)
if rectangle is None:
return
l,b,w,h = rectangle.bounds
rectangle = (int(l), self.renderer.height-int(b+h)+1,
int(w), int(h))
#rectangle = (int(l), self.renderer.height-int(b+h),
# int(w+1), int(h+2))
self.gdkGC.set_clip_rectangle(rectangle)
def set_dashes(self, dash_offset, dash_list):
GraphicsContextBase.set_dashes(self, dash_offset, dash_list)
if dash_list == None:
self.gdkGC.line_style = gdk.LINE_SOLID
else:
pixels = self.renderer.points_to_pixels(np.asarray(dash_list))
dl = [max(1, int(np.round(val))) for val in pixels]
self.gdkGC.set_dashes(dash_offset, dl)
self.gdkGC.line_style = gdk.LINE_ON_OFF_DASH
def set_foreground(self, fg, isRGBA=False):
GraphicsContextBase.set_foreground(self, fg, isRGBA)
self.gdkGC.foreground = self.rgb_to_gdk_color(self.get_rgb())
def set_graylevel(self, frac):
GraphicsContextBase.set_graylevel(self, frac)
self.gdkGC.foreground = self.rgb_to_gdk_color(self.get_rgb())
def set_joinstyle(self, js):
GraphicsContextBase.set_joinstyle(self, js)
self.gdkGC.join_style = self._joind[self._joinstyle]
def set_linewidth(self, w):
GraphicsContextBase.set_linewidth(self, w)
if w == 0:
self.gdkGC.line_width = 0
else:
pixels = self.renderer.points_to_pixels(w)
self.gdkGC.line_width = max(1, int(np.round(pixels)))
def new_figure_manager(num, *args, **kwargs):
"""
Create a new figure manager instance
"""
FigureClass = kwargs.pop('FigureClass', Figure)
thisFig = FigureClass(*args, **kwargs)
return new_figure_manager_given_figure(num, thisFig)
def new_figure_manager_given_figure(num, figure):
"""
Create a new figure manager instance for the given figure.
"""
canvas = FigureCanvasGDK(figure)
manager = FigureManagerBase(canvas, num)
return manager
class FigureCanvasGDK (FigureCanvasBase):
def __init__(self, figure):
FigureCanvasBase.__init__(self, figure)
if self.__class__ == matplotlib.backends.backend_gdk.FigureCanvasGDK:
warn_deprecated('2.0', message="The GDK backend is "
"deprecated. It is untested, known to be "
"broken and will be removed in Matplotlib 2.2. "
"Use the Agg backend instead. "
"See Matplotlib usage FAQ for"
" more info on backends.",
alternative="Agg")
self._renderer_init()
def _renderer_init(self):
self._renderer = RendererGDK (gtk.DrawingArea(), self.figure.dpi)
def _render_figure(self, pixmap, width, height):
self._renderer.set_pixmap (pixmap)
self._renderer.set_width_height (width, height)
self.figure.draw (self._renderer)
filetypes = FigureCanvasBase.filetypes.copy()
filetypes['jpg'] = 'JPEG'
filetypes['jpeg'] = 'JPEG'
def print_jpeg(self, filename, *args, **kwargs):
return self._print_image(filename, 'jpeg')
print_jpg = print_jpeg
def print_png(self, filename, *args, **kwargs):
return self._print_image(filename, 'png')
def _print_image(self, filename, format, *args, **kwargs):
width, height = self.get_width_height()
pixmap = gtk.gdk.Pixmap (None, width, height, depth=24)
self._render_figure(pixmap, width, height)
# jpg colors don't match the display very well, png colors match
# better
pixbuf = gtk.gdk.Pixbuf(gtk.gdk.COLORSPACE_RGB, 0, 8,
width, height)
pixbuf.get_from_drawable(pixmap, pixmap.get_colormap(),
0, 0, 0, 0, width, height)
# set the default quality, if we are writing a JPEG.
# http://www.pygtk.org/docs/pygtk/class-gdkpixbuf.html#method-gdkpixbuf--save
options = restrict_dict(kwargs, ['quality'])
if format in ['jpg','jpeg']:
if 'quality' not in options:
options['quality'] = rcParams['savefig.jpeg_quality']
options['quality'] = str(options['quality'])
pixbuf.save(filename, format, options=options)
| gpl-3.0 |
TUW-GEO/rt1 | rt1/general_functions.py | 1 | 10585 | # -*- coding: utf-8 -*-
"""helper functions that are used both in rtfits and rtplots"""
from itertools import tee, islice
from collections import OrderedDict
try:
import numpy as np
except ModuleNotFoundError:
pass
def rectangularize(array, return_mask=False, dim=None, return_masked=False, dtype=None):
"""
return a rectangularized version of the input-array by repeating the
last value to obtain the smallest possible rectangular shape.
input:
- array = [[1,2,3], [1], [1,2]]
output:
- return_masked=False: [[1,2,3], [1,1,1], [1,2,2]]
- return_masked=True: [[1,2,3], [1,--,--], [1,2,--]]
Parameters
----------
array: list of lists
the input-data that is intended to be rectangularized
return_mask: bool (default = False)
indicator if weights and mask should be evaluated or not
dim: int (default = None)
the dimension of the rectangularized array
if None, the shortest length of all sub-lists will be used
return_masked: bool (default=False)
indicator if a masked-array should be returned
dtype: type (default = None)
the dtype of the returned array. If None, the dtype of the first
element will be used
Returns
-------
new_array: array-like
a rectangularized version of the input-array
mask: array-like (only if 'weights_and_mask' is True)
a mask indicating the added values
"""
# use this method to get the dtype of the first element since it works with
# pandas-Series, lists, arrays, dict-value views, etc.
if dtype is None:
dtype = np.array(next(islice(array, 1))).dtype
if dim is None:
# get longest dimension of sub-arrays
dim = len(max(array, key=len))
if return_mask is True or return_masked is True:
newarray = np.empty((len(array), dim), dtype=dtype)
mask = np.full((len(array), dim), False, dtype=bool)
for i, s in enumerate(array):
le = len(s)
newarray[i, :le] = s
newarray[i, le:] = s[-1]
mask[i, le:] = True
if return_masked is True:
return np.ma.masked_array(newarray, mask)
else:
return [newarray, mask]
else:
newarray = np.empty((len(array), dim), dtype=dtype)
for i, s in enumerate(array):
le = len(s)
newarray[i, :le] = s
newarray[i, le:] = s[-1]
return newarray
def meandatetime(datetimes):
"""
calculate the average date from a given list of datetime-objects
(can be applied to a pandas-Series via Series.apply(meandatetime))
Parameters
----------
datetimes: list
a list of datetime-objects
Returns
-------
meandate: Timestamp
"""
if len(datetimes) == 1:
return datetimes[0]
x = datetimes
deltas = (x[0] - x[1:]) / len(x)
meandelta = sum(deltas)
meandate = x[0] - meandelta
return meandate
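# Illustrative usage (sketch; the dates below are made up):
#   >>> import pandas as pd
#   >>> meandatetime(pd.to_datetime(['2020-01-01', '2020-01-03']))
#   Timestamp('2020-01-02 00:00:00')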
def dBsig0convert(val, inc, dB, sig0, fitdB, fitsig0):
"""
A convenience-function to convert an array of measurements (and it's
associated incidence-angles).
- between linear- and dB units `( val_dB = 10 * log10(val_linear) )`
- between sigma0 and intensity `( sig0 = 4 * pi * cos(inc) * I )`
Parameters
----------
val: array-like
the backscatter-values that should be converted
inc: array-like
the associated incidence-angle values (in radians)
dB: bool
indicator if the output-dataset should be in dB or not
sig0: bool
indicator if the output-values should be intensity or sigma_0
fitdB: bool
indicator if the input-values have been provided in linear-units
or in dB
fitsig0: bool
indicator if the input-values are given as sigma0 or intensity
Returns
-------
val : array-like
the converted values
"""
if sig0 is not fitsig0:
# if results are provided in dB convert them to linear units before
# applying the sig0-intensity conversion
if fitdB is True:
val = 10 ** (val / 10.0)
# convert sig0 to intensity
if sig0 is False and fitsig0 is True:
val = val / (4.0 * np.pi * np.cos(inc))
# convert intensity to sig0
if sig0 is True and fitsig0 is False:
val = 4.0 * np.pi * np.cos(inc) * val
# convert back to dB if required
if dB is True:
val = 10.0 * np.log10(val)
elif dB is not fitdB:
# if dB output is required, convert to dB
if dB is True and fitdB is False:
val = 10.0 * np.log10(val)
# if linear output is required, convert to linear units
if dB is False and fitdB is True:
val = 10 ** (val / 10.0)
return val
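# Illustrative usage (sketch; the backscatter value and incidence angle are
# made up): convert a sigma0 measurement given in dB to linear intensity
# at 30 degrees incidence.
#   >>> val_int = dBsig0convert(val=-10.0, inc=np.deg2rad(30.0),
#   ...                         dB=False, sig0=False,
#   ...                         fitdB=True, fitsig0=True)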
def pairwise(iterable, pairs=2):
"""
a generator to return n consecutive values from an iterable, e.g.:
pairs = 2
s -> (s0,s1), (s1,s2), (s2, s3), ...
pairs = 3
s -> (s0, s1, s2), (s1, s2, s3), (s2, s3, s4), ...
adapted from https://docs.python.org/3.7/library/itertools.html
"""
x = tee(iterable, pairs)
for n, n_iter in enumerate(x[1:]):
[next(n_iter, None) for i in range(n + 1)]
return zip(*x)
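# Illustrative usage (sketch):
#   >>> list(pairwise([1, 2, 3, 4]))
#   [(1, 2), (2, 3), (3, 4)]
#   >>> list(pairwise([1, 2, 3, 4], pairs=3))
#   [(1, 2, 3), (2, 3, 4)]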
def split_into(iterable, sizes):
"""
a generator that splits the iterable into iterables with the given sizes
see more_itertools split_into for details:
https://more-itertools.readthedocs.io/en/stable/api.html#more_itertools.split_into
"""
it = iter(iterable)
for size in sizes:
if size is None:
yield list(it)
return
else:
yield list(islice(it, size))
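# Illustrative usage (sketch); a final size of None consumes the remainder:
#   >>> list(split_into([1, 2, 3, 4, 5], [2, 3]))
#   [[1, 2], [3, 4, 5]]
#   >>> list(split_into([1, 2, 3, 4, 5], [2, None]))
#   [[1, 2], [3, 4, 5]]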
def scale(x, out_range=(0, 1), domainfuncs=None):
"""
scale an array between out_range = (min, max) where the range of the
array is evaluated via the domainfuncs (min-function, max-function).
the default domainfuncs are np.nanmin and np.nanmax; custom functions,
e.g. percentiles, can be passed instead:
>>> from itertools import partial
>>> domainfuncs = (partial(np.percentile, q=5), partial(np.percentile, q=95))
Notice: using functions like np.percentile might result in values that
exceed the specified `out_range`! (e.g. if the out-range is (0,1),
a min-function of np.percentile(q=5) might result in negative values!)
"""
if domainfuncs is None:
domain = np.nanmin(x), np.nanmax(x)
else:
domain = domainfuncs[0](x), domainfuncs[1](x)
y = (x - (domain[1] + domain[0]) / 2) / (domain[1] - domain[0])
return y * (out_range[1] - out_range[0]) + (out_range[1] + out_range[0]) / 2
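# Illustrative usage (sketch; the input array is made up):
#   >>> scale(np.array([0.0, 5.0, 10.0]))
#   array([0. , 0.5, 1. ])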
def update_progress(
progress, max_prog=100, title="", finalmsg=" DONE\r\n", progress2=None
):
"""
print a progress-bar
adapted from: https://blender.stackexchange.com/a/30739
"""
length = 25 # the length of the progress bar
block = int(round(length * progress / max_prog))
if progress2 is not None:
msg = (
f'\r{title} {"#"*block + "-"*(length-block)}'
+ f" {progress} [{progress2}] / {max_prog}"
)
else:
msg = (
f'\r{title} {"#"*block + "-"*(length-block)}' + f" {progress} / {max_prog}"
)
if progress >= max_prog:
msg = f"\r{finalmsg:<79}\n"
return msg
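# Illustrative usage (sketch): the returned string starts with a carriage
# return so it can be printed in place, e.g.
#   >>> print(update_progress(10, 100, title='fit'), end='')
# which renders a 25-character bar of '#' and '-' followed by ' 10 / 100'.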
def dt_to_hms(td):
"""
convert a datetime.timedelta object into days, hours,
minutes and seconds
"""
days, hours, minutes = td.days, td.seconds // 3600, td.seconds % 3600 // 60
seconds = td.seconds - hours * 3600 - minutes * 60
return days, hours, minutes, seconds
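# Illustrative usage (sketch):
#   >>> import datetime
#   >>> dt_to_hms(datetime.timedelta(days=1, hours=2, minutes=3, seconds=4))
#   (1, 2, 3, 4)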
def groupby_unsorted(a, key=lambda x: x, sort=False, get=lambda x: x):
"""
group the elements of the input-array and return it as a dict with a list
of the found values. optionally use a key- and a get- function.
if sort is True, a OrderedDict with sorted keys will be returned
roughly equivalent to:
>>> # if only the input-array a is provided
... {unique value of a: [found copies of the unique value]}
... # if a and a key-function is provided
... {key(a) : [...values with the same key(a)...]}
... # if both a key- and a get-function is provided
... {key(a) : [get(x) for x in ...values with the same key(a)...]}
"""
# always use an OrderedDict to ensure sort-order for python < 3.6
d = OrderedDict()
for item in a:
d.setdefault(key(item), []).append(get(item))
if sort is True:
return OrderedDict(sorted(d.items()))
else:
return d
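# Illustrative usage (sketch; the words are made up): group by first letter
# and keep only the word lengths
#   >>> groupby_unsorted(['apple', 'avocado', 'fig'], key=lambda s: s[0], get=len)
# returns an OrderedDict equivalent to {'a': [5, 7], 'f': [3]}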
def interpolate_to_index(data, index, data_index=None, **interp1d_kwargs):
"""
A wrapper around scipy.interp1d to interpolate a dataset to a given index
Parameters
----------
data : list, array-like, pandas.Series or pandas.DataFrame
The input-data as list, array, pandas.Series or pandas.DataFrame
If the data is provided as pandas Series or DataFrame, the index
must support a method .to_julian_date() to convert the timestamps
into numerical values.
index : array-like
the index to which the dataset should be interpolated.
It must support a method .to_julian_date()
data_index : array-like, optional
the index associated with the data if it is provided as a list or
numpy array. It must support a method .to_julian_date().
The default is None.
**interp1d_kwargs :
additional keyword-arguments passed to scipy.interpolate.interp1d
the default is (fill_value=None, bounds_error=False)
Returns
-------
pandas.Series or pandas.DataFrame
the input dataset interpolated to the given index.
"""
from pandas import Series, DataFrame
from scipy.interpolate import interp1d
kwargs = dict(fill_value=None, bounds_error=False)
kwargs.update(interp1d_kwargs)
if isinstance(data, Series):
# perform a linear interpolation to the auxiliary data timestamps
f = interp1d(data.index.to_julian_date(), data.values, **kwargs)
x = f(index.to_julian_date())
return Series(x, index)
elif isinstance(data, DataFrame):
f = interp1d(data.index.to_julian_date(), data.values, axis=0, **kwargs)
x = f(index.to_julian_date())
return DataFrame(x, index, columns=data.columns)
elif isinstance(data, (list, np.ndarray)):
assert data_index is not None, (
'you must provide "data_index" if data is provided as a list or array'
)
f = interp1d(data_index.to_julian_date(), data, **kwargs)
x = f(index.to_julian_date())
return Series(x, index)
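# Illustrative usage (sketch; the series below is made up): linearly
# interpolate a daily pandas Series to an intermediate timestamp.
#   >>> import pandas as pd
#   >>> s = pd.Series([0.0, 1.0],
#   ...               index=pd.to_datetime(['2020-01-01', '2020-01-03']))
#   >>> interpolate_to_index(s, pd.to_datetime(['2020-01-02']))
# returns a Series holding the single interpolated value 0.5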
| apache-2.0 |
lekshmideepu/nest-simulator | pynest/examples/gap_junctions_inhibitory_network.py | 8 | 5982 | # -*- coding: utf-8 -*-
#
# gap_junctions_inhibitory_network.py
#
# This file is part of NEST.
#
# Copyright (C) 2004 The NEST Initiative
#
# NEST is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 2 of the License, or
# (at your option) any later version.
#
# NEST is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with NEST. If not, see <http://www.gnu.org/licenses/>.
"""
Gap Junctions: Inhibitory network example
-----------------------------------------
This script simulates an inhibitory network of 500 Hodgkin-Huxley neurons.
Without the gap junctions (meaning for ``gap_weight = 0.0``) the network shows
an asynchronous irregular state that is caused by the external excitatory
Poissonian drive being balanced by the inhibitory feedback within the
network. With increasing `gap_weight` the network synchronizes:
For a lower gap weight of 0.3 nS the network remains in an asynchronous
state. With a weight of 0.54 nS the network switches randomly between the
asynchronous and the synchronous state, while for a gap weight of 0.7 nS a
stable synchronous state is reached.
This example is also used as test case 2 (see Figure 9 and 10)
in [1]_.
References
~~~~~~~~~~
.. [1] Hahne et al. (2015) A unified framework for spiking and gap-junction
interactions in distributed neuronal network simulations, Front.
Neuroinform. http://dx.doi.org/10.3389/neuro.11.012.2008
"""
import nest
import matplotlib.pyplot as plt
import numpy
n_neuron = 500
gap_per_neuron = 60
inh_per_neuron = 50
delay = 1.0
j_exc = 300.
j_inh = -50.
threads = 8
stepsize = 0.05
simtime = 501.
gap_weight = 0.3
nest.ResetKernel()
###############################################################################
# First we set the random seed, adjust the kernel settings and create
# ``hh_psc_alpha_gap`` neurons, ``spike_recorder`` and ``poisson_generator``.
numpy.random.seed(1)
nest.SetKernelStatus({'resolution': 0.05,
'total_num_virtual_procs': threads,
'print_time': True,
# Settings for waveform relaxation
# 'use_wfr': False uses communication in every step
# instead of an iterative solution
'use_wfr': True,
'wfr_comm_interval': 1.0,
'wfr_tol': 0.0001,
'wfr_max_iterations': 15,
'wfr_interpolation_order': 3})
neurons = nest.Create('hh_psc_alpha_gap', n_neuron)
sr = nest.Create("spike_recorder")
pg = nest.Create("poisson_generator", params={'rate': 500.0})
###############################################################################
# Each neuron shall receive ``inh_per_neuron = 50`` inhibitory synaptic inputs
# that are randomly selected from all other neurons, each with synaptic
# weight ``j_inh = -50.0`` pA and a synaptic delay of 1.0 ms. Furthermore each
# neuron shall receive an excitatory external Poissonian input of 500.0 Hz
# with synaptic weight ``j_exc = 300.0`` pA and the same delay.
# The desired connections are created with the following commands:
conn_dict = {'rule': 'fixed_indegree',
'indegree': inh_per_neuron,
'allow_autapses': False,
'allow_multapses': True}
syn_dict = {'synapse_model': 'static_synapse',
'weight': j_inh,
'delay': delay}
nest.Connect(neurons, neurons, conn_dict, syn_dict)
nest.Connect(pg, neurons, 'all_to_all',
syn_spec={'synapse_model': 'static_synapse',
'weight': j_exc,
'delay': delay})
###############################################################################
# Then the neurons are connected to the ``spike_recorder`` and the initial
# membrane potential of each neuron is set randomly between -40 and -80 mV.
nest.Connect(neurons, sr)
neurons.V_m = nest.random.uniform(min=-80., max=-40.)
#######################################################################################
# Finally gap junctions are added to the network. :math:`(60*500)/2` ``gap_junction``
# connections are added randomly resulting in an average of 60 gap-junction
# connections per neuron. We must not use the ``fixed_indegree`` or
# ``fixed_outdegree`` functionality of ``nest.Connect()`` to create the
# connections, as ``gap_junction`` connections are bidirectional connections
# and we need to make sure that the same neurons are connected in both ways.
# This is achieved by creating the connections on the Python level with the
# NumPy random module and connecting the neurons
# using the ``make_symmetric`` flag for ``one_to_one`` connections.
n_connection = int(n_neuron * gap_per_neuron / 2)
neuron_list = neurons.tolist()
connections = numpy.random.choice(neuron_list, [n_connection, 2])
for source_node_id, target_node_id in connections:
nest.Connect(nest.NodeCollection([source_node_id]),
nest.NodeCollection([target_node_id]),
{'rule': 'one_to_one', 'make_symmetric': True},
{'synapse_model': 'gap_junction', 'weight': gap_weight})
###############################################################################
# In the end we start the simulation and plot the spike pattern.
nest.Simulate(simtime)
events = sr.events
times = events['times']
spikes = events['senders']
n_spikes = sr.n_events
hz_rate = (1000.0 * n_spikes / simtime) / n_neuron
plt.figure(1)
plt.plot(times, spikes, 'o')
plt.title(f'Average spike rate (Hz): {hz_rate:.2f}')
plt.xlabel('time (ms)')
plt.ylabel('neuron no')
plt.show()
| gpl-2.0 |
ArtsiomCh/tensorflow | tensorflow/examples/get_started/regression/imports85.py | 24 | 6638 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""A dataset loader for imports85.data."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import numpy as np
import tensorflow as tf
try:
import pandas as pd # pylint: disable=g-import-not-at-top
except ImportError:
pass
URL = "https://archive.ics.uci.edu/ml/machine-learning-databases/autos/imports-85.data"
# Order is important for the csv-readers, so we use an OrderedDict here.
defaults = collections.OrderedDict([
("symboling", [0]),
("normalized-losses", [0.0]),
("make", [""]),
("fuel-type", [""]),
("aspiration", [""]),
("num-of-doors", [""]),
("body-style", [""]),
("drive-wheels", [""]),
("engine-location", [""]),
("wheel-base", [0.0]),
("length", [0.0]),
("width", [0.0]),
("height", [0.0]),
("curb-weight", [0.0]),
("engine-type", [""]),
("num-of-cylinders", [""]),
("engine-size", [0.0]),
("fuel-system", [""]),
("bore", [0.0]),
("stroke", [0.0]),
("compression-ratio", [0.0]),
("horsepower", [0.0]),
("peak-rpm", [0.0]),
("city-mpg", [0.0]),
("highway-mpg", [0.0]),
("price", [0.0])
]) # pyformat: disable
types = collections.OrderedDict((key, type(value[0]))
for key, value in defaults.items())
def _get_imports85():
path = tf.contrib.keras.utils.get_file(URL.split("/")[-1], URL)
return path
def dataset(y_name="price", train_fraction=0.7):
"""Load the imports85 data as a (train,test) pair of `Dataset`.
Each dataset generates (features_dict, label) pairs.
Args:
y_name: The name of the column to use as the label.
train_fraction: A float, the fraction of data to use for training. The
remainder will be used for evaluation.
Returns:
A (train,test) pair of `Datasets`
"""
# Download and cache the data
path = _get_imports85()
# Define how the lines of the file should be parsed
def decode_line(line):
"""Convert a csv line into a (features_dict,label) pair."""
# Decode the line to a tuple of items based on the types of
# csv_header.values().
items = tf.decode_csv(line, list(defaults.values()))
# Convert the keys and items to a dict.
pairs = zip(defaults.keys(), items)
features_dict = dict(pairs)
# Remove the label from the features_dict
label = features_dict.pop(y_name)
return features_dict, label
def has_no_question_marks(line):
"""Returns True if the line of text has no question marks."""
# split the line into an array of characters
chars = tf.string_split(line[tf.newaxis], "").values
# for each character check if it is a question mark
is_question = tf.equal(chars, "?")
any_question = tf.reduce_any(is_question)
no_question = ~any_question
return no_question
def in_training_set(line):
"""Returns a boolean tensor, true if the line is in the training set."""
# If you randomly split the dataset you won't get the same split in both
# sessions if you stop and restart training later. Also a simple
# random split won't work with a dataset that's too big to `.cache()` as
# we are doing here.
num_buckets = 1000000
bucket_id = tf.string_to_hash_bucket_fast(line, num_buckets)
# Use the hash bucket id as a random number that's deterministic per example
return bucket_id < int(train_fraction * num_buckets)
def in_test_set(line):
"""Returns a boolean tensor, true if the line is in the training set."""
# Items not in the training set are in the test set.
# This line must use `~` instead of `not` because `not` only works on python
# booleans but we are dealing with symbolic tensors.
return ~in_training_set(line)
base_dataset = (tf.contrib.data
# Get the lines from the file.
.TextLineDataset(path)
# drop lines with question marks.
.filter(has_no_question_marks))
train = (base_dataset
# Take only the training-set lines.
.filter(in_training_set)
# Decode each line into a (features_dict, label) pair.
.map(decode_line)
# Cache data so you only decode the file once.
.cache())
# Do the same for the test-set.
test = (base_dataset.filter(in_test_set).cache().map(decode_line))
return train, test
def raw_dataframe():
"""Load the imports85 data as a pd.DataFrame."""
# Download and cache the data
path = _get_imports85()
# Load it into a pandas dataframe
df = pd.read_csv(path, names=types.keys(), dtype=types, na_values="?")
return df
def load_data(y_name="price", train_fraction=0.7, seed=None):
"""Get the imports85 data set.
A description of the data is available at:
https://archive.ics.uci.edu/ml/datasets/automobile
The data itself can be found at:
https://archive.ics.uci.edu/ml/machine-learning-databases/autos/imports-85.data
Args:
y_name: the column to return as the label.
train_fraction: the fraction of the dataset to use for training.
seed: The random seed to use when shuffling the data. `None` generates a
unique shuffle every run.
Returns:
a pair of pairs where the first pair is the training data, and the second
is the test data:
`(x_train, y_train), (x_test, y_test) = get_imports85_dataset(...)`
`x` contains a pandas DataFrame of features, while `y` contains the label
array.
"""
# Load the raw data columns.
data = raw_dataframe()
# Delete rows with unknowns
data = data.dropna()
# Shuffle the data
np.random.seed(seed)
# Split the data into train/test subsets.
x_train = data.sample(frac=train_fraction, random_state=seed)
x_test = data.drop(x_train.index)
# Extract the label from the features dataframe.
y_train = x_train.pop(y_name)
y_test = x_test.pop(y_name)
return (x_train, y_train), (x_test, y_test)
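# Illustrative usage sketch (an addition, not part of the original module). It
# assumes pandas is installed, network access to the UCI archive, and the legacy
# tf.contrib API used above; the variable names below are hypothetical.
if __name__ == "__main__":
  (x_train, y_train), (x_test, y_test) = load_data(y_name="price", seed=0)
  print("train rows:", len(x_train), "test rows:", len(x_test))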
| apache-2.0 |
Jimmy-Morzaria/scikit-learn | sklearn/linear_model/tests/test_passive_aggressive.py | 121 | 6117 | import numpy as np
import scipy.sparse as sp
from sklearn.utils.testing import assert_less
from sklearn.utils.testing import assert_greater
from sklearn.utils.testing import assert_array_almost_equal, assert_array_equal
from sklearn.utils.testing import assert_raises
from sklearn.base import ClassifierMixin
from sklearn.utils import check_random_state
from sklearn.datasets import load_iris
from sklearn.linear_model import PassiveAggressiveClassifier
from sklearn.linear_model import PassiveAggressiveRegressor
iris = load_iris()
random_state = check_random_state(12)
indices = np.arange(iris.data.shape[0])
random_state.shuffle(indices)
X = iris.data[indices]
y = iris.target[indices]
X_csr = sp.csr_matrix(X)
class MyPassiveAggressive(ClassifierMixin):
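    """Reference implementation of the Passive-Aggressive update rules, used to
    cross-check sklearn's estimators: PA-I uses step = min(C, loss / ||x||^2) for
    the hinge / epsilon-insensitive losses, while PA-II uses
    step = loss / (||x||^2 + 1/(2C)) for the squared variants."""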
def __init__(self, C=1.0, epsilon=0.01, loss="hinge",
fit_intercept=True, n_iter=1, random_state=None):
self.C = C
self.epsilon = epsilon
self.loss = loss
self.fit_intercept = fit_intercept
self.n_iter = n_iter
def fit(self, X, y):
n_samples, n_features = X.shape
self.w = np.zeros(n_features, dtype=np.float64)
self.b = 0.0
for t in range(self.n_iter):
for i in range(n_samples):
p = self.project(X[i])
if self.loss in ("hinge", "squared_hinge"):
loss = max(1 - y[i] * p, 0)
else:
loss = max(np.abs(p - y[i]) - self.epsilon, 0)
sqnorm = np.dot(X[i], X[i])
if self.loss in ("hinge", "epsilon_insensitive"):
step = min(self.C, loss / sqnorm)
elif self.loss in ("squared_hinge",
"squared_epsilon_insensitive"):
step = loss / (sqnorm + 1.0 / (2 * self.C))
if self.loss in ("hinge", "squared_hinge"):
step *= y[i]
else:
step *= np.sign(y[i] - p)
self.w += step * X[i]
if self.fit_intercept:
self.b += step
def project(self, X):
return np.dot(X, self.w) + self.b
def test_classifier_accuracy():
for data in (X, X_csr):
for fit_intercept in (True, False):
clf = PassiveAggressiveClassifier(C=1.0, n_iter=30,
fit_intercept=fit_intercept,
random_state=0)
clf.fit(data, y)
score = clf.score(data, y)
assert_greater(score, 0.79)
def test_classifier_partial_fit():
classes = np.unique(y)
for data in (X, X_csr):
clf = PassiveAggressiveClassifier(C=1.0,
fit_intercept=True,
random_state=0)
for t in range(30):
clf.partial_fit(data, y, classes)
score = clf.score(data, y)
assert_greater(score, 0.79)
def test_classifier_refit():
# Classifier can be retrained on different labels and features.
clf = PassiveAggressiveClassifier().fit(X, y)
assert_array_equal(clf.classes_, np.unique(y))
clf.fit(X[:, :-1], iris.target_names[y])
assert_array_equal(clf.classes_, iris.target_names)
def test_classifier_correctness():
y_bin = y.copy()
y_bin[y != 1] = -1
for loss in ("hinge", "squared_hinge"):
clf1 = MyPassiveAggressive(C=1.0,
loss=loss,
fit_intercept=True,
n_iter=2)
clf1.fit(X, y_bin)
for data in (X, X_csr):
clf2 = PassiveAggressiveClassifier(C=1.0,
loss=loss,
fit_intercept=True,
n_iter=2, shuffle=False)
clf2.fit(data, y_bin)
assert_array_almost_equal(clf1.w, clf2.coef_.ravel(), decimal=2)
def test_classifier_undefined_methods():
clf = PassiveAggressiveClassifier()
for meth in ("predict_proba", "predict_log_proba", "transform"):
assert_raises(AttributeError, lambda x: getattr(clf, x), meth)
def test_regressor_mse():
y_bin = y.copy()
y_bin[y != 1] = -1
for data in (X, X_csr):
for fit_intercept in (True, False):
reg = PassiveAggressiveRegressor(C=1.0, n_iter=50,
fit_intercept=fit_intercept,
random_state=0)
reg.fit(data, y_bin)
pred = reg.predict(data)
assert_less(np.mean((pred - y_bin) ** 2), 1.7)
def test_regressor_partial_fit():
y_bin = y.copy()
y_bin[y != 1] = -1
for data in (X, X_csr):
reg = PassiveAggressiveRegressor(C=1.0,
fit_intercept=True,
random_state=0)
for t in range(50):
reg.partial_fit(data, y_bin)
pred = reg.predict(data)
assert_less(np.mean((pred - y_bin) ** 2), 1.7)
def test_regressor_correctness():
y_bin = y.copy()
y_bin[y != 1] = -1
for loss in ("epsilon_insensitive", "squared_epsilon_insensitive"):
reg1 = MyPassiveAggressive(C=1.0,
loss=loss,
fit_intercept=True,
n_iter=2)
reg1.fit(X, y_bin)
for data in (X, X_csr):
reg2 = PassiveAggressiveRegressor(C=1.0,
loss=loss,
fit_intercept=True,
n_iter=2, shuffle=False)
reg2.fit(data, y_bin)
assert_array_almost_equal(reg1.w, reg2.coef_.ravel(), decimal=2)
def test_regressor_undefined_methods():
reg = PassiveAggressiveRegressor()
for meth in ("transform",):
assert_raises(AttributeError, lambda x: getattr(reg, x), meth)
| bsd-3-clause |
adrpar/incubator-airflow | airflow/contrib/hooks/salesforce_hook.py | 30 | 12110 | # -*- coding: utf-8 -*-
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""
This module contains a Salesforce Hook
which allows you to connect to your Salesforce instance,
retrieve data from it, and write that data to a file
for other uses.
NOTE: this hook also relies on the simple_salesforce package:
https://github.com/simple-salesforce/simple-salesforce
"""
from simple_salesforce import Salesforce
from airflow.hooks.base_hook import BaseHook
import logging
import json
import pandas as pd
import time
class SalesforceHook(BaseHook):
def __init__(
self,
conn_id,
*args,
**kwargs
):
"""
        Creates a new connection to Salesforce
and allows you to pull data out of SFDC and save it to a file.
You can then use that file with other
Airflow operators to move the data into another data source
:param conn_id: the name of the connection that has the parameters
we need to connect to Salesforce.
            The connection should be type `http` and include a
user's security token in the `Extras` field.
.. note::
For the HTTP connection type, you can include a
JSON structure in the `Extras` field.
We need a user's security token to connect to Salesforce.
So we define it in the `Extras` field as:
                `{"security_token":"YOUR_SECURITY_TOKEN"}`
"""
self.conn_id = conn_id
self._args = args
self._kwargs = kwargs
# get the connection parameters
self.connection = self.get_connection(conn_id)
self.extras = self.connection.extra_dejson
def sign_in(self):
"""
Sign into Salesforce.
        If we have already signed in, this will just return the original object
"""
if hasattr(self, 'sf'):
return self.sf
# connect to Salesforce
sf = Salesforce(
username=self.connection.login,
password=self.connection.password,
security_token=self.extras['security_token'],
instance_url=self.connection.host
)
self.sf = sf
return sf
def make_query(self, query):
"""
Make a query to Salesforce. Returns result in dictionary
:param query: The query to make to Salesforce
"""
self.sign_in()
logging.info("Querying for all objects")
query = self.sf.query_all(query)
logging.info(
"Received results: Total size: {0}; Done: {1}".format(
query['totalSize'], query['done']
)
)
query = json.loads(json.dumps(query))
return query
def describe_object(self, obj):
"""
Get the description of an object from Salesforce.
This description is the object's schema
and some extra metadata that Salesforce stores for each object
:param obj: Name of the Salesforce object
that we are getting a description of.
"""
self.sign_in()
return json.loads(json.dumps(self.sf.__getattr__(obj).describe()))
def get_available_fields(self, obj):
"""
Get a list of all available fields for an object.
This only returns the names of the fields.
"""
self.sign_in()
desc = self.describe_object(obj)
return [f['name'] for f in desc['fields']]
def _build_field_list(self, fields):
        # join all of the fields in a comma-separated list
return ",".join(fields)
def get_object_from_salesforce(self, obj, fields):
"""
Get all instances of the `object` from Salesforce.
For each model, only get the fields specified in fields.
All we really do underneath the hood is run:
SELECT <fields> FROM <obj>;
"""
field_string = self._build_field_list(fields)
query = "SELECT {0} FROM {1}".format(field_string, obj)
logging.info(
"Making query to salesforce: {0}".format(
query if len(query) < 30
else " ... ".join([query[:15], query[-15:]])
)
)
return self.make_query(query)
@classmethod
def _to_timestamp(cls, col):
"""
Convert a column of a dataframe to UNIX timestamps if applicable
:param col: A Series object representing a column of a dataframe.
"""
# try and convert the column to datetimes
# the column MUST have a four digit year somewhere in the string
# there should be a better way to do this,
# but just letting pandas try and convert every column without a format
# caused it to convert floats as well
# For example, a column of integers
# between 0 and 10 are turned into timestamps
# if the column cannot be converted,
# just return the original column untouched
try:
col = pd.to_datetime(col)
except ValueError:
logging.warning(
"Could not convert field to timestamps: {0}".format(col.name)
)
return col
# now convert the newly created datetimes into timestamps
# we have to be careful here
# because NaT cannot be converted to a timestamp
# so we have to return NaN
converted = []
for i in col:
try:
converted.append(i.timestamp())
except ValueError:
converted.append(pd.np.NaN)
except AttributeError:
converted.append(pd.np.NaN)
# return a new series that maintains the same index as the original
return pd.Series(converted, index=col.index)
def write_object_to_file(
self,
query_results,
filename,
fmt="csv",
coerce_to_timestamp=False,
record_time_added=False
):
"""
Write query results to file.
Acceptable formats are:
- csv:
                comma-separated-values file. This is the default format.
- json:
JSON array. Each element in the array is a different row.
- ndjson:
                JSON array but each element is newline-delimited
                instead of comma-delimited like in `json`
This requires a significant amount of cleanup.
Pandas doesn't handle output to CSV and json in a uniform way.
This is especially painful for datetime types.
Pandas wants to write them as strings in CSV,
        but as millisecond Unix timestamps in JSON.
By default, this function will try and leave all values as
they are represented in Salesforce.
You use the `coerce_to_timestamp` flag to force all datetimes
to become Unix timestamps (UTC).
        This can be greatly beneficial as it will make all of your
datetime fields look the same,
and makes it easier to work with in other database environments
:param query_results: the results from a SQL query
:param filename: the name of the file where the data
should be dumped to
:param fmt: the format you want the output in.
*Default:* csv.
:param coerce_to_timestamp: True if you want all datetime fields to be
converted into Unix timestamps.
False if you want them to be left in the
same format as they were in Salesforce.
Leaving the value as False will result
in datetimes being strings.
*Defaults to False*
:param record_time_added: *(optional)* True if you want to add a
Unix timestamp field to the resulting data
that marks when the data
was fetched from Salesforce.
*Default: False*.
"""
fmt = fmt.lower()
if fmt not in ['csv', 'json', 'ndjson']:
raise ValueError("Format value is not recognized: {0}".format(fmt))
# this line right here will convert all integers to floats if there are
# any None/np.nan values in the column
# that's because None/np.nan cannot exist in an integer column
# we should write all of our timestamps as FLOATS in our final schema
df = pd.DataFrame.from_records(query_results, exclude=["attributes"])
df.columns = [c.lower() for c in df.columns]
# convert columns with datetime strings to datetimes
# not all strings will be datetimes, so we ignore any errors that occur
# we get the object's definition at this point and only consider
# features that are DATE or DATETIME
if coerce_to_timestamp and df.shape[0] > 0:
# get the object name out of the query results
# it's stored in the "attributes" dictionary
# for each returned record
object_name = query_results[0]['attributes']['type']
logging.info("Coercing timestamps for: {0}".format(object_name))
schema = self.describe_object(object_name)
            # possible columns that can be converted to timestamps
# are the ones that are either date or datetime types
# strings are too general and we risk unintentional conversion
possible_timestamp_cols = [
i['name'].lower()
for i in schema['fields']
if i['type'] in ["date", "datetime"] and
i['name'].lower() in df.columns
]
df[possible_timestamp_cols] = df[possible_timestamp_cols].apply(
lambda x: self._to_timestamp(x)
)
if record_time_added:
fetched_time = time.time()
df["time_fetched_from_salesforce"] = fetched_time
# write the CSV or JSON file depending on the option
# NOTE:
# datetimes here are an issue.
# There is no good way to manage the difference
# for to_json, the options are an epoch or a ISO string
# but for to_csv, it will be a string output by datetime
# For JSON we decided to output the epoch timestamp in seconds
# (as is fairly standard for JavaScript)
# And for csv, we do a string
if fmt == "csv":
# there are also a ton of newline objects
# that mess up our ability to write to csv
# we remove these newlines so that the output is a valid CSV format
logging.info("Cleaning data and writing to CSV")
possible_strings = df.columns[df.dtypes == "object"]
df[possible_strings] = df[possible_strings].apply(
lambda x: x.str.replace("\r\n", "")
)
df[possible_strings] = df[possible_strings].apply(
lambda x: x.str.replace("\n", "")
)
# write the dataframe
df.to_csv(filename, index=False)
elif fmt == "json":
df.to_json(filename, "records", date_unit="s")
elif fmt == "ndjson":
df.to_json(filename, "records", lines=True, date_unit="s")
return df
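# Illustrative usage sketch (an addition, not part of the original module). It
# requires a configured Airflow connection holding Salesforce credentials;
# 'salesforce_default' and the Contact fields below are hypothetical.
#
#   hook = SalesforceHook(conn_id='salesforce_default')
#   query = hook.get_object_from_salesforce('Contact', ['Id', 'LastName', 'CreatedDate'])
#   hook.write_object_to_file(query['records'], filename='contacts.csv',
#                             fmt='csv', coerce_to_timestamp=True)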
| apache-2.0 |
shyamalschandra/scikit-learn | examples/classification/plot_digits_classification.py | 289 | 2397 | """
================================
Recognizing hand-written digits
================================
An example showing how scikit-learn can be used to recognize images of
hand-written digits.
This example is commented in the
:ref:`tutorial section of the user manual <introduction>`.
"""
print(__doc__)
# Author: Gael Varoquaux <gael dot varoquaux at normalesup dot org>
# License: BSD 3 clause
# Standard scientific Python imports
import matplotlib.pyplot as plt
# Import datasets, classifiers and performance metrics
from sklearn import datasets, svm, metrics
# The digits dataset
digits = datasets.load_digits()
# The data that we are interested in is made of 8x8 images of digits, let's
# have a look at the first 3 images, stored in the `images` attribute of the
# dataset. If we were working from image files, we could load them using
# pylab.imread. Note that each image must have the same size. For these
# images, we know which digit they represent: it is given in the 'target' of
# the dataset.
images_and_labels = list(zip(digits.images, digits.target))
for index, (image, label) in enumerate(images_and_labels[:4]):
plt.subplot(2, 4, index + 1)
plt.axis('off')
plt.imshow(image, cmap=plt.cm.gray_r, interpolation='nearest')
plt.title('Training: %i' % label)
# To apply a classifier on this data, we need to flatten the image, to
# turn the data in a (samples, feature) matrix:
n_samples = len(digits.images)
data = digits.images.reshape((n_samples, -1))
# Create a classifier: a support vector classifier
classifier = svm.SVC(gamma=0.001)
# We learn the digits on the first half of the digits
classifier.fit(data[:n_samples // 2], digits.target[:n_samples // 2])
# Now predict the value of the digit on the second half:
expected = digits.target[n_samples // 2:]
predicted = classifier.predict(data[n_samples // 2:])
print("Classification report for classifier %s:\n%s\n"
% (classifier, metrics.classification_report(expected, predicted)))
print("Confusion matrix:\n%s" % metrics.confusion_matrix(expected, predicted))
images_and_predictions = list(zip(digits.images[n_samples // 2:], predicted))
for index, (image, prediction) in enumerate(images_and_predictions[:4]):
plt.subplot(2, 4, index + 5)
plt.axis('off')
plt.imshow(image, cmap=plt.cm.gray_r, interpolation='nearest')
plt.title('Prediction: %i' % prediction)
plt.show()
| bsd-3-clause |
wesleyegberto/courses-projects | ia/machine-learning-algoritmos-classificacao/situacao_cliente_2_kfold.py | 1 | 3916 | #!-*- encoding: utf-8 -*-
"""
Code 2.
Same scenario as code 1, but training is done using the k-fold technique.
This technique consists of training the model over permutations of the parts of the
training data, which is divided into k parts: one of the parts is used for testing
and the remainder for training. After this first
round, another round of training is done using a different part.
(See the illustrative KFold sketch after the imports below.)
Ex.: Data set with 3 parts: A, B and C.
First round: train with A and B, then test with C.
Second round: train with A and C, then test with B.
Third round: train with B and C, then test with A.
"""
import pandas as pd
from collections import Counter
import numpy as np
# from sklearn.cross_validation import cross_val_score # Python 2
from sklearn.model_selection import cross_val_score # Python 3
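# Illustrative sketch (an addition, not part of the original script) of the 3-part
# split described in the docstring above, using scikit-learn's KFold:
#   from sklearn.model_selection import KFold
#   for train_idx, test_idx in KFold(n_splits=3).split(list(range(9))):
#       print(train_idx, test_idx)  # trains on two parts, tests on the third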
df = pd.read_csv('situacao_cliente.csv')
X_df = df[['recencia','frequencia', 'semanas_de_inscricao']]
Y_df = df['situacao']
Xdummies_df = pd.get_dummies(X_df)
Ydummies_df = Y_df
X = Xdummies_df.values
Y = Ydummies_df.values
porcentagem_de_treino = 0.8
tamanho_de_treino = int(porcentagem_de_treino * len(Y))
tamanho_de_validacao = len(Y) - tamanho_de_treino
treino_dados = X[:tamanho_de_treino]
treino_marcacoes = Y[:tamanho_de_treino]
validacao_dados = X[tamanho_de_treino:]
validacao_marcacoes = Y[tamanho_de_treino:]
"""
Performs training and testing of the model using K-Fold.
"""
def fit_and_predict(nome, modelo, treino_dados, treino_marcacoes):
    k = 10 # number of splits (folds)
scores = cross_val_score(modelo, treino_dados, treino_marcacoes, cv = k)
media_taxa_acerto = np.mean(scores)
print("Média de acerto do algoritmo {0}: {1}".format(nome, media_taxa_acerto))
return media_taxa_acerto
# Validate the model
def teste_real(modelo, validacao_dados, validacao_marcacoes):
resultado = modelo.predict(validacao_dados)
acertos = resultado == validacao_marcacoes
total_de_acertos = sum(acertos)
total_de_elementos = len(validacao_marcacoes)
taxa_de_acerto = 100.0 * total_de_acertos / total_de_elementos
msg = "Taxa de acerto do vencedor entre os dois algoritmos no mundo real: {0}".format(taxa_de_acerto)
print(msg)
resultados = {}
from sklearn.naive_bayes import MultinomialNB
modeloMultinomial = MultinomialNB()
resultadoMultinomial = fit_and_predict("MultinomialNB", modeloMultinomial, treino_dados, treino_marcacoes)
resultados[resultadoMultinomial] = modeloMultinomial
from sklearn.ensemble import AdaBoostClassifier
modeloAdaBoost = AdaBoostClassifier(random_state = 0)
resultadoAdaBoost = fit_and_predict("AdaBoostClassifier", modeloAdaBoost, treino_dados, treino_marcacoes)
resultados[resultadoAdaBoost] = modeloAdaBoost
# One-vs-Rest algorithm using LinearSVC
from sklearn.multiclass import OneVsRestClassifier
from sklearn.svm import LinearSVC
modeloOneVsRest = OneVsRestClassifier(LinearSVC(random_state = 0)) # removes randomization (always the same result)
resultadoOneVsRest = fit_and_predict("OneVsRest", modeloOneVsRest, treino_dados, treino_marcacoes)
resultados[resultadoOneVsRest] = modeloOneVsRest
# One-vs-One algorithm (all categories are tested against each other)
from sklearn.multiclass import OneVsOneClassifier
modeloOneVsOne = OneVsOneClassifier(LinearSVC(random_state = 0))
resultadoOneVsOne = fit_and_predict("OneVsOne", modeloOneVsOne, treino_dados, treino_marcacoes)
resultados[resultadoOneVsOne] = modeloOneVsOne
maximo = max(resultados)
vencedor = resultados[maximo]
print("Vencedor: ")
print(vencedor)
# Train the winning model
vencedor.fit(treino_dados, treino_marcacoes)
teste_real(vencedor, validacao_dados, validacao_marcacoes)
acerto_base = max(Counter(validacao_marcacoes).values())
taxa_de_acerto_base = 100.0 * acerto_base / len(validacao_marcacoes)
print("Taxa de acerto base: %f" % taxa_de_acerto_base)
total_de_elementos = len(validacao_dados)
print("Total de teste: %d" % total_de_elementos) | apache-2.0 |
bmazin/SDR | Projects/FirmwareTests/pfb/loadWaveLut.py | 1 | 10907 | import matplotlib, time, struct
import numpy as np
import matplotlib.pyplot as plt
import casperfpga
import corr
import logging
from myQdr import Qdr as myQdr
import types
import sys
from Utils import bin
import functools
def loadQdrInFirmware(fpga,loadChoice=0,blockPrefix='dds_lut_',nQdrRows=2**20):
"""
load the qdr with values within the fpga
loadChoice can be 0 for loading all zeros
1 for loading all ones
2 for loading all twos
3 for loading a sequence that matches the row number
4 to set 32 bits to 1
5 to load 36 bits to 1
6 to set every other bit in 32 bits to 1, starting with a 1
7 to set every other bit in 36 bits to 1,
"""
fpga.write_int(blockPrefix+'n_qdr_rows',nQdrRows)
fpga.write_int(blockPrefix+'test_load_qdr',0)
fpga.write_int(blockPrefix+'load_data_choice',loadChoice) #2, for all twos
time.sleep(.2)
fpga.write_int(blockPrefix+'test_load_qdr',1)
time.sleep(2)
fpga.write_int(blockPrefix+'test_load_qdr',0)
time.sleep(1)
def readMemory(roach,memName,nSamples,nBytesPerSample=4,bQdrFlip=False):
"""
read a byte string from a bram or qdr, and parse it into an array
"""
if nBytesPerSample == 4:
formatChar = 'L'
elif nBytesPerSample == 8:
formatChar = 'Q'
else:
raise TypeError('nBytesPerSample must be 4 or 8')
memStr = roach.read(memName,nSamples*nBytesPerSample)
memValues = np.array(list(struct.unpack('>{}{}'.format(nSamples,formatChar),memStr)),dtype=np.uint64)
if bQdrFlip:
memValues = np.right_shift(memValues,32)+np.left_shift(np.bitwise_and(memValues, int('1'*32,2)),32)
#Unfortunately, with the current qdr calibration, the addresses in katcp and firmware are shifted (rolled) relative to each other
#so to compensate we roll the values to write here
#this will work if you are reading the same length vector that you wrote (and rolled) in katcp
memValues = np.roll(memValues,1)
return list(memValues)
def writeBram(fpga,memName,valuesToWrite,start=0,nRows=2**10):
nBytesPerSample = 8
formatChar = 'Q'
memValues = np.array(valuesToWrite,dtype=np.uint64) #cast signed values
nValues = len(valuesToWrite)
toWriteStr = struct.pack('>{}{}'.format(nValues,formatChar),*memValues)
fpga.blindwrite(memName,toWriteStr,start)
def writeQdr(fpga,memName,valuesToWrite,start=0,bQdrFlip=True,nQdrRows=2**20):
nBytesPerSample = 8
formatChar = 'Q'
memValues = np.array(valuesToWrite,dtype=np.uint64) #cast signed values
nValues = len(valuesToWrite)
if bQdrFlip: #For some reason, on Roach2 with the current qdr calibration, the 64 bit word seen in firmware
        #has the first and second 32 bit chunks swapped compared to the 64 bit word sent by katcp, so to accommodate
#we swap those chunks here, so they will be in the right order in firmware
mask32 = int('1'*32,2)
memValues = (memValues >> 32)+((memValues & mask32) << 32)
#Unfortunately, with the current qdr calibration, the addresses in katcp and firmware are shifted (rolled) relative to each other
#so to compensate we roll the values to write here
memValues = np.roll(memValues,-1)
toWriteStr = struct.pack('>{}{}'.format(nValues,formatChar),*memValues)
fpga.blindwrite(memName,toWriteStr,start)
def generateTones(freqs,nSamples=2**23,amplitudes=None,phases=None,sampleRate=2e9,nBitsPerSampleComponent=12,dynamicRange=1.):
freqs = np.array(freqs)
nFreqs = len(freqs)
freqResolution = 1.*sampleRate / nSamples
#make a list of frequencies that we can generate with our limited frequency resolution that are closest
# to the requested frequencies
quantFreqs = freqResolution * np.round(freqs/freqResolution)
print quantFreqs
maxValue = int(np.round(dynamicRange*2**(nBitsPerSampleComponent - 1)-1)) #1 bit for sign
if phases is None:
phases = np.random.uniform(0,2.*np.pi,nFreqs)
dt = 1. / sampleRate
t = dt*np.arange(nSamples)
iValList = []
qValList = []
for iFreq,freq in enumerate(quantFreqs):
phi = 2.*np.pi*freq*t
iValues = np.cos(phi+phases[iFreq])
qValues = np.sin(phi+phases[iFreq])
iValList.append(iValues)
qValList.append(qValues)
iValues = np.sum(iValList,axis=0)
qValues = np.sum(qValList,axis=0)
highestVal = np.max((np.abs(iValues).max(),np.abs(qValues).max()))
iValues = np.array(np.round(maxValue / highestVal * iValues),dtype=np.int)
qValues = np.array(np.round(maxValue / highestVal * qValues),dtype=np.int)
return {'I':iValues,'Q':qValues,'quantizedFreqs':quantFreqs}
def formatWaveForMem(iVals,qVals,nBitsPerSamplePair=24,nSamplesPerCycle=8,nMems=3,nBitsPerMemRow=64):
nBitsPerSampleComponent = nBitsPerSamplePair / 2
#I vals and Q vals are 12 bits, combine them into 24 bit vals
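    # (e.g. two hypothetical positive 12-bit samples I=0x0AB, Q=0x0CD pack to 0x0AB0CD)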
iqVals = (iVals << nBitsPerSampleComponent) + qVals
iqRows = np.reshape(iqVals,(-1,nSamplesPerCycle))
#shift earlier (leftmost columns) the most
#we need to set dtype to object to use python's native long type
colBitShifts = nBitsPerSamplePair*(np.arange(nSamplesPerCycle,dtype=object)[::-1])
iqRowVals = np.sum(iqRows<<colBitShifts,axis=1) #shift each col by specified amount, and sum each row
    #Now we have 2**20 row values, each is 192 bits and contains 8 IQ pairs
    #next we divide these 192-bit rows into three 64-bit qdr rows
#Mem0 has the most significant bits
memRowBitmask = int('1'*nBitsPerMemRow,2)
memMaskShifts = nBitsPerMemRow*np.arange(nMems,dtype=object)[::-1]
#now do bitwise_and each value with the mask, and shift back down
memRowVals = (iqRowVals[:,np.newaxis] >> memMaskShifts) & memRowBitmask
#now each column contains the 64-bit qdr values to be sent to a particular qdr
return memRowVals
def loadWaveToMem(fpga,waveFreqs=[10.4e6],phases=None,sampleRate=2.e9,nSamplesPerCycle=8,nSamples=2**23,nBytesPerMemSample=8,nBitsPerSamplePair=24,memNames = ['qdr0_memory','qdr1_memory','qdr2_memory'],memType='qdr',dynamicRange=1.):
#define some globals
#startRegisterName = 'run'
#startRegisterName = 'test_load_qdr'
nBitsPerMemRow = nBytesPerMemSample*8 #64
nBitsPerSampleComponent = nBitsPerSamplePair/2
nMems = len(memNames)
nMemRowsToUse = nSamples/nSamplesPerCycle
tone = generateTones(waveFreqs,phases=phases,nSamples=nSamples,sampleRate=sampleRate,dynamicRange=dynamicRange)
complexTone = tone['I'] + 1j*tone['Q']
iVals,qVals = tone['I'],tone['Q']
memVals = formatWaveForMem(iVals,qVals,nMems=nMems,nBitsPerSamplePair=nBitsPerSamplePair,nSamplesPerCycle=nSamplesPerCycle,nBitsPerMemRow=nBitsPerMemRow)
#fpga.write_int(startRegisterName,0) #halt reading from mem while writing
time.sleep(.1)
#loadQdrInFirmware(fpga,loadChoice=0) #clear mem
fpga.write_int('dds_lut_n_qdr_rows',nMemRowsToUse)
for iMem in xrange(nMems):
if memType == 'qdr':
writeQdr(fpga,memName=memNames[iMem],valuesToWrite=memVals[:,iMem],bQdrFlip=(memType=='qdr'))
elif memType == 'bram':
writeBram(fpga,memName=memNames[iMem],valuesToWrite=memVals[:,iMem])
time.sleep(.5)
#fpga.write_int(startRegisterName,1) #start reading from mem in firmware
#np.savez('tone.npz',complexTone=complexTone,tone=tone,sampleRate=sampleRate,memVals=memVals,waveFreqs=waveFreqs,quantFreqs=tone['quantizedFreqs'])
return {'tone':complexTone,'memVals':memVals,'quantFreqs':tone['quantizedFreqs']}
if __name__=='__main__':
if len(sys.argv) > 1:
ip = sys.argv[1]
else:
ip='10.0.0.112'
fpga = casperfpga.katcp_fpga.KatcpFpga(ip,timeout=50.)
if not fpga.is_running():
print 'Firmware is not running, start firmware and calibrate first!'
exit(0)
fpga.get_system_information()
print 'Fpga Clock Rate:',fpga.estimate_fpga_clock()
time.sleep(1)
np.random.seed(0)
instrument = 'darkness'
memType = 'bram'
#memType = 'qdr'
if instrument == 'arcons':
sampleRate = 512.e6
nSamplesPerCycle = 2
nBins = 512
snapshotNames = ['bin0','bin1']
if memType == 'qdr':
memNames = ['qdr0_memory']
elif instrument == 'darkness':
sampleRate = 2.e9
nSamplesPerCycle = 8
nBins = 2048
snapshotNames = ['bin0','bin1','bin2','bin3','bin4','bin5','bin6','bin7']
if memType == 'qdr':
memNames = ['qdr0_memory','qdr1_memory','qdr2_memory']
elif memType == 'bram':
memNames = ['dds_lut_mem0','dds_lut_mem1','dds_lut_mem2']
else:
print 'unrecognized instrument',instrument
exit(1)
nQdrRowsToUse = 2**10
nQdrRows = 2**20
nBytesPerQdrSample = 8
nBitsPerSamplePair = 24
nSamples = nSamplesPerCycle*nQdrRowsToUse
binSpacing = sampleRate/nBins
freq = 10.*binSpacing
startRegisterName = 'run'
if memType == 'qdr':
fpga.write_int(startRegisterName,0) #halt reading from mem while writing
elif memType == 'bram':
fpga.write_int(startRegisterName,1) #halt firmware writing from mem while writing
loadDict = loadWaveToMem(fpga,waveFreqs=[freq],phases=None,sampleRate=sampleRate,nSamplesPerCycle=nSamplesPerCycle,nBytesPerMemSample=nBytesPerQdrSample,nBitsPerSamplePair=nBitsPerSamplePair,memNames = memNames,nSamples=nSamples,memType=memType)
fpga.write_int(startRegisterName,1) #halt reading from mem while writing
#should be running
# fill = np.zeros(1024)
# fpga.write_int(startRegisterName,0)
# writeQdr(fpga,memNames=qdrMemNames,valuesToWrite=fill,start=8*len(fill),bQdrFlip=True,nQdrRows=2**20)
# fpga.write_int(startRegisterName,1)
memVals = loadDict['memVals']
snapNames = ['dds_lut_dataOut0','dds_lut_dataOut1','dds_lut_dataOut2','dds_lut_addr']
snaps = [fpga.snapshots[name] for name in snapNames]
fpga.write_int('dds_lut_addrTrig',0) #set the address to first trigger on
for snap in snaps:
snap.arm()
time.sleep(.1)
fpga.write_int('dds_lut_snap',1)#trigger snapshots
snapData = [np.array(snap.read(timeout=10)['data']['data'],dtype=object) for snap in snaps]
snapData = np.array(snapData)
snapData = snapData.T
fpga.write_int('dds_lut_snap',0)#trigger snapshots
readQ = readMemory(fpga,memNames[0],nSamples=nQdrRowsToUse,nBytesPerSample=8,bQdrFlip=(memType=='qdr'))
readQ = np.array(readQ,dtype=object)
readQ = readQ[:,np.newaxis]
print 'snap','qdrOut','katcpRead'
print snapData[0,:],memVals[0,:],readQ[0,:]
#print '%X'%memVals[0,0],'is',memVals[0,0]
np.savetxt('memVals.txt',memVals,fmt='%X')
np.savetxt('snapData.txt',snapData,fmt='%X')
print 'Error Count:',np.sum(snapData[:nQdrRowsToUse,:-1] != memVals[:,:])
print 'done!'
| gpl-2.0 |
poojavade/Genomics_Docker | Dockerfiles/gedlab-khmer-filter-abund/pymodules/python2.7/lib/python/misopy/kde_subclass.py | 3 | 6199 |
'''subclassing kde
Author: josef pktd
'''
import numpy as np
import scipy
from scipy import stats
import matplotlib.pylab as plt
class gaussian_kde_set_covariance(stats.gaussian_kde):
'''
from Anne Archibald in mailinglist:
http://www.nabble.com/Width-of-the-gaussian-in-stats.kde.gaussian_kde---td19558924.html#a19558924
'''
def __init__(self, dataset, covariance):
self.covariance = covariance
scipy.stats.gaussian_kde.__init__(self, dataset)
def _compute_covariance(self):
self.inv_cov = np.linalg.inv(self.covariance)
        self._norm_factor = np.sqrt(np.linalg.det(2*np.pi*self.covariance)) * self.n
class gaussian_kde_covfact(stats.gaussian_kde):
def __init__(self, dataset, covfact = 'scotts'):
self.covfact = covfact
scipy.stats.gaussian_kde.__init__(self, dataset)
def _compute_covariance_(self):
'''not used'''
self.inv_cov = np.linalg.inv(self.covariance)
        self._norm_factor = np.sqrt(np.linalg.det(2*np.pi*self.covariance)) * self.n
def covariance_factor(self):
if self.covfact in ['sc', 'scotts']:
return self.scotts_factor()
if self.covfact in ['si', 'silverman']:
return self.silverman_factor()
elif self.covfact:
return float(self.covfact)
else:
raise ValueError, \
'covariance factor has to be scotts, silverman or a number'
def reset_covfact(self, covfact):
self.covfact = covfact
self.covariance_factor()
self._compute_covariance()
def plotkde(covfact):
gkde.reset_covfact(covfact)
kdepdf = gkde.evaluate(ind)
plt.figure()
# plot histgram of sample
plt.hist(xn, bins=20, normed=1)
# plot estimated density
plt.plot(ind, kdepdf, label='kde', color="g")
# plot data generating density
plt.plot(ind, alpha * stats.norm.pdf(ind, loc=mlow) +
(1-alpha) * stats.norm.pdf(ind, loc=mhigh),
color="r", label='DGP: normal mix')
plt.title('Kernel Density Estimation - ' + str(gkde.covfact))
plt.legend()
from numpy.testing import assert_array_almost_equal, \
assert_almost_equal, assert_
def test_kde_1d():
np.random.seed(8765678)
n_basesample = 500
xn = np.random.randn(n_basesample)
xnmean = xn.mean()
xnstd = xn.std(ddof=1)
print xnmean, xnstd
# get kde for original sample
gkde = stats.gaussian_kde(xn)
    # evaluate the density function for the kde for some points
xs = np.linspace(-7,7,501)
kdepdf = gkde.evaluate(xs)
normpdf = stats.norm.pdf(xs, loc=xnmean, scale=xnstd)
print 'MSE', np.sum((kdepdf - normpdf)**2)
print 'axabserror', np.max(np.abs(kdepdf - normpdf))
intervall = xs[1] - xs[0]
assert_(np.sum((kdepdf - normpdf)**2)*intervall < 0.01)
#assert_array_almost_equal(kdepdf, normpdf, decimal=2)
print gkde.integrate_gaussian(0.0, 1.0)
print gkde.integrate_box_1d(-np.inf, 0.0)
print gkde.integrate_box_1d(0.0, np.inf)
print gkde.integrate_box_1d(-np.inf, xnmean)
print gkde.integrate_box_1d(xnmean, np.inf)
assert_almost_equal(gkde.integrate_box_1d(xnmean, np.inf), 0.5, decimal=1)
assert_almost_equal(gkde.integrate_box_1d(-np.inf, xnmean), 0.5, decimal=1)
assert_almost_equal(gkde.integrate_box(xnmean, np.inf), 0.5, decimal=1)
assert_almost_equal(gkde.integrate_box(-np.inf, xnmean), 0.5, decimal=1)
assert_almost_equal(gkde.integrate_kde(gkde),
(kdepdf**2).sum()*intervall, decimal=2)
assert_almost_equal(gkde.integrate_gaussian(xnmean, xnstd**2),
(kdepdf*normpdf).sum()*intervall, decimal=2)
## assert_almost_equal(gkde.integrate_gaussian(0.0, 1.0),
## (kdepdf*normpdf).sum()*intervall, decimal=2)
if __name__ == '__main__':
# generate a sample
n_basesample = 1000
np.random.seed(8765678)
alpha = 0.6 #weight for (prob of) lower distribution
mlow, mhigh = (-3,3) #mean locations for gaussian mixture
xn = np.concatenate([mlow + np.random.randn(alpha * n_basesample),
mhigh + np.random.randn((1-alpha) * n_basesample)])
# get kde for original sample
#gkde = stats.gaussian_kde(xn)
gkde = gaussian_kde_covfact(xn, 0.1)
    # evaluate the density function for the kde for some points
ind = np.linspace(-7,7,101)
kdepdf = gkde.evaluate(ind)
plt.figure()
# plot histgram of sample
plt.hist(xn, bins=20, normed=1)
# plot estimated density
plt.plot(ind, kdepdf, label='kde', color="g")
# plot data generating density
plt.plot(ind, alpha * stats.norm.pdf(ind, loc=mlow) +
(1-alpha) * stats.norm.pdf(ind, loc=mhigh),
color="r", label='DGP: normal mix')
plt.title('Kernel Density Estimation')
plt.legend()
gkde = gaussian_kde_covfact(xn, 'scotts')
kdepdf = gkde.evaluate(ind)
plt.figure()
# plot histgram of sample
plt.hist(xn, bins=20, normed=1)
# plot estimated density
plt.plot(ind, kdepdf, label='kde', color="g")
# plot data generating density
plt.plot(ind, alpha * stats.norm.pdf(ind, loc=mlow) +
(1-alpha) * stats.norm.pdf(ind, loc=mhigh),
color="r", label='DGP: normal mix')
plt.title('Kernel Density Estimation')
plt.legend()
#plt.show()
for cv in ['scotts', 'silverman', 0.05, 0.1, 0.5]:
plotkde(cv)
test_kde_1d()
np.random.seed(8765678)
n_basesample = 1000
xn = np.random.randn(n_basesample)
xnmean = xn.mean()
xnstd = xn.std(ddof=1)
# get kde for original sample
gkde = stats.gaussian_kde(xn)
| apache-2.0 |
siou83/trading-with-python | historicDataDownloader/historicDataDownloader.py | 77 | 4526 | '''
Created on 4 aug. 2012
Copyright: Jev Kuznetsov
License: BSD
a module for downloading historic data from IB
'''
import ib
import pandas
from ib.ext.Contract import Contract
from ib.opt import ibConnection, message
from time import sleep
import tradingWithPython.lib.logger as logger
from pandas import DataFrame, Index
import datetime as dt
from timeKeeper import TimeKeeper
import time
timeFormat = "%Y%m%d %H:%M:%S"
class DataHandler(object):
''' handles incoming messages '''
def __init__(self,tws):
self._log = logger.getLogger('DH')
tws.register(self.msgHandler,message.HistoricalData)
self.reset()
def reset(self):
self._log.debug('Resetting data')
self.dataReady = False
self._timestamp = []
self._data = {'open':[],'high':[],'low':[],'close':[],'volume':[],'count':[],'WAP':[]}
def msgHandler(self,msg):
#print '[msg]', msg
if msg.date[:8] == 'finished':
            self._log.debug('Data received')
self.dataReady = True
return
self._timestamp.append(dt.datetime.strptime(msg.date,timeFormat))
for k in self._data.keys():
self._data[k].append(getattr(msg, k))
@property
def data(self):
''' return downloaded data as a DataFrame '''
df = DataFrame(data=self._data,index=Index(self._timestamp))
return df
class Downloader(object):
def __init__(self,debug=False):
self._log = logger.getLogger('DLD')
        self._log.debug('Initializing data downloader. Pandas version={0}, ibpy version:{1}'.format(pandas.__version__,ib.version))
self.tws = ibConnection()
self._dataHandler = DataHandler(self.tws)
if debug:
self.tws.registerAll(self._debugHandler)
self.tws.unregister(self._debugHandler,message.HistoricalData)
self._log.debug('Connecting to tws')
self.tws.connect()
self._timeKeeper = TimeKeeper() # keep track of past requests
self._reqId = 1 # current request id
def _debugHandler(self,msg):
print '[debug]', msg
def requestData(self,contract,endDateTime,durationStr='1800 S',barSizeSetting='1 secs',whatToShow='TRADES',useRTH=1,formatDate=1):
self._log.debug('Requesting data for %s end time %s.' % (contract.m_symbol,endDateTime))
while self._timeKeeper.nrRequests(timeSpan=600) > 59:
print 'Too many requests done. Waiting... '
time.sleep(1)
self._timeKeeper.addRequest()
self._dataHandler.reset()
self.tws.reqHistoricalData(self._reqId,contract,endDateTime,durationStr,barSizeSetting,whatToShow,useRTH,formatDate)
self._reqId+=1
#wait for data
startTime = time.time()
timeout = 3
while not self._dataHandler.dataReady and (time.time()-startTime < timeout):
sleep(2)
if not self._dataHandler.dataReady:
self._log.error('Data timeout')
print self._dataHandler.data
return self._dataHandler.data
def getIntradayData(self,contract, dateTuple ):
''' get full day data on 1-s interval
date: a tuple of (yyyy,mm,dd)
'''
openTime = dt.datetime(*dateTuple)+dt.timedelta(hours=16)
closeTime = dt.datetime(*dateTuple)+dt.timedelta(hours=22)
timeRange = pandas.date_range(openTime,closeTime,freq='30min')
datasets = []
for t in timeRange:
datasets.append(self.requestData(contract,t.strftime(timeFormat)))
return pandas.concat(datasets)
def disconnect(self):
self.tws.disconnect()
if __name__=='__main__':
dl = Downloader(debug=True)
c = Contract()
c.m_symbol = 'SPY'
c.m_secType = 'STK'
c.m_exchange = 'SMART'
c.m_currency = 'USD'
df = dl.getIntradayData(c, (2012,8,6))
df.to_csv('test.csv')
# df = dl.requestData(c, '20120803 22:00:00')
# df.to_csv('test1.csv')
# df = dl.requestData(c, '20120803 21:30:00')
# df.to_csv('test2.csv')
dl.disconnect()
print 'Done.' | bsd-3-clause |
kcrandall/Kaggle_Mercedes_Manufacturing | spark/experiements/jingning/get_type_lists.py | 6 | 1232 | def get_type_lists(frame, rejects=['Id', 'ID','id'],frame_type='h2o'):
"""Creates lists of numeric and categorical variables.
:param frame: The frame from which to determine types.
:param rejects: Variable names not to be included in returned lists.
:param frame_type: The type of frame being used. Accepted: ['h2o','pandas','spark']
:return: Tuple of lists for numeric and categorical variables in the frame.
"""
#Handle spark type data frames
if frame_type == 'spark':
nums, cats = [], []
for key, val in frame.dtypes:
if key not in rejects:
if val == 'string':
cats.append(key)
else: # ['int','double']
nums.append(key)
print('Numeric =', nums)
print()
print('Categorical =', cats)
return nums, cats
else:
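        # assumes an h2o-style frame exposing a .types mapping of column -> type,
        # where 'enum' marks categorical columns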
nums, cats = [], []
for key, val in frame.types.items():
if key not in rejects:
if val == 'enum':
cats.append(key)
else:
nums.append(key)
print('Numeric =', nums)
print()
print('Categorical =', cats)
return nums, cats
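# Illustrative usage sketch (an addition, not part of the original module);
# 'train_df' and the rejected 'ID' column below are hypothetical.
#   nums, cats = get_type_lists(train_df, rejects=['ID'], frame_type='spark')
#   # nums -> numeric feature names, cats -> categorical (string) feature names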
| mit |
martinahogg/machinelearning | logistic-regression/logistic-regression.py | 1 | 1318 | import numpy as np
import matplotlib.pyplot as plt
from sklearn.utils import shuffle
def sigmoid(a):
return 1 / (1 + np.exp(-a))
# Prepare the sample inputs with three dimensions. The
# first column is a bias of ones. For the first 50
# rows the second and third column values are
# distributed around -2. For the last 50 rows the
# second and third colum values are distributed
# around +2.
X = np.ones((100,3))
X[:,1:] = np.random.randn(100,2)
X[:50,1:] = X[:50,1:] - 2 * np.ones((50,2))
X[50:,1:] = X[50:,1:] + 2 * np.ones((50,2))
# Prepare the sample outputs. For the first 50
# rows the target is 0. For the last 50 rows it
# is one.
T = np.array([0]*50 + [1]*50)
X, T = shuffle(X, T)
# Use the first 50 rows of our samples to train
# the model.
XTrain = X[:-50]
TTrain = T[:-50]
# Training
learning_rate = 0.001
w = np.random.randn(3)
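# Gradient-descent weight update: w <- w - learning_rate * X^T (sigmoid(Xw) - T);
# the factor of 2 below only rescales the effective learning rate.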
for i in range(1000):
THatTrain = sigmoid(XTrain.dot(w))
delta = THatTrain - TTrain
gradient = 2 * XTrain.T.dot(delta)
w = w - (learning_rate * gradient)
print("Training classification_rate:", np.mean(TTrain == np.round(THatTrain)))
# Testing
XTest = X[-50:]
TTest = T[-50:]
THatTest = sigmoid(XTest.dot(w))
print("Testing classification_rate:", np.mean(TTest == np.round(THatTest)))
plt.scatter(XTest[:,1], XTest[:,2], c=THatTest, s=100, alpha=0.5)
plt.show() | apache-2.0 |
geomalgo/geomalgo | examples/triangle2d/example_triangle2d.py | 2 | 1401 | """
=================================
Triangle2D
=================================
Create a triangle
"""
import numpy as np
import matplotlib.pylab as plt
import geomalgo as ga
# Create segment XY.
X = ga.Point2D(2, 1, name='X')
Y = ga.Point2D(5, 4, name='Y')
Z = ga.Point2D(5, 1, name='Z')
XYZ = ga.Triangle2D(X, Y, Z)
# Plot triangle.
X.plot()
Y.plot()
Z.plot(offset=(0.2, 0))
XYZ.plot()
# Retrieve points.
print('First triangle point: ', XYZ.A)
print('Second triangle point: ', XYZ.B)
print('Third triangle point: ', XYZ.C)
# Retrieve various information.
print('Triangle area: ', XYZ.area)
print('Triangle center: ', XYZ.center)
print('Triangle is counterclockwise: ', XYZ.counterclockwise)
# Test point inclusion
P = ga.Point2D(4, 4)
print('Triangle includes point (4,4):', XYZ.includes_point(P))
P.y = 2
print('Triangle includes point (4,2):', XYZ.includes_point(P))
# Interpolation
def f(x, y):
return 3*x + y - 1
x = np.array([X.x, Y.x, Z.x])
y = np.array([X.y, Y.y, Z.y])
data = f(x, y)
actual = XYZ.interpolate(data, P)
expected = f(P.x, P.y)
print('data interpolated on P: {} (expected: {})'.format(actual, expected))
# After any change to a triangle point's coordinates, .recompute() must be called.
X.x = 3
Y.y = 3
print('Old area:', XYZ.area)
XYZ.recompute()
print('New area:', XYZ.area)
# Adjust the plot.
plt.axis('scaled')
plt.xlim(1, 6)
plt.ylim(0, 5)
plt.grid()
plt.show()
| gpl-3.0 |
arokem/scipy | scipy/spatial/_spherical_voronoi.py | 3 | 10833 | """
Spherical Voronoi Code
.. versionadded:: 0.18.0
"""
#
# Copyright (C) Tyler Reddy, Ross Hemsley, Edd Edmondson,
# Nikolai Nowaczyk, Joe Pitt-Francis, 2015.
#
# Distributed under the same BSD license as SciPy.
#
import warnings
import numpy as np
import scipy
from . import _voronoi
from scipy.spatial import cKDTree
__all__ = ['SphericalVoronoi']
class SphericalVoronoi:
""" Voronoi diagrams on the surface of a sphere.
.. versionadded:: 0.18.0
Parameters
----------
points : ndarray of floats, shape (npoints, ndim)
Coordinates of points from which to construct a spherical
Voronoi diagram.
radius : float, optional
Radius of the sphere (Default: 1)
center : ndarray of floats, shape (ndim,)
Center of sphere (Default: origin)
threshold : float
Threshold for detecting duplicate points and
mismatches between points and sphere parameters.
(Default: 1e-06)
Attributes
----------
points : double array of shape (npoints, ndim)
the points in `ndim` dimensions to generate the Voronoi diagram from
radius : double
radius of the sphere
center : double array of shape (ndim,)
center of the sphere
vertices : double array of shape (nvertices, ndim)
Voronoi vertices corresponding to points
regions : list of list of integers of shape (npoints, _ )
the n-th entry is a list consisting of the indices
of the vertices belonging to the n-th point in points
Raises
------
ValueError
If there are duplicates in `points`.
If the provided `radius` is not consistent with `points`.
Notes
-----
The spherical Voronoi diagram algorithm proceeds as follows. The Convex
Hull of the input points (generators) is calculated, and is equivalent to
their Delaunay triangulation on the surface of the sphere [Caroli]_.
The Convex Hull neighbour information is then used to
order the Voronoi region vertices around each generator. The latter
approach is substantially less sensitive to floating point issues than
angle-based methods of Voronoi region vertex sorting.
Empirical assessment of spherical Voronoi algorithm performance suggests
quadratic time complexity (loglinear is optimal, but algorithms are more
challenging to implement).
References
----------
.. [Caroli] Caroli et al. Robust and Efficient Delaunay triangulations of
points on or close to a sphere. Research Report RR-7004, 2009.
See Also
--------
Voronoi : Conventional Voronoi diagrams in N dimensions.
Examples
--------
Do some imports and take some points on a cube:
>>> from matplotlib import colors
>>> from mpl_toolkits.mplot3d.art3d import Poly3DCollection
>>> import matplotlib.pyplot as plt
>>> from scipy.spatial import SphericalVoronoi
>>> from mpl_toolkits.mplot3d import proj3d
>>> # set input data
>>> points = np.array([[0, 0, 1], [0, 0, -1], [1, 0, 0],
... [0, 1, 0], [0, -1, 0], [-1, 0, 0], ])
Calculate the spherical Voronoi diagram:
>>> radius = 1
>>> center = np.array([0, 0, 0])
>>> sv = SphericalVoronoi(points, radius, center)
Generate plot:
>>> # sort vertices (optional, helpful for plotting)
>>> sv.sort_vertices_of_regions()
>>> fig = plt.figure()
>>> ax = fig.add_subplot(111, projection='3d')
>>> # plot the unit sphere for reference (optional)
>>> u = np.linspace(0, 2 * np.pi, 100)
>>> v = np.linspace(0, np.pi, 100)
>>> x = np.outer(np.cos(u), np.sin(v))
>>> y = np.outer(np.sin(u), np.sin(v))
>>> z = np.outer(np.ones(np.size(u)), np.cos(v))
>>> ax.plot_surface(x, y, z, color='y', alpha=0.1)
>>> # plot generator points
>>> ax.scatter(points[:, 0], points[:, 1], points[:, 2], c='b')
>>> # plot Voronoi vertices
>>> ax.scatter(sv.vertices[:, 0], sv.vertices[:, 1], sv.vertices[:, 2],
... c='g')
>>> # indicate Voronoi regions (as Euclidean polygons)
>>> for region in sv.regions:
... random_color = colors.rgb2hex(np.random.rand(3))
... polygon = Poly3DCollection([sv.vertices[region]], alpha=1.0)
... polygon.set_color(random_color)
... ax.add_collection3d(polygon)
>>> plt.show()
"""
def __init__(self, points, radius=1, center=None, threshold=1e-06):
if radius is None:
radius = 1.
warnings.warn('`radius` is `None`. '
'This will raise an error in a future version. '
'Please provide a floating point number '
'(i.e. `radius=1`).',
DeprecationWarning)
self.points = points
self.radius = radius
self.threshold = threshold
self._dim = len(points[0])
if center is None:
self.center = np.zeros(self._dim)
else:
self.center = np.array(center)
# test degenerate input
self._rank = np.linalg.matrix_rank(self.points - self.center,
tol=self.threshold * self.radius)
if self._rank <= 1:
raise ValueError("Rank of input points must be at least 2")
if cKDTree(self.points).query_pairs(self.threshold * self.radius):
raise ValueError("Duplicate generators present.")
radii = np.linalg.norm(self.points - self.center, axis=1)
max_discrepancy = np.abs(radii - self.radius).max()
if max_discrepancy >= self.threshold * self.radius:
raise ValueError("Radius inconsistent with generators.")
self.vertices = None
self.regions = None
self._tri = None
self._calc_vertices_regions()
def _handle_geodesic_input(self):
# center the points
centered = self.points - self.center
# calculate an orthogonal transformation using SVD
_, _, vh = np.linalg.svd(centered)
# calculate the north and south poles in this basis
poles = [[0, 0, self.radius], [0, 0, -self.radius]] @ vh
# project points into inverse basis (such that z-components are zero)
circle = centered @ vh.T[:, :2]
# simplicial neighbors are adjacent on the circle
angles = np.arctan2(circle[:, 1], circle[:, 0])
indices = np.argsort(angles)
# Voronoi vertices lie halfway between neighboring pairs
vertices = centered[indices] + centered[np.roll(indices, 1)]
vertices /= np.linalg.norm(vertices, axis=1)[:, np.newaxis]
vertices *= self.radius
# north and south poles are also Voronoi vertices
vertices = np.concatenate((vertices, poles))
# each region contains two vertices from the plane and the north and
# south poles
invf = np.argsort(indices)
invb = np.argsort(np.roll(indices, 1))
n = len(self.points)
regions = np.vstack([invf, # forward neighbor
[n] * n, # north pole
invb, # backward neighbor
[n + 1] * n]).T # south pole
self.regions = [list(region) for region in regions]
self.vertices = vertices + self.center
def _calc_vertices_regions(self):
"""
Calculates the Voronoi vertices and regions of the generators stored
in self.points. The vertices will be stored in self.vertices and the
regions in self.regions.
This algorithm was discussed at PyData London 2015 by
Tyler Reddy, Ross Hemsley and Nikolai Nowaczyk
"""
if self._dim == 3 and self._rank == 2:
self._handle_geodesic_input()
return
# get Convex Hull
self._tri = scipy.spatial.ConvexHull(self.points)
# get circumcenters of Convex Hull triangles from facet equations
# for 3D input circumcenters will have shape: (2N-4, 3)
self.vertices = self.radius * self._tri.equations[:, :-1] + self.center
# calculate regions from triangulation
# for 3D input simplex_indices will have shape: (2N-4,)
simplex_indices = np.arange(self._tri.simplices.shape[0])
# for 3D input tri_indices will have shape: (6N-12,)
tri_indices = np.column_stack([simplex_indices] * self._dim).ravel()
# for 3D input point_indices will have shape: (6N-12,)
point_indices = self._tri.simplices.ravel()
# for 3D input indices will have shape: (6N-12,)
indices = np.argsort(point_indices, kind='mergesort')
# for 3D input flattened_groups will have shape: (6N-12,)
flattened_groups = tri_indices[indices].astype(np.intp)
# intervals will have shape: (N+1,)
intervals = np.cumsum(np.bincount(point_indices + 1))
# split flattened groups to get nested list of unsorted regions
groups = [list(flattened_groups[intervals[i]:intervals[i + 1]])
for i in range(len(intervals) - 1)]
self.regions = groups
def sort_vertices_of_regions(self):
"""Sort indices of the vertices to be (counter-)clockwise ordered.
Raises
------
TypeError
If the points are not three-dimensional.
Notes
-----
For each region in regions, it sorts the indices of the Voronoi
vertices such that the resulting points are in a clockwise or
counterclockwise order around the generator point.
This is done as follows: Recall that the n-th region in regions
surrounds the n-th generator in points and that the k-th
Voronoi vertex in vertices is the circumcenter of the k-th triangle
in _tri.simplices. For each region n, we choose the first triangle
(=Voronoi vertex) in _tri.simplices and a vertex of that triangle
not equal to the center n. These determine a unique neighbor of that
triangle, which is then chosen as the second triangle. The second
triangle will have a unique vertex not equal to the current vertex or
the center. This determines a unique neighbor of the second triangle,
which is then chosen as the third triangle and so forth. We proceed
through all the triangles (=Voronoi vertices) belonging to the
generator in points and obtain a sorted version of the vertices
of its surrounding region.
"""
if self._dim != 3:
raise TypeError("Only supported for three-dimensional point sets")
if self._rank == 2:
return # regions are sorted by construction
_voronoi.sort_vertices_of_regions(self._tri.simplices, self.regions)
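# A minimal, illustrative smoke check of SphericalVoronoi on a regular
# octahedron (a sketch, not part of the public API): the convex hull of the
# six generators has 8 triangular faces, hence 8 Voronoi vertices, and each
# generator touches 4 faces, hence each region has 4 vertices.  It only runs
# when called explicitly.
def _octahedron_smoke_check():
    pts = np.array([[0, 0, 1], [0, 0, -1], [1, 0, 0],
                    [0, 1, 0], [0, -1, 0], [-1, 0, 0]], dtype=float)
    sv = SphericalVoronoi(pts, radius=1, center=np.zeros(3))
    sv.sort_vertices_of_regions()
    assert len(sv.vertices) == 8
    assert all(len(region) == 4 for region in sv.regions)
    return sv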
| bsd-3-clause |
schets/scikit-learn | sklearn/datasets/twenty_newsgroups.py | 4 | 13432 | """Caching loader for the 20 newsgroups text classification dataset
The description of the dataset is available on the official website at:
http://people.csail.mit.edu/jrennie/20Newsgroups/
Quoting the introduction:
The 20 Newsgroups data set is a collection of approximately 20,000
newsgroup documents, partitioned (nearly) evenly across 20 different
newsgroups. To the best of my knowledge, it was originally collected
by Ken Lang, probably for his Newsweeder: Learning to filter netnews
paper, though he does not explicitly mention this collection. The 20
newsgroups collection has become a popular data set for experiments
in text applications of machine learning techniques, such as text
classification and text clustering.
This dataset loader will download the recommended "by date" variant of the
dataset, which features a point-in-time split between the train and
test sets. The compressed dataset is around 14 MB. Once
uncompressed, the train set is 52 MB and the test set is 34 MB.
The data is downloaded, extracted and cached in the '~/scikit_learn_data'
folder.
The `fetch_20newsgroups` function will not vectorize the data into numpy
arrays but the dataset lists the filenames of the posts and their categories
as target labels.
The `fetch_20newsgroups_vectorized` function will in addition do a simple
tf-idf vectorization step.
"""
# Copyright (c) 2011 Olivier Grisel <[email protected]>
# License: BSD 3 clause
import os
import logging
import tarfile
import pickle
import shutil
import re
import codecs
import numpy as np
import scipy.sparse as sp
from .base import get_data_home
from .base import Bunch
from .base import load_files
from ..utils import check_random_state
from ..feature_extraction.text import CountVectorizer
from ..preprocessing import normalize
from ..externals import joblib, six
if six.PY3:
from urllib.request import urlopen
else:
from urllib2 import urlopen
logger = logging.getLogger(__name__)
URL = ("http://people.csail.mit.edu/jrennie/"
"20Newsgroups/20news-bydate.tar.gz")
ARCHIVE_NAME = "20news-bydate.tar.gz"
CACHE_NAME = "20news-bydate.pkz"
TRAIN_FOLDER = "20news-bydate-train"
TEST_FOLDER = "20news-bydate-test"
def download_20newsgroups(target_dir, cache_path):
"""Download the 20 newsgroups data and stored it as a zipped pickle."""
archive_path = os.path.join(target_dir, ARCHIVE_NAME)
train_path = os.path.join(target_dir, TRAIN_FOLDER)
test_path = os.path.join(target_dir, TEST_FOLDER)
if not os.path.exists(target_dir):
os.makedirs(target_dir)
if os.path.exists(archive_path):
# Download is not complete as the .tar.gz file is removed after
# download.
logger.warning("Download was incomplete, downloading again.")
os.remove(archive_path)
logger.warning("Downloading dataset from %s (14 MB)", URL)
opener = urlopen(URL)
open(archive_path, 'wb').write(opener.read())
logger.info("Decompressing %s", archive_path)
tarfile.open(archive_path, "r:gz").extractall(path=target_dir)
os.remove(archive_path)
# Store a zipped pickle
cache = dict(train=load_files(train_path, encoding='latin1'),
test=load_files(test_path, encoding='latin1'))
compressed_content = codecs.encode(pickle.dumps(cache), 'zlib_codec')
open(cache_path, 'wb').write(compressed_content)
shutil.rmtree(target_dir)
return cache
def strip_newsgroup_header(text):
"""
Given text in "news" format, strip the headers, by removing everything
before the first blank line.
"""
_before, _blankline, after = text.partition('\n\n')
return after
_QUOTE_RE = re.compile(r'(writes in|writes:|wrote:|says:|said:'
r'|^In article|^Quoted from|^\||^>)')
def strip_newsgroup_quoting(text):
"""
Given text in "news" format, strip lines beginning with the quote
characters > or |, plus lines that often introduce a quoted section
    (for example, because they contain the string 'writes:').
"""
good_lines = [line for line in text.split('\n')
if not _QUOTE_RE.search(line)]
return '\n'.join(good_lines)
def strip_newsgroup_footer(text):
"""
Given text in "news" format, attempt to remove a signature block.
As a rough heuristic, we assume that signatures are set apart by either
a blank line or a line made of hyphens, and that it is the last such line
in the file (disregarding blank lines at the end).
"""
lines = text.strip().split('\n')
for line_num in range(len(lines) - 1, -1, -1):
line = lines[line_num]
if line.strip().strip('-') == '':
break
if line_num > 0:
return '\n'.join(lines[:line_num])
else:
return text
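# An illustrative sketch (the sample post below is invented) showing how the
# three strip_newsgroup_* heuristics above compose; it only runs when called
# explicitly.
def _strip_helpers_example():
    post = ("From: someone@example.com\nSubject: demo\n\n"
            "somebody writes:\n> quoted line\nactual body text\n\n--\nsig")
    body = strip_newsgroup_header(post)    # drop everything before the first blank line
    body = strip_newsgroup_quoting(body)   # drop '>' lines and 'writes:' introductions
    body = strip_newsgroup_footer(body)    # drop the trailing signature block
    return body                            # -> 'actual body text\n'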
def fetch_20newsgroups(data_home=None, subset='train', categories=None,
shuffle=True, random_state=42,
remove=(),
download_if_missing=True):
"""Load the filenames and data from the 20 newsgroups dataset.
Parameters
----------
subset: 'train' or 'test', 'all', optional
Select the dataset to load: 'train' for the training set, 'test'
for the test set, 'all' for both, with shuffled ordering.
data_home: optional, default: None
Specify a download and cache folder for the datasets. If None,
all scikit-learn data is stored in '~/scikit_learn_data' subfolders.
categories: None or collection of string or unicode
If None (default), load all the categories.
If not None, list of category names to load (other categories
ignored).
shuffle: bool, optional
Whether or not to shuffle the data: might be important for models that
make the assumption that the samples are independent and identically
distributed (i.i.d.), such as stochastic gradient descent.
random_state: numpy random number generator or seed integer
Used to shuffle the dataset.
download_if_missing: optional, True by default
If False, raise an IOError if the data is not locally available
instead of trying to download the data from the source site.
remove: tuple
May contain any subset of ('headers', 'footers', 'quotes'). Each of
these are kinds of text that will be detected and removed from the
newsgroup posts, preventing classifiers from overfitting on
metadata.
'headers' removes newsgroup headers, 'footers' removes blocks at the
ends of posts that look like signatures, and 'quotes' removes lines
that appear to be quoting another post.
'headers' follows an exact standard; the other filters are not always
correct.
"""
data_home = get_data_home(data_home=data_home)
cache_path = os.path.join(data_home, CACHE_NAME)
twenty_home = os.path.join(data_home, "20news_home")
cache = None
if os.path.exists(cache_path):
try:
with open(cache_path, 'rb') as f:
compressed_content = f.read()
uncompressed_content = codecs.decode(
compressed_content, 'zlib_codec')
cache = pickle.loads(uncompressed_content)
except Exception as e:
print(80 * '_')
print('Cache loading failed')
print(80 * '_')
print(e)
if cache is None:
if download_if_missing:
cache = download_20newsgroups(target_dir=twenty_home,
cache_path=cache_path)
else:
raise IOError('20Newsgroups dataset not found')
if subset in ('train', 'test'):
data = cache[subset]
elif subset == 'all':
data_lst = list()
target = list()
filenames = list()
for subset in ('train', 'test'):
data = cache[subset]
data_lst.extend(data.data)
target.extend(data.target)
filenames.extend(data.filenames)
data.data = data_lst
data.target = np.array(target)
data.filenames = np.array(filenames)
else:
raise ValueError(
"subset can only be 'train', 'test' or 'all', got '%s'" % subset)
data.description = 'the 20 newsgroups by date dataset'
if 'headers' in remove:
data.data = [strip_newsgroup_header(text) for text in data.data]
if 'footers' in remove:
data.data = [strip_newsgroup_footer(text) for text in data.data]
if 'quotes' in remove:
data.data = [strip_newsgroup_quoting(text) for text in data.data]
if categories is not None:
labels = [(data.target_names.index(cat), cat) for cat in categories]
# Sort the categories to have the ordering of the labels
labels.sort()
labels, categories = zip(*labels)
mask = np.in1d(data.target, labels)
data.filenames = data.filenames[mask]
data.target = data.target[mask]
# searchsorted to have continuous labels
data.target = np.searchsorted(labels, data.target)
data.target_names = list(categories)
# Use an object array to shuffle: avoids memory copy
data_lst = np.array(data.data, dtype=object)
data_lst = data_lst[mask]
data.data = data_lst.tolist()
if shuffle:
random_state = check_random_state(random_state)
indices = np.arange(data.target.shape[0])
random_state.shuffle(indices)
data.filenames = data.filenames[indices]
data.target = data.target[indices]
# Use an object array to shuffle: avoids memory copy
data_lst = np.array(data.data, dtype=object)
data_lst = data_lst[indices]
data.data = data_lst.tolist()
return data
def fetch_20newsgroups_vectorized(subset="train", remove=(), data_home=None):
"""Load the 20 newsgroups dataset and transform it into tf-idf vectors.
This is a convenience function; the tf-idf transformation is done using the
default settings for `sklearn.feature_extraction.text.Vectorizer`. For more
advanced usage (stopword filtering, n-gram extraction, etc.), combine
fetch_20newsgroups with a custom `Vectorizer` or `CountVectorizer`.
Parameters
----------
subset: 'train' or 'test', 'all', optional
Select the dataset to load: 'train' for the training set, 'test'
for the test set, 'all' for both, with shuffled ordering.
data_home: optional, default: None
        Specify a download and cache folder for the datasets. If None,
all scikit-learn data is stored in '~/scikit_learn_data' subfolders.
remove: tuple
May contain any subset of ('headers', 'footers', 'quotes'). Each of
these are kinds of text that will be detected and removed from the
newsgroup posts, preventing classifiers from overfitting on
metadata.
'headers' removes newsgroup headers, 'footers' removes blocks at the
ends of posts that look like signatures, and 'quotes' removes lines
that appear to be quoting another post.
Returns
-------
bunch : Bunch object
bunch.data: sparse matrix, shape [n_samples, n_features]
bunch.target: array, shape [n_samples]
bunch.target_names: list, length [n_classes]
"""
data_home = get_data_home(data_home=data_home)
filebase = '20newsgroup_vectorized'
if remove:
filebase += 'remove-' + ('-'.join(remove))
target_file = os.path.join(data_home, filebase + ".pk")
# we shuffle but use a fixed seed for the memoization
data_train = fetch_20newsgroups(data_home=data_home,
subset='train',
categories=None,
shuffle=True,
random_state=12,
remove=remove)
data_test = fetch_20newsgroups(data_home=data_home,
subset='test',
categories=None,
shuffle=True,
random_state=12,
remove=remove)
if os.path.exists(target_file):
X_train, X_test = joblib.load(target_file)
else:
vectorizer = CountVectorizer(dtype=np.int16)
X_train = vectorizer.fit_transform(data_train.data).tocsr()
X_test = vectorizer.transform(data_test.data).tocsr()
joblib.dump((X_train, X_test), target_file, compress=9)
# the data is stored as int16 for compactness
# but normalize needs floats
X_train = X_train.astype(np.float64)
X_test = X_test.astype(np.float64)
normalize(X_train, copy=False)
normalize(X_test, copy=False)
target_names = data_train.target_names
if subset == "train":
data = X_train
target = data_train.target
elif subset == "test":
data = X_test
target = data_test.target
elif subset == "all":
data = sp.vstack((X_train, X_test)).tocsr()
target = np.concatenate((data_train.target, data_test.target))
else:
raise ValueError("%r is not a valid subset: should be one of "
"['train', 'test', 'all']" % subset)
return Bunch(data=data, target=target, target_names=target_names)
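# Illustrative usage of the two loaders above (kept in a helper so nothing is
# downloaded at import time; fetching pulls roughly 14 MB on first use).
def _example_usage():
    train = fetch_20newsgroups(subset='train',
                               remove=('headers', 'footers', 'quotes'))
    vectorized = fetch_20newsgroups_vectorized(subset='test')
    return train.target_names, vectorized.data.shape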
| bsd-3-clause |
yaukwankiu/armor | tests/roughwork20140523.py | 1 | 4951 | thisScript = "roughwork20140523.py"
import pickle, os, shutil, time
from armor import pattern
from armor import defaultParameters as dp
import numpy as np
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import axes3d
root = dp.rootFolder
timeString = str(int(time.time()))
######################################################################
#
# 1
#dataSource = "Numerical_Spectrum_for_March2014_Rainband_WRF"
#inputFolder = root+ "labLogs/2014-5-14-modifiedMexicanHatTest13_2/"
# 2
#dataSource = "Numerical_Spectrum_for_March2014_Rainband"
#inputFolder = root+ "labLogs/2014-5-13-modifiedMexicanHatTest13/"
# 3
#dataSource = "Numerical_Spectrum_for_Typhoon_Kong-Rey"
#inputFolder = root+ "labLogs/2014-5-7-modifiedMexicanHatTest10/"
# 4
#dataSource = "Numerical_Spectrum_for_Typhoon_Kong-Rey_WRF"
#inputFolder = root+ "labLogs/2014-5-7-modifiedMexicanHatTest9/"
############
# 2014-05-26
# 1
#dataSource = "Numerical_Spectrum_for_Typhoon_Kong-Rey_WRF"
#inputFolder = root + "labLogs/2014-5-26-modifiedMexicanHatTest17_kongreywrf/"
# 2
#dataSource = "Numerical_Spectrum_for_Typhoon_Kong-Rey_RADAR"
#inputFolder = root + "labLogs/2014-5-26-modifiedMexicanHatTest17_kongreycompref/"
# 3
dataSource = "Numerical_Spectrum_for_Kong-Rey_COMPREF-sigmaPower2"
inputFolder = root + "labLogs/2014-5-26-modifiedMexicanHatTest10/"
#
############################################################################
outputFolder = root+"labLogs/%d-%d-%d-modifiedMexicanHatTest17_%s/" % (dp.year, dp.month, dp.day, dataSource)
if not os.path.exists(outputFolder):
os.makedirs(outputFolder)
shutil.copyfile(root+"python/armor/tests/"+thisScript, outputFolder+thisScript)
open(outputFolder+thisScript,'a').write('\n# outputFolder:\n# ' + outputFolder)
L = os.listdir(inputFolder)
L = [v for v in L if ".pydump" in v and "responseImagesList" in v]
L = [inputFolder+v for v in L]
print len(L)
N = len(L)
## test/parameter setup
sigmas = []
for i in range(3):
responseImages = pickle.load(open(L[i],'r'))
M = responseImages[0]['matrix']
sigma = responseImages[0]['sigma']
height, width = M.shape
for j in range(len(responseImages)):
M = responseImages[j]['matrix']
M = M*(M>0)
sigma = responseImages[j]['sigma']
sigmas.append(sigma)
print j, sigma, '\t', M.min(), '\t', M.max()
print "sleeping 2 seconds"
time.sleep(2)
sigmas = sorted(list(set(sigmas)))
### end test/parameter setup
# makeing the 3d plot
X, Y = np.meshgrid(range(20), sigmas)
I, J = Y, X
Z = np.zeros(X.shape)
for i in range(len(L)):
responseImages = pickle.load(open(L[i],'r'))
M = responseImages[0]['matrix']
sigma = responseImages[0]['sigma']
z = np.zeros(X.shape)
for j in range(len(responseImages)):
M = responseImages[j]['matrix']
M = M*(M>0)
sigma = responseImages[j]['sigma']
print j, sigma, '\t', M.min(), '\t', M.max()
h = np.histogram(M, bins=20, range=(0,20))
z[j,:] = h[0]
Z += z
# making the chart
plt.close()
fig = plt.figure()
ax = fig.add_subplot(111, projection='3d')
ax.plot_wireframe(X, np.log2(Y), np.log2(Z), rstride=1, cstride=1) #key line
plt.title(dataSource+ " " + str(i) + "DBZ images\n"+\
"x-axis: response intensity(from 0 to 20)\n"+\
"y-axis: log_2(sigma)\n"+\
"z-axis: log_2(count)\n")
plt.xlabel('response intensity')
plt.ylabel('log2(sigma)')
# saving
fig.savefig(outputFolder+ "3d_numspec_plot_log2scale.png", dpi=200)
pickle.dump({"X": X, "Y":Y, "Z":Z}, open(outputFolder+'XYZ.pydump','w'))
#pickle.dump(fig,open(outputFolder+"fig.pydump","w")) #doesn't work
logFile = open(timeString+"logs.txt",'w')
logFile.write("i = " + str(i)
logFile.write("\noutputFile = " + str(outputFile)
logFile.write("\ndataSource = " + str(dataSource)
logFile.close()
fig.show()
open(outputFolder+thisScript,'a').write('\n\n outputFolder\n '+outputFolder)
"""
to see the final fig, go to the output folder, enter python interactive mode, and:
inputFolder="" #<-- fix it yourself
dataSource="" # ditto
import pickle
import numpy as np
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import axes3d
xyz = pickle.load(open(inputFolder+'XYZ.pydump','r'))
X = xyz['X']
Y = xyz['Y']
Z = xyz['Z']
plt.close()
fig = plt.figure()
ax = fig.add_subplot(111, projection='3d')
ax.plot_wireframe(X, np.log2(Y), np.log2(Z), rstride=1, cstride=1) #key line
plt.title(dataSource+ " " + str(i) + "DBZ images\n"+\
"x-axis: response intensity(from 0 to 20)\n"+\
"y-axis: log_2(sigma)\n"+\
"z-axis: log_2(count)\n")
plt.xlabel('response intensity')
plt.ylabel('log2(sigma)')
fig.show()
"""
print "outputFolder:", outputFolder
| cc0-1.0 |
hlin117/statsmodels | statsmodels/stats/tests/test_anova.py | 29 | 19073 | # -*- coding: utf-8 -*-
from statsmodels.compat.python import StringIO
import numpy as np
from statsmodels.stats.anova import anova_lm
from statsmodels.formula.api import ols
from pandas import read_table
kidney_table = StringIO("""Days Duration Weight ID
0.0 1 1 1
2.0 1 1 2
1.0 1 1 3
3.0 1 1 4
0.0 1 1 5
2.0 1 1 6
0.0 1 1 7
5.0 1 1 8
6.0 1 1 9
8.0 1 1 10
2.0 1 2 1
4.0 1 2 2
7.0 1 2 3
12.0 1 2 4
15.0 1 2 5
4.0 1 2 6
3.0 1 2 7
1.0 1 2 8
5.0 1 2 9
20.0 1 2 10
15.0 1 3 1
10.0 1 3 2
8.0 1 3 3
5.0 1 3 4
25.0 1 3 5
16.0 1 3 6
7.0 1 3 7
30.0 1 3 8
3.0 1 3 9
27.0 1 3 10
0.0 2 1 1
1.0 2 1 2
1.0 2 1 3
0.0 2 1 4
4.0 2 1 5
2.0 2 1 6
7.0 2 1 7
4.0 2 1 8
0.0 2 1 9
3.0 2 1 10
5.0 2 2 1
3.0 2 2 2
2.0 2 2 3
0.0 2 2 4
1.0 2 2 5
1.0 2 2 6
3.0 2 2 7
6.0 2 2 8
7.0 2 2 9
9.0 2 2 10
10.0 2 3 1
8.0 2 3 2
12.0 2 3 3
3.0 2 3 4
7.0 2 3 5
15.0 2 3 6
4.0 2 3 7
9.0 2 3 8
6.0 2 3 9
1.0 2 3 10
""")
class TestAnovaLM(object):
@classmethod
def setupClass(cls):
# kidney data taken from JT's course
# don't know the license
kidney_table.seek(0)
cls.data = read_table(kidney_table, sep="\s+")
cls.kidney_lm = ols('np.log(Days+1) ~ C(Duration) * C(Weight)',
data=cls.data).fit()
def test_results(self):
Df = np.array([1, 2, 2, 54])
sum_sq = np.array([2.339693, 16.97129, 0.6356584, 28.9892])
mean_sq = np.array([2.339693, 8.485645, 0.3178292, 0.536837])
f_value = np.array([4.358293, 15.80674, 0.5920404, np.nan])
pr_f = np.array([0.0415617, 3.944502e-06, 0.5567479, np.nan])
results = anova_lm(self.kidney_lm)
np.testing.assert_equal(results['df'].values, Df)
np.testing.assert_almost_equal(results['sum_sq'].values, sum_sq, 4)
np.testing.assert_almost_equal(results['F'].values, f_value, 4)
np.testing.assert_almost_equal(results['PR(>F)'].values, pr_f)
class TestAnovaLMNoconstant(object):
@classmethod
def setupClass(cls):
# kidney data taken from JT's course
# don't know the license
kidney_table.seek(0)
cls.data = read_table(kidney_table, sep="\s+")
cls.kidney_lm = ols('np.log(Days+1) ~ C(Duration) * C(Weight) - 1',
data=cls.data).fit()
def test_results(self):
Df = np.array([2, 2, 2, 54])
sum_sq = np.array([158.6415227, 16.97129, 0.6356584, 28.9892])
mean_sq = np.array([79.3207613, 8.485645, 0.3178292, 0.536837])
f_value = np.array([147.7557648, 15.80674, 0.5920404, np.nan])
pr_f = np.array([1.262324e-22, 3.944502e-06, 0.5567479, np.nan])
results = anova_lm(self.kidney_lm)
np.testing.assert_equal(results['df'].values, Df)
np.testing.assert_almost_equal(results['sum_sq'].values, sum_sq, 4)
np.testing.assert_almost_equal(results['F'].values, f_value, 4)
np.testing.assert_almost_equal(results['PR(>F)'].values, pr_f)
# > sum2.lm = lm(logDays ~ Duration * Weight - 1, contrasts=list(Duration=contr.sum, Weight=contr.sum))
# > anova.lm.sum2 <- anova(sum2.lm)
# > anova.lm.sum2
# Analysis of Variance Table
#
# Response: logDays
# Df Sum Sq Mean Sq F value Pr(>F)
# Duration 2 158.642 79.321 147.756 < 2.2e-16 ***
# Weight 2 16.971 8.486 15.807 3.945e-06 ***
# Duration:Weight 2 0.636 0.318 0.592 0.5567
# Residuals 54 28.989 0.537
# ---
# Signif. codes: 0 ‘***’ 0.001 ‘**’ 0.01 ‘*’ 0.05 ‘.’ 0.1 ‘ ’ 1
class TestAnovaLMCompare(TestAnovaLM):
def test_results(self):
new_model = ols("np.log(Days+1) ~ C(Duration) + C(Weight)",
self.data).fit()
results = anova_lm(new_model, self.kidney_lm)
Res_Df = np.array([
56, 54
])
RSS = np.array([
29.62486, 28.9892
])
Df = np.array([
0, 2
])
Sum_of_Sq = np.array([
np.nan, 0.6356584
])
F = np.array([
np.nan, 0.5920404
])
PrF = np.array([
np.nan, 0.5567479
])
np.testing.assert_equal(results["df_resid"].values, Res_Df)
np.testing.assert_almost_equal(results["ssr"].values, RSS, 4)
np.testing.assert_almost_equal(results["df_diff"].values, Df)
np.testing.assert_almost_equal(results["ss_diff"].values, Sum_of_Sq)
np.testing.assert_almost_equal(results["F"].values, F)
np.testing.assert_almost_equal(results["Pr(>F)"].values, PrF)
class TestAnovaLMCompareNoconstant(TestAnovaLM):
def test_results(self):
new_model = ols("np.log(Days+1) ~ C(Duration) + C(Weight) - 1",
self.data).fit()
results = anova_lm(new_model, self.kidney_lm)
Res_Df = np.array([
56, 54
])
RSS = np.array([
29.62486, 28.9892
])
Df = np.array([
0, 2
])
Sum_of_Sq = np.array([
np.nan, 0.6356584
])
F = np.array([
np.nan, 0.5920404
])
PrF = np.array([
np.nan, 0.5567479
])
np.testing.assert_equal(results["df_resid"].values, Res_Df)
np.testing.assert_almost_equal(results["ssr"].values, RSS, 4)
np.testing.assert_almost_equal(results["df_diff"].values, Df)
np.testing.assert_almost_equal(results["ss_diff"].values, Sum_of_Sq)
np.testing.assert_almost_equal(results["F"].values, F)
np.testing.assert_almost_equal(results["Pr(>F)"].values, PrF)
class TestAnova2(TestAnovaLM):
# drop some observations to make an unbalanced, disproportionate panel
# to make sure things are okay
def test_results(self):
data = self.data.drop([0,1,2])
anova_ii = ols("np.log(Days+1) ~ C(Duration, Sum)*C(Weight, Sum)",
data).fit()
Sum_Sq = np.array([
3.067066, 13.27205, 0.1905093, 27.60181
])
Df = np.array([
1, 2, 2, 51
])
F_value = np.array([
5.667033, 12.26141, 0.1760025, np.nan
])
PrF = np.array([
0.02106078, 4.487909e-05, 0.8391231, np.nan
])
results = anova_lm(anova_ii, typ="II")
np.testing.assert_equal(results['df'].values, Df)
np.testing.assert_almost_equal(results['sum_sq'].values, Sum_Sq, 4)
np.testing.assert_almost_equal(results['F'].values, F_value, 4)
np.testing.assert_almost_equal(results['PR(>F)'].values, PrF)
class TestAnova2Noconstant(TestAnovaLM):
# drop some observations to make an unbalanced, disproportionate panel
# to make sure things are okay
def test_results(self):
data = self.data.drop([0,1,2])
anova_ii = ols("np.log(Days+1) ~ C(Duration, Sum)*C(Weight, Sum) - 1",
data).fit()
Sum_Sq = np.array([
154.7131692, 13.27205, 0.1905093, 27.60181
])
Df = np.array([
2, 2, 2, 51
])
F_value = np.array([
142.9321191, 12.26141, 0.1760025, np.nan
])
PrF = np.array([
1.238624e-21, 4.487909e-05, 0.8391231, np.nan
])
results = anova_lm(anova_ii, typ="II")
np.testing.assert_equal(results['df'].values, Df)
np.testing.assert_almost_equal(results['sum_sq'].values, Sum_Sq, 4)
np.testing.assert_almost_equal(results['F'].values, F_value, 4)
np.testing.assert_almost_equal(results['PR(>F)'].values, PrF)
# > sum2.lm.dropped <- lm(logDays ~ Duration * Weight - 1, dta.dropped,
# contrasts=list(Duration=contr.sum, Weight=contr.sum))
# > anova.ii.dropped2 <- Anova(sum2.lm.dropped, type='II')
# > anova.ii.dropped2
# Anova Table (Type II tests)
#
# Response: logDays
# Sum Sq Df F value Pr(>F)
# Duration 154.713 2 142.932 < 2.2e-16 ***
# Weight 13.272 2 12.261 4.488e-05 ***
# Duration:Weight 0.191 2 0.176 0.8391
# Residuals 27.602 51
class TestAnova2HC0(TestAnovaLM):
#NOTE: R doesn't return SSq with robust covariance. Why?
# drop some observations to make an unbalanced, disproportionate panel
# to make sure things are okay
def test_results(self):
data = self.data.drop([0,1,2])
anova_ii = ols("np.log(Days+1) ~ C(Duration, Sum)*C(Weight, Sum)",
data).fit()
Sum_Sq = np.array([
151.4065, 2.904723, 13.45718, 0.1905093, 27.60181
])
Df = np.array([
1, 2, 2, 51
])
F = np.array([
6.972744, 13.7804, 0.1709936, np.nan
])
PrF = np.array([
0.01095599, 1.641682e-05, 0.8433081, np.nan
])
results = anova_lm(anova_ii, typ="II", robust="hc0")
np.testing.assert_equal(results['df'].values, Df)
#np.testing.assert_almost_equal(results['sum_sq'].values, Sum_Sq, 4)
np.testing.assert_almost_equal(results['F'].values, F, 4)
np.testing.assert_almost_equal(results['PR(>F)'].values, PrF)
class TestAnova2HC1(TestAnovaLM):
# drop some observations to make an unbalanced, disproportionate panel
# to make sure things are okay
def test_results(self):
data = self.data.drop([0,1,2])
anova_ii = ols("np.log(Days+1) ~ C(Duration, Sum)*C(Weight, Sum)",
data).fit()
Sum_Sq = np.array([
151.4065, 2.904723, 13.45718, 0.1905093, 27.60181
])
Df = np.array([
1, 2, 2, 51
])
F = np.array([
6.238771, 12.32983, 0.1529943, np.nan
])
PrF = np.array([
0.01576555, 4.285456e-05, 0.858527, np.nan
])
results = anova_lm(anova_ii, typ="II", robust="hc1")
np.testing.assert_equal(results['df'].values, Df)
#np.testing.assert_almost_equal(results['sum_sq'].values, Sum_Sq, 4)
np.testing.assert_almost_equal(results['F'].values, F, 4)
np.testing.assert_almost_equal(results['PR(>F)'].values, PrF)
class TestAnova2HC2(TestAnovaLM):
# drop some observations to make an unbalanced, disproportionate panel
# to make sure things are okay
def test_results(self):
data = self.data.drop([0,1,2])
anova_ii = ols("np.log(Days+1) ~ C(Duration, Sum)*C(Weight, Sum)",
data).fit()
Sum_Sq = np.array([
151.4065, 2.904723, 13.45718, 0.1905093, 27.60181
])
Df = np.array([
1, 2, 2, 51
])
F = np.array([
6.267499, 12.25354, 0.1501224, np.nan
])
PrF = np.array([
0.01554009, 4.511826e-05, 0.8609815, np.nan
])
results = anova_lm(anova_ii, typ="II", robust="hc2")
np.testing.assert_equal(results['df'].values, Df)
#np.testing.assert_almost_equal(results['sum_sq'].values, Sum_Sq, 4)
np.testing.assert_almost_equal(results['F'].values, F, 4)
np.testing.assert_almost_equal(results['PR(>F)'].values, PrF)
class TestAnova2HC3(TestAnovaLM):
# drop some observations to make an unbalanced, disproportionate panel
# to make sure things are okay
def test_results(self):
data = self.data.drop([0,1,2])
anova_ii = ols("np.log(Days+1) ~ C(Duration, Sum)*C(Weight, Sum)",
data).fit()
Sum_Sq = np.array([
151.4065, 2.904723, 13.45718, 0.1905093, 27.60181
])
Df = np.array([
1, 2, 2, 51
])
F = np.array([
5.633786, 10.89842, 0.1317223, np.nan
])
PrF = np.array([
0.02142223, 0.0001145965, 0.8768817, np.nan
])
results = anova_lm(anova_ii, typ="II", robust="hc3")
np.testing.assert_equal(results['df'].values, Df)
#np.testing.assert_almost_equal(results['sum_sq'].values, Sum_Sq, 4)
np.testing.assert_almost_equal(results['F'].values, F, 4)
np.testing.assert_almost_equal(results['PR(>F)'].values, PrF)
class TestAnova3(TestAnovaLM):
# drop some observations to make an unbalanced, disproportionate panel
# to make sure things are okay
def test_results(self):
data = self.data.drop([0,1,2])
anova_iii = ols("np.log(Days+1) ~ C(Duration, Sum)*C(Weight, Sum)",
data).fit()
Sum_Sq = np.array([
151.4065, 2.904723, 13.45718, 0.1905093, 27.60181
])
Df = np.array([
1, 1, 2, 2, 51
])
F_value = np.array([
279.7545, 5.367071, 12.43245, 0.1760025, np.nan
])
PrF = np.array([
2.379855e-22, 0.02457384, 3.999431e-05, 0.8391231, np.nan
])
results = anova_lm(anova_iii, typ="III")
np.testing.assert_equal(results['df'].values, Df)
np.testing.assert_almost_equal(results['sum_sq'].values, Sum_Sq, 4)
np.testing.assert_almost_equal(results['F'].values, F_value, 4)
np.testing.assert_almost_equal(results['PR(>F)'].values, PrF)
class TestAnova3HC0(TestAnovaLM):
#NOTE: R doesn't return SSq with robust covariance. Why?
# drop some observations to make an unbalanced, disproportionate panel
# to make sure things are okay
def test_results(self):
data = self.data.drop([0,1,2])
anova_iii = ols("np.log(Days+1) ~ C(Duration, Sum)*C(Weight, Sum)",
data).fit()
Sum_Sq = np.array([
151.4065, 2.904723, 13.45718, 0.1905093, 27.60181
])
Df = np.array([
1, 1, 2, 2, 51
])
F = np.array([
298.3404, 5.723638, 13.76069, 0.1709936, np.nan
])
PrF = np.array([
5.876255e-23, 0.02046031, 1.662826e-05, 0.8433081, np.nan
])
results = anova_lm(anova_iii, typ="III", robust="hc0")
np.testing.assert_equal(results['df'].values, Df)
#np.testing.assert_almost_equal(results['sum_sq'].values, Sum_Sq, 4)
np.testing.assert_almost_equal(results['F'].values, F, 4)
np.testing.assert_almost_equal(results['PR(>F)'].values, PrF)
class TestAnova3HC1(TestAnovaLM):
# drop some observations to make an unbalanced, disproportionate panel
# to make sure things are okay
def test_results(self):
data = self.data.drop([0,1,2])
anova_iii = ols("np.log(Days+1) ~ C(Duration, Sum)*C(Weight, Sum)",
data).fit()
Sum_Sq = np.array([
151.4065, 2.904723, 13.45718, 0.1905093, 27.60181
])
Df = np.array([
1, 1, 2, 2, 51
])
F = np.array([
266.9361, 5.12115, 12.3122, 0.1529943, np.nan
])
PrF = np.array([
6.54355e-22, 0.02792296, 4.336712e-05, 0.858527, np.nan
])
results = anova_lm(anova_iii, typ="III", robust="hc1")
np.testing.assert_equal(results['df'].values, Df)
#np.testing.assert_almost_equal(results['sum_sq'].values, Sum_Sq, 4)
np.testing.assert_almost_equal(results['F'].values, F, 4)
np.testing.assert_almost_equal(results['PR(>F)'].values, PrF)
class TestAnova3HC2(TestAnovaLM):
# drop some observations to make an unbalanced, disproportionate panel
# to make sure things are okay
def test_results(self):
data = self.data.drop([0,1,2])
anova_iii = ols("np.log(Days+1) ~ C(Duration, Sum)*C(Weight, Sum)",
data).fit()
Sum_Sq = np.array([
151.4065, 2.904723, 13.45718, 0.1905093, 27.60181
])
Df = np.array([
1, 1, 2, 2, 51
])
F = np.array([
264.5137, 5.074677, 12.19158, 0.1501224, np.nan
])
PrF = np.array([
7.958286e-22, 0.02860926, 4.704831e-05, 0.8609815, np.nan
])
results = anova_lm(anova_iii, typ="III", robust="hc2")
np.testing.assert_equal(results['df'].values, Df)
#np.testing.assert_almost_equal(results['sum_sq'].values, Sum_Sq, 4)
np.testing.assert_almost_equal(results['F'].values, F, 4)
np.testing.assert_almost_equal(results['PR(>F)'].values, PrF)
class TestAnova3HC3(TestAnovaLM):
# drop some observations to make an unbalanced, disproportionate panel
# to make sure things are okay
def test_results(self):
data = self.data.drop([0,1,2])
anova_iii = ols("np.log(Days+1) ~ C(Duration, Sum)*C(Weight, Sum)",
data).fit()
Sum_Sq = np.array([
151.4065, 2.904723, 13.45718, 0.1905093, 27.60181
])
Df = np.array([
1, 1, 2, 2, 51
])
F = np.array([
234.4026, 4.496996, 10.79903, 0.1317223, np.nan
])
PrF = np.array([
1.037224e-20, 0.03883841, 0.0001228716, 0.8768817, np.nan
])
results = anova_lm(anova_iii, typ="III", robust="hc3")
np.testing.assert_equal(results['df'].values, Df)
#np.testing.assert_almost_equal(results['sum_sq'].values, Sum_Sq, 4)
np.testing.assert_almost_equal(results['F'].values, F, 4)
np.testing.assert_almost_equal(results['PR(>F)'].values, PrF)
if __name__ == "__main__":
import nose
nose.runmodule(argv=[__file__, '-vvs', '-x', '--pdb-failure'], exit=False)
| bsd-3-clause |
AISpace2/AISpace2 | aipython/relnCollFilt_stats.py | 1 | 13113 | # relnCollFilt.py - Latent Feature-based Collaborative Filtering
# AIFCA Python3 code Version 0.7. Documentation at http://artint.info/code/python/
# Artificial Intelligence: Foundations of Computational Agents
# http://artint.info
# Copyright David L Poole and Alan K Mackworth 2017.
# This work is licensed under a Creative Commons
# Attribution-NonCommercial-ShareAlike 4.0 International License.
# See: http://creativecommons.org/licenses/by-nc-sa/4.0/deed.en
import random
import urllib.request
import matplotlib.pyplot as plt
from learnProblem import Learner
from utilities import Displayable
class CF_learner(Learner):
def __init__(self,
rating_set, # a Rating_set object
rating_subset=None, # subset of ratings to be used as training ratings
test_subset=None, # subset of ratings to be used as test ratings
step_size=0.01, # gradient descent step size
reglz=1.0, # the weight for the regularization terms
num_features=10, # number of hidden features
feature_range=0.02 # features are initialized to be between
# -feature_range and feature_range
):
self.rating_set = rating_set
self.ratings = rating_subset or rating_set.training_ratings # whichever is not empty
if test_subset is None:
self.test_ratings = self.rating_set.test_ratings
else:
self.test_ratings = test_subset
self.step_size = step_size
self.reglz = reglz
self.num_features = num_features
self.num_ratings = len(self.ratings)
self.ave_rating = (sum(rating for (user, item, rating, timestamp) in self.ratings)
/ self.num_ratings)
self.set_users_that_rated = {user for (user, item, rating, timestamp) in self.ratings}
self.set_items_rated = {item for (user, item, rating, timestamp) in self.ratings}
self.max_item_index = max(self.set_items_rated) + 1
self.max_user_index = max(self.set_users_that_rated) + 1
self.user_offset = [0] * (self.max_user_index)
self.item_offset = [0] * (self.max_item_index)
self.user_feat = [[random.uniform(-feature_range, feature_range)
for f in range(num_features)]
for i in range(self.max_user_index)]
self.item_feat = [[random.uniform(-feature_range, feature_range)
for f in range(num_features)]
for i in range(self.max_item_index)]
self.iter = 0
def stats(self):
self.display(1, "ave sumsq error of mean for training=",
sum((self.ave_rating - rating)**2 for (user, item, rating, timestamp)
in self.ratings) / len(self.ratings))
self.display(1, "ave sumsq error of mean for test=",
sum((self.ave_rating - rating)**2 for (user, item, rating, timestamp)
in self.test_ratings) / len(self.test_ratings))
self.display(1, "error on training set",
self.evaluate(self.ratings))
self.display(1, "error on test set",
self.evaluate(self.test_ratings))
def prediction(self, user, item):
"""Returns prediction for this user on this item."""
return (self.ave_rating
+ self.user_offset[user]
+ self.item_offset[item]
+ sum([self.user_feat[user][f] * self.item_feat[item][f]
for f in range(self.num_features)]))
def learn(self, num_iter=50):
""" do num_iter iterations of gradient descent."""
for i in range(num_iter):
self.iter += 1
abs_error = 0
sumsq_error = 0
for (user, item, rating, timestamp) in random.sample(self.ratings, len(self.ratings)):
error = self.prediction(user, item) - rating
abs_error += abs(error)
sumsq_error += error * error
self.user_offset[user] -= self.step_size * error
self.item_offset[item] -= self.step_size * error
for f in range(self.num_features):
self.user_feat[user][f] -= self.step_size * error * self.item_feat[item][f]
self.item_feat[item][f] -= self.step_size * error * self.user_feat[user][f]
for user in range(self.max_user_index):
self.user_offset[user] -= self.step_size * self.reglz * self.user_offset[user]
for f in range(self.num_features):
self.user_feat[user][f] -= self.step_size * self.reglz * self.user_feat[user][f]
for item in range(self.max_item_index):
self.item_offset[item] -= self.step_size * self.reglz * self.item_offset[item]
for f in range(self.num_features):
self.item_feat[item][f] -= self.step_size * self.reglz * self.item_feat[item][f]
self.display(1, "Iteration", self.iter,
"(Ave Abs,AveSumSq) training =", self.evaluate(self.ratings),
"test =", self.evaluate(self.test_ratings))
def evaluate(self, ratings):
"""returns (avergage_absolute_error, average_sum_squares_error) for ratings
"""
abs_error = 0
sumsq_error = 0
if not ratings:
return (0, 0)
for (user, item, rating, timestamp) in ratings:
error = self.prediction(user, item) - rating
abs_error += abs(error)
sumsq_error += error * error
return abs_error / len(ratings), sumsq_error / len(ratings)
def plot_predictions(self,
examples="test"):
"""
examples is either "test" or "training" or the actual examples
"""
if examples == "test":
examples = self.test_ratings
elif examples == "training":
examples = self.ratings
plt.ion()
plt.xlabel("prediction")
plt.ylabel("cumulative proportion")
self.actuals = [[] for r in range(0, 6)]
for (user, item, rating, timestamp) in examples:
self.actuals[rating].append(self.prediction(user, item))
for rating in range(1, 6):
self.actuals[rating].sort()
numrat = len(self.actuals[rating])
yvals = [i / numrat for i in range(numrat)]
plt.plot(self.actuals[rating], yvals, label="rating=" + str(rating))
plt.legend()
plt.draw()
def plot_feature(self,
f, # feature
plot_all=False, # true if all points should be plotted
num_points=200 # number of random points plotted if not all
):
"""plot some of the user-movie ratings,
if plot_all is true
num_points is the number of points selected at random plotted.
the plot has the users on the x-axis sorted by their value on feature f and
with the items on the y-axis sorted by their value on feature f and
the ratings plotted at the corresponding x-y position.
"""
plt.ion()
plt.xlabel("users")
plt.ylabel("items")
user_vals = [self.user_feat[user][f]
for user in range(self.max_user_index + 1)
if user in self.set_users_that_rated]
item_vals = [self.item_feat[item][f]
for item in range(self.max_item_index + 1)
if item in self.set_items_rated]
plt.axis([min(user_vals) - 0.02,
max(user_vals) + 0.05,
min(item_vals) - 0.02,
max(item_vals) + 0.05])
if plot_all:
for (user, item, rating, timestamp) in self.ratings:
plt.text(self.user_feat[user][f],
self.item_feat[item][f],
str(rating))
else:
for i in range(num_points):
(user, item, rating, timestamp) = random.choice(self.ratings)
plt.text(self.user_feat[user][f],
self.item_feat[item][f],
str(rating))
plt.show()
class Rating_set(Displayable):
def __init__(self,
date_split=892000000,
local_file=False,
url="http://files.grouplens.org/datasets/movielens/ml-100k/u.data",
file_name="u.data"):
self.display(1, "reading...")
if local_file:
lines = open(file_name, 'r')
else:
lines = (line.decode('utf-8') for line in urllib.request.urlopen(url))
all_ratings = (tuple(int(e) for e in line.strip().split('\t'))
for line in lines)
self.training_ratings = []
self.training_stats = {1: 0, 2: 0, 3: 0, 4: 0, 5: 0}
self.test_ratings = []
self.test_stats = {1: 0, 2: 0, 3: 0, 4: 0, 5: 0}
for rate in all_ratings:
if rate[3] < date_split: # rate[3] is timestamp
self.training_ratings.append(rate)
self.training_stats[rate[2]] += 1
else:
self.test_ratings.append(rate)
self.test_stats[rate[2]] += 1
self.display(1, "...read:", len(self.training_ratings), "training ratings and",
len(self.test_ratings), "test ratings")
self.tr_users = {user for (user, item, rating, timestamp) in self.training_ratings}
self.test_users = {user for (user, item, rating, timestamp) in self.test_ratings}
self.display(1, "users:", len(self.tr_users), "training,", len(self.test_users), "test,",
len(self.tr_users & self.test_users), "in common")
tr_items = {item for (user, item, rating, timestamp) in self.training_ratings}
test_items = {item for (user, item, rating, timestamp) in self.test_ratings}
self.display(1, "items:", len(tr_items), "training,", len(test_items), "test,",
len(tr_items & test_items), "in common")
self.display(1, "Rating statistics for training set: ", self.training_stats)
self.display(1, "Rating statistics for test set: ", self.test_stats)
def create_top_subset(self, num_items=30, num_users=30):
"""Returns a subset of the ratings by picking the most rated items,
and then the users that have most ratings on these, and then all of the
ratings that involve these users and items.
"""
max_item_index = max(item for (user, item, rating, timestamp) in self.training_ratings)
item_counts = [0] * (max_item_index + 1)
for (user, item, rating, timestamp) in self.training_ratings:
item_counts[item] += 1
items_sorted = sorted((item_counts[item], item) for item in range(max_item_index))
top_items = items_sorted[-num_items:]
set_top_items = set(item for (count, item) in top_items)
max_user_index = max(user for (user, item, rating, timestamp) in self.training_ratings)
user_counts = [0] * (max_user_index + 1)
for (user, item, rating, timestamp) in self.training_ratings:
if item in set_top_items:
user_counts[user] += 1
users_sorted = sorted((user_counts[user], user)
for user in range(max_user_index))
top_users = users_sorted[-num_users:]
set_top_users = set(user for (count, user) in top_users)
used_ratings = [(user, item, rating, timestamp)
for (user, item, rating, timestamp) in self.training_ratings
if user in set_top_users and item in set_top_items]
return used_ratings
movielens = Rating_set()
learner0 = CF_learner(movielens, num_features=1)
# learner0.learn(50)
# learner0.plot_predictions(examples = "training")
# learner0.plot_predictions(examples = "test")
# learner0.plot_feature(0)
#movielens_subset = movielens.create_top_subset(num_items = 20, num_users = 20)
#learner1 = CF_learner(movielens, rating_subset=movielens_subset, test_subset=[], num_features=1)
# learner1.learn(1000)
# learner1.plot_feature(0,plot_all=True)
def sorted_r(lst):
for i in range(len(lst) - 1):
if lst[i][3] > lst[i + 1][3]:
return False
return True
def splits(rs):
users = {u: 0 for u in rs.tr_users.union(rs.test_users)}
stats = [0] * 200
max_stats = 0
nr = 0
for (u, i, r, d) in rs.training_ratings + rs.test_ratings:
users[u] += 1
nr += 1
if nr % 10000 == 0:
stats = [0] * 2000
max_stats = 0
for (u, n) in users.items(): # n is number of items u rated
stats[n] += 1
if n > max_stats:
max_stats = n
print(nr, d, stats[:max_stats + 1], max_stats)
splits(movielens)
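# The prediction rule learned above is
#     r_hat(u, i) = ave_rating + user_offset[u] + item_offset[i]
#                   + sum_f user_feat[u][f] * item_feat[i][f]
# The helper below is an optional, illustrative experiment in the same style
# as learner1 above (parameter values are arbitrary); it only runs when called.
def _small_subset_experiment(num_features=2, num_iter=10):
    subset = movielens.create_top_subset(num_items=20, num_users=20)
    lnr = CF_learner(movielens, rating_subset=subset, test_subset=[],
                     num_features=num_features)
    lnr.learn(num_iter)
    return lnr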
| gpl-3.0 |
arhik/nupic | external/linux32/lib/python2.6/site-packages/matplotlib/backends/backend_qt4agg.py | 70 | 4985 | """
Render to qt from agg
"""
from __future__ import division
import os, sys
import matplotlib
from matplotlib.figure import Figure
from backend_agg import FigureCanvasAgg
from backend_qt4 import QtCore, QtGui, FigureManagerQT, FigureCanvasQT,\
show, draw_if_interactive, backend_version, \
NavigationToolbar2QT
DEBUG = False
def new_figure_manager( num, *args, **kwargs ):
"""
Create a new figure manager instance
"""
if DEBUG: print 'backend_qtagg.new_figure_manager'
FigureClass = kwargs.pop('FigureClass', Figure)
thisFig = FigureClass( *args, **kwargs )
canvas = FigureCanvasQTAgg( thisFig )
return FigureManagerQT( canvas, num )
class NavigationToolbar2QTAgg(NavigationToolbar2QT):
def _get_canvas(self, fig):
return FigureCanvasQTAgg(fig)
class FigureManagerQTAgg(FigureManagerQT):
def _get_toolbar(self, canvas, parent):
# must be inited after the window, drawingArea and figure
# attrs are set
if matplotlib.rcParams['toolbar']=='classic':
print "Classic toolbar is not supported"
elif matplotlib.rcParams['toolbar']=='toolbar2':
toolbar = NavigationToolbar2QTAgg(canvas, parent)
else:
toolbar = None
return toolbar
class FigureCanvasQTAgg( FigureCanvasQT, FigureCanvasAgg ):
"""
The canvas the figure renders into. Calls the draw and print fig
methods, creates the renderers, etc...
Public attribute
figure - A Figure instance
"""
def __init__( self, figure ):
if DEBUG: print 'FigureCanvasQtAgg: ', figure
FigureCanvasQT.__init__( self, figure )
FigureCanvasAgg.__init__( self, figure )
self.drawRect = False
self.rect = []
self.replot = True
self.setAttribute(QtCore.Qt.WA_OpaquePaintEvent)
def resizeEvent( self, e ):
FigureCanvasQT.resizeEvent( self, e )
def drawRectangle( self, rect ):
self.rect = rect
self.drawRect = True
self.repaint( )
def paintEvent( self, e ):
"""
Draw to the Agg backend and then copy the image to the qt.drawable.
In Qt, all drawing should be done inside of here when a widget is
shown onscreen.
"""
#FigureCanvasQT.paintEvent( self, e )
if DEBUG: print 'FigureCanvasQtAgg.paintEvent: ', self, \
self.get_width_height()
# only replot data when needed
if type(self.replot) is bool: # might be a bbox for blitting
if self.replot:
FigureCanvasAgg.draw(self)
# matplotlib is in rgba byte order. QImage wants to put the bytes
# into argb format and is in a 4 byte unsigned int. Little endian
# system is LSB first and expects the bytes in reverse order
# (bgra).
if QtCore.QSysInfo.ByteOrder == QtCore.QSysInfo.LittleEndian:
stringBuffer = self.renderer._renderer.tostring_bgra()
else:
stringBuffer = self.renderer._renderer.tostring_argb()
qImage = QtGui.QImage(stringBuffer, self.renderer.width,
self.renderer.height,
QtGui.QImage.Format_ARGB32)
p = QtGui.QPainter(self)
p.drawPixmap(QtCore.QPoint(0, 0), QtGui.QPixmap.fromImage(qImage))
# draw the zoom rectangle to the QPainter
if self.drawRect:
p.setPen( QtGui.QPen( QtCore.Qt.black, 1, QtCore.Qt.DotLine ) )
p.drawRect( self.rect[0], self.rect[1], self.rect[2], self.rect[3] )
p.end()
# we are blitting here
else:
bbox = self.replot
l, b, r, t = bbox.extents
w = int(r) - int(l)
h = int(t) - int(b)
t = int(b) + h
reg = self.copy_from_bbox(bbox)
stringBuffer = reg.to_string_argb()
qImage = QtGui.QImage(stringBuffer, w, h, QtGui.QImage.Format_ARGB32)
pixmap = QtGui.QPixmap.fromImage(qImage)
p = QtGui.QPainter( self )
p.drawPixmap(QtCore.QPoint(l, self.renderer.height-t), pixmap)
p.end()
self.replot = False
self.drawRect = False
def draw( self ):
"""
Draw the figure when xwindows is ready for the update
"""
if DEBUG: print "FigureCanvasQtAgg.draw", self
self.replot = True
FigureCanvasAgg.draw(self)
self.update()
# Added following line to improve realtime pan/zoom on windows:
QtGui.qApp.processEvents()
def blit(self, bbox=None):
"""
Blit the region in bbox
"""
self.replot = bbox
l, b, w, h = bbox.bounds
t = b + h
self.update(l, self.renderer.height-t, w, h)
def print_figure(self, *args, **kwargs):
FigureCanvasAgg.print_figure(self, *args, **kwargs)
self.draw()
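# For reference, this backend is normally selected by name before pyplot is
# imported (an illustrative sketch, not executed here):
#
#     import matplotlib
#     matplotlib.use('Qt4Agg')      # selects the FigureCanvasQTAgg above
#     import matplotlib.pyplot as plt
#     plt.plot([0, 1, 2], [0, 1, 4])
#     plt.show()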
| agpl-3.0 |
nelango/ViralityAnalysis | model/lib/sklearn/metrics/tests/test_ranking.py | 6 | 41689 | from __future__ import division, print_function
import numpy as np
from itertools import product
import warnings
from scipy.sparse import csr_matrix
from sklearn import datasets
from sklearn import svm
from sklearn import ensemble
from sklearn.datasets import make_multilabel_classification
from sklearn.random_projection import sparse_random_matrix
from sklearn.utils.validation import check_array, check_consistent_length
from sklearn.utils.validation import check_random_state
from sklearn.utils.testing import assert_raises, clean_warning_registry
from sklearn.utils.testing import assert_raise_message
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_warns
from sklearn.metrics import auc
from sklearn.metrics import average_precision_score
from sklearn.metrics import coverage_error
from sklearn.metrics import label_ranking_average_precision_score
from sklearn.metrics import precision_recall_curve
from sklearn.metrics import label_ranking_loss
from sklearn.metrics import roc_auc_score
from sklearn.metrics import roc_curve
from sklearn.metrics.base import UndefinedMetricWarning
###############################################################################
# Utilities for testing
def make_prediction(dataset=None, binary=False):
"""Make some classification predictions on a toy dataset using a SVC
If binary is True restrict to a binary classification problem instead of a
multiclass classification problem
"""
if dataset is None:
# import some data to play with
dataset = datasets.load_iris()
X = dataset.data
y = dataset.target
if binary:
# restrict to a binary classification task
X, y = X[y < 2], y[y < 2]
n_samples, n_features = X.shape
p = np.arange(n_samples)
rng = check_random_state(37)
rng.shuffle(p)
X, y = X[p], y[p]
half = int(n_samples / 2)
# add noisy features to make the problem harder and avoid perfect results
rng = np.random.RandomState(0)
X = np.c_[X, rng.randn(n_samples, 200 * n_features)]
# run classifier, get class probabilities and label predictions
clf = svm.SVC(kernel='linear', probability=True, random_state=0)
probas_pred = clf.fit(X[:half], y[:half]).predict_proba(X[half:])
if binary:
# only interested in probabilities of the positive case
# XXX: do we really want a special API for the binary case?
probas_pred = probas_pred[:, 1]
y_pred = clf.predict(X[half:])
y_true = y[half:]
return y_true, y_pred, probas_pred
###############################################################################
# Tests
def _auc(y_true, y_score):
"""Alternative implementation to check for correctness of
`roc_auc_score`."""
pos_label = np.unique(y_true)[1]
# Count the number of times positive samples are correctly ranked above
# negative samples.
pos = y_score[y_true == pos_label]
neg = y_score[y_true != pos_label]
diff_matrix = pos.reshape(1, -1) - neg.reshape(-1, 1)
n_correct = np.sum(diff_matrix > 0)
return n_correct / float(len(pos) * len(neg))
def _average_precision(y_true, y_score):
"""Alternative implementation to check for correctness of
`average_precision_score`."""
pos_label = np.unique(y_true)[1]
n_pos = np.sum(y_true == pos_label)
order = np.argsort(y_score)[::-1]
y_score = y_score[order]
y_true = y_true[order]
score = 0
for i in range(len(y_score)):
if y_true[i] == pos_label:
# Compute precision up to document i
# i.e, percentage of relevant documents up to document i.
prec = 0
for j in range(0, i + 1):
if y_true[j] == pos_label:
prec += 1.0
prec /= (i + 1.0)
score += prec
return score / n_pos
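# A toy cross-check (illustrative, values chosen by hand) that the
# pair-counting reference implementation above agrees with roc_auc_score:
# exactly one of the four positive/negative pairs is ordered incorrectly,
# so both should return 3/4.
def test_alt_auc_toy_example():
    y_true = np.array([0, 0, 1, 1])
    y_score = np.array([0.1, 0.8, 0.4, 0.9])
    assert_almost_equal(_auc(y_true, y_score), 0.75)
    assert_almost_equal(roc_auc_score(y_true, y_score), 0.75)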
def test_roc_curve():
# Test Area under Receiver Operating Characteristic (ROC) curve
y_true, _, probas_pred = make_prediction(binary=True)
expected_auc = _auc(y_true, probas_pred)
for drop in [True, False]:
fpr, tpr, thresholds = roc_curve(y_true, probas_pred,
drop_intermediate=drop)
roc_auc = auc(fpr, tpr)
assert_array_almost_equal(roc_auc, expected_auc, decimal=2)
assert_almost_equal(roc_auc, roc_auc_score(y_true, probas_pred))
assert_equal(fpr.shape, tpr.shape)
assert_equal(fpr.shape, thresholds.shape)
def test_roc_curve_end_points():
    # Make sure that roc_curve returns a curve that starts at 0 and ends at 1,
    # even in corner cases
rng = np.random.RandomState(0)
y_true = np.array([0] * 50 + [1] * 50)
y_pred = rng.randint(3, size=100)
fpr, tpr, thr = roc_curve(y_true, y_pred, drop_intermediate=True)
assert_equal(fpr[0], 0)
assert_equal(fpr[-1], 1)
assert_equal(fpr.shape, tpr.shape)
assert_equal(fpr.shape, thr.shape)
def test_roc_returns_consistency():
# Test whether the returned threshold matches up with tpr
# make small toy dataset
y_true, _, probas_pred = make_prediction(binary=True)
fpr, tpr, thresholds = roc_curve(y_true, probas_pred)
# use the given thresholds to determine the tpr
tpr_correct = []
for t in thresholds:
tp = np.sum((probas_pred >= t) & y_true)
p = np.sum(y_true)
tpr_correct.append(1.0 * tp / p)
# compare tpr and tpr_correct to see if the thresholds' order was correct
assert_array_almost_equal(tpr, tpr_correct, decimal=2)
assert_equal(fpr.shape, tpr.shape)
assert_equal(fpr.shape, thresholds.shape)
def test_roc_nonrepeating_thresholds():
# Test to ensure that we don't return spurious repeating thresholds.
# Duplicated thresholds can arise due to machine precision issues.
dataset = datasets.load_digits()
X = dataset['data']
y = dataset['target']
# This random forest classifier can only return probabilities
# significant to two decimal places
clf = ensemble.RandomForestClassifier(n_estimators=100, random_state=0)
# How well can the classifier predict whether a digit is less than 5?
# This task contributes floating point roundoff errors to the probabilities
train, test = slice(None, None, 2), slice(1, None, 2)
probas_pred = clf.fit(X[train], y[train]).predict_proba(X[test])
y_score = probas_pred[:, :5].sum(axis=1) # roundoff errors begin here
y_true = [yy < 5 for yy in y[test]]
# Check for repeating values in the thresholds
fpr, tpr, thresholds = roc_curve(y_true, y_score, drop_intermediate=False)
assert_equal(thresholds.size, np.unique(np.round(thresholds, 2)).size)
def test_roc_curve_multi():
# roc_curve not applicable for multi-class problems
y_true, _, probas_pred = make_prediction(binary=False)
assert_raises(ValueError, roc_curve, y_true, probas_pred)
def test_roc_curve_confidence():
# roc_curve for confidence scores
y_true, _, probas_pred = make_prediction(binary=True)
fpr, tpr, thresholds = roc_curve(y_true, probas_pred - 0.5)
roc_auc = auc(fpr, tpr)
assert_array_almost_equal(roc_auc, 0.90, decimal=2)
assert_equal(fpr.shape, tpr.shape)
assert_equal(fpr.shape, thresholds.shape)
def test_roc_curve_hard():
# roc_curve for hard decisions
y_true, pred, probas_pred = make_prediction(binary=True)
# always predict one
trivial_pred = np.ones(y_true.shape)
fpr, tpr, thresholds = roc_curve(y_true, trivial_pred)
roc_auc = auc(fpr, tpr)
assert_array_almost_equal(roc_auc, 0.50, decimal=2)
assert_equal(fpr.shape, tpr.shape)
assert_equal(fpr.shape, thresholds.shape)
# always predict zero
trivial_pred = np.zeros(y_true.shape)
fpr, tpr, thresholds = roc_curve(y_true, trivial_pred)
roc_auc = auc(fpr, tpr)
assert_array_almost_equal(roc_auc, 0.50, decimal=2)
assert_equal(fpr.shape, tpr.shape)
assert_equal(fpr.shape, thresholds.shape)
# hard decisions
fpr, tpr, thresholds = roc_curve(y_true, pred)
roc_auc = auc(fpr, tpr)
assert_array_almost_equal(roc_auc, 0.78, decimal=2)
assert_equal(fpr.shape, tpr.shape)
assert_equal(fpr.shape, thresholds.shape)
def test_roc_curve_one_label():
y_true = [1, 1, 1, 1, 1, 1, 1, 1, 1, 1]
y_pred = [0, 1, 0, 1, 0, 1, 0, 1, 0, 1]
# assert there are warnings
w = UndefinedMetricWarning
fpr, tpr, thresholds = assert_warns(w, roc_curve, y_true, y_pred)
# all true labels, all fpr should be nan
assert_array_equal(fpr,
np.nan * np.ones(len(thresholds)))
assert_equal(fpr.shape, tpr.shape)
assert_equal(fpr.shape, thresholds.shape)
# assert there are warnings
fpr, tpr, thresholds = assert_warns(w, roc_curve,
[1 - x for x in y_true],
y_pred)
# all negative labels, all tpr should be nan
assert_array_equal(tpr,
np.nan * np.ones(len(thresholds)))
assert_equal(fpr.shape, tpr.shape)
assert_equal(fpr.shape, thresholds.shape)
def test_roc_curve_toydata():
# Binary classification
y_true = [0, 1]
y_score = [0, 1]
tpr, fpr, _ = roc_curve(y_true, y_score)
roc_auc = roc_auc_score(y_true, y_score)
assert_array_almost_equal(tpr, [0, 1])
assert_array_almost_equal(fpr, [1, 1])
assert_almost_equal(roc_auc, 1.)
y_true = [0, 1]
y_score = [1, 0]
tpr, fpr, _ = roc_curve(y_true, y_score)
roc_auc = roc_auc_score(y_true, y_score)
assert_array_almost_equal(tpr, [0, 1, 1])
assert_array_almost_equal(fpr, [0, 0, 1])
assert_almost_equal(roc_auc, 0.)
y_true = [1, 0]
y_score = [1, 1]
tpr, fpr, _ = roc_curve(y_true, y_score)
roc_auc = roc_auc_score(y_true, y_score)
assert_array_almost_equal(tpr, [0, 1])
assert_array_almost_equal(fpr, [0, 1])
assert_almost_equal(roc_auc, 0.5)
y_true = [1, 0]
y_score = [1, 0]
tpr, fpr, _ = roc_curve(y_true, y_score)
roc_auc = roc_auc_score(y_true, y_score)
assert_array_almost_equal(tpr, [0, 1])
assert_array_almost_equal(fpr, [1, 1])
assert_almost_equal(roc_auc, 1.)
y_true = [1, 0]
y_score = [0.5, 0.5]
tpr, fpr, _ = roc_curve(y_true, y_score)
roc_auc = roc_auc_score(y_true, y_score)
assert_array_almost_equal(tpr, [0, 1])
assert_array_almost_equal(fpr, [0, 1])
assert_almost_equal(roc_auc, .5)
y_true = [0, 0]
y_score = [0.25, 0.75]
tpr, fpr, _ = roc_curve(y_true, y_score)
assert_raises(ValueError, roc_auc_score, y_true, y_score)
assert_array_almost_equal(tpr, [0., 0.5, 1.])
assert_array_almost_equal(fpr, [np.nan, np.nan, np.nan])
y_true = [1, 1]
y_score = [0.25, 0.75]
tpr, fpr, _ = roc_curve(y_true, y_score)
assert_raises(ValueError, roc_auc_score, y_true, y_score)
assert_array_almost_equal(tpr, [np.nan, np.nan])
assert_array_almost_equal(fpr, [0.5, 1.])
# Multi-label classification task
y_true = np.array([[0, 1], [0, 1]])
y_score = np.array([[0, 1], [0, 1]])
assert_raises(ValueError, roc_auc_score, y_true, y_score, average="macro")
assert_raises(ValueError, roc_auc_score, y_true, y_score,
average="weighted")
assert_almost_equal(roc_auc_score(y_true, y_score, average="samples"), 1.)
assert_almost_equal(roc_auc_score(y_true, y_score, average="micro"), 1.)
y_true = np.array([[0, 1], [0, 1]])
y_score = np.array([[0, 1], [1, 0]])
assert_raises(ValueError, roc_auc_score, y_true, y_score, average="macro")
assert_raises(ValueError, roc_auc_score, y_true, y_score,
average="weighted")
assert_almost_equal(roc_auc_score(y_true, y_score, average="samples"), 0.5)
assert_almost_equal(roc_auc_score(y_true, y_score, average="micro"), 0.5)
y_true = np.array([[1, 0], [0, 1]])
y_score = np.array([[0, 1], [1, 0]])
assert_almost_equal(roc_auc_score(y_true, y_score, average="macro"), 0)
assert_almost_equal(roc_auc_score(y_true, y_score, average="weighted"), 0)
assert_almost_equal(roc_auc_score(y_true, y_score, average="samples"), 0)
assert_almost_equal(roc_auc_score(y_true, y_score, average="micro"), 0)
y_true = np.array([[1, 0], [0, 1]])
y_score = np.array([[0.5, 0.5], [0.5, 0.5]])
assert_almost_equal(roc_auc_score(y_true, y_score, average="macro"), .5)
assert_almost_equal(roc_auc_score(y_true, y_score, average="weighted"), .5)
assert_almost_equal(roc_auc_score(y_true, y_score, average="samples"), .5)
assert_almost_equal(roc_auc_score(y_true, y_score, average="micro"), .5)
def test_roc_curve_drop_intermediate():
# Test that drop_intermediate drops the correct thresholds
y_true = [0, 0, 0, 0, 1, 1]
y_score = [0., 0.2, 0.5, 0.6, 0.7, 1.0]
tpr, fpr, thresholds = roc_curve(y_true, y_score, drop_intermediate=True)
assert_array_almost_equal(thresholds, [1., 0.7, 0.])
# Test dropping thresholds with repeating scores
y_true = [0, 0, 0, 0, 0, 0, 0,
1, 1, 1, 1, 1, 1]
y_score = [0., 0.1, 0.6, 0.6, 0.7, 0.8, 0.9,
0.6, 0.7, 0.8, 0.9, 0.9, 1.0]
tpr, fpr, thresholds = roc_curve(y_true, y_score, drop_intermediate=True)
assert_array_almost_equal(thresholds,
[1.0, 0.9, 0.7, 0.6, 0.])
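# Note on the first case above (illustrative, not part of the original
# tests): the candidate thresholds are the distinct scores
# [1., 0.7, 0.6, 0.5, 0.2, 0.]; every threshold below 0.7 already gives
# tpr == 1, so those ROC points are collinear and drop_intermediate keeps
# only [1., 0.7, 0.].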
def test_auc():
# Test Area Under Curve (AUC) computation
x = [0, 1]
y = [0, 1]
assert_array_almost_equal(auc(x, y), 0.5)
x = [1, 0]
y = [0, 1]
assert_array_almost_equal(auc(x, y), 0.5)
x = [1, 0, 0]
y = [0, 1, 1]
assert_array_almost_equal(auc(x, y), 0.5)
x = [0, 1]
y = [1, 1]
assert_array_almost_equal(auc(x, y), 1)
x = [0, 0.5, 1]
y = [0, 0.5, 1]
assert_array_almost_equal(auc(x, y), 0.5)
def test_auc_duplicate_values():
# Test Area Under Curve (AUC) computation with duplicate values
# auc() was previously sorting the x and y arrays according to the indices
# from numpy.argsort(x), which was reordering the tied 0's in this example
# and resulting in an incorrect area computation. This test detects the
# error.
x = [-2.0, 0.0, 0.0, 0.0, 1.0]
y1 = [2.0, 0.0, 0.5, 1.0, 1.0]
y2 = [2.0, 1.0, 0.0, 0.5, 1.0]
y3 = [2.0, 1.0, 0.5, 0.0, 1.0]
for y in (y1, y2, y3):
assert_array_almost_equal(auc(x, y, reorder=True), 3.0)
def test_auc_errors():
# Incompatible shapes
assert_raises(ValueError, auc, [0.0, 0.5, 1.0], [0.1, 0.2])
# Too few x values
assert_raises(ValueError, auc, [0.0], [0.1])
# x is not in order
assert_raises(ValueError, auc, [1.0, 0.0, 0.5], [0.0, 0.0, 0.0])
def test_auc_score_non_binary_class():
# Test that roc_auc_score function returns an error when trying
# to compute AUC for non-binary class values.
rng = check_random_state(404)
y_pred = rng.rand(10)
# y_true contains only one class value
y_true = np.zeros(10, dtype="int")
assert_raise_message(ValueError, "ROC AUC score is not defined",
roc_auc_score, y_true, y_pred)
y_true = np.ones(10, dtype="int")
assert_raise_message(ValueError, "ROC AUC score is not defined",
roc_auc_score, y_true, y_pred)
y_true = -np.ones(10, dtype="int")
assert_raise_message(ValueError, "ROC AUC score is not defined",
roc_auc_score, y_true, y_pred)
# y_true contains three different class values
y_true = rng.randint(0, 3, size=10)
assert_raise_message(ValueError, "multiclass format is not supported",
roc_auc_score, y_true, y_pred)
clean_warning_registry()
with warnings.catch_warnings(record=True):
rng = check_random_state(404)
y_pred = rng.rand(10)
# y_true contains only one class value
y_true = np.zeros(10, dtype="int")
assert_raise_message(ValueError, "ROC AUC score is not defined",
roc_auc_score, y_true, y_pred)
y_true = np.ones(10, dtype="int")
assert_raise_message(ValueError, "ROC AUC score is not defined",
roc_auc_score, y_true, y_pred)
y_true = -np.ones(10, dtype="int")
assert_raise_message(ValueError, "ROC AUC score is not defined",
roc_auc_score, y_true, y_pred)
# y_true contains three different class values
y_true = rng.randint(0, 3, size=10)
assert_raise_message(ValueError, "multiclass format is not supported",
roc_auc_score, y_true, y_pred)
def test_precision_recall_curve():
y_true, _, probas_pred = make_prediction(binary=True)
_test_precision_recall_curve(y_true, probas_pred)
# Use {-1, 1} for labels; make sure original labels aren't modified
y_true[np.where(y_true == 0)] = -1
y_true_copy = y_true.copy()
_test_precision_recall_curve(y_true, probas_pred)
assert_array_equal(y_true_copy, y_true)
labels = [1, 0, 0, 1]
predict_probas = [1, 2, 3, 4]
p, r, t = precision_recall_curve(labels, predict_probas)
assert_array_almost_equal(p, np.array([0.5, 0.33333333, 0.5, 1., 1.]))
assert_array_almost_equal(r, np.array([1., 0.5, 0.5, 0.5, 0.]))
assert_array_almost_equal(t, np.array([1, 2, 3, 4]))
assert_equal(p.size, r.size)
assert_equal(p.size, t.size + 1)
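# Worked check of the toy case above (illustrative only): at threshold 1 all
# four samples are predicted positive (precision 2/4, recall 1); at 2 the
# samples with scores >= 2 have labels [0, 0, 1] (precision 1/3, recall 1/2);
# at 3, labels [0, 1] (precision 1/2); at 4, label [1] (precision 1); the
# final point (precision 1, recall 0) is appended by convention.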
def test_precision_recall_curve_pos_label():
y_true, _, probas_pred = make_prediction(binary=False)
pos_label = 2
p, r, thresholds = precision_recall_curve(y_true,
probas_pred[:, pos_label],
pos_label=pos_label)
p2, r2, thresholds2 = precision_recall_curve(y_true == pos_label,
probas_pred[:, pos_label])
assert_array_almost_equal(p, p2)
assert_array_almost_equal(r, r2)
assert_array_almost_equal(thresholds, thresholds2)
assert_equal(p.size, r.size)
assert_equal(p.size, thresholds.size + 1)
def _test_precision_recall_curve(y_true, probas_pred):
# Test Precision-Recall and area under the PR curve
p, r, thresholds = precision_recall_curve(y_true, probas_pred)
precision_recall_auc = auc(r, p)
assert_array_almost_equal(precision_recall_auc, 0.85, 2)
assert_array_almost_equal(precision_recall_auc,
average_precision_score(y_true, probas_pred))
assert_almost_equal(_average_precision(y_true, probas_pred),
precision_recall_auc, 1)
assert_equal(p.size, r.size)
assert_equal(p.size, thresholds.size + 1)
# Smoke test in the case of proba having only one value
p, r, thresholds = precision_recall_curve(y_true,
np.zeros_like(probas_pred))
precision_recall_auc = auc(r, p)
assert_array_almost_equal(precision_recall_auc, 0.75, 3)
assert_equal(p.size, r.size)
assert_equal(p.size, thresholds.size + 1)
def test_precision_recall_curve_errors():
# Contains non-binary labels
assert_raises(ValueError, precision_recall_curve,
[0, 1, 2], [[0.0], [1.0], [1.0]])
def test_precision_recall_curve_toydata():
with np.errstate(all="raise"):
# Binary classification
y_true = [0, 1]
y_score = [0, 1]
p, r, _ = precision_recall_curve(y_true, y_score)
auc_prc = average_precision_score(y_true, y_score)
assert_array_almost_equal(p, [1, 1])
assert_array_almost_equal(r, [1, 0])
assert_almost_equal(auc_prc, 1.)
y_true = [0, 1]
y_score = [1, 0]
p, r, _ = precision_recall_curve(y_true, y_score)
auc_prc = average_precision_score(y_true, y_score)
assert_array_almost_equal(p, [0.5, 0., 1.])
assert_array_almost_equal(r, [1., 0., 0.])
assert_almost_equal(auc_prc, 0.25)
y_true = [1, 0]
y_score = [1, 1]
p, r, _ = precision_recall_curve(y_true, y_score)
auc_prc = average_precision_score(y_true, y_score)
assert_array_almost_equal(p, [0.5, 1])
assert_array_almost_equal(r, [1., 0])
assert_almost_equal(auc_prc, .75)
y_true = [1, 0]
y_score = [1, 0]
p, r, _ = precision_recall_curve(y_true, y_score)
auc_prc = average_precision_score(y_true, y_score)
assert_array_almost_equal(p, [1, 1])
assert_array_almost_equal(r, [1, 0])
assert_almost_equal(auc_prc, 1.)
y_true = [1, 0]
y_score = [0.5, 0.5]
p, r, _ = precision_recall_curve(y_true, y_score)
auc_prc = average_precision_score(y_true, y_score)
assert_array_almost_equal(p, [0.5, 1])
assert_array_almost_equal(r, [1, 0.])
assert_almost_equal(auc_prc, .75)
y_true = [0, 0]
y_score = [0.25, 0.75]
assert_raises(Exception, precision_recall_curve, y_true, y_score)
assert_raises(Exception, average_precision_score, y_true, y_score)
y_true = [1, 1]
y_score = [0.25, 0.75]
p, r, _ = precision_recall_curve(y_true, y_score)
assert_almost_equal(average_precision_score(y_true, y_score), 1.)
assert_array_almost_equal(p, [1., 1., 1.])
assert_array_almost_equal(r, [1, 0.5, 0.])
# Multi-label classification task
y_true = np.array([[0, 1], [0, 1]])
y_score = np.array([[0, 1], [0, 1]])
assert_raises(Exception, average_precision_score, y_true, y_score,
average="macro")
assert_raises(Exception, average_precision_score, y_true, y_score,
average="weighted")
assert_almost_equal(average_precision_score(y_true, y_score,
average="samples"), 1.)
assert_almost_equal(average_precision_score(y_true, y_score,
average="micro"), 1.)
y_true = np.array([[0, 1], [0, 1]])
y_score = np.array([[0, 1], [1, 0]])
assert_raises(Exception, average_precision_score, y_true, y_score,
average="macro")
assert_raises(Exception, average_precision_score, y_true, y_score,
average="weighted")
assert_almost_equal(average_precision_score(y_true, y_score,
average="samples"), 0.625)
assert_almost_equal(average_precision_score(y_true, y_score,
average="micro"), 0.625)
y_true = np.array([[1, 0], [0, 1]])
y_score = np.array([[0, 1], [1, 0]])
assert_almost_equal(average_precision_score(y_true, y_score,
average="macro"), 0.25)
assert_almost_equal(average_precision_score(y_true, y_score,
average="weighted"), 0.25)
assert_almost_equal(average_precision_score(y_true, y_score,
average="samples"), 0.25)
assert_almost_equal(average_precision_score(y_true, y_score,
average="micro"), 0.25)
y_true = np.array([[1, 0], [0, 1]])
y_score = np.array([[0.5, 0.5], [0.5, 0.5]])
assert_almost_equal(average_precision_score(y_true, y_score,
average="macro"), 0.75)
assert_almost_equal(average_precision_score(y_true, y_score,
average="weighted"), 0.75)
assert_almost_equal(average_precision_score(y_true, y_score,
average="samples"), 0.75)
assert_almost_equal(average_precision_score(y_true, y_score,
average="micro"), 0.75)
def test_score_scale_invariance():
# Test that average_precision_score and roc_auc_score are invariant under
# scaling or shifting of the predicted probabilities
y_true, _, probas_pred = make_prediction(binary=True)
roc_auc = roc_auc_score(y_true, probas_pred)
roc_auc_scaled = roc_auc_score(y_true, 100 * probas_pred)
roc_auc_shifted = roc_auc_score(y_true, probas_pred - 10)
assert_equal(roc_auc, roc_auc_scaled)
assert_equal(roc_auc, roc_auc_shifted)
pr_auc = average_precision_score(y_true, probas_pred)
pr_auc_scaled = average_precision_score(y_true, 100 * probas_pred)
pr_auc_shifted = average_precision_score(y_true, probas_pred - 10)
assert_equal(pr_auc, pr_auc_scaled)
assert_equal(pr_auc, pr_auc_shifted)
def check_lrap_toy(lrap_score):
# Check that it works on several small examples
assert_almost_equal(lrap_score([[0, 1]], [[0.25, 0.75]]), 1)
assert_almost_equal(lrap_score([[0, 1]], [[0.75, 0.25]]), 1 / 2)
assert_almost_equal(lrap_score([[1, 1]], [[0.75, 0.25]]), 1)
assert_almost_equal(lrap_score([[0, 0, 1]], [[0.25, 0.5, 0.75]]), 1)
assert_almost_equal(lrap_score([[0, 1, 0]], [[0.25, 0.5, 0.75]]), 1 / 2)
assert_almost_equal(lrap_score([[0, 1, 1]], [[0.25, 0.5, 0.75]]), 1)
assert_almost_equal(lrap_score([[1, 0, 0]], [[0.25, 0.5, 0.75]]), 1 / 3)
assert_almost_equal(lrap_score([[1, 0, 1]], [[0.25, 0.5, 0.75]]),
(2 / 3 + 1 / 1) / 2)
assert_almost_equal(lrap_score([[1, 1, 0]], [[0.25, 0.5, 0.75]]),
(2 / 3 + 1 / 2) / 2)
assert_almost_equal(lrap_score([[0, 0, 1]], [[0.75, 0.5, 0.25]]), 1 / 3)
assert_almost_equal(lrap_score([[0, 1, 0]], [[0.75, 0.5, 0.25]]), 1 / 2)
assert_almost_equal(lrap_score([[0, 1, 1]], [[0.75, 0.5, 0.25]]),
(1 / 2 + 2 / 3) / 2)
assert_almost_equal(lrap_score([[1, 0, 0]], [[0.75, 0.5, 0.25]]), 1)
assert_almost_equal(lrap_score([[1, 0, 1]], [[0.75, 0.5, 0.25]]),
(1 + 2 / 3) / 2)
assert_almost_equal(lrap_score([[1, 1, 0]], [[0.75, 0.5, 0.25]]), 1)
assert_almost_equal(lrap_score([[1, 1, 1]], [[0.75, 0.5, 0.25]]), 1)
assert_almost_equal(lrap_score([[0, 0, 1]], [[0.5, 0.75, 0.25]]), 1 / 3)
assert_almost_equal(lrap_score([[0, 1, 0]], [[0.5, 0.75, 0.25]]), 1)
assert_almost_equal(lrap_score([[0, 1, 1]], [[0.5, 0.75, 0.25]]),
(1 + 2 / 3) / 2)
assert_almost_equal(lrap_score([[1, 0, 0]], [[0.5, 0.75, 0.25]]), 1 / 2)
assert_almost_equal(lrap_score([[1, 0, 1]], [[0.5, 0.75, 0.25]]),
(1 / 2 + 2 / 3) / 2)
assert_almost_equal(lrap_score([[1, 1, 0]], [[0.5, 0.75, 0.25]]), 1)
assert_almost_equal(lrap_score([[1, 1, 1]], [[0.5, 0.75, 0.25]]), 1)
# Tie handling
assert_almost_equal(lrap_score([[1, 0]], [[0.5, 0.5]]), 0.5)
assert_almost_equal(lrap_score([[0, 1]], [[0.5, 0.5]]), 0.5)
assert_almost_equal(lrap_score([[1, 1]], [[0.5, 0.5]]), 1)
assert_almost_equal(lrap_score([[0, 0, 1]], [[0.25, 0.5, 0.5]]), 0.5)
assert_almost_equal(lrap_score([[0, 1, 0]], [[0.25, 0.5, 0.5]]), 0.5)
assert_almost_equal(lrap_score([[0, 1, 1]], [[0.25, 0.5, 0.5]]), 1)
assert_almost_equal(lrap_score([[1, 0, 0]], [[0.25, 0.5, 0.5]]), 1 / 3)
assert_almost_equal(lrap_score([[1, 0, 1]], [[0.25, 0.5, 0.5]]),
(2 / 3 + 1 / 2) / 2)
assert_almost_equal(lrap_score([[1, 1, 0]], [[0.25, 0.5, 0.5]]),
(2 / 3 + 1 / 2) / 2)
assert_almost_equal(lrap_score([[1, 1, 1]], [[0.25, 0.5, 0.5]]), 1)
assert_almost_equal(lrap_score([[1, 1, 0]], [[0.5, 0.5, 0.5]]), 2 / 3)
assert_almost_equal(lrap_score([[1, 1, 1, 0]], [[0.5, 0.5, 0.5, 0.5]]),
3 / 4)
def check_zero_or_all_relevant_labels(lrap_score):
random_state = check_random_state(0)
for n_labels in range(2, 5):
y_score = random_state.uniform(size=(1, n_labels))
y_score_ties = np.zeros_like(y_score)
# No relevant labels
y_true = np.zeros((1, n_labels))
assert_equal(lrap_score(y_true, y_score), 1.)
assert_equal(lrap_score(y_true, y_score_ties), 1.)
# Only relevant labels
y_true = np.ones((1, n_labels))
assert_equal(lrap_score(y_true, y_score), 1.)
assert_equal(lrap_score(y_true, y_score_ties), 1.)
# Degenerate case: only one label
assert_almost_equal(lrap_score([[1], [0], [1], [0]],
[[0.5], [0.5], [0.5], [0.5]]), 1.)
def check_lrap_error_raised(lrap_score):
# Raise a ValueError when the input is not in an appropriate format
assert_raises(ValueError, lrap_score,
[0, 1, 0], [0.25, 0.3, 0.2])
assert_raises(ValueError, lrap_score, [0, 1, 2],
[[0.25, 0.75, 0.0], [0.7, 0.3, 0.0], [0.8, 0.2, 0.0]])
assert_raises(ValueError, lrap_score, [(0), (1), (2)],
[[0.25, 0.75, 0.0], [0.7, 0.3, 0.0], [0.8, 0.2, 0.0]])
# Check that y_true.shape != y_score.shape raises the proper exception
assert_raises(ValueError, lrap_score, [[0, 1], [0, 1]], [0, 1])
assert_raises(ValueError, lrap_score, [[0, 1], [0, 1]], [[0, 1]])
assert_raises(ValueError, lrap_score, [[0, 1], [0, 1]], [[0], [1]])
assert_raises(ValueError, lrap_score, [[0, 1]], [[0, 1], [0, 1]])
assert_raises(ValueError, lrap_score, [[0], [1]], [[0, 1], [0, 1]])
assert_raises(ValueError, lrap_score, [[0, 1], [0, 1]], [[0], [1]])
def check_lrap_only_ties(lrap_score):
# Check tie handling in score
# Basic check with only ties and increasing label space
for n_labels in range(2, 10):
y_score = np.ones((1, n_labels))
# Check for a growing number of consecutive relevant labels
for n_relevant in range(1, n_labels):
# Check for a bunch of positions
for pos in range(n_labels - n_relevant):
y_true = np.zeros((1, n_labels))
y_true[0, pos:pos + n_relevant] = 1
assert_almost_equal(lrap_score(y_true, y_score),
n_relevant / n_labels)
def check_lrap_without_tie_and_increasing_score(lrap_score):
# Check that label ranking average precision works without ties
# Basic check with increasing label space size and decreasing scores
for n_labels in range(2, 10):
y_score = n_labels - (np.arange(n_labels).reshape((1, n_labels)) + 1)
# First and last
y_true = np.zeros((1, n_labels))
y_true[0, 0] = 1
y_true[0, -1] = 1
assert_almost_equal(lrap_score(y_true, y_score),
(2 / n_labels + 1) / 2)
# Check for a growing number of consecutive relevant labels
for n_relevant in range(1, n_labels):
# Check for a bunch of positions
for pos in range(n_labels - n_relevant):
y_true = np.zeros((1, n_labels))
y_true[0, pos:pos + n_relevant] = 1
assert_almost_equal(lrap_score(y_true, y_score),
sum((r + 1) / ((pos + r + 1) * n_relevant)
for r in range(n_relevant)))
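# Spot check of the closed-form expression above (illustrative only): with
# n_labels = 4, pos = 1 and n_relevant = 2 the relevant labels end up at
# ranks 2 and 3, so the expected LRAP is (1/2 + 2/3) / 2 = 7/12, and the sum
# gives the same value: 1/((1+0+1)*2) + 2/((1+1+1)*2) = 1/4 + 1/3 = 7/12.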
def _my_lrap(y_true, y_score):
"""Simple implementation of label ranking average precision"""
check_consistent_length(y_true, y_score)
y_true = check_array(y_true)
y_score = check_array(y_score)
n_samples, n_labels = y_true.shape
score = np.empty((n_samples, ))
for i in range(n_samples):
# The best rank corresponds to 1. Ranks higher than 1 are worse.
# The best inverse ranking corresponds to n_labels.
unique_rank, inv_rank = np.unique(y_score[i], return_inverse=True)
n_ranks = unique_rank.size
rank = n_ranks - inv_rank
# Ranks need to be corrected to take ties into account
# e.g., two labels tied for rank 1 both end up with rank 2.
corr_rank = np.bincount(rank, minlength=n_ranks + 1).cumsum()
rank = corr_rank[rank]
relevant = y_true[i].nonzero()[0]
if relevant.size == 0 or relevant.size == n_labels:
score[i] = 1
continue
score[i] = 0.
for label in relevant:
# Let's count the number of relevant labels with a better
# (i.e. smaller) rank.
n_ranked_above = sum(rank[r] <= rank[label] for r in relevant)
# Weight by the rank of the actual label
score[i] += n_ranked_above / rank[label]
score[i] /= relevant.size
return score.mean()
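# Worked example for the reference implementation above (illustrative only):
# with y_true = [[1, 0, 1]] and y_score = [[0.25, 0.5, 0.75]] the scores rank
# label 2 first, label 1 second and label 0 third. Label 2 has one relevant
# label ranked at or above it (itself) -> 1/1; label 0 has two -> 2/3; the
# sample score is therefore (1 + 2/3) / 2, matching the check_lrap_toy
# assertion for the same inputs.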
def check_alternative_lrap_implementation(lrap_score, n_classes=5,
n_samples=20, random_state=0):
_, y_true = make_multilabel_classification(n_features=1,
allow_unlabeled=False,
random_state=random_state,
n_classes=n_classes,
n_samples=n_samples)
# Score with ties
y_score = sparse_random_matrix(n_components=y_true.shape[0],
n_features=y_true.shape[1],
random_state=random_state)
if hasattr(y_score, "toarray"):
y_score = y_score.toarray()
score_lrap = label_ranking_average_precision_score(y_true, y_score)
score_my_lrap = _my_lrap(y_true, y_score)
assert_almost_equal(score_lrap, score_my_lrap)
# Uniform score
random_state = check_random_state(random_state)
y_score = random_state.uniform(size=(n_samples, n_classes))
score_lrap = label_ranking_average_precision_score(y_true, y_score)
score_my_lrap = _my_lrap(y_true, y_score)
assert_almost_equal(score_lrap, score_my_lrap)
def test_label_ranking_avp():
for fn in [label_ranking_average_precision_score, _my_lrap]:
yield check_lrap_toy, fn
yield check_lrap_without_tie_and_increasing_score, fn
yield check_lrap_only_ties, fn
yield check_zero_or_all_relevant_labels, fn
yield check_lrap_error_raised, label_ranking_average_precision_score
for n_samples, n_classes, random_state in product((1, 2, 8, 20),
(2, 5, 10),
range(1)):
yield (check_alternative_lrap_implementation,
label_ranking_average_precision_score,
n_classes, n_samples, random_state)
def test_coverage_error():
# Toy case
assert_almost_equal(coverage_error([[0, 1]], [[0.25, 0.75]]), 1)
assert_almost_equal(coverage_error([[0, 1]], [[0.75, 0.25]]), 2)
assert_almost_equal(coverage_error([[1, 1]], [[0.75, 0.25]]), 2)
assert_almost_equal(coverage_error([[0, 0]], [[0.75, 0.25]]), 0)
assert_almost_equal(coverage_error([[0, 0, 0]], [[0.25, 0.5, 0.75]]), 0)
assert_almost_equal(coverage_error([[0, 0, 1]], [[0.25, 0.5, 0.75]]), 1)
assert_almost_equal(coverage_error([[0, 1, 0]], [[0.25, 0.5, 0.75]]), 2)
assert_almost_equal(coverage_error([[0, 1, 1]], [[0.25, 0.5, 0.75]]), 2)
assert_almost_equal(coverage_error([[1, 0, 0]], [[0.25, 0.5, 0.75]]), 3)
assert_almost_equal(coverage_error([[1, 0, 1]], [[0.25, 0.5, 0.75]]), 3)
assert_almost_equal(coverage_error([[1, 1, 0]], [[0.25, 0.5, 0.75]]), 3)
assert_almost_equal(coverage_error([[1, 1, 1]], [[0.25, 0.5, 0.75]]), 3)
assert_almost_equal(coverage_error([[0, 0, 0]], [[0.75, 0.5, 0.25]]), 0)
assert_almost_equal(coverage_error([[0, 0, 1]], [[0.75, 0.5, 0.25]]), 3)
assert_almost_equal(coverage_error([[0, 1, 0]], [[0.75, 0.5, 0.25]]), 2)
assert_almost_equal(coverage_error([[0, 1, 1]], [[0.75, 0.5, 0.25]]), 3)
assert_almost_equal(coverage_error([[1, 0, 0]], [[0.75, 0.5, 0.25]]), 1)
assert_almost_equal(coverage_error([[1, 0, 1]], [[0.75, 0.5, 0.25]]), 3)
assert_almost_equal(coverage_error([[1, 1, 0]], [[0.75, 0.5, 0.25]]), 2)
assert_almost_equal(coverage_error([[1, 1, 1]], [[0.75, 0.5, 0.25]]), 3)
assert_almost_equal(coverage_error([[0, 0, 0]], [[0.5, 0.75, 0.25]]), 0)
assert_almost_equal(coverage_error([[0, 0, 1]], [[0.5, 0.75, 0.25]]), 3)
assert_almost_equal(coverage_error([[0, 1, 0]], [[0.5, 0.75, 0.25]]), 1)
assert_almost_equal(coverage_error([[0, 1, 1]], [[0.5, 0.75, 0.25]]), 3)
assert_almost_equal(coverage_error([[1, 0, 0]], [[0.5, 0.75, 0.25]]), 2)
assert_almost_equal(coverage_error([[1, 0, 1]], [[0.5, 0.75, 0.25]]), 3)
assert_almost_equal(coverage_error([[1, 1, 0]], [[0.5, 0.75, 0.25]]), 2)
assert_almost_equal(coverage_error([[1, 1, 1]], [[0.5, 0.75, 0.25]]), 3)
# Non-trivial cases
assert_almost_equal(coverage_error([[0, 1, 0], [1, 1, 0]],
[[0.1, 10., -3], [0, 1, 3]]),
(1 + 3) / 2.)
assert_almost_equal(coverage_error([[0, 1, 0], [1, 1, 0], [0, 1, 1]],
[[0.1, 10, -3], [0, 1, 3], [0, 2, 0]]),
(1 + 3 + 3) / 3.)
assert_almost_equal(coverage_error([[0, 1, 0], [1, 1, 0], [0, 1, 1]],
[[0.1, 10, -3], [3, 1, 3], [0, 2, 0]]),
(1 + 3 + 3) / 3.)
def test_coverage_tie_handling():
assert_almost_equal(coverage_error([[0, 0]], [[0.5, 0.5]]), 0)
assert_almost_equal(coverage_error([[1, 0]], [[0.5, 0.5]]), 2)
assert_almost_equal(coverage_error([[0, 1]], [[0.5, 0.5]]), 2)
assert_almost_equal(coverage_error([[1, 1]], [[0.5, 0.5]]), 2)
assert_almost_equal(coverage_error([[0, 0, 0]], [[0.25, 0.5, 0.5]]), 0)
assert_almost_equal(coverage_error([[0, 0, 1]], [[0.25, 0.5, 0.5]]), 2)
assert_almost_equal(coverage_error([[0, 1, 0]], [[0.25, 0.5, 0.5]]), 2)
assert_almost_equal(coverage_error([[0, 1, 1]], [[0.25, 0.5, 0.5]]), 2)
assert_almost_equal(coverage_error([[1, 0, 0]], [[0.25, 0.5, 0.5]]), 3)
assert_almost_equal(coverage_error([[1, 0, 1]], [[0.25, 0.5, 0.5]]), 3)
assert_almost_equal(coverage_error([[1, 1, 0]], [[0.25, 0.5, 0.5]]), 3)
assert_almost_equal(coverage_error([[1, 1, 1]], [[0.25, 0.5, 0.5]]), 3)
def test_label_ranking_loss():
assert_almost_equal(label_ranking_loss([[0, 1]], [[0.25, 0.75]]), 0)
assert_almost_equal(label_ranking_loss([[0, 1]], [[0.75, 0.25]]), 1)
assert_almost_equal(label_ranking_loss([[0, 0, 1]], [[0.25, 0.5, 0.75]]),
0)
assert_almost_equal(label_ranking_loss([[0, 1, 0]], [[0.25, 0.5, 0.75]]),
1 / 2)
assert_almost_equal(label_ranking_loss([[0, 1, 1]], [[0.25, 0.5, 0.75]]),
0)
assert_almost_equal(label_ranking_loss([[1, 0, 0]], [[0.25, 0.5, 0.75]]),
2 / 2)
assert_almost_equal(label_ranking_loss([[1, 0, 1]], [[0.25, 0.5, 0.75]]),
1 / 2)
assert_almost_equal(label_ranking_loss([[1, 1, 0]], [[0.25, 0.5, 0.75]]),
2 / 2)
# Undefined metrics - the ranking doesn't matter
assert_almost_equal(label_ranking_loss([[0, 0]], [[0.75, 0.25]]), 0)
assert_almost_equal(label_ranking_loss([[1, 1]], [[0.75, 0.25]]), 0)
assert_almost_equal(label_ranking_loss([[0, 0]], [[0.5, 0.5]]), 0)
assert_almost_equal(label_ranking_loss([[1, 1]], [[0.5, 0.5]]), 0)
assert_almost_equal(label_ranking_loss([[0, 0, 0]], [[0.5, 0.75, 0.25]]),
0)
assert_almost_equal(label_ranking_loss([[1, 1, 1]], [[0.5, 0.75, 0.25]]),
0)
assert_almost_equal(label_ranking_loss([[0, 0, 0]], [[0.25, 0.5, 0.5]]),
0)
assert_almost_equal(label_ranking_loss([[1, 1, 1]], [[0.25, 0.5, 0.5]]), 0)
# Non-trivial cases
assert_almost_equal(label_ranking_loss([[0, 1, 0], [1, 1, 0]],
[[0.1, 10., -3], [0, 1, 3]]),
(0 + 2 / 2) / 2.)
assert_almost_equal(label_ranking_loss(
[[0, 1, 0], [1, 1, 0], [0, 1, 1]],
[[0.1, 10, -3], [0, 1, 3], [0, 2, 0]]),
(0 + 2 / 2 + 1 / 2) / 3.)
assert_almost_equal(label_ranking_loss(
[[0, 1, 0], [1, 1, 0], [0, 1, 1]],
[[0.1, 10, -3], [3, 1, 3], [0, 2, 0]]),
(0 + 2 / 2 + 1 / 2) / 3.)
# Sparse csr matrices
assert_almost_equal(label_ranking_loss(
csr_matrix(np.array([[0, 1, 0], [1, 1, 0]])),
[[0.1, 10, -3], [3, 1, 3]]),
(0 + 2 / 2) / 2.)
def test_ranking_appropriate_input_shape():
# Check that y_true.shape != y_score.shape raises the proper exception
assert_raises(ValueError, label_ranking_loss, [[0, 1], [0, 1]], [0, 1])
assert_raises(ValueError, label_ranking_loss, [[0, 1], [0, 1]], [[0, 1]])
assert_raises(ValueError, label_ranking_loss,
[[0, 1], [0, 1]], [[0], [1]])
assert_raises(ValueError, label_ranking_loss, [[0, 1]], [[0, 1], [0, 1]])
assert_raises(ValueError, label_ranking_loss,
[[0], [1]], [[0, 1], [0, 1]])
assert_raises(ValueError, label_ranking_loss, [[0, 1], [0, 1]], [[0], [1]])
def test_ranking_loss_ties_handling():
# Tie handling
assert_almost_equal(label_ranking_loss([[1, 0]], [[0.5, 0.5]]), 1)
assert_almost_equal(label_ranking_loss([[0, 1]], [[0.5, 0.5]]), 1)
assert_almost_equal(label_ranking_loss([[0, 0, 1]], [[0.25, 0.5, 0.5]]),
1 / 2)
assert_almost_equal(label_ranking_loss([[0, 1, 0]], [[0.25, 0.5, 0.5]]),
1 / 2)
assert_almost_equal(label_ranking_loss([[0, 1, 1]], [[0.25, 0.5, 0.5]]), 0)
assert_almost_equal(label_ranking_loss([[1, 0, 0]], [[0.25, 0.5, 0.5]]), 1)
assert_almost_equal(label_ranking_loss([[1, 0, 1]], [[0.25, 0.5, 0.5]]), 1)
assert_almost_equal(label_ranking_loss([[1, 1, 0]], [[0.25, 0.5, 0.5]]), 1)
| mit |
antoniosehk/WSDeepNN | MLModel.py | 1 | 3473 | import numpy as np
from sklearn import svm
from sklearn import tree
from sklearn import metrics
from keras.datasets import mnist
from keras.utils import np_utils
#from keras.layers import Input, Dense, Dropout
#from keras.models import Model, load_model
#from keras.callbacks import ModelCheckpoint
from DataUtils import DataUtils
class MLModel:
# Initialization
def __init__(self, nb_classes,
seed=False):
if seed:
np.random.seed(0)
self.nb_classes = nb_classes
self.model = None
# Configuration
def config(self, clf="dt"):
if clf.lower() == "dt":
self.model = tree.DecisionTreeClassifier()
elif clf.lower() == "svm":
self.model = svm.SVC(kernel='rbf',
probability=True)
else:
self.model = tree.DecisionTreeClassifier()
# Fit
def fit(self, X_train, y_train):
self.model.fit(X_train, y_train)
def _proba(self, X_test):
return self.model.predict_proba(X_test)
def _predict(self, X_test):
return self.model.predict(X_test)
def predict_proba(self, X_test):
return self._proba(X_test)
def predict(self, X_test):
return self._predict(X_test)
def evaluate(self, X_test, y_test):
Y_test = np_utils.to_categorical(y_test, self.nb_classes)
y_pred = self.predict(X_test)
y_proba = self.predict_proba(X_test)
confusion_matrix = metrics.confusion_matrix(y_test, y_pred)
precision = metrics.precision_score(y_test, y_pred)
recall = metrics.recall_score(y_test, y_pred)
#specificity = specificity_score(y_test, y_pred)
#gmean = np.sqrt(recall * specificity)
f1 = metrics.f1_score(y_test, y_pred)
pr_auc = metrics.average_precision_score(Y_test, y_proba)
roc_auc = metrics.roc_auc_score(Y_test, y_proba)
return confusion_matrix, precision, recall, f1, pr_auc, roc_auc
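# Minimal usage sketch (illustrative only; the __main__ block below shows the
# full MNIST workflow). The synthetic data here is an assumption, not part of
# the original module.
def _demo_mlmodel():
    # tiny deterministic binary dataset
    X = np.random.RandomState(0).rand(100, 10).astype('float32')
    y = np.array([0, 1] * 50)
    # train a decision tree through the wrapper and return its metrics
    model = MLModel(nb_classes=2, seed=True)
    model.config(clf='dt')
    model.fit(X, y)
    return model.evaluate(X, y)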
if __name__ == '__main__':
# this function turns the label vector into an anomaly vector
def anomaly(y_train, y_test, anomaly_label):
y_train = DataUtils.anomaly(y_train, anomaly_label)
y_test = DataUtils.anomaly(y_test, anomaly_label)
return y_train, y_test
# this function prints the metrics in CSV format
def show(score):
confusion_matrix, precision, recall, f1, prc_auc, roc_auc = score
print "TN,FP,FN,TP,Precision,Recall,F1,PRC,ROC"
print "%d,%d,%d,%d,%.5f,%.5f,%.5f,%.5f,%.5f" \
%(confusion_matrix[0,0], confusion_matrix[0,1],
confusion_matrix[1,0], confusion_matrix[1,1],
precision, recall, f1, prc_auc, roc_auc)
# the data, shuffled and split between train and test sets
(X_train, y_train), (X_test, y_test) = mnist.load_data()
# set the anomaly label
anomaly_label = 9
# modify the y_train and y_test
y_train, y_test = anomaly(y_train, y_test, anomaly_label)
# preprocess
input_dim = 784
X_train = X_train.reshape(60000, input_dim)
X_test = X_test.reshape(10000, input_dim)
X_train = X_train.astype('float32')
X_test = X_test.astype('float32')
print(X_train.shape[0], 'train samples')
print(X_test.shape[0], 'test samples')
# normalization
X_train /= 255
X_test /= 255
# obtain the number of classes
nb_classes = np.size(np.unique(y_train))
print "Anomaly label: %d" %(anomaly_label)
print "Number of classes: %d" %(nb_classes)
# training
mlModel = MLModel(nb_classes, seed=True)
mlModel.config(clf='svm')
mlModel.fit(X_train, y_train)
# evaluate on training data
print "Training"
score = mlModel.evaluate(X_train, y_train)
show(score)
# evaluate on testing data
print "Testing"
score = mlModel.evaluate(X_test, y_test)
show(score)
| mit |
ericdill/xray-vision | xray_vision/messenger/mpl/cross_section_2d.py | 4 | 21948 | # ######################################################################
# Copyright (c) 2014, Brookhaven Science Associates, Brookhaven #
# National Laboratory. All rights reserved. #
# #
# Redistribution and use in source and binary forms, with or without #
# modification, are permitted provided that the following conditions #
# are met: #
# #
# * Redistributions of source code must retain the above copyright #
# notice, this list of conditions and the following disclaimer. #
# #
# * Redistributions in binary form must reproduce the above copyright #
# notice this list of conditions and the following disclaimer in #
# the documentation and/or other materials provided with the #
# distribution. #
# #
# * Neither the name of the Brookhaven Science Associates, Brookhaven #
# National Laboratory nor the names of its contributors may be used #
# to endorse or promote products derived from this software without #
# specific prior written permission. #
# #
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS #
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT #
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS #
# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE #
# COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, #
# INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES #
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR #
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) #
# HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, #
# STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OTHERWISE) ARISING #
# IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE #
# POSSIBILITY OF SUCH DAMAGE. #
########################################################################
from __future__ import (absolute_import, division, print_function,
unicode_literals)
import six
from .. import QtCore, QtGui
from matplotlib import colors
from matplotlib.cm import datad
import numpy as np
from . import AbstractMPLMessenger
from .. import AbstractMessenger2D
from ...backend.mpl.cross_section_2d import CrossSection2DView
from ...backend.mpl import cross_section_2d as View
from ...backend.mpl import AbstractMPLDataView
import logging
logger = logging.getLogger(__name__)
class CrossSection2DMessenger(AbstractMessenger2D, AbstractMPLMessenger):
"""
This is a thin wrapper around mpl.CrossSectionViewer which
manages the Qt side of the figure creation and provides slots
to pass commands down to the gui-independent layer
"""
def __init__(self, data_list, key_list, parent=None, *args, **kwargs):
# call up the inheritance chain
super(CrossSection2DMessenger, self).__init__(*args, **kwargs)
# init the appropriate view
self._view = CrossSection2DView(fig=self._fig, data_list=data_list,
key_list=key_list)
# TODO: Address issue of data storage in the cross section widget
self._ctrl_widget = CrossSection2DControlWidget(name="2-D CrossSection"
" Controls",
init_img=data_list[0],
num_images=len(
key_list))
# connect signals to slots
self.connect_sigs_to_slots()
def connect_sigs_to_slots(self):
"""
Connect the signals of the control box to the slots of the messenger
"""
# standard data manipulation signal/slot pairs
# TODO Fix this connection. It throws an exception b/c the connection fails
self._ctrl_widget.sig_update_norm.connect(self.sl_update_norm)
# standard mpl signal/slot pairs
self._ctrl_widget._cm_cb.editTextChanged[str].connect(
self.sl_update_cmap)
self._ctrl_widget._cm_cb.setEditText(self._ctrl_widget.default_cmap)
# signal/slot pairs specific to the CrossSection2DView
self._ctrl_widget.sig_update_limit_function.connect(
self.sl_update_limit_func)
self._ctrl_widget._slider_img.valueChanged.connect(self.sl_update_image)
self._ctrl_widget.sig_update_interpolation.connect(
self._view.update_interpolation)
@QtCore.Slot(int)
def sl_update_image(self, img_idx):
"""
updates the image shown in the widget, assumed to be the same size
"""
self._view.update_image(img_idx)
self.sl_update_view()
im = self._view._data_dict[self._view._key_list[img_idx]]
self._ctrl_widget.set_im_lim(lo=np.min(im), hi=np.max(im))
@QtCore.Slot(np.ndarray)
def sl_replace_image(self, img):
"""
Replaces the image shown in the widget and rebuilds everything
(so swapping axes will work)
"""
raise NotImplementedError()
@QtCore.Slot(object)
def sl_update_limit_func(self, limit_func):
"""
Updates the type of limit computation function used
"""
self._view.set_limit_func(limit_func)
self.sl_update_view()
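# Illustrative construction sketch (comments only, not part of the original
# module); the frame data and key names below are assumptions:
#
#     frames = [np.random.rand(64, 64) for _ in range(5)]
#     keys = ['frame_%d' % i for i in range(5)]
#     messenger = CrossSection2DMessenger(data_list=frames, key_list=keys)
#     # the messenger builds the paired control widget, which can be shown
#     # alongside the matplotlib figure it manages
#     messenger._ctrl_widget.show()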
class CrossSection2DControlWidget(QtGui.QDockWidget):
"""
Dock widget providing the controls (frame slider/spinbox, colormap,
interpolation, normalization and intensity-limit settings) for the
2-D cross-section display.
"""
# set up the signals
sig_update_image = QtCore.Signal(int)
sig_update_norm = QtCore.Signal(colors.Normalize)
sig_update_limit_function = QtCore.Signal(object)
sig_update_interpolation = QtCore.Signal(str)
# some defaults
default_cmap = AbstractMPLDataView._default_cmap
_CMAPS = list(datad.keys())
_CMAPS.sort()
def __init__(self, name, init_img, num_images):
QtGui.QDockWidget.__init__(self, name)
# make the control widget float
self.setFloating(True)
# add a widget that lives in the floating control widget
self._widget = QtGui.QWidget(self)
# give the widget to the dock widget
self.setWidget(self._widget)
# create a layout
ctrl_layout = QtGui.QVBoxLayout()
# set the layout to the widget
self._widget.setLayout(ctrl_layout)
self._axis_order = np.arange(init_img.ndim+1)
self._lo = np.min(init_img)
self._hi = np.max(init_img)
# set up axis swap buttons
self._cb_ax1 = QtGui.QComboBox(parent=self)
self._cb_ax2 = QtGui.QComboBox(parent=self)
self._btn_swap = QtGui.QPushButton('Swap Axes', parent=self)
self.init_swap_btns(self._cb_ax1, self._cb_ax2, self._btn_swap)
# set up slider and spinbox
self._slider_img = QtGui.QSlider(parent=self)
self._spin_img = QtGui.QSpinBox(parent=self)
# init the slider and spinbox
self.init_img_changer(self._slider_img, self._spin_img, num_images)
widget_box1 = QtGui.QVBoxLayout()
slider_label = QtGui.QLabel("&Frame")
slider_label.setBuddy(self._slider_img)
widget_box1_hbox = QtGui.QHBoxLayout()
widget_box1_hbox.addWidget(self._slider_img)
widget_box1_hbox.addWidget(self._spin_img)
widget_box1.addWidget(slider_label)
widget_box1.addLayout(widget_box1_hbox)
# set up color map combo box
self._cm_cb = QtGui.QComboBox(parent=self)
self.init_cmap_box(self._cm_cb)
# set up the interpolation combo box
self._cmb_interp = QtGui.QComboBox(parent=self)
self._cmb_interp.addItems(CrossSection2DView.interpolation)
# set up intensity manipulation combo box
intensity_behavior_data = [(View.fullrange_limit_factory,
self._no_limit_config),
(View.percentile_limit_factory,
self._percentile_config),
(View.absolute_limit_factory,
self._absolute_limit_config)]
intensity_behavior_types = ['full range',
'percentile',
'absolute']
self._intensity_behav_dict = {k: v for k, v in zip(
intensity_behavior_types,
intensity_behavior_data)}
# TODO should not have to hard-code this, but it is getting
# called before it is fully updated, figure out why
self._limit_factory = View.fullrange_limit_factory
self._cmbbox_intensity_behavior = QtGui.QComboBox(parent=self)
self._cmbbox_intensity_behavior.addItems(intensity_behavior_types)
# can add PowerNorm, BoundaryNorm, but those require extra inputs
norm_names = ['linear', 'log']
norm_funcs = [colors.Normalize, colors.LogNorm]
self._norm_dict = {k: v for k, v in zip(norm_names, norm_funcs)}
self._cmbbox_norm = QtGui.QComboBox(parent=self)
self._cmbbox_norm.addItems(norm_names)
# set up intensity manipulation spin boxes
# create the intensity manipulation spin boxes
self._spin_min = QtGui.QDoubleSpinBox(parent=self)
self._spin_max = QtGui.QDoubleSpinBox(parent=self)
self._spin_step = QtGui.QDoubleSpinBox(parent=self)
self.init_spinners(self._spin_min, self._spin_max, self._spin_step,
min_intensity=np.min(init_img),
max_intensity=np.max(init_img))
ctrl_form = QtGui.QFormLayout()
ctrl_form.addRow("Color &map", self._cm_cb)
ctrl_form.addRow("&Interpolation", self._cmb_interp)
ctrl_form.addRow("&Normalization", self._cmbbox_norm)
ctrl_form.addRow("limit &strategy", self._cmbbox_intensity_behavior)
ctrl_layout.addLayout(ctrl_form)
clim_spinners = QtGui.QGroupBox("clim parameters")
ispiner_form = QtGui.QFormLayout()
ispiner_form.addRow("mi&n", self._spin_min)
ispiner_form.addRow("ma&x", self._spin_max)
ispiner_form.addRow("s&tep", self._spin_step)
clim_spinners.setLayout(ispiner_form)
ctrl_layout.addWidget(clim_spinners)
# construct widget box 1
widget_box1_sub1 = QtGui.QVBoxLayout()
axes_swap_form = QtGui.QFormLayout()
axes_swap_form.addRow("axes A", self._cb_ax1)
axes_swap_form.addRow("axes B", self._cb_ax2)
widget_box1_sub1.addLayout(axes_swap_form)
widget_box1_sub1.addWidget(self._btn_swap)
swap_axes_box = QtGui.QGroupBox("Swap!")
swap_axes_box.setLayout(widget_box1_sub1)
swap_axes_box.setEnabled(False)
ctrl_layout.addWidget(swap_axes_box)
ctrl_layout.addLayout(widget_box1)
ctrl_layout.addStretch()
# set this down here to make sure the function will run
self._cmbbox_intensity_behavior.currentIndexChanged[str].connect(
self.set_image_intensity_behavior)
# set to full range, do this last so all the call-back propagate
self._cmbbox_intensity_behavior.setCurrentIndex(0)
# force the issue about emitting
self._cmbbox_intensity_behavior.currentIndexChanged[str].emit(
intensity_behavior_types[0])
# set this down here to make sure the function will run
self._cmbbox_norm.currentIndexChanged[str].connect(
self.set_normalization)
# set to full range, do this last so all the call-back propagate
self._cmbbox_norm.setCurrentIndex(0)
# force the issue about emitting
self._cmbbox_norm.currentIndexChanged[str].emit(
norm_names[0])
self._cmb_interp.currentIndexChanged[str].connect(
self.sig_update_interpolation)
def set_im_lim(self, lo, hi):
self._lo = lo
self._hi = hi
def init_img_changer(self, slider_img, spin_img, num_images):
slider_img.setRange(0, num_images - 1)
slider_img.setTracking(True)
slider_img.setSingleStep(1)
slider_img.setPageStep(10)
slider_img.setOrientation(QtCore.Qt.Horizontal)
spin_img.setRange(slider_img.minimum(), slider_img.maximum())
spin_img.valueChanged.connect(slider_img.setValue)
slider_img.valueChanged.connect(spin_img.setValue)
slider_img.rangeChanged.connect(spin_img.setRange)
def init_spinners(self, spin_min, spin_max, spin_step, min_intensity,
max_intensity):
# allow the spin boxes to be any value
spin_min.setMinimum(float("-inf"))
spin_min.setMaximum(float("inf"))
spin_max.setMinimum(float("-inf"))
spin_max.setMaximum(float("inf"))
spin_step.setMinimum(0)
spin_step.setMaximum(float("inf"))
# connect the intensity spinboxes to their updating method
spin_min.valueChanged.connect(
self.set_min_intensity_limit)
spin_max.valueChanged.connect(
self.set_max_intensity_limit)
spin_step.valueChanged.connect(
self.set_intensity_step)
# set the initial values for the spin boxes
spin_step.setValue((max_intensity-min_intensity)/100)
spin_max.setValue(max_intensity)
spin_min.setValue(min_intensity)
def init_swap_btns(self, cb_ax1, cb_ax2, btn_swap):
cb_ax1.setEditable(False)
cb_ax2.setEditable(False)
# TODO need to deal with changing the items in this combobox when the data is changed
cb_ax1.addItems(np.arange(len(self._axis_order)).astype(str))
cb_ax2.addItems(np.arange(len(self._axis_order)).astype(str))
btn_swap.resize(btn_swap.sizeHint())
btn_swap.clicked.connect(self.swap_stack_axes)
btn_swap.setEnabled(False)
def init_cmap_box(self, cm_cb):
cm_cb.setEditable(True)
cm_cb.addItems(self._CMAPS)
cm_cb.setEditText(self.default_cmap)
def swap_stack_axes(self):
"""
Swap the axes of the image stack based on the indices of the combo
boxes. The tooltip of the axis swap button maintains the current
position of the axes based on where they began.
e.g., the tooltip will start as [0 1 2] if a 3d array is passed.
If axes 0 and 2 are swapped, the tooltip will now read [2 1 0].
"""
axis1 = self._cb_ax1.currentIndex()
axis2 = self._cb_ax2.currentIndex()
cur_axis1 = self._axis_order[axis1]
cur_axis2 = self._axis_order[axis2]
self._axis_order[axis1] = cur_axis2
self._axis_order[axis2] = cur_axis1
self._btn_swap.setToolTip(np.array_str(self._axis_order))
self._stack = np.swapaxes(self._stack, axis1, axis2)
self.set_img_stack(self._stack)
print("stack.shape: {0}".format(self._stack.shape))
self._len = self._stack.shape[0]
self._slider_img.setRange(0, self._len - 1)
self._spin_img.setRange(self._slider_img.minimum(),
self._slider_img.maximum())
@QtCore.Slot(str)
def set_normalization(self, norm_name):
norm = self._norm_dict[str(norm_name)]
self.sig_update_norm.emit(norm())
@QtCore.Slot(str)
def set_image_intensity_behavior(self, im_behavior):
# get the limit factory to use
(limit_fac, get_params) = self._intensity_behav_dict[str(im_behavior)]
# stash the limit function factory for later use
self._limit_factory = limit_fac
# fixes the gui state, grabs default spinner values + spinner state
limits, state = get_params()
# make the limit function
limit_func = limit_fac(limits)
# emit the function to be passed on to the underlying object
self.sig_update_limit_function.emit(limit_func)
# set the new limits
self._set_spinbox_limits(*limits)
self._spinbox_enabler(state)
def _spinbox_enabler(self, state):
self._spin_max.setEnabled(state)
self._spin_min.setEnabled(state)
self._spin_step.setEnabled(state)
def _no_limit_config(self):
"""
Helper function to set up the gui for the 'no limit'
(max/min) color bounds
"""
# turn off the spin boxes
# just echo back what it is and don't change it
return (self._spin_min.value(),
self._spin_max.value()), False
def _percentile_config(self):
"""
helper function to set up the gui for use with the percentile
color bounds
"""
# return full range
return (0, 100), True
def _absolute_limit_config(self):
"""
Helper function to set up the gui for use with absolute limits
"""
return (self._lo, self._hi), True
def _set_spinbox_limits(self, bottom_val, top_val):
# turn off signals on the spin boxes
reset_state = [(sb, sb.blockSignals(True)) for sb in
(self._spin_max,
self._spin_min)]
try:
# set the top and bottom limits on the spinboxes to be in bounds
self._spin_max.setMinimum(bottom_val)
self._spin_min.setMinimum(bottom_val)
self._spin_max.setMaximum(top_val)
self._spin_min.setMaximum(top_val)
# don't let the step be bigger than the total allowed range
self._spin_step.setMaximum(top_val - bottom_val)
if not np.isinf(bottom_val) or not np.isinf(top_val):
# set the current values
self._spin_min.setValue(bottom_val)
self._spin_max.setValue(top_val)
# this will trigger via the call-back updating everything else
self._spin_step.setValue(
(top_val - bottom_val) / 100)
finally:
# un-wrap the signal blocking
[sb.blockSignals(state) for sb, state in reset_state]
@QtCore.Slot(float)
def set_intensity_step(self, intensity_step):
"""
Slot for the intensity-step spinbox valueChanged() signal.
Sets the single-step size of the min/max spin boxes and widens the
displayed decimal precision when the new step requires it.
"""
# set the intensity steps for each of the combo boxes
self._spin_step.setSingleStep(intensity_step)
self._spin_max.setSingleStep(intensity_step)
self._spin_min.setSingleStep(intensity_step)
# parse the currently displayed string to determine if the last digit
# is non-zero. If it is, increase the number of displayed decimal
# places by 1
str_intensity_step = str(intensity_step)
num_decimals = len(str_intensity_step.split('.')[-1])
last_decimal = str_intensity_step[-1]
if last_decimal != '0':  # compare against the character '0', not the int 0
self._spin_step.setDecimals(num_decimals + 1)
self._spin_min.setDecimals(num_decimals + 1)
self._spin_max.setDecimals(num_decimals + 1)
@QtCore.Slot(float)
def set_min_intensity_limit(self, min_intensity):
# grab the max value
max_intensity = self._spin_max.value()
# grab the step value
intensity_step = self._spin_step.value()
# convert max/min to a number of steps
_max = int(round(max_intensity / intensity_step))
_min = int(round(min_intensity / intensity_step))
# if max is not at least a step greater than min, adjust
if not _max > _min:
max_intensity = min_intensity + intensity_step
# this should take care of the call back to the viewer
self._spin_max.setValue(max_intensity)
else:
limit_func = self._limit_factory((min_intensity, max_intensity))
self.sig_update_limit_function.emit(limit_func)
@QtCore.Slot(float)
def set_max_intensity_limit(self, max_intensity):
# grab the max value
min_intensity = self._spin_min.value()
# grab the step value
intensity_step = self._spin_step.value()
_max = int(round(max_intensity / intensity_step))
_min = int(round(min_intensity / intensity_step))
if not _max > _min:
min_intensity = max_intensity - intensity_step
self._spin_min.setValue(min_intensity)
else:
limit_func = self._limit_factory((min_intensity, max_intensity))
self.sig_update_limit_function.emit(limit_func)
@QtCore.Slot(float, float)
def set_limits(self, bottom, top):
# TODO update the spinners + validate
limit_func = self._limit_factory((bottom, top))
self.sig_update_limit_function.emit(limit_func)
def set_img_stack(self, img_stack):
"""
Give the widget a new image stack without remaking the widget.
Only call this after the widget has been constructed. In
other words, don't call this from the __init__ method
Parameters
----------
img_stack: anything that returns a 2D array when __getitem__ is called
"""
if img_stack is not None:
self.stack = img_stack
self._view.sl_update_image(0)
@QtCore.Slot(int)
def update_frame(self, frame_idx):
self.sig_update_image.emit(frame_idx)
| bsd-3-clause |
zrhans/pythonanywhere | .virtualenvs/django19/lib/python3.4/site-packages/pandas/stats/plm.py | 14 | 24672 | """
Linear regression objects for panel data
"""
# pylint: disable-msg=W0231
# pylint: disable-msg=E1101,E1103
from __future__ import division
from pandas.compat import range
from pandas import compat
import warnings
import numpy as np
from pandas.core.panel import Panel
from pandas.core.frame import DataFrame
from pandas.core.reshape import get_dummies
from pandas.core.series import Series
from pandas.core.sparse import SparsePanel
from pandas.stats.ols import OLS, MovingOLS
import pandas.stats.common as com
import pandas.stats.math as math
from pandas.util.decorators import cache_readonly
class PanelOLS(OLS):
"""Implements panel OLS.
See ols function docs
"""
_panel_model = True
def __init__(self, y, x, weights=None, intercept=True, nw_lags=None,
entity_effects=False, time_effects=False, x_effects=None,
cluster=None, dropped_dummies=None, verbose=False,
nw_overlap=False):
self._x_orig = x
self._y_orig = y
self._weights = weights
self._intercept = intercept
self._nw_lags = nw_lags
self._nw_overlap = nw_overlap
self._entity_effects = entity_effects
self._time_effects = time_effects
self._x_effects = x_effects
self._dropped_dummies = dropped_dummies or {}
self._cluster = com._get_cluster_type(cluster)
self._verbose = verbose
(self._x, self._x_trans,
self._x_filtered, self._y,
self._y_trans) = self._prepare_data()
self._index = self._x.index.levels[0]
self._T = len(self._index)
def log(self, msg):
if self._verbose: # pragma: no cover
print(msg)
def _prepare_data(self):
"""Cleans and stacks input data into DataFrame objects
If time effects is True, then we turn off intercepts and omit an item
from every (entity and x) fixed effect.
Otherwise:
- If we have an intercept, we omit an item from every fixed effect.
- Else, we omit an item from every fixed effect except one of them.
The categorical variables will get dropped from x.
"""
(x, x_filtered, y, weights, cat_mapping) = self._filter_data()
self.log('Adding dummies to X variables')
x = self._add_dummies(x, cat_mapping)
self.log('Adding dummies to filtered X variables')
x_filtered = self._add_dummies(x_filtered, cat_mapping)
if self._x_effects:
x = x.drop(self._x_effects, axis=1)
x_filtered = x_filtered.drop(self._x_effects, axis=1)
if self._time_effects:
x_regressor = x.sub(x.mean(level=0), level=0)
unstacked_y = y.unstack()
y_regressor = unstacked_y.sub(unstacked_y.mean(1), axis=0).stack()
y_regressor.index = y.index
elif self._intercept:
# only add intercept when no time effects
self.log('Adding intercept')
x = x_regressor = add_intercept(x)
x_filtered = add_intercept(x_filtered)
y_regressor = y
else:
self.log('No intercept added')
x_regressor = x
y_regressor = y
if weights is not None:
if not y_regressor.index.equals(weights.index):
raise AssertionError("y_regressor and weights must have the "
"same index")
if not x_regressor.index.equals(weights.index):
raise AssertionError("x_regressor and weights must have the "
"same index")
rt_weights = np.sqrt(weights)
y_regressor = y_regressor * rt_weights
x_regressor = x_regressor.mul(rt_weights, axis=0)
return x, x_regressor, x_filtered, y, y_regressor
def _filter_data(self):
"""
"""
data = self._x_orig
cat_mapping = {}
if isinstance(data, DataFrame):
data = data.to_panel()
else:
if isinstance(data, Panel):
data = data.copy()
if not isinstance(data, SparsePanel):
data, cat_mapping = self._convert_x(data)
if not isinstance(data, Panel):
data = Panel.from_dict(data, intersect=True)
x_names = data.items
if self._weights is not None:
data['__weights__'] = self._weights
# Filter x's without y (so we can make a prediction)
filtered = data.to_frame()
# Filter all data together using to_frame
# convert to DataFrame
y = self._y_orig
if isinstance(y, Series):
y = y.unstack()
data['__y__'] = y
data_long = data.to_frame()
x_filt = filtered.filter(x_names)
x = data_long.filter(x_names)
y = data_long['__y__']
if self._weights is not None and not self._weights.empty:
weights = data_long['__weights__']
else:
weights = None
return x, x_filt, y, weights, cat_mapping
def _convert_x(self, x):
# Converts non-numeric data in x to floats. x_converted is the
# DataFrame with converted values, and cat_mapping is a dict that
# provides the reverse mapping. For example, if 'A' was converted to 0
# for the x named 'variety', then cat_mapping['variety'][0] is 'A'.
x_converted = {}
cat_mapping = {}
# x can be either a dict or a Panel, but in Python 3, dicts don't have
# .iteritems
iteritems = getattr(x, 'iteritems', x.items)
for key, df in iteritems():
if not isinstance(df, DataFrame):
raise AssertionError("all input items must be DataFrames, "
"at least one is of "
"type {0}".format(type(df)))
if _is_numeric(df):
x_converted[key] = df
else:
try:
df = df.astype(float)
except (TypeError, ValueError):
values = df.values
distinct_values = sorted(set(values.flat))
cat_mapping[key] = dict(enumerate(distinct_values))
new_values = np.searchsorted(distinct_values, values)
x_converted[key] = DataFrame(new_values, index=df.index,
columns=df.columns)
if len(cat_mapping) == 0:
x_converted = x
return x_converted, cat_mapping
def _add_dummies(self, panel, mapping):
"""
Add entity and / or categorical dummies to input X DataFrame
Returns
-------
DataFrame
"""
panel = self._add_entity_effects(panel)
panel = self._add_categorical_dummies(panel, mapping)
return panel
def _add_entity_effects(self, panel):
"""
Add entity dummies to panel
Returns
-------
DataFrame
"""
from pandas.core.reshape import make_axis_dummies
if not self._entity_effects:
return panel
self.log('-- Adding entity fixed effect dummies')
dummies = make_axis_dummies(panel, 'minor')
if not self._use_all_dummies:
if 'entity' in self._dropped_dummies:
to_exclude = str(self._dropped_dummies.get('entity'))
else:
to_exclude = dummies.columns[0]
if to_exclude not in dummies.columns:
raise Exception('%s not in %s' % (to_exclude,
dummies.columns))
self.log('-- Excluding dummy for entity: %s' % to_exclude)
dummies = dummies.filter(dummies.columns.difference([to_exclude]))
dummies = dummies.add_prefix('FE_')
panel = panel.join(dummies)
return panel
def _add_categorical_dummies(self, panel, cat_mappings):
"""
Add categorical dummies to panel
Returns
-------
DataFrame
"""
if not self._x_effects:
return panel
dropped_dummy = (self._entity_effects and not self._use_all_dummies)
for effect in self._x_effects:
self.log('-- Adding fixed effect dummies for %s' % effect)
dummies = get_dummies(panel[effect])
val_map = cat_mappings.get(effect)
if val_map:
val_map = dict((v, k) for k, v in compat.iteritems(val_map))
if dropped_dummy or not self._use_all_dummies:
if effect in self._dropped_dummies:
to_exclude = mapped_name = self._dropped_dummies.get(
effect)
if val_map:
mapped_name = val_map[to_exclude]
else:
to_exclude = mapped_name = dummies.columns[0]
if mapped_name not in dummies.columns: # pragma: no cover
raise Exception('%s not in %s' % (to_exclude,
dummies.columns))
self.log(
'-- Excluding dummy for %s: %s' % (effect, to_exclude))
dummies = dummies.filter(dummies.columns.difference([mapped_name]))
dropped_dummy = True
dummies = _convertDummies(dummies, cat_mappings.get(effect))
dummies = dummies.add_prefix('%s_' % effect)
panel = panel.join(dummies)
return panel
@property
def _use_all_dummies(self):
"""
In the case of using an intercept or including time fixed
effects, completely partitioning the sample would make the X
not full rank.
"""
return (not self._intercept and not self._time_effects)
@cache_readonly
def _beta_raw(self):
"""Runs the regression and returns the beta."""
X = self._x_trans.values
Y = self._y_trans.values.squeeze()
beta, _, _, _ = np.linalg.lstsq(X, Y)
return beta
@cache_readonly
def beta(self):
return Series(self._beta_raw, index=self._x.columns)
@cache_readonly
def _df_model_raw(self):
"""Returns the raw model degrees of freedom."""
return self._df_raw - 1
@cache_readonly
def _df_resid_raw(self):
"""Returns the raw residual degrees of freedom."""
return self._nobs - self._df_raw
@cache_readonly
def _df_raw(self):
"""Returns the degrees of freedom."""
df = math.rank(self._x_trans.values)
if self._time_effects:
df += self._total_times
return df
@cache_readonly
def _r2_raw(self):
Y = self._y_trans.values.squeeze()
X = self._x_trans.values
resid = Y - np.dot(X, self._beta_raw)
SSE = (resid ** 2).sum()
if self._use_centered_tss:
SST = ((Y - np.mean(Y)) ** 2).sum()
else:
SST = (Y ** 2).sum()
return 1 - SSE / SST
@property
def _use_centered_tss(self):
# has_intercept = np.abs(self._resid_raw.sum()) < _FP_ERR
return self._intercept or self._entity_effects or self._time_effects
@cache_readonly
def _r2_adj_raw(self):
"""Returns the raw r-squared adjusted values."""
nobs = self._nobs
factors = (nobs - 1) / (nobs - self._df_raw)
return 1 - (1 - self._r2_raw) * factors
@cache_readonly
def _resid_raw(self):
Y = self._y.values.squeeze()
X = self._x.values
return Y - np.dot(X, self._beta_raw)
@cache_readonly
def resid(self):
return self._unstack_vector(self._resid_raw)
@cache_readonly
def _rmse_raw(self):
"""Returns the raw rmse values."""
# X = self._x.values
# Y = self._y.values.squeeze()
X = self._x_trans.values
Y = self._y_trans.values.squeeze()
resid = Y - np.dot(X, self._beta_raw)
ss = (resid ** 2).sum()
return np.sqrt(ss / (self._nobs - self._df_raw))
@cache_readonly
def _var_beta_raw(self):
cluster_axis = None
if self._cluster == 'time':
cluster_axis = 0
elif self._cluster == 'entity':
cluster_axis = 1
x = self._x
y = self._y
if self._time_effects:
xx = _xx_time_effects(x, y)
else:
xx = np.dot(x.values.T, x.values)
return _var_beta_panel(y, x, self._beta_raw, xx,
self._rmse_raw, cluster_axis, self._nw_lags,
self._nobs, self._df_raw, self._nw_overlap)
@cache_readonly
def _y_fitted_raw(self):
"""Returns the raw fitted y values."""
return np.dot(self._x.values, self._beta_raw)
@cache_readonly
def y_fitted(self):
return self._unstack_vector(self._y_fitted_raw, index=self._x.index)
def _unstack_vector(self, vec, index=None):
if index is None:
index = self._y_trans.index
panel = DataFrame(vec, index=index, columns=['dummy'])
return panel.to_panel()['dummy']
def _unstack_y(self, vec):
unstacked = self._unstack_vector(vec)
return unstacked.reindex(self.beta.index)
@cache_readonly
def _time_obs_count(self):
return self._y_trans.count(level=0).values
@cache_readonly
def _time_has_obs(self):
return self._time_obs_count > 0
@property
def _nobs(self):
return len(self._y)
def _convertDummies(dummies, mapping):
# cleans up the names of the generated dummies
new_items = []
for item in dummies.columns:
if not mapping:
var = str(item)
if isinstance(item, float):
var = '%g' % item
new_items.append(var)
else:
# renames the dummies if a conversion dict is provided
new_items.append(mapping[int(item)])
dummies = DataFrame(dummies.values, index=dummies.index,
columns=new_items)
return dummies
def _is_numeric(df):
for col in df:
if df[col].dtype.name == 'object':
return False
return True
def add_intercept(panel, name='intercept'):
"""
Add column of ones to input panel
Parameters
----------
panel: Panel / DataFrame
    name: string, default 'intercept'
Returns
-------
New object (same type as input)
"""
panel = panel.copy()
panel[name] = 1.
return panel.consolidate()
class MovingPanelOLS(MovingOLS, PanelOLS):
"""Implements rolling/expanding panel OLS.
See ols function docs
"""
_panel_model = True
def __init__(self, y, x, weights=None,
window_type='expanding', window=None,
min_periods=None,
min_obs=None,
intercept=True,
nw_lags=None, nw_overlap=False,
entity_effects=False,
time_effects=False,
x_effects=None,
cluster=None,
dropped_dummies=None,
verbose=False):
self._args = dict(intercept=intercept,
nw_lags=nw_lags,
nw_overlap=nw_overlap,
entity_effects=entity_effects,
time_effects=time_effects,
x_effects=x_effects,
cluster=cluster,
dropped_dummies=dropped_dummies,
verbose=verbose)
PanelOLS.__init__(self, y=y, x=x, weights=weights,
**self._args)
self._set_window(window_type, window, min_periods)
if min_obs is None:
min_obs = len(self._x.columns) + 1
self._min_obs = min_obs
@cache_readonly
def resid(self):
return self._unstack_y(self._resid_raw)
@cache_readonly
def y_fitted(self):
return self._unstack_y(self._y_fitted_raw)
@cache_readonly
def y_predict(self):
"""Returns the predicted y values."""
return self._unstack_y(self._y_predict_raw)
def lagged_y_predict(self, lag=1):
"""
Compute forecast Y value lagging coefficient by input number
of time periods
Parameters
----------
lag : int
Returns
-------
DataFrame
"""
x = self._x.values
betas = self._beta_matrix(lag=lag)
return self._unstack_y((betas * x).sum(1))
@cache_readonly
def _rolling_ols_call(self):
return self._calc_betas(self._x_trans, self._y_trans)
@cache_readonly
def _df_raw(self):
"""Returns the degrees of freedom."""
df = self._rolling_rank()
if self._time_effects:
df += self._window_time_obs
return df[self._valid_indices]
@cache_readonly
def _var_beta_raw(self):
"""Returns the raw covariance of beta."""
x = self._x
y = self._y
dates = x.index.levels[0]
cluster_axis = None
if self._cluster == 'time':
cluster_axis = 0
elif self._cluster == 'entity':
cluster_axis = 1
nobs = self._nobs
rmse = self._rmse_raw
beta = self._beta_raw
df = self._df_raw
window = self._window
if not self._time_effects:
# Non-transformed X
cum_xx = self._cum_xx(x)
results = []
for n, i in enumerate(self._valid_indices):
if self._is_rolling and i >= window:
prior_date = dates[i - window + 1]
else:
prior_date = dates[0]
date = dates[i]
x_slice = x.truncate(prior_date, date)
y_slice = y.truncate(prior_date, date)
if self._time_effects:
xx = _xx_time_effects(x_slice, y_slice)
else:
xx = cum_xx[i]
if self._is_rolling and i >= window:
xx = xx - cum_xx[i - window]
result = _var_beta_panel(y_slice, x_slice, beta[n], xx, rmse[n],
cluster_axis, self._nw_lags,
nobs[n], df[n], self._nw_overlap)
results.append(result)
return np.array(results)
@cache_readonly
def _resid_raw(self):
beta_matrix = self._beta_matrix(lag=0)
Y = self._y.values.squeeze()
X = self._x.values
resid = Y - (X * beta_matrix).sum(1)
return resid
@cache_readonly
def _y_fitted_raw(self):
x = self._x.values
betas = self._beta_matrix(lag=0)
return (betas * x).sum(1)
@cache_readonly
def _y_predict_raw(self):
"""Returns the raw predicted y values."""
x = self._x.values
betas = self._beta_matrix(lag=1)
return (betas * x).sum(1)
def _beta_matrix(self, lag=0):
if lag < 0:
raise AssertionError("'lag' must be greater than or equal to 0, "
"input was {0}".format(lag))
index = self._y_trans.index
major_labels = index.labels[0]
labels = major_labels - lag
indexer = self._valid_indices.searchsorted(labels, side='left')
beta_matrix = self._beta_raw[indexer]
beta_matrix[labels < self._valid_indices[0]] = np.NaN
return beta_matrix
@cache_readonly
def _enough_obs(self):
# XXX: what's the best way to determine where to start?
# TODO: write unit tests for this
rank_threshold = len(self._x.columns) + 1
if self._min_obs < rank_threshold: # pragma: no cover
warnings.warn('min_obs is smaller than rank of X matrix')
enough_observations = self._nobs_raw >= self._min_obs
enough_time_periods = self._window_time_obs >= self._min_periods
return enough_time_periods & enough_observations
def create_ols_dict(attr):
def attr_getter(self):
d = {}
for k, v in compat.iteritems(self.results):
result = getattr(v, attr)
d[k] = result
return d
return attr_getter
def create_ols_attr(attr):
return property(create_ols_dict(attr))
class NonPooledPanelOLS(object):
"""Implements non-pooled panel OLS.
Parameters
----------
y : DataFrame
x : Series, DataFrame, or dict of Series
intercept : bool
True if you want an intercept.
nw_lags : None or int
Number of Newey-West lags.
window_type : {'full_sample', 'rolling', 'expanding'}
'full_sample' by default
window : int
size of window (for rolling/expanding OLS)
"""
ATTRIBUTES = [
'beta',
'df',
'df_model',
'df_resid',
'f_stat',
'p_value',
'r2',
'r2_adj',
'resid',
'rmse',
'std_err',
'summary_as_matrix',
't_stat',
'var_beta',
'x',
'y',
'y_fitted',
'y_predict'
]
def __init__(self, y, x, window_type='full_sample', window=None,
min_periods=None, intercept=True, nw_lags=None,
nw_overlap=False):
for attr in self.ATTRIBUTES:
setattr(self.__class__, attr, create_ols_attr(attr))
results = {}
for entity in y:
entity_y = y[entity]
entity_x = {}
for x_var in x:
entity_x[x_var] = x[x_var][entity]
from pandas.stats.interface import ols
results[entity] = ols(y=entity_y,
x=entity_x,
window_type=window_type,
window=window,
min_periods=min_periods,
intercept=intercept,
nw_lags=nw_lags,
nw_overlap=nw_overlap)
self.results = results
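# A rough usage sketch for NonPooledPanelOLS (toy data and names below are
# hypothetical, not taken from the original source): y is a DataFrame with one
# column per entity, x maps each regressor name to a DataFrame with the same
# entity columns, and one OLS is fit per entity.
#
#   >>> y = DataFrame({'ent_a': [1., 2., 3.], 'ent_b': [2., 4., 6.]})
#   >>> x = {'x1': DataFrame({'ent_a': [1., 2., 3.], 'ent_b': [1., 2., 3.]})}
#   >>> model = NonPooledPanelOLS(y, x)
#   >>> model.beta  # dict mapping each entity to its fitted beta Series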
def _var_beta_panel(y, x, beta, xx, rmse, cluster_axis,
nw_lags, nobs, df, nw_overlap):
xx_inv = math.inv(xx)
yv = y.values
if cluster_axis is None:
if nw_lags is None:
return xx_inv * (rmse ** 2)
else:
resid = yv - np.dot(x.values, beta)
m = (x.values.T * resid).T
xeps = math.newey_west(m, nw_lags, nobs, df, nw_overlap)
return np.dot(xx_inv, np.dot(xeps, xx_inv))
else:
Xb = np.dot(x.values, beta).reshape((len(x.values), 1))
resid = DataFrame(yv[:, None] - Xb, index=y.index, columns=['resid'])
if cluster_axis == 1:
x = x.swaplevel(0, 1).sortlevel(0)
resid = resid.swaplevel(0, 1).sortlevel(0)
m = _group_agg(x.values * resid.values, x.index._bounds,
lambda x: np.sum(x, axis=0))
if nw_lags is None:
nw_lags = 0
xox = 0
for i in range(len(x.index.levels[0])):
xox += math.newey_west(m[i: i + 1], nw_lags,
nobs, df, nw_overlap)
return np.dot(xx_inv, np.dot(xox, xx_inv))
def _group_agg(values, bounds, f):
"""
R-style aggregator
Parameters
----------
values : N-length or N x K ndarray
bounds : B-length ndarray
f : ndarray aggregation function
Returns
-------
ndarray with same length as bounds array
"""
if values.ndim == 1:
N = len(values)
result = np.empty(len(bounds), dtype=float)
elif values.ndim == 2:
N, K = values.shape
result = np.empty((len(bounds), K), dtype=float)
testagg = f(values[:min(1, len(values))])
if isinstance(testagg, np.ndarray) and testagg.ndim == 2:
raise AssertionError('Function must reduce')
for i, left_bound in enumerate(bounds):
if i == len(bounds) - 1:
right_bound = N
else:
right_bound = bounds[i + 1]
result[i] = f(values[left_bound:right_bound])
return result
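# Illustrative call for _group_agg (values below are made up): with bounds
# [0, 2] the rows split into blocks [0:2] and [2:N], and f reduces each block.
#
#   >>> vals = np.array([1., 2., 3., 4.])
#   >>> _group_agg(vals, np.array([0, 2]), lambda x: np.sum(x, axis=0))
#   array([ 3.,  7.])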
def _xx_time_effects(x, y):
"""
Returns X'X - (X'T) (T'T)^-1 (T'X)
"""
# X'X
xx = np.dot(x.values.T, x.values)
xt = x.sum(level=0).values
count = y.unstack().count(1).values
selector = count > 0
    # X'X - (X'T) (T'T)^-1 (T'X)
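    # Reading of the step below (explanatory note, not from the original
    # source): xt holds the per-period column sums of X, i.e. T'X, and count
    # holds the diagonal of T'T, so np.dot(xt.T / count, xt) sums the
    # per-period outer products scaled by 1/count, i.e. (X'T)(T'T)^-1(T'X).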
xt = xt[selector]
count = count[selector]
return xx - np.dot(xt.T / count, xt)
| apache-2.0 |
alf3r/GidroGraf-Sirius | src/main.py | 1 | 6674 | import os
import matplotlib.pyplot as plt
import numpy as np
import cv2
from gidroGraf_DBreader import Hyscan5wrapper
from Sonar_data import Sonar_data
def range2points(range, datarate, c):
return round(2 * datarate * range / c)
def points2range(n_points, datarate, c):
time0 = n_points / datarate
range = c * time0 / 2
return range
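# How the two helpers above relate (the 12 kHz data rate here is purely
# illustrative): range2points() maps a slant range to a sample count via
# 2 * datarate * range / c, and points2range() inverts that mapping up to
# rounding.
#
#   >>> range2points(100, 12000, 1500)    # -> 1600 samples
#   1600
#   >>> points2range(1600, 12000, 1500)   # -> 100.0 metres
#   100.0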
if __name__ == "__main__":
    # Define the input data
    source_starboard = 101  # starboard side
    source_port = 102  # port side
    c = 1500  # speed of sound
    v = 1  # sonar speed
    depth = 0  # depth below the sonar
current_path = os.getcwd()
path2hyscanbin = r'/home/dmitriev.a/0900/hyscan-builder-linux/bin'
path2hyscanprj = r'/home/dmitriev.a/dev/hsdb'
project_name = '2017'
track_name = '7'
try:
        # ============================ DATABASE ACCESS =================================================================
        # DB reader: create an instance of the gidroGraf_DBreader class
        DB = Hyscan5wrapper(path2hyscanbin, path2hyscanprj, project_name)
        # Read the track info [id, first line index, last line index]
track_port = DB.get_track_id(track_name, source_port)
track_starboard = DB.get_track_id(track_name, source_starboard)
track_id_port = track_port[0]
track_size_port = track_port[1:]
datarate_port = DB.read_datarate(track_name, source_port)
track_id_starboard = track_starboard[0]
track_size_starboard = track_starboard[1:]
datarate_starboard = DB.read_datarate(track_name, source_starboard)
        # Class instances - handlers for the sonar data
sonar_port = Sonar_data(track_id_port, 'Sonar_Port', datarate_port, v, c)
sonar_starboard = Sonar_data(track_id_starboard, 'Sonar_Starboard', datarate_starboard, v, c)
        # Determine how many lines need to be read
        count_totalLines = track_size_port[1] - track_size_port[0]  # total number of lines in the DB
        count_lines2read = count_totalLines  # number of lines we want to read
        # Read the lines from the DB and store them in RAM in the data_* objects
data_port = DB.read_lines(sonar_port.id, track_size_port[0], count_lines2read)
data_starboard = DB.read_lines(sonar_starboard.id, track_size_starboard[0], count_lines2read)
        N_port = sonar_port.range2points(depth)  # first point index to process (depends on the depth)
        N_starboard = sonar_starboard.range2points(depth)  # first point index to process (depends on the depth)
        M = 0  # first line index to process
sonar_port.set_data(data_port[M:, N_port:])
sonar_starboard.set_data(data_starboard[M:, N_starboard:])
sonar_port.apply_left()
data_port = []
data_starboard = []
clahe1 = cv2.createCLAHE(clipLimit=2, tileGridSize=(30, 30))
sonar_port.data = clahe1.apply(sonar_port.data)
        # ============================ DATA PROCESSING =================================================================
sonar_port.convert_range()
sonar_starboard.convert_range()
sonar_port.get_image(-1, 6000)
sonar_starboard.get_image(-1, 6000)
if sonar_port.data.size > sonar_starboard.data.size:
zeros_arr = np.zeros((sonar_port.data.shape[0], sonar_port.data.shape[1] - sonar_starboard.data.shape[1]))
sonar_starboard.extend_data(zeros_arr)
img_datarate = sonar_starboard.datarate
else:
zeros_arr = np.zeros((sonar_starboard.data.shape[0], sonar_starboard.data.shape[1] - sonar_port.data.shape[1]))
sonar_port.extend_data(zeros_arr)
img_datarate = sonar_port.datarate
img = np.concatenate((sonar_port.data, sonar_starboard.data), axis=1).astype('uint8')
sonar_port.data = []
sonar_starboard.data = []
zeros_arr = []
        # Perform the processing
clahe = cv2.createCLAHE(clipLimit=2, tileGridSize=(30, 30))
img = clahe.apply(img)
a1 = np.median(img, 0)
plt.hist(a1, 256, range=[0, 255], fc='k', ec='k')
plt.show()
a = cv2.blur(img, (20, 20))
# a = cv2.adaptiveThreshold(a, 255, cv2.ADAPTIVE_THRESH_GAUSSIAN_C, cv2.THRESH_BINARY, 201, 1)
retval2, a = cv2.threshold(a, 160, 255, cv2.THRESH_BINARY)
im2, contours, hierarchy = cv2.findContours(a, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
img = cv2.cvtColor(img, cv2.COLOR_GRAY2RGB)
object_areaMin_px = range2points(500, img_datarate, c)
object_areaMax_px = range2points(1200, img_datarate, c)
new_boxes = []
new_contours = []
for cnt in contours:
area = cv2.contourArea(cnt)
if (area < object_areaMax_px) & (area > object_areaMin_px):
rect = cv2.minAreaRect(cnt)
box = cv2.boxPoints(rect)
# convert all coordinates floating point values to int
box = np.int0(box)
                # collect the rotated bounding box; it is drawn in red further below
new_contours.append(cnt)
new_boxes.append(box)
area_m = "%.1f" % (points2range(area, img_datarate, c))
cv2.putText(img, area_m, (box[0][0], box[0][1] - 20), cv2.FONT_HERSHEY_SIMPLEX, 1, (255, 255, 255), 2, cv2.LINE_AA)
contours = []
cv2.drawContours(img, new_contours, -1, (0, 255, 0), 2)
cv2.drawContours(img, new_boxes, -1, (255, 0, 0), 10)
figure, axes = plt.subplots(1, 2, sharey=True)
axes[0].imshow(a, interpolation='bicubic', clim=(0, 255))
axes[1].imshow(img, interpolation='bicubic', clim=(0, 255))
plt.show()
except Exception as err:
raise err
| gpl-3.0 |
winklerand/pandas | pandas/tests/util/test_util.py | 1 | 17280 | # -*- coding: utf-8 -*-
import os
import locale
import codecs
import sys
from uuid import uuid4
from collections import OrderedDict
import pytest
from pandas.compat import intern
from pandas.core.common import _all_none
from pandas.util._move import move_into_mutable_buffer, BadMove, stolenbuf
from pandas.util._decorators import deprecate_kwarg, make_signature
from pandas.util._validators import (validate_args, validate_kwargs,
validate_args_and_kwargs,
validate_bool_kwarg)
import pandas.util.testing as tm
from pandas.util._test_decorators import safe_import
class TestDecorators(object):
def setup_method(self, method):
@deprecate_kwarg('old', 'new')
def _f1(new=False):
return new
@deprecate_kwarg('old', 'new', {'yes': True, 'no': False})
def _f2(new=False):
return new
@deprecate_kwarg('old', 'new', lambda x: x + 1)
def _f3(new=0):
return new
self.f1 = _f1
self.f2 = _f2
self.f3 = _f3
def test_deprecate_kwarg(self):
x = 78
with tm.assert_produces_warning(FutureWarning):
result = self.f1(old=x)
assert result is x
with tm.assert_produces_warning(None):
self.f1(new=x)
def test_dict_deprecate_kwarg(self):
x = 'yes'
with tm.assert_produces_warning(FutureWarning):
result = self.f2(old=x)
assert result
def test_missing_deprecate_kwarg(self):
x = 'bogus'
with tm.assert_produces_warning(FutureWarning):
result = self.f2(old=x)
assert result == 'bogus'
def test_callable_deprecate_kwarg(self):
x = 5
with tm.assert_produces_warning(FutureWarning):
result = self.f3(old=x)
assert result == x + 1
with pytest.raises(TypeError):
self.f3(old='hello')
def test_bad_deprecate_kwarg(self):
with pytest.raises(TypeError):
@deprecate_kwarg('old', 'new', 0)
def f4(new=None):
pass
def test_rands():
r = tm.rands(10)
assert(len(r) == 10)
def test_rands_array():
arr = tm.rands_array(5, size=10)
assert(arr.shape == (10,))
assert(len(arr[0]) == 5)
arr = tm.rands_array(7, size=(10, 10))
assert(arr.shape == (10, 10))
assert(len(arr[1, 1]) == 7)
class TestValidateArgs(object):
fname = 'func'
def test_bad_min_fname_arg_count(self):
msg = "'max_fname_arg_count' must be non-negative"
with tm.assert_raises_regex(ValueError, msg):
validate_args(self.fname, (None,), -1, 'foo')
def test_bad_arg_length_max_value_single(self):
args = (None, None)
compat_args = ('foo',)
min_fname_arg_count = 0
max_length = len(compat_args) + min_fname_arg_count
actual_length = len(args) + min_fname_arg_count
msg = (r"{fname}\(\) takes at most {max_length} "
r"argument \({actual_length} given\)"
.format(fname=self.fname, max_length=max_length,
actual_length=actual_length))
with tm.assert_raises_regex(TypeError, msg):
validate_args(self.fname, args,
min_fname_arg_count,
compat_args)
def test_bad_arg_length_max_value_multiple(self):
args = (None, None)
compat_args = dict(foo=None)
min_fname_arg_count = 2
max_length = len(compat_args) + min_fname_arg_count
actual_length = len(args) + min_fname_arg_count
msg = (r"{fname}\(\) takes at most {max_length} "
r"arguments \({actual_length} given\)"
.format(fname=self.fname, max_length=max_length,
actual_length=actual_length))
with tm.assert_raises_regex(TypeError, msg):
validate_args(self.fname, args,
min_fname_arg_count,
compat_args)
def test_not_all_defaults(self):
bad_arg = 'foo'
msg = ("the '{arg}' parameter is not supported "
r"in the pandas implementation of {func}\(\)".
format(arg=bad_arg, func=self.fname))
compat_args = OrderedDict()
compat_args['foo'] = 2
compat_args['bar'] = -1
compat_args['baz'] = 3
arg_vals = (1, -1, 3)
for i in range(1, 3):
with tm.assert_raises_regex(ValueError, msg):
validate_args(self.fname, arg_vals[:i], 2, compat_args)
def test_validation(self):
# No exceptions should be thrown
validate_args(self.fname, (None,), 2, dict(out=None))
compat_args = OrderedDict()
compat_args['axis'] = 1
compat_args['out'] = None
validate_args(self.fname, (1, None), 2, compat_args)
class TestValidateKwargs(object):
fname = 'func'
def test_bad_kwarg(self):
goodarg = 'f'
badarg = goodarg + 'o'
compat_args = OrderedDict()
compat_args[goodarg] = 'foo'
compat_args[badarg + 'o'] = 'bar'
kwargs = {goodarg: 'foo', badarg: 'bar'}
msg = (r"{fname}\(\) got an unexpected "
r"keyword argument '{arg}'".format(
fname=self.fname, arg=badarg))
with tm.assert_raises_regex(TypeError, msg):
validate_kwargs(self.fname, kwargs, compat_args)
def test_not_all_none(self):
bad_arg = 'foo'
msg = (r"the '{arg}' parameter is not supported "
r"in the pandas implementation of {func}\(\)".
format(arg=bad_arg, func=self.fname))
compat_args = OrderedDict()
compat_args['foo'] = 1
compat_args['bar'] = 's'
compat_args['baz'] = None
kwarg_keys = ('foo', 'bar', 'baz')
kwarg_vals = (2, 's', None)
for i in range(1, 3):
kwargs = dict(zip(kwarg_keys[:i],
kwarg_vals[:i]))
with tm.assert_raises_regex(ValueError, msg):
validate_kwargs(self.fname, kwargs, compat_args)
def test_validation(self):
# No exceptions should be thrown
compat_args = OrderedDict()
compat_args['f'] = None
compat_args['b'] = 1
compat_args['ba'] = 's'
kwargs = dict(f=None, b=1)
validate_kwargs(self.fname, kwargs, compat_args)
def test_validate_bool_kwarg(self):
arg_names = ['inplace', 'copy']
invalid_values = [1, "True", [1, 2, 3], 5.0]
valid_values = [True, False, None]
for name in arg_names:
for value in invalid_values:
with tm.assert_raises_regex(ValueError,
"For argument \"%s\" "
"expected type bool, "
"received type %s" %
(name, type(value).__name__)):
validate_bool_kwarg(value, name)
for value in valid_values:
assert validate_bool_kwarg(value, name) == value
class TestValidateKwargsAndArgs(object):
fname = 'func'
def test_invalid_total_length_max_length_one(self):
compat_args = ('foo',)
kwargs = {'foo': 'FOO'}
args = ('FoO', 'BaZ')
min_fname_arg_count = 0
max_length = len(compat_args) + min_fname_arg_count
actual_length = len(kwargs) + len(args) + min_fname_arg_count
msg = (r"{fname}\(\) takes at most {max_length} "
r"argument \({actual_length} given\)"
.format(fname=self.fname, max_length=max_length,
actual_length=actual_length))
with tm.assert_raises_regex(TypeError, msg):
validate_args_and_kwargs(self.fname, args, kwargs,
min_fname_arg_count,
compat_args)
def test_invalid_total_length_max_length_multiple(self):
compat_args = ('foo', 'bar', 'baz')
kwargs = {'foo': 'FOO', 'bar': 'BAR'}
args = ('FoO', 'BaZ')
min_fname_arg_count = 2
max_length = len(compat_args) + min_fname_arg_count
actual_length = len(kwargs) + len(args) + min_fname_arg_count
msg = (r"{fname}\(\) takes at most {max_length} "
r"arguments \({actual_length} given\)"
.format(fname=self.fname, max_length=max_length,
actual_length=actual_length))
with tm.assert_raises_regex(TypeError, msg):
validate_args_and_kwargs(self.fname, args, kwargs,
min_fname_arg_count,
compat_args)
def test_no_args_with_kwargs(self):
bad_arg = 'bar'
min_fname_arg_count = 2
compat_args = OrderedDict()
compat_args['foo'] = -5
compat_args[bad_arg] = 1
msg = (r"the '{arg}' parameter is not supported "
r"in the pandas implementation of {func}\(\)".
format(arg=bad_arg, func=self.fname))
args = ()
kwargs = {'foo': -5, bad_arg: 2}
tm.assert_raises_regex(ValueError, msg,
validate_args_and_kwargs,
self.fname, args, kwargs,
min_fname_arg_count, compat_args)
args = (-5, 2)
kwargs = {}
tm.assert_raises_regex(ValueError, msg,
validate_args_and_kwargs,
self.fname, args, kwargs,
min_fname_arg_count, compat_args)
def test_duplicate_argument(self):
min_fname_arg_count = 2
compat_args = OrderedDict()
compat_args['foo'] = None
compat_args['bar'] = None
compat_args['baz'] = None
kwargs = {'foo': None, 'bar': None}
args = (None,) # duplicate value for 'foo'
msg = (r"{fname}\(\) got multiple values for keyword "
r"argument '{arg}'".format(fname=self.fname, arg='foo'))
with tm.assert_raises_regex(TypeError, msg):
validate_args_and_kwargs(self.fname, args, kwargs,
min_fname_arg_count,
compat_args)
def test_validation(self):
# No exceptions should be thrown
compat_args = OrderedDict()
compat_args['foo'] = 1
compat_args['bar'] = None
compat_args['baz'] = -2
kwargs = {'baz': -2}
args = (1, None)
min_fname_arg_count = 2
validate_args_and_kwargs(self.fname, args, kwargs,
min_fname_arg_count,
compat_args)
class TestMove(object):
def test_cannot_create_instance_of_stolenbuffer(self):
"""Stolen buffers need to be created through the smart constructor
``move_into_mutable_buffer`` which has a bunch of checks in it.
"""
msg = "cannot create 'pandas.util._move.stolenbuf' instances"
with tm.assert_raises_regex(TypeError, msg):
stolenbuf()
def test_more_than_one_ref(self):
"""Test case for when we try to use ``move_into_mutable_buffer`` when
the object being moved has other references.
"""
b = b'testing'
with pytest.raises(BadMove) as e:
def handle_success(type_, value, tb):
assert value.args[0] is b
return type(e).handle_success(e, type_, value, tb) # super
e.handle_success = handle_success
move_into_mutable_buffer(b)
def test_exactly_one_ref(self):
"""Test case for when the object being moved has exactly one reference.
"""
b = b'testing'
# We need to pass an expression on the stack to ensure that there are
# not extra references hanging around. We cannot rewrite this test as
# buf = b[:-3]
# as_stolen_buf = move_into_mutable_buffer(buf)
# because then we would have more than one reference to buf.
as_stolen_buf = move_into_mutable_buffer(b[:-3])
# materialize as bytearray to show that it is mutable
assert bytearray(as_stolen_buf) == b'test'
@pytest.mark.skipif(
sys.version_info[0] > 2,
reason='bytes objects cannot be interned in py3',
)
def test_interned(self):
salt = uuid4().hex
def make_string():
# We need to actually create a new string so that it has refcount
# one. We use a uuid so that we know the string could not already
# be in the intern table.
return ''.join(('testing: ', salt))
# This should work, the string has one reference on the stack.
move_into_mutable_buffer(make_string())
refcount = [None] # nonlocal
def ref_capture(ob):
# Subtract two because those are the references owned by this
# frame:
# 1. The local variables of this stack frame.
# 2. The python data stack of this stack frame.
refcount[0] = sys.getrefcount(ob) - 2
return ob
with pytest.raises(BadMove):
# If we intern the string it will still have one reference but now
# it is in the intern table so if other people intern the same
# string while the mutable buffer holds the first string they will
# be the same instance.
move_into_mutable_buffer(ref_capture(intern(make_string()))) # noqa
assert refcount[0] == 1
def test_numpy_errstate_is_default():
# The defaults since numpy 1.6.0
expected = {'over': 'warn', 'divide': 'warn', 'invalid': 'warn',
'under': 'ignore'}
import numpy as np
from pandas.compat import numpy # noqa
# The errstate should be unchanged after that import.
assert np.geterr() == expected
class TestLocaleUtils(object):
@classmethod
def setup_class(cls):
cls.locales = tm.get_locales()
cls.current_locale = locale.getlocale()
if not cls.locales:
pytest.skip("No locales found")
tm._skip_if_windows()
@classmethod
def teardown_class(cls):
del cls.locales
del cls.current_locale
def test_get_locales(self):
# all systems should have at least a single locale
assert len(tm.get_locales()) > 0
def test_get_locales_prefix(self):
if len(self.locales) == 1:
pytest.skip("Only a single locale found, no point in "
"trying to test filtering locale prefixes")
first_locale = self.locales[0]
assert len(tm.get_locales(prefix=first_locale[:2])) > 0
def test_set_locale(self):
if len(self.locales) == 1:
pytest.skip("Only a single locale found, no point in "
"trying to test setting another locale")
if _all_none(*self.current_locale):
# Not sure why, but on some travis runs with pytest,
# getlocale() returned (None, None).
pytest.skip("Current locale is not set.")
locale_override = os.environ.get('LOCALE_OVERRIDE', None)
if locale_override is None:
lang, enc = 'it_CH', 'UTF-8'
elif locale_override == 'C':
lang, enc = 'en_US', 'ascii'
else:
lang, enc = locale_override.split('.')
enc = codecs.lookup(enc).name
new_locale = lang, enc
if not tm._can_set_locale(new_locale):
with pytest.raises(locale.Error):
with tm.set_locale(new_locale):
pass
else:
with tm.set_locale(new_locale) as normalized_locale:
new_lang, new_enc = normalized_locale.split('.')
new_enc = codecs.lookup(enc).name
normalized_locale = new_lang, new_enc
assert normalized_locale == new_locale
current_locale = locale.getlocale()
assert current_locale == self.current_locale
def test_make_signature():
# See GH 17608
# Case where the func does not have default kwargs
sig = make_signature(validate_kwargs)
assert sig == (['fname', 'kwargs', 'compat_args'],
['fname', 'kwargs', 'compat_args'])
# Case where the func does have default kwargs
sig = make_signature(deprecate_kwarg)
assert sig == (['old_arg_name', 'new_arg_name',
'mapping=None', 'stacklevel=2'],
['old_arg_name', 'new_arg_name', 'mapping', 'stacklevel'])
def test_safe_import(monkeypatch):
assert not safe_import("foo")
assert not safe_import("pandas", min_version="99.99.99")
# Create dummy module to be imported
import types
import sys
mod_name = "hello123"
mod = types.ModuleType(mod_name)
mod.__version__ = "1.5"
assert not safe_import(mod_name)
monkeypatch.setitem(sys.modules, mod_name, mod)
assert not safe_import(mod_name, min_version="2.0")
assert safe_import(mod_name, min_version="1.0")
| bsd-3-clause |
hainm/scikit-learn | benchmarks/bench_glm.py | 297 | 1493 | """
A comparison of different methods in GLM
Data comes from a random square matrix.
"""
from datetime import datetime
import numpy as np
from sklearn import linear_model
from sklearn.utils.bench import total_seconds
if __name__ == '__main__':
import pylab as pl
n_iter = 40
time_ridge = np.empty(n_iter)
time_ols = np.empty(n_iter)
time_lasso = np.empty(n_iter)
dimensions = 500 * np.arange(1, n_iter + 1)
for i in range(n_iter):
print('Iteration %s of %s' % (i, n_iter))
n_samples, n_features = 10 * i + 3, 10 * i + 3
X = np.random.randn(n_samples, n_features)
Y = np.random.randn(n_samples)
start = datetime.now()
ridge = linear_model.Ridge(alpha=1.)
ridge.fit(X, Y)
time_ridge[i] = total_seconds(datetime.now() - start)
start = datetime.now()
ols = linear_model.LinearRegression()
ols.fit(X, Y)
time_ols[i] = total_seconds(datetime.now() - start)
start = datetime.now()
lasso = linear_model.LassoLars()
lasso.fit(X, Y)
time_lasso[i] = total_seconds(datetime.now() - start)
pl.figure('scikit-learn GLM benchmark results')
pl.xlabel('Dimensions')
pl.ylabel('Time (s)')
pl.plot(dimensions, time_ridge, color='r')
pl.plot(dimensions, time_ols, color='g')
pl.plot(dimensions, time_lasso, color='b')
pl.legend(['Ridge', 'OLS', 'LassoLars'], loc='upper left')
pl.axis('tight')
pl.show()
| bsd-3-clause |
wangmiao1981/spark | python/pyspark/pandas/extensions.py | 11 | 12362 | #
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from typing import Callable, Generic, Optional, Type, Union, TYPE_CHECKING
import warnings
from pyspark.pandas._typing import T
if TYPE_CHECKING:
from pyspark.pandas.frame import DataFrame # noqa: F401 (SPARK-34943)
from pyspark.pandas.indexes import Index # noqa: F401 (SPARK-34943)
from pyspark.pandas.series import Series # noqa: F401 (SPARK-34943)
class CachedAccessor(Generic[T]):
"""
Custom property-like object.
A descriptor for caching accessors:
Parameters
----------
name : str
Namespace that accessor's methods, properties, etc will be accessed under, e.g. "foo" for a
dataframe accessor yields the accessor ``df.foo``
accessor: cls
Class with the extension methods.
Notes
-----
For accessor, the class's __init__ method assumes that you are registering an accessor for one
of ``Series``, ``DataFrame``, or ``Index``.
This object is not meant to be instantiated directly. Instead, use register_dataframe_accessor,
register_series_accessor, or register_index_accessor.
The pandas-on-Spark accessor is modified based on pandas.core.accessor.
"""
def __init__(self, name: str, accessor: Type[T]) -> None:
self._name = name
self._accessor = accessor
def __get__(
self, obj: Optional[Union["DataFrame", "Series", "Index"]], cls: Type[T]
) -> Union[T, Type[T]]:
if obj is None:
return self._accessor
accessor_obj = self._accessor(obj) # type: ignore
object.__setattr__(obj, self._name, accessor_obj)
return accessor_obj
def _register_accessor(
name: str, cls: Union[Type["DataFrame"], Type["Series"], Type["Index"]]
) -> Callable[[Type[T]], Type[T]]:
"""
Register a custom accessor on {klass} objects.
Parameters
----------
name : str
Name under which the accessor should be registered. A warning is issued if this name
conflicts with a preexisting attribute.
Returns
-------
callable
A class decorator.
See Also
--------
register_dataframe_accessor: Register a custom accessor on DataFrame objects
register_series_accessor: Register a custom accessor on Series objects
register_index_accessor: Register a custom accessor on Index objects
Notes
-----
    When accessed, your accessor will be initialized with the pandas-on-Spark object the user
is interacting with. The code signature must be:
.. code-block:: python
def __init__(self, pandas_on_spark_obj):
# constructor logic
...
In the pandas API, if data passed to your accessor has an incorrect dtype, it's recommended to
raise an ``AttributeError`` for consistency purposes. In pandas-on-Spark, ``ValueError`` is more
frequently used to annotate when a value's datatype is unexpected for a given method/function.
Ultimately, you can structure this however you like, but pandas-on-Spark would likely do
something like this:
>>> ps.Series(['a', 'b']).dt
...
Traceback (most recent call last):
...
ValueError: Cannot call DatetimeMethods on type StringType
Note: This function is not meant to be used directly - instead, use register_dataframe_accessor,
register_series_accessor, or register_index_accessor.
"""
def decorator(accessor: Type[T]) -> Type[T]:
if hasattr(cls, name):
msg = (
"registration of accessor {0} under name '{1}' for type {2} is overriding "
"a preexisting attribute with the same name.".format(accessor, name, cls.__name__)
)
warnings.warn(
msg,
UserWarning,
stacklevel=2,
)
setattr(cls, name, CachedAccessor(name, accessor))
return accessor
return decorator
def register_dataframe_accessor(name: str) -> Callable[[Type[T]], Type[T]]:
"""
Register a custom accessor with a DataFrame
Parameters
----------
name : str
name used when calling the accessor after its registered
Returns
-------
callable
A class decorator.
See Also
--------
register_series_accessor: Register a custom accessor on Series objects
register_index_accessor: Register a custom accessor on Index objects
Notes
-----
    When accessed, your accessor will be initialized with the pandas-on-Spark object the user
is interacting with. The accessor's init method should always ingest the object being accessed.
See the examples for the init signature.
In the pandas API, if data passed to your accessor has an incorrect dtype, it's recommended to
raise an ``AttributeError`` for consistency purposes. In pandas-on-Spark, ``ValueError`` is more
frequently used to annotate when a value's datatype is unexpected for a given method/function.
Ultimately, you can structure this however you like, but pandas-on-Spark would likely do
something like this:
>>> ps.Series(['a', 'b']).dt
...
Traceback (most recent call last):
...
ValueError: Cannot call DatetimeMethods on type StringType
Examples
--------
In your library code::
from pyspark.pandas.extensions import register_dataframe_accessor
@register_dataframe_accessor("geo")
class GeoAccessor:
def __init__(self, pandas_on_spark_obj):
self._obj = pandas_on_spark_obj
# other constructor logic
@property
def center(self):
# return the geographic center point of this DataFrame
lat = self._obj.latitude
lon = self._obj.longitude
return (float(lon.mean()), float(lat.mean()))
def plot(self):
# plot this array's data on a map
pass
Then, in an ipython session::
>>> ## Import if the accessor is in the other file.
>>> # from my_ext_lib import GeoAccessor
>>> psdf = ps.DataFrame({"longitude": np.linspace(0,10),
... "latitude": np.linspace(0, 20)})
>>> psdf.geo.center # doctest: +SKIP
(5.0, 10.0)
>>> psdf.geo.plot() # doctest: +SKIP
"""
from pyspark.pandas import DataFrame
return _register_accessor(name, DataFrame)
def register_series_accessor(name: str) -> Callable[[Type[T]], Type[T]]:
"""
Register a custom accessor with a Series object
Parameters
----------
name : str
name used when calling the accessor after its registered
Returns
-------
callable
A class decorator.
See Also
--------
register_dataframe_accessor: Register a custom accessor on DataFrame objects
register_index_accessor: Register a custom accessor on Index objects
Notes
-----
    When accessed, your accessor will be initialized with the pandas-on-Spark object the user is
interacting with. The code signature must be::
def __init__(self, pandas_on_spark_obj):
# constructor logic
...
In the pandas API, if data passed to your accessor has an incorrect dtype, it's recommended to
raise an ``AttributeError`` for consistency purposes. In pandas-on-Spark, ``ValueError`` is more
frequently used to annotate when a value's datatype is unexpected for a given method/function.
Ultimately, you can structure this however you like, but pandas-on-Spark would likely do
something like this:
>>> ps.Series(['a', 'b']).dt
...
Traceback (most recent call last):
...
ValueError: Cannot call DatetimeMethods on type StringType
Examples
--------
In your library code::
from pyspark.pandas.extensions import register_series_accessor
@register_series_accessor("geo")
class GeoAccessor:
def __init__(self, pandas_on_spark_obj):
self._obj = pandas_on_spark_obj
@property
def is_valid(self):
# boolean check to see if series contains valid geometry
return True
Then, in an ipython session::
>>> ## Import if the accessor is in the other file.
>>> # from my_ext_lib import GeoAccessor
>>> psdf = ps.DataFrame({"longitude": np.linspace(0,10),
... "latitude": np.linspace(0, 20)})
>>> psdf.longitude.geo.is_valid # doctest: +SKIP
True
"""
from pyspark.pandas import Series
return _register_accessor(name, Series)
def register_index_accessor(name: str) -> Callable[[Type[T]], Type[T]]:
"""
Register a custom accessor with an Index
Parameters
----------
name : str
name used when calling the accessor after its registered
Returns
-------
callable
A class decorator.
See Also
--------
register_dataframe_accessor: Register a custom accessor on DataFrame objects
register_series_accessor: Register a custom accessor on Series objects
Notes
-----
    When accessed, your accessor will be initialized with the pandas-on-Spark object the user is
interacting with. The code signature must be::
def __init__(self, pandas_on_spark_obj):
# constructor logic
...
In the pandas API, if data passed to your accessor has an incorrect dtype, it's recommended to
raise an ``AttributeError`` for consistency purposes. In pandas-on-Spark, ``ValueError`` is more
frequently used to annotate when a value's datatype is unexpected for a given method/function.
Ultimately, you can structure this however you like, but pandas-on-Spark would likely do
something like this:
>>> ps.Series(['a', 'b']).dt
...
Traceback (most recent call last):
...
ValueError: Cannot call DatetimeMethods on type StringType
Examples
--------
In your library code::
from pyspark.pandas.extensions import register_index_accessor
@register_index_accessor("foo")
class CustomAccessor:
def __init__(self, pandas_on_spark_obj):
self._obj = pandas_on_spark_obj
self.item = "baz"
@property
def bar(self):
# return item value
return self.item
Then, in an ipython session::
>>> ## Import if the accessor is in the other file.
>>> # from my_ext_lib import CustomAccessor
>>> psdf = ps.DataFrame({"longitude": np.linspace(0,10),
... "latitude": np.linspace(0, 20)})
>>> psdf.index.foo.bar # doctest: +SKIP
'baz'
"""
from pyspark.pandas import Index
return _register_accessor(name, Index)
def _test() -> None:
import os
import doctest
import sys
import numpy
from pyspark.sql import SparkSession
import pyspark.pandas.extensions
os.chdir(os.environ["SPARK_HOME"])
globs = pyspark.pandas.extensions.__dict__.copy()
globs["np"] = numpy
globs["ps"] = pyspark.pandas
spark = (
SparkSession.builder.master("local[4]")
.appName("pyspark.pandas.extensions tests")
.getOrCreate()
)
(failure_count, test_count) = doctest.testmod(
pyspark.pandas.extensions,
globs=globs,
optionflags=doctest.ELLIPSIS | doctest.NORMALIZE_WHITESPACE,
)
spark.stop()
if failure_count:
sys.exit(-1)
if __name__ == "__main__":
_test()
| apache-2.0 |
GuessWhoSamFoo/pandas | pandas/tests/tools/test_numeric.py | 2 | 17715 | import decimal
import numpy as np
from numpy import iinfo
import pytest
import pandas.compat as compat
import pandas as pd
from pandas import DataFrame, Index, Series, to_numeric
from pandas.util import testing as tm
@pytest.fixture(params=[None, "ignore", "raise", "coerce"])
def errors(request):
return request.param
@pytest.fixture(params=[True, False])
def signed(request):
return request.param
@pytest.fixture(params=[lambda x: x, str], ids=["identity", "str"])
def transform(request):
return request.param
@pytest.fixture(params=[
47393996303418497800,
100000000000000000000
])
def large_val(request):
return request.param
@pytest.fixture(params=[True, False])
def multiple_elts(request):
return request.param
@pytest.fixture(params=[
(lambda x: Index(x, name="idx"), tm.assert_index_equal),
(lambda x: Series(x, name="ser"), tm.assert_series_equal),
(lambda x: np.array(Index(x).values), tm.assert_numpy_array_equal)
])
def transform_assert_equal(request):
return request.param
@pytest.mark.parametrize("input_kwargs,result_kwargs", [
(dict(), dict(dtype=np.int64)),
(dict(errors="coerce", downcast="integer"), dict(dtype=np.int8))
])
def test_empty(input_kwargs, result_kwargs):
# see gh-16302
ser = Series([], dtype=object)
result = to_numeric(ser, **input_kwargs)
expected = Series([], **result_kwargs)
tm.assert_series_equal(result, expected)
@pytest.mark.parametrize("last_val", ["7", 7])
def test_series(last_val):
ser = Series(["1", "-3.14", last_val])
result = to_numeric(ser)
expected = Series([1, -3.14, 7])
tm.assert_series_equal(result, expected)
@pytest.mark.parametrize("data", [
[1, 3, 4, 5],
[1., 3., 4., 5.],
# Bool is regarded as numeric.
[True, False, True, True]
])
def test_series_numeric(data):
ser = Series(data, index=list("ABCD"), name="EFG")
result = to_numeric(ser)
tm.assert_series_equal(result, ser)
@pytest.mark.parametrize("data,msg", [
([1, -3.14, "apple"],
'Unable to parse string "apple" at position 2'),
(["orange", 1, -3.14, "apple"],
'Unable to parse string "orange" at position 0')
])
def test_error(data, msg):
ser = Series(data)
with pytest.raises(ValueError, match=msg):
to_numeric(ser, errors="raise")
@pytest.mark.parametrize("errors,exp_data", [
("ignore", [1, -3.14, "apple"]),
("coerce", [1, -3.14, np.nan])
])
def test_ignore_error(errors, exp_data):
ser = Series([1, -3.14, "apple"])
result = to_numeric(ser, errors=errors)
expected = Series(exp_data)
tm.assert_series_equal(result, expected)
@pytest.mark.parametrize("errors,exp", [
("raise", 'Unable to parse string "apple" at position 2'),
("ignore", [True, False, "apple"]),
# Coerces to float.
("coerce", [1., 0., np.nan])
])
def test_bool_handling(errors, exp):
ser = Series([True, False, "apple"])
if isinstance(exp, str):
with pytest.raises(ValueError, match=exp):
to_numeric(ser, errors=errors)
else:
result = to_numeric(ser, errors=errors)
expected = Series(exp)
tm.assert_series_equal(result, expected)
def test_list():
ser = ["1", "-3.14", "7"]
res = to_numeric(ser)
expected = np.array([1, -3.14, 7])
tm.assert_numpy_array_equal(res, expected)
@pytest.mark.parametrize("data,arr_kwargs", [
([1, 3, 4, 5], dict(dtype=np.int64)),
([1., 3., 4., 5.], dict()),
# Boolean is regarded as numeric.
([True, False, True, True], dict())
])
def test_list_numeric(data, arr_kwargs):
result = to_numeric(data)
expected = np.array(data, **arr_kwargs)
tm.assert_numpy_array_equal(result, expected)
@pytest.mark.parametrize("kwargs", [
dict(dtype="O"), dict()
])
def test_numeric(kwargs):
data = [1, -3.14, 7]
ser = Series(data, **kwargs)
result = to_numeric(ser)
expected = Series(data)
tm.assert_series_equal(result, expected)
@pytest.mark.parametrize("columns", [
# One column.
"a",
# Multiple columns.
["a", "b"]
])
def test_numeric_df_columns(columns):
# see gh-14827
df = DataFrame(dict(
a=[1.2, decimal.Decimal(3.14), decimal.Decimal("infinity"), "0.1"],
b=[1.0, 2.0, 3.0, 4.0],
))
expected = DataFrame(dict(
a=[1.2, 3.14, np.inf, 0.1],
b=[1.0, 2.0, 3.0, 4.0],
))
df_copy = df.copy()
df_copy[columns] = df_copy[columns].apply(to_numeric)
tm.assert_frame_equal(df_copy, expected)
@pytest.mark.parametrize("data,exp_data", [
([[decimal.Decimal(3.14), 1.0], decimal.Decimal(1.6), 0.1],
[[3.14, 1.0], 1.6, 0.1]),
([np.array([decimal.Decimal(3.14), 1.0]), 0.1],
[[3.14, 1.0], 0.1])
])
def test_numeric_embedded_arr_likes(data, exp_data):
# Test to_numeric with embedded lists and arrays
df = DataFrame(dict(a=data))
df["a"] = df["a"].apply(to_numeric)
expected = DataFrame(dict(a=exp_data))
tm.assert_frame_equal(df, expected)
def test_all_nan():
ser = Series(["a", "b", "c"])
result = to_numeric(ser, errors="coerce")
expected = Series([np.nan, np.nan, np.nan])
tm.assert_series_equal(result, expected)
def test_type_check(errors):
# see gh-11776
df = DataFrame({"a": [1, -3.14, 7], "b": ["4", "5", "6"]})
kwargs = dict(errors=errors) if errors is not None else dict()
error_ctx = pytest.raises(TypeError, match="1-d array")
with error_ctx:
to_numeric(df, **kwargs)
@pytest.mark.parametrize("val", [1, 1.1, 20001])
def test_scalar(val, signed, transform):
val = -val if signed else val
assert to_numeric(transform(val)) == float(val)
def test_really_large_scalar(large_val, signed, transform, errors):
# see gh-24910
kwargs = dict(errors=errors) if errors is not None else dict()
val = -large_val if signed else large_val
val = transform(val)
val_is_string = isinstance(val, str)
if val_is_string and errors in (None, "raise"):
msg = "Integer out of range. at position 0"
with pytest.raises(ValueError, match=msg):
to_numeric(val, **kwargs)
else:
expected = float(val) if (errors == "coerce" and
val_is_string) else val
assert tm.assert_almost_equal(to_numeric(val, **kwargs), expected)
def test_really_large_in_arr(large_val, signed, transform,
multiple_elts, errors):
# see gh-24910
kwargs = dict(errors=errors) if errors is not None else dict()
val = -large_val if signed else large_val
val = transform(val)
extra_elt = "string"
arr = [val] + multiple_elts * [extra_elt]
val_is_string = isinstance(val, str)
coercing = errors == "coerce"
if errors in (None, "raise") and (val_is_string or multiple_elts):
if val_is_string:
msg = "Integer out of range. at position 0"
else:
msg = 'Unable to parse string "string" at position 1'
with pytest.raises(ValueError, match=msg):
to_numeric(arr, **kwargs)
else:
result = to_numeric(arr, **kwargs)
exp_val = float(val) if (coercing and val_is_string) else val
expected = [exp_val]
if multiple_elts:
if coercing:
expected.append(np.nan)
exp_dtype = float
else:
expected.append(extra_elt)
exp_dtype = object
else:
exp_dtype = float if isinstance(exp_val, (
int, compat.long, float)) else object
tm.assert_almost_equal(result, np.array(expected, dtype=exp_dtype))
def test_really_large_in_arr_consistent(large_val, signed,
multiple_elts, errors):
# see gh-24910
#
    # Even if we discover that we have to hold float, that does not mean
    # we should be lenient on subsequent elements that fail to be integer.
kwargs = dict(errors=errors) if errors is not None else dict()
arr = [str(-large_val if signed else large_val)]
if multiple_elts:
arr.insert(0, large_val)
if errors in (None, "raise"):
index = int(multiple_elts)
msg = "Integer out of range. at position {index}".format(index=index)
with pytest.raises(ValueError, match=msg):
to_numeric(arr, **kwargs)
else:
result = to_numeric(arr, **kwargs)
if errors == "coerce":
expected = [float(i) for i in arr]
exp_dtype = float
else:
expected = arr
exp_dtype = object
tm.assert_almost_equal(result, np.array(expected, dtype=exp_dtype))
@pytest.mark.parametrize("errors,checker", [
("raise", 'Unable to parse string "fail" at position 0'),
("ignore", lambda x: x == "fail"),
("coerce", lambda x: np.isnan(x))
])
def test_scalar_fail(errors, checker):
scalar = "fail"
if isinstance(checker, str):
with pytest.raises(ValueError, match=checker):
to_numeric(scalar, errors=errors)
else:
assert checker(to_numeric(scalar, errors=errors))
@pytest.mark.parametrize("data", [
[1, 2, 3],
[1., np.nan, 3, np.nan]
])
def test_numeric_dtypes(data, transform_assert_equal):
transform, assert_equal = transform_assert_equal
data = transform(data)
result = to_numeric(data)
assert_equal(result, data)
@pytest.mark.parametrize("data,exp", [
(["1", "2", "3"], np.array([1, 2, 3], dtype="int64")),
(["1.5", "2.7", "3.4"], np.array([1.5, 2.7, 3.4]))
])
def test_str(data, exp, transform_assert_equal):
transform, assert_equal = transform_assert_equal
result = to_numeric(transform(data))
expected = transform(exp)
assert_equal(result, expected)
def test_datetime_like(tz_naive_fixture, transform_assert_equal):
transform, assert_equal = transform_assert_equal
idx = pd.date_range("20130101", periods=3, tz=tz_naive_fixture)
result = to_numeric(transform(idx))
expected = transform(idx.asi8)
assert_equal(result, expected)
def test_timedelta(transform_assert_equal):
transform, assert_equal = transform_assert_equal
idx = pd.timedelta_range("1 days", periods=3, freq="D")
result = to_numeric(transform(idx))
expected = transform(idx.asi8)
assert_equal(result, expected)
def test_period(transform_assert_equal):
transform, assert_equal = transform_assert_equal
idx = pd.period_range("2011-01", periods=3, freq="M", name="")
inp = transform(idx)
if isinstance(inp, Index):
result = to_numeric(inp)
expected = transform(idx.asi8)
assert_equal(result, expected)
else:
# TODO: PeriodDtype, so support it in to_numeric.
pytest.skip("Missing PeriodDtype support in to_numeric")
@pytest.mark.parametrize("errors,expected", [
("raise", "Invalid object type at position 0"),
("ignore", Series([[10.0, 2], 1.0, "apple"])),
("coerce", Series([np.nan, 1.0, np.nan]))
])
def test_non_hashable(errors, expected):
# see gh-13324
ser = Series([[10.0, 2], 1.0, "apple"])
if isinstance(expected, str):
with pytest.raises(TypeError, match=expected):
to_numeric(ser, errors=errors)
else:
result = to_numeric(ser, errors=errors)
tm.assert_series_equal(result, expected)
def test_downcast_invalid_cast():
# see gh-13352
data = ["1", 2, 3]
invalid_downcast = "unsigned-integer"
msg = "invalid downcasting method provided"
with pytest.raises(ValueError, match=msg):
to_numeric(data, downcast=invalid_downcast)
@pytest.mark.parametrize("data", [
["1", 2, 3],
[1, 2, 3],
np.array(["1970-01-02", "1970-01-03",
"1970-01-04"], dtype="datetime64[D]")
])
@pytest.mark.parametrize("kwargs,exp_dtype", [
# Basic function tests.
(dict(), np.int64),
(dict(downcast=None), np.int64),
# Support below np.float32 is rare and far between.
(dict(downcast="float"), np.dtype(np.float32).char),
# Basic dtype support.
(dict(downcast="unsigned"), np.dtype(np.typecodes["UnsignedInteger"][0]))
])
def test_downcast_basic(data, kwargs, exp_dtype):
# see gh-13352
result = to_numeric(data, **kwargs)
expected = np.array([1, 2, 3], dtype=exp_dtype)
tm.assert_numpy_array_equal(result, expected)
@pytest.mark.parametrize("signed_downcast", ["integer", "signed"])
@pytest.mark.parametrize("data", [
["1", 2, 3],
[1, 2, 3],
np.array(["1970-01-02", "1970-01-03",
"1970-01-04"], dtype="datetime64[D]")
])
def test_signed_downcast(data, signed_downcast):
# see gh-13352
smallest_int_dtype = np.dtype(np.typecodes["Integer"][0])
expected = np.array([1, 2, 3], dtype=smallest_int_dtype)
res = to_numeric(data, downcast=signed_downcast)
tm.assert_numpy_array_equal(res, expected)
def test_ignore_downcast_invalid_data():
# If we can't successfully cast the given
# data to a numeric dtype, do not bother
# with the downcast parameter.
data = ["foo", 2, 3]
expected = np.array(data, dtype=object)
res = to_numeric(data, errors="ignore",
downcast="unsigned")
tm.assert_numpy_array_equal(res, expected)
def test_ignore_downcast_neg_to_unsigned():
# Cannot cast to an unsigned integer
# because we have a negative number.
data = ["-1", 2, 3]
expected = np.array([-1, 2, 3], dtype=np.int64)
res = to_numeric(data, downcast="unsigned")
tm.assert_numpy_array_equal(res, expected)
@pytest.mark.parametrize("downcast", ["integer", "signed", "unsigned"])
@pytest.mark.parametrize("data,expected", [
(["1.1", 2, 3],
np.array([1.1, 2, 3], dtype=np.float64)),
([10000.0, 20000, 3000, 40000.36, 50000, 50000.00],
np.array([10000.0, 20000, 3000,
40000.36, 50000, 50000.00], dtype=np.float64))
])
def test_ignore_downcast_cannot_convert_float(data, expected, downcast):
# Cannot cast to an integer (signed or unsigned)
# because we have a float number.
res = to_numeric(data, downcast=downcast)
tm.assert_numpy_array_equal(res, expected)
@pytest.mark.parametrize("downcast,expected_dtype", [
("integer", np.int16),
("signed", np.int16),
("unsigned", np.uint16)
])
def test_downcast_not8bit(downcast, expected_dtype):
# the smallest integer dtype need not be np.(u)int8
data = ["256", 257, 258]
expected = np.array([256, 257, 258], dtype=expected_dtype)
res = to_numeric(data, downcast=downcast)
tm.assert_numpy_array_equal(res, expected)
@pytest.mark.parametrize("dtype,downcast,min_max", [
("int8", "integer", [iinfo(np.int8).min,
iinfo(np.int8).max]),
("int16", "integer", [iinfo(np.int16).min,
iinfo(np.int16).max]),
("int32", "integer", [iinfo(np.int32).min,
iinfo(np.int32).max]),
("int64", "integer", [iinfo(np.int64).min,
iinfo(np.int64).max]),
("uint8", "unsigned", [iinfo(np.uint8).min,
iinfo(np.uint8).max]),
("uint16", "unsigned", [iinfo(np.uint16).min,
iinfo(np.uint16).max]),
("uint32", "unsigned", [iinfo(np.uint32).min,
iinfo(np.uint32).max]),
("uint64", "unsigned", [iinfo(np.uint64).min,
iinfo(np.uint64).max]),
("int16", "integer", [iinfo(np.int8).min,
iinfo(np.int8).max + 1]),
("int32", "integer", [iinfo(np.int16).min,
iinfo(np.int16).max + 1]),
("int64", "integer", [iinfo(np.int32).min,
iinfo(np.int32).max + 1]),
("int16", "integer", [iinfo(np.int8).min - 1,
iinfo(np.int16).max]),
("int32", "integer", [iinfo(np.int16).min - 1,
iinfo(np.int32).max]),
("int64", "integer", [iinfo(np.int32).min - 1,
iinfo(np.int64).max]),
("uint16", "unsigned", [iinfo(np.uint8).min,
iinfo(np.uint8).max + 1]),
("uint32", "unsigned", [iinfo(np.uint16).min,
iinfo(np.uint16).max + 1]),
("uint64", "unsigned", [iinfo(np.uint32).min,
iinfo(np.uint32).max + 1])
])
def test_downcast_limits(dtype, downcast, min_max):
# see gh-14404: test the limits of each downcast.
series = to_numeric(Series(min_max), downcast=downcast)
assert series.dtype == dtype
@pytest.mark.parametrize("data,exp_data", [
([200, 300, "", "NaN", 30000000000000000000],
[200, 300, np.nan, np.nan, 30000000000000000000]),
(["12345678901234567890", "1234567890", "ITEM"],
[12345678901234567890, 1234567890, np.nan])
])
def test_coerce_uint64_conflict(data, exp_data):
# see gh-17007 and gh-17125
#
# Still returns float despite the uint64-nan conflict,
# which would normally force the casting to object.
result = to_numeric(Series(data), errors="coerce")
expected = Series(exp_data, dtype=float)
tm.assert_series_equal(result, expected)
@pytest.mark.parametrize("errors,exp", [
("ignore", Series(["12345678901234567890", "1234567890", "ITEM"])),
("raise", "Unable to parse string")
])
def test_non_coerce_uint64_conflict(errors, exp):
# see gh-17007 and gh-17125
#
# For completeness.
ser = Series(["12345678901234567890", "1234567890", "ITEM"])
if isinstance(exp, str):
with pytest.raises(ValueError, match=exp):
to_numeric(ser, errors=errors)
else:
result = to_numeric(ser, errors=errors)
tm.assert_series_equal(result, ser)
| bsd-3-clause |
albireox/marvin | python/marvin/utils/plot/utils.py | 2 | 4421 | #!/usr/bin/env python
# encoding: utf-8
#
# Licensed under a 3-clause BSD license.
#
# Original code from mangadap.plot.colorbar.py licensed under the following
# 3-clause BSD license.
#
# Copyright (c) 2015, SDSS-IV/MaNGA Pipeline Group
#
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its contributors
# may be used to endorse or promote products derived from this software without
# specific prior written permission. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT
# HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES,
# INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND
# FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
# COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
# EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
# utils.py
#
# Created by José Sánchez-Gallego on 5 Aug 2017.
from __future__ import division
from __future__ import print_function
from __future__ import absolute_import
from matplotlib import pyplot as plt
__all__ = ('bind_to_figure',)
def bind_to_figure(ax, fig=None):
"""Copies axes to a new figure.
This is a custom implementation of a method to copy axes from one
matplotlib figure to another. Matplotlib does not allow this, so we create
a new figure and copy the relevant lines and containers.
This is all quite hacky and may stop working in future versions of
matplotlib, but it seems to be the only way to bind axes from one figure
to a different one.
Current limitations include: 1) the legend is copied but its style is not
maintained; 2) scatter plots do not maintain the marker type, markers are
always replaced with squares.
"""
if fig is not None:
assert isinstance(fig, plt.Figure), 'argument must be a Figure'
assert len(fig.axes) == 1, 'figure must have one and only one axes'
new_ax = fig.axes[0]
else:
fig, new_ax = plt.subplots()
new_ax.set_facecolor(ax.get_facecolor())
for line in ax.lines:
data = line.get_data()
new_ax.plot(data[0], data[1], linestyle=line.get_linestyle(), color=line.get_color(),
zorder=line.zorder, label=line.get_label())
for collection in ax.collections:
data = collection.get_offsets()
new_ax.scatter(data[:, 0], data[:, 1], marker='s', facecolor=collection.get_facecolors(),
edgecolor=collection.get_edgecolors(), s=collection.get_sizes(),
                       zorder=collection.zorder, label=collection.get_label())
for text in ax.texts:
xx, yy = text.get_position()
new_ax.text(xx, yy, text.get_text(), family=text.get_fontfamily(),
fontsize=text.get_fontsize(),
color=text.get_color(), ha=text.get_horizontalalignment(),
va=text.get_verticalalignment(), zorder=text.zorder)
for image in ax.images:
new_ax.imshow(image.get_array(), interpolation=image.get_interpolation())
if ax.legend_:
new_ax.legend()
new_ax.grid(ax.get_xgridlines(), color=ax.get_xgridlines()[0].get_color(),
alpha=ax.get_xgridlines()[0].get_alpha())
    new_ax.grid(ax.get_ygridlines(), color=ax.get_ygridlines()[0].get_color(),
                alpha=ax.get_ygridlines()[0].get_alpha())
new_ax.set_xlim(ax.get_xlim())
new_ax.set_ylim(ax.get_ylim())
new_ax.set_xlabel(ax.get_xlabel())
new_ax.set_ylabel(ax.get_ylabel())
return fig
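# A minimal usage sketch (added for illustration; not part of the original
# module). It assumes nothing beyond matplotlib, which is already imported
# above: draw a simple labelled line plot, then rebuild it in a new figure.
if __name__ == '__main__':
    src_fig, src_ax = plt.subplots()
    src_ax.plot([0, 1, 2, 3], [0, 1, 4, 9], label='x squared')
    src_ax.set_xlabel('x')
    src_ax.set_ylabel('y')
    src_ax.legend()
    copied_fig = bind_to_figure(src_ax)  # new Figure holding a copy of src_ax
    copied_fig.savefig('bound_axes_example.png')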
| bsd-3-clause |
marcsans/cnn-physics-perception | phy/lib/python2.7/site-packages/sklearn/tree/tree.py | 7 | 44619 | """
This module gathers tree-based methods, including decision, regression and
randomized trees. Single and multi-output problems are both handled.
"""
# Authors: Gilles Louppe <[email protected]>
# Peter Prettenhofer <[email protected]>
# Brian Holt <[email protected]>
# Noel Dawe <[email protected]>
# Satrajit Gosh <[email protected]>
# Joly Arnaud <[email protected]>
# Fares Hedayati <[email protected]>
# Nelson Liu <[email protected]>
#
# License: BSD 3 clause
from __future__ import division
import numbers
from abc import ABCMeta
from abc import abstractmethod
from math import ceil
import numpy as np
from scipy.sparse import issparse
from ..base import BaseEstimator
from ..base import ClassifierMixin
from ..base import RegressorMixin
from ..externals import six
from ..feature_selection.from_model import _LearntSelectorMixin
from ..utils import check_array
from ..utils import check_random_state
from ..utils import compute_sample_weight
from ..utils.multiclass import check_classification_targets
from ..exceptions import NotFittedError
from ._criterion import Criterion
from ._splitter import Splitter
from ._tree import DepthFirstTreeBuilder
from ._tree import BestFirstTreeBuilder
from ._tree import Tree
from . import _tree, _splitter, _criterion
__all__ = ["DecisionTreeClassifier",
"DecisionTreeRegressor",
"ExtraTreeClassifier",
"ExtraTreeRegressor"]
# =============================================================================
# Types and constants
# =============================================================================
DTYPE = _tree.DTYPE
DOUBLE = _tree.DOUBLE
CRITERIA_CLF = {"gini": _criterion.Gini, "entropy": _criterion.Entropy}
CRITERIA_REG = {"mse": _criterion.MSE, "friedman_mse": _criterion.FriedmanMSE,
"mae": _criterion.MAE}
DENSE_SPLITTERS = {"best": _splitter.BestSplitter,
"random": _splitter.RandomSplitter}
SPARSE_SPLITTERS = {"best": _splitter.BestSparseSplitter,
"random": _splitter.RandomSparseSplitter}
# =============================================================================
# Base decision tree
# =============================================================================
class BaseDecisionTree(six.with_metaclass(ABCMeta, BaseEstimator,
_LearntSelectorMixin)):
"""Base class for decision trees.
Warning: This class should not be used directly.
Use derived classes instead.
"""
@abstractmethod
def __init__(self,
criterion,
splitter,
max_depth,
min_samples_split,
min_samples_leaf,
min_weight_fraction_leaf,
max_features,
max_leaf_nodes,
random_state,
min_impurity_split,
class_weight=None,
presort=False):
self.criterion = criterion
self.splitter = splitter
self.max_depth = max_depth
self.min_samples_split = min_samples_split
self.min_samples_leaf = min_samples_leaf
self.min_weight_fraction_leaf = min_weight_fraction_leaf
self.max_features = max_features
self.random_state = random_state
self.max_leaf_nodes = max_leaf_nodes
self.min_impurity_split = min_impurity_split
self.class_weight = class_weight
self.presort = presort
self.n_features_ = None
self.n_outputs_ = None
self.classes_ = None
self.n_classes_ = None
self.tree_ = None
self.max_features_ = None
def fit(self, X, y, sample_weight=None, check_input=True,
X_idx_sorted=None):
random_state = check_random_state(self.random_state)
if check_input:
X = check_array(X, dtype=DTYPE, accept_sparse="csc")
y = check_array(y, ensure_2d=False, dtype=None)
if issparse(X):
X.sort_indices()
if X.indices.dtype != np.intc or X.indptr.dtype != np.intc:
raise ValueError("No support for np.int64 index based "
"sparse matrices")
# Determine output settings
n_samples, self.n_features_ = X.shape
is_classification = isinstance(self, ClassifierMixin)
y = np.atleast_1d(y)
expanded_class_weight = None
if y.ndim == 1:
# reshape is necessary to preserve the data contiguity against vs
# [:, np.newaxis] that does not.
y = np.reshape(y, (-1, 1))
self.n_outputs_ = y.shape[1]
if is_classification:
check_classification_targets(y)
y = np.copy(y)
self.classes_ = []
self.n_classes_ = []
if self.class_weight is not None:
y_original = np.copy(y)
y_encoded = np.zeros(y.shape, dtype=np.int)
for k in range(self.n_outputs_):
classes_k, y_encoded[:, k] = np.unique(y[:, k],
return_inverse=True)
self.classes_.append(classes_k)
self.n_classes_.append(classes_k.shape[0])
y = y_encoded
if self.class_weight is not None:
expanded_class_weight = compute_sample_weight(
self.class_weight, y_original)
else:
self.classes_ = [None] * self.n_outputs_
self.n_classes_ = [1] * self.n_outputs_
self.n_classes_ = np.array(self.n_classes_, dtype=np.intp)
if getattr(y, "dtype", None) != DOUBLE or not y.flags.contiguous:
y = np.ascontiguousarray(y, dtype=DOUBLE)
# Check parameters
max_depth = ((2 ** 31) - 1 if self.max_depth is None
else self.max_depth)
max_leaf_nodes = (-1 if self.max_leaf_nodes is None
else self.max_leaf_nodes)
if isinstance(self.min_samples_leaf, (numbers.Integral, np.integer)):
if not 1 <= self.min_samples_leaf:
raise ValueError("min_samples_leaf must be at least 1 "
"or in (0, 0.5], got %s"
% self.min_samples_leaf)
min_samples_leaf = self.min_samples_leaf
else: # float
if not 0. < self.min_samples_leaf <= 0.5:
raise ValueError("min_samples_leaf must be at least 1 "
"or in (0, 0.5], got %s"
% self.min_samples_leaf)
min_samples_leaf = int(ceil(self.min_samples_leaf * n_samples))
if isinstance(self.min_samples_split, (numbers.Integral, np.integer)):
if not 2 <= self.min_samples_split:
raise ValueError("min_samples_split must be at least 2 "
"or in (0, 1], got %s"
% self.min_samples_split)
min_samples_split = self.min_samples_split
else: # float
if not 0. < self.min_samples_split <= 1.:
raise ValueError("min_samples_split must be at least 2 "
"or in (0, 1], got %s"
% self.min_samples_split)
min_samples_split = int(ceil(self.min_samples_split * n_samples))
min_samples_split = max(2, min_samples_split)
min_samples_split = max(min_samples_split, 2 * min_samples_leaf)
if isinstance(self.max_features, six.string_types):
if self.max_features == "auto":
if is_classification:
max_features = max(1, int(np.sqrt(self.n_features_)))
else:
max_features = self.n_features_
elif self.max_features == "sqrt":
max_features = max(1, int(np.sqrt(self.n_features_)))
elif self.max_features == "log2":
max_features = max(1, int(np.log2(self.n_features_)))
else:
raise ValueError(
'Invalid value for max_features. Allowed string '
'values are "auto", "sqrt" or "log2".')
elif self.max_features is None:
max_features = self.n_features_
elif isinstance(self.max_features, (numbers.Integral, np.integer)):
max_features = self.max_features
else: # float
if self.max_features > 0.0:
max_features = max(1,
int(self.max_features * self.n_features_))
else:
max_features = 0
self.max_features_ = max_features
if len(y) != n_samples:
raise ValueError("Number of labels=%d does not match "
"number of samples=%d" % (len(y), n_samples))
if not 0 <= self.min_weight_fraction_leaf <= 0.5:
raise ValueError("min_weight_fraction_leaf must in [0, 0.5]")
if max_depth <= 0:
raise ValueError("max_depth must be greater than zero. ")
if not (0 < max_features <= self.n_features_):
raise ValueError("max_features must be in (0, n_features]")
if not isinstance(max_leaf_nodes, (numbers.Integral, np.integer)):
raise ValueError("max_leaf_nodes must be integral number but was "
"%r" % max_leaf_nodes)
if -1 < max_leaf_nodes < 2:
raise ValueError(("max_leaf_nodes {0} must be either smaller than "
"0 or larger than 1").format(max_leaf_nodes))
if sample_weight is not None:
if (getattr(sample_weight, "dtype", None) != DOUBLE or
not sample_weight.flags.contiguous):
sample_weight = np.ascontiguousarray(
sample_weight, dtype=DOUBLE)
if len(sample_weight.shape) > 1:
raise ValueError("Sample weights array has more "
"than one dimension: %d" %
len(sample_weight.shape))
if len(sample_weight) != n_samples:
raise ValueError("Number of weights=%d does not match "
"number of samples=%d" %
(len(sample_weight), n_samples))
if expanded_class_weight is not None:
if sample_weight is not None:
sample_weight = sample_weight * expanded_class_weight
else:
sample_weight = expanded_class_weight
# Set min_weight_leaf from min_weight_fraction_leaf
if sample_weight is None:
min_weight_leaf = (self.min_weight_fraction_leaf *
n_samples)
else:
min_weight_leaf = (self.min_weight_fraction_leaf *
np.sum(sample_weight))
if self.min_impurity_split < 0.:
raise ValueError("min_impurity_split must be greater than "
"or equal to 0")
presort = self.presort
# Allow presort to be 'auto', which means True if the dataset is dense,
# otherwise it will be False.
if self.presort == 'auto' and issparse(X):
presort = False
elif self.presort == 'auto':
presort = True
if presort is True and issparse(X):
raise ValueError("Presorting is not supported for sparse "
"matrices.")
# If multiple trees are built on the same dataset, we only want to
# presort once. Splitters now can accept presorted indices if desired,
# but do not handle any presorting themselves. Ensemble algorithms
# which desire presorting must do presorting themselves and pass that
# matrix into each tree.
if X_idx_sorted is None and presort:
X_idx_sorted = np.asfortranarray(np.argsort(X, axis=0),
dtype=np.int32)
if presort and X_idx_sorted.shape != X.shape:
raise ValueError("The shape of X (X.shape = {}) doesn't match "
"the shape of X_idx_sorted (X_idx_sorted"
".shape = {})".format(X.shape,
X_idx_sorted.shape))
# Build tree
criterion = self.criterion
if not isinstance(criterion, Criterion):
if is_classification:
criterion = CRITERIA_CLF[self.criterion](self.n_outputs_,
self.n_classes_)
else:
criterion = CRITERIA_REG[self.criterion](self.n_outputs_,
n_samples)
SPLITTERS = SPARSE_SPLITTERS if issparse(X) else DENSE_SPLITTERS
splitter = self.splitter
if not isinstance(self.splitter, Splitter):
splitter = SPLITTERS[self.splitter](criterion,
self.max_features_,
min_samples_leaf,
min_weight_leaf,
random_state,
self.presort)
self.tree_ = Tree(self.n_features_, self.n_classes_, self.n_outputs_)
# Use BestFirst if max_leaf_nodes given; use DepthFirst otherwise
if max_leaf_nodes < 0:
builder = DepthFirstTreeBuilder(splitter, min_samples_split,
min_samples_leaf,
min_weight_leaf,
max_depth, self.min_impurity_split)
else:
builder = BestFirstTreeBuilder(splitter, min_samples_split,
min_samples_leaf,
min_weight_leaf,
max_depth,
max_leaf_nodes,
self.min_impurity_split)
builder.build(self.tree_, X, y, sample_weight, X_idx_sorted)
if self.n_outputs_ == 1:
self.n_classes_ = self.n_classes_[0]
self.classes_ = self.classes_[0]
return self
def _validate_X_predict(self, X, check_input):
"""Validate X whenever one tries to predict, apply, predict_proba"""
if self.tree_ is None:
raise NotFittedError("Estimator not fitted, "
"call `fit` before exploiting the model.")
if check_input:
X = check_array(X, dtype=DTYPE, accept_sparse="csr")
if issparse(X) and (X.indices.dtype != np.intc or
X.indptr.dtype != np.intc):
raise ValueError("No support for np.int64 index based "
"sparse matrices")
n_features = X.shape[1]
if self.n_features_ != n_features:
raise ValueError("Number of features of the model must "
"match the input. Model n_features is %s and "
"input n_features is %s "
% (self.n_features_, n_features))
return X
def predict(self, X, check_input=True):
"""Predict class or regression value for X.
For a classification model, the predicted class for each sample in X is
returned. For a regression model, the predicted value based on X is
returned.
Parameters
----------
X : array-like or sparse matrix of shape = [n_samples, n_features]
The input samples. Internally, it will be converted to
``dtype=np.float32`` and if a sparse matrix is provided
to a sparse ``csr_matrix``.
check_input : boolean, (default=True)
Allow to bypass several input checking.
Don't use this parameter unless you know what you do.
Returns
-------
y : array of shape = [n_samples] or [n_samples, n_outputs]
The predicted classes, or the predict values.
"""
X = self._validate_X_predict(X, check_input)
proba = self.tree_.predict(X)
n_samples = X.shape[0]
# Classification
if isinstance(self, ClassifierMixin):
if self.n_outputs_ == 1:
return self.classes_.take(np.argmax(proba, axis=1), axis=0)
else:
predictions = np.zeros((n_samples, self.n_outputs_))
for k in range(self.n_outputs_):
predictions[:, k] = self.classes_[k].take(
np.argmax(proba[:, k], axis=1),
axis=0)
return predictions
# Regression
else:
if self.n_outputs_ == 1:
return proba[:, 0]
else:
return proba[:, :, 0]
def apply(self, X, check_input=True):
"""
Returns the index of the leaf that each sample is predicted as.
.. versionadded:: 0.17
Parameters
----------
X : array_like or sparse matrix, shape = [n_samples, n_features]
The input samples. Internally, it will be converted to
``dtype=np.float32`` and if a sparse matrix is provided
to a sparse ``csr_matrix``.
check_input : boolean, (default=True)
Allow to bypass several input checking.
Don't use this parameter unless you know what you do.
Returns
-------
X_leaves : array_like, shape = [n_samples,]
For each datapoint x in X, return the index of the leaf x
ends up in. Leaves are numbered within
``[0; self.tree_.node_count)``, possibly with gaps in the
numbering.
"""
X = self._validate_X_predict(X, check_input)
return self.tree_.apply(X)
def decision_path(self, X, check_input=True):
"""Return the decision path in the tree
.. versionadded:: 0.18
Parameters
----------
X : array_like or sparse matrix, shape = [n_samples, n_features]
The input samples. Internally, it will be converted to
``dtype=np.float32`` and if a sparse matrix is provided
to a sparse ``csr_matrix``.
check_input : boolean, (default=True)
Allow to bypass several input checking.
Don't use this parameter unless you know what you do.
Returns
-------
indicator : sparse csr array, shape = [n_samples, n_nodes]
            Return a node indicator matrix where non-zero elements
            indicate that the samples go through the corresponding nodes.
"""
X = self._validate_X_predict(X, check_input)
return self.tree_.decision_path(X)
@property
def feature_importances_(self):
"""Return the feature importances.
The importance of a feature is computed as the (normalized) total
reduction of the criterion brought by that feature.
It is also known as the Gini importance.
Returns
-------
feature_importances_ : array, shape = [n_features]
"""
if self.tree_ is None:
raise NotFittedError("Estimator not fitted, call `fit` before"
" `feature_importances_`.")
return self.tree_.compute_feature_importances()
# =============================================================================
# Public estimators
# =============================================================================
class DecisionTreeClassifier(BaseDecisionTree, ClassifierMixin):
"""A decision tree classifier.
Read more in the :ref:`User Guide <tree>`.
Parameters
----------
criterion : string, optional (default="gini")
The function to measure the quality of a split. Supported criteria are
"gini" for the Gini impurity and "entropy" for the information gain.
splitter : string, optional (default="best")
The strategy used to choose the split at each node. Supported
strategies are "best" to choose the best split and "random" to choose
the best random split.
max_features : int, float, string or None, optional (default=None)
The number of features to consider when looking for the best split:
- If int, then consider `max_features` features at each split.
- If float, then `max_features` is a percentage and
`int(max_features * n_features)` features are considered at each
split.
- If "auto", then `max_features=sqrt(n_features)`.
- If "sqrt", then `max_features=sqrt(n_features)`.
- If "log2", then `max_features=log2(n_features)`.
- If None, then `max_features=n_features`.
Note: the search for a split does not stop until at least one
valid partition of the node samples is found, even if it requires to
effectively inspect more than ``max_features`` features.
max_depth : int or None, optional (default=None)
The maximum depth of the tree. If None, then nodes are expanded until
all leaves are pure or until all leaves contain less than
min_samples_split samples.
min_samples_split : int, float, optional (default=2)
The minimum number of samples required to split an internal node:
- If int, then consider `min_samples_split` as the minimum number.
- If float, then `min_samples_split` is a percentage and
`ceil(min_samples_split * n_samples)` are the minimum
number of samples for each split.
.. versionchanged:: 0.18
Added float values for percentages.
min_samples_leaf : int, float, optional (default=1)
The minimum number of samples required to be at a leaf node:
- If int, then consider `min_samples_leaf` as the minimum number.
- If float, then `min_samples_leaf` is a percentage and
`ceil(min_samples_leaf * n_samples)` are the minimum
number of samples for each node.
.. versionchanged:: 0.18
Added float values for percentages.
min_weight_fraction_leaf : float, optional (default=0.)
The minimum weighted fraction of the sum total of weights (of all
the input samples) required to be at a leaf node. Samples have
equal weight when sample_weight is not provided.
max_leaf_nodes : int or None, optional (default=None)
Grow a tree with ``max_leaf_nodes`` in best-first fashion.
Best nodes are defined as relative reduction in impurity.
If None then unlimited number of leaf nodes.
class_weight : dict, list of dicts, "balanced" or None, optional (default=None)
Weights associated with classes in the form ``{class_label: weight}``.
If not given, all classes are supposed to have weight one. For
multi-output problems, a list of dicts can be provided in the same
order as the columns of y.
The "balanced" mode uses the values of y to automatically adjust
weights inversely proportional to class frequencies in the input data
as ``n_samples / (n_classes * np.bincount(y))``
For multi-output, the weights of each column of y will be multiplied.
Note that these weights will be multiplied with sample_weight (passed
through the fit method) if sample_weight is specified.
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
min_impurity_split : float, optional (default=1e-7)
Threshold for early stopping in tree growth. A node will split
if its impurity is above the threshold, otherwise it is a leaf.
.. versionadded:: 0.18
presort : bool, optional (default=False)
Whether to presort the data to speed up the finding of best splits in
fitting. For the default settings of a decision tree on large
datasets, setting this to true may slow down the training process.
When using either a smaller dataset or a restricted depth, this may
speed up the training.
Attributes
----------
classes_ : array of shape = [n_classes] or a list of such arrays
The classes labels (single output problem),
or a list of arrays of class labels (multi-output problem).
feature_importances_ : array of shape = [n_features]
The feature importances. The higher, the more important the
feature. The importance of a feature is computed as the (normalized)
total reduction of the criterion brought by that feature. It is also
known as the Gini importance [4]_.
max_features_ : int,
The inferred value of max_features.
n_classes_ : int or list
The number of classes (for single output problems),
or a list containing the number of classes for each
output (for multi-output problems).
n_features_ : int
The number of features when ``fit`` is performed.
n_outputs_ : int
The number of outputs when ``fit`` is performed.
tree_ : Tree object
The underlying Tree object.
See also
--------
DecisionTreeRegressor
References
----------
.. [1] https://en.wikipedia.org/wiki/Decision_tree_learning
.. [2] L. Breiman, J. Friedman, R. Olshen, and C. Stone, "Classification
and Regression Trees", Wadsworth, Belmont, CA, 1984.
.. [3] T. Hastie, R. Tibshirani and J. Friedman. "Elements of Statistical
Learning", Springer, 2009.
.. [4] L. Breiman, and A. Cutler, "Random Forests",
http://www.stat.berkeley.edu/~breiman/RandomForests/cc_home.htm
Examples
--------
>>> from sklearn.datasets import load_iris
>>> from sklearn.model_selection import cross_val_score
>>> from sklearn.tree import DecisionTreeClassifier
>>> clf = DecisionTreeClassifier(random_state=0)
>>> iris = load_iris()
>>> cross_val_score(clf, iris.data, iris.target, cv=10)
... # doctest: +SKIP
...
array([ 1. , 0.93..., 0.86..., 0.93..., 0.93...,
0.93..., 0.93..., 1. , 0.93..., 1. ])
"""
def __init__(self,
criterion="gini",
splitter="best",
max_depth=None,
min_samples_split=2,
min_samples_leaf=1,
min_weight_fraction_leaf=0.,
max_features=None,
random_state=None,
max_leaf_nodes=None,
min_impurity_split=1e-7,
class_weight=None,
presort=False):
super(DecisionTreeClassifier, self).__init__(
criterion=criterion,
splitter=splitter,
max_depth=max_depth,
min_samples_split=min_samples_split,
min_samples_leaf=min_samples_leaf,
min_weight_fraction_leaf=min_weight_fraction_leaf,
max_features=max_features,
max_leaf_nodes=max_leaf_nodes,
class_weight=class_weight,
random_state=random_state,
min_impurity_split=min_impurity_split,
presort=presort)
def fit(self, X, y, sample_weight=None, check_input=True,
X_idx_sorted=None):
"""Build a decision tree classifier from the training set (X, y).
Parameters
----------
X : array-like or sparse matrix, shape = [n_samples, n_features]
The training input samples. Internally, it will be converted to
``dtype=np.float32`` and if a sparse matrix is provided
to a sparse ``csc_matrix``.
y : array-like, shape = [n_samples] or [n_samples, n_outputs]
The target values (class labels) as integers or strings.
sample_weight : array-like, shape = [n_samples] or None
Sample weights. If None, then samples are equally weighted. Splits
that would create child nodes with net zero or negative weight are
ignored while searching for a split in each node. Splits are also
ignored if they would result in any single class carrying a
negative weight in either child node.
check_input : boolean, (default=True)
Allow to bypass several input checking.
Don't use this parameter unless you know what you do.
X_idx_sorted : array-like, shape = [n_samples, n_features], optional
            The indexes of the sorted training input samples. If many trees
are grown on the same dataset, this allows the ordering to be
cached between trees. If None, the data will be sorted here.
Don't use this parameter unless you know what to do.
Returns
-------
self : object
Returns self.
"""
super(DecisionTreeClassifier, self).fit(
X, y,
sample_weight=sample_weight,
check_input=check_input,
X_idx_sorted=X_idx_sorted)
return self
def predict_proba(self, X, check_input=True):
"""Predict class probabilities of the input samples X.
The predicted class probability is the fraction of samples of the same
class in a leaf.
check_input : boolean, (default=True)
Allow to bypass several input checking.
Don't use this parameter unless you know what you do.
Parameters
----------
X : array-like or sparse matrix of shape = [n_samples, n_features]
The input samples. Internally, it will be converted to
``dtype=np.float32`` and if a sparse matrix is provided
to a sparse ``csr_matrix``.
Returns
-------
p : array of shape = [n_samples, n_classes], or a list of n_outputs
such arrays if n_outputs > 1.
The class probabilities of the input samples. The order of the
classes corresponds to that in the attribute `classes_`.
"""
X = self._validate_X_predict(X, check_input)
proba = self.tree_.predict(X)
if self.n_outputs_ == 1:
proba = proba[:, :self.n_classes_]
normalizer = proba.sum(axis=1)[:, np.newaxis]
normalizer[normalizer == 0.0] = 1.0
proba /= normalizer
return proba
else:
all_proba = []
for k in range(self.n_outputs_):
proba_k = proba[:, k, :self.n_classes_[k]]
normalizer = proba_k.sum(axis=1)[:, np.newaxis]
normalizer[normalizer == 0.0] = 1.0
proba_k /= normalizer
all_proba.append(proba_k)
return all_proba
def predict_log_proba(self, X):
"""Predict class log-probabilities of the input samples X.
Parameters
----------
X : array-like or sparse matrix of shape = [n_samples, n_features]
The input samples. Internally, it will be converted to
``dtype=np.float32`` and if a sparse matrix is provided
to a sparse ``csr_matrix``.
Returns
-------
p : array of shape = [n_samples, n_classes], or a list of n_outputs
such arrays if n_outputs > 1.
The class log-probabilities of the input samples. The order of the
classes corresponds to that in the attribute `classes_`.
"""
proba = self.predict_proba(X)
if self.n_outputs_ == 1:
return np.log(proba)
else:
for k in range(self.n_outputs_):
proba[k] = np.log(proba[k])
return proba
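# A hedged illustration (added here; not part of scikit-learn itself): the
# probabilities returned by ``predict_proba`` above are leaf-level class
# fractions, so on a perfectly separable toy problem each row is one-hot:
#     clf = DecisionTreeClassifier(random_state=0)
#     clf.fit([[0.0], [1.0]], [0, 1])
#     clf.predict_proba([[0.0], [1.0]])  # -> [[1., 0.], [0., 1.]]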
class DecisionTreeRegressor(BaseDecisionTree, RegressorMixin):
"""A decision tree regressor.
Read more in the :ref:`User Guide <tree>`.
Parameters
----------
criterion : string, optional (default="mse")
The function to measure the quality of a split. Supported criteria
are "mse" for the mean squared error, which is equal to variance
reduction as feature selection criterion, and "mae" for the mean
absolute error.
.. versionadded:: 0.18
Mean Absolute Error (MAE) criterion.
splitter : string, optional (default="best")
The strategy used to choose the split at each node. Supported
strategies are "best" to choose the best split and "random" to choose
the best random split.
max_features : int, float, string or None, optional (default=None)
The number of features to consider when looking for the best split:
- If int, then consider `max_features` features at each split.
- If float, then `max_features` is a percentage and
`int(max_features * n_features)` features are considered at each
split.
- If "auto", then `max_features=n_features`.
- If "sqrt", then `max_features=sqrt(n_features)`.
- If "log2", then `max_features=log2(n_features)`.
- If None, then `max_features=n_features`.
Note: the search for a split does not stop until at least one
valid partition of the node samples is found, even if it requires to
effectively inspect more than ``max_features`` features.
max_depth : int or None, optional (default=None)
The maximum depth of the tree. If None, then nodes are expanded until
all leaves are pure or until all leaves contain less than
min_samples_split samples.
min_samples_split : int, float, optional (default=2)
The minimum number of samples required to split an internal node:
- If int, then consider `min_samples_split` as the minimum number.
- If float, then `min_samples_split` is a percentage and
`ceil(min_samples_split * n_samples)` are the minimum
number of samples for each split.
.. versionchanged:: 0.18
Added float values for percentages.
min_samples_leaf : int, float, optional (default=1)
The minimum number of samples required to be at a leaf node:
- If int, then consider `min_samples_leaf` as the minimum number.
- If float, then `min_samples_leaf` is a percentage and
`ceil(min_samples_leaf * n_samples)` are the minimum
number of samples for each node.
.. versionchanged:: 0.18
Added float values for percentages.
min_weight_fraction_leaf : float, optional (default=0.)
The minimum weighted fraction of the sum total of weights (of all
the input samples) required to be at a leaf node. Samples have
equal weight when sample_weight is not provided.
max_leaf_nodes : int or None, optional (default=None)
Grow a tree with ``max_leaf_nodes`` in best-first fashion.
Best nodes are defined as relative reduction in impurity.
If None then unlimited number of leaf nodes.
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
min_impurity_split : float, optional (default=1e-7)
Threshold for early stopping in tree growth. If the impurity
of a node is below the threshold, the node is a leaf.
.. versionadded:: 0.18
presort : bool, optional (default=False)
Whether to presort the data to speed up the finding of best splits in
fitting. For the default settings of a decision tree on large
datasets, setting this to true may slow down the training process.
When using either a smaller dataset or a restricted depth, this may
speed up the training.
Attributes
----------
feature_importances_ : array of shape = [n_features]
The feature importances.
The higher, the more important the feature.
The importance of a feature is computed as the
(normalized) total reduction of the criterion brought
by that feature. It is also known as the Gini importance [4]_.
max_features_ : int,
The inferred value of max_features.
n_features_ : int
The number of features when ``fit`` is performed.
n_outputs_ : int
The number of outputs when ``fit`` is performed.
tree_ : Tree object
The underlying Tree object.
See also
--------
DecisionTreeClassifier
References
----------
.. [1] https://en.wikipedia.org/wiki/Decision_tree_learning
.. [2] L. Breiman, J. Friedman, R. Olshen, and C. Stone, "Classification
and Regression Trees", Wadsworth, Belmont, CA, 1984.
.. [3] T. Hastie, R. Tibshirani and J. Friedman. "Elements of Statistical
Learning", Springer, 2009.
.. [4] L. Breiman, and A. Cutler, "Random Forests",
http://www.stat.berkeley.edu/~breiman/RandomForests/cc_home.htm
Examples
--------
>>> from sklearn.datasets import load_boston
>>> from sklearn.model_selection import cross_val_score
>>> from sklearn.tree import DecisionTreeRegressor
>>> boston = load_boston()
>>> regressor = DecisionTreeRegressor(random_state=0)
>>> cross_val_score(regressor, boston.data, boston.target, cv=10)
... # doctest: +SKIP
...
array([ 0.61..., 0.57..., -0.34..., 0.41..., 0.75...,
0.07..., 0.29..., 0.33..., -1.42..., -1.77...])
"""
def __init__(self,
criterion="mse",
splitter="best",
max_depth=None,
min_samples_split=2,
min_samples_leaf=1,
min_weight_fraction_leaf=0.,
max_features=None,
random_state=None,
max_leaf_nodes=None,
min_impurity_split=1e-7,
presort=False):
super(DecisionTreeRegressor, self).__init__(
criterion=criterion,
splitter=splitter,
max_depth=max_depth,
min_samples_split=min_samples_split,
min_samples_leaf=min_samples_leaf,
min_weight_fraction_leaf=min_weight_fraction_leaf,
max_features=max_features,
max_leaf_nodes=max_leaf_nodes,
random_state=random_state,
min_impurity_split=min_impurity_split,
presort=presort)
def fit(self, X, y, sample_weight=None, check_input=True,
X_idx_sorted=None):
"""Build a decision tree regressor from the training set (X, y).
Parameters
----------
X : array-like or sparse matrix, shape = [n_samples, n_features]
The training input samples. Internally, it will be converted to
``dtype=np.float32`` and if a sparse matrix is provided
to a sparse ``csc_matrix``.
y : array-like, shape = [n_samples] or [n_samples, n_outputs]
The target values (real numbers). Use ``dtype=np.float64`` and
``order='C'`` for maximum efficiency.
sample_weight : array-like, shape = [n_samples] or None
Sample weights. If None, then samples are equally weighted. Splits
that would create child nodes with net zero or negative weight are
ignored while searching for a split in each node.
check_input : boolean, (default=True)
Allow to bypass several input checking.
Don't use this parameter unless you know what you do.
X_idx_sorted : array-like, shape = [n_samples, n_features], optional
            The indexes of the sorted training input samples. If many trees
are grown on the same dataset, this allows the ordering to be
cached between trees. If None, the data will be sorted here.
Don't use this parameter unless you know what to do.
Returns
-------
self : object
Returns self.
"""
super(DecisionTreeRegressor, self).fit(
X, y,
sample_weight=sample_weight,
check_input=check_input,
X_idx_sorted=X_idx_sorted)
return self
class ExtraTreeClassifier(DecisionTreeClassifier):
"""An extremely randomized tree classifier.
Extra-trees differ from classic decision trees in the way they are built.
When looking for the best split to separate the samples of a node into two
groups, random splits are drawn for each of the `max_features` randomly
selected features and the best split among those is chosen. When
    `max_features` is set to 1, this amounts to building a totally random
decision tree.
Warning: Extra-trees should only be used within ensemble methods.
Read more in the :ref:`User Guide <tree>`.
See also
--------
ExtraTreeRegressor, ExtraTreesClassifier, ExtraTreesRegressor
References
----------
.. [1] P. Geurts, D. Ernst., and L. Wehenkel, "Extremely randomized trees",
Machine Learning, 63(1), 3-42, 2006.
"""
def __init__(self,
criterion="gini",
splitter="random",
max_depth=None,
min_samples_split=2,
min_samples_leaf=1,
min_weight_fraction_leaf=0.,
max_features="auto",
random_state=None,
max_leaf_nodes=None,
min_impurity_split=1e-7,
class_weight=None):
super(ExtraTreeClassifier, self).__init__(
criterion=criterion,
splitter=splitter,
max_depth=max_depth,
min_samples_split=min_samples_split,
min_samples_leaf=min_samples_leaf,
min_weight_fraction_leaf=min_weight_fraction_leaf,
max_features=max_features,
max_leaf_nodes=max_leaf_nodes,
class_weight=class_weight,
min_impurity_split=min_impurity_split,
random_state=random_state)
class ExtraTreeRegressor(DecisionTreeRegressor):
"""An extremely randomized tree regressor.
Extra-trees differ from classic decision trees in the way they are built.
When looking for the best split to separate the samples of a node into two
groups, random splits are drawn for each of the `max_features` randomly
selected features and the best split among those is chosen. When
    `max_features` is set to 1, this amounts to building a totally random
decision tree.
Warning: Extra-trees should only be used within ensemble methods.
Read more in the :ref:`User Guide <tree>`.
See also
--------
ExtraTreeClassifier, ExtraTreesClassifier, ExtraTreesRegressor
References
----------
.. [1] P. Geurts, D. Ernst., and L. Wehenkel, "Extremely randomized trees",
Machine Learning, 63(1), 3-42, 2006.
"""
def __init__(self,
criterion="mse",
splitter="random",
max_depth=None,
min_samples_split=2,
min_samples_leaf=1,
min_weight_fraction_leaf=0.,
max_features="auto",
random_state=None,
min_impurity_split=1e-7,
max_leaf_nodes=None):
super(ExtraTreeRegressor, self).__init__(
criterion=criterion,
splitter=splitter,
max_depth=max_depth,
min_samples_split=min_samples_split,
min_samples_leaf=min_samples_leaf,
min_weight_fraction_leaf=min_weight_fraction_leaf,
max_features=max_features,
max_leaf_nodes=max_leaf_nodes,
min_impurity_split=min_impurity_split,
random_state=random_state)
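# A hedged usage sketch (added here; not part of scikit-learn itself). As the
# docstrings above warn, extra-trees are intended to be used inside ensembles;
# one way to do that is via sklearn.ensemble.BaggingClassifier:
#     from sklearn.ensemble import BaggingClassifier
#     from sklearn.datasets import load_iris
#     iris = load_iris()
#     ensemble = BaggingClassifier(ExtraTreeClassifier(random_state=0),
#                                  n_estimators=10, random_state=0)
#     ensemble.fit(iris.data, iris.target)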
| mit |
tanayz/Kaggle | HB_ML_Challenge/startingKitHiggsKegl.py | 2 | 7993 |
# coding: utf-8
## Starting kit for the Higgs boson machine learning challenge
# This notebook contains a starting kit for the <a href="https://www.kaggle.com/c/higgs-boson">
# Higgs boson machine learning challenge</a>. Download the training set (called <code>training.csv</code>) and the test set (<code>test.csv</code>), then execute cells in order.
# In[1]:
import random,string,math,csv
import numpy as np
import matplotlib.pyplot as plt
### Reading and formatting training data
# In[2]:
all = list(csv.reader(open("training.csv","rb"), delimiter=','))
# Slicing off header row and id, weight, and label columns.
# In[3]:
xs = np.array([map(float, row[1:-2]) for row in all[1:]])
(numPoints,numFeatures) = xs.shape
# Perturbing features to avoid ties. It's far from optimal but makes life easier in this simple example.
# In[4]:
xs = np.add(xs, np.random.normal(0.0, 0.0001, xs.shape))
# Label selectors.
# In[5]:
sSelector = np.array([row[-1] == 's' for row in all[1:]])
bSelector = np.array([row[-1] == 'b' for row in all[1:]])
# Weights and weight sums.
# In[6]:
weights = np.array([float(row[-2]) for row in all[1:]])
sumWeights = np.sum(weights)
sumSWeights = np.sum(weights[sSelector])
sumBWeights = np.sum(weights[bSelector])
### Training and validation cuts
# We will train a classifier on a random training set for minimizing the weighted error with balanced weights, then we will maximize the AMS on the held out validation set.
# In[7]:
randomPermutation = random.sample(range(len(xs)), len(xs))
numPointsTrain = int(numPoints*0.9)
numPointsValidation = numPoints - numPointsTrain
xsTrain = xs[randomPermutation[:numPointsTrain]]
xsValidation = xs[randomPermutation[numPointsTrain:]]
sSelectorTrain = sSelector[randomPermutation[:numPointsTrain]]
bSelectorTrain = bSelector[randomPermutation[:numPointsTrain]]
sSelectorValidation = sSelector[randomPermutation[numPointsTrain:]]
bSelectorValidation = bSelector[randomPermutation[numPointsTrain:]]
weightsTrain = weights[randomPermutation[:numPointsTrain]]
weightsValidation = weights[randomPermutation[numPointsTrain:]]
sumWeightsTrain = np.sum(weightsTrain)
sumSWeightsTrain = np.sum(weightsTrain[sSelectorTrain])
sumBWeightsTrain = np.sum(weightsTrain[bSelectorTrain])
# In[8]:
xsTrainTranspose = xsTrain.transpose()
# Making signal and background weights sum to $1/2$ each to emulate uniform priors $p(s)=p(b)=1/2$.
# In[9]:
weightsBalancedTrain = np.array([0.5 * weightsTrain[i]/sumSWeightsTrain
if sSelectorTrain[i]
else 0.5 * weightsTrain[i]/sumBWeightsTrain\
for i in range(numPointsTrain)])
### Training naive Bayes and defining the score function
# Number of bins per dimension for binned naive Bayes.
# In[10]:
numBins = 10
# <code>logPs[fI,bI]</code> will be the log probability of a data point <code>x</code> with <code>binMaxs[bI - 1] < x[fI] <= binMaxs[bI]</code> (with <code>binMaxs[-1] = -</code>$\infty$ by convention) being a signal under uniform priors $p(\text{s}) = p(\text{b}) = 1/2$.
# In[11]:
logPs = np.empty([numFeatures, numBins])
binMaxs = np.empty([numFeatures, numBins])
binIndexes = np.array(range(0, numPointsTrain+1, numPointsTrain/numBins))
# In[12]:
for fI in range(numFeatures):
# index permutation of sorted feature column
indexes = xsTrainTranspose[fI].argsort()
for bI in range(numBins):
# upper bin limits
binMaxs[fI, bI] = xsTrainTranspose[fI, indexes[binIndexes[bI+1]-1]]
# training indices of points in a bin
indexesInBin = indexes[binIndexes[bI]:binIndexes[bI+1]]
# sum of signal weights in bin
wS = np.sum(weightsBalancedTrain[indexesInBin]
[sSelectorTrain[indexesInBin]])
# sum of background weights in bin
wB = np.sum(weightsBalancedTrain[indexesInBin]
[bSelectorTrain[indexesInBin]])
# log probability of being a signal in the bin
logPs[fI, bI] = math.log(wS/(wS+wB))
# The score function we will use to sort the test examples. For readability it is shifted so negative means likely background (under uniform prior) and positive means likely signal. <code>x</code> is an input vector.
# In[13]:
def score(x):
logP = 0
for fI in range(numFeatures):
bI = 0
# linear search for the bin index of the fIth feature
# of the signal
while bI < len(binMaxs[fI]) - 1 and x[fI] > binMaxs[fI, bI]:
bI += 1
logP += logPs[fI, bI] - math.log(0.5)
return logP
### Optimizing the AMS on the held out validation set
# The Approximate Median Significance
# \begin{equation*}
# \text{AMS} = \sqrt{ 2 \left( (s + b + 10) \ln \left( 1 + \frac{s}{b +
# 10} \right) - s \right) }
# \end{equation*}
# <code>s</code> and <code>b</code> are the sum of signal and background weights, respectively, in the selection region.
# In[14]:
def AMS(s,b):
assert s >= 0
assert b >= 0
bReg = 10.
return math.sqrt(2 * ((s + b + bReg) *
math.log(1 + s / (b + bReg)) - s))
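# A quick numerical sanity check of the formula above (added for illustration,
# not part of the original kit): with s = 100 and b = 1000, the regularized
# term gives AMS = sqrt(2 * (1110 * ln(1 + 100/1010) - 100)), roughly 3.1.
#     print AMS(100., 1000.)  # ~3.1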
# Computing the scores on the validation set
# In[15]:
validationScores = np.array([score(x) for x in xsValidation])
# Sorting the indices in increasing order of the scores.
# In[16]:
tIIs = validationScores.argsort()
# Weights have to be normalized to the same sum as in the full set.
# In[17]:
wFactor = 1.* numPoints / numPointsValidation
# Initializing $s$ and $b$ to the full sum of weights, we start by having all points in the selection region.
# In[35]:
s = np.sum(weightsValidation[sSelectorValidation])
b = np.sum(weightsValidation[bSelectorValidation])
# <code>amss</code> will contain AMSs after each point moved out of the selection region in the sorted validation set.
# In[36]:
amss = np.empty([len(tIIs)])
# <code>amsMax</code> will contain the best validation AMS, and <code>threshold</code> will be the smallest score among the selected points.
# In[37]:
amsMax = 0
threshold = 0.0
# We will do <code>len(tIIs)</code> iterations, which means that <code>amss[-1]</code> is the AMS when only the point with the highest score is selected.
# In[38]:
for tI in range(len(tIIs)):
# don't forget to renormalize the weights to the same sum
# as in the complete training set
amss[tI] = AMS(max(0,s * wFactor),max(0,b * wFactor))
if amss[tI] > amsMax:
amsMax = amss[tI]
threshold = validationScores[tIIs[tI]]
#print tI,threshold
if sSelectorValidation[tIIs[tI]]:
s -= weightsValidation[tIIs[tI]]
else:
b -= weightsValidation[tIIs[tI]]
# In[39]:
amsMax
# In[40]:
threshold
# In[41]:
plt.plot(amss)
### Computing the permutation on the test set
# Reading the test file, slicing off the header row and the id column, and converting the data into float.
# In[42]:
test = list(csv.reader(open("test.csv", "rb"),delimiter=','))
xsTest = np.array([map(float, row[1:]) for row in test[1:]])
# In[43]:
testIds = np.array([int(row[0]) for row in test[1:]])
# Computing the scores.
# In[44]:
testScores = np.array([score(x) for x in xsTest])
# Computing the rank order.
# In[45]:
testInversePermutation = testScores.argsort()
# In[46]:
testPermutation = list(testInversePermutation)
for tI,tII in zip(range(len(testInversePermutation)),
testInversePermutation):
testPermutation[tII] = tI
# Computing the submission file with columns EventId, RankOrder, and Class.
# In[47]:
submission = np.array([[str(testIds[tI]),str(testPermutation[tI]+1),
's' if testScores[tI] >= threshold else 'b']
for tI in range(len(testIds))])
# In[48]:
submission = np.append([['EventId','RankOrder','Class']],
submission, axis=0)
# Saving the file that can be submitted to Kaggle.
# In[49]:
np.savetxt("submission.csv",submission,fmt='%s',delimiter=',')
| apache-2.0 |
Winand/pandas | pandas/tests/test_panelnd.py | 15 | 3726 | # -*- coding: utf-8 -*-
import pytest
from warnings import catch_warnings
from pandas.core import panelnd
from pandas.core.panel import Panel
from pandas.util.testing import assert_panel_equal
import pandas.util.testing as tm
class TestPanelnd(object):
def setup_method(self, method):
pass
def test_4d_construction(self):
with catch_warnings(record=True):
# create a 4D
Panel4D = panelnd.create_nd_panel_factory(
klass_name='Panel4D',
orders=['labels', 'items', 'major_axis', 'minor_axis'],
slices={'items': 'items', 'major_axis': 'major_axis',
'minor_axis': 'minor_axis'},
slicer=Panel,
aliases={'major': 'major_axis', 'minor': 'minor_axis'},
stat_axis=2)
p4d = Panel4D(dict(L1=tm.makePanel(), L2=tm.makePanel())) # noqa
def test_4d_construction_alt(self):
with catch_warnings(record=True):
# create a 4D
Panel4D = panelnd.create_nd_panel_factory(
klass_name='Panel4D',
orders=['labels', 'items', 'major_axis', 'minor_axis'],
slices={'items': 'items', 'major_axis': 'major_axis',
'minor_axis': 'minor_axis'},
slicer='Panel',
aliases={'major': 'major_axis', 'minor': 'minor_axis'},
stat_axis=2)
p4d = Panel4D(dict(L1=tm.makePanel(), L2=tm.makePanel())) # noqa
def test_4d_construction_error(self):
# create a 4D
pytest.raises(Exception,
panelnd.create_nd_panel_factory,
klass_name='Panel4D',
orders=['labels', 'items', 'major_axis',
'minor_axis'],
slices={'items': 'items',
'major_axis': 'major_axis',
'minor_axis': 'minor_axis'},
slicer='foo',
aliases={'major': 'major_axis',
'minor': 'minor_axis'},
stat_axis=2)
def test_5d_construction(self):
with catch_warnings(record=True):
# create a 4D
Panel4D = panelnd.create_nd_panel_factory(
klass_name='Panel4D',
orders=['labels1', 'items', 'major_axis', 'minor_axis'],
slices={'items': 'items', 'major_axis': 'major_axis',
'minor_axis': 'minor_axis'},
slicer=Panel,
aliases={'major': 'major_axis', 'minor': 'minor_axis'},
stat_axis=2)
# deprecation GH13564
p4d = Panel4D(dict(L1=tm.makePanel(), L2=tm.makePanel()))
# create a 5D
Panel5D = panelnd.create_nd_panel_factory(
klass_name='Panel5D',
orders=['cool1', 'labels1', 'items', 'major_axis',
'minor_axis'],
slices={'labels1': 'labels1', 'items': 'items',
'major_axis': 'major_axis',
'minor_axis': 'minor_axis'},
slicer=Panel4D,
aliases={'major': 'major_axis', 'minor': 'minor_axis'},
stat_axis=2)
# deprecation GH13564
p5d = Panel5D(dict(C1=p4d))
# slice back to 4d
results = p5d.iloc[p5d.cool1.get_loc('C1'), :, :, 0:3, :]
expected = p4d.iloc[:, :, 0:3, :]
assert_panel_equal(results['L1'], expected['L1'])
# test a transpose
# results = p5d.transpose(1,2,3,4,0)
# expected =
| bsd-3-clause |
lreis2415/SEIMS | seims/pyseims_check.py | 1 | 2306 | """Check the requirements of pySEIMS.
"""
from __future__ import absolute_import, unicode_literals
# 1. pygeoc
try:
import pygeoc
from pygeoc.raster import *
from pygeoc.vector import *
from pygeoc.hydro import *
from pygeoc.utils import *
except ImportError:
print('ERROR: PyGeoC is not successfully installed, please check and retry!')
else:
print('PyGeoC-%s has been installed!' % pygeoc.__version__)
# 2. gdal
try:
import osgeo
from osgeo import ogr
from osgeo import osr
from osgeo import gdalconst
from osgeo import gdal_array
from osgeo import gdal
except ImportError:
print('ERROR: GDAL is not successfully installed, please check and retry!')
else:
print('GDAL-%s has been installed!' % osgeo.__version__)
# 3. numpy
try:
import numpy
except ImportError:
print('ERROR: NumPy is not successfully installed, please check and retry!')
else:
print('NumPy-%s has been installed!' % numpy.__version__)
# 4. pymongo
try:
import pymongo
from pymongo import MongoClient
except ImportError:
print('ERROR: pymongo is not successfully installed, please check and retry!')
else:
print('pymongo-%s has been installed!' % pymongo.__version__)
# 5. networkx
try:
import networkx
except ImportError:
print('ERROR: networkx is not successfully installed, please check and retry!')
else:
print('networkx-%s has been installed!' % networkx.__version__)
# 6. shapely
try:
import shapely
except ImportError:
print('ERROR: shapely is not successfully installed, please check and retry!')
else:
print('shapely-%s has been installed!' % shapely.__version__)
# 7. matplotlib
try:
import matplotlib
except ImportError:
print('ERROR: matplotlib is not successfully installed, please check and retry!')
else:
print('matplotlib-%s has been installed!' % matplotlib.__version__)
# 8. deap
try:
import deap
except ImportError:
print('ERROR: deap is not successfully installed, please check and retry!')
else:
print('deap-%s has been installed!' % deap.__version__)
# 9. scoop
try:
import scoop
except ImportError:
print('ERROR: scoop is not successfully installed, please check and retry!')
else:
print('scoop-%s.%s has been installed!' % (scoop.__version__, scoop.__revision__))
| gpl-3.0 |
nicjhan/MOM6-examples | tools/analysis/MOM6_annual_analysis.py | 6 | 4961 | # Script to plot sub-surface ocean temperature drift.
# Analysis: using newer python 2.7.3
"""
module purge
module use -a /home/fms/local/modulefiles
module load gcc
module load netcdf/4.2
module load python/2.7.3
"""
import os
import math
import numpy as np
from numpy import ma
from netCDF4 import Dataset, MFDataset, num2date, date2num
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
# -----------------------------------------------------------------------------
# Function to convert from page coordinates to non-dimensional coordinates
def page_to_ndc( panel, page ):
if len(panel) == 4:
ndc = [ 0.0, 0.0, 0.0, 0.0 ]
ndc[0] = (panel[0]-page[0])/(page[2]-page[0])
ndc[1] = (panel[1]-page[1])/(page[3]-page[1])
ndc[2] = (panel[2]-panel[0])/(page[2]-page[0])
ndc[3] = (panel[3]-panel[1])/(page[3]-page[1])
return ndc
elif len(panel) == 2:
ndc = [ 0.0, 0.0 ]
ndc[0] = (panel[0]-page[0])/(page[2]-page[0])
ndc[1] = (panel[1]-page[1])/(page[3]-page[1])
return ndc
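# Worked example (added for illustration), using the letter-size page
# [0, 0, 612, 792] and the panel [89, 99, 480, 670] defined further below:
# page_to_ndc returns approximately [0.145, 0.125, 0.639, 0.721], i.e.
# (89/612, 99/792, (480-89)/612, (670-99)/792).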
# -----------------------------------------------------------------------------
# Function to discretize colormap with option to white out certain regions
def cmap_discretize(cmap, N, white=None):
"""Return a discrete colormap from the continuous colormap cmap.
cmap: colormap instance, eg. cm.jet.
N: number of colors.
Example
x = resize(arange(100), (5,100))
djet = cmap_discretize(cm.jet, 5)
imshow(x, cmap=djet)
"""
if type(cmap) == str:
        cmap = plt.get_cmap(cmap)
colors_i = np.concatenate((np.linspace(0, 1., N), (0.,0.,0.,0.)))
colors_rgba = cmap(colors_i)
# White levels?
if white != None:
for i in range(N):
if white[i] > 0.0:
colors_rgba[i,:] = 1.0
# Construct colormap distionary
indices = np.linspace(0, 1., N+1)
cdict = {}
for ki,key in enumerate(('red','green','blue')):
cdict[key] = [ (indices[i], colors_rgba[i-1,ki], colors_rgba[i,ki]) for i in xrange(N+1) ]
# Return colormap object.
return matplotlib.colors.LinearSegmentedColormap(cmap.name + "_%d"%N, cdict, 1024)
# -----------------------------------------------------------------------------
# Radius of the earth (shared/constants/constants.F90)
radius = 6371.0e3
# Ocean heat capacity (ocean_core/ocean_parameters.F90)
cp_ocean = 3992.10322329649
# Read 'descriptor' and 'years' from external file
f = open("files.txt")
for line in f.readlines():
exec(line.lstrip())
f.close()
model_label = "%s (%s)" % (descriptor,years)
# TMPDIR where input files are located
tmpdir = "./"
# Open input files
#fstatic = Dataset(tmpdir+'19000101.ocean_geometry.nc', 'r')
fstatic = MFDataset(tmpdir+'*.ocean_static.nc')
ftemp = MFDataset(tmpdir+'*.ocean_annual.nc')
# Time info
time = ftemp.variables["time"]
ntimes = len(time[:])
date = num2date(time,time.units,time.calendar.lower())
year = [d.year for d in date]
time_days = date2num(date,'days since 01-01-0001',time.calendar.lower())
# Grid info
#area = fstatic.variables["Ah"][:]
area = fstatic.variables["area_t"][:]
z = ftemp.variables["zl"][:]
nz = len(z)
# Input variables
temp = ftemp.variables["temp"]
salt = ftemp.variables["salt"]
# Create arrays to hold derived variables
ztemp = ma.array( np.zeros((ntimes,nz), 'float64'), mask=True )
zsalt = ma.array( np.zeros((ntimes,nz), 'float64'), mask=True )
# Loop over time
#for itime in range(ntimes):
for itime in range(1):
    # Compute vertical profile of temperature
tmp = temp[itime,:,:,:]
contmp = salt[itime,:,:,:]
for iz in range(nz):
ztemp[itime,iz] = ma.average(tmp[iz,:,:], weights=area)
zsalt[itime,iz] = ma.average(contmp[iz,:,:], weights=area)
# Transpose for compatibility with contour plots
ztemp = ztemp.transpose()
zsalt = zsalt.transpose()
# Close files
fstatic.close()
ftemp.close()
# -----------------------------------------------------------------------------
# Create plot
# Specify plots position in points: [left bottom right top]
page = [ 0.0, 0.0, 612.0, 792.0 ] # corresponding to papertype='letter'
plot1a = [ 89.0, 497.0, 480.0, 670.0 ]
plot1b = [ 89.0, 324.0, 480.0, 497.0 ]
cbar = [ 506.0, 324.0, 531.0, 670.0 ]
plot2 = [ 89.0, 99.0, 480.0, 272.0 ]
plot = [ 89.0, 99.0, 480.0, 670.0 ]
#plt.rcParams['legend.fontsize'] = 10
plt.rcParams['figure.dpi'] = 72.0
plt.rcParams['figure.figsize'] = [ (page[2]-page[0])/72.0, (page[3]-page[1])/72.0 ]
fig = plt.figure()
ax1a = plt.axes(page_to_ndc(plot,page))
ax1a.set_ylim(5300,0)
ax1a.set_ylabel('Depth (m)')
ax1a.set_xlabel('Ocean Temp (C)',color='r')
ax1a.plot(ztemp,z,ls='-',color='r')
ax1b = ax1a.twiny()
ax1b.set_xlabel('Ocean Salinity (PSU)',color='b')
ax1b.plot(zsalt,z,ls='-',color='b')
# Figure title
xy = page_to_ndc([280.0,730.0],page)
fig.text(xy[0],xy[1],model_label,ha="center",size="x-large")
# Save figure
fig.savefig("ocean_temp_salt.ps")
| gpl-3.0 |
ramseylab/cerenkov | feature_extraction/genome_seg_util.py | 1 | 2437 | from genome_browser_client import GenomeBrowserClient
from abstract_feature_util import AbstractFeatureUtil
# def extract_genome_seg_annot(src, dest):
# """
# Match every SNP to GSs by rsid;
#
# E.g. A matched group for SNP `rs1` is like below
#
# name seg
# rs1 ['Repr','Quies']
# """
#
# rsid = pandas.read_csv(src, sep='\t').loc[:, "name"]
#
# with GenomeBrowserClient('local_hg19') as gb_client:
# result = gb_client.identify_genome_seg(rsid)
#
# result = CT.remove_dup_on_chrY(result)
#
# result.to_csv(dest, sep='\t', header=True, index=False)
class GenomeSegUtil(AbstractFeatureUtil):
def __init__(self, reproduce_osu17=False):
super(GenomeSegUtil, self).__init__()
self.reproduce_osu17 = reproduce_osu17
def get_feat(self, _input):
"""
:param _input: the SNP data frame
:return:
"""
snp_dfm = _input.loc[:, ['chrom', 'name']]
with GenomeBrowserClient(self.db_config_key) as gb_client:
result = gb_client.identify_genome_seg(snp_dfm.loc[:, 'name'])
# result = ct.remove_dup_on_chrY(result)
if not self.reproduce_osu17:
# Use clearer names for osu18
result = result.rename(columns={'ch1Name': 'ChromhmmGm12878',
'ch2Name': 'ChromhmmH1hesc',
'ch3Name': 'ChromhmmHelas3',
'ch4Name': 'ChromhmmHepg2',
'ch5Name': 'ChromhmmHuvec',
'ch6Name': 'ChromhmmK562',
'sw1Name': 'SegwayGm12878',
'sw2Name': 'SegwayH1hesc',
'sw3Name': 'SegwayHelas3',
'sw4Name': 'SegwayHepg2',
'sw5Name': 'SegwayHuvec',
'sw6Name': 'SegwayK562'})
snp_dfm = snp_dfm.merge(result, how='left', on=['name', 'chrom'])
return snp_dfm.drop(['chrom'], axis=1).fillna(0)
def save_temp(self, _result):
_result.to_csv(self.temp_dest, sep='\t', header=True, index=False)
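# Usage sketch (not part of the original module; the exact wiring of the
# database configuration is an assumption, since it lives on AbstractFeatureUtil):
#
#     util = GenomeSegUtil(reproduce_osu17=False)
#     util.db_config_key = 'local_hg19'  # same key as in the commented-out code above
#     feat_dfm = util.get_feat(snp_dfm)  # snp_dfm: DataFrame with 'chrom' and 'name' columns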
| apache-2.0 |
valexandersaulys/airbnb_kaggle_contest | prototype_alpha/randomForest_take4.py | 1 | 1587 | """
Take 4 on protoAlpha, predicting country_destination with QuadraticDiscriminantAnalysis.
"""
import pandas as pd
from sklearn.cross_validation import train_test_split
training = pd.read_csv("protoAlpha_training.csv")
testing = pd.read_csv("protoAlpha_testing.csv")
X = training.iloc[:,1:-1].values
y = training['country_destination'].values
"""
# Use Discriminant Analysis
from sklearn.discriminant_analysis import LinearDiscriminantAnalysis
trans = LinearDiscriminantAnalysis(n_components=3)
trans.fit(X,y)
X = trans.transform(X)
"""
# Split Up Data
x_train,x_valid,y_train,y_valid = train_test_split(X,y,test_size=0.3,random_state=None)
# Train classifier
from sklearn.discriminant_analysis import QuadraticDiscriminantAnalysis
clf = QuadraticDiscriminantAnalysis()
clf.fit(x_train,y_train)
# Run Predictions
from sklearn.metrics import confusion_matrix, accuracy_score
y_preds = clf.predict(x_valid)
print( confusion_matrix(y_valid,y_preds) );
print( "Accuracy: %f" % (accuracy_score(y_valid,y_preds)) );
f = open('randomForest_take4.txt', 'w')
f.write( str(confusion_matrix(y_valid,y_preds)) );
f.write( "\nAccuracy: %f" % (accuracy_score(y_valid,y_preds)) );
f.write( "\nQuadraticDiscriminantAnalysis()" );
f.write( "\nclf = RandomForestClassifier(n_estimators=1000)" );
# Now on to final submission
x_final = testing.iloc[:,1:].values
# The LinearDiscriminantAnalysis block above is disabled (wrapped in a string),
# so `trans` is undefined here; predict directly on the raw features instead.
# x_final = trans.transform(x_final)
y_final = clf.predict(x_final).reshape([62096,]);
y_final = pd.DataFrame(y_final);
numbahs = testing['id']
df = pd.concat([numbahs,y_final],axis=1)
df.columns = ['id','country']
df.to_csv("randomForest_take4.csv",index=False)
| gpl-2.0 |
joernhees/scikit-learn | examples/linear_model/plot_lasso_dense_vs_sparse_data.py | 348 | 1862 | """
==============================
Lasso on dense and sparse data
==============================
We show that linear_model.Lasso provides the same results for dense and sparse
data and that in the case of sparse data the speed is improved.
"""
print(__doc__)
from time import time
from scipy import sparse
from scipy import linalg
from sklearn.datasets.samples_generator import make_regression
from sklearn.linear_model import Lasso
###############################################################################
# The two Lasso implementations on Dense data
print("--- Dense matrices")
X, y = make_regression(n_samples=200, n_features=5000, random_state=0)
X_sp = sparse.coo_matrix(X)
alpha = 1
sparse_lasso = Lasso(alpha=alpha, fit_intercept=False, max_iter=1000)
dense_lasso = Lasso(alpha=alpha, fit_intercept=False, max_iter=1000)
t0 = time()
sparse_lasso.fit(X_sp, y)
print("Sparse Lasso done in %fs" % (time() - t0))
t0 = time()
dense_lasso.fit(X, y)
print("Dense Lasso done in %fs" % (time() - t0))
print("Distance between coefficients : %s"
% linalg.norm(sparse_lasso.coef_ - dense_lasso.coef_))
###############################################################################
# The two Lasso implementations on Sparse data
print("--- Sparse matrices")
Xs = X.copy()
Xs[Xs < 2.5] = 0.0
Xs = sparse.coo_matrix(Xs)
Xs = Xs.tocsc()
print("Matrix density : %s %%" % (Xs.nnz / float(X.size) * 100))
alpha = 0.1
sparse_lasso = Lasso(alpha=alpha, fit_intercept=False, max_iter=10000)
dense_lasso = Lasso(alpha=alpha, fit_intercept=False, max_iter=10000)
t0 = time()
sparse_lasso.fit(Xs, y)
print("Sparse Lasso done in %fs" % (time() - t0))
t0 = time()
dense_lasso.fit(Xs.toarray(), y)
print("Dense Lasso done in %fs" % (time() - t0))
print("Distance between coefficients : %s"
% linalg.norm(sparse_lasso.coef_ - dense_lasso.coef_))
| bsd-3-clause |
theislab/scanpy | scanpy/tests/notebooks/test_paga_paul15_subsampled.py | 1 | 4129 | # PAGA for hematopoiesis in mouse [(Paul *et al.*, 2015)](https://doi.org/10.1016/j.cell.2015.11.013)
# Hematopoiesis: trace myeloid and erythroid differentiation for data of [Paul *et al.* (2015)](http://doi.org/10.1016/j.cell.2015.11.013).
#
# This is the subsampled notebook for testing.
from pathlib import Path
import numpy as np
from matplotlib.testing import setup
setup()
import scanpy as sc
HERE: Path = Path(__file__).parent
ROOT = HERE / '_images_paga_paul15_subsampled'
FIGS = HERE / 'figures'
def test_paga_paul15_subsampled(image_comparer, plt):
save_and_compare_images = image_comparer(ROOT, FIGS, tol=25)
adata = sc.datasets.paul15()
sc.pp.subsample(adata, n_obs=200)
del adata.uns['iroot']
adata.X = adata.X.astype('float64')
# Preprocessing and Visualization
sc.pp.recipe_zheng17(adata)
sc.tl.pca(adata, svd_solver='arpack')
sc.pp.neighbors(adata, n_neighbors=4, n_pcs=20)
sc.tl.draw_graph(adata)
sc.pl.draw_graph(adata, color='paul15_clusters', legend_loc='on data')
sc.tl.diffmap(adata)
sc.tl.diffmap(adata) # See #1262
sc.pp.neighbors(adata, n_neighbors=10, use_rep='X_diffmap')
sc.tl.draw_graph(adata)
sc.pl.draw_graph(adata, color='paul15_clusters', legend_loc='on data')
# Clustering and PAGA
sc.tl.louvain(adata, resolution=1.0)
sc.tl.paga(adata, groups='louvain')
# sc.pl.paga(adata, color=['louvain', 'Hba-a2', 'Elane', 'Irf8'])
# sc.pl.paga(adata, color=['louvain', 'Itga2b', 'Prss34'])
adata.obs['louvain_anno'] = adata.obs['louvain']
sc.tl.paga(adata, groups='louvain_anno')
PAGA_CONNECTIVITIES = np.array(
[
[0.0, 0.128553, 0.0, 0.07825, 0.0, 0.0, 0.238741, 0.0, 0.0, 0.657049],
[
0.128553,
0.0,
0.480676,
0.257505,
0.533036,
0.043871,
0.0,
0.032903,
0.0,
0.087743,
],
]
)
assert np.allclose(
adata.uns['paga']['connectivities'].toarray()[:2],
PAGA_CONNECTIVITIES,
atol=1e-4,
)
sc.pl.paga(adata, threshold=0.03)
# !!!! no clue why it doesn't produce images with the same shape
# save_and_compare_images('paga')
sc.tl.draw_graph(adata, init_pos='paga')
sc.pl.paga_compare(
adata,
threshold=0.03,
title='',
right_margin=0.2,
size=10,
edge_width_scale=0.5,
legend_fontsize=12,
fontsize=12,
frameon=False,
edges=True,
)
# slight deviations because of graph drawing
# save_and_compare_images('paga_compare')
adata.uns['iroot'] = np.flatnonzero(adata.obs['louvain_anno'] == '3')[0]
sc.tl.dpt(adata)
gene_names = [
'Gata2',
'Gata1',
'Klf1',
'Hba-a2', # erythroid
'Elane',
'Cebpe', # neutrophil
'Irf8',
] # monocyte
paths = [
('erythrocytes', [3, 9, 0, 6]),
('neutrophils', [3, 1, 2]),
('monocytes', [3, 1, 4, 5]),
]
adata.obs['distance'] = adata.obs['dpt_pseudotime']
_, axs = plt.subplots(
ncols=3, figsize=(6, 2.5), gridspec_kw={'wspace': 0.05, 'left': 0.12}
)
plt.subplots_adjust(left=0.05, right=0.98, top=0.82, bottom=0.2)
for ipath, (descr, path) in enumerate(paths):
_, data = sc.pl.paga_path(
adata,
path,
gene_names,
show_node_names=False,
ax=axs[ipath],
ytick_fontsize=12,
left_margin=0.15,
n_avg=50,
annotations=['distance'],
show_yticks=True if ipath == 0 else False,
show_colorbar=False,
color_map='Greys',
color_maps_annotations={'distance': 'viridis'},
title='{} path'.format(descr),
return_data=True,
show=False,
)
# add a test for this at some point
# data.to_csv('./write/paga_path_{}.csv'.format(descr))
save_and_compare_images('paga_path')
| bsd-3-clause |
derkling/trappy | trappy/stats/Aggregator.py | 1 | 5804 | # Copyright 2015-2015 ARM Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""Aggregators are responsible for aggregating information
for further analysis. These aggregations can produce
both scalars and vectors and each aggregator implementation
is expected to handle its "aggregation" mechanism.
"""
from trappy.plotter.Utils import listify
from trappy.stats.Indexer import MultiTriggerIndexer
from abc import ABCMeta, abstractmethod
class AbstractAggregator(object):
"""Abstract class for all aggregators"""
__metaclass__ = ABCMeta
# The current implementation needs the index to
# be unified across data frames to account for
# variable sampling across data frames
def __init__(self, indexer, aggfunc=None):
"""Args:
indexer (Indexer): Indexer is passed on by the Child class
for handling indices during correlation
aggfunc (function): Function that accepts a pandas.Series and
process it for aggregation.
"""
self._result = {}
self._aggregated = False
self._aggfunc = aggfunc
self.indexer = indexer
def _add_result(self, pivot, data_frame, value):
"""Add the result for the given pivot and run
Args:
pivot (hashable): The pivot for which the result is being generated
data_frame (pandas.DataFrame): pandas data frame of result values
value (str, numeric): If value is str, the corresponding
column is used as a vector of resultant values. If
numeric, each index in the data frame gets that numeric value.
"""
if pivot not in self._result:
self._result[pivot] = self.indexer.series()
for idx in data_frame.index:
if isinstance(value, basestring):
self._result[pivot][idx] = data_frame[value][idx]
else:
self._result[pivot][idx] = value
@abstractmethod
def aggregate(self, run_idx, **kwargs):
"""Abstract Method for aggregating data for various
pivots.
Args:
run_idx: Index of the run to be aggregated
Returns:
The aggregated result
"""
raise NotImplementedError("Method Not Implemented")
class MultiTriggerAggregator(AbstractAggregator):
"""This aggregator accepts a list of triggers and each trigger has
a value associated with it.
"""
def __init__(self, triggers, topology, aggfunc=None):
"""
Args:
triggers (trappy.stat.Trigger): A list or a singular trigger object
topology (trappy.stat.Topology): A topology object for aggregation
levels
aggfunc: A function to be applied on each series being aggregated.
For each topology node, a series will be generated and this
will be processed by the aggfunc
"""
self._triggers = triggers
self.topology = topology
super(
MultiTriggerAggregator,
self).__init__(MultiTriggerIndexer(triggers), aggfunc)
def aggregate(self, **kwargs):
"""
Aggregate implementation that aggregates
triggers for a given topological level
Args:
level: the topological level to aggregate over.
Defaults to "all" if not specified.
Returns:
A scalar or a vector aggregated result.
Each group in the level produces an element
in the result list with a one to one
index correspondence
groups["level"] = [[1,2], [3,4]]
result = [result_1, result_2]
"""
level = kwargs.pop("level", "all")
# This function is a hot spot in the code. It is
# worth considering a memoize decorator to cache
# the function. The memoization can also be
# maintained by the aggregator object. This will
# help the code scale efficiently
level_groups = self.topology.get_level(level)
result = []
if not self._aggregated:
self._aggregate_base()
for group in level_groups:
group = listify(group)
# guard: aggfunc may be None (see __init__), mirroring the check used for the other nodes
level_res = (self._aggfunc(self._result[group[0]], **kwargs)
             if self._aggfunc is not None else self._result[group[0]])
for node in group[1:]:
if self._aggfunc is not None:
node_res = self._aggfunc(self._result[node], **kwargs)
else:
node_res = self._result[node]
level_res += node_res
result.append(level_res)
return result
def _aggregate_base(self):
"""A memoized function to generate the base series
for each node in the flattened topology.
eg topo["level_1"] = [[1, 2], [3, 4]]
This function will generate the fundamental
aggregations for all nodes 1, 2, 3, 4 and
store the result in _agg_result
"""
for trigger in self._triggers:
for node in self.topology.flatten():
result_df = trigger.generate(node)
self._add_result(node, result_df, trigger.value)
self._aggregated = True
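# Usage sketch (illustrative comment only; Trigger and Topology objects are
# constructed elsewhere in trappy, and the level name depends on how the
# Topology was built):
#
#     agg = MultiTriggerAggregator(triggers, topology, aggfunc=numpy.mean)
#     per_group = agg.aggregate(level="cluster")  # one element per group in the level
#     overall = agg.aggregate(level="all")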
| apache-2.0 |
cl4rke/scikit-learn | sklearn/__check_build/__init__.py | 345 | 1671 | """ Module to give helpful messages to the user that did not
compile the scikit properly.
"""
import os
INPLACE_MSG = """
It appears that you are importing a local scikit-learn source tree. For
this, you need to have an inplace install. Maybe you are in the source
directory and you need to try from another location."""
STANDARD_MSG = """
If you have used an installer, please check that it is suited for your
Python version, your operating system and your platform."""
def raise_build_error(e):
# Raise a comprehensible error and list the contents of the
# directory to help debugging on the mailing list.
local_dir = os.path.split(__file__)[0]
msg = STANDARD_MSG
if local_dir == "sklearn/__check_build":
# Picking up the local install: this will work only if the
# install is an 'inplace build'
msg = INPLACE_MSG
dir_content = list()
for i, filename in enumerate(os.listdir(local_dir)):
if ((i + 1) % 3):
dir_content.append(filename.ljust(26))
else:
dir_content.append(filename + '\n')
raise ImportError("""%s
___________________________________________________________________________
Contents of %s:
%s
___________________________________________________________________________
It seems that scikit-learn has not been built correctly.
If you have installed scikit-learn from source, please do not forget
to build the package before using it: run `python setup.py install` or
`make` in the source directory.
%s""" % (e, local_dir, ''.join(dir_content).strip(), msg))
try:
from ._check_build import check_build
except ImportError as e:
raise_build_error(e)
| bsd-3-clause |
theislab/scvelo | scvelo/plotting/velocity.py | 1 | 9788 | from .. import settings
from ..preprocessing.moments import second_order_moments
from ..tools.rank_velocity_genes import rank_velocity_genes
from .scatter import scatter
from .utils import (
savefig_or_show,
default_basis,
default_size,
get_basis,
get_figure_params,
)
import numpy as np
import pandas as pd
import matplotlib.pyplot as pl
from matplotlib import rcParams
from scipy.sparse import issparse
def velocity(
adata,
var_names=None,
basis=None,
vkey="velocity",
mode=None,
fits=None,
layers="all",
color=None,
color_map=None,
colorbar=True,
perc=[2, 98],
alpha=0.5,
size=None,
groupby=None,
groups=None,
legend_loc="none",
legend_fontsize=8,
use_raw=False,
fontsize=None,
figsize=None,
dpi=None,
show=None,
save=None,
ax=None,
ncols=None,
**kwargs,
):
"""Phase and velocity plot for set of genes.
The phase plot shows spliced against unspliced expressions with steady-state fit.
Further the embedding is shown colored by velocity and expression.
Arguments
---------
adata: :class:`~anndata.AnnData`
Annotated data matrix.
var_names: `str` or list of `str` (default: `None`)
Which variables to show.
basis: `str` (default: `'umap'`)
Key for embedding coordinates.
mode: `'stochastic'` or `None` (default: `None`)
Whether to show the covariability phase portrait.
fits: `str` or list of `str` (default: `['velocity', 'dynamics']`)
Which steady-state estimates to show.
layers: `str` or list of `str` (default: `'all'`)
Which layers to show.
color: `str`, list of `str` or `None` (default: `None`)
Key for annotations of observations/cells or variables/genes
color_map: `str` or tuple (default: `['RdYlGn', 'gnuplot_r']`)
String denoting matplotlib color map. If tuple is given, first and latter
color map correspond to velocity and expression, respectively.
perc: tuple, e.g. [2,98] (default: `[2,98]`)
Specify percentile for continuous coloring.
groups: `str`, `list` (default: `None`)
Subset of groups, e.g. [‘g1’, ‘g2’], to which the plot shall be restricted.
groupby: `str`, `list` or `np.ndarray` (default: `None`)
Key of observations grouping to consider.
legend_loc: str (default: 'none')
Location of legend, either 'on data', 'right margin'
or valid keywords for matplotlib.legend.
size: `float` (default: 5)
Point size.
alpha: `float` (default: 1)
Set blending - 0 transparent to 1 opaque.
fontsize: `float` (default: `None`)
Label font size.
figsize: tuple (default: `(7,5)`)
Figure size.
dpi: `int` (default: 80)
Figure dpi.
show: `bool`, optional (default: `None`)
Show the plot, do not return axis.
save: `bool` or `str`, optional (default: `None`)
If `True` or a `str`, save the figure. A string is appended to the default
filename. Infer the filetype if ending on {'.pdf', '.png', '.svg'}.
ax: `matplotlib.Axes`, optional (default: `None`)
A matplotlib axes object. Only works if plotting a single component.
ncols: `int` or `None` (default: `None`)
Number of columns to arrange multiplots into.
"""
basis = default_basis(adata) if basis is None else get_basis(adata, basis)
color, color_map = kwargs.pop("c", color), kwargs.pop("cmap", color_map)
if fits is None:
fits = ["velocity", "dynamics"]
if color_map is None:
color_map = ["RdYlGn", "gnuplot_r"]
if isinstance(groupby, str) and groupby in adata.obs.keys():
if (
"rank_velocity_genes" not in adata.uns.keys()
or adata.uns["rank_velocity_genes"]["params"]["groupby"] != groupby
):
rank_velocity_genes(adata, vkey=vkey, n_genes=10, groupby=groupby)
names = np.array(adata.uns["rank_velocity_genes"]["names"].tolist())
if groups is None:
var_names = names[:, 0]
else:
groups = [groups] if isinstance(groups, str) else groups
categories = adata.obs[groupby].cat.categories
idx = np.array([any([g in group for g in groups]) for group in categories])
var_names = np.hstack(names[idx, : int(10 / idx.sum())])
elif var_names is not None:
if isinstance(var_names, str):
var_names = [var_names]
else:
var_names = [var for var in var_names if var in adata.var_names]
else:
raise ValueError("No var_names or groups specified.")
var_names = pd.unique(var_names)
if use_raw or "Ms" not in adata.layers.keys():
skey, ukey = "spliced", "unspliced"
else:
skey, ukey = "Ms", "Mu"
layers = [vkey, skey] if layers == "all" else layers
layers = [layer for layer in layers if layer in adata.layers.keys() or layer == "X"]
fits = list(adata.layers.keys()) if fits == "all" else fits
fits = [fit for fit in fits if f"{fit}_gamma" in adata.var.keys()] + ["dynamics"]
stochastic_fits = [fit for fit in fits if f"variance_{fit}" in adata.layers.keys()]
nplts = 1 + len(layers) + (mode == "stochastic") * 2
ncols = 1 if ncols is None else ncols
nrows = int(np.ceil(len(var_names) / ncols))
ncols = int(ncols * nplts)
figsize = rcParams["figure.figsize"] if figsize is None else figsize
figsize, dpi = get_figure_params(figsize, dpi, ncols / 2)
if ax is None:
gs_figsize = (figsize[0] * ncols / 2, figsize[1] * nrows / 2)
ax = pl.figure(figsize=gs_figsize, dpi=dpi)
gs = pl.GridSpec(nrows, ncols, wspace=0.5, hspace=0.8)
# half size, since fontsize is halved in width and height
size = default_size(adata) / 2 if size is None else size
fontsize = rcParams["font.size"] * 0.8 if fontsize is None else fontsize
scatter_kwargs = dict(colorbar=colorbar, perc=perc, size=size, use_raw=use_raw)
scatter_kwargs.update(dict(fontsize=fontsize, legend_fontsize=legend_fontsize))
for v, var in enumerate(var_names):
_adata = adata[:, var]
s, u = _adata.layers[skey], _adata.layers[ukey]
if issparse(s):
s, u = s.A, u.A
# spliced/unspliced phase portrait with steady-state estimate
ax = pl.subplot(gs[v * nplts])
cmap = color_map
if isinstance(color_map, (list, tuple)):
cmap = color_map[-1] if color in ["X", skey] else color_map[0]
if "xlabel" not in kwargs:
kwargs["xlabel"] = "spliced"
if "ylabel" not in kwargs:
kwargs["ylabel"] = "unspliced"
legend_loc_lines = "none" if v < len(var_names) - 1 else legend_loc
scatter(
adata,
basis=var,
color=color,
color_map=cmap,
frameon=True,
title=var,
alpha=alpha,
vkey=fits,
show=False,
ax=ax,
save=False,
legend_loc_lines=legend_loc_lines,
**scatter_kwargs,
**kwargs,
)
# velocity and expression plots
for l, layer in enumerate(layers):
ax = pl.subplot(gs[v * nplts + l + 1])
title = "expression" if layer in ["X", skey] else layer
# _kwargs = {} if title == 'expression' else kwargs
cmap = color_map
if isinstance(color_map, (list, tuple)):
cmap = color_map[-1] if layer in ["X", skey] else color_map[0]
scatter(
adata,
basis=basis,
color=var,
layer=layer,
title=title,
color_map=cmap,
alpha=alpha,
frameon=False,
show=False,
ax=ax,
save=False,
**scatter_kwargs,
**kwargs,
)
if mode == "stochastic":
ss, us = second_order_moments(_adata)
s, u, ss, us = s.flatten(), u.flatten(), ss.flatten(), us.flatten()
fit = stochastic_fits[0]
ax = pl.subplot(gs[v * nplts + len(layers) + 1])
beta, offset = 1, 0
if f"{fit}_beta" in adata.var.keys():
beta = _adata.var[f"{fit}_beta"]
if f"{fit}_offset" in adata.var.keys():
offset = _adata.var[f"{fit}_offset"]
x = np.array(2 * (ss - s ** 2) - s)
y = np.array(2 * (us - u * s) + u + 2 * s * offset / beta)
kwargs["xlabel"] = r"2 $\Sigma_s - \langle s \rangle$"
kwargs["ylabel"] = r"2 $\Sigma_{us} + \langle u \rangle$"
scatter(
adata,
x=x,
y=y,
color=color,
title=var,
frameon=True,
ax=ax,
save=False,
show=False,
**scatter_kwargs,
**kwargs,
)
xnew = np.linspace(np.min(x), np.max(x) * 1.02)
for fit in stochastic_fits:
gamma, beta, offset2 = 1, 1, 0
if f"{fit}_gamma" in adata.var.keys():
gamma = _adata.var[f"{fit}_gamma"].values
if f"{fit}_beta" in adata.var.keys():
beta = _adata.var[f"{fit}_beta"].values
if f"{fit}_offset2" in adata.var.keys():
offset2 = _adata.var[f"{fit}_offset2"].values
ynew = gamma / beta * xnew + offset2 / beta
pl.plot(xnew, ynew, c="k", linestyle="--")
savefig_or_show(dpi=dpi, save=save, show=show)
if show is False:
return ax
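# Usage sketch (not part of the library source; assumes an AnnData object that
# has already been run through scvelo's moments and velocity steps):
#
#     import scvelo as scv
#     scv.pp.moments(adata)
#     scv.tl.velocity(adata)
#     scv.pl.velocity(adata, var_names=['Gata1', 'Elane'], basis='umap', ncols=2)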
| bsd-3-clause |
cbmoore/statsmodels | tools/code_maintenance.py | 37 | 2307 | """
Code maintenance script modified from PyMC
"""
#!/usr/bin/env python
import sys
import os
# This is a function, not a test case, because it has to be run from inside
# the source tree to work well.
mod_strs = ['IPython', 'pylab', 'matplotlib', 'scipy','Pdb']
dep_files = {}
for mod_str in mod_strs:
dep_files[mod_str] = []
def remove_whitespace(fname):
# Remove trailing whitespace
fd = open(fname,mode='U') # open in universal newline mode
lines = []
for line in fd.readlines():
lines.append( line.rstrip() )
fd.close()
fd = open(fname,mode='w')
fd.seek(0)
for line in lines:
fd.write(line+'\n')
fd.close()
# print 'Removed whitespace from %s'%fname
def find_whitespace(fname):
fd = open(fname, mode='U')
for line in fd.readlines():
#print repr(line)
if ' \n' in line:
print fname
break
# print
print_only = True
# ====================
# = Strip whitespace =
# ====================
for dirname, dirs, files in os.walk('.'):
if dirname[1:].find('.')==-1:
# print dirname
for fname in files:
if fname[-2:] in ['c', 'f'] or fname[-3:]=='.py' or fname[-4:] in ['.pyx', '.txt', '.tex', '.sty', '.cls'] or fname.find('.')==-1:
# print fname
if print_only:
find_whitespace(dirname + '/' + fname)
else:
remove_whitespace(dirname + '/' + fname)
"""
# ==========================
# = Check for dependencies =
# ==========================
for dirname, dirs, files in os.walk('pymc'):
for fname in files:
if fname[-3:]=='.py' or fname[-4:]=='.pyx':
if dirname.find('sandbox')==-1 and fname != 'test_dependencies.py'\
and dirname.find('examples')==-1:
for mod_str in mod_strs:
if file(dirname+'/'+fname).read().find(mod_str)>=0:
dep_files[mod_str].append(dirname+'/'+fname)
print 'Instances of optional dependencies found are:'
for mod_str in mod_strs:
print '\t'+mod_str+':'
for fname in dep_files[mod_str]:
print '\t\t'+fname
if len(dep_files['Pdb'])>0:
raise ValueError, 'Looks like Pdb was not commented out in '+', '.join(dep_files[mod_str])
"""
| bsd-3-clause |
davofis/Optical_Coherence_Wavelets | ToRun.py | 1 | 10065 | # coding=utf-8
#*********************************************************************
# PROGRAM NAME: ToRun.py *
# *
#*********************************************************************
# AUTHOR: David Alejandro Vargas O *
# [email protected] *
# AFFILIATION: LMU Ludwig Maximilian University of Munich *
# DATE: 27.03.2015 *
#*********************************************************************
# DESCRIPTION:
# This file is a part of the Project SPATIAL COHERENCE WAVELETS.
# ToRun is the principal file of the project SPATIAL COHERENCE WAVELE-
# TS. here, the methods that compute the marginal power spectrum, the
# Intensity distribution function, and the degree of spatial coheren-
# ce function are called. these functions computed under specific phy-
# sical and geometric parameters became the key point in the analysis
# of optical fields in arbitrary states of spacial coherence that pro-
# pagate between two planes, aperture or entrance plane 'AP' and obser
# vation or exit plane 'OP' according with the theory of spatial cohe-
# rence wavelets [1].
# 'SPATIAL COHERENCE WAVELETS' is a project which focus on unidimensi-
# onal diffractional processes under the nonparaxial modelling of op-
# tical fields. [1]. The simulation is based on the theory of spatial
# coherence wavelets which purpose is the modelling of optical fields
# in any state of spatial coherence.
#*********************************************************************
# PARAMETERS:
# so : Maximum intensity.
# width : Slide width.
# so_1 : Maximum intensity left slide.
# so_2 : Maximum intensity right slide.
# width_1 : Left slide width.
# width_2 : Right slide width.
# slide_space : Space between slides.
# a : Amplitude.
# w : Spatial frequency.
#
# sources : Point sources amount.
# pixel_number : sampling pixel amount in xa
# wavelength : Optical field wavelength.
# z : Aperture plane - Exit plane distance.
# aperture_size : Aperture size
# exit_size : Exit size
#
# sigma : Gaussian standard deviation.
# gamma : Lorentzian standard deviation.
#
# INPUT: input1, input2
# OUTPUT: output1, output2
#
# NOTE: All length units must be in micrometers.
#
# REFERENCES: ********************************************************
# [1] Castañeda R, Sucerquia J. Non-approximated numerical modeling *
# of propagation of light in any state of spatial coherence *
# [2] R. Castañeda,* D. Vargas, E. Franco. Discreteness of the set of*
# radiant point sources: a physical feature of the second-order wave *
# -fronts *
# [3] R. Castañeda,* D. Vargas, E. Franco. Spatial coherence of light*
# and a fundamental discontinuity of classical second-order wave *
# fronts *
#*********************************************************************
# COPYRIGHT *
# Copyright (C) 2015 David Alejandro Vargas Otalora *
# *
# This program is free software: you can redistribute it and/or modif*
# y it under the terms of the GNU General Public License as published*
# by the Free Software Foundation, either version 3 of the License, *
# or(at your option) any later version. *
# *
# This program is distributed in the hope that it will be useful, *
# but WITHOUT ANY WARRANTY; without even the implied warranty of *
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the *
# GNU General Public License for more details. *
# *
# You should have received a copy of the GNU General Public License *
# along with this program. If not, see http://www.gnu.org/licenses/ *
#*********************************************************************
# IMPORT MODULES
import Intensity
import CoheDegree
import MarPowSpec
import numpy as np
import matplotlib.pyplot as plt
#---------------------------------------------------------------------------
# SET PARAMETERS
# COORDINATES AXIS: xi_A(AP APERTURE PLANE), r_A(OP OBSERVATION PLANE)
wavelength = 0.632 # Optical field wavelength um.
z = 50 # AP-OP distance um.
sources = 20 # Point sources amount in xi_A.
pixel_number = 1024 # sampling pixel amount in r_A.
alpha = np.sqrt(0.1) # consider 99% of the free space diffraction envelope
AP_width = 10 # AP Aperture plane width um.
OP_width = np.sqrt(np.power((1+np.sqrt(1+8*alpha))/(4*alpha),2)-1)*z + AP_width
xi_A = np.linspace(-0.5 * AP_width, 0.5 * AP_width, 2*sources - 1) # AP Axis
r_A = np.linspace(-0.5 * OP_width, 0.5 * OP_width, pixel_number) # OP Axis
# INTENSITY
so = 1 # Maximum intensity.
#width = 10 # Slide width.
#so_1 = 100 # Maximum intensity left slide.
#so_2 = 100 # Maximum intensity right slide.
#width_1 = 40 # Left slide width.
#width_2 = 40 # Right slide width.
#slide_space = 60 # Space between slides.
#a = 100 # Amplitude.
#w = 4 # Spatial frequency.
# SET COHERENCE DEGREE PARAMETERS
sigma = 1000000 # Gaussian standard deviation.
#gamma = 1000 # Lorentzian standard deviation.
##FBeta = 5
#wavelength = wide/((sources-1)*FBeta);
#---------------------------------------------------------------------------
# SET INTENSITY
intensity = lambda x: Intensity.slide(x, so, AP_width)
# SET COHERENCE
cohe_degree = lambda x: CoheDegree.gauss_cohe(x, sigma)
# MARGINAL POWER SPECTRUM
MPSpectrum = lambda xi_A, r_A, z: MarPowSpec.nonparaxial(xi_A, r_A, z,
wavelength, intensity, cohe_degree)
#---------------------------------------------------------------------------
# ANALYSIS OF LIGHT PROPAGATION
# COMPUTE MARGINAL POWER SPECTRUM
MPS_rad, MPS_vir = MPSpectrum(xi_A, r_A, z)
Mar_Pow_Spectrum = MPS_rad + MPS_vir
# COMPUTE POWER SPECTRUMS
PS_rad_OP = MPS_rad.sum(axis=1) # Radiant power spectrum at observation plane
PS_vir_OP = MPS_vir.sum(axis=1) # Virtual power spectrum at observation plane
PS_OP = PS_rad_OP + PS_vir_OP # Power spectrum at observation plane
PS_AP = Mar_Pow_Spectrum.sum(axis=0) # Power spectrum at aperture plane AP
# COMPUTE NORMALIZED POWER SPECTRUMS
N_PS_rad_OP = PS_rad_OP/np.max(PS_rad_OP) # PS_rad_OP Normalized
N_PS_vir_OP = PS_vir_OP/np.max(PS_vir_OP) # PS_vir_OP Normalized
N_PS_OP = PS_OP/np.max(PS_OP) # PS_OP Normalized
N_PS_AP = PS_AP/np.max(PS_AP) # PS_AP Normalized
#---------------------------------------------------------------------------
# SET GRAPHICS
#MARGINAL POWER SPECTRUM, ENTRANCE AND EXIT POWER SPECTRUM
plt.figure(figsize=(16,10), dpi=80)
plt.subplot(1, 2, 1)
plt.imshow(Mar_Pow_Spectrum, cmap='gray', aspect='auto',
extent=[-0.5*AP_width, 0.5*AP_width,
-0.5*OP_width, 0.5*OP_width])
plt.title('Marginal Power Spectrum')
plt.xlabel(r'$ \xi_A \ (\mu m)$')
plt.ylabel(r'$ r_A \ (\mu m)$')
plt.subplot(1, 2, 2)
#plt.plot(xa, s_xaNormal)
plt.fill_between(r_A, 0, N_PS_OP) # 'r', alpha=0.7
plt.plot(r_A, N_PS_rad_OP, 'r--')
plt.title('Power Spectrum - Observation plane')
plt.xlabel(r'$ r_A \ (\mu m) $')
plt.ylabel(r'$ S(r_A) $')
#---------------------------------------------------------------------------
## ANALYSIS OF LIGHT PROPAGATION
#longitude = np.linspace(0.1, 1000, 1000)
#power = []
#for z in longitude:
# # COMPUTE MARGINAL POWER SPECTRUM
# mpeReal, mpeVirtual = MarPowSpec(xia, xa, z)
# #Mar_Pow_Spectrum = mpeReal + mpeVirtual
#
# # POWER SPECTRUM
# s_xaReal = mpeReal.sum(axis=1) # REAL LAYER POWER SPECTRUM
# s_xaRealNormal = s_xaReal/np.amax(s_xaReal);
# #s_xaVirtual = mpeVirtual.sum(axis=1) # VIRTUAL LAYER POWER SPECTRUM
# #s_xa = s_xaReal + s_xaVirtual # EXIT POWER SPECTRUM
# #s_xia = Mar_Pow_Spectrum.sum(axis=0) # ENTRANCE POWER SPECTRUM
# #s_xaNormal=s_xa/np.amax(s_xa);
# power.append(s_xaRealNormal)
#power = np.asanyarray(power)
#power = power>0.01
##power = power[:, 0:1024]
#
#plt.imshow(power.T, cmap='hot', aspect='auto',
# extent=[0, z, 0, 0.5*exit_size])
# #title(sprintf('Spectral power along the z axis'))
# #xlabel('Z [um]')
# #ylabel('\xi_A [um]')
#---------------------------------------------------------------------------
## SET GRAPHICS
##MARGINAL POWER SPECTRUM, ENTRANCE AND EXIT POWER SPECTRUM
#plt.figure(figsize=(16,10), dpi=80)
#plt.subplot(1, 3, 1)
#plt.imshow(Mar_Pow_Spectrum, cmap='gray', aspect='auto',
# extent=[-0.5*aperture_size, 0.5*aperture_size,
# -0.5*exit_size, 0.5*exit_size])
#plt.title('Marginal Power Spectrum')
#plt.xlabel(r'$ \xi_A \ (\mu m)$')
#plt.ylabel(r'$ r_A \ (\mu m)$')
#
#plt.subplot(1, 3, 2)
#plt.plot(xa, s_xaNormal)
##plt.fill_between(xa, 0, s_xaNormal) # 'r', alpha=0.7
#plt.plot(xa, s_xaRealNormal)
#plt.title('Exit Power Spectrum')
#plt.xlabel(r'$ r_A \ (\mu m) $')
#plt.ylabel(r'$ S(r_A) $')
#
#plt.subplot(1, 3, 3)
#plt.plot(xa, s_xaReal, xa, s_xaVirtual)
#plt.plot(xa, -s_xaReal, 'b')
#plt.title('Entrance Power Spectrum')
#plt.xlabel(r'$ \xi_A \ (\mu m)$')
#plt.ylabel(r'$ S(\xi_A) $')
#plt.show()
#*********************************************************************
| gpl-3.0 |
davidmatten/eva | prepare_data/transform/transform_v0.3.py | 1 | 3438 | # Written by David Matten. 2014-09-10.
# For Eva Ujeneza.
# This script transforms the file (given as a passed argument) to calculate the difference in days between the minimum visit date, for a patient for a biomarker, and the visit date for a given row.
# The input file is a .csv file, with columns "","merge_no","cohort","patient","lab_dmy","lab_id","lab_v","unit_txt","lab_t","rna_l","tb_drug","drug_res".
# The input file must contain this heading row - as pandas uses the headings to look up columns
# The output file is a .csv file, with the same input headings, and the additional heading: "diff".
# The output file is written to the same directory as the input file.
import os, sys
import datetime
import pandas as pd
import numpy as np
def main(argv):
'''
# Written by David Matten. 2014-09-10.
# For Eva Ujeneza.
# This script transforms the file (given as a passed argument) to calculate the difference in days between the minimum visit date, for a patient for a biomarker, and the visit date for a given row.
# In the case of a missing visit date, a NA is used.
# The input file is a .csv file, with columns "","merge_no","cohort","patient","lab_dmy","lab_id","lab_v","unit_txt","lab_t","rna_l","tb_drug","drug_res".
# The input file must contain this heading row - pandas uses the headings to look up columns.
# The output file is a .csv file, with the same input headings, and the additional heading: "diff".
# The output file is written to the same directory as the input file.
'''
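# Worked example (illustrative values, not taken from the data): for patient
# 'P1' and biomarker 'CD4' with visit dates 2010-01-01 and 2010-03-01, the
# minimum visit date is 2010-01-01, so the two rows get diff = 0 and diff = 59.
# A row with a missing lab_dmy gets diff = NA.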
# Confirm the file exists.
#{{{
if len(argv) != 2:
sys.exit("Only supply the path to file: labdata_2014-07-10.csv")
fn = argv[1]
if not os.path.exists(fn):
sys.exit("it appears the supplied file path does not exist.")
#}}}
df = pd.io.parsers.read_csv(fn)
grouped = df.groupby(['patient', 'lab_id'])
print "constructing dictionary"
dct = {}
n_groups = len(df.groupby(['patient', 'lab_id'], sort=False))
i,p = 0,0
for name, group in df.groupby(['patient', 'lab_id'], sort=False):
i += 1
p1 = int(i*1.0 / n_groups * 100.0)
if (p1 != p) and (p1%5==0):
p = p1
print str(p) + "% done"
m = group.lab_dmy.dropna().min()
n = name[0] + "_" + name[1]
dct[n] = m
print "finished constructing dictionary"
out_fn = os.path.split(fn)[0] +"/"+ os.path.split(fn)[1][:-4] + "_OUT_.csv"
fw = open(out_fn, "w")
print "writing to file."
headings = ",merge_no,cohort,patient,lab_dmy,lab_id,lab_v,unit_txt,lab_t,rna_l,tb_drug,drug_res,diff\n"
fw.write(headings)
j,p,p1 = 0,0,0
n = len(df)
for row in df.iterrows():
j += 1
# raw_input(j)
# raw_input(n)
# raw_input(j*1.0)
# raw_input(n*1.0)
# raw_input(j*1.0 / n*1.0)
# raw_input((j*1.0 / n*1.0) * 100.0)
# raw_input(type( ((j*1.0 / n*1.0) * 100.0) ))
p1 = int((j*1.0 / n*1.0) * 100.0)
if (p1 != p) and (p1%5==0):
p = p1
print str(p) + "% done"
diff = np.nan
if not pd.isnull(row[1][4]):
k = row[1][3] + "_" + row[1][5]
diff = (pd.to_datetime(row[1][4]) - pd.to_datetime(dct[k])).days
l = [str(i) for i in row[1]] + [str(diff)]
x = ",".join(l)
x = x.replace("nan","NA")
fw.write(x + "\n")
fw.close()
print "finished writing to file."
if __name__ == "__main__":
main(sys.argv)
print "end"
| gpl-2.0 |
wlamond/scikit-learn | sklearn/decomposition/nmf.py | 4 | 45047 | """ Non-negative matrix factorization
"""
# Author: Vlad Niculae
# Lars Buitinck
# Mathieu Blondel <[email protected]>
# Tom Dupre la Tour
# License: BSD 3 clause
from __future__ import division, print_function
from math import sqrt
import warnings
import numbers
import time
import numpy as np
import scipy.sparse as sp
from ..base import BaseEstimator, TransformerMixin
from ..utils import check_random_state, check_array
from ..utils.extmath import randomized_svd, safe_sparse_dot, squared_norm
from ..utils.extmath import fast_dot, safe_min
from ..utils.validation import check_is_fitted, check_non_negative
from ..exceptions import ConvergenceWarning
from .cdnmf_fast import _update_cdnmf_fast
EPSILON = np.finfo(np.float32).eps
INTEGER_TYPES = (numbers.Integral, np.integer)
def norm(x):
"""Dot product-based Euclidean norm implementation
See: http://fseoane.net/blog/2011/computing-the-vector-norm/
"""
return sqrt(squared_norm(x))
def trace_dot(X, Y):
"""Trace of np.dot(X, Y.T)."""
return np.dot(X.ravel(), Y.ravel())
def _check_init(A, shape, whom):
A = check_array(A)
if np.shape(A) != shape:
raise ValueError('Array with wrong shape passed to %s. Expected %s, '
'but got %s ' % (whom, shape, np.shape(A)))
check_non_negative(A, whom)
if np.max(A) == 0:
raise ValueError('Array passed to %s is full of zeros.' % whom)
def _beta_divergence(X, W, H, beta, square_root=False):
"""Compute the beta-divergence of X and dot(W, H).
Parameters
----------
X : float or array-like, shape (n_samples, n_features)
W : float or dense array-like, shape (n_samples, n_components)
H : float or dense array-like, shape (n_components, n_features)
beta : float, string in {'frobenius', 'kullback-leibler', 'itakura-saito'}
Parameter of the beta-divergence.
If beta == 2, this is half the Frobenius *squared* norm.
If beta == 1, this is the generalized Kullback-Leibler divergence.
If beta == 0, this is the Itakura-Saito divergence.
Else, this is the general beta-divergence.
square_root : boolean, default False
If True, return np.sqrt(2 * res)
For beta == 2, it corresponds to the Frobenius norm.
Returns
-------
res : float
Beta divergence of X and np.dot(X, H)
"""
beta = _beta_loss_to_float(beta)
# The method can be called with scalars
if not sp.issparse(X):
X = np.atleast_2d(X)
W = np.atleast_2d(W)
H = np.atleast_2d(H)
# Frobenius norm
if beta == 2:
# Avoid the creation of the dense np.dot(W, H) if X is sparse.
if sp.issparse(X):
norm_X = np.dot(X.data, X.data)
norm_WH = trace_dot(np.dot(np.dot(W.T, W), H), H)
cross_prod = trace_dot((X * H.T), W)
res = (norm_X + norm_WH - 2. * cross_prod) / 2.
else:
res = squared_norm(X - np.dot(W, H)) / 2.
if square_root:
return np.sqrt(res * 2)
else:
return res
if sp.issparse(X):
# compute np.dot(W, H) only where X is nonzero
WH_data = _special_sparse_dot(W, H, X).data
X_data = X.data
else:
WH = fast_dot(W, H)
WH_data = WH.ravel()
X_data = X.ravel()
# do not affect the zeros: here 0 ** (-1) = 0 and not infinity
WH_data = WH_data[X_data != 0]
X_data = X_data[X_data != 0]
# used to avoid division by zero
WH_data[WH_data == 0] = EPSILON
# generalized Kullback-Leibler divergence
if beta == 1:
# fast and memory efficient computation of np.sum(np.dot(W, H))
sum_WH = np.dot(np.sum(W, axis=0), np.sum(H, axis=1))
# computes np.sum(X * log(X / WH)) only where X is nonzero
div = X_data / WH_data
res = np.dot(X_data, np.log(div))
# add full np.sum(np.dot(W, H)) - np.sum(X)
res += sum_WH - X_data.sum()
# Itakura-Saito divergence
elif beta == 0:
div = X_data / WH_data
res = np.sum(div) - np.product(X.shape) - np.sum(np.log(div))
# beta-divergence, beta not in (0, 1, 2)
else:
if sp.issparse(X):
# slow loop, but memory efficient computation of :
# np.sum(np.dot(W, H) ** beta)
sum_WH_beta = 0
for i in range(X.shape[1]):
sum_WH_beta += np.sum(fast_dot(W, H[:, i]) ** beta)
else:
sum_WH_beta = np.sum(WH ** beta)
sum_X_WH = np.dot(X_data, WH_data ** (beta - 1))
res = (X_data ** beta).sum() - beta * sum_X_WH
res += sum_WH_beta * (beta - 1)
res /= beta * (beta - 1)
if square_root:
return np.sqrt(2 * res)
else:
return res
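# Quick sanity check of the Frobenius case (comment only, not library code):
# with beta=2 the result is half the squared Frobenius norm of the residual,
#     X = np.array([[1., 0.], [0., 1.]]); W = np.ones((2, 1)); H = np.ones((1, 2))
#     _beta_divergence(X, W, H, beta=2)  # -> 1.0 == 0.5 * ||X - WH||_Fro^2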
def _special_sparse_dot(W, H, X):
"""Computes np.dot(W, H), only where X is non zero."""
if sp.issparse(X):
ii, jj = X.nonzero()
dot_vals = np.multiply(W[ii, :], H.T[jj, :]).sum(axis=1)
WH = sp.coo_matrix((dot_vals, (ii, jj)), shape=X.shape)
return WH.tocsr()
else:
return fast_dot(W, H)
def _compute_regularization(alpha, l1_ratio, regularization):
"""Compute L1 and L2 regularization coefficients for W and H"""
alpha_H = 0.
alpha_W = 0.
if regularization in ('both', 'components'):
alpha_H = float(alpha)
if regularization in ('both', 'transformation'):
alpha_W = float(alpha)
l1_reg_W = alpha_W * l1_ratio
l1_reg_H = alpha_H * l1_ratio
l2_reg_W = alpha_W * (1. - l1_ratio)
l2_reg_H = alpha_H * (1. - l1_ratio)
return l1_reg_W, l1_reg_H, l2_reg_W, l2_reg_H
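# Example (comment only): alpha=1.0, l1_ratio=0.25 with regularization='both'
# yields l1_reg_W = l1_reg_H = 0.25 and l2_reg_W = l2_reg_H = 0.75, i.e. the
# same elastic-net split applied to both W and H.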
def _check_string_param(solver, regularization, beta_loss, init):
allowed_solver = ('cd', 'mu')
if solver not in allowed_solver:
raise ValueError(
'Invalid solver parameter: got %r instead of one of %r' %
(solver, allowed_solver))
allowed_regularization = ('both', 'components', 'transformation', None)
if regularization not in allowed_regularization:
raise ValueError(
'Invalid regularization parameter: got %r instead of one of %r' %
(regularization, allowed_regularization))
# 'mu' is the only solver that handles other beta losses than 'frobenius'
if solver != 'mu' and beta_loss not in (2, 'frobenius'):
raise ValueError(
'Invalid beta_loss parameter: solver %r does not handle beta_loss'
' = %r' % (solver, beta_loss))
if solver == 'mu' and init == 'nndsvd':
warnings.warn("The multiplicative update ('mu') solver cannot update "
"zeros present in the initialization, and so leads to "
"poorer results when used jointly with init='nndsvd'. "
"You may try init='nndsvda' or init='nndsvdar' instead.",
UserWarning)
beta_loss = _beta_loss_to_float(beta_loss)
return beta_loss
def _beta_loss_to_float(beta_loss):
"""Convert string beta_loss to float"""
allowed_beta_loss = {'frobenius': 2,
'kullback-leibler': 1,
'itakura-saito': 0}
if isinstance(beta_loss, str) and beta_loss in allowed_beta_loss:
beta_loss = allowed_beta_loss[beta_loss]
if not isinstance(beta_loss, numbers.Number):
raise ValueError('Invalid beta_loss parameter: got %r instead '
'of one of %r, or a float.' %
(beta_loss, allowed_beta_loss.keys()))
return beta_loss
def _initialize_nmf(X, n_components, init=None, eps=1e-6,
random_state=None):
"""Algorithms for NMF initialization.
Computes an initial guess for the non-negative
rank k matrix approximation for X: X = WH
Parameters
----------
X : array-like, shape (n_samples, n_features)
The data matrix to be decomposed.
n_components : integer
The number of components desired in the approximation.
init : None | 'random' | 'nndsvd' | 'nndsvda' | 'nndsvdar'
Method used to initialize the procedure.
Default: 'nndsvd' if n_components < n_features, otherwise 'random'.
Valid options:
- 'random': non-negative random matrices, scaled with:
sqrt(X.mean() / n_components)
- 'nndsvd': Nonnegative Double Singular Value Decomposition (NNDSVD)
initialization (better for sparseness)
- 'nndsvda': NNDSVD with zeros filled with the average of X
(better when sparsity is not desired)
- 'nndsvdar': NNDSVD with zeros filled with small random values
(generally faster, less accurate alternative to NNDSVDa
for when sparsity is not desired)
- 'custom': use custom matrices W and H
eps : float
Truncate all values less than this in output to zero.
random_state : int, RandomState instance or None, optional, default: None
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`. Used when ``init`` == 'nndsvdar' or 'random'.
Returns
-------
W : array-like, shape (n_samples, n_components)
Initial guesses for solving X ~= WH
H : array-like, shape (n_components, n_features)
Initial guesses for solving X ~= WH
References
----------
C. Boutsidis, E. Gallopoulos: SVD based initialization: A head start for
nonnegative matrix factorization - Pattern Recognition, 2008
http://tinyurl.com/nndsvd
"""
check_non_negative(X, "NMF initialization")
n_samples, n_features = X.shape
if init is None:
if n_components < n_features:
init = 'nndsvd'
else:
init = 'random'
# Random initialization
if init == 'random':
avg = np.sqrt(X.mean() / n_components)
rng = check_random_state(random_state)
H = avg * rng.randn(n_components, n_features)
W = avg * rng.randn(n_samples, n_components)
# we do not write np.abs(H, out=H) to stay compatible with
# numpy 1.5 and earlier where the 'out' keyword is not
# supported as a kwarg on ufuncs
np.abs(H, H)
np.abs(W, W)
return W, H
# NNDSVD initialization
U, S, V = randomized_svd(X, n_components, random_state=random_state)
W, H = np.zeros(U.shape), np.zeros(V.shape)
# The leading singular triplet is non-negative
# so it can be used as is for initialization.
W[:, 0] = np.sqrt(S[0]) * np.abs(U[:, 0])
H[0, :] = np.sqrt(S[0]) * np.abs(V[0, :])
for j in range(1, n_components):
x, y = U[:, j], V[j, :]
# extract positive and negative parts of column vectors
x_p, y_p = np.maximum(x, 0), np.maximum(y, 0)
x_n, y_n = np.abs(np.minimum(x, 0)), np.abs(np.minimum(y, 0))
# and their norms
x_p_nrm, y_p_nrm = norm(x_p), norm(y_p)
x_n_nrm, y_n_nrm = norm(x_n), norm(y_n)
m_p, m_n = x_p_nrm * y_p_nrm, x_n_nrm * y_n_nrm
# choose update
if m_p > m_n:
u = x_p / x_p_nrm
v = y_p / y_p_nrm
sigma = m_p
else:
u = x_n / x_n_nrm
v = y_n / y_n_nrm
sigma = m_n
lbd = np.sqrt(S[j] * sigma)
W[:, j] = lbd * u
H[j, :] = lbd * v
W[W < eps] = 0
H[H < eps] = 0
if init == "nndsvd":
pass
elif init == "nndsvda":
avg = X.mean()
W[W == 0] = avg
H[H == 0] = avg
elif init == "nndsvdar":
rng = check_random_state(random_state)
avg = X.mean()
W[W == 0] = abs(avg * rng.randn(len(W[W == 0])) / 100)
H[H == 0] = abs(avg * rng.randn(len(H[H == 0])) / 100)
else:
raise ValueError(
'Invalid init parameter: got %r instead of one of %r' %
(init, (None, 'random', 'nndsvd', 'nndsvda', 'nndsvdar')))
return W, H
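# Typical call (comment sketch): random_state only matters for the 'random'
# and 'nndsvdar' schemes,
#     W_init, H_init = _initialize_nmf(X, n_components=5, init='nndsvdar',
#                                      random_state=0)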
def _update_coordinate_descent(X, W, Ht, l1_reg, l2_reg, shuffle,
random_state):
"""Helper function for _fit_coordinate_descent
Update W to minimize the objective function, iterating once over all
coordinates. By symmetry, to update H, one can call
_update_coordinate_descent(X.T, Ht, W, ...)
"""
n_components = Ht.shape[1]
HHt = fast_dot(Ht.T, Ht)
XHt = safe_sparse_dot(X, Ht)
# L2 regularization corresponds to increase of the diagonal of HHt
if l2_reg != 0.:
# adds l2_reg only on the diagonal
HHt.flat[::n_components + 1] += l2_reg
# L1 regularization corresponds to decrease of each element of XHt
if l1_reg != 0.:
XHt -= l1_reg
if shuffle:
permutation = random_state.permutation(n_components)
else:
permutation = np.arange(n_components)
# The following seems to be required on 64-bit Windows w/ Python 3.5.
permutation = np.asarray(permutation, dtype=np.intp)
return _update_cdnmf_fast(W, HHt, XHt, permutation)
def _fit_coordinate_descent(X, W, H, tol=1e-4, max_iter=200, l1_reg_W=0,
l1_reg_H=0, l2_reg_W=0, l2_reg_H=0, update_H=True,
verbose=0, shuffle=False, random_state=None):
"""Compute Non-negative Matrix Factorization (NMF) with Coordinate Descent
The objective function is minimized with an alternating minimization of W
and H. Each minimization is done with a cyclic (up to a permutation of the
features) Coordinate Descent.
Parameters
----------
X : array-like, shape (n_samples, n_features)
Constant matrix.
W : array-like, shape (n_samples, n_components)
Initial guess for the solution.
H : array-like, shape (n_components, n_features)
Initial guess for the solution.
tol : float, default: 1e-4
Tolerance of the stopping condition.
max_iter : integer, default: 200
Maximum number of iterations before timing out.
l1_reg_W : double, default: 0.
L1 regularization parameter for W.
l1_reg_H : double, default: 0.
L1 regularization parameter for H.
l2_reg_W : double, default: 0.
L2 regularization parameter for W.
l2_reg_H : double, default: 0.
L2 regularization parameter for H.
update_H : boolean, default: True
Set to True, both W and H will be estimated from initial guesses.
Set to False, only W will be estimated.
verbose : integer, default: 0
The verbosity level.
shuffle : boolean, default: False
If true, randomize the order of coordinates in the CD solver.
random_state : int, RandomState instance or None, optional, default: None
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
Returns
-------
W : array-like, shape (n_samples, n_components)
Solution to the non-negative least squares problem.
H : array-like, shape (n_components, n_features)
Solution to the non-negative least squares problem.
n_iter : int
The number of iterations done by the algorithm.
References
----------
Cichocki, Andrzej, and P. H. A. N. Anh-Huy. "Fast local algorithms for
large scale nonnegative matrix and tensor factorizations."
IEICE transactions on fundamentals of electronics, communications and
computer sciences 92.3: 708-721, 2009.
"""
# so W and Ht are both in C order in memory
Ht = check_array(H.T, order='C')
X = check_array(X, accept_sparse='csr')
rng = check_random_state(random_state)
for n_iter in range(max_iter):
violation = 0.
# Update W
violation += _update_coordinate_descent(X, W, Ht, l1_reg_W,
l2_reg_W, shuffle, rng)
# Update H
if update_H:
violation += _update_coordinate_descent(X.T, Ht, W, l1_reg_H,
l2_reg_H, shuffle, rng)
if n_iter == 0:
violation_init = violation
if violation_init == 0:
break
if verbose:
print("violation:", violation / violation_init)
if violation / violation_init <= tol:
if verbose:
print("Converged at iteration", n_iter + 1)
break
return W, Ht.T, n_iter
def _multiplicative_update_w(X, W, H, beta_loss, l1_reg_W, l2_reg_W, gamma,
H_sum=None, HHt=None, XHt=None, update_H=True):
"""update W in Multiplicative Update NMF"""
if beta_loss == 2:
# Numerator
if XHt is None:
XHt = safe_sparse_dot(X, H.T)
if update_H:
# avoid a copy of XHt, which will be re-computed (update_H=True)
numerator = XHt
else:
# preserve the XHt, which is not re-computed (update_H=False)
numerator = XHt.copy()
# Denominator
if HHt is None:
HHt = fast_dot(H, H.T)
denominator = fast_dot(W, HHt)
else:
# Numerator
# if X is sparse, compute WH only where X is non zero
WH_safe_X = _special_sparse_dot(W, H, X)
if sp.issparse(X):
WH_safe_X_data = WH_safe_X.data
X_data = X.data
else:
WH_safe_X_data = WH_safe_X
X_data = X
# copy used in the Denominator
WH = WH_safe_X.copy()
if beta_loss - 1. < 0:
WH[WH == 0] = EPSILON
# to avoid taking a negative power of zero
if beta_loss - 2. < 0:
WH_safe_X_data[WH_safe_X_data == 0] = EPSILON
if beta_loss == 1:
np.divide(X_data, WH_safe_X_data, out=WH_safe_X_data)
else:
WH_safe_X_data **= beta_loss - 2
# element-wise multiplication
WH_safe_X_data *= X_data
# here numerator = dot(X * (dot(W, H) ** (beta_loss - 2)), H.T)
numerator = safe_sparse_dot(WH_safe_X, H.T)
# Denominator
if beta_loss == 1:
if H_sum is None:
H_sum = np.sum(H, axis=1) # shape(n_components, )
denominator = H_sum[np.newaxis, :]
else:
# computation of WHHt = dot(dot(W, H) ** beta_loss - 1, H.T)
if sp.issparse(X):
# memory efficient computation
# (compute row by row, avoiding the dense matrix WH)
WHHt = np.empty(W.shape)
for i in range(X.shape[0]):
WHi = fast_dot(W[i, :], H)
if beta_loss - 1 < 0:
WHi[WHi == 0] = EPSILON
WHi **= beta_loss - 1
WHHt[i, :] = fast_dot(WHi, H.T)
else:
WH **= beta_loss - 1
WHHt = fast_dot(WH, H.T)
denominator = WHHt
# Add L1 and L2 regularization
if l1_reg_W > 0:
denominator += l1_reg_W
if l2_reg_W > 0:
denominator = denominator + l2_reg_W * W
denominator[denominator == 0] = EPSILON
numerator /= denominator
delta_W = numerator
# gamma is in ]0, 1]
if gamma != 1:
delta_W **= gamma
return delta_W, H_sum, HHt, XHt
def _multiplicative_update_h(X, W, H, beta_loss, l1_reg_H, l2_reg_H, gamma):
"""update H in Multiplicative Update NMF"""
if beta_loss == 2:
numerator = safe_sparse_dot(W.T, X)
denominator = fast_dot(fast_dot(W.T, W), H)
else:
# Numerator
WH_safe_X = _special_sparse_dot(W, H, X)
if sp.issparse(X):
WH_safe_X_data = WH_safe_X.data
X_data = X.data
else:
WH_safe_X_data = WH_safe_X
X_data = X
# copy used in the Denominator
WH = WH_safe_X.copy()
if beta_loss - 1. < 0:
WH[WH == 0] = EPSILON
# to avoid division by zero
if beta_loss - 2. < 0:
WH_safe_X_data[WH_safe_X_data == 0] = EPSILON
if beta_loss == 1:
np.divide(X_data, WH_safe_X_data, out=WH_safe_X_data)
else:
WH_safe_X_data **= beta_loss - 2
# element-wise multiplication
WH_safe_X_data *= X_data
# here numerator = dot(W.T, (dot(W, H) ** (beta_loss - 2)) * X)
numerator = safe_sparse_dot(W.T, WH_safe_X)
# Denominator
if beta_loss == 1:
W_sum = np.sum(W, axis=0) # shape(n_components, )
W_sum[W_sum == 0] = 1.
denominator = W_sum[:, np.newaxis]
# beta_loss not in (1, 2)
else:
# computation of WtWH = dot(W.T, dot(W, H) ** beta_loss - 1)
if sp.issparse(X):
# memory efficient computation
# (compute column by column, avoiding the dense matrix WH)
WtWH = np.empty(H.shape)
for i in range(X.shape[1]):
WHi = fast_dot(W, H[:, i])
if beta_loss - 1 < 0:
WHi[WHi == 0] = EPSILON
WHi **= beta_loss - 1
WtWH[:, i] = fast_dot(W.T, WHi)
else:
WH **= beta_loss - 1
WtWH = fast_dot(W.T, WH)
denominator = WtWH
# Add L1 and L2 regularization
if l1_reg_H > 0:
denominator += l1_reg_H
if l2_reg_H > 0:
denominator = denominator + l2_reg_H * H
denominator[denominator == 0] = EPSILON
numerator /= denominator
delta_H = numerator
# gamma is in ]0, 1]
if gamma != 1:
delta_H **= gamma
return delta_H
def _fit_multiplicative_update(X, W, H, beta_loss='frobenius',
max_iter=200, tol=1e-4,
l1_reg_W=0, l1_reg_H=0, l2_reg_W=0, l2_reg_H=0,
update_H=True, verbose=0):
"""Compute Non-negative Matrix Factorization with Multiplicative Update
The objective function is _beta_divergence(X, WH) and is minimized with an
alternating minimization of W and H. Each minimization is done with a
Multiplicative Update.
Parameters
----------
X : array-like, shape (n_samples, n_features)
Constant input matrix.
W : array-like, shape (n_samples, n_components)
Initial guess for the solution.
H : array-like, shape (n_components, n_features)
Initial guess for the solution.
beta_loss : float or string, default 'frobenius'
String must be in {'frobenius', 'kullback-leibler', 'itakura-saito'}.
Beta divergence to be minimized, measuring the distance between X
and the dot product WH. Note that values different from 'frobenius'
(or 2) and 'kullback-leibler' (or 1) lead to significantly slower
fits. Note that for beta_loss <= 0 (or 'itakura-saito'), the input
matrix X cannot contain zeros.
max_iter : integer, default: 200
Number of iterations.
tol : float, default: 1e-4
Tolerance of the stopping condition.
l1_reg_W : double, default: 0.
L1 regularization parameter for W.
l1_reg_H : double, default: 0.
L1 regularization parameter for H.
l2_reg_W : double, default: 0.
L2 regularization parameter for W.
l2_reg_H : double, default: 0.
L2 regularization parameter for H.
update_H : boolean, default: True
Set to True, both W and H will be estimated from initial guesses.
Set to False, only W will be estimated.
verbose : integer, default: 0
The verbosity level.
Returns
-------
W : array, shape (n_samples, n_components)
Solution to the non-negative least squares problem.
H : array, shape (n_components, n_features)
Solution to the non-negative least squares problem.
n_iter : int
The number of iterations done by the algorithm.
References
----------
Fevotte, C., & Idier, J. (2011). Algorithms for nonnegative matrix
factorization with the beta-divergence. Neural Computation, 23(9).
"""
start_time = time.time()
beta_loss = _beta_loss_to_float(beta_loss)
# gamma for Maximization-Minimization (MM) algorithm [Fevotte 2011]
if beta_loss < 1:
gamma = 1. / (2. - beta_loss)
elif beta_loss > 2:
gamma = 1. / (beta_loss - 1.)
else:
gamma = 1.
# used for the convergence criterion
error_at_init = _beta_divergence(X, W, H, beta_loss, square_root=True)
previous_error = error_at_init
H_sum, HHt, XHt = None, None, None
for n_iter in range(1, max_iter + 1):
# update W
# H_sum, HHt and XHt are saved and reused if not update_H
delta_W, H_sum, HHt, XHt = _multiplicative_update_w(
X, W, H, beta_loss, l1_reg_W, l2_reg_W, gamma,
H_sum, HHt, XHt, update_H)
W *= delta_W
# necessary for stability with beta_loss < 1
if beta_loss < 1:
W[W < np.finfo(np.float64).eps] = 0.
# update H
if update_H:
delta_H = _multiplicative_update_h(X, W, H, beta_loss, l1_reg_H,
l2_reg_H, gamma)
H *= delta_H
# These values will be recomputed since H changed
H_sum, HHt, XHt = None, None, None
# necessary for stability with beta_loss < 1
if beta_loss <= 1:
H[H < np.finfo(np.float64).eps] = 0.
# test convergence criterion every 10 iterations
if tol > 0 and n_iter % 10 == 0:
error = _beta_divergence(X, W, H, beta_loss, square_root=True)
if verbose:
iter_time = time.time()
print("Epoch %02d reached after %.3f seconds, error: %f" %
(n_iter, iter_time - start_time, error))
if (previous_error - error) / error_at_init < tol:
break
previous_error = error
# do not print if we have already printed in the convergence test
if verbose and (tol == 0 or n_iter % 10 != 0):
end_time = time.time()
print("Epoch %02d reached after %.3f seconds." %
(n_iter, end_time - start_time))
return W, H, n_iter
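# --- Editorial note -------------------------------------------------------
# Illustrative sketch, not part of the original module: how the multiplicative
# update ('mu') path implemented above is reached through the public
# scikit-learn API. Parameter names follow the docstrings in this file; the
# data values are arbitrary.
def _demo_multiplicative_update():
    import numpy as np
    from sklearn.decomposition import non_negative_factorization

    # Small strictly positive matrix to factorize.
    X = np.abs(np.random.RandomState(0).randn(6, 4)) + 0.01
    # solver='mu' dispatches to _fit_multiplicative_update; a non-Frobenius
    # loss such as 'kullback-leibler' is only available with this solver.
    W, H, n_iter = non_negative_factorization(
        X, n_components=2, init='random', solver='mu',
        beta_loss='kullback-leibler', max_iter=200, random_state=0)
    return W, H, n_iter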
def non_negative_factorization(X, W=None, H=None, n_components=None,
init='random', update_H=True, solver='cd',
beta_loss='frobenius', tol=1e-4,
max_iter=200, alpha=0., l1_ratio=0.,
regularization=None, random_state=None,
verbose=0, shuffle=False):
"""Compute Non-negative Matrix Factorization (NMF)
Find two non-negative matrices (W, H) whose product approximates the non-
negative matrix X. This factorization can be used for example for
dimensionality reduction, source separation or topic extraction.
The objective function is::
0.5 * ||X - WH||_Fro^2
+ alpha * l1_ratio * ||vec(W)||_1
+ alpha * l1_ratio * ||vec(H)||_1
+ 0.5 * alpha * (1 - l1_ratio) * ||W||_Fro^2
+ 0.5 * alpha * (1 - l1_ratio) * ||H||_Fro^2
Where::
||A||_Fro^2 = \sum_{i,j} A_{ij}^2 (Frobenius norm)
||vec(A)||_1 = \sum_{i,j} abs(A_{ij}) (Elementwise L1 norm)
For multiplicative-update ('mu') solver, the Frobenius norm
(0.5 * ||X - WH||_Fro^2) can be changed into another beta-divergence loss,
by changing the beta_loss parameter.
The objective function is minimized with an alternating minimization of W
and H. If H is given and update_H=False, it solves for W only.
Parameters
----------
X : array-like, shape (n_samples, n_features)
Constant matrix.
W : array-like, shape (n_samples, n_components)
If init='custom', it is used as initial guess for the solution.
H : array-like, shape (n_components, n_features)
If init='custom', it is used as initial guess for the solution.
If update_H=False, it is used as a constant, to solve for W only.
n_components : integer
Number of components, if n_components is not set all features
are kept.
init : None | 'random' | 'nndsvd' | 'nndsvda' | 'nndsvdar' | 'custom'
Method used to initialize the procedure.
Default: 'nndsvd' if n_components < n_features, otherwise random.
Valid options:
- 'random': non-negative random matrices, scaled with:
sqrt(X.mean() / n_components)
- 'nndsvd': Nonnegative Double Singular Value Decomposition (NNDSVD)
initialization (better for sparseness)
- 'nndsvda': NNDSVD with zeros filled with the average of X
(better when sparsity is not desired)
- 'nndsvdar': NNDSVD with zeros filled with small random values
(generally faster, less accurate alternative to NNDSVDa
for when sparsity is not desired)
- 'custom': use custom matrices W and H
update_H : boolean, default: True
Set to True, both W and H will be estimated from initial guesses.
Set to False, only W will be estimated.
solver : 'cd' | 'mu'
Numerical solver to use:
'cd' is a Coordinate Descent solver.
'mu' is a Multiplicative Update solver.
.. versionadded:: 0.17
Coordinate Descent solver.
.. versionadded:: 0.19
Multiplicative Update solver.
beta_loss : float or string, default 'frobenius'
String must be in {'frobenius', 'kullback-leibler', 'itakura-saito'}.
Beta divergence to be minimized, measuring the distance between X
and the dot product WH. Note that values different from 'frobenius'
(or 2) and 'kullback-leibler' (or 1) lead to significantly slower
fits. Note that for beta_loss <= 0 (or 'itakura-saito'), the input
matrix X cannot contain zeros. Used only in 'mu' solver.
.. versionadded:: 0.19
tol : float, default: 1e-4
Tolerance of the stopping condition.
max_iter : integer, default: 200
Maximum number of iterations before timing out.
alpha : double, default: 0.
Constant that multiplies the regularization terms.
l1_ratio : double, default: 0.
The regularization mixing parameter, with 0 <= l1_ratio <= 1.
For l1_ratio = 0 the penalty is an elementwise L2 penalty
(aka Frobenius Norm).
For l1_ratio = 1 it is an elementwise L1 penalty.
For 0 < l1_ratio < 1, the penalty is a combination of L1 and L2.
regularization : 'both' | 'components' | 'transformation' | None
Select whether the regularization affects the components (H), the
transformation (W), both or none of them.
random_state : int, RandomState instance or None, optional, default: None
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
verbose : integer, default: 0
The verbosity level.
shuffle : boolean, default: False
If true, randomize the order of coordinates in the CD solver.
Returns
-------
W : array-like, shape (n_samples, n_components)
Solution to the non-negative least squares problem.
H : array-like, shape (n_components, n_features)
Solution to the non-negative least squares problem.
n_iter : int
Actual number of iterations.
Examples
--------
>>> import numpy as np
>>> X = np.array([[1,1], [2, 1], [3, 1.2], [4, 1], [5, 0.8], [6, 1]])
>>> from sklearn.decomposition import non_negative_factorization
>>> W, H, n_iter = non_negative_factorization(X, n_components=2, \
init='random', random_state=0)
References
----------
Cichocki, Andrzej, and P. H. A. N. Anh-Huy. "Fast local algorithms for
large scale nonnegative matrix and tensor factorizations."
IEICE transactions on fundamentals of electronics, communications and
computer sciences 92.3: 708-721, 2009.
Fevotte, C., & Idier, J. (2011). Algorithms for nonnegative matrix
factorization with the beta-divergence. Neural Computation, 23(9).
"""
X = check_array(X, accept_sparse=('csr', 'csc'), dtype=float)
check_non_negative(X, "NMF (input X)")
beta_loss = _check_string_param(solver, regularization, beta_loss, init)
if safe_min(X) == 0 and beta_loss <= 0:
raise ValueError("When beta_loss <= 0 and X contains zeros, "
"the solver may diverge. Please add small values to "
"X, or use a positive beta_loss.")
n_samples, n_features = X.shape
if n_components is None:
n_components = n_features
if not isinstance(n_components, INTEGER_TYPES) or n_components <= 0:
raise ValueError("Number of components must be a positive integer;"
" got (n_components=%r)" % n_components)
if not isinstance(max_iter, INTEGER_TYPES) or max_iter < 0:
raise ValueError("Maximum number of iterations must be a positive "
"integer; got (max_iter=%r)" % max_iter)
if not isinstance(tol, numbers.Number) or tol < 0:
raise ValueError("Tolerance for stopping criteria must be "
"positive; got (tol=%r)" % tol)
# check W and H, or initialize them
if init == 'custom' and update_H:
_check_init(H, (n_components, n_features), "NMF (input H)")
_check_init(W, (n_samples, n_components), "NMF (input W)")
elif not update_H:
_check_init(H, (n_components, n_features), "NMF (input H)")
# 'mu' solver should not be initialized by zeros
if solver == 'mu':
avg = np.sqrt(X.mean() / n_components)
W = avg * np.ones((n_samples, n_components))
else:
W = np.zeros((n_samples, n_components))
else:
W, H = _initialize_nmf(X, n_components, init=init,
random_state=random_state)
l1_reg_W, l1_reg_H, l2_reg_W, l2_reg_H = _compute_regularization(
alpha, l1_ratio, regularization)
if solver == 'cd':
W, H, n_iter = _fit_coordinate_descent(X, W, H, tol, max_iter,
l1_reg_W, l1_reg_H,
l2_reg_W, l2_reg_H,
update_H=update_H,
verbose=verbose,
shuffle=shuffle,
random_state=random_state)
elif solver == 'mu':
W, H, n_iter = _fit_multiplicative_update(X, W, H, beta_loss, max_iter,
tol, l1_reg_W, l1_reg_H,
l2_reg_W, l2_reg_H, update_H,
verbose)
else:
raise ValueError("Invalid solver parameter '%s'." % solver)
if n_iter == max_iter and tol > 0:
warnings.warn("Maximum number of iteration %d reached. Increase it to"
" improve convergence." % max_iter, ConvergenceWarning)
return W, H, n_iter
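# --- Editorial note -------------------------------------------------------
# Illustrative sketch, not part of the original module: the init='custom' and
# update_H=False behaviours documented above. W0/H0 are arbitrary non-negative
# starting guesses.
def _demo_custom_init_and_fixed_H():
    import numpy as np
    from sklearn.decomposition import non_negative_factorization

    rng = np.random.RandomState(42)
    X = np.abs(rng.randn(10, 5))
    W0 = np.abs(rng.randn(10, 3))
    H0 = np.abs(rng.randn(3, 5))

    # Both factors are refined starting from the supplied guesses.
    W, H, _ = non_negative_factorization(
        X, W=W0, H=H0, n_components=3, init='custom', update_H=True)

    # With update_H=False, H0 is kept constant and only W is estimated
    # (a non-negative least squares problem).
    W_only, H_fixed, _ = non_negative_factorization(
        X, H=H0, n_components=3, update_H=False)
    return W, H, W_only, H_fixed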
class NMF(BaseEstimator, TransformerMixin):
"""Non-Negative Matrix Factorization (NMF)
Find two non-negative matrices (W, H) whose product approximates the non-
negative matrix X. This factorization can be used for example for
dimensionality reduction, source separation or topic extraction.
The objective function is::
0.5 * ||X - WH||_Fro^2
+ alpha * l1_ratio * ||vec(W)||_1
+ alpha * l1_ratio * ||vec(H)||_1
+ 0.5 * alpha * (1 - l1_ratio) * ||W||_Fro^2
+ 0.5 * alpha * (1 - l1_ratio) * ||H||_Fro^2
Where::
||A||_Fro^2 = \sum_{i,j} A_{ij}^2 (Frobenius norm)
||vec(A)||_1 = \sum_{i,j} abs(A_{ij}) (Elementwise L1 norm)
For multiplicative-update ('mu') solver, the Frobenius norm
(0.5 * ||X - WH||_Fro^2) can be changed into another beta-divergence loss,
by changing the beta_loss parameter.
The objective function is minimized with an alternating minimization of W
and H.
Read more in the :ref:`User Guide <NMF>`.
Parameters
----------
n_components : int or None
Number of components, if n_components is not set all features
are kept.
init : 'random' | 'nndsvd' | 'nndsvda' | 'nndsvdar' | 'custom'
Method used to initialize the procedure.
Default: 'nndsvd' if n_components < n_features, otherwise random.
Valid options:
- 'random': non-negative random matrices, scaled with:
sqrt(X.mean() / n_components)
- 'nndsvd': Nonnegative Double Singular Value Decomposition (NNDSVD)
initialization (better for sparseness)
- 'nndsvda': NNDSVD with zeros filled with the average of X
(better when sparsity is not desired)
- 'nndsvdar': NNDSVD with zeros filled with small random values
(generally faster, less accurate alternative to NNDSVDa
for when sparsity is not desired)
- 'custom': use custom matrices W and H
solver : 'cd' | 'mu'
Numerical solver to use:
'cd' is a Coordinate Descent solver.
'mu' is a Multiplicative Update solver.
.. versionadded:: 0.17
Coordinate Descent solver.
.. versionadded:: 0.19
Multiplicative Update solver.
beta_loss : float or string, default 'frobenius'
String must be in {'frobenius', 'kullback-leibler', 'itakura-saito'}.
Beta divergence to be minimized, measuring the distance between X
and the dot product WH. Note that values different from 'frobenius'
(or 2) and 'kullback-leibler' (or 1) lead to significantly slower
fits. Note that for beta_loss <= 0 (or 'itakura-saito'), the input
matrix X cannot contain zeros. Used only in 'mu' solver.
.. versionadded:: 0.19
tol : float, default: 1e-4
Tolerance of the stopping condition.
max_iter : integer, default: 200
Maximum number of iterations before timing out.
random_state : int, RandomState instance or None, optional, default: None
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
alpha : double, default: 0.
Constant that multiplies the regularization terms. Set it to zero to
have no regularization.
.. versionadded:: 0.17
*alpha* used in the Coordinate Descent solver.
l1_ratio : double, default: 0.
The regularization mixing parameter, with 0 <= l1_ratio <= 1.
For l1_ratio = 0 the penalty is an elementwise L2 penalty
(aka Frobenius Norm).
For l1_ratio = 1 it is an elementwise L1 penalty.
For 0 < l1_ratio < 1, the penalty is a combination of L1 and L2.
.. versionadded:: 0.17
Regularization parameter *l1_ratio* used in the Coordinate Descent
solver.
shuffle : boolean, default: False
If true, randomize the order of coordinates in the CD solver.
.. versionadded:: 0.17
*shuffle* parameter used in the Coordinate Descent solver.
Attributes
----------
components_ : array, [n_components, n_features]
Factorization matrix, sometimes called 'dictionary'.
reconstruction_err_ : number
Frobenius norm of the matrix difference, or beta-divergence, between
the training data ``X`` and the reconstructed data ``WH`` from
the fitted model.
n_iter_ : int
Actual number of iterations.
Examples
--------
>>> import numpy as np
>>> X = np.array([[1, 1], [2, 1], [3, 1.2], [4, 1], [5, 0.8], [6, 1]])
>>> from sklearn.decomposition import NMF
>>> model = NMF(n_components=2, init='random', random_state=0)
>>> W = model.fit_transform(X)
>>> H = model.components_
References
----------
Cichocki, Andrzej, and P. H. A. N. Anh-Huy. "Fast local algorithms for
large scale nonnegative matrix and tensor factorizations."
IEICE transactions on fundamentals of electronics, communications and
computer sciences 92.3: 708-721, 2009.
Fevotte, C., & Idier, J. (2011). Algorithms for nonnegative matrix
factorization with the beta-divergence. Neural Computation, 23(9).
"""
def __init__(self, n_components=None, init=None, solver='cd',
beta_loss='frobenius', tol=1e-4, max_iter=200,
random_state=None, alpha=0., l1_ratio=0., verbose=0,
shuffle=False):
self.n_components = n_components
self.init = init
self.solver = solver
self.beta_loss = beta_loss
self.tol = tol
self.max_iter = max_iter
self.random_state = random_state
self.alpha = alpha
self.l1_ratio = l1_ratio
self.verbose = verbose
self.shuffle = shuffle
def fit_transform(self, X, y=None, W=None, H=None):
"""Learn a NMF model for the data X and returns the transformed data.
This is more efficient than calling fit followed by transform.
Parameters
----------
X : {array-like, sparse matrix}, shape (n_samples, n_features)
Data matrix to be decomposed
W : array-like, shape (n_samples, n_components)
If init='custom', it is used as initial guess for the solution.
H : array-like, shape (n_components, n_features)
If init='custom', it is used as initial guess for the solution.
Returns
-------
W : array, shape (n_samples, n_components)
Transformed data.
"""
X = check_array(X, accept_sparse=('csr', 'csc'), dtype=float)
W, H, n_iter_ = non_negative_factorization(
X=X, W=W, H=H, n_components=self.n_components, init=self.init,
update_H=True, solver=self.solver, beta_loss=self.beta_loss,
tol=self.tol, max_iter=self.max_iter, alpha=self.alpha,
l1_ratio=self.l1_ratio, regularization='both',
random_state=self.random_state, verbose=self.verbose,
shuffle=self.shuffle)
self.reconstruction_err_ = _beta_divergence(X, W, H, self.beta_loss,
square_root=True)
self.n_components_ = H.shape[0]
self.components_ = H
self.n_iter_ = n_iter_
return W
def fit(self, X, y=None, **params):
"""Learn a NMF model for the data X.
Parameters
----------
X : {array-like, sparse matrix}, shape (n_samples, n_features)
Data matrix to be decomposed
Returns
-------
self
"""
self.fit_transform(X, **params)
return self
def transform(self, X):
"""Transform the data X according to the fitted NMF model
Parameters
----------
X : {array-like, sparse matrix}, shape (n_samples, n_features)
Data matrix to be transformed by the model
Returns
-------
W : array, shape (n_samples, n_components)
Transformed data
"""
check_is_fitted(self, 'n_components_')
W, _, n_iter_ = non_negative_factorization(
X=X, W=None, H=self.components_, n_components=self.n_components_,
init=self.init, update_H=False, solver=self.solver,
beta_loss=self.beta_loss, tol=self.tol, max_iter=self.max_iter,
alpha=self.alpha, l1_ratio=self.l1_ratio, regularization='both',
random_state=self.random_state, verbose=self.verbose,
shuffle=self.shuffle)
return W
def inverse_transform(self, W):
"""Transform data back to its original space.
Parameters
----------
W : {array-like, sparse matrix}, shape (n_samples, n_components)
Transformed data matrix
Returns
-------
X : {array-like, sparse matrix}, shape (n_samples, n_features)
Data matrix of original shape
.. versionadded:: 0.18
"""
check_is_fitted(self, 'n_components_')
return np.dot(W, self.components_)
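# --- Editorial note -------------------------------------------------------
# Illustrative sketch, not part of the original module: the estimator round
# trip fit_transform -> transform -> inverse_transform, following the class
# docstring example above.
def _demo_nmf_estimator():
    import numpy as np
    from sklearn.decomposition import NMF

    X = np.array([[1, 1], [2, 1], [3, 1.2], [4, 1], [5, 0.8], [6, 1]])
    model = NMF(n_components=2, init='random', random_state=0)
    W = model.fit_transform(X)           # learns components_ and returns W
    W2 = model.transform(X)              # projects X onto the fixed components_
    X_hat = model.inverse_transform(W)   # reconstruction, np.dot(W, components_)
    # reconstruction_err_ is the beta-divergence (Frobenius norm by default)
    # between X and the reconstruction.
    return W, W2, X_hat, model.reconstruction_err_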
| bsd-3-clause |
tacaswell/bokeh | bokeh/charts/builder/tests/test_dot_builder.py | 4 | 3924 | """ This is the Bokeh charts testing interface.
"""
#-----------------------------------------------------------------------------
# Copyright (c) 2012 - 2014, Continuum Analytics, Inc. All rights reserved.
#
# Powered by the Bokeh Development Team.
#
# The full license is in the file LICENSE.txt, distributed with this software.
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# Imports
#-----------------------------------------------------------------------------
from __future__ import absolute_import
from collections import OrderedDict
import unittest
import numpy as np
from numpy.testing import assert_array_equal
import pandas as pd
from bokeh.charts import Dot
from bokeh.util.testing import create_chart
#-----------------------------------------------------------------------------
# Classes and functions
#-----------------------------------------------------------------------------
class TestDot(unittest.TestCase):
def test_supported_input(self):
xyvalues = OrderedDict()
xyvalues['python']=[2, 5]
xyvalues['pypy']=[12, 40]
xyvalues['jython']=[22, 30]
xyvaluesdf = pd.DataFrame(xyvalues, index=['lists', 'loops'])
cat = ['lists', 'loops']
catjython = ['lists:0.75', 'loops:0.75']
catpypy = ['lists:0.5', 'loops:0.5']
catpython = ['lists:0.25', 'loops:0.25']
python = seg_top_python = [2, 5]
pypy = seg_top_pypy = [12, 40]
jython = seg_top_jython = [22, 30]
zero = [0, 0]
for i, _xy in enumerate([xyvalues, xyvaluesdf]):
hm = create_chart(Dot, _xy, cat=cat)
builder = hm._builders[0]
self.assertEqual(sorted(builder._groups), sorted(list(xyvalues.keys())))
assert_array_equal(builder._data['cat'], cat)
assert_array_equal(builder._data['catjython'], catjython)
assert_array_equal(builder._data['catpython'], catpython)
assert_array_equal(builder._data['catpypy'], catpypy)
assert_array_equal(builder._data['python'], python)
assert_array_equal(builder._data['jython'], jython)
assert_array_equal(builder._data['pypy'], pypy)
assert_array_equal(builder._data['seg_top_python'], seg_top_python)
assert_array_equal(builder._data['seg_top_jython'], seg_top_jython)
assert_array_equal(builder._data['seg_top_pypy'], seg_top_pypy)
assert_array_equal(builder._data['z_python'], zero)
assert_array_equal(builder._data['z_pypy'], zero)
assert_array_equal(builder._data['z_jython'], zero)
assert_array_equal(builder._data['zero'], zero)
lvalues = [[2, 5], [12, 40], [22, 30]]
for _xy in [lvalues, np.array(lvalues)]:
hm = create_chart(Dot, _xy, cat=cat)
builder = hm._builders[0]
self.assertEqual(builder._groups, ['0', '1', '2'])
assert_array_equal(builder._data['cat'], cat)
assert_array_equal(builder._data['cat0'], catpython)
assert_array_equal(builder._data['cat1'], catpypy)
assert_array_equal(builder._data['cat2'], catjython)
assert_array_equal(builder._data['0'], python)
assert_array_equal(builder._data['1'], pypy)
assert_array_equal(builder._data['2'], jython)
assert_array_equal(builder._data['seg_top_0'], seg_top_python)
assert_array_equal(builder._data['seg_top_1'], seg_top_pypy)
assert_array_equal(builder._data['seg_top_2'], seg_top_jython)
assert_array_equal(builder._data['z_0'], zero)
assert_array_equal(builder._data['z_1'], zero)
assert_array_equal(builder._data['z_2'], zero)
assert_array_equal(builder._data['zero'], zero)
| bsd-3-clause |
guaix-ucm/pyemir | emirdrp/processing/wavecal/refine_rectwv_coeff.py | 3 | 13528 | #
# Copyright 2008-2018 Universidad Complutense de Madrid
#
# This file is part of PyEmir
#
# SPDX-License-Identifier: GPL-3.0+
# License-Filename: LICENSE.txt
#
from __future__ import division
from __future__ import print_function
from copy import deepcopy
import logging
import numpy as np
from numina.array.display.pause_debugplot import pause_debugplot
from numina.array.display.ximplotxy import ximplotxy
from numina.array.display.matplotlib_qt import plt
from numina.array.display.matplotlib_qt import set_window_geometry
from numina.array.stats import summary
from numina.array.wavecalib.check_wlcalib import check_wlcalib_sp
from numina.array.wavecalib.crosscorrelation import convolve_comb_lines
from numina.array.wavecalib.crosscorrelation import periodic_corr1d
from numina.array.wavecalib.fix_pix_borders import find_pix_borders
from numina.frame.utils import copy_img
import emirdrp.datamodel as datamodel
from emirdrp.instrument.csu_configuration import CsuConfiguration
from emirdrp.processing.wavecal.median_slitlets_rectified \
import median_slitlets_rectified
from emirdrp.processing.wavecal.set_wv_parameters import set_wv_parameters
from emirdrp.core import EMIR_NAXIS1
from emirdrp.core import EMIR_NBARS
def refine_rectwv_coeff(input_image, rectwv_coeff,
catlines_all_wave,
catlines_all_flux,
refine_wavecalib_mode,
list_useful_slitlets,
save_intermediate_results=False,
debugplot=0):
"""Refine RectWaveCoeff object using a catalogue of lines
    The refinement must rely on either ARC lines or OH lines; the choice
    is made through the refine_wavecalib_mode parameter described below.
Parameters
----------
input_image : HDUList object
Input 2D image.
rectwv_coeff : RectWaveCoeff instance
Rectification and wavelength calibration coefficients for the
particular CSU configuration.
catlines_all_wave : numpy array
Array with wavelengths
catlines_all_flux : numpy array
Array with fluxes.
refine_wavecalib_mode : int
Integer, indicating the type of refinement:
0 : no refinement
1 : apply the same global offset to all the slitlets (using ARC lines)
2 : apply individual offset to each slitlet (using ARC lines)
11 : apply the same global offset to all the slitlets (using OH lines)
12 : apply individual offset to each slitlet (using OH lines)
list_useful_slitlets : list of integers
List of useful slitlets.
save_intermediate_results : bool
If True, save plots in PDF files
debugplot : int
Determines whether intermediate computations and/or plots
are displayed. The valid codes are defined in
numina.array.display.pause_debugplot.
Returns
-------
refined_rectwv_coeff : RectWaveCoeff instance
Refined rectification and wavelength calibration coefficients
for the particular CSU configuration.
expected_cat_image : HDUList object
Output 2D image (rectified and wavelength calibrated) with
the expected catalogue lines.
"""
logger = logging.getLogger(__name__)
if save_intermediate_results:
from matplotlib.backends.backend_pdf import PdfPages
pdf = PdfPages('crosscorrelation.pdf')
else:
pdf = None
# image header
main_header = input_image[0].header
mecs_header = datamodel.get_mecs_header(input_image)
filter_name = main_header['filter']
grism_name = main_header['grism']
# initialize output
refined_rectwv_coeff = deepcopy(rectwv_coeff)
logger.info('Computing median spectrum')
# compute median spectrum and normalize it
sp_median = median_slitlets_rectified(
input_image,
mode=2,
list_useful_slitlets=list_useful_slitlets
)[0].data
sp_median /= sp_median.max()
# determine minimum and maximum useful wavelength
jmin, jmax = find_pix_borders(sp_median, 0)
naxis1 = main_header['naxis1']
naxis2 = main_header['naxis2']
crpix1 = main_header['crpix1']
crval1 = main_header['crval1']
cdelt1 = main_header['cdelt1']
xwave = crval1 + (np.arange(naxis1) + 1.0 - crpix1) * cdelt1
if grism_name == 'LR':
wv_parameters = set_wv_parameters(filter_name, grism_name)
wave_min = wv_parameters['wvmin_useful']
wave_max = wv_parameters['wvmax_useful']
else:
wave_min = crval1 + (jmin + 1 - crpix1) * cdelt1
wave_max = crval1 + (jmax + 1 - crpix1) * cdelt1
logger.info('Setting wave_min to {}'.format(wave_min))
logger.info('Setting wave_max to {}'.format(wave_max))
# extract subset of catalogue lines within current wavelength range
lok1 = catlines_all_wave >= wave_min
lok2 = catlines_all_wave <= wave_max
catlines_reference_wave = catlines_all_wave[lok1*lok2]
catlines_reference_flux = catlines_all_flux[lok1*lok2]
catlines_reference_flux /= catlines_reference_flux.max()
# estimate sigma to broaden catalogue lines
csu_config = CsuConfiguration.define_from_header(mecs_header)
# segregate slitlets
list_not_useful_slitlets = [i for i in list(range(1, EMIR_NBARS + 1))
if i not in list_useful_slitlets]
logger.info('list of useful slitlets: {}'.format(
list_useful_slitlets))
logger.info('list of unusable slitlets: {}'.format(
list_not_useful_slitlets))
tempwidths = np.array([csu_config.csu_bar_slit_width(islitlet)
for islitlet in list_useful_slitlets])
widths_summary = summary(tempwidths)
logger.info('Statistics of useful slitlet widths (mm):')
logger.info('- npoints....: {0:d}'.format(widths_summary['npoints']))
logger.info('- mean.......: {0:7.3f}'.format(widths_summary['mean']))
logger.info('- median.....: {0:7.3f}'.format(widths_summary['median']))
logger.info('- std........: {0:7.3f}'.format(widths_summary['std']))
logger.info('- robust_std.: {0:7.3f}'.format(widths_summary['robust_std']))
# empirical transformation of slit width (mm) to pixels
sigma_broadening = cdelt1 * widths_summary['median']
# convolve location of catalogue lines to generate expected spectrum
xwave_reference, sp_reference = convolve_comb_lines(
catlines_reference_wave, catlines_reference_flux, sigma_broadening,
crpix1, crval1, cdelt1, naxis1
)
sp_reference /= sp_reference.max()
# generate image2d with expected lines
if save_intermediate_results:
image2d_expected_lines = np.tile(sp_reference, (naxis2, 1))
expected_cat_image = copy_img(input_image)
expected_cat_image[0].data = image2d_expected_lines
else:
expected_cat_image = None
if (abs(debugplot) % 10 != 0) or (pdf is not None):
ax = ximplotxy(xwave, sp_median, 'C1-',
xlabel='Wavelength (Angstroms, in vacuum)',
ylabel='Normalized number of counts',
title='Median spectrum',
label='observed spectrum', show=False)
# overplot reference catalogue lines
ax.stem(catlines_reference_wave, catlines_reference_flux, 'C4-',
markerfmt=' ', basefmt='C4-', label='tabulated lines')
# overplot convolved reference lines
ax.plot(xwave_reference, sp_reference, 'C0-',
label='expected spectrum')
ax.legend()
if pdf is not None:
pdf.savefig()
else:
pause_debugplot(debugplot=debugplot, pltshow=True)
# compute baseline signal in sp_median
baseline = np.percentile(sp_median[sp_median > 0], q=10)
if (abs(debugplot) % 10 != 0) or (pdf is not None):
fig = plt.figure()
ax = fig.add_subplot(111)
ax.hist(sp_median, bins=1000, log=True)
ax.set_xlabel('Normalized number of counts')
ax.set_ylabel('Number of pixels')
ax.set_title('Median spectrum')
ax.axvline(float(baseline), linestyle='--', color='grey')
if pdf is not None:
pdf.savefig()
else:
geometry = (0, 0, 640, 480)
set_window_geometry(geometry)
plt.show()
    # subtract baseline from sp_median (only pixels with signal above zero)
lok = np.where(sp_median > 0)
sp_median[lok] -= baseline
# compute global offset through periodic correlation
logger.info('Computing global offset')
global_offset, fpeak = periodic_corr1d(
sp_reference=sp_reference,
sp_offset=sp_median,
fminmax=None,
naround_zero=50,
plottitle='Median spectrum (cross-correlation)',
pdf=pdf,
debugplot=debugplot
)
logger.info('Global offset: {} pixels'.format(-global_offset))
missing_slitlets = rectwv_coeff.missing_slitlets
if refine_wavecalib_mode in [1, 11]:
# apply computed offset to obtain refined_rectwv_coeff_global
for islitlet in range(1, EMIR_NBARS + 1):
if islitlet not in missing_slitlets:
i = islitlet - 1
dumdict = refined_rectwv_coeff.contents[i]
dumdict['wpoly_coeff'][0] -= global_offset*cdelt1
elif refine_wavecalib_mode in [2, 12]:
# compute individual offset for each slitlet
logger.info('Computing individual offsets')
median_55sp = median_slitlets_rectified(input_image, mode=1)
offset_array = np.zeros(EMIR_NBARS)
xplot = []
yplot = []
xplot_skipped = []
yplot_skipped = []
cout = '0'
for islitlet in range(1, EMIR_NBARS + 1):
if islitlet in list_useful_slitlets:
i = islitlet - 1
sp_median = median_55sp[0].data[i, :]
lok = np.where(sp_median > 0)
if np.any(lok):
baseline = np.percentile(sp_median[lok], q=10)
sp_median[lok] -= baseline
sp_median /= sp_median.max()
offset_array[i], fpeak = periodic_corr1d(
sp_reference=sp_reference,
sp_offset=median_55sp[0].data[i, :],
fminmax=None,
naround_zero=50,
plottitle='slitlet #{0} (cross-correlation)'.format(
islitlet),
pdf=pdf,
debugplot=debugplot
)
else:
offset_array[i] = 0.0
dumdict = refined_rectwv_coeff.contents[i]
dumdict['wpoly_coeff'][0] -= offset_array[i]*cdelt1
xplot.append(islitlet)
yplot.append(-offset_array[i])
# second correction
wpoly_coeff_refined = check_wlcalib_sp(
sp=median_55sp[0].data[i, :],
crpix1=crpix1,
crval1=crval1-offset_array[i]*cdelt1,
cdelt1=cdelt1,
wv_master=catlines_reference_wave,
coeff_ini=dumdict['wpoly_coeff'],
naxis1_ini=EMIR_NAXIS1,
title='slitlet #{0} (after applying offset)'.format(
islitlet),
ylogscale=False,
pdf=pdf,
debugplot=debugplot
)
dumdict['wpoly_coeff'] = wpoly_coeff_refined
cout += '.'
else:
xplot_skipped.append(islitlet)
yplot_skipped.append(0)
cout += 'i'
if islitlet % 10 == 0:
if cout != 'i':
cout = str(islitlet // 10)
logger.info(cout)
# show offsets with opposite sign
stat_summary = summary(np.array(yplot))
logger.info('Statistics of individual slitlet offsets (pixels):')
logger.info('- npoints....: {0:d}'.format(stat_summary['npoints']))
logger.info('- mean.......: {0:7.3f}'.format(stat_summary['mean']))
logger.info('- median.....: {0:7.3f}'.format(stat_summary['median']))
logger.info('- std........: {0:7.3f}'.format(stat_summary['std']))
logger.info('- robust_std.: {0:7.3f}'.format(stat_summary[
'robust_std']))
if (abs(debugplot) % 10 != 0) or (pdf is not None):
ax = ximplotxy(xplot, yplot,
linestyle='', marker='o', color='C0',
xlabel='slitlet number',
ylabel='-offset (pixels) = offset to be applied',
title='cross-correlation result',
show=False, **{'label': 'individual slitlets'})
if len(xplot_skipped) > 0:
ax.plot(xplot_skipped, yplot_skipped, 'mx')
ax.axhline(-global_offset, linestyle='--', color='C1',
label='global offset')
ax.legend()
if pdf is not None:
pdf.savefig()
else:
pause_debugplot(debugplot=debugplot, pltshow=True)
else:
raise ValueError('Unexpected mode={}'.format(refine_wavecalib_mode))
# close output PDF file
if pdf is not None:
pdf.close()
# return result
return refined_rectwv_coeff, expected_cat_image
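# --- Editorial note -------------------------------------------------------
# Illustrative sketch, not part of PyEmir: the offset refinement above rests
# on a 1-D cross-correlation between the observed median spectrum and the
# expected (catalogue-based) spectrum. This minimal NumPy version recovers an
# integer pixel shift only; the actual computation is delegated to
# numina.array.wavecalib.crosscorrelation.periodic_corr1d, which also refines
# the peak position to sub-pixel accuracy.
def _demo_integer_offset(sp_reference, sp_offset):
    """Return the integer lag that maximizes the cross-correlation."""
    import numpy as np

    a = np.asarray(sp_reference, dtype=float)
    b = np.asarray(sp_offset, dtype=float)
    # Remove the mean so the correlation is driven by line features rather
    # than by the continuum level.
    a = a - a.mean()
    b = b - b.mean()
    corr = np.correlate(a, b, mode='full')        # lags -(len(b)-1) .. len(a)-1
    lags = np.arange(-(b.size - 1), a.size)
    return lags[np.argmax(corr)]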
| gpl-3.0 |
DSLituiev/scikit-learn | examples/model_selection/plot_validation_curve.py | 141 | 1931 | """
==========================
Plotting Validation Curves
==========================
In this plot you can see the training scores and validation scores of an SVM
for different values of the kernel parameter gamma. For very low values of
gamma, you can see that both the training score and the validation score are
low. This is called underfitting. Medium values of gamma will result in high
values for both scores, i.e. the classifier is performing fairly well. If gamma
is too high, the classifier will overfit, which means that the training score
is good but the validation score is poor.
"""
print(__doc__)
import matplotlib.pyplot as plt
import numpy as np
from sklearn.datasets import load_digits
from sklearn.svm import SVC
from sklearn.model_selection import validation_curve
digits = load_digits()
X, y = digits.data, digits.target
param_range = np.logspace(-6, -1, 5)
train_scores, test_scores = validation_curve(
SVC(), X, y, param_name="gamma", param_range=param_range,
cv=10, scoring="accuracy", n_jobs=1)
train_scores_mean = np.mean(train_scores, axis=1)
train_scores_std = np.std(train_scores, axis=1)
test_scores_mean = np.mean(test_scores, axis=1)
test_scores_std = np.std(test_scores, axis=1)
plt.title("Validation Curve with SVM")
plt.xlabel("$\gamma$")
plt.ylabel("Score")
plt.ylim(0.0, 1.1)
lw = 2
plt.semilogx(param_range, train_scores_mean, label="Training score",
color="darkorange", lw=lw)
plt.fill_between(param_range, train_scores_mean - train_scores_std,
train_scores_mean + train_scores_std, alpha=0.2,
color="darkorange", lw=lw)
plt.semilogx(param_range, test_scores_mean, label="Cross-validation score",
color="navy", lw=lw)
plt.fill_between(param_range, test_scores_mean - test_scores_std,
test_scores_mean + test_scores_std, alpha=0.2,
color="navy", lw=lw)
plt.legend(loc="best")
plt.show()
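# --- Editorial note -------------------------------------------------------
# Illustrative addition, not part of the original example: the gamma with the
# best mean cross-validation score can be read directly from the arrays
# computed above.
best_idx = np.argmax(test_scores_mean)
print("Best gamma: %g (mean CV accuracy %.3f)"
      % (param_range[best_idx], test_scores_mean[best_idx]))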
| bsd-3-clause |
Eric89GXL/mne-python | examples/visualization/plot_topo_customized.py | 15 | 1940 | """
========================================
Plot custom topographies for MEG sensors
========================================
This example exposes the :func:`~mne.viz.iter_topography` function that makes
it very easy to generate custom sensor topography plots.
Here we will plot the power spectrum of each channel on a topographic
layout.
"""
# Author: Denis A. Engemann <[email protected]>
#
# License: BSD (3-clause)
import numpy as np
import matplotlib.pyplot as plt
import mne
from mne.viz import iter_topography
from mne import io
from mne.time_frequency import psd_welch
from mne.datasets import sample
print(__doc__)
data_path = sample.data_path()
raw_fname = data_path + '/MEG/sample/sample_audvis_filt-0-40_raw.fif'
raw = io.read_raw_fif(raw_fname, preload=True)
raw.filter(1, 20, fir_design='firwin')
picks = mne.pick_types(raw.info, meg=True, exclude=[])
tmin, tmax = 0, 120 # use the first 120s of data
fmin, fmax = 2, 20 # look at frequencies between 2 and 20Hz
n_fft = 2048 # the FFT size (n_fft). Ideally a power of 2
psds, freqs = psd_welch(raw, picks=picks, tmin=tmin, tmax=tmax,
fmin=fmin, fmax=fmax)
psds = 20 * np.log10(psds) # scale to dB
def my_callback(ax, ch_idx):
"""
This block of code is executed once you click on one of the channel axes
in the plot. To work with the viz internals, this function should only take
two parameters, the axis and the channel or data index.
"""
ax.plot(freqs, psds[ch_idx], color='red')
ax.set_xlabel('Frequency (Hz)')
ax.set_ylabel('Power (dB)')
for ax, idx in iter_topography(raw.info,
fig_facecolor='white',
axis_facecolor='white',
axis_spinecolor='white',
on_pick=my_callback):
ax.plot(psds[idx], color='red')
plt.gcf().suptitle('Power spectral densities')
plt.show()
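# --- Editorial note -------------------------------------------------------
# Illustrative addition, not part of the original example: report which of the
# picked channels has the largest mean power in the computed band, using only
# the arrays defined above.
strongest = np.argmax(psds.mean(axis=1))
print('Channel with the highest mean power: %s'
      % raw.ch_names[picks[strongest]])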
| bsd-3-clause |
woodscn/scipy | scipy/signal/filter_design.py | 6 | 122824 | """Filter design.
"""
from __future__ import division, print_function, absolute_import
import warnings
import numpy
from numpy import (atleast_1d, poly, polyval, roots, real, asarray, allclose,
resize, pi, absolute, logspace, r_, sqrt, tan, log10,
arctan, arcsinh, sin, exp, cosh, arccosh, ceil, conjugate,
zeros, sinh, append, concatenate, prod, ones, array)
from numpy import mintypecode
import numpy as np
from scipy import special, optimize
from scipy.special import comb
from scipy.misc import factorial
from numpy.polynomial.polynomial import polyval as npp_polyval
import math
__all__ = ['findfreqs', 'freqs', 'freqz', 'tf2zpk', 'zpk2tf', 'normalize',
'lp2lp', 'lp2hp', 'lp2bp', 'lp2bs', 'bilinear', 'iirdesign',
'iirfilter', 'butter', 'cheby1', 'cheby2', 'ellip', 'bessel',
'band_stop_obj', 'buttord', 'cheb1ord', 'cheb2ord', 'ellipord',
'buttap', 'cheb1ap', 'cheb2ap', 'ellipap', 'besselap',
'BadCoefficients',
'tf2sos', 'sos2tf', 'zpk2sos', 'sos2zpk', 'group_delay',
'sosfreqz']
class BadCoefficients(UserWarning):
"""Warning about badly conditioned filter coefficients"""
pass
abs = absolute
def findfreqs(num, den, N):
"""
Find array of frequencies for computing the response of an analog filter.
Parameters
----------
num, den : array_like, 1-D
The polynomial coefficients of the numerator and denominator of the
transfer function of the filter or LTI system. The coefficients are
ordered from highest to lowest degree.
N : int
The length of the array to be computed.
Returns
-------
w : (N,) ndarray
A 1-D array of frequencies, logarithmically spaced.
Examples
--------
Find a set of nine frequencies that span the "interesting part" of the
frequency response for the filter with the transfer function
H(s) = s / (s^2 + 8s + 25)
>>> from scipy import signal
>>> signal.findfreqs([1, 0], [1, 8, 25], N=9)
array([ 1.00000000e-02, 3.16227766e-02, 1.00000000e-01,
3.16227766e-01, 1.00000000e+00, 3.16227766e+00,
1.00000000e+01, 3.16227766e+01, 1.00000000e+02])
"""
ep = atleast_1d(roots(den)) + 0j
tz = atleast_1d(roots(num)) + 0j
if len(ep) == 0:
ep = atleast_1d(-1000) + 0j
ez = r_['-1',
numpy.compress(ep.imag >= 0, ep, axis=-1),
numpy.compress((abs(tz) < 1e5) & (tz.imag >= 0), tz, axis=-1)]
integ = abs(ez) < 1e-10
hfreq = numpy.around(numpy.log10(numpy.max(3 * abs(ez.real + integ) +
1.5 * ez.imag)) + 0.5)
lfreq = numpy.around(numpy.log10(0.1 * numpy.min(abs(real(ez + integ)) +
2 * ez.imag)) - 0.5)
w = logspace(lfreq, hfreq, N)
return w
def freqs(b, a, worN=None, plot=None):
"""
Compute frequency response of analog filter.
Given the M-order numerator `b` and N-order denominator `a` of an analog
filter, compute its frequency response::
        H(w) = (b[0]*(jw)**M + b[1]*(jw)**(M-1) + ... + b[M]) /
               (a[0]*(jw)**N + a[1]*(jw)**(N-1) + ... + a[N])
Parameters
----------
b : array_like
Numerator of a linear filter.
a : array_like
Denominator of a linear filter.
worN : {None, int, array_like}, optional
If None, then compute at 200 frequencies around the interesting parts
of the response curve (determined by pole-zero locations). If a single
integer, then compute at that many frequencies. Otherwise, compute the
response at the angular frequencies (e.g. rad/s) given in `worN`.
plot : callable, optional
A callable that takes two arguments. If given, the return parameters
`w` and `h` are passed to plot. Useful for plotting the frequency
response inside `freqs`.
Returns
-------
w : ndarray
The angular frequencies at which `h` was computed.
h : ndarray
The frequency response.
See Also
--------
freqz : Compute the frequency response of a digital filter.
Notes
-----
    Using Matplotlib's "plot" function as the callable for `plot` produces
    unexpected results: it plots the real part of the complex transfer
    function, not the magnitude. Try ``lambda w, h: plot(w, abs(h))`` instead.
Examples
--------
>>> from scipy.signal import freqs, iirfilter
>>> b, a = iirfilter(4, [1, 10], 1, 60, analog=True, ftype='cheby1')
>>> w, h = freqs(b, a, worN=np.logspace(-1, 2, 1000))
>>> import matplotlib.pyplot as plt
>>> plt.semilogx(w, 20 * np.log10(abs(h)))
>>> plt.xlabel('Frequency')
>>> plt.ylabel('Amplitude response [dB]')
>>> plt.grid()
>>> plt.show()
"""
if worN is None:
w = findfreqs(b, a, 200)
elif isinstance(worN, int):
N = worN
w = findfreqs(b, a, N)
else:
w = worN
w = atleast_1d(w)
s = 1j * w
h = polyval(b, s) / polyval(a, s)
if plot is not None:
plot(w, h)
return w, h
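# --- Editorial note -------------------------------------------------------
# Illustrative sketch, not part of the original module: using the `plot`
# callback of `freqs` with the magnitude workaround suggested in the Notes
# section above. The filter coefficients are arbitrary.
def _demo_freqs_plot_callback():
    import numpy as np
    import matplotlib.pyplot as plt

    b, a = [1, 0], [1, 8, 25]          # H(s) = s / (s**2 + 8*s + 25)
    # Plot |H(w)| instead of the real part of the complex response.
    w, h = freqs(b, a, worN=np.logspace(-2, 2, 500),
                 plot=lambda w, h: plt.semilogx(w, np.abs(h)))
    plt.xlabel('Angular frequency [rad/s]')
    plt.ylabel('|H(w)|')
    return w, h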
def freqz(b, a=1, worN=None, whole=False, plot=None):
"""
Compute the frequency response of a digital filter.
Given the M-order numerator `b` and N-order denominator `a` of a digital
filter, compute its frequency response::
        H(e^jw) = B(e^jw) / A(e^jw)
                = (b[0] + b[1]*e^(-jw) + ... + b[M]*e^(-jw*M)) /
                  (a[0] + a[1]*e^(-jw) + ... + a[N]*e^(-jw*N))
Parameters
----------
b : array_like
numerator of a linear filter
a : array_like
denominator of a linear filter
worN : {None, int, array_like}, optional
If None (default), then compute at 512 frequencies equally spaced
around the unit circle.
If a single integer, then compute at that many frequencies.
If an array_like, compute the response at the frequencies given (in
radians/sample).
whole : bool, optional
Normally, frequencies are computed from 0 to the Nyquist frequency,
pi radians/sample (upper-half of unit-circle). If `whole` is True,
compute frequencies from 0 to 2*pi radians/sample.
plot : callable
A callable that takes two arguments. If given, the return parameters
`w` and `h` are passed to plot. Useful for plotting the frequency
response inside `freqz`.
Returns
-------
w : ndarray
The normalized frequencies at which `h` was computed, in
radians/sample.
h : ndarray
The frequency response, as complex numbers.
See Also
--------
sosfreqz
Notes
-----
    Using Matplotlib's "plot" function as the callable for `plot` produces
    unexpected results: it plots the real part of the complex transfer
    function, not the magnitude. Try ``lambda w, h: plot(w, abs(h))`` instead.
Examples
--------
>>> from scipy import signal
>>> b = signal.firwin(80, 0.5, window=('kaiser', 8))
>>> w, h = signal.freqz(b)
>>> import matplotlib.pyplot as plt
>>> fig = plt.figure()
>>> plt.title('Digital filter frequency response')
>>> ax1 = fig.add_subplot(111)
>>> plt.plot(w, 20 * np.log10(abs(h)), 'b')
>>> plt.ylabel('Amplitude [dB]', color='b')
>>> plt.xlabel('Frequency [rad/sample]')
>>> ax2 = ax1.twinx()
>>> angles = np.unwrap(np.angle(h))
>>> plt.plot(w, angles, 'g')
>>> plt.ylabel('Angle (radians)', color='g')
>>> plt.grid()
>>> plt.axis('tight')
>>> plt.show()
"""
b, a = map(atleast_1d, (b, a))
if whole:
lastpoint = 2 * pi
else:
lastpoint = pi
if worN is None:
N = 512
w = numpy.linspace(0, lastpoint, N, endpoint=False)
elif isinstance(worN, int):
N = worN
w = numpy.linspace(0, lastpoint, N, endpoint=False)
else:
w = worN
w = atleast_1d(w)
zm1 = exp(-1j * w)
h = polyval(b[::-1], zm1) / polyval(a[::-1], zm1)
if plot is not None:
plot(w, h)
return w, h
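# --- Editorial note -------------------------------------------------------
# Illustrative sketch, not part of the original module: evaluating `freqz` at
# explicit normalized frequencies. For b = [1, 1] (a two-tap moving sum),
# H(e^jw) = 1 + e^(-jw), so |H| is 2 at w=0, sqrt(2) at w=pi/2 and 0 at w=pi.
def _demo_freqz_at_given_frequencies():
    import numpy as np

    w = np.array([0.0, np.pi / 2, np.pi])
    w_out, h = freqz([1, 1], worN=w)
    assert np.allclose(np.abs(h), [2.0, np.sqrt(2.0), 0.0])
    return w_out, h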
def group_delay(system, w=None, whole=False):
r"""Compute the group delay of a digital filter.
The group delay measures by how many samples amplitude envelopes of
various spectral components of a signal are delayed by a filter.
It is formally defined as the derivative of continuous (unwrapped) phase::
        D(w) = - d/dw [arg(H(e^jw))]
Parameters
----------
system : tuple of array_like (b, a)
Numerator and denominator coefficients of a filter transfer function.
w : {None, int, array-like}, optional
If None (default), then compute at 512 frequencies equally spaced
around the unit circle.
If a single integer, then compute at that many frequencies.
If array, compute the delay at the frequencies given
(in radians/sample).
whole : bool, optional
Normally, frequencies are computed from 0 to the Nyquist frequency,
pi radians/sample (upper-half of unit-circle). If `whole` is True,
compute frequencies from 0 to ``2*pi`` radians/sample.
Returns
-------
w : ndarray
The normalized frequencies at which the group delay was computed,
in radians/sample.
gd : ndarray
The group delay.
Notes
-----
The similar function in MATLAB is called `grpdelay`.
If the transfer function :math:`H(z)` has zeros or poles on the unit
circle, the group delay at corresponding frequencies is undefined.
    When such a case arises, a warning is raised and the group delay
is set to 0 at those frequencies.
For the details of numerical computation of the group delay refer to [1]_.
    .. versionadded:: 0.16.0
See Also
--------
freqz : Frequency response of a digital filter
References
----------
.. [1] Richard G. Lyons, "Understanding Digital Signal Processing,
3rd edition", p. 830.
Examples
--------
>>> from scipy import signal
>>> b, a = signal.iirdesign(0.1, 0.3, 5, 50, ftype='cheby1')
>>> w, gd = signal.group_delay((b, a))
>>> import matplotlib.pyplot as plt
>>> plt.title('Digital filter group delay')
>>> plt.plot(w, gd)
>>> plt.ylabel('Group delay [samples]')
>>> plt.xlabel('Frequency [rad/sample]')
>>> plt.show()
"""
if w is None:
w = 512
if isinstance(w, int):
if whole:
w = np.linspace(0, 2 * pi, w, endpoint=False)
else:
w = np.linspace(0, pi, w, endpoint=False)
w = np.atleast_1d(w)
b, a = map(np.atleast_1d, system)
c = np.convolve(b, a[::-1])
cr = c * np.arange(c.size)
z = np.exp(-1j * w)
num = np.polyval(cr[::-1], z)
den = np.polyval(c[::-1], z)
singular = np.absolute(den) < 10 * EPSILON
if np.any(singular):
warnings.warn(
"The group delay is singular at frequencies [{0}], setting to 0".
format(", ".join("{0:.3f}".format(ws) for ws in w[singular]))
)
gd = np.zeros_like(w)
gd[~singular] = np.real(num[~singular] / den[~singular]) - a.size + 1
return w, gd
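# --- Editorial note -------------------------------------------------------
# Illustrative sketch, not part of the original module: a pure two-sample
# delay, H(z) = z**-2 (b = [0, 0, 1], a = [1]), has a constant group delay of
# exactly 2 samples at every frequency, which `group_delay` reproduces.
def _demo_group_delay_pure_delay():
    import numpy as np

    w, gd = group_delay(([0, 0, 1], [1]))
    assert np.allclose(gd, 2.0)
    return w, gd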
def _validate_sos(sos):
"""Helper to validate a SOS input"""
sos = np.atleast_2d(sos)
if sos.ndim != 2:
raise ValueError('sos array must be 2D')
n_sections, m = sos.shape
if m != 6:
raise ValueError('sos array must be shape (n_sections, 6)')
if not (sos[:, 3] == 1).all():
raise ValueError('sos[:, 3] should be all ones')
return sos, n_sections
def sosfreqz(sos, worN=None, whole=False):
"""
Compute the frequency response of a digital filter in SOS format.
Given `sos`, an array with shape (n, 6) of second order sections of
a digital filter, compute the frequency response of the system function::
        H(z) = (B0(z)/A0(z)) * (B1(z)/A1(z)) * ... * (B{n-1}(z)/A{n-1}(z))
for z = exp(omega*1j), where B{k}(z) and A{k}(z) are numerator and
denominator of the transfer function of the k-th second order section.
Parameters
----------
sos : array_like
Array of second-order filter coefficients, must have shape
``(n_sections, 6)``. Each row corresponds to a second-order
section, with the first three columns providing the numerator
coefficients and the last three providing the denominator
coefficients.
worN : {None, int, array_like}, optional
If None (default), then compute at 512 frequencies equally spaced
around the unit circle.
If a single integer, then compute at that many frequencies.
If an array_like, compute the response at the frequencies given (in
radians/sample).
whole : bool, optional
Normally, frequencies are computed from 0 to the Nyquist frequency,
pi radians/sample (upper-half of unit-circle). If `whole` is True,
compute frequencies from 0 to 2*pi radians/sample.
Returns
-------
w : ndarray
The normalized frequencies at which `h` was computed, in
radians/sample.
h : ndarray
The frequency response, as complex numbers.
See Also
--------
freqz, sosfilt
Notes
-----
.. versionadded:: 0.19.0
Examples
--------
Design a 15th-order bandpass filter in SOS format.
>>> from scipy import signal
>>> sos = signal.ellip(15, 0.5, 60, (0.2, 0.4), btype='bandpass',
... output='sos')
Compute the frequency response at 1500 points from DC to Nyquist.
>>> w, h = signal.sosfreqz(sos, worN=1500)
Plot the response.
>>> import matplotlib.pyplot as plt
>>> plt.subplot(2, 1, 1)
>>> db = 20*np.log10(np.abs(h))
>>> plt.plot(w/np.pi, db)
>>> plt.ylim(-75, 5)
>>> plt.grid(True)
>>> plt.yticks([0, -20, -40, -60])
>>> plt.ylabel('Gain [dB]')
>>> plt.title('Frequency Response')
>>> plt.subplot(2, 1, 2)
>>> plt.plot(w/np.pi, np.angle(h))
>>> plt.grid(True)
>>> plt.yticks([-np.pi, -0.5*np.pi, 0, 0.5*np.pi, np.pi],
... ['$-\pi$', '$-\pi/2$', '0', '$\pi/2$', '$\pi$'])
>>> plt.ylabel('Phase [rad]')
>>> plt.xlabel('Normalized frequency (1.0 = Nyquist)')
>>> plt.show()
If the same filter is implemented as a single transfer function,
numerical error corrupts the frequency response:
>>> b, a = signal.ellip(15, 0.5, 60, (0.2, 0.4), btype='bandpass',
... output='ba')
>>> w, h = signal.freqz(b, a, worN=1500)
>>> plt.subplot(2, 1, 1)
>>> db = 20*np.log10(np.abs(h))
>>> plt.plot(w/np.pi, db)
>>> plt.subplot(2, 1, 2)
>>> plt.plot(w/np.pi, np.angle(h))
>>> plt.show()
"""
sos, n_sections = _validate_sos(sos)
if n_sections == 0:
raise ValueError('Cannot compute frequencies with no sections')
h = 1.
for row in sos:
w, rowh = freqz(row[:3], row[3:], worN=worN, whole=whole)
h *= rowh
return w, h
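# --- Editorial note -------------------------------------------------------
# Illustrative sketch, not part of the original module: for a single second-
# order section, `sosfreqz` and `freqz` evaluate the same transfer function.
# The coefficients below are arbitrary (a0 must be 1, as enforced above).
def _demo_sosfreqz_single_section():
    import numpy as np

    b = [1.0, 0.5, 0.25]
    a = [1.0, -0.3, 0.1]
    sos = np.array([b + a])            # one row: [b0, b1, b2, a0, a1, a2]
    w1, h1 = sosfreqz(sos, worN=256)
    w2, h2 = freqz(b, a, worN=256)
    assert np.allclose(h1, h2)
    return w1, h1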
def _cplxreal(z, tol=None):
"""
Split into complex and real parts, combining conjugate pairs.
The 1D input vector `z` is split up into its complex (`zc`) and real (`zr`)
elements. Every complex element must be part of a complex-conjugate pair,
which are combined into a single number (with positive imaginary part) in
the output. Two complex numbers are considered a conjugate pair if their
real and imaginary parts differ in magnitude by less than ``tol * abs(z)``.
Parameters
----------
z : array_like
Vector of complex numbers to be sorted and split
tol : float, optional
Relative tolerance for testing realness and conjugate equality.
Default is ``100 * spacing(1)`` of `z`'s data type (i.e. 2e-14 for
float64)
Returns
-------
zc : ndarray
Complex elements of `z`, with each pair represented by a single value
having positive imaginary part, sorted first by real part, and then
by magnitude of imaginary part. The pairs are averaged when combined
to reduce error.
zr : ndarray
Real elements of `z` (those having imaginary part less than
`tol` times their magnitude), sorted by value.
Raises
------
ValueError
If there are any complex numbers in `z` for which a conjugate
cannot be found.
See Also
--------
_cplxpair
Examples
--------
>>> a = [4, 3, 1, 2-2j, 2+2j, 2-1j, 2+1j, 2-1j, 2+1j, 1+1j, 1-1j]
>>> zc, zr = _cplxreal(a)
    >>> print(zc)
[ 1.+1.j 2.+1.j 2.+1.j 2.+2.j]
    >>> print(zr)
[ 1. 3. 4.]
"""
z = atleast_1d(z)
if z.size == 0:
return z, z
elif z.ndim != 1:
raise ValueError('_cplxreal only accepts 1D input')
if tol is None:
# Get tolerance from dtype of input
tol = 100 * np.finfo((1.0 * z).dtype).eps
# Sort by real part, magnitude of imaginary part (speed up further sorting)
z = z[np.lexsort((abs(z.imag), z.real))]
# Split reals from conjugate pairs
real_indices = abs(z.imag) <= tol * abs(z)
zr = z[real_indices].real
if len(zr) == len(z):
# Input is entirely real
return array([]), zr
# Split positive and negative halves of conjugates
z = z[~real_indices]
zp = z[z.imag > 0]
zn = z[z.imag < 0]
if len(zp) != len(zn):
raise ValueError('Array contains complex value with no matching '
'conjugate.')
# Find runs of (approximately) the same real part
same_real = np.diff(zp.real) <= tol * abs(zp[:-1])
diffs = numpy.diff(concatenate(([0], same_real, [0])))
run_starts = numpy.where(diffs > 0)[0]
run_stops = numpy.where(diffs < 0)[0]
# Sort each run by their imaginary parts
for i in range(len(run_starts)):
start = run_starts[i]
stop = run_stops[i] + 1
for chunk in (zp[start:stop], zn[start:stop]):
chunk[...] = chunk[np.lexsort([abs(chunk.imag)])]
# Check that negatives match positives
if any(abs(zp - zn.conj()) > tol * abs(zn)):
raise ValueError('Array contains complex value with no matching '
'conjugate.')
# Average out numerical inaccuracy in real vs imag parts of pairs
zc = (zp + zn.conj()) / 2
return zc, zr
def _cplxpair(z, tol=None):
"""
Sort into pairs of complex conjugates.
Complex conjugates in `z` are sorted by increasing real part. In each
pair, the number with negative imaginary part appears first.
If pairs have identical real parts, they are sorted by increasing
imaginary magnitude.
Two complex numbers are considered a conjugate pair if their real and
imaginary parts differ in magnitude by less than ``tol * abs(z)``. The
pairs are forced to be exact complex conjugates by averaging the positive
and negative values.
Purely real numbers are also sorted, but placed after the complex
conjugate pairs. A number is considered real if its imaginary part is
smaller than `tol` times the magnitude of the number.
Parameters
----------
z : array_like
1-dimensional input array to be sorted.
tol : float, optional
Relative tolerance for testing realness and conjugate equality.
Default is ``100 * spacing(1)`` of `z`'s data type (i.e. 2e-14 for
float64)
Returns
-------
y : ndarray
Complex conjugate pairs followed by real numbers.
Raises
------
ValueError
If there are any complex numbers in `z` for which a conjugate
cannot be found.
See Also
--------
_cplxreal
Examples
--------
>>> a = [4, 3, 1, 2-2j, 2+2j, 2-1j, 2+1j, 2-1j, 2+1j, 1+1j, 1-1j]
>>> z = _cplxpair(a)
>>> print(z)
[ 1.-1.j 1.+1.j 2.-1.j 2.+1.j 2.-1.j 2.+1.j 2.-2.j 2.+2.j 1.+0.j
3.+0.j 4.+0.j]
"""
z = atleast_1d(z)
if z.size == 0 or np.isrealobj(z):
return np.sort(z)
if z.ndim != 1:
raise ValueError('z must be 1-dimensional')
zc, zr = _cplxreal(z, tol)
# Interleave complex values and their conjugates, with negative imaginary
# parts first in each pair
zc = np.dstack((zc.conj(), zc)).flatten()
z = np.append(zc, zr)
return z
def tf2zpk(b, a):
r"""Return zero, pole, gain (z, p, k) representation from a numerator,
denominator representation of a linear filter.
Parameters
----------
b : array_like
Numerator polynomial coefficients.
a : array_like
Denominator polynomial coefficients.
Returns
-------
z : ndarray
Zeros of the transfer function.
p : ndarray
Poles of the transfer function.
k : float
System gain.
Notes
-----
If some values of `b` are too close to 0, they are removed. In that case,
a BadCoefficients warning is emitted.
The `b` and `a` arrays are interpreted as coefficients for positive,
descending powers of the transfer function variable. So the inputs
:math:`b = [b_0, b_1, ..., b_M]` and :math:`a =[a_0, a_1, ..., a_N]`
can represent an analog filter of the form:
.. math::
H(s) = \frac
{b_0 s^M + b_1 s^{(M-1)} + \cdots + b_M}
{a_0 s^N + a_1 s^{(N-1)} + \cdots + a_N}
or a discrete-time filter of the form:
.. math::
H(z) = \frac
{b_0 z^M + b_1 z^{(M-1)} + \cdots + b_M}
{a_0 z^N + a_1 z^{(N-1)} + \cdots + a_N}
This "positive powers" form is found more commonly in controls
engineering. If `M` and `N` are equal (which is true for all filters
generated by the bilinear transform), then this happens to be equivalent
to the "negative powers" discrete-time form preferred in DSP:
.. math::
H(z) = \frac
{b_0 + b_1 z^{-1} + \cdots + b_M z^{-M}}
{a_0 + a_1 z^{-1} + \cdots + a_N z^{-N}}
Although this is true for common filters, remember that this is not true
in the general case. If `M` and `N` are not equal, the discrete-time
transfer function coefficients must first be converted to the "positive
powers" form before finding the poles and zeros.
"""
b, a = normalize(b, a)
b = (b + 0.0) / a[0]
a = (a + 0.0) / a[0]
k = b[0]
b /= b[0]
z = roots(b)
p = roots(a)
return z, p, k
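# --- Editorial note -------------------------------------------------------
# Illustrative sketch, not part of the original module: a first-order round
# trip between the transfer-function and zero-pole-gain representations.
# H(z) = (z - 1) / (z - 0.5) has a zero at 1, a pole at 0.5 and unit gain.
def _demo_tf_zpk_roundtrip():
    import numpy as np

    z, p, k = tf2zpk([1.0, -1.0], [1.0, -0.5])
    assert np.allclose(z, [1.0]) and np.allclose(p, [0.5]) and np.isclose(k, 1.0)
    b, a = zpk2tf(z, p, k)
    assert np.allclose(b, [1.0, -1.0]) and np.allclose(a, [1.0, -0.5])
    return z, p, k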
def zpk2tf(z, p, k):
"""
Return polynomial transfer function representation from zeros and poles
Parameters
----------
z : array_like
Zeros of the transfer function.
p : array_like
Poles of the transfer function.
k : float
System gain.
Returns
-------
b : ndarray
Numerator polynomial coefficients.
a : ndarray
Denominator polynomial coefficients.
"""
z = atleast_1d(z)
k = atleast_1d(k)
if len(z.shape) > 1:
temp = poly(z[0])
b = zeros((z.shape[0], z.shape[1] + 1), temp.dtype.char)
if len(k) == 1:
k = [k[0]] * z.shape[0]
for i in range(z.shape[0]):
b[i] = k[i] * poly(z[i])
else:
b = k * poly(z)
a = atleast_1d(poly(p))
# Use real output if possible. Copied from numpy.poly, since
# we can't depend on a specific version of numpy.
if issubclass(b.dtype.type, numpy.complexfloating):
# if complex roots are all complex conjugates, the roots are real.
roots = numpy.asarray(z, complex)
pos_roots = numpy.compress(roots.imag > 0, roots)
neg_roots = numpy.conjugate(numpy.compress(roots.imag < 0, roots))
if len(pos_roots) == len(neg_roots):
if numpy.all(numpy.sort_complex(neg_roots) ==
numpy.sort_complex(pos_roots)):
b = b.real.copy()
if issubclass(a.dtype.type, numpy.complexfloating):
# if complex roots are all complex conjugates, the roots are real.
roots = numpy.asarray(p, complex)
pos_roots = numpy.compress(roots.imag > 0, roots)
neg_roots = numpy.conjugate(numpy.compress(roots.imag < 0, roots))
if len(pos_roots) == len(neg_roots):
if numpy.all(numpy.sort_complex(neg_roots) ==
numpy.sort_complex(pos_roots)):
a = a.real.copy()
return b, a
def tf2sos(b, a, pairing='nearest'):
"""
Return second-order sections from transfer function representation
Parameters
----------
b : array_like
Numerator polynomial coefficients.
a : array_like
Denominator polynomial coefficients.
pairing : {'nearest', 'keep_odd'}, optional
The method to use to combine pairs of poles and zeros into sections.
See `zpk2sos`.
Returns
-------
sos : ndarray
Array of second-order filter coefficients, with shape
``(n_sections, 6)``. See `sosfilt` for the SOS filter format
specification.
See Also
--------
zpk2sos, sosfilt
Notes
-----
It is generally discouraged to convert from TF to SOS format, since doing
so usually will not improve numerical precision errors. Instead, consider
designing filters in ZPK format and converting directly to SOS. TF is
converted to SOS by first converting to ZPK format, then converting
ZPK to SOS.
.. versionadded:: 0.16.0
"""
return zpk2sos(*tf2zpk(b, a), pairing=pairing)
def sos2tf(sos):
"""
Return a single transfer function from a series of second-order sections
Parameters
----------
sos : array_like
Array of second-order filter coefficients, must have shape
``(n_sections, 6)``. See `sosfilt` for the SOS filter format
specification.
Returns
-------
b : ndarray
Numerator polynomial coefficients.
a : ndarray
Denominator polynomial coefficients.
Notes
-----
.. versionadded:: 0.16.0
"""
sos = np.asarray(sos)
b = [1.]
a = [1.]
n_sections = sos.shape[0]
for section in range(n_sections):
b = np.polymul(b, sos[section, :3])
a = np.polymul(a, sos[section, 3:])
return b, a
def sos2zpk(sos):
"""
Return zeros, poles, and gain of a series of second-order sections
Parameters
----------
sos : array_like
Array of second-order filter coefficients, must have shape
``(n_sections, 6)``. See `sosfilt` for the SOS filter format
specification.
Returns
-------
z : ndarray
Zeros of the transfer function.
p : ndarray
Poles of the transfer function.
k : float
System gain.
Notes
-----
.. versionadded:: 0.16.0
"""
sos = np.asarray(sos)
n_sections = sos.shape[0]
z = np.empty(n_sections*2, np.complex128)
p = np.empty(n_sections*2, np.complex128)
k = 1.
for section in range(n_sections):
zpk = tf2zpk(sos[section, :3], sos[section, 3:])
z[2*section:2*(section+1)] = zpk[0]
p[2*section:2*(section+1)] = zpk[1]
k *= zpk[2]
return z, p, k
def _nearest_real_complex_idx(fro, to, which):
"""Get the next closest real or complex element based on distance"""
assert which in ('real', 'complex')
order = np.argsort(np.abs(fro - to))
mask = np.isreal(fro[order])
if which == 'complex':
mask = ~mask
return order[np.where(mask)[0][0]]
def zpk2sos(z, p, k, pairing='nearest'):
"""
Return second-order sections from zeros, poles, and gain of a system
Parameters
----------
z : array_like
Zeros of the transfer function.
p : array_like
Poles of the transfer function.
k : float
System gain.
pairing : {'nearest', 'keep_odd'}, optional
The method to use to combine pairs of poles and zeros into sections.
See Notes below.
Returns
-------
sos : ndarray
Array of second-order filter coefficients, with shape
``(n_sections, 6)``. See `sosfilt` for the SOS filter format
specification.
See Also
--------
sosfilt
Notes
-----
The algorithm used to convert ZPK to SOS format is designed to
minimize errors due to numerical precision issues. The pairing
algorithm attempts to minimize the peak gain of each biquadratic
section. This is done by pairing poles with the nearest zeros, starting
with the poles closest to the unit circle.
*Algorithms*
The current algorithms are designed specifically for use with digital
filters. (The output coefficients are not correct for analog filters.)
The steps in the ``pairing='nearest'`` and ``pairing='keep_odd'``
algorithms are mostly shared. The ``nearest`` algorithm attempts to
minimize the peak gain, while ``'keep_odd'`` minimizes peak gain under
the constraint that odd-order systems should retain one section
as first order. The algorithm steps are as follows:
As a pre-processing step, add poles or zeros to the origin as
necessary to obtain the same number of poles and zeros for pairing.
If ``pairing == 'nearest'`` and there are an odd number of poles,
add an additional pole and a zero at the origin.
The following steps are then iterated over until no more poles or
zeros remain:
1. Take the (next remaining) pole (complex or real) closest to the
unit circle to begin a new filter section.
2. If the pole is real and there are no other remaining real poles [#]_,
add the closest real zero to the section and leave it as a first
order section. Note that after this step we are guaranteed to be
left with an even number of real poles, complex poles, real zeros,
and complex zeros for subsequent pairing iterations.
3. Else:
1. If the pole is complex and the zero is the only remaining real
zero*, then pair the pole with the *next* closest zero
(guaranteed to be complex). This is necessary to ensure that
there will be a real zero remaining to eventually create a
first-order section (thus keeping the odd order).
2. Else pair the pole with the closest remaining zero (complex or
real).
3. Proceed to complete the second-order section by adding another
pole and zero to the current pole and zero in the section:
1. If the current pole and zero are both complex, add their
conjugates.
2. Else if the pole is complex and the zero is real, add the
conjugate pole and the next closest real zero.
3. Else if the pole is real and the zero is complex, add the
conjugate zero and the real pole closest to those zeros.
4. Else (we must have a real pole and real zero) add the next
real pole closest to the unit circle, and then add the real
zero closest to that pole.
.. [#] This conditional can only be met for specific odd-order inputs
with the ``pairing == 'keep_odd'`` method.
.. versionadded:: 0.16.0
Examples
--------
Design a 6th order low-pass elliptic digital filter for a system with a
sampling rate of 8000 Hz that has a pass-band corner frequency of
1000 Hz. The ripple in the pass-band should not exceed 0.087 dB, and
the attenuation in the stop-band should be at least 90 dB.
In the following call to `signal.ellip`, we could use ``output='sos'``,
but for this example, we'll use ``output='zpk'``, and then convert to SOS
format with `zpk2sos`:
>>> from scipy import signal
>>> z, p, k = signal.ellip(6, 0.087, 90, 1000/(0.5*8000), output='zpk')
Now convert to SOS format.
>>> sos = signal.zpk2sos(z, p, k)
The coefficients of the numerators of the sections:
>>> sos[:, :3]
array([[ 0.0014154 , 0.00248707, 0.0014154 ],
[ 1. , 0.72965193, 1. ],
[ 1. , 0.17594966, 1. ]])
The symmetry in the coefficients occurs because all the zeros are on the
unit circle.
The coefficients of the denominators of the sections:
>>> sos[:, 3:]
array([[ 1. , -1.32543251, 0.46989499],
[ 1. , -1.26117915, 0.6262586 ],
[ 1. , -1.25707217, 0.86199667]])
The next example shows the effect of the `pairing` option. We have a
system with three poles and three zeros, so the SOS array will have
shape (2, 6). This means there is, in effect, an extra pole and an extra
zero at the origin in the SOS representation.
>>> z1 = np.array([-1, -0.5-0.5j, -0.5+0.5j])
>>> p1 = np.array([0.75, 0.8+0.1j, 0.8-0.1j])
With ``pairing='nearest'`` (the default), we obtain
>>> signal.zpk2sos(z1, p1, 1)
array([[ 1. , 1. , 0.5 , 1. , -0.75, 0. ],
[ 1. , 1. , 0. , 1. , -1.6 , 0.65]])
The first section has the zeros {-0.5-0.5j, -0.5+0.5j} and the poles
{0, 0.75}, and the second section has the zeros {-1, 0} and poles
{0.8+0.1j, 0.8-0.1j}. Note that the extra pole and zero at the origin
have been assigned to different sections.
With ``pairing='keep_odd'``, we obtain:
>>> signal.zpk2sos(z1, p1, 1, pairing='keep_odd')
array([[ 1. , 1. , 0. , 1. , -0.75, 0. ],
[ 1. , 1. , 0.5 , 1. , -1.6 , 0.65]])
The extra pole and zero at the origin are in the same section.
The first section is, in effect, a first-order section.
"""
# TODO in the near future:
# 1. Add SOS capability to `filtfilt`, `freqz`, etc. somehow (#3259).
# 2. Make `decimate` use `sosfilt` instead of `lfilter`.
# 3. Make sosfilt automatically simplify sections to first order
# when possible. Note this might make `sosfiltfilt` a bit harder (ICs).
# 4. Further optimizations of the section ordering / pole-zero pairing.
# See the wiki for other potential issues.
valid_pairings = ['nearest', 'keep_odd']
if pairing not in valid_pairings:
raise ValueError('pairing must be one of %s, not %s'
% (valid_pairings, pairing))
if len(z) == len(p) == 0:
return array([[k, 0., 0., 1., 0., 0.]])
# ensure we have the same number of poles and zeros, and make copies
p = np.concatenate((p, np.zeros(max(len(z) - len(p), 0))))
z = np.concatenate((z, np.zeros(max(len(p) - len(z), 0))))
n_sections = (max(len(p), len(z)) + 1) // 2
sos = zeros((n_sections, 6))
if len(p) % 2 == 1 and pairing == 'nearest':
p = np.concatenate((p, [0.]))
z = np.concatenate((z, [0.]))
assert len(p) == len(z)
# Ensure we have complex conjugate pairs
# (note that _cplxreal only gives us one element of each complex pair):
z = np.concatenate(_cplxreal(z))
p = np.concatenate(_cplxreal(p))
p_sos = np.zeros((n_sections, 2), np.complex128)
z_sos = np.zeros_like(p_sos)
for si in range(n_sections):
# Select the next "worst" pole
p1_idx = np.argmin(np.abs(1 - np.abs(p)))
p1 = p[p1_idx]
p = np.delete(p, p1_idx)
# Pair that pole with a zero
if np.isreal(p1) and np.isreal(p).sum() == 0:
# Special case to set a first-order section
z1_idx = _nearest_real_complex_idx(z, p1, 'real')
z1 = z[z1_idx]
z = np.delete(z, z1_idx)
p2 = z2 = 0
else:
if not np.isreal(p1) and np.isreal(z).sum() == 1:
# Special case to ensure we choose a complex zero to pair
# with so later (setting up a first-order section)
z1_idx = _nearest_real_complex_idx(z, p1, 'complex')
assert not np.isreal(z[z1_idx])
else:
# Pair the pole with the closest zero (real or complex)
z1_idx = np.argmin(np.abs(p1 - z))
z1 = z[z1_idx]
z = np.delete(z, z1_idx)
# Now that we have p1 and z1, figure out what p2 and z2 need to be
if not np.isreal(p1):
if not np.isreal(z1): # complex pole, complex zero
p2 = p1.conj()
z2 = z1.conj()
else: # complex pole, real zero
p2 = p1.conj()
z2_idx = _nearest_real_complex_idx(z, p1, 'real')
z2 = z[z2_idx]
assert np.isreal(z2)
z = np.delete(z, z2_idx)
else:
if not np.isreal(z1): # real pole, complex zero
z2 = z1.conj()
p2_idx = _nearest_real_complex_idx(p, z1, 'real')
p2 = p[p2_idx]
assert np.isreal(p2)
else: # real pole, real zero
# pick the next "worst" pole to use
idx = np.where(np.isreal(p))[0]
assert len(idx) > 0
p2_idx = idx[np.argmin(np.abs(np.abs(p[idx]) - 1))]
p2 = p[p2_idx]
# find a real zero to match the added pole
assert np.isreal(p2)
z2_idx = _nearest_real_complex_idx(z, p2, 'real')
z2 = z[z2_idx]
assert np.isreal(z2)
z = np.delete(z, z2_idx)
p = np.delete(p, p2_idx)
p_sos[si] = [p1, p2]
z_sos[si] = [z1, z2]
assert len(p) == len(z) == 0 # we've consumed all poles and zeros
del p, z
# Construct the system, reversing order so the "worst" are last
p_sos = np.reshape(p_sos[::-1], (n_sections, 2))
z_sos = np.reshape(z_sos[::-1], (n_sections, 2))
gains = np.ones(n_sections)
gains[0] = k
for si in range(n_sections):
x = zpk2tf(z_sos[si], p_sos[si], gains[si])
sos[si] = np.concatenate(x)
return sos
def _align_nums(nums):
"""Aligns the shapes of multiple numerators.
Given an array of numerator coefficient arrays [[a_1, a_2,...,
a_n],..., [b_1, b_2,..., b_m]], this function pads shorter numerator
arrays with zero's so that all numerators have the same length. Such
alignment is necessary for functions like 'tf2ss', which needs the
alignment when dealing with SIMO transfer functions.
Parameters
----------
nums: array_like
Numerator or list of numerators. Not necessarily with same length.
Returns
-------
nums: array
The numerator. If `nums` input was a list of numerators then a 2d
array with padded zeros for shorter numerators is returned. Otherwise
returns ``np.asarray(nums)``.
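Examples
--------
A small illustration with hypothetical coefficient lists of unequal
length; the shorter one is left-padded with zeros:
>>> _align_nums([[1, 2], [1, 2, 3]]).shape
(2, 3)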
"""
try:
# The statement can throw a ValueError if one
# of the numerators is a scalar and another
# is array-like, e.g. if nums = [5, [1, 2, 3]]
nums = asarray(nums)
if not np.issubdtype(nums.dtype, np.number):
raise ValueError("dtype of numerator is non-numeric")
return nums
except ValueError:
nums = [np.atleast_1d(num) for num in nums]
max_width = max(num.size for num in nums)
# pre-allocate
aligned_nums = np.zeros((len(nums), max_width))
# Create numerators with padded zeros
for index, num in enumerate(nums):
aligned_nums[index, -num.size:] = num
return aligned_nums
def normalize(b, a):
"""Normalize numerator/denominator of a continuous-time transfer function.
If values of `b` are too close to 0, they are removed. In that case, a
BadCoefficients warning is emitted.
Parameters
----------
b: array_like
Numerator of the transfer function. Can be a 2d array to normalize
multiple transfer functions.
a: array_like
Denominator of the transfer function. At most 1d.
Returns
-------
num: array
The numerator of the normalized transfer function. At least a 1d
array. A 2d-array if the input `num` is a 2d array.
den: 1d-array
The denominator of the normalized transfer function.
Notes
-----
Coefficients for both the numerator and denominator should be specified in
descending exponent order (e.g., ``s^2 + 3s + 5`` would be represented as
``[1, 3, 5]``).
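Examples
--------
A minimal sketch with illustrative coefficients; both polynomials are
divided by the leading denominator coefficient:
>>> from scipy import signal
>>> b, a = signal.normalize([2.0, 4.0], [2.0, 1.0])
>>> float(a[0]), float(a[1])
(1.0, 0.5)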
"""
num, den = b, a
den = np.atleast_1d(den)
num = np.atleast_2d(_align_nums(num))
if den.ndim != 1:
raise ValueError("Denominator polynomial must be rank-1 array.")
if num.ndim > 2:
raise ValueError("Numerator polynomial must be rank-1 or"
" rank-2 array.")
if np.all(den == 0):
raise ValueError("Denominator must have at least one nonzero element.")
# Trim leading zeros in denominator, leave at least one.
den = np.trim_zeros(den, 'f')
# Normalize transfer function
num, den = num / den[0], den / den[0]
# Count numerator columns that are all zero
leading_zeros = 0
for col in num.T:
if np.allclose(col, 0, atol=1e-14):
leading_zeros += 1
else:
break
# Trim leading zeros of numerator
if leading_zeros > 0:
warnings.warn("Badly conditioned filter coefficients (numerator): the "
"results may be meaningless", BadCoefficients)
# Make sure at least one column remains
if leading_zeros == num.shape[1]:
leading_zeros -= 1
num = num[:, leading_zeros:]
# Squeeze first dimension if singular
if num.shape[0] == 1:
num = num[0, :]
return num, den
def lp2lp(b, a, wo=1.0):
"""
Transform a lowpass filter prototype to a different frequency.
Return an analog low-pass filter with cutoff frequency `wo`
from an analog low-pass filter prototype with unity cutoff frequency, in
transfer function ('ba') representation.
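Examples
--------
A minimal sketch: shift the illustrative prototype ``H(s) = 1 / (s + 1)``
to a cutoff of 10 rad/s, giving ``10 / (s + 10)``:
>>> from scipy import signal
>>> b, a = signal.lp2lp([1.0], [1.0, 1.0], wo=10.0)
>>> float(b[0]), float(a[1])
(10.0, 10.0)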
"""
a, b = map(atleast_1d, (a, b))
try:
wo = float(wo)
except TypeError:
wo = float(wo[0])
d = len(a)
n = len(b)
M = max((d, n))
pwo = pow(wo, numpy.arange(M - 1, -1, -1))
start1 = max((n - d, 0))
start2 = max((d - n, 0))
b = b * pwo[start1] / pwo[start2:]
a = a * pwo[start1] / pwo[start1:]
return normalize(b, a)
def lp2hp(b, a, wo=1.0):
"""
Transform a lowpass filter prototype to a highpass filter.
Return an analog high-pass filter with cutoff frequency `wo`
from an analog low-pass filter prototype with unity cutoff frequency, in
transfer function ('ba') representation.
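Examples
--------
A minimal sketch: transform the illustrative prototype
``H(s) = 1 / (s + 1)`` to a high-pass filter with cutoff 10 rad/s,
giving ``s / (s + 10)``:
>>> from scipy import signal
>>> b, a = signal.lp2hp([1.0], [1.0, 1.0], wo=10.0)
>>> float(b[0]), float(a[1])
(1.0, 10.0)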
"""
a, b = map(atleast_1d, (a, b))
try:
wo = float(wo)
except TypeError:
wo = float(wo[0])
d = len(a)
n = len(b)
if wo != 1:
pwo = pow(wo, numpy.arange(max((d, n))))
else:
pwo = numpy.ones(max((d, n)), b.dtype.char)
if d >= n:
outa = a[::-1] * pwo
outb = resize(b, (d,))
outb[n:] = 0.0
outb[:n] = b[::-1] * pwo[:n]
else:
outb = b[::-1] * pwo
outa = resize(a, (n,))
outa[d:] = 0.0
outa[:d] = a[::-1] * pwo[:d]
return normalize(outb, outa)
def lp2bp(b, a, wo=1.0, bw=1.0):
"""
Transform a lowpass filter prototype to a bandpass filter.
Return an analog band-pass filter with center frequency `wo` and
bandwidth `bw` from an analog low-pass filter prototype with unity
cutoff frequency, in transfer function ('ba') representation.
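Examples
--------
A minimal sketch: transform the illustrative prototype
``H(s) = 1 / (s + 1)`` to a band-pass filter centered at 10 rad/s with
bandwidth 2 rad/s, giving ``2 s / (s^2 + 2 s + 100)``:
>>> from scipy import signal
>>> b, a = signal.lp2bp([1.0], [1.0, 1.0], wo=10.0, bw=2.0)
>>> [float(c) for c in a]
[1.0, 2.0, 100.0]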
"""
a, b = map(atleast_1d, (a, b))
D = len(a) - 1
N = len(b) - 1
artype = mintypecode((a, b))
ma = max([N, D])
Np = N + ma
Dp = D + ma
bprime = numpy.zeros(Np + 1, artype)
aprime = numpy.zeros(Dp + 1, artype)
wosq = wo * wo
for j in range(Np + 1):
val = 0.0
for i in range(0, N + 1):
for k in range(0, i + 1):
if ma - i + 2 * k == j:
val += comb(i, k) * b[N - i] * (wosq) ** (i - k) / bw ** i
bprime[Np - j] = val
for j in range(Dp + 1):
val = 0.0
for i in range(0, D + 1):
for k in range(0, i + 1):
if ma - i + 2 * k == j:
val += comb(i, k) * a[D - i] * (wosq) ** (i - k) / bw ** i
aprime[Dp - j] = val
return normalize(bprime, aprime)
def lp2bs(b, a, wo=1.0, bw=1.0):
"""
Transform a lowpass filter prototype to a bandstop filter.
Return an analog band-stop filter with center frequency `wo` and
bandwidth `bw` from an analog low-pass filter prototype with unity
cutoff frequency, in transfer function ('ba') representation.
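Examples
--------
A minimal sketch: transform the illustrative prototype
``H(s) = 1 / (s + 1)`` to a band-stop filter centered at 10 rad/s with
stopband width 2 rad/s, giving ``(s^2 + 100) / (s^2 + 2 s + 100)``:
>>> from scipy import signal
>>> b, a = signal.lp2bs([1.0], [1.0, 1.0], wo=10.0, bw=2.0)
>>> [float(c) for c in b], [float(c) for c in a]
([1.0, 0.0, 100.0], [1.0, 2.0, 100.0])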
"""
a, b = map(atleast_1d, (a, b))
D = len(a) - 1
N = len(b) - 1
artype = mintypecode((a, b))
M = max([N, D])
Np = M + M
Dp = M + M
bprime = numpy.zeros(Np + 1, artype)
aprime = numpy.zeros(Dp + 1, artype)
wosq = wo * wo
for j in range(Np + 1):
val = 0.0
for i in range(0, N + 1):
for k in range(0, M - i + 1):
if i + 2 * k == j:
val += (comb(M - i, k) * b[N - i] *
(wosq) ** (M - i - k) * bw ** i)
bprime[Np - j] = val
for j in range(Dp + 1):
val = 0.0
for i in range(0, D + 1):
for k in range(0, M - i + 1):
if i + 2 * k == j:
val += (comb(M - i, k) * a[D - i] *
(wosq) ** (M - i - k) * bw ** i)
aprime[Dp - j] = val
return normalize(bprime, aprime)
def bilinear(b, a, fs=1.0):
"""Return a digital filter from an analog one using a bilinear transform.
The bilinear transform substitutes ``(z-1) / (z+1)`` for ``s``.
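Examples
--------
A minimal sketch: discretize the illustrative analog prototype
``H(s) = 1 / (s + 1)`` with ``fs=0.5``, so that ``s`` maps exactly to
``(z - 1) / (z + 1)`` and the result is the two-tap average
``0.5 + 0.5 z**-1``:
>>> from scipy import signal
>>> bz, az = signal.bilinear([1.0], [1.0, 1.0], fs=0.5)
>>> [float(c) for c in bz], [float(c) for c in az]
([0.5, 0.5], [1.0, 0.0])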
"""
fs = float(fs)
a, b = map(atleast_1d, (a, b))
D = len(a) - 1
N = len(b) - 1
artype = float
M = max([N, D])
Np = M
Dp = M
bprime = numpy.zeros(Np + 1, artype)
aprime = numpy.zeros(Dp + 1, artype)
for j in range(Np + 1):
val = 0.0
for i in range(N + 1):
for k in range(i + 1):
for l in range(M - i + 1):
if k + l == j:
val += (comb(i, k) * comb(M - i, l) * b[N - i] *
pow(2 * fs, i) * (-1) ** k)
bprime[j] = real(val)
for j in range(Dp + 1):
val = 0.0
for i in range(D + 1):
for k in range(i + 1):
for l in range(M - i + 1):
if k + l == j:
val += (comb(i, k) * comb(M - i, l) * a[D - i] *
pow(2 * fs, i) * (-1) ** k)
aprime[j] = real(val)
return normalize(bprime, aprime)
def iirdesign(wp, ws, gpass, gstop, analog=False, ftype='ellip', output='ba'):
"""Complete IIR digital and analog filter design.
Given passband and stopband frequencies and gains, construct an analog or
digital IIR filter of minimum order for a given basic type. Return the
output in numerator, denominator ('ba'), pole-zero ('zpk') or second order
sections ('sos') form.
Parameters
----------
wp, ws : float
Passband and stopband edge frequencies.
For digital filters, these are normalized from 0 to 1, where 1 is the
Nyquist frequency, pi radians/sample. (`wp` and `ws` are thus in
half-cycles / sample.) For example:
- Lowpass: wp = 0.2, ws = 0.3
- Highpass: wp = 0.3, ws = 0.2
- Bandpass: wp = [0.2, 0.5], ws = [0.1, 0.6]
- Bandstop: wp = [0.1, 0.6], ws = [0.2, 0.5]
For analog filters, `wp` and `ws` are angular frequencies (e.g. rad/s).
gpass : float
The maximum loss in the passband (dB).
gstop : float
The minimum attenuation in the stopband (dB).
analog : bool, optional
When True, return an analog filter, otherwise a digital filter is
returned.
ftype : str, optional
The type of IIR filter to design:
- Butterworth : 'butter'
- Chebyshev I : 'cheby1'
- Chebyshev II : 'cheby2'
- Cauer/elliptic: 'ellip'
- Bessel/Thomson: 'bessel'
output : {'ba', 'zpk', 'sos'}, optional
Type of output: numerator/denominator ('ba'), pole-zero ('zpk'), or
second-order sections ('sos'). Default is 'ba'.
Returns
-------
b, a : ndarray, ndarray
Numerator (`b`) and denominator (`a`) polynomials of the IIR filter.
Only returned if ``output='ba'``.
z, p, k : ndarray, ndarray, float
Zeros, poles, and system gain of the IIR filter transfer
function. Only returned if ``output='zpk'``.
sos : ndarray
Second-order sections representation of the IIR filter.
Only returned if ``output=='sos'``.
See Also
--------
butter : Filter design using order and critical points
cheby1, cheby2, ellip, bessel
buttord : Find order and critical points from passband and stopband spec
cheb1ord, cheb2ord, ellipord
iirfilter : General filter design using order and critical frequencies
Notes
-----
The ``'sos'`` output parameter was added in 0.16.0.
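Examples
--------
A minimal sketch (illustrative specifications): a digital lowpass with
its passband edge at 0.2 and stopband edge at 0.3 of the Nyquist
frequency, at most 3 dB passband loss and at least 40 dB stopband
attenuation, returned in 'ba' and 'zpk' form:
>>> from scipy import signal
>>> b, a = signal.iirdesign(0.2, 0.3, gpass=3, gstop=40, ftype='butter')
>>> z, p, k = signal.iirdesign(0.2, 0.3, gpass=3, gstop=40,
...                            ftype='ellip', output='zpk')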
"""
try:
ordfunc = filter_dict[ftype][1]
except KeyError:
raise ValueError("Invalid IIR filter type: %s" % ftype)
except IndexError:
raise ValueError(("%s does not have order selection. Use "
"iirfilter function.") % ftype)
wp = atleast_1d(wp)
ws = atleast_1d(ws)
band_type = 2 * (len(wp) - 1)
band_type += 1
if wp[0] >= ws[0]:
band_type += 1
btype = {1: 'lowpass', 2: 'highpass',
3: 'bandstop', 4: 'bandpass'}[band_type]
N, Wn = ordfunc(wp, ws, gpass, gstop, analog=analog)
return iirfilter(N, Wn, rp=gpass, rs=gstop, analog=analog, btype=btype,
ftype=ftype, output=output)
def iirfilter(N, Wn, rp=None, rs=None, btype='band', analog=False,
ftype='butter', output='ba'):
"""
IIR digital and analog filter design given order and critical points.
Design an Nth-order digital or analog filter and return the filter
coefficients.
Parameters
----------
N : int
The order of the filter.
Wn : array_like
A scalar or length-2 sequence giving the critical frequencies.
For digital filters, `Wn` is normalized from 0 to 1, where 1 is the
Nyquist frequency, pi radians/sample. (`Wn` is thus in
half-cycles / sample.)
For analog filters, `Wn` is an angular frequency (e.g. rad/s).
rp : float, optional
For Chebyshev and elliptic filters, provides the maximum ripple
in the passband. (dB)
rs : float, optional
For Chebyshev and elliptic filters, provides the minimum attenuation
in the stop band. (dB)
btype : {'bandpass', 'lowpass', 'highpass', 'bandstop'}, optional
The type of filter. Default is 'bandpass'.
analog : bool, optional
When True, return an analog filter, otherwise a digital filter is
returned.
ftype : str, optional
The type of IIR filter to design:
- Butterworth : 'butter'
- Chebyshev I : 'cheby1'
- Chebyshev II : 'cheby2'
- Cauer/elliptic: 'ellip'
- Bessel/Thomson: 'bessel'
output : {'ba', 'zpk', 'sos'}, optional
Type of output: numerator/denominator ('ba'), pole-zero ('zpk'), or
second-order sections ('sos'). Default is 'ba'.
Returns
-------
b, a : ndarray, ndarray
Numerator (`b`) and denominator (`a`) polynomials of the IIR filter.
Only returned if ``output='ba'``.
z, p, k : ndarray, ndarray, float
Zeros, poles, and system gain of the IIR filter transfer
function. Only returned if ``output='zpk'``.
sos : ndarray
Second-order sections representation of the IIR filter.
Only returned if ``output=='sos'``.
See Also
--------
butter : Filter design using order and critical points
cheby1, cheby2, ellip, bessel
buttord : Find order and critical points from passband and stopband spec
cheb1ord, cheb2ord, ellipord
iirdesign : General filter design using passband and stopband spec
Notes
-----
The ``'sos'`` output parameter was added in 0.16.0.
Examples
--------
Generate a 17th-order Chebyshev II bandpass filter and plot the frequency
response:
>>> from scipy import signal
>>> import matplotlib.pyplot as plt
>>> b, a = signal.iirfilter(17, [50, 200], rs=60, btype='band',
... analog=True, ftype='cheby2')
>>> w, h = signal.freqs(b, a, 1000)
>>> fig = plt.figure()
>>> ax = fig.add_subplot(111)
>>> ax.semilogx(w, 20 * np.log10(abs(h)))
>>> ax.set_title('Chebyshev Type II bandpass frequency response')
>>> ax.set_xlabel('Frequency [radians / second]')
>>> ax.set_ylabel('Amplitude [dB]')
>>> ax.axis((10, 1000, -100, 10))
>>> ax.grid(which='both', axis='both')
>>> plt.show()
"""
ftype, btype, output = [x.lower() for x in (ftype, btype, output)]
Wn = asarray(Wn)
try:
btype = band_dict[btype]
except KeyError:
raise ValueError("'%s' is an invalid bandtype for filter." % btype)
try:
typefunc = filter_dict[ftype][0]
except KeyError:
raise ValueError("'%s' is not a valid basic IIR filter." % ftype)
if output not in ['ba', 'zpk', 'sos']:
raise ValueError("'%s' is not a valid output form." % output)
if rp is not None and rp < 0:
raise ValueError("passband ripple (rp) must be positive")
if rs is not None and rs < 0:
raise ValueError("stopband attenuation (rs) must be positive")
# Get analog lowpass prototype
if typefunc == buttap:
z, p, k = typefunc(N)
elif typefunc == besselap:
z, p, k = typefunc(N, norm=bessel_norms[ftype])
elif typefunc == cheb1ap:
if rp is None:
raise ValueError("passband ripple (rp) must be provided to "
"design a Chebyshev I filter.")
z, p, k = typefunc(N, rp)
elif typefunc == cheb2ap:
if rs is None:
raise ValueError("stopband attenuation (rs) must be provided to "
"design a Chebyshev II filter.")
z, p, k = typefunc(N, rs)
elif typefunc == ellipap:
if rs is None or rp is None:
raise ValueError("Both rp and rs must be provided to design an "
"elliptic filter.")
z, p, k = typefunc(N, rp, rs)
else:
raise NotImplementedError("'%s' not implemented in iirfilter." % ftype)
# Pre-warp frequencies for digital filter design
if not analog:
if numpy.any(Wn < 0) or numpy.any(Wn > 1):
raise ValueError("Digital filter critical frequencies "
"must be 0 <= Wn <= 1")
fs = 2.0
warped = 2 * fs * tan(pi * Wn / fs)
else:
warped = Wn
# transform to lowpass, bandpass, highpass, or bandstop
if btype in ('lowpass', 'highpass'):
if numpy.size(Wn) != 1:
raise ValueError('Must specify a single critical frequency Wn')
if btype == 'lowpass':
z, p, k = _zpklp2lp(z, p, k, wo=warped)
elif btype == 'highpass':
z, p, k = _zpklp2hp(z, p, k, wo=warped)
elif btype in ('bandpass', 'bandstop'):
try:
bw = warped[1] - warped[0]
wo = sqrt(warped[0] * warped[1])
except IndexError:
raise ValueError('Wn must specify start and stop frequencies')
if btype == 'bandpass':
z, p, k = _zpklp2bp(z, p, k, wo=wo, bw=bw)
elif btype == 'bandstop':
z, p, k = _zpklp2bs(z, p, k, wo=wo, bw=bw)
else:
raise NotImplementedError("'%s' not implemented in iirfilter." % btype)
# Find discrete equivalent if necessary
if not analog:
z, p, k = _zpkbilinear(z, p, k, fs=fs)
# Transform to proper out type (pole-zero, state-space, numer-denom)
if output == 'zpk':
return z, p, k
elif output == 'ba':
return zpk2tf(z, p, k)
elif output == 'sos':
return zpk2sos(z, p, k)
def _relative_degree(z, p):
"""
Return relative degree of transfer function from zeros and poles
"""
degree = len(p) - len(z)
if degree < 0:
raise ValueError("Improper transfer function. "
"Must have at least as many poles as zeros.")
else:
return degree
# TODO: merge these into existing functions or make public versions
def _zpkbilinear(z, p, k, fs):
"""
Return a digital filter from an analog one using a bilinear transform.
Transform a set of poles and zeros from the analog s-plane to the digital
z-plane using Tustin's method, which substitutes ``(z-1) / (z+1)`` for
``s``, maintaining the shape of the frequency response.
Parameters
----------
z : array_like
Zeros of the analog IIR filter transfer function.
p : array_like
Poles of the analog IIR filter transfer function.
k : float
System gain of the analog IIR filter transfer function.
fs : float
Sample rate, as ordinary frequency (e.g. hertz). No prewarping is
done in this function.
Returns
-------
z : ndarray
Zeros of the transformed digital filter transfer function.
p : ndarray
Poles of the transformed digital filter transfer function.
k : float
System gain of the transformed digital filter.
"""
z = atleast_1d(z)
p = atleast_1d(p)
degree = _relative_degree(z, p)
fs2 = 2*fs
# Bilinear transform the poles and zeros
z_z = (fs2 + z) / (fs2 - z)
p_z = (fs2 + p) / (fs2 - p)
# Any zeros that were at infinity get moved to the Nyquist frequency
z_z = append(z_z, -ones(degree))
# Compensate for gain change
k_z = k * real(prod(fs2 - z) / prod(fs2 - p))
return z_z, p_z, k_z
def _zpklp2lp(z, p, k, wo=1.0):
r"""
Transform a lowpass filter prototype to a different frequency.
Return an analog low-pass filter with cutoff frequency `wo`
from an analog low-pass filter prototype with unity cutoff frequency,
using zeros, poles, and gain ('zpk') representation.
Parameters
----------
z : array_like
Zeros of the analog IIR filter transfer function.
p : array_like
Poles of the analog IIR filter transfer function.
k : float
System gain of the analog IIR filter transfer function.
wo : float
Desired cutoff, as angular frequency (e.g. rad/s).
Defaults to no change.
Returns
-------
z : ndarray
Zeros of the transformed low-pass filter transfer function.
p : ndarray
Poles of the transformed low-pass filter transfer function.
k : float
System gain of the transformed low-pass filter.
Notes
-----
This is derived from the s-plane substitution
.. math:: s \rightarrow \frac{s}{\omega_0}
"""
z = atleast_1d(z)
p = atleast_1d(p)
wo = float(wo) # Avoid int wraparound
degree = _relative_degree(z, p)
# Scale all points radially from origin to shift cutoff frequency
z_lp = wo * z
p_lp = wo * p
# Each shifted pole decreases gain by wo, each shifted zero increases it.
# Cancel out the net change to keep overall gain the same
k_lp = k * wo**degree
return z_lp, p_lp, k_lp
def _zpklp2hp(z, p, k, wo=1.0):
r"""
Transform a lowpass filter prototype to a highpass filter.
Return an analog high-pass filter with cutoff frequency `wo`
from an analog low-pass filter prototype with unity cutoff frequency,
using zeros, poles, and gain ('zpk') representation.
Parameters
----------
z : array_like
Zeros of the analog IIR filter transfer function.
p : array_like
Poles of the analog IIR filter transfer function.
k : float
System gain of the analog IIR filter transfer function.
wo : float
Desired cutoff, as angular frequency (e.g. rad/s).
Defaults to no change.
Returns
-------
z : ndarray
Zeros of the transformed high-pass filter transfer function.
p : ndarray
Poles of the transformed high-pass filter transfer function.
k : float
System gain of the transformed high-pass filter.
Notes
-----
This is derived from the s-plane substitution
.. math:: s \rightarrow \frac{\omega_0}{s}
This maintains symmetry of the lowpass and highpass responses on a
logarithmic scale.
"""
z = atleast_1d(z)
p = atleast_1d(p)
wo = float(wo)
degree = _relative_degree(z, p)
# Invert positions radially about unit circle to convert LPF to HPF
# Scale all points radially from origin to shift cutoff frequency
z_hp = wo / z
p_hp = wo / p
# If lowpass had zeros at infinity, inverting moves them to origin.
z_hp = append(z_hp, zeros(degree))
# Cancel out gain change caused by inversion
k_hp = k * real(prod(-z) / prod(-p))
return z_hp, p_hp, k_hp
def _zpklp2bp(z, p, k, wo=1.0, bw=1.0):
r"""
Transform a lowpass filter prototype to a bandpass filter.
Return an analog band-pass filter with center frequency `wo` and
bandwidth `bw` from an analog low-pass filter prototype with unity
cutoff frequency, using zeros, poles, and gain ('zpk') representation.
Parameters
----------
z : array_like
Zeros of the analog IIR filter transfer function.
p : array_like
Poles of the analog IIR filter transfer function.
k : float
System gain of the analog IIR filter transfer function.
wo : float
Desired passband center, as angular frequency (e.g. rad/s).
Defaults to no change.
bw : float
Desired passband width, as angular frequency (e.g. rad/s).
Defaults to 1.
Returns
-------
z : ndarray
Zeros of the transformed band-pass filter transfer function.
p : ndarray
Poles of the transformed band-pass filter transfer function.
k : float
System gain of the transformed band-pass filter.
Notes
-----
This is derived from the s-plane substitution
.. math:: s \rightarrow \frac{s^2 + {\omega_0}^2}{s \cdot \mathrm{BW}}
This is the "wideband" transformation, producing a passband with
geometric (log frequency) symmetry about `wo`.
"""
z = atleast_1d(z)
p = atleast_1d(p)
wo = float(wo)
bw = float(bw)
degree = _relative_degree(z, p)
# Scale poles and zeros to desired bandwidth
z_lp = z * bw/2
p_lp = p * bw/2
# Square root needs to produce complex result, not NaN
z_lp = z_lp.astype(complex)
p_lp = p_lp.astype(complex)
# Duplicate poles and zeros and shift from baseband to +wo and -wo
z_bp = concatenate((z_lp + sqrt(z_lp**2 - wo**2),
z_lp - sqrt(z_lp**2 - wo**2)))
p_bp = concatenate((p_lp + sqrt(p_lp**2 - wo**2),
p_lp - sqrt(p_lp**2 - wo**2)))
# Move degree zeros to origin, leaving degree zeros at infinity for BPF
z_bp = append(z_bp, zeros(degree))
# Cancel out gain change from frequency scaling
k_bp = k * bw**degree
return z_bp, p_bp, k_bp
def _zpklp2bs(z, p, k, wo=1.0, bw=1.0):
r"""
Transform a lowpass filter prototype to a bandstop filter.
Return an analog band-stop filter with center frequency `wo` and
stopband width `bw` from an analog low-pass filter prototype with unity
cutoff frequency, using zeros, poles, and gain ('zpk') representation.
Parameters
----------
z : array_like
Zeros of the analog IIR filter transfer function.
p : array_like
Poles of the analog IIR filter transfer function.
k : float
System gain of the analog IIR filter transfer function.
wo : float
Desired stopband center, as angular frequency (e.g. rad/s).
Defaults to no change.
bw : float
Desired stopband width, as angular frequency (e.g. rad/s).
Defaults to 1.
Returns
-------
z : ndarray
Zeros of the transformed band-stop filter transfer function.
p : ndarray
Poles of the transformed band-stop filter transfer function.
k : float
System gain of the transformed band-stop filter.
Notes
-----
This is derived from the s-plane substitution
.. math:: s \rightarrow \frac{s \cdot \mathrm{BW}}{s^2 + {\omega_0}^2}
This is the "wideband" transformation, producing a stopband with
geometric (log frequency) symmetry about `wo`.
"""
z = atleast_1d(z)
p = atleast_1d(p)
wo = float(wo)
bw = float(bw)
degree = _relative_degree(z, p)
# Invert to a highpass filter with desired bandwidth
z_hp = (bw/2) / z
p_hp = (bw/2) / p
# Square root needs to produce complex result, not NaN
z_hp = z_hp.astype(complex)
p_hp = p_hp.astype(complex)
# Duplicate poles and zeros and shift from baseband to +wo and -wo
z_bs = concatenate((z_hp + sqrt(z_hp**2 - wo**2),
z_hp - sqrt(z_hp**2 - wo**2)))
p_bs = concatenate((p_hp + sqrt(p_hp**2 - wo**2),
p_hp - sqrt(p_hp**2 - wo**2)))
# Move any zeros that were at infinity to the center of the stopband
z_bs = append(z_bs, +1j*wo * ones(degree))
z_bs = append(z_bs, -1j*wo * ones(degree))
# Cancel out gain change caused by inversion
k_bs = k * real(prod(-z) / prod(-p))
return z_bs, p_bs, k_bs
def butter(N, Wn, btype='low', analog=False, output='ba'):
"""
Butterworth digital and analog filter design.
Design an Nth-order digital or analog Butterworth filter and return
the filter coefficients.
Parameters
----------
N : int
The order of the filter.
Wn : array_like
A scalar or length-2 sequence giving the critical frequencies.
For a Butterworth filter, this is the point at which the gain
drops to 1/sqrt(2) that of the passband (the "-3 dB point").
For digital filters, `Wn` is normalized from 0 to 1, where 1 is the
Nyquist frequency, pi radians/sample. (`Wn` is thus in
half-cycles / sample.)
For analog filters, `Wn` is an angular frequency (e.g. rad/s).
btype : {'lowpass', 'highpass', 'bandpass', 'bandstop'}, optional
The type of filter. Default is 'lowpass'.
analog : bool, optional
When True, return an analog filter, otherwise a digital filter is
returned.
output : {'ba', 'zpk', 'sos'}, optional
Type of output: numerator/denominator ('ba'), pole-zero ('zpk'), or
second-order sections ('sos'). Default is 'ba'.
Returns
-------
b, a : ndarray, ndarray
Numerator (`b`) and denominator (`a`) polynomials of the IIR filter.
Only returned if ``output='ba'``.
z, p, k : ndarray, ndarray, float
Zeros, poles, and system gain of the IIR filter transfer
function. Only returned if ``output='zpk'``.
sos : ndarray
Second-order sections representation of the IIR filter.
Only returned if ``output=='sos'``.
See Also
--------
buttord, buttap
Notes
-----
The Butterworth filter has maximally flat frequency response in the
passband.
The ``'sos'`` output parameter was added in 0.16.0.
Examples
--------
Plot the filter's frequency response, showing the critical points:
>>> from scipy import signal
>>> import matplotlib.pyplot as plt
>>> b, a = signal.butter(4, 100, 'low', analog=True)
>>> w, h = signal.freqs(b, a)
>>> plt.semilogx(w, 20 * np.log10(abs(h)))
>>> plt.title('Butterworth filter frequency response')
>>> plt.xlabel('Frequency [radians / second]')
>>> plt.ylabel('Amplitude [dB]')
>>> plt.margins(0, 0.1)
>>> plt.grid(which='both', axis='both')
>>> plt.axvline(100, color='green') # cutoff frequency
>>> plt.show()
"""
return iirfilter(N, Wn, btype=btype, analog=analog,
output=output, ftype='butter')
def cheby1(N, rp, Wn, btype='low', analog=False, output='ba'):
"""
Chebyshev type I digital and analog filter design.
Design an Nth-order digital or analog Chebyshev type I filter and
return the filter coefficients.
Parameters
----------
N : int
The order of the filter.
rp : float
The maximum ripple allowed below unity gain in the passband.
Specified in decibels, as a positive number.
Wn : array_like
A scalar or length-2 sequence giving the critical frequencies.
For Type I filters, this is the point in the transition band at which
the gain first drops below -`rp`.
For digital filters, `Wn` is normalized from 0 to 1, where 1 is the
Nyquist frequency, pi radians/sample. (`Wn` is thus in
half-cycles / sample.)
For analog filters, `Wn` is an angular frequency (e.g. rad/s).
btype : {'lowpass', 'highpass', 'bandpass', 'bandstop'}, optional
The type of filter. Default is 'lowpass'.
analog : bool, optional
When True, return an analog filter, otherwise a digital filter is
returned.
output : {'ba', 'zpk', 'sos'}, optional
Type of output: numerator/denominator ('ba'), pole-zero ('zpk'), or
second-order sections ('sos'). Default is 'ba'.
Returns
-------
b, a : ndarray, ndarray
Numerator (`b`) and denominator (`a`) polynomials of the IIR filter.
Only returned if ``output='ba'``.
z, p, k : ndarray, ndarray, float
Zeros, poles, and system gain of the IIR filter transfer
function. Only returned if ``output='zpk'``.
sos : ndarray
Second-order sections representation of the IIR filter.
Only returned if ``output=='sos'``.
See Also
--------
cheb1ord, cheb1ap
Notes
-----
The Chebyshev type I filter maximizes the rate of cutoff between the
frequency response's passband and stopband, at the expense of ripple in
the passband and increased ringing in the step response.
Type I filters roll off faster than Type II (`cheby2`), but Type II
filters do not have any ripple in the passband.
The equiripple passband has N maxima or minima (for example, a
5th-order filter has 3 maxima and 2 minima). Consequently, the DC gain is
unity for odd-order filters, or -rp dB for even-order filters.
The ``'sos'`` output parameter was added in 0.16.0.
Examples
--------
Plot the filter's frequency response, showing the critical points:
>>> from scipy import signal
>>> import matplotlib.pyplot as plt
>>> b, a = signal.cheby1(4, 5, 100, 'low', analog=True)
>>> w, h = signal.freqs(b, a)
>>> plt.semilogx(w, 20 * np.log10(abs(h)))
>>> plt.title('Chebyshev Type I frequency response (rp=5)')
>>> plt.xlabel('Frequency [radians / second]')
>>> plt.ylabel('Amplitude [dB]')
>>> plt.margins(0, 0.1)
>>> plt.grid(which='both', axis='both')
>>> plt.axvline(100, color='green') # cutoff frequency
>>> plt.axhline(-5, color='green') # rp
>>> plt.show()
"""
return iirfilter(N, Wn, rp=rp, btype=btype, analog=analog,
output=output, ftype='cheby1')
def cheby2(N, rs, Wn, btype='low', analog=False, output='ba'):
"""
Chebyshev type II digital and analog filter design.
Design an Nth-order digital or analog Chebyshev type II filter and
return the filter coefficients.
Parameters
----------
N : int
The order of the filter.
rs : float
The minimum attenuation required in the stop band.
Specified in decibels, as a positive number.
Wn : array_like
A scalar or length-2 sequence giving the critical frequencies.
For Type II filters, this is the point in the transition band at which
the gain first reaches -`rs`.
For digital filters, `Wn` is normalized from 0 to 1, where 1 is the
Nyquist frequency, pi radians/sample. (`Wn` is thus in
half-cycles / sample.)
For analog filters, `Wn` is an angular frequency (e.g. rad/s).
btype : {'lowpass', 'highpass', 'bandpass', 'bandstop'}, optional
The type of filter. Default is 'lowpass'.
analog : bool, optional
When True, return an analog filter, otherwise a digital filter is
returned.
output : {'ba', 'zpk', 'sos'}, optional
Type of output: numerator/denominator ('ba'), pole-zero ('zpk'), or
second-order sections ('sos'). Default is 'ba'.
Returns
-------
b, a : ndarray, ndarray
Numerator (`b`) and denominator (`a`) polynomials of the IIR filter.
Only returned if ``output='ba'``.
z, p, k : ndarray, ndarray, float
Zeros, poles, and system gain of the IIR filter transfer
function. Only returned if ``output='zpk'``.
sos : ndarray
Second-order sections representation of the IIR filter.
Only returned if ``output=='sos'``.
See Also
--------
cheb2ord, cheb2ap
Notes
-----
The Chebyshev type II filter maximizes the rate of cutoff between the
frequency response's passband and stopband, at the expense of ripple in
the stopband and increased ringing in the step response.
Type II filters do not roll off as fast as Type I (`cheby1`).
The ``'sos'`` output parameter was added in 0.16.0.
Examples
--------
Plot the filter's frequency response, showing the critical points:
>>> from scipy import signal
>>> import matplotlib.pyplot as plt
>>> b, a = signal.cheby2(4, 40, 100, 'low', analog=True)
>>> w, h = signal.freqs(b, a)
>>> plt.semilogx(w, 20 * np.log10(abs(h)))
>>> plt.title('Chebyshev Type II frequency response (rs=40)')
>>> plt.xlabel('Frequency [radians / second]')
>>> plt.ylabel('Amplitude [dB]')
>>> plt.margins(0, 0.1)
>>> plt.grid(which='both', axis='both')
>>> plt.axvline(100, color='green') # cutoff frequency
>>> plt.axhline(-40, color='green') # rs
>>> plt.show()
"""
return iirfilter(N, Wn, rs=rs, btype=btype, analog=analog,
output=output, ftype='cheby2')
def ellip(N, rp, rs, Wn, btype='low', analog=False, output='ba'):
"""
Elliptic (Cauer) digital and analog filter design.
Design an Nth-order digital or analog elliptic filter and return
the filter coefficients.
Parameters
----------
N : int
The order of the filter.
rp : float
The maximum ripple allowed below unity gain in the passband.
Specified in decibels, as a positive number.
rs : float
The minimum attenuation required in the stop band.
Specified in decibels, as a positive number.
Wn : array_like
A scalar or length-2 sequence giving the critical frequencies.
For elliptic filters, this is the point in the transition band at
which the gain first drops below -`rp`.
For digital filters, `Wn` is normalized from 0 to 1, where 1 is the
Nyquist frequency, pi radians/sample. (`Wn` is thus in
half-cycles / sample.)
For analog filters, `Wn` is an angular frequency (e.g. rad/s).
btype : {'lowpass', 'highpass', 'bandpass', 'bandstop'}, optional
The type of filter. Default is 'lowpass'.
analog : bool, optional
When True, return an analog filter, otherwise a digital filter is
returned.
output : {'ba', 'zpk', 'sos'}, optional
Type of output: numerator/denominator ('ba'), pole-zero ('zpk'), or
second-order sections ('sos'). Default is 'ba'.
Returns
-------
b, a : ndarray, ndarray
Numerator (`b`) and denominator (`a`) polynomials of the IIR filter.
Only returned if ``output='ba'``.
z, p, k : ndarray, ndarray, float
Zeros, poles, and system gain of the IIR filter transfer
function. Only returned if ``output='zpk'``.
sos : ndarray
Second-order sections representation of the IIR filter.
Only returned if ``output=='sos'``.
See Also
--------
ellipord, ellipap
Notes
-----
Also known as Cauer or Zolotarev filters, the elliptical filter maximizes
the rate of transition between the frequency response's passband and
stopband, at the expense of ripple in both, and increased ringing in the
step response.
As `rp` approaches 0, the elliptical filter becomes a Chebyshev
type II filter (`cheby2`). As `rs` approaches 0, it becomes a Chebyshev
type I filter (`cheby1`). As both approach 0, it becomes a Butterworth
filter (`butter`).
The equiripple passband has N maxima or minima (for example, a
5th-order filter has 3 maxima and 2 minima). Consequently, the DC gain is
unity for odd-order filters, or -rp dB for even-order filters.
The ``'sos'`` output parameter was added in 0.16.0.
Examples
--------
Plot the filter's frequency response, showing the critical points:
>>> from scipy import signal
>>> import matplotlib.pyplot as plt
>>> b, a = signal.ellip(4, 5, 40, 100, 'low', analog=True)
>>> w, h = signal.freqs(b, a)
>>> plt.semilogx(w, 20 * np.log10(abs(h)))
>>> plt.title('Elliptic filter frequency response (rp=5, rs=40)')
>>> plt.xlabel('Frequency [radians / second]')
>>> plt.ylabel('Amplitude [dB]')
>>> plt.margins(0, 0.1)
>>> plt.grid(which='both', axis='both')
>>> plt.axvline(100, color='green') # cutoff frequency
>>> plt.axhline(-40, color='green') # rs
>>> plt.axhline(-5, color='green') # rp
>>> plt.show()
"""
return iirfilter(N, Wn, rs=rs, rp=rp, btype=btype, analog=analog,
output=output, ftype='elliptic')
def bessel(N, Wn, btype='low', analog=False, output='ba', norm='phase'):
"""
Bessel/Thomson digital and analog filter design.
Design an Nth-order digital or analog Bessel filter and return the
filter coefficients.
Parameters
----------
N : int
The order of the filter.
Wn : array_like
A scalar or length-2 sequence giving the critical frequencies (defined
by the `norm` parameter).
For analog filters, `Wn` is an angular frequency (e.g. rad/s).
For digital filters, `Wn` is normalized from 0 to 1, where 1 is the
Nyquist frequency, pi radians/sample. (`Wn` is thus in
half-cycles / sample.)
btype : {'lowpass', 'highpass', 'bandpass', 'bandstop'}, optional
The type of filter. Default is 'lowpass'.
analog : bool, optional
When True, return an analog filter, otherwise a digital filter is
returned. (See Notes.)
output : {'ba', 'zpk', 'sos'}, optional
Type of output: numerator/denominator ('ba'), pole-zero ('zpk'), or
second-order sections ('sos'). Default is 'ba'.
norm : {'phase', 'delay', 'mag'}, optional
Critical frequency normalization:
``phase``
The filter is normalized such that the phase response reaches its
midpoint at angular (e.g. rad/s) frequency `Wn`. This happens for
both low-pass and high-pass filters, so this is the
"phase-matched" case.
The magnitude response asymptotes are the same as a Butterworth
filter of the same order with a cutoff of `Wn`.
This is the default, and matches MATLAB's implementation.
``delay``
The filter is normalized such that the group delay in the passband
is 1/`Wn` (e.g. seconds). This is the "natural" type obtained by
solving Bessel polynomials.
``mag``
The filter is normalized such that the gain magnitude is -3 dB at
angular frequency `Wn`.
.. versionadded:: 0.18.0
Returns
-------
b, a : ndarray, ndarray
Numerator (`b`) and denominator (`a`) polynomials of the IIR filter.
Only returned if ``output='ba'``.
z, p, k : ndarray, ndarray, float
Zeros, poles, and system gain of the IIR filter transfer
function. Only returned if ``output='zpk'``.
sos : ndarray
Second-order sections representation of the IIR filter.
Only returned if ``output=='sos'``.
Notes
-----
Also known as a Thomson filter, the analog Bessel filter has maximally
flat group delay and maximally linear phase response, with very little
ringing in the step response. [1]_
The Bessel is inherently an analog filter. This function generates digital
Bessel filters using the bilinear transform, which does not preserve the
phase response of the analog filter. As such, it is only approximately
correct at frequencies below about fs/4. To get maximally-flat group
delay at higher frequencies, the analog Bessel filter must be transformed
using phase-preserving techniques.
See `besselap` for implementation details and references.
The ``'sos'`` output parameter was added in 0.16.0.
Examples
--------
Plot the phase-normalized frequency response, showing the relationship
to the Butterworth's cutoff frequency (green):
>>> from scipy import signal
>>> import matplotlib.pyplot as plt
>>> b, a = signal.butter(4, 100, 'low', analog=True)
>>> w, h = signal.freqs(b, a)
>>> plt.semilogx(w, 20 * np.log10(np.abs(h)), color='silver', ls='dashed')
>>> b, a = signal.bessel(4, 100, 'low', analog=True, norm='phase')
>>> w, h = signal.freqs(b, a)
>>> plt.semilogx(w, 20 * np.log10(np.abs(h)))
>>> plt.title('Bessel filter magnitude response (with Butterworth)')
>>> plt.xlabel('Frequency [radians / second]')
>>> plt.ylabel('Amplitude [dB]')
>>> plt.margins(0, 0.1)
>>> plt.grid(which='both', axis='both')
>>> plt.axvline(100, color='green') # cutoff frequency
>>> plt.show()
and the phase midpoint:
>>> plt.figure()
>>> plt.semilogx(w, np.unwrap(np.angle(h)))
>>> plt.axvline(100, color='green') # cutoff frequency
>>> plt.axhline(-np.pi, color='red') # phase midpoint
>>> plt.title('Bessel filter phase response')
>>> plt.xlabel('Frequency [radians / second]')
>>> plt.ylabel('Phase [radians]')
>>> plt.margins(0, 0.1)
>>> plt.grid(which='both', axis='both')
>>> plt.show()
Plot the magnitude-normalized frequency response, showing the -3 dB cutoff:
>>> b, a = signal.bessel(3, 10, 'low', analog=True, norm='mag')
>>> w, h = signal.freqs(b, a)
>>> plt.semilogx(w, 20 * np.log10(np.abs(h)))
>>> plt.axhline(-3, color='red') # -3 dB magnitude
>>> plt.axvline(10, color='green') # cutoff frequency
>>> plt.title('Magnitude-normalized Bessel filter frequency response')
>>> plt.xlabel('Frequency [radians / second]')
>>> plt.ylabel('Amplitude [dB]')
>>> plt.margins(0, 0.1)
>>> plt.grid(which='both', axis='both')
>>> plt.show()
Plot the delay-normalized filter, showing the maximally-flat group delay
at 0.1 seconds:
>>> b, a = signal.bessel(5, 1/0.1, 'low', analog=True, norm='delay')
>>> w, h = signal.freqs(b, a)
>>> plt.figure()
>>> plt.semilogx(w[1:], -np.diff(np.unwrap(np.angle(h)))/np.diff(w))
>>> plt.axhline(0.1, color='red') # 0.1 seconds group delay
>>> plt.title('Bessel filter group delay')
>>> plt.xlabel('Frequency [radians / second]')
>>> plt.ylabel('Group delay [seconds]')
>>> plt.margins(0, 0.1)
>>> plt.grid(which='both', axis='both')
>>> plt.show()
References
----------
.. [1] Thomson, W.E., "Delay Networks having Maximally Flat Frequency
Characteristics", Proceedings of the Institution of Electrical
Engineers, Part III, November 1949, Vol. 96, No. 44, pp. 487-490.
"""
return iirfilter(N, Wn, btype=btype, analog=analog,
output=output, ftype='bessel_'+norm)
def maxflat():
pass
def yulewalk():
pass
def band_stop_obj(wp, ind, passb, stopb, gpass, gstop, type):
"""
Band Stop Objective Function for order minimization.
Returns the non-integer order for an analog band stop filter.
Parameters
----------
wp : scalar
Edge of passband `passb`.
ind : int, {0, 1}
Index specifying which `passb` edge to vary (0 or 1).
passb : ndarray
Two element sequence of fixed passband edges.
stopb : ndarray
Two element sequence of fixed stopband edges.
gpass : float
Amount of ripple in the passband in dB.
gstop : float
Amount of attenuation in stopband in dB.
type : {'butter', 'cheby', 'ellip'}
Type of filter.
Returns
-------
n : scalar
Filter order (possibly non-integer).
"""
passbC = passb.copy()
passbC[ind] = wp
nat = (stopb * (passbC[0] - passbC[1]) /
(stopb ** 2 - passbC[0] * passbC[1]))
nat = min(abs(nat))
if type == 'butter':
GSTOP = 10 ** (0.1 * abs(gstop))
GPASS = 10 ** (0.1 * abs(gpass))
n = (log10((GSTOP - 1.0) / (GPASS - 1.0)) / (2 * log10(nat)))
elif type == 'cheby':
GSTOP = 10 ** (0.1 * abs(gstop))
GPASS = 10 ** (0.1 * abs(gpass))
n = arccosh(sqrt((GSTOP - 1.0) / (GPASS - 1.0))) / arccosh(nat)
elif type == 'ellip':
GSTOP = 10 ** (0.1 * gstop)
GPASS = 10 ** (0.1 * gpass)
arg1 = sqrt((GPASS - 1.0) / (GSTOP - 1.0))
arg0 = 1.0 / nat
d0 = special.ellipk([arg0 ** 2, 1 - arg0 ** 2])
d1 = special.ellipk([arg1 ** 2, 1 - arg1 ** 2])
n = (d0[0] * d1[1] / (d0[1] * d1[0]))
else:
raise ValueError("Incorrect type: %s" % type)
return n
def buttord(wp, ws, gpass, gstop, analog=False):
"""Butterworth filter order selection.
Return the order of the lowest order digital or analog Butterworth filter
that loses no more than `gpass` dB in the passband and has at least
`gstop` dB attenuation in the stopband.
Parameters
----------
wp, ws : float
Passband and stopband edge frequencies.
For digital filters, these are normalized from 0 to 1, where 1 is the
Nyquist frequency, pi radians/sample. (`wp` and `ws` are thus in
half-cycles / sample.) For example:
- Lowpass: wp = 0.2, ws = 0.3
- Highpass: wp = 0.3, ws = 0.2
- Bandpass: wp = [0.2, 0.5], ws = [0.1, 0.6]
- Bandstop: wp = [0.1, 0.6], ws = [0.2, 0.5]
For analog filters, `wp` and `ws` are angular frequencies (e.g. rad/s).
gpass : float
The maximum loss in the passband (dB).
gstop : float
The minimum attenuation in the stopband (dB).
analog : bool, optional
When True, return an analog filter, otherwise a digital filter is
returned.
Returns
-------
ord : int
The lowest order for a Butterworth filter which meets specs.
wn : ndarray or float
The Butterworth natural frequency (i.e. the "3dB frequency"). Should
be used with `butter` to give filter results.
See Also
--------
butter : Filter design using order and critical points
cheb1ord : Find order and critical points from passband and stopband spec
cheb2ord, ellipord
iirfilter : General filter design using order and critical frequencies
iirdesign : General filter design using passband and stopband spec
Examples
--------
Design an analog bandpass filter with passband within 3 dB from 20 to
50 rad/s, while rejecting at least -40 dB below 14 and above 60 rad/s.
Plot its frequency response, showing the passband and stopband
constraints in gray.
>>> from scipy import signal
>>> import matplotlib.pyplot as plt
>>> N, Wn = signal.buttord([20, 50], [14, 60], 3, 40, True)
>>> b, a = signal.butter(N, Wn, 'band', True)
>>> w, h = signal.freqs(b, a, np.logspace(1, 2, 500))
>>> plt.semilogx(w, 20 * np.log10(abs(h)))
>>> plt.title('Butterworth bandpass filter fit to constraints')
>>> plt.xlabel('Frequency [radians / second]')
>>> plt.ylabel('Amplitude [dB]')
>>> plt.grid(which='both', axis='both')
>>> plt.fill([1, 14, 14, 1], [-40, -40, 99, 99], '0.9', lw=0) # stop
>>> plt.fill([20, 20, 50, 50], [-99, -3, -3, -99], '0.9', lw=0) # pass
>>> plt.fill([60, 60, 1e9, 1e9], [99, -40, -40, 99], '0.9', lw=0) # stop
>>> plt.axis([10, 100, -60, 3])
>>> plt.show()
"""
wp = atleast_1d(wp)
ws = atleast_1d(ws)
filter_type = 2 * (len(wp) - 1)
filter_type += 1
if wp[0] >= ws[0]:
filter_type += 1
# Pre-warp frequencies for digital filter design
if not analog:
passb = tan(pi * wp / 2.0)
stopb = tan(pi * ws / 2.0)
else:
passb = wp * 1.0
stopb = ws * 1.0
if filter_type == 1: # low
nat = stopb / passb
elif filter_type == 2: # high
nat = passb / stopb
elif filter_type == 3: # stop
wp0 = optimize.fminbound(band_stop_obj, passb[0], stopb[0] - 1e-12,
args=(0, passb, stopb, gpass, gstop,
'butter'),
disp=0)
passb[0] = wp0
wp1 = optimize.fminbound(band_stop_obj, stopb[1] + 1e-12, passb[1],
args=(1, passb, stopb, gpass, gstop,
'butter'),
disp=0)
passb[1] = wp1
nat = ((stopb * (passb[0] - passb[1])) /
(stopb ** 2 - passb[0] * passb[1]))
elif filter_type == 4: # pass
nat = ((stopb ** 2 - passb[0] * passb[1]) /
(stopb * (passb[0] - passb[1])))
nat = min(abs(nat))
GSTOP = 10 ** (0.1 * abs(gstop))
GPASS = 10 ** (0.1 * abs(gpass))
ord = int(ceil(log10((GSTOP - 1.0) / (GPASS - 1.0)) / (2 * log10(nat))))
# Find the Butterworth natural frequency WN (or the "3dB frequency")
# to give exactly gpass at passb.
try:
W0 = (GPASS - 1.0) ** (-1.0 / (2.0 * ord))
except ZeroDivisionError:
W0 = 1.0
print("Warning, order is zero...check input parameters.")
# now convert this frequency back from lowpass prototype
# to the original analog filter
if filter_type == 1: # low
WN = W0 * passb
elif filter_type == 2: # high
WN = passb / W0
elif filter_type == 3: # stop
WN = numpy.zeros(2, float)
discr = sqrt((passb[1] - passb[0]) ** 2 +
4 * W0 ** 2 * passb[0] * passb[1])
WN[0] = ((passb[1] - passb[0]) + discr) / (2 * W0)
WN[1] = ((passb[1] - passb[0]) - discr) / (2 * W0)
WN = numpy.sort(abs(WN))
elif filter_type == 4: # pass
W0 = numpy.array([-W0, W0], float)
WN = (-W0 * (passb[1] - passb[0]) / 2.0 +
sqrt(W0 ** 2 / 4.0 * (passb[1] - passb[0]) ** 2 +
passb[0] * passb[1]))
WN = numpy.sort(abs(WN))
else:
raise ValueError("Bad type: %s" % filter_type)
if not analog:
wn = (2.0 / pi) * arctan(WN)
else:
wn = WN
if len(wn) == 1:
wn = wn[0]
return ord, wn
def cheb1ord(wp, ws, gpass, gstop, analog=False):
"""Chebyshev type I filter order selection.
Return the order of the lowest order digital or analog Chebyshev Type I
filter that loses no more than `gpass` dB in the passband and has at
least `gstop` dB attenuation in the stopband.
Parameters
----------
wp, ws : float
Passband and stopband edge frequencies.
For digital filters, these are normalized from 0 to 1, where 1 is the
Nyquist frequency, pi radians/sample. (`wp` and `ws` are thus in
half-cycles / sample.) For example:
- Lowpass: wp = 0.2, ws = 0.3
- Highpass: wp = 0.3, ws = 0.2
- Bandpass: wp = [0.2, 0.5], ws = [0.1, 0.6]
- Bandstop: wp = [0.1, 0.6], ws = [0.2, 0.5]
For analog filters, `wp` and `ws` are angular frequencies (e.g. rad/s).
gpass : float
The maximum loss in the passband (dB).
gstop : float
The minimum attenuation in the stopband (dB).
analog : bool, optional
When True, return an analog filter, otherwise a digital filter is
returned.
Returns
-------
ord : int
The lowest order for a Chebyshev type I filter that meets specs.
wn : ndarray or float
The Chebyshev natural frequency (the "3dB frequency") for use with
`cheby1` to give filter results.
See Also
--------
cheby1 : Filter design using order and critical points
buttord : Find order and critical points from passband and stopband spec
cheb2ord, ellipord
iirfilter : General filter design using order and critical frequencies
iirdesign : General filter design using passband and stopband spec
Examples
--------
Design a digital lowpass filter such that the passband is within 3 dB up
to 0.2*(fs/2), while rejecting at least -40 dB above 0.3*(fs/2). Plot its
frequency response, showing the passband and stopband constraints in gray.
>>> from scipy import signal
>>> import matplotlib.pyplot as plt
>>> N, Wn = signal.cheb1ord(0.2, 0.3, 3, 40)
>>> b, a = signal.cheby1(N, 3, Wn, 'low')
>>> w, h = signal.freqz(b, a)
>>> plt.semilogx(w / np.pi, 20 * np.log10(abs(h)))
>>> plt.title('Chebyshev I lowpass filter fit to constraints')
>>> plt.xlabel('Normalized frequency')
>>> plt.ylabel('Amplitude [dB]')
>>> plt.grid(which='both', axis='both')
>>> plt.fill([.01, 0.2, 0.2, .01], [-3, -3, -99, -99], '0.9', lw=0) # stop
>>> plt.fill([0.3, 0.3, 2, 2], [ 9, -40, -40, 9], '0.9', lw=0) # pass
>>> plt.axis([0.08, 1, -60, 3])
>>> plt.show()
"""
wp = atleast_1d(wp)
ws = atleast_1d(ws)
filter_type = 2 * (len(wp) - 1)
if wp[0] < ws[0]:
filter_type += 1
else:
filter_type += 2
# Pre-warp frequencies for digital filter design
if not analog:
passb = tan(pi * wp / 2.0)
stopb = tan(pi * ws / 2.0)
else:
passb = wp * 1.0
stopb = ws * 1.0
if filter_type == 1: # low
nat = stopb / passb
elif filter_type == 2: # high
nat = passb / stopb
elif filter_type == 3: # stop
wp0 = optimize.fminbound(band_stop_obj, passb[0], stopb[0] - 1e-12,
args=(0, passb, stopb, gpass, gstop, 'cheby'),
disp=0)
passb[0] = wp0
wp1 = optimize.fminbound(band_stop_obj, stopb[1] + 1e-12, passb[1],
args=(1, passb, stopb, gpass, gstop, 'cheby'),
disp=0)
passb[1] = wp1
nat = ((stopb * (passb[0] - passb[1])) /
(stopb ** 2 - passb[0] * passb[1]))
elif filter_type == 4: # pass
nat = ((stopb ** 2 - passb[0] * passb[1]) /
(stopb * (passb[0] - passb[1])))
nat = min(abs(nat))
GSTOP = 10 ** (0.1 * abs(gstop))
GPASS = 10 ** (0.1 * abs(gpass))
ord = int(ceil(arccosh(sqrt((GSTOP - 1.0) / (GPASS - 1.0))) /
arccosh(nat)))
# Natural frequencies are just the passband edges
if not analog:
wn = (2.0 / pi) * arctan(passb)
else:
wn = passb
if len(wn) == 1:
wn = wn[0]
return ord, wn
def cheb2ord(wp, ws, gpass, gstop, analog=False):
"""Chebyshev type II filter order selection.
Return the order of the lowest order digital or analog Chebyshev Type II
filter that loses no more than `gpass` dB in the passband and has at least
`gstop` dB attenuation in the stopband.
Parameters
----------
wp, ws : float
Passband and stopband edge frequencies.
For digital filters, these are normalized from 0 to 1, where 1 is the
Nyquist frequency, pi radians/sample. (`wp` and `ws` are thus in
half-cycles / sample.) For example:
- Lowpass: wp = 0.2, ws = 0.3
- Highpass: wp = 0.3, ws = 0.2
- Bandpass: wp = [0.2, 0.5], ws = [0.1, 0.6]
- Bandstop: wp = [0.1, 0.6], ws = [0.2, 0.5]
For analog filters, `wp` and `ws` are angular frequencies (e.g. rad/s).
gpass : float
The maximum loss in the passband (dB).
gstop : float
The minimum attenuation in the stopband (dB).
analog : bool, optional
When True, return an analog filter, otherwise a digital filter is
returned.
Returns
-------
ord : int
The lowest order for a Chebyshev type II filter that meets specs.
wn : ndarray or float
The Chebyshev natural frequency (the "3dB frequency") for use with
`cheby2` to give filter results.
See Also
--------
cheby2 : Filter design using order and critical points
buttord : Find order and critical points from passband and stopband spec
cheb1ord, ellipord
iirfilter : General filter design using order and critical frequencies
iirdesign : General filter design using passband and stopband spec
Examples
--------
Design a digital bandstop filter which rejects -60 dB from 0.2*(fs/2) to
0.5*(fs/2), while staying within 3 dB below 0.1*(fs/2) or above
0.6*(fs/2). Plot its frequency response, showing the passband and
stopband constraints in gray.
>>> from scipy import signal
>>> import matplotlib.pyplot as plt
>>> N, Wn = signal.cheb2ord([0.1, 0.6], [0.2, 0.5], 3, 60)
>>> b, a = signal.cheby2(N, 60, Wn, 'stop')
>>> w, h = signal.freqz(b, a)
>>> plt.semilogx(w / np.pi, 20 * np.log10(abs(h)))
>>> plt.title('Chebyshev II bandstop filter fit to constraints')
>>> plt.xlabel('Normalized frequency')
>>> plt.ylabel('Amplitude [dB]')
>>> plt.grid(which='both', axis='both')
>>> plt.fill([.01, .1, .1, .01], [-3, -3, -99, -99], '0.9', lw=0) # stop
>>> plt.fill([.2, .2, .5, .5], [ 9, -60, -60, 9], '0.9', lw=0) # pass
>>> plt.fill([.6, .6, 2, 2], [-99, -3, -3, -99], '0.9', lw=0) # stop
>>> plt.axis([0.06, 1, -80, 3])
>>> plt.show()
"""
wp = atleast_1d(wp)
ws = atleast_1d(ws)
filter_type = 2 * (len(wp) - 1)
if wp[0] < ws[0]:
filter_type += 1
else:
filter_type += 2
# Pre-warp frequencies for digital filter design
if not analog:
passb = tan(pi * wp / 2.0)
stopb = tan(pi * ws / 2.0)
else:
passb = wp * 1.0
stopb = ws * 1.0
if filter_type == 1: # low
nat = stopb / passb
elif filter_type == 2: # high
nat = passb / stopb
elif filter_type == 3: # stop
wp0 = optimize.fminbound(band_stop_obj, passb[0], stopb[0] - 1e-12,
args=(0, passb, stopb, gpass, gstop, 'cheby'),
disp=0)
passb[0] = wp0
wp1 = optimize.fminbound(band_stop_obj, stopb[1] + 1e-12, passb[1],
args=(1, passb, stopb, gpass, gstop, 'cheby'),
disp=0)
passb[1] = wp1
nat = ((stopb * (passb[0] - passb[1])) /
(stopb ** 2 - passb[0] * passb[1]))
elif filter_type == 4: # pass
nat = ((stopb ** 2 - passb[0] * passb[1]) /
(stopb * (passb[0] - passb[1])))
nat = min(abs(nat))
GSTOP = 10 ** (0.1 * abs(gstop))
GPASS = 10 ** (0.1 * abs(gpass))
ord = int(ceil(arccosh(sqrt((GSTOP - 1.0) / (GPASS - 1.0))) /
arccosh(nat)))
# Find frequency where analog response is -gpass dB.
# Then convert back from low-pass prototype to the original filter.
new_freq = cosh(1.0 / ord * arccosh(sqrt((GSTOP - 1.0) / (GPASS - 1.0))))
new_freq = 1.0 / new_freq
if filter_type == 1:
nat = passb / new_freq
elif filter_type == 2:
nat = passb * new_freq
elif filter_type == 3:
nat = numpy.zeros(2, float)
nat[0] = (new_freq / 2.0 * (passb[0] - passb[1]) +
sqrt(new_freq ** 2 * (passb[1] - passb[0]) ** 2 / 4.0 +
passb[1] * passb[0]))
nat[1] = passb[1] * passb[0] / nat[0]
elif filter_type == 4:
nat = numpy.zeros(2, float)
nat[0] = (1.0 / (2.0 * new_freq) * (passb[0] - passb[1]) +
sqrt((passb[1] - passb[0]) ** 2 / (4.0 * new_freq ** 2) +
passb[1] * passb[0]))
nat[1] = passb[0] * passb[1] / nat[0]
if not analog:
wn = (2.0 / pi) * arctan(nat)
else:
wn = nat
if len(wn) == 1:
wn = wn[0]
return ord, wn
def ellipord(wp, ws, gpass, gstop, analog=False):
"""Elliptic (Cauer) filter order selection.
Return the order of the lowest order digital or analog elliptic filter
that loses no more than `gpass` dB in the passband and has at least
`gstop` dB attenuation in the stopband.
Parameters
----------
wp, ws : float
Passband and stopband edge frequencies.
For digital filters, these are normalized from 0 to 1, where 1 is the
Nyquist frequency, pi radians/sample. (`wp` and `ws` are thus in
half-cycles / sample.) For example:
- Lowpass: wp = 0.2, ws = 0.3
- Highpass: wp = 0.3, ws = 0.2
- Bandpass: wp = [0.2, 0.5], ws = [0.1, 0.6]
- Bandstop: wp = [0.1, 0.6], ws = [0.2, 0.5]
For analog filters, `wp` and `ws` are angular frequencies (e.g. rad/s).
gpass : float
The maximum loss in the passband (dB).
gstop : float
The minimum attenuation in the stopband (dB).
analog : bool, optional
When True, return an analog filter, otherwise a digital filter is
returned.
Returns
-------
ord : int
The lowest order for an Elliptic (Cauer) filter that meets specs.
wn : ndarray or float
The Chebyshev natural frequency (the "3dB frequency") for use with
`ellip` to give filter results.
See Also
--------
ellip : Filter design using order and critical points
buttord : Find order and critical points from passband and stopband spec
cheb1ord, cheb2ord
iirfilter : General filter design using order and critical frequencies
iirdesign : General filter design using passband and stopband spec
Examples
--------
Design an analog highpass filter such that the passband is within 3 dB
above 30 rad/s, while rejecting -60 dB at 10 rad/s. Plot its
frequency response, showing the passband and stopband constraints in gray.
>>> from scipy import signal
>>> import matplotlib.pyplot as plt
>>> N, Wn = signal.ellipord(30, 10, 3, 60, True)
>>> b, a = signal.ellip(N, 3, 60, Wn, 'high', True)
>>> w, h = signal.freqs(b, a, np.logspace(0, 3, 500))
>>> plt.semilogx(w, 20 * np.log10(abs(h)))
>>> plt.title('Elliptical highpass filter fit to constraints')
>>> plt.xlabel('Frequency [radians / second]')
>>> plt.ylabel('Amplitude [dB]')
>>> plt.grid(which='both', axis='both')
>>> plt.fill([.1, 10, 10, .1], [1e4, 1e4, -60, -60], '0.9', lw=0) # stop
>>> plt.fill([30, 30, 1e9, 1e9], [-99, -3, -3, -99], '0.9', lw=0) # pass
>>> plt.axis([1, 300, -80, 3])
>>> plt.show()
"""
wp = atleast_1d(wp)
ws = atleast_1d(ws)
filter_type = 2 * (len(wp) - 1)
filter_type += 1
if wp[0] >= ws[0]:
filter_type += 1
# Pre-warp frequencies for digital filter design
if not analog:
passb = tan(pi * wp / 2.0)
stopb = tan(pi * ws / 2.0)
else:
passb = wp * 1.0
stopb = ws * 1.0
if filter_type == 1: # low
nat = stopb / passb
elif filter_type == 2: # high
nat = passb / stopb
elif filter_type == 3: # stop
wp0 = optimize.fminbound(band_stop_obj, passb[0], stopb[0] - 1e-12,
args=(0, passb, stopb, gpass, gstop, 'ellip'),
disp=0)
passb[0] = wp0
wp1 = optimize.fminbound(band_stop_obj, stopb[1] + 1e-12, passb[1],
args=(1, passb, stopb, gpass, gstop, 'ellip'),
disp=0)
passb[1] = wp1
nat = ((stopb * (passb[0] - passb[1])) /
(stopb ** 2 - passb[0] * passb[1]))
elif filter_type == 4: # pass
nat = ((stopb ** 2 - passb[0] * passb[1]) /
(stopb * (passb[0] - passb[1])))
nat = min(abs(nat))
GSTOP = 10 ** (0.1 * gstop)
GPASS = 10 ** (0.1 * gpass)
arg1 = sqrt((GPASS - 1.0) / (GSTOP - 1.0))
arg0 = 1.0 / nat
d0 = special.ellipk([arg0 ** 2, 1 - arg0 ** 2])
d1 = special.ellipk([arg1 ** 2, 1 - arg1 ** 2])
ord = int(ceil(d0[0] * d1[1] / (d0[1] * d1[0])))
if not analog:
wn = arctan(passb) * 2.0 / pi
else:
wn = passb
if len(wn) == 1:
wn = wn[0]
return ord, wn
def buttap(N):
"""Return (z,p,k) for analog prototype of Nth-order Butterworth filter.
The filter will have an angular (e.g. rad/s) cutoff frequency of 1.
See Also
--------
butter : Filter design function using this prototype
"""
if abs(int(N)) != N:
raise ValueError("Filter order must be a nonnegative integer")
z = numpy.array([])
m = numpy.arange(-N+1, N, 2)
# Middle value is 0 to ensure an exactly real pole
p = -numpy.exp(1j * pi * m / (2 * N))
k = 1
return z, p, k
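# A minimal usage sketch for ``buttap`` (relies only on this module's
# ``numpy`` import): an order-4 prototype has no zeros, four poles on the
# unit circle in the left half-plane, and unit gain.
#
#     z, p, k = buttap(4)
#     assert z.size == 0 and p.size == 4 and k == 1
#     assert numpy.allclose(abs(p), 1) and numpy.all(p.real < 0)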
def cheb1ap(N, rp):
"""
Return (z,p,k) for Nth-order Chebyshev type I analog lowpass filter.
The returned filter prototype has `rp` decibels of ripple in the passband.
The filter's angular (e.g. rad/s) cutoff frequency is normalized to 1,
defined as the point at which the gain first drops below ``-rp``.
See Also
--------
cheby1 : Filter design function using this prototype
"""
if abs(int(N)) != N:
raise ValueError("Filter order must be a nonnegative integer")
elif N == 0:
# Avoid divide-by-zero error
# Even order filters have DC gain of -rp dB
return numpy.array([]), numpy.array([]), 10**(-rp/20)
z = numpy.array([])
# Ripple factor (epsilon)
eps = numpy.sqrt(10 ** (0.1 * rp) - 1.0)
mu = 1.0 / N * arcsinh(1 / eps)
# Arrange poles in an ellipse on the left half of the S-plane
m = numpy.arange(-N+1, N, 2)
theta = pi * m / (2*N)
p = -sinh(mu + 1j*theta)
k = numpy.prod(-p, axis=0).real
if N % 2 == 0:
k = k / sqrt((1 + eps * eps))
return z, p, k
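# A minimal usage sketch for ``cheb1ap`` (relies on this module's ``numpy``
# import): for an even order the prototype's DC gain sits ``rp`` dB below
# unity, which follows from the ``k / sqrt(1 + eps**2)`` scaling above.
#
#     z, p, k = cheb1ap(4, rp=1)
#     dc_gain = abs(k / numpy.prod(-p))
#     assert numpy.allclose(20 * numpy.log10(dc_gain), -1.0)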
def cheb2ap(N, rs):
"""
    Return (z,p,k) for Nth-order Chebyshev type II analog lowpass filter.
The returned filter prototype has `rs` decibels of ripple in the stopband.
The filter's angular (e.g. rad/s) cutoff frequency is normalized to 1,
defined as the point at which the gain first reaches ``-rs``.
See Also
--------
cheby2 : Filter design function using this prototype
"""
if abs(int(N)) != N:
raise ValueError("Filter order must be a nonnegative integer")
elif N == 0:
# Avoid divide-by-zero warning
return numpy.array([]), numpy.array([]), 1
# Ripple factor (epsilon)
de = 1.0 / sqrt(10 ** (0.1 * rs) - 1)
mu = arcsinh(1.0 / de) / N
if N % 2:
m = numpy.concatenate((numpy.arange(-N+1, 0, 2),
numpy.arange(2, N, 2)))
else:
m = numpy.arange(-N+1, N, 2)
z = -conjugate(1j / sin(m * pi / (2.0 * N)))
# Poles around the unit circle like Butterworth
p = -exp(1j * pi * numpy.arange(-N+1, N, 2) / (2 * N))
# Warp into Chebyshev II
p = sinh(mu) * p.real + 1j * cosh(mu) * p.imag
p = 1.0 / p
k = (numpy.prod(-p, axis=0) / numpy.prod(-z, axis=0)).real
return z, p, k
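# A minimal usage sketch for ``cheb2ap`` (relies on this module's ``numpy``
# import): the type II prototype keeps its ripple in the stopband, so the DC
# gain is unity regardless of order.
#
#     z, p, k = cheb2ap(4, rs=40)
#     dc_gain = abs(k * numpy.prod(-z) / numpy.prod(-p))
#     assert numpy.allclose(dc_gain, 1.0)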
EPSILON = 2e-16
def _vratio(u, ineps, mp):
[s, c, d, phi] = special.ellipj(u, mp)
ret = abs(ineps - s / c)
return ret
def _kratio(m, k_ratio):
m = float(m)
if m < 0:
m = 0.0
if m > 1:
m = 1.0
if abs(m) > EPSILON and (abs(m) + EPSILON) < 1:
k = special.ellipk([m, 1 - m])
r = k[0] / k[1] - k_ratio
elif abs(m) > EPSILON:
r = -k_ratio
else:
r = 1e20
return abs(r)
def ellipap(N, rp, rs):
"""Return (z,p,k) of Nth-order elliptic analog lowpass filter.
The filter is a normalized prototype that has `rp` decibels of ripple
in the passband and a stopband `rs` decibels down.
The filter's angular (e.g. rad/s) cutoff frequency is normalized to 1,
defined as the point at which the gain first drops below ``-rp``.
See Also
--------
ellip : Filter design function using this prototype
References
----------
.. [1] Lutova, Tosic, and Evans, "Filter Design for Signal Processing",
Chapters 5 and 12.
"""
if abs(int(N)) != N:
raise ValueError("Filter order must be a nonnegative integer")
elif N == 0:
# Avoid divide-by-zero warning
# Even order filters have DC gain of -rp dB
return numpy.array([]), numpy.array([]), 10**(-rp/20)
elif N == 1:
p = -sqrt(1.0 / (10 ** (0.1 * rp) - 1.0))
k = -p
z = []
return asarray(z), asarray(p), k
eps = numpy.sqrt(10 ** (0.1 * rp) - 1)
ck1 = eps / numpy.sqrt(10 ** (0.1 * rs) - 1)
ck1p = numpy.sqrt(1 - ck1 * ck1)
if ck1p == 1:
raise ValueError("Cannot design a filter with given rp and rs"
" specifications.")
val = special.ellipk([ck1 * ck1, ck1p * ck1p])
if abs(1 - ck1p * ck1p) < EPSILON:
krat = 0
else:
krat = N * val[0] / val[1]
m = optimize.fmin(_kratio, [0.5], args=(krat,), maxfun=250, maxiter=250,
disp=0)
if m < 0 or m > 1:
m = optimize.fminbound(_kratio, 0, 1, args=(krat,), maxfun=250,
maxiter=250, disp=0)
capk = special.ellipk(m)
j = numpy.arange(1 - N % 2, N, 2)
jj = len(j)
[s, c, d, phi] = special.ellipj(j * capk / N, m * numpy.ones(jj))
snew = numpy.compress(abs(s) > EPSILON, s, axis=-1)
z = 1.0 / (sqrt(m) * snew)
z = 1j * z
z = numpy.concatenate((z, conjugate(z)))
r = optimize.fmin(_vratio, special.ellipk(m), args=(1. / eps, ck1p * ck1p),
maxfun=250, maxiter=250, disp=0)
v0 = capk * r / (N * val[0])
[sv, cv, dv, phi] = special.ellipj(v0, 1 - m)
p = -(c * d * sv * cv + 1j * s * dv) / (1 - (d * sv) ** 2.0)
if N % 2:
newp = numpy.compress(abs(p.imag) > EPSILON *
numpy.sqrt(numpy.sum(p * numpy.conjugate(p),
axis=0).real),
p, axis=-1)
p = numpy.concatenate((p, conjugate(newp)))
else:
p = numpy.concatenate((p, conjugate(p)))
k = (numpy.prod(-p, axis=0) / numpy.prod(-z, axis=0)).real
if N % 2 == 0:
k = k / numpy.sqrt((1 + eps * eps))
return z, p, k
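# A minimal usage sketch for ``ellipap`` (relies on this module's ``numpy``
# import): as with Chebyshev type I, an even-order elliptic prototype has a
# DC gain ``rp`` dB below unity.
#
#     z, p, k = ellipap(4, rp=1, rs=40)
#     dc_gain = abs(k * numpy.prod(-z) / numpy.prod(-p))
#     assert numpy.allclose(20 * numpy.log10(dc_gain), -1.0)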
# TODO: Make this a real public function scipy.misc.ff
def _falling_factorial(x, n):
r"""
Return the factorial of `x` to the `n` falling.
This is defined as:
.. math:: x^\underline n = (x)_n = x (x-1) \cdots (x-n+1)
This can more efficiently calculate ratios of factorials, since:
n!/m! == falling_factorial(n, n-m)
where n >= m
skipping the factors that cancel out
the usual factorial n! == ff(n, n)
"""
val = 1
for k in range(x - n + 1, x + 1):
val *= k
return val
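# A quick sanity check for ``_falling_factorial``:
#
#     _falling_factorial(6, 3)    # == 6 * 5 * 4 == 120
#     _falling_factorial(10, 10)  # == 10! == 3628800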
def _bessel_poly(n, reverse=False):
"""
Return the coefficients of Bessel polynomial of degree `n`
If `reverse` is true, a reverse Bessel polynomial is output.
Output is a list of coefficients:
[1] = 1
[1, 1] = 1*s + 1
[1, 3, 3] = 1*s^2 + 3*s + 3
[1, 6, 15, 15] = 1*s^3 + 6*s^2 + 15*s + 15
[1, 10, 45, 105, 105] = 1*s^4 + 10*s^3 + 45*s^2 + 105*s + 105
etc.
Output is a Python list of arbitrary precision long ints, so n is only
limited by your hardware's memory.
Sequence is http://oeis.org/A001498 , and output can be confirmed to
match http://oeis.org/A001498/b001498.txt :
>>> i = 0
>>> for n in range(51):
... for x in _bessel_poly(n, reverse=True):
... print(i, x)
... i += 1
"""
if abs(int(n)) != n:
raise ValueError("Polynomial order must be a nonnegative integer")
else:
n = int(n) # np.int32 doesn't work, for instance
out = []
for k in range(n + 1):
num = _falling_factorial(2*n - k, n)
den = 2**(n - k) * factorial(k, exact=True)
out.append(num // den)
if reverse:
return out[::-1]
else:
return out
def _campos_zeros(n):
"""
Return approximate zero locations of Bessel polynomials y_n(x) for order
`n` using polynomial fit (Campos-Calderon 2011)
"""
if n == 1:
return asarray([-1+0j])
s = npp_polyval(n, [0, 0, 2, 0, -3, 1])
b3 = npp_polyval(n, [16, -8]) / s
b2 = npp_polyval(n, [-24, -12, 12]) / s
b1 = npp_polyval(n, [8, 24, -12, -2]) / s
b0 = npp_polyval(n, [0, -6, 0, 5, -1]) / s
r = npp_polyval(n, [0, 0, 2, 1])
a1 = npp_polyval(n, [-6, -6]) / r
a2 = 6 / r
k = np.arange(1, n+1)
x = npp_polyval(k, [0, a1, a2])
y = npp_polyval(k, [b0, b1, b2, b3])
return x + 1j*y
def _aberth(f, fp, x0, tol=1e-15, maxiter=50):
"""
Given a function `f`, its first derivative `fp`, and a set of initial
guesses `x0`, simultaneously find the roots of the polynomial using the
Aberth-Ehrlich method.
``len(x0)`` should equal the number of roots of `f`.
(This is not a complete implementation of Bini's algorithm.)
"""
N = len(x0)
x = array(x0, complex)
beta = np.empty_like(x0)
for iteration in range(maxiter):
alpha = -f(x) / fp(x) # Newton's method
# Model "repulsion" between zeros
for k in range(N):
beta[k] = np.sum(1/(x[k] - x[k+1:]))
beta[k] += np.sum(1/(x[k] - x[:k]))
x += alpha / (1 + alpha * beta)
if not all(np.isfinite(x)):
raise RuntimeError('Root-finding calculation failed')
# Mekwi: The iterative process can be stopped when |hn| has become
# less than the largest error one is willing to permit in the root.
if all(abs(alpha) <= tol):
break
else:
raise Exception('Zeros failed to converge')
return x
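# A usage sketch for ``_aberth`` on the quadratic x**2 - 1, whose roots are
# +1 and -1 (the starting guesses below are illustrative only):
#
#     f = lambda x: x ** 2 - 1
#     fp = lambda x: 2 * x
#     roots = _aberth(f, fp, [0.5 + 0.5j, -0.5 - 0.5j])
#     # roots converges to approximately [1, -1]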
def _bessel_zeros(N):
"""
Find zeros of ordinary Bessel polynomial of order `N`, by root-finding of
modified Bessel function of the second kind
"""
if N == 0:
return asarray([])
# Generate starting points
x0 = _campos_zeros(N)
# Zeros are the same for exp(1/x)*K_{N+0.5}(1/x) and Nth-order ordinary
# Bessel polynomial y_N(x)
def f(x):
return special.kve(N+0.5, 1/x)
# First derivative of above
def fp(x):
return (special.kve(N-0.5, 1/x)/(2*x**2) -
special.kve(N+0.5, 1/x)/(x**2) +
special.kve(N+1.5, 1/x)/(2*x**2))
# Starting points converge to true zeros
x = _aberth(f, fp, x0)
# Improve precision using Newton's method on each
for i in range(len(x)):
x[i] = optimize.newton(f, x[i], fp, tol=1e-15)
# Average complex conjugates to make them exactly symmetrical
x = np.mean((x, x[::-1].conj()), 0)
# Zeros should sum to -1
if abs(np.sum(x) + 1) > 1e-15:
raise RuntimeError('Generated zeros are inaccurate')
return x
def _norm_factor(p, k):
"""
Numerically find frequency shift to apply to delay-normalized filter such
that -3 dB point is at 1 rad/sec.
`p` is an array_like of polynomial poles
`k` is a float gain
First 10 values are listed in "Bessel Scale Factors" table,
"Bessel Filters Polynomials, Poles and Circuit Elements 2003, C. Bond."
"""
p = asarray(p, dtype=complex)
def G(w):
"""
Gain of filter
"""
return abs(k / prod(1j*w - p))
def cutoff(w):
"""
When gain = -3 dB, return 0
"""
return G(w) - 1/np.sqrt(2)
return optimize.newton(cutoff, 1.5)
def besselap(N, norm='phase'):
"""
Return (z,p,k) for analog prototype of an Nth-order Bessel filter.
Parameters
----------
N : int
The order of the filter.
norm : {'phase', 'delay', 'mag'}, optional
Frequency normalization:
``phase``
The filter is normalized such that the phase response reaches its
midpoint at an angular (e.g. rad/s) cutoff frequency of 1. This
happens for both low-pass and high-pass filters, so this is the
"phase-matched" case. [6]_
The magnitude response asymptotes are the same as a Butterworth
filter of the same order with a cutoff of `Wn`.
This is the default, and matches MATLAB's implementation.
``delay``
The filter is normalized such that the group delay in the passband
is 1 (e.g. 1 second). This is the "natural" type obtained by
solving Bessel polynomials
``mag``
The filter is normalized such that the gain magnitude is -3 dB at
angular frequency 1. This is called "frequency normalization" by
Bond. [1]_
.. versionadded:: 0.18.0
Returns
-------
z : ndarray
Zeros of the transfer function. Is always an empty array.
p : ndarray
Poles of the transfer function.
k : scalar
Gain of the transfer function. For phase-normalized, this is always 1.
See Also
--------
bessel : Filter design function using this prototype
Notes
-----
To find the pole locations, approximate starting points are generated [2]_
for the zeros of the ordinary Bessel polynomial [3]_, then the
Aberth-Ehrlich method [4]_ [5]_ is used on the Kv(x) Bessel function to
calculate more accurate zeros, and these locations are then inverted about
the unit circle.
References
----------
.. [1] C.R. Bond, "Bessel Filter Constants",
http://www.crbond.com/papers/bsf.pdf
.. [2] Campos and Calderon, "Approximate closed-form formulas for the
zeros of the Bessel Polynomials", arXiv:1105.0957 [math-ph],
http://arxiv.org/abs/1105.0957
.. [3] Thomson, W.E., "Delay Networks having Maximally Flat Frequency
Characteristics", Proceedings of the Institution of Electrical
Engineers, Part III, November 1949, Vol. 96, No. 44, pp. 487-490.
.. [4] Aberth, "Iteration Methods for Finding all Zeros of a Polynomial
Simultaneously", Mathematics of Computation, Vol. 27, No. 122,
April 1973
.. [5] Ehrlich, "A modified Newton method for polynomials", Communications
of the ACM, Vol. 10, Issue 2, pp. 107-108, Feb. 1967,
DOI:10.1145/363067.363115
.. [6] Miller and Bohn, "A Bessel Filter Crossover, and Its Relation to
Others", RaneNote 147, 1998, http://www.rane.com/note147.html
"""
if abs(int(N)) != N:
raise ValueError("Filter order must be a nonnegative integer")
if N == 0:
p = []
k = 1
else:
# Find roots of reverse Bessel polynomial
p = 1/_bessel_zeros(N)
a_last = _falling_factorial(2*N, N) // 2**N
# Shift them to a different normalization if required
if norm in ('delay', 'mag'):
# Normalized for group delay of 1
k = a_last
if norm == 'mag':
# -3 dB magnitude point is at 1 rad/sec
norm_factor = _norm_factor(p, k)
p /= norm_factor
k = norm_factor**-N * a_last
elif norm == 'phase':
# Phase-matched (1/2 max phase shift at 1 rad/sec)
# Asymptotes are same as Butterworth filter
p *= 10**(-math.log10(a_last)/N)
k = 1
else:
raise ValueError('normalization not understood')
return asarray([]), asarray(p, dtype=complex), float(k)
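# A minimal usage sketch for ``besselap`` (relies on this module's ``numpy``
# import): the default phase-normalized prototype has no zeros, N stable
# poles and unit gain.
#
#     z, p, k = besselap(4)
#     assert z.size == 0 and p.size == 4 and k == 1.0
#     assert numpy.all(p.real < 0)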
filter_dict = {'butter': [buttap, buttord],
'butterworth': [buttap, buttord],
'cauer': [ellipap, ellipord],
'elliptic': [ellipap, ellipord],
'ellip': [ellipap, ellipord],
'bessel': [besselap],
'bessel_phase': [besselap],
'bessel_delay': [besselap],
'bessel_mag': [besselap],
'cheby1': [cheb1ap, cheb1ord],
'chebyshev1': [cheb1ap, cheb1ord],
'chebyshevi': [cheb1ap, cheb1ord],
'cheby2': [cheb2ap, cheb2ord],
'chebyshev2': [cheb2ap, cheb2ord],
'chebyshevii': [cheb2ap, cheb2ord],
}
band_dict = {'band': 'bandpass',
'bandpass': 'bandpass',
'pass': 'bandpass',
'bp': 'bandpass',
'bs': 'bandstop',
'bandstop': 'bandstop',
'bands': 'bandstop',
'stop': 'bandstop',
'l': 'lowpass',
'low': 'lowpass',
'lowpass': 'lowpass',
'lp': 'lowpass',
'high': 'highpass',
'highpass': 'highpass',
'h': 'highpass',
'hp': 'highpass',
}
bessel_norms = {'bessel': 'phase',
'bessel_phase': 'phase',
'bessel_delay': 'delay',
'bessel_mag': 'mag'}
| bsd-3-clause |
Barmaley-exe/scikit-learn | sklearn/neighbors/regression.py | 39 | 10464 | """Nearest Neighbor Regression"""
# Authors: Jake Vanderplas <[email protected]>
# Fabian Pedregosa <[email protected]>
# Alexandre Gramfort <[email protected]>
# Sparseness support by Lars Buitinck <[email protected]>
# Multi-output support by Arnaud Joly <[email protected]>
#
# License: BSD 3 clause (C) INRIA, University of Amsterdam
import numpy as np
from .base import _get_weights, _check_weights, NeighborsBase, KNeighborsMixin
from .base import RadiusNeighborsMixin, SupervisedFloatMixin
from ..base import RegressorMixin
from ..utils import check_array
class KNeighborsRegressor(NeighborsBase, KNeighborsMixin,
SupervisedFloatMixin,
RegressorMixin):
"""Regression based on k-nearest neighbors.
The target is predicted by local interpolation of the targets
associated of the nearest neighbors in the training set.
Parameters
----------
n_neighbors : int, optional (default = 5)
        Number of neighbors to use by default for :meth:`kneighbors` queries.
weights : str or callable
weight function used in prediction. Possible values:
- 'uniform' : uniform weights. All points in each neighborhood
are weighted equally.
- 'distance' : weight points by the inverse of their distance.
in this case, closer neighbors of a query point will have a
greater influence than neighbors which are further away.
- [callable] : a user-defined function which accepts an
array of distances, and returns an array of the same shape
containing the weights.
Uniform weights are used by default.
algorithm : {'auto', 'ball_tree', 'kd_tree', 'brute'}, optional
Algorithm used to compute the nearest neighbors:
- 'ball_tree' will use :class:`BallTree`
        - 'kd_tree' will use :class:`KDTree`
- 'brute' will use a brute-force search.
- 'auto' will attempt to decide the most appropriate algorithm
based on the values passed to :meth:`fit` method.
Note: fitting on sparse input will override the setting of
this parameter, using brute force.
leaf_size : int, optional (default = 30)
Leaf size passed to BallTree or KDTree. This can affect the
speed of the construction and query, as well as the memory
required to store the tree. The optimal value depends on the
nature of the problem.
metric : string or DistanceMetric object (default='minkowski')
the distance metric to use for the tree. The default metric is
minkowski, and with p=2 is equivalent to the standard Euclidean
metric. See the documentation of the DistanceMetric class for a
list of available metrics.
p : integer, optional (default = 2)
Power parameter for the Minkowski metric. When p = 1, this is
equivalent to using manhattan_distance (l1), and euclidean_distance
(l2) for p = 2. For arbitrary p, minkowski_distance (l_p) is used.
metric_params: dict, optional (default = None)
additional keyword arguments for the metric function.
Examples
--------
>>> X = [[0], [1], [2], [3]]
>>> y = [0, 0, 1, 1]
>>> from sklearn.neighbors import KNeighborsRegressor
>>> neigh = KNeighborsRegressor(n_neighbors=2)
>>> neigh.fit(X, y) # doctest: +ELLIPSIS
KNeighborsRegressor(...)
>>> print(neigh.predict([[1.5]]))
[ 0.5]
See also
--------
NearestNeighbors
RadiusNeighborsRegressor
KNeighborsClassifier
RadiusNeighborsClassifier
Notes
-----
See :ref:`Nearest Neighbors <neighbors>` in the online documentation
for a discussion of the choice of ``algorithm`` and ``leaf_size``.
.. warning::
Regarding the Nearest Neighbors algorithms, if it is found that two
neighbors, neighbor `k+1` and `k`, have identical distances but
       different labels, the results will depend on the ordering of the
training data.
http://en.wikipedia.org/wiki/K-nearest_neighbor_algorithm
"""
def __init__(self, n_neighbors=5, weights='uniform',
algorithm='auto', leaf_size=30,
p=2, metric='minkowski', metric_params=None, **kwargs):
self._init_params(n_neighbors=n_neighbors,
algorithm=algorithm,
leaf_size=leaf_size, metric=metric, p=p,
metric_params=metric_params, **kwargs)
self.weights = _check_weights(weights)
def predict(self, X):
"""Predict the target for the provided data
Parameters
----------
X : array or matrix, shape = [n_samples, n_features]
Returns
-------
y : array of int, shape = [n_samples] or [n_samples, n_outputs]
Target values
"""
X = check_array(X, accept_sparse='csr')
neigh_dist, neigh_ind = self.kneighbors(X)
weights = _get_weights(neigh_dist, self.weights)
_y = self._y
if _y.ndim == 1:
_y = _y.reshape((-1, 1))
if weights is None:
y_pred = np.mean(_y[neigh_ind], axis=1)
else:
y_pred = np.empty((X.shape[0], _y.shape[1]), dtype=np.float)
denom = np.sum(weights, axis=1)
for j in range(_y.shape[1]):
num = np.sum(_y[neigh_ind, j] * weights, axis=1)
y_pred[:, j] = num / denom
if self._y.ndim == 1:
y_pred = y_pred.ravel()
return y_pred
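# A short sketch of the distance weighting performed in ``predict`` above,
# reusing the data from the class docstring (the predicted value is
# approximate):
#
#     from sklearn.neighbors import KNeighborsRegressor
#     X = [[0], [1], [2], [3]]
#     y = [0, 0, 1, 1]
#     neigh = KNeighborsRegressor(n_neighbors=2, weights='distance').fit(X, y)
#     neigh.predict([[1.1]])   # ~0.1: the closer neighbor (x=1, y=0) dominates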
class RadiusNeighborsRegressor(NeighborsBase, RadiusNeighborsMixin,
SupervisedFloatMixin,
RegressorMixin):
"""Regression based on neighbors within a fixed radius.
The target is predicted by local interpolation of the targets
associated of the nearest neighbors in the training set.
Parameters
----------
radius : float, optional (default = 1.0)
        Range of parameter space to use by default for :meth:`radius_neighbors`
queries.
weights : str or callable
weight function used in prediction. Possible values:
- 'uniform' : uniform weights. All points in each neighborhood
are weighted equally.
- 'distance' : weight points by the inverse of their distance.
in this case, closer neighbors of a query point will have a
greater influence than neighbors which are further away.
- [callable] : a user-defined function which accepts an
array of distances, and returns an array of the same shape
containing the weights.
Uniform weights are used by default.
algorithm : {'auto', 'ball_tree', 'kd_tree', 'brute'}, optional
Algorithm used to compute the nearest neighbors:
- 'ball_tree' will use :class:`BallTree`
        - 'kd_tree' will use :class:`KDTree`
- 'brute' will use a brute-force search.
- 'auto' will attempt to decide the most appropriate algorithm
based on the values passed to :meth:`fit` method.
Note: fitting on sparse input will override the setting of
this parameter, using brute force.
leaf_size : int, optional (default = 30)
Leaf size passed to BallTree or KDTree. This can affect the
speed of the construction and query, as well as the memory
required to store the tree. The optimal value depends on the
nature of the problem.
metric : string or DistanceMetric object (default='minkowski')
the distance metric to use for the tree. The default metric is
minkowski, and with p=2 is equivalent to the standard Euclidean
metric. See the documentation of the DistanceMetric class for a
list of available metrics.
p : integer, optional (default = 2)
Power parameter for the Minkowski metric. When p = 1, this is
equivalent to using manhattan_distance (l1), and euclidean_distance
(l2) for p = 2. For arbitrary p, minkowski_distance (l_p) is used.
metric_params: dict, optional (default = None)
additional keyword arguments for the metric function.
Examples
--------
>>> X = [[0], [1], [2], [3]]
>>> y = [0, 0, 1, 1]
>>> from sklearn.neighbors import RadiusNeighborsRegressor
>>> neigh = RadiusNeighborsRegressor(radius=1.0)
>>> neigh.fit(X, y) # doctest: +ELLIPSIS
RadiusNeighborsRegressor(...)
>>> print(neigh.predict([[1.5]]))
[ 0.5]
See also
--------
NearestNeighbors
KNeighborsRegressor
KNeighborsClassifier
RadiusNeighborsClassifier
Notes
-----
See :ref:`Nearest Neighbors <neighbors>` in the online documentation
for a discussion of the choice of ``algorithm`` and ``leaf_size``.
http://en.wikipedia.org/wiki/K-nearest_neighbor_algorithm
"""
def __init__(self, radius=1.0, weights='uniform',
algorithm='auto', leaf_size=30,
p=2, metric='minkowski', metric_params=None, **kwargs):
self._init_params(radius=radius,
algorithm=algorithm,
leaf_size=leaf_size,
p=p, metric=metric, metric_params=metric_params,
**kwargs)
self.weights = _check_weights(weights)
def predict(self, X):
"""Predict the target for the provided data
Parameters
----------
X : array or matrix, shape = [n_samples, n_features]
Returns
-------
y : array of int, shape = [n_samples] or [n_samples, n_outputs]
Target values
"""
X = check_array(X, accept_sparse='csr')
neigh_dist, neigh_ind = self.radius_neighbors(X)
weights = _get_weights(neigh_dist, self.weights)
_y = self._y
if _y.ndim == 1:
_y = _y.reshape((-1, 1))
if weights is None:
y_pred = np.array([np.mean(_y[ind, :], axis=0)
for ind in neigh_ind])
else:
y_pred = np.array([(np.average(_y[ind, :], axis=0,
weights=weights[i]))
for (i, ind) in enumerate(neigh_ind)])
if self._y.ndim == 1:
y_pred = y_pred.ravel()
return y_pred
| bsd-3-clause |
detrout/debian-statsmodels | statsmodels/stats/contrast.py | 1 | 11337 | from statsmodels.compat.python import range
import numpy as np
from scipy.stats import f as fdist
from scipy.stats import t as student_t
from scipy import stats
from statsmodels.tools.tools import clean0, fullrank
from statsmodels.compat.numpy import np_matrix_rank
#TODO: should this be public if it's just a container?
class ContrastResults(object):
"""
Class for results of tests of linear restrictions on coefficients in a model.
This class functions mainly as a container for `t_test`, `f_test` and
`wald_test` for the parameters of a model.
The attributes depend on the statistical test and are either based on the
normal, the t, the F or the chisquare distribution.
"""
def __init__(self, t=None, F=None, sd=None, effect=None, df_denom=None,
df_num=None, alpha=0.05, **kwds):
self.effect = effect # Let it be None for F
if F is not None:
self.distribution = 'F'
self.fvalue = F
self.df_denom = df_denom
self.df_num = df_num
self.dist = fdist
self.dist_args = (df_num, df_denom)
self.pvalue = fdist.sf(F, df_num, df_denom)
elif t is not None:
self.distribution = 't'
self.tvalue = t
self.statistic = t # generic alias
self.sd = sd
self.df_denom = df_denom
self.dist = student_t
self.dist_args = (df_denom,)
self.pvalue = self.dist.sf(np.abs(t), df_denom) * 2
elif 'statistic' in kwds:
# TODO: currently targeted to normal distribution, and chi2
self.distribution = kwds['distribution']
self.statistic = kwds['statistic']
self.tvalue = value = kwds['statistic'] # keep alias
# TODO: for results instance we decided to use tvalues also for normal
self.sd = sd
self.dist = getattr(stats, self.distribution)
self.dist_args = ()
            if self.distribution == 'chi2':
self.pvalue = self.dist.sf(self.statistic, df_denom)
else:
"normal"
self.pvalue = self.dist.sf(np.abs(value)) * 2
# cleanup
# should we return python scalar?
self.pvalue = np.squeeze(self.pvalue)
def conf_int(self, alpha=0.05):
"""
Returns the confidence interval of the value, `effect` of the constraint.
This is currently only available for t and z tests.
Parameters
----------
alpha : float, optional
The significance level for the confidence interval.
ie., The default `alpha` = .05 returns a 95% confidence interval.
Returns
-------
ci : ndarray, (k_constraints, 2)
The array has the lower and the upper limit of the confidence
interval in the columns.
"""
if self.effect is not None:
# confidence intervals
q = self.dist.ppf(1 - alpha / 2., *self.dist_args)
lower = self.effect - q * self.sd
upper = self.effect + q * self.sd
return np.column_stack((lower, upper))
else:
raise NotImplementedError('Confidence Interval not available')
def __array__(self):
if hasattr(self, "fvalue"):
return self.fvalue
else:
return self.tvalue
def __str__(self):
return self.summary().__str__()
def __repr__(self):
return str(self.__class__) + '\n' + self.__str__()
def summary(self, xname=None, alpha=0.05, title=None):
"""Summarize the Results of the hypothesis test
Parameters
-----------
xname : list of strings, optional
Default is `c_##` for ## in p the number of regressors
alpha : float
significance level for the confidence intervals. Default is
alpha = 0.05 which implies a confidence level of 95%.
title : string, optional
Title for the params table. If not None, then this replaces the
default title
Returns
-------
smry : string or Summary instance
This contains a parameter results table in the case of t or z test
in the same form as the parameter results table in the model
results summary.
For F or Wald test, the return is a string.
"""
if self.effect is not None:
# TODO: should also add some extra information, e.g. robust cov ?
# TODO: can we infer names for constraints, xname in __init__ ?
if title is None:
title = 'Test for Constraints'
elif title == '':
# don't add any title,
# I think SimpleTable skips on None - check
title = None
# we have everything for a params table
use_t = (self.distribution == 't')
yname='constraints' # Not used in params_frame
if xname is None:
xname = ['c%d'%ii for ii in range(len(self.effect))]
from statsmodels.iolib.summary import summary_params
pvalues = np.atleast_1d(self.pvalue)
summ = summary_params((self, self.effect, self.sd, self.statistic,
pvalues, self.conf_int(alpha)),
yname=yname, xname=xname, use_t=use_t,
title=title)
return summ
elif hasattr(self, 'fvalue'):
            # TODO: create something nicer for these cases
return '<F test: F=%s, p=%s, df_denom=%d, df_num=%d>' % \
(repr(self.fvalue), self.pvalue, self.df_denom, self.df_num)
else:
# generic
return '<Wald test: statistic=%s, p-value=%s>' % \
(self.statistic, self.pvalue)
def summary_frame(self, xname=None, alpha=0.05):
"""Return the parameter table as a pandas DataFrame
This is only available for t and normal tests
"""
if self.effect is not None:
# we have everything for a params table
use_t = (self.distribution == 't')
yname='constraints' # Not used in params_frame
if xname is None:
xname = ['c%d'%ii for ii in range(len(self.effect))]
from statsmodels.iolib.summary import summary_params_frame
summ = summary_params_frame((self, self.effect, self.sd,
self.statistic,self.pvalue,
self.conf_int(alpha)), yname=yname,
xname=xname, use_t=use_t)
return summ
else:
# TODO: create something nicer
raise NotImplementedError('only available for t and z')
class Contrast(object):
"""
This class is used to construct contrast matrices in regression models.
They are specified by a (term, design) pair. The term, T, is a linear
combination of columns of the design matrix. The matrix attribute of
Contrast is a contrast matrix C so that
colspan(dot(D, C)) = colspan(dot(D, dot(pinv(D), T)))
where pinv(D) is the generalized inverse of D. Further, the matrix
Tnew = dot(C, D)
is full rank. The rank attribute is the rank of
dot(D, dot(pinv(D), T))
In a regression model, the contrast tests that E(dot(Tnew, Y)) = 0
for each column of Tnew.
Parameters
----------
term : array-like
design : array-like
Attributes
----------
contrast_matrix
Examples
--------
>>> import numpy.random as R
>>> import statsmodels.api as sm
>>> import numpy as np
>>> R.seed(54321)
>>> X = R.standard_normal((40,10))
Get a contrast
>>> new_term = np.column_stack((X[:,0], X[:,2]))
>>> c = sm.contrast.Contrast(new_term, X)
>>> test = [[1] + [0]*9, [0]*2 + [1] + [0]*7]
>>> np.allclose(c.contrast_matrix, test)
True
Get another contrast
>>> P = np.dot(X, np.linalg.pinv(X))
>>> resid = np.identity(40) - P
>>> noise = np.dot(resid,R.standard_normal((40,5)))
>>> new_term2 = np.column_stack((noise,X[:,2]))
>>> c2 = Contrast(new_term2, X)
>>> print(c2.contrast_matrix)
[ -1.26424750e-16 8.59467391e-17 1.56384718e-01 -2.60875560e-17
-7.77260726e-17 -8.41929574e-18 -7.36359622e-17 -1.39760860e-16
1.82976904e-16 -3.75277947e-18]
Get another contrast
>>> zero = np.zeros((40,))
>>> new_term3 = np.column_stack((zero,X[:,2]))
>>> c3 = sm.contrast.Contrast(new_term3, X)
>>> test2 = [0]*2 + [1] + [0]*7
>>> np.allclose(c3.contrast_matrix, test2)
True
"""
def _get_matrix(self):
"""
Gets the contrast_matrix property
"""
if not hasattr(self, "_contrast_matrix"):
self.compute_matrix()
return self._contrast_matrix
contrast_matrix = property(_get_matrix)
def __init__(self, term, design):
self.term = np.asarray(term)
self.design = np.asarray(design)
def compute_matrix(self):
"""
Construct a contrast matrix C so that
colspan(dot(D, C)) = colspan(dot(D, dot(pinv(D), T)))
where pinv(D) is the generalized inverse of D=design.
"""
T = self.term
if T.ndim == 1:
T = T[:,None]
self.T = clean0(T)
self.D = self.design
self._contrast_matrix = contrastfromcols(self.T, self.D)
try:
self.rank = self.matrix.shape[1]
except:
self.rank = 1
#TODO: fix docstring after usage is settled
def contrastfromcols(L, D, pseudo=None):
"""
From an n x p design matrix D and a matrix L, tries
to determine a p x q contrast matrix C which
determines a contrast of full rank, i.e. the
n x q matrix
dot(transpose(C), pinv(D))
is full rank.
L must satisfy either L.shape[0] == n or L.shape[1] == p.
If L.shape[0] == n, then L is thought of as representing
columns in the column space of D.
If L.shape[1] == p, then L is thought of as what is known
as a contrast matrix. In this case, this function returns an estimable
contrast corresponding to the dot(D, L.T)
Note that this always produces a meaningful contrast, not always
with the intended properties because q is always non-zero unless
L is identically 0. That is, it produces a contrast that spans
the column space of L (after projection onto the column space of D).
Parameters
----------
L : array-like
D : array-like
"""
L = np.asarray(L)
D = np.asarray(D)
n, p = D.shape
if L.shape[0] != n and L.shape[1] != p:
raise ValueError("shape of L and D mismatched")
if pseudo is None:
pseudo = np.linalg.pinv(D) # D^+ \approx= ((dot(D.T,D))^(-1),D.T)
if L.shape[0] == n:
C = np.dot(pseudo, L).T
else:
C = L
C = np.dot(pseudo, np.dot(D, C.T)).T
Lp = np.dot(D, C.T)
if len(Lp.shape) == 1:
Lp.shape = (n, 1)
if np_matrix_rank(Lp) != Lp.shape[1]:
Lp = fullrank(Lp)
C = np.dot(pseudo, Lp).T
return np.squeeze(C)
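# A usage sketch for ``contrastfromcols`` (the design matrix below is
# illustrative only): when L is a column that already lies in the column
# space of D, the returned contrast simply selects that column.
#
#     D = np.random.standard_normal((20, 4))
#     L = D[:, 1]
#     C = contrastfromcols(L, D)
#     # C is, up to numerical error, the selector [0, 1, 0, 0]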
| bsd-3-clause |
lukebarnard1/bokeh | bokeh/charts/builder/area_builder.py | 43 | 6813 | """This is the Bokeh charts interface. It gives you a high level API to build
complex plots in a simple way.
This is the Area class, which lets you build your Area charts by simply passing
the arguments to the Chart class and calling the proper functions.
"""
#-----------------------------------------------------------------------------
# Copyright (c) 2012 - 2014, Continuum Analytics, Inc. All rights reserved.
#
# Powered by the Bokeh Development Team.
#
# The full license is in the file LICENSE.txt, distributed with this software.
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# Imports
#-----------------------------------------------------------------------------
from __future__ import absolute_import, print_function
from six import string_types
try:
import numpy as np
except ImportError:
raise RuntimeError("bokeh.charts Area chart requires NumPy.")
from ..utils import cycle_colors
from .._builder import Builder, create_and_build
from ...models import ColumnDataSource, DataRange1d, GlyphRenderer, Range1d
from ...models.glyphs import Patch
from ...properties import Any, Bool
#-----------------------------------------------------------------------------
# Classes and functions
#-----------------------------------------------------------------------------
def Area(values, index=None, **kws):
""" Create an area chart using the :class:`AreaBuilder <bokeh.charts.builder.area_builder.AreaBuilder>`
to render the geometry from values.
Args:
values (iterable): iterable 2d representing the data series
values matrix.
index (str|1d iterable, optional): can be used to specify a common custom
index for all data series as an **1d iterable** of any sort that will be used as
series common index or a **string** that corresponds to the key of the
mapping to be used as index (and not as data series) if
area.values is a mapping (like a dict, an OrderedDict
or a pandas DataFrame)
    In addition to the parameters specific to this chart,
:ref:`userguide_charts_generic_arguments` are also accepted as keyword parameters.
Returns:
a new :class:`Chart <bokeh.charts.Chart>`
Examples:
.. bokeh-plot::
:source-position: above
from bokeh.charts import Area, output_file, show
# (dict, OrderedDict, lists, arrays and DataFrames are valid inputs)
xyvalues = dict(
python=[2, 3, 7, 5, 26, 221, 44, 233, 254, 265, 266, 267, 120],
pypy=[12, 33, 47, 15, 126, 121, 144, 233, 254, 225, 226, 267, 110],
jython=[22, 43, 10, 25, 26, 101, 114, 203, 194, 215, 201, 227, 139],
)
area = Area(
xyvalues, title="Area Chart", xlabel='time', legend=True,
ylabel='memory', stacked=True,
)
output_file('area.html')
show(area)
"""
return create_and_build(AreaBuilder, values, index=index, **kws)
class AreaBuilder(Builder):
"""This is the Area class and it is in charge of plotting
Area chart in an easy and intuitive way.
Essentially, it provides a way to ingest the data, make the proper
calculations and push the references into a source object.
We additionally make calculations for the ranges.
And finally add the needed glyphs (patch) taking the references
from the source.
"""
stacked = Bool(False, help="""
Whether to stack the areas. (Defaults to False)
If True, areas are draw as a stack, to show the relationship of
parts to a whole. Otherwise, areas are layered above one another.
""")
index = Any(help="""
An index to be used for all data series as follows:
- A 1d iterable of any sort that will be used as
series common index
- As a string that corresponds to the key of the
mapping to be used as index (and not as data
series) if area.values is a mapping (like a dict,
an OrderedDict or a pandas DataFrame)
""")
def _process_data(self):
"""Calculate the chart properties accordingly from area.values.
Then build a dict containing references to all the points to be used by
the patch glyph inside the ``_yield_renderers`` method.
"""
xs = self._values_index
last = np.zeros(len(xs))
x2 = np.hstack((xs[::-1], xs))
self.set_and_get("x", "", x2)
for grp, col_values in self._values.items():
# TODO: This condition may be removed or changed depending on
# the validation of self.index
if isinstance(self.index, string_types) and grp == self.index:
continue
# get single series values
_values = [col_values[x] for indx, x in enumerate(xs)]
# to draw area we need 2 coordinates. The lower values will always
# be:
# - 0 in case of non stacked area
# - the previous series top value in case of stacked charts
next = last + _values
values = np.hstack((last[::-1], next))
# only update when stacked, otherwise we always want to start from 0
if self.stacked:
last = next
# save values and new group
self.set_and_get("y_", grp, values)
self._groups.append(u"%s" % grp)
def _set_sources(self):
"""
Push the Line data into the ColumnDataSource and calculate the proper ranges.
"""
self._source = ColumnDataSource(self._data)
self.x_range = DataRange1d()
y_names = self._attr[1:]
endy = max(max(self._data[i]) for i in y_names)
starty = min(min(self._data[i]) for i in y_names)
self.y_range = Range1d(
start=starty - 0.1 * (endy - starty),
end=endy + 0.1 * (endy - starty)
)
def _yield_renderers(self):
"""Use the patch glyphs to fill the area connecting the xy points
in the series taken from the data added with area._process_data.
Takes reference points from the data loaded at the ColumnDataSource.
"""
colors = cycle_colors(self._attr, self.palette)
# parse all series. We exclude the first attr as it's the x values
# added for the index
for i, series_name in enumerate(self._attr[1:]):
glyph = Patch(
x='x', y=series_name, fill_color=colors[i], fill_alpha=0.9)
renderer = GlyphRenderer(data_source=self._source, glyph=glyph)
self._legends.append((self._groups[i], [renderer]))
yield renderer
| bsd-3-clause |
WaterIsland/DLStudy | mln/neuralnet-fitting/test_handwrite.py | 1 | 2445 | #!/usr/bin/python
# -*- coding: utf-8 -*-
#
# This precision method is adapted from
# http://scikit-learn.org/stable/auto_examples/classification/plot_digits_classification.html
# https://github.com/sylvan5/PRML/blob/master/ch5/digits.py
#
import cv2
import numpy as np
import Mln as mln
import dump as dp
import progress as prg
import image as img
from sklearn.datasets import load_digits
from sklearn.cross_validation import train_test_split
from sklearn.preprocessing import LabelBinarizer
from sklearn.metrics import confusion_matrix, classification_report
def recognition_digit_image(fname, digit = 100):
im = cv2.imread(fname)
im = img.change_size_with_size(im, 28, 28)
im = img.change_grayscale(im)
im = 255. - im
input_data = im
input_data = input_data.astype(np.float64)
input_data = im / im.max()
tmp_data = np.reshape(input_data, (28*28, 1))
neuro_obj.test(tmp_data, teach_data)
output = neuro_obj.get_output()
if digit >=0 and digit <= 9:
if neuro_obj.get_max_output_index() == digit : print "judged(success):", neuro_obj.get_max_output_index()
else : print "judged(miss) :", neuro_obj.get_max_output_index()
else:
print "judged:", neuro_obj.get_max_output_index()
    # To display the input image, uncomment the following 3 lines.
# cv2.imshow("input_data", im)
# cv2.waitKey(0)
# cv2.destroyAllWindows()
teach_data = [0, 0, 0, 1, 0, 0, 0, 0, 0, 0] # dummy
print '--start--'
print '@@ Show after learning @@'
neuro_obj = dp.obj_load('./learn-classification.pkl') # online training's pkl
#neuro_obj = dp.obj_load('./learn-classification-batch.pkl') # batch training's pkl
recognition_digit_image("../../hand-writing-number/0.png", 0)
recognition_digit_image("../../hand-writing-number/1.png", 1)
recognition_digit_image("../../hand-writing-number/2.png", 2)
recognition_digit_image("../../hand-writing-number/3.png", 3)
recognition_digit_image("../../hand-writing-number/4.png", 4)
recognition_digit_image("../../hand-writing-number/5.png", 5)
recognition_digit_image("../../hand-writing-number/6.png", 6)
recognition_digit_image("../../hand-writing-number/7.png", 7)
recognition_digit_image("../../hand-writing-number/8.png", 8)
recognition_digit_image("../../hand-writing-number/9.png", 9)
# judging '9' is success.
recognition_digit_image("../../hand-writing-number/number.png")
| mit |
HPCC-Cloud-Computing/press | prediction/auto_period/autoperiod.py | 1 | 3366 | import numpy as np
import matplotlib.pyplot as plt
import math
SEQUENCE = 0
PERIODGRAM = 1
ACF = 2
DEFAULT_INTERVAL_VALUE = 600
DEFAULT_PEAK_PERCENT = 99
class Autoperiod(object):
def __init__(self, time_series=[], interval=DEFAULT_INTERVAL_VALUE,
peak_percent=DEFAULT_PEAK_PERCENT):
self.time_series = time_series
self.interval = interval
self.peak_percent = peak_percent
def fft(self):
a = self.time_series
return np.fft.rfft(a)
def length_series(self):
return len(self.time_series)
def get_ESD(self):
a = self.fft()
return np.abs(a ** 2)
def ACF_list(self):
s = []
for i in range(self.length_series()):
s.append(self.get_ACF(i))
return s
    # Plot one specific type of diagram
def diagram(self, n):
if n == PERIODGRAM:
x_axis = np.arange(3, len(self.get_ESD())) / float(
2 * len(self.get_ESD()))
a = self.get_ESD()[3:]
plt.xlabel('Frequency')
plt.ylabel('Power')
            plt.title('Periodogram')
plt.plot(x_axis, a)
plt.show()
if n == SEQUENCE:
            x_axis = np.arange(0, self.length_series()) * self.interval / 3600.0
plt.plot(x_axis, self.time_series)
plt.xlabel('Hours')
            plt.ylabel('Number of requests per interval')
plt.title('Sequence diagram')
plt.show()
if n == ACF:
plt.xlabel('Tau')
plt.ylabel('ACF_value')
            plt.title('Circular Autocorrelation')
plt.plot(self.ACF_list())
plt.show()
    # Compute the ACF value for a single lag
def get_ACF(self, tau):
tempACF = 0
for i in range(self.length_series()):
if i + tau == self.length_series():
break
            tempACF += self.time_series[i] * self.time_series[i + tau]
return float(tempACF) / float(self.length_series())
    # Collect the promising candidate periods to examine
def period_hints(self):
threshold = math.floor(
self.length_series() * (100 - self.peak_percent) / 100.0)
        # Get the list of bins with the largest energy
period_temp_list = []
index_hint_list = []
temp_ESD = []
for t in np.arange(len(self.get_ESD())):
temp_ESD.append(self.get_ESD()[t])
hints_list = sorted(temp_ESD, reverse=True)[:threshold]
        # Convert the values to indices and filter them
for element in hints_list:
hint_index = temp_ESD.index(element)
            # Skip bins that carry no period (k=0) and the bin whose period equals the sample length (k=1)
if (hint_index < 2):
continue
else:
                # Check the ACF condition
check_acf = (self.ACF_list()[hint_index]
>= (self.ACF_list()[hint_index - 1]
+ self.ACF_list()[hint_index + 1]) / 2)
if check_acf:
index_hint_list.append(hint_index)
temp_period_element = math.floor(
float(self.length_series()) / hint_index)
period_temp_list.append(temp_period_element)
return period_temp_list
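# A usage sketch (illustrative values only): a sinusoid with a 128-sample
# period in a 1024-sample series puts its periodogram peak at bin
# k = 1024 / 128 = 8, so a period of 128 is expected to appear among the
# returned hints.
#
#     t = np.arange(1024)
#     series = 100 + 10 * np.sin(2 * np.pi * t / 128)
#     hints = Autoperiod(series.tolist()).period_hints()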
| mit |
qiudebo/13learn | code/matplotlib/aqy/aqy_doughnut_chart2.py | 1 | 1535 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
__author__ = 'qiudebo'
import matplotlib.pyplot as plt
import numpy as np
if __name__ == '__main__':
labels = (u'web', u'Android',u'IOS',u'web', u'Android',u'IOS')
y1 = (9.51, 26.35, 64.14) # 我的前半生
y2 = (12.06, 34.21, 53.73) # 三生三世十里桃花
N = 3
x = np.arange(N)
    # Set how far the exploded wedge is offset
explode = (0, 0.1, 0)
    # Create the figure and axes
fig, ax = plt.subplots()
#ax.set_title("pie")
    # Define the colors
colors2 = ['lightcoral', 'purple', 'lightgreen']
colors1 = ['yellowgreen', 'gold', 'lightskyblue']
ax.pie(y1, autopct='%1.1f%%', radius=1.2, colors=colors1, pctdistance=0.9)
ax.pie(y2, autopct='%1.2f%%', radius=1, colors=colors2, pctdistance=0.5)
ax.pie(y1, radius=0.6, colors='w')
# ax.pie(y1, explode=explode, labels=labels,
# autopct='%1.1f%%', shadow=True, radius=1.2, pctdistance=0.9, labels=labels)
#
# ax.pie(y2, explode=explode, labels=labels,
# autopct='%1.2f%%', shadow=True, radius=1, pctdistance=0.5, labels=labels)
    # Keep the pie circular (equal aspect ratio)
ax.axis('equal')
    # Final appearance after setting wedge colors, labels, percentages, title,
    # start angle, radius, and the exploded slice
    # The pie radius is set via the `radius` argument
    # bbox_to_anchor controls the legend placement
ax.legend(labels,bbox_to_anchor=(1.05, 1), loc='best', borderaxespad=0.)
#plt.legend()
plt.show()
| mit |
victor-cortez/Heimdall | pile/plotter.py | 1 | 1281 | import matplotlib.pyplot as plt
from scipy.optimize import curve_fit
import numpy as np
name = "outfile5-6.txt"
name2 = "outfile.txt"
arquivo = open(name,"r")
arquivo2= open(name2,"r")
data = [[float(n) for n in i.replace("\n","").split()] for i in arquivo.readlines()]
data2 = [[float(n) for n in i.replace("\n","").split()] for i in arquivo2.readlines()]
x = [i[0] for i in data2]
y2 = [i[1] for i in data2]
y1 = [i[1] for i in data]
y = []
for i in range(len(y2)):
if i < len(y1):
y.append((y2[i] + y1[i])/2)
else:
y.append(y2[i])
#print(data)
thefit = np.polyfit(x,y,2)
#print(thefit)
def exponenial_func(x, a, b, c):
return a*np.exp(-b*x)+c
#print(len(x),len(y))
popt, pcov = curve_fit(exponenial_func, x[:9], y[:9], p0=(1, 1e-6, 1))
xx = np.linspace(0,100000,1000)
yy2 = exponenial_func(xx, *popt)
def thevalues(xval,fitdata):
components = []
fitdata = np.fliplr([fitdata])[0]
for i in range(len(fitdata)):
value = (xval**i)*fitdata[i]
components.append(value)
return sum(components)
yy = [thevalues(k,thefit) for k in x]
#print(yy)
#plt.plot(x[:10],yy[:10])
plt.plot(x[:9],y[:9])
#print(yy2)
plt.plot(xx,yy2)
plt.savefig("5ucasatpile.jpg",dpi=100)
plt.show()
print(y[9])
| mit |
jougs/nest-simulator | pynest/nest/raster_plot.py | 15 | 9348 | # -*- coding: utf-8 -*-
#
# raster_plot.py
#
# This file is part of NEST.
#
# Copyright (C) 2004 The NEST Initiative
#
# NEST is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 2 of the License, or
# (at your option) any later version.
#
# NEST is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with NEST. If not, see <http://www.gnu.org/licenses/>.
""" Functions for raster plotting."""
import nest
import numpy
__all__ = [
'extract_events',
'from_data',
'from_device',
'from_file',
'from_file_numpy',
'from_file_pandas'
]
def extract_events(data, time=None, sel=None):
"""Extract all events within a given time interval.
Both time and sel may be used at the same time such that all
events are extracted for which both conditions are true.
Parameters
----------
data : list
Matrix such that
data[:,0] is a vector of all node_ids and
data[:,1] a vector with the corresponding time stamps.
time : list, optional
List with at most two entries such that
time=[t_max] extracts all events with t< t_max
time=[t_min, t_max] extracts all events with t_min <= t < t_max
sel : list, optional
List of node_ids such that
sel=[node_id1, ... , node_idn] extracts all events from these node_ids.
All others are discarded.
Returns
-------
numpy.array
List of events as (node_id, t) tuples
"""
val = []
if time:
t_max = time[-1]
if len(time) > 1:
t_min = time[0]
else:
t_min = 0
for v in data:
t = v[1]
node_id = v[0]
if time and (t < t_min or t >= t_max):
continue
if not sel or node_id in sel:
val.append(v)
return numpy.array(val)
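# A usage sketch for ``extract_events``; each row of ``data`` is a
# (node_id, time) pair and the values below are illustrative only.
#
#     data = numpy.array([[1, 10.0], [2, 15.0], [1, 20.0], [3, 25.0]])
#     extract_events(data, time=[12.0, 22.0])   # rows with 12.0 <= t < 22.0
#     extract_events(data, sel=[1])             # only events from node_id 1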
def from_data(data, sel=None, **kwargs):
"""Plot raster plot from data array.
Parameters
----------
data : list
Matrix such that
data[:,0] is a vector of all node_ids and
data[:,1] a vector with the corresponding time stamps.
sel : list, optional
List of node_ids such that
sel=[node_id1, ... , node_idn] extracts all events from these node_ids.
All others are discarded.
kwargs:
Parameters passed to _make_plot
"""
if len(data) == 0:
raise nest.kernel.NESTError("No data to plot.")
ts = data[:, 1]
d = extract_events(data, sel=sel)
ts1 = d[:, 1]
node_ids = d[:, 0]
return _make_plot(ts, ts1, node_ids, data[:, 0], **kwargs)
def from_file(fname, **kwargs):
"""Plot raster from file.
Parameters
----------
fname : str or tuple(str) or list(str)
File name or list of file names
If a list of files is given, the data from them is concatenated as if
it had been stored in a single file - useful when MPI is enabled and
data is logged separately for each MPI rank, for example.
kwargs:
Parameters passed to _make_plot
"""
if isinstance(fname, str):
fname = [fname]
if isinstance(fname, (list, tuple)):
try:
global pandas
pandas = __import__('pandas')
from_file_pandas(fname, **kwargs)
except ImportError:
from_file_numpy(fname, **kwargs)
else:
print('fname should be one of str/list(str)/tuple(str).')
def from_file_pandas(fname, **kwargs):
"""Use pandas."""
data = None
for f in fname:
dataFrame = pandas.read_table(f, header=2, skipinitialspace=True)
newdata = dataFrame.values
if data is None:
data = newdata
else:
data = numpy.concatenate((data, newdata))
return from_data(data, **kwargs)
def from_file_numpy(fname, **kwargs):
"""Use numpy."""
data = None
for f in fname:
newdata = numpy.loadtxt(f, skiprows=3)
if data is None:
data = newdata
else:
data = numpy.concatenate((data, newdata))
return from_data(data, **kwargs)
def from_device(detec, **kwargs):
"""
Plot raster from a spike recorder.
Parameters
----------
    detec : object
        The spike_recorder device whose events are plotted.
kwargs:
Parameters passed to _make_plot
Raises
------
nest.kernel.NESTError
"""
type_id = nest.GetDefaults(detec.get('model'), 'type_id')
if not type_id == "spike_recorder":
raise nest.kernel.NESTError("Please provide a spike_recorder.")
if detec.get('record_to') == "memory":
ts, node_ids = _from_memory(detec)
if not len(ts):
raise nest.kernel.NESTError("No events recorded!")
if "title" not in kwargs:
kwargs["title"] = "Raster plot from device '%i'" % detec.get('global_id')
if detec.get('time_in_steps'):
xlabel = "Steps"
else:
xlabel = "Time (ms)"
return _make_plot(ts, ts, node_ids, node_ids, xlabel=xlabel, **kwargs)
elif detec.get("record_to") == "ascii":
fname = detec.get("filenames")
return from_file(fname, **kwargs)
else:
raise nest.kernel.NESTError("No data to plot. Make sure that \
record_to is set to either 'ascii' or 'memory'.")
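
# Hedged usage sketch (added for illustration, not part of the original module):
# record spikes in memory and plot them. Assumes a working NEST installation;
# model names ("poisson_generator", "parrot_neuron", "spike_recorder") follow
# NEST 3.x conventions and may differ in other versions.
def _example_from_device():
    nest.ResetKernel()
    pg = nest.Create("poisson_generator", params={"rate": 800.0})
    nrns = nest.Create("parrot_neuron", 10)
    sr = nest.Create("spike_recorder")   # records to memory by default
    nest.Connect(pg, nrns)
    nest.Connect(nrns, sr)
    nest.Simulate(200.0)
    return from_device(sr, hist=True, hist_binwidth=10.0)
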
def _from_memory(detec):
ev = detec.get("events")
return ev["times"], ev["senders"]
def _make_plot(ts, ts1, node_ids, neurons, hist=True, hist_binwidth=5.0,
grayscale=False, title=None, xlabel=None):
"""Generic plotting routine.
Constructs a raster plot along with an optional histogram (common part in
all routines above).
Parameters
----------
ts : list
All timestamps
ts1 : list
Timestamps corresponding to node_ids
node_ids : list
Global ids corresponding to ts1
neurons : list
Node IDs of neurons to plot
hist : bool, optional
Display histogram
hist_binwidth : float, optional
Width of histogram bins
grayscale : bool, optional
Plot in grayscale
title : str, optional
Plot title
xlabel : str, optional
Label for x-axis
"""
import matplotlib.pyplot as plt
plt.figure()
if grayscale:
color_marker = ".k"
color_bar = "gray"
else:
color_marker = "."
color_bar = "blue"
color_edge = "black"
if xlabel is None:
xlabel = "Time (ms)"
ylabel = "Neuron ID"
if hist:
ax1 = plt.axes([0.1, 0.3, 0.85, 0.6])
plotid = plt.plot(ts1, node_ids, color_marker)
plt.ylabel(ylabel)
plt.xticks([])
xlim = plt.xlim()
plt.axes([0.1, 0.1, 0.85, 0.17])
t_bins = numpy.arange(
numpy.amin(ts), numpy.amax(ts),
float(hist_binwidth)
)
n, _ = _histogram(ts, bins=t_bins)
num_neurons = len(numpy.unique(neurons))
heights = 1000 * n / (hist_binwidth * num_neurons)
plt.bar(t_bins, heights, width=hist_binwidth, color=color_bar,
edgecolor=color_edge)
plt.yticks([
int(x) for x in
numpy.linspace(0.0, int(max(heights) * 1.1) + 5, 4)
])
plt.ylabel("Rate (Hz)")
plt.xlabel(xlabel)
plt.xlim(xlim)
plt.axes(ax1)
else:
plotid = plt.plot(ts1, node_ids, color_marker)
plt.xlabel(xlabel)
plt.ylabel(ylabel)
if title is None:
plt.title("Raster plot")
else:
plt.title(title)
plt.draw()
return plotid
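
# Hedged note (illustrative): _make_plot is internal, but the public entry points
# above forward their keyword arguments to it, e.g.
#
#     from_data(data, hist=False, grayscale=True, title="My raster")
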
def _histogram(a, bins=10, bin_range=None, normed=False):
"""Calculate histogram for data.
Parameters
----------
a : list
Data to calculate histogram for
bins : int, optional
Number of bins
    bin_range : tuple of float, optional
        Lower and upper range of the bins
normed : bool, optional
Whether distribution should be normalized
Raises
------
ValueError
"""
from numpy import asarray, iterable, linspace, sort, concatenate
a = asarray(a).ravel()
if bin_range is not None:
mn, mx = bin_range
if mn > mx:
raise ValueError("max must be larger than min in range parameter")
if not iterable(bins):
if bin_range is None:
bin_range = (a.min(), a.max())
mn, mx = [mi + 0.0 for mi in bin_range]
if mn == mx:
mn -= 0.5
mx += 0.5
bins = linspace(mn, mx, bins, endpoint=False)
else:
if (bins[1:] - bins[:-1] < 0).any():
raise ValueError("bins must increase monotonically")
# best block size probably depends on processor cache size
block = 65536
n = sort(a[:block]).searchsorted(bins)
for i in range(block, a.size, block):
n += sort(a[i:i + block]).searchsorted(bins)
n = concatenate([n, [len(a)]])
n = n[1:] - n[:-1]
if normed:
db = bins[1] - bins[0]
return 1.0 / (a.size * db) * n, bins
else:
return n, bins
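
# Hedged usage sketch (added for illustration, not part of the original module):
# bin 1000 synthetic spike times into 5 ms bins; assumes NumPy >= 1.17 for default_rng.
def _example_histogram():
    rng = numpy.random.default_rng(0)
    times = rng.uniform(0.0, 100.0, size=1000)
    counts, bin_edges = _histogram(times, bins=numpy.arange(0.0, 100.0, 5.0))
    return counts, bin_edges
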
| gpl-2.0 |
Tong-Chen/scikit-learn | sklearn/utils/tests/test_shortest_path.py | 12 | 2892 | from collections import defaultdict
import numpy as np
from numpy.testing import assert_array_almost_equal
from sklearn.utils.graph import (graph_shortest_path,
single_source_shortest_path_length)
def floyd_warshall_slow(graph, directed=False):
N = graph.shape[0]
    # set unconnected (zero) entries to infinity
graph[np.where(graph == 0)] = np.inf
#set diagonal to zero
graph.flat[::N + 1] = 0
if not directed:
graph = np.minimum(graph, graph.T)
for k in range(N):
for i in range(N):
for j in range(N):
graph[i, j] = min(graph[i, j], graph[i, k] + graph[k, j])
graph[np.where(np.isinf(graph))] = 0
return graph
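
# Hedged usage sketch (added for illustration, not part of the original tests):
# shortest paths on a tiny 3-node graph where 0 means "no edge". The 0 -> 2 path
# goes through node 1, so its length should come out as 1 + 2 = 3.
def _example_floyd_warshall_slow():
    graph = np.array([[0., 1., 0.],
                      [1., 0., 2.],
                      [0., 2., 0.]])
    return floyd_warshall_slow(graph.copy(), directed=False)
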
def generate_graph(N=20):
#sparse grid of distances
rng = np.random.RandomState(0)
dist_matrix = rng.random_sample((N, N))
#make symmetric: distances are not direction-dependent
dist_matrix += dist_matrix.T
#make graph sparse
    i = (rng.randint(N, size=N * N // 2), rng.randint(N, size=N * N // 2))
dist_matrix[i] = 0
#set diagonal to zero
dist_matrix.flat[::N + 1] = 0
return dist_matrix
def test_floyd_warshall():
dist_matrix = generate_graph(20)
for directed in (True, False):
graph_FW = graph_shortest_path(dist_matrix, directed, 'FW')
graph_py = floyd_warshall_slow(dist_matrix.copy(), directed)
assert_array_almost_equal(graph_FW, graph_py)
def test_dijkstra():
dist_matrix = generate_graph(20)
for directed in (True, False):
graph_D = graph_shortest_path(dist_matrix, directed, 'D')
graph_py = floyd_warshall_slow(dist_matrix.copy(), directed)
assert_array_almost_equal(graph_D, graph_py)
def test_shortest_path():
dist_matrix = generate_graph(20)
# We compare path length and not costs (-> set distances to 0 or 1)
dist_matrix[dist_matrix != 0] = 1
for directed in (True, False):
if not directed:
dist_matrix = np.minimum(dist_matrix, dist_matrix.T)
graph_py = floyd_warshall_slow(dist_matrix.copy(), directed)
for i in range(dist_matrix.shape[0]):
# Non-reachable nodes have distance 0 in graph_py
dist_dict = defaultdict(int)
dist_dict.update(single_source_shortest_path_length(dist_matrix,
i))
for j in range(graph_py[i].shape[0]):
assert_array_almost_equal(dist_dict[j], graph_py[i, j])
def test_dijkstra_bug_fix():
X = np.array([[0., 0., 4.],
[1., 0., 2.],
[0., 5., 0.]])
dist_FW = graph_shortest_path(X, directed=False, method='FW')
dist_D = graph_shortest_path(X, directed=False, method='D')
assert_array_almost_equal(dist_D, dist_FW)
if __name__ == '__main__':
import nose
nose.runmodule()
| bsd-3-clause |
jonyroda97/redbot-amigosprovaveis | lib/matplotlib/backends/qt_compat.py | 2 | 7983 | """ A Qt API selector that can be used to switch between PyQt and PySide.
"""
from __future__ import (absolute_import, division, print_function,
unicode_literals)
import six
import os
import sys
from matplotlib import rcParams, verbose
# Available APIs.
QT_API_PYQT = 'PyQt4' # API is not set here; Python 2.x default is V 1
QT_API_PYQTv2 = 'PyQt4v2' # forced to Version 2 API
QT_API_PYSIDE = 'PySide' # only supports Version 2 API
QT_API_PYQT5 = 'PyQt5' # use PyQt5 API; Version 2 with module shim
QT_API_PYSIDE2 = 'PySide2' # Version 2 API with module shim
ETS = dict(pyqt=(QT_API_PYQTv2, 4), pyside=(QT_API_PYSIDE, 4),
pyqt5=(QT_API_PYQT5, 5), pyside2=(QT_API_PYSIDE2, 5))
# ETS is a dict of env variable to (QT_API, QT_MAJOR_VERSION)
# If the ETS QT_API environment variable is set, use it, but only
# if the variable is of the same major Qt version. Note that
# ETS requires version 2 of the PyQt4 API, which is not the platform
# default for Python 2.x.
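# Hedged usage sketch (added for illustration, not part of the original module):
# the binding can be chosen either through the ETS environment variable, e.g.
#
#     QT_API=pyqt5 python my_script.py
#
# or through rcParams before any Qt backend is imported, e.g.
#
#     import matplotlib
#     matplotlib.rcParams['backend'] = 'Qt5Agg'
#     matplotlib.rcParams['backend.qt5'] = 'PyQt5'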
QT_API_ENV = os.environ.get('QT_API')
if rcParams['backend'] == 'Qt5Agg':
QT_RC_MAJOR_VERSION = 5
elif rcParams['backend'] == 'Qt4Agg':
QT_RC_MAJOR_VERSION = 4
else:
# A different backend was specified, but we still got here because a Qt
    # related file was imported. This is allowed, so let's try to guess
# what we should be using.
if "PyQt4" in sys.modules or "PySide" in sys.modules:
# PyQt4 or PySide is actually used.
QT_RC_MAJOR_VERSION = 4
else:
# This is a fallback: PyQt5
QT_RC_MAJOR_VERSION = 5
QT_API = None
# check if any binding is already imported, if so silently ignore the
# rcparams/ENV settings and use what ever is already imported.
if 'PySide' in sys.modules:
# user has imported PySide before importing mpl
QT_API = QT_API_PYSIDE
if 'PySide2' in sys.modules:
    # user has imported PySide2 before importing mpl
QT_API = QT_API_PYSIDE2
if 'PyQt4' in sys.modules:
# user has imported PyQt4 before importing mpl
# this case also handles the PyQt4v2 case as once sip is imported
# the API versions can not be changed so do not try
QT_API = QT_API_PYQT
if 'PyQt5' in sys.modules:
# the user has imported PyQt5 before importing mpl
QT_API = QT_API_PYQT5
if (QT_API_ENV is not None) and QT_API is None:
try:
QT_ENV_MAJOR_VERSION = ETS[QT_API_ENV][1]
except KeyError:
raise RuntimeError(
('Unrecognized environment variable %r, valid values are:'
' %r, %r, %r or %r'
% (QT_API_ENV, 'pyqt', 'pyside', 'pyqt5', 'pyside2')))
if QT_ENV_MAJOR_VERSION == QT_RC_MAJOR_VERSION:
# Only if backend and env qt major version are
# compatible use the env variable.
QT_API = ETS[QT_API_ENV][0]
_fallback_to_qt4 = False
if QT_API is None:
# No ETS environment or incompatible so use rcParams.
if rcParams['backend'] == 'Qt5Agg':
QT_API = rcParams['backend.qt5']
elif rcParams['backend'] == 'Qt4Agg':
QT_API = rcParams['backend.qt4']
else:
# A non-Qt backend was specified, no version of the Qt
# bindings is imported, but we still got here because a Qt
# related file was imported. This is allowed, fall back to Qt5
        # using whichever binding the rcParams ask for.
_fallback_to_qt4 = True
QT_API = rcParams['backend.qt5']
# We will define an appropriate wrapper for the differing versions
# of file dialog.
_getSaveFileName = None
# Flag to check if sip could be imported
_sip_imported = False
# Now perform the imports.
if QT_API in (QT_API_PYQT, QT_API_PYQTv2, QT_API_PYQT5):
try:
import sip
_sip_imported = True
except ImportError:
# Try using PySide
if QT_RC_MAJOR_VERSION == 5:
QT_API = QT_API_PYSIDE2
else:
QT_API = QT_API_PYSIDE
cond = ("Could not import sip; falling back on PySide\n"
"in place of PyQt4 or PyQt5.\n")
verbose.report(cond, 'helpful')
if _sip_imported:
if QT_API == QT_API_PYQTv2:
if QT_API_ENV == 'pyqt':
cond = ("Found 'QT_API=pyqt' environment variable. "
"Setting PyQt4 API accordingly.\n")
else:
cond = "PyQt API v2 specified."
try:
sip.setapi('QString', 2)
except:
res = 'QString API v2 specification failed. Defaulting to v1.'
verbose.report(cond + res, 'helpful')
# condition has now been reported, no need to repeat it:
cond = ""
try:
sip.setapi('QVariant', 2)
except:
res = 'QVariant API v2 specification failed. Defaulting to v1.'
verbose.report(cond + res, 'helpful')
if QT_API == QT_API_PYQT5:
try:
from PyQt5 import QtCore, QtGui, QtWidgets
_getSaveFileName = QtWidgets.QFileDialog.getSaveFileName
except ImportError:
if _fallback_to_qt4:
# fell through, tried PyQt5, failed fall back to PyQt4
QT_API = rcParams['backend.qt4']
QT_RC_MAJOR_VERSION = 4
else:
raise
# needs to be if so we can re-test the value of QT_API which may
# have been changed in the above if block
if QT_API in [QT_API_PYQT, QT_API_PYQTv2]: # PyQt4 API
from PyQt4 import QtCore, QtGui
try:
if sip.getapi("QString") > 1:
# Use new getSaveFileNameAndFilter()
_getSaveFileName = QtGui.QFileDialog.getSaveFileNameAndFilter
else:
# Use old getSaveFileName()
def _getSaveFileName(*args, **kwargs):
return (QtGui.QFileDialog.getSaveFileName(*args, **kwargs),
None)
except (AttributeError, KeyError):
# call to getapi() can fail in older versions of sip
def _getSaveFileName(*args, **kwargs):
return QtGui.QFileDialog.getSaveFileName(*args, **kwargs), None
try:
# Alias PyQt-specific functions for PySide compatibility.
QtCore.Signal = QtCore.pyqtSignal
try:
QtCore.Slot = QtCore.pyqtSlot
except AttributeError:
# Not a perfect match but works in simple cases
QtCore.Slot = QtCore.pyqtSignature
QtCore.Property = QtCore.pyqtProperty
__version__ = QtCore.PYQT_VERSION_STR
except NameError:
# QtCore did not get imported, fall back to pyside
if QT_RC_MAJOR_VERSION == 5:
QT_API = QT_API_PYSIDE2
else:
QT_API = QT_API_PYSIDE
if QT_API == QT_API_PYSIDE2:
try:
from PySide2 import QtCore, QtGui, QtWidgets, __version__
_getSaveFileName = QtWidgets.QFileDialog.getSaveFileName
except ImportError:
# tried PySide2, failed, fall back to PySide
QT_RC_MAJOR_VERSION = 4
QT_API = QT_API_PYSIDE
if QT_API == QT_API_PYSIDE: # try importing pyside
try:
from PySide import QtCore, QtGui, __version__, __version_info__
except ImportError:
raise ImportError(
"Matplotlib qt-based backends require an external PyQt4, PyQt5,\n"
"PySide or PySide2 package to be installed, but it was not found.")
if __version_info__ < (1, 0, 3):
raise ImportError(
"Matplotlib backend_qt4 and backend_qt4agg require PySide >=1.0.3")
_getSaveFileName = QtGui.QFileDialog.getSaveFileName
# Apply shim to Qt4 APIs to make them look like Qt5
if QT_API in (QT_API_PYQT, QT_API_PYQTv2, QT_API_PYSIDE):
'''Import all used QtGui objects into QtWidgets
    Here I've opted to simply copy QtGui into QtWidgets as that
achieves the same result as copying over the objects, and will
continue to work if other objects are used.
'''
QtWidgets = QtGui
def is_pyqt5():
return QT_API == QT_API_PYQT5
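
# Hedged usage sketch (illustrative only): downstream code typically imports the
# selected binding through this module rather than from PyQt/PySide directly, e.g.
#
#     from matplotlib.backends.qt_compat import QtCore, QtWidgets, is_pyqt5
#     if is_pyqt5():
#         ...  # Qt5-specific behaviour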
| gpl-3.0 |
SeldonIO/seldon-server | python/seldon/text/tests/test_docsim.py | 2 | 1746 | import unittest
from seldon.text import DocumentSimilarity, DefaultJsonCorpus
import logging
from gensim import interfaces, utils
from gensim.corpora.dictionary import Dictionary
import copy
class Test_DocumentSimilarity(unittest.TestCase):
def get_docs(self):
return [{"id":1,"text":"an article about sports and football, Arsenel, Liverpool","tags":"football"},
{"id":2,"text":"an article about football and finance, Liverpool, Arsenel","tags":"football"},
{"id":3,"text":"an article about money and lending","tags":"money"},
{"id":4,"text":"an article about money and banking and lending","tags":"money"}]
def test_sklearn_nmf(self):
corpus = DefaultJsonCorpus(self.get_docs())
ds = DocumentSimilarity(model_type="sklearn_nmf")
ds.fit(corpus)
res = ds.nn(0,k=1)
self.assertEqual(res[0][0],1)
def test_gensim_lsi(self):
corpus = DefaultJsonCorpus(self.get_docs())
ds = DocumentSimilarity(model_type="gensim_lsi")
ds.fit(corpus)
res = ds.nn(0,k=1)
self.assertEqual(res[0][0],1)
def test_gensim_rp(self):
corpus = DefaultJsonCorpus(self.get_docs())
ds = DocumentSimilarity(model_type="gensim_rp")
ds.fit(corpus)
res = ds.nn(0,k=1)
self.assertEqual(res[0][0],1)
    def test_gensim_lsi_score(self):
corpus = DefaultJsonCorpus(self.get_docs())
ds = DocumentSimilarity(model_type="gensim_lsi")
ds.fit(corpus)
score = ds.score(k=1)
self.assertEqual(score,1.0)
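
def _example_document_similarity():
    """Hedged usage sketch (not part of the original tests): fit a small corpus
    and query the nearest neighbours of the first document. Exact scores depend
    on the installed gensim/sklearn versions."""
    docs = [{"id": 1, "text": "an article about sports and football", "tags": "football"},
            {"id": 2, "text": "an article about football and finance", "tags": "football"},
            {"id": 3, "text": "an article about money and lending", "tags": "money"},
            {"id": 4, "text": "an article about money and banking and lending", "tags": "money"}]
    corpus = DefaultJsonCorpus(docs)
    ds = DocumentSimilarity(model_type="gensim_lsi")
    ds.fit(corpus)
    # As in the tests above, the first element of each returned entry is the
    # neighbour's index; document 1 should be the closest to document 0.
    return ds.nn(0, k=2)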
if __name__ == '__main__':
logging.basicConfig(format='%(asctime)s : %(levelname)s : %(message)s', level=logging.INFO)
unittest.main()
| apache-2.0 |