repo_name (string, 6-112 chars) | path (string, 4-204 chars) | copies (string, 1-3 chars) | size (string, 4-6 chars) | content (string, 714-810k chars) | license (15 classes)
---|---|---|---|---|---|
badlands-model/BayesLands | pyBadlands/simulation/buildFlux.py | 1 | 13637 | ##~#~#~#~#~#~#~#~#~#~#~#~#~#~#~#~#~#~#~#~#~#~#~#~#~#~#~#~#~#~#~#~#~#~#~#~#~#~#~#~#~#~##
## ##
## This file forms part of the Badlands surface processes modelling application. ##
## ##
## For full license and copyright information, please refer to the LICENSE.md file ##
## located at the project root, or contact the authors. ##
## ##
##~#~#~#~#~#~#~#~#~#~#~#~#~#~#~#~#~#~#~#~#~#~#~#~#~#~#~#~#~#~#~#~#~#~#~#~#~#~#~#~#~#~##
"""
This file is the main entry point to compute flow network and associated sedimentary fluxes.
"""
import sys
import time
import numpy as np
import mpi4py.MPI as mpi
from matplotlib import path
from pyBadlands import (elevationTIN)
def streamflow(input, FVmesh, recGrid, force, hillslope, flow, elevation, \
lGIDs, rain, tNow, verbose=False):
"""
Compute flow network.
"""
rank = mpi.COMM_WORLD.rank
size = mpi.COMM_WORLD.size
comm = mpi.COMM_WORLD
# Update sea-level
walltime = time.clock()
force.getSea(tNow)
fillH = None
# Update river input
force.getRivers(tNow)
riverrain = rain+force.rivQw
# Build an initial depression-less surface at start time if required
if input.tStart == tNow and input.nopit == 1 :
fillH = elevationTIN.pit_stack_PD(elevation,input.nopit,force.sealevel)
elevation = fillH
else:
fillH = elevationTIN.pit_stack_PD(elevation,0,force.sealevel)
if rank == 0 and verbose and input.spl:
print " - depression-less algorithm PD with stack", time.clock() - walltime
# Compute stream network
walltime = time.clock()
flow.SFD_receivers(fillH, elevation, FVmesh.neighbours,
FVmesh.vor_edges, FVmesh.edge_length,
lGIDs)
if rank == 0 and verbose:
print " - compute receivers parallel ", time.clock() - walltime
# Distribute local minima evenly across processors on the filled surface
walltime = time.clock()
flow.localbase = np.array_split(flow.base, size)[rank]
flow.ordered_node_array_filled()
if rank == 0 and verbose:
print " - compute stack order locally for filled surface", time.clock() - walltime
walltime = time.clock()
stackNbs = comm.allgather(len(flow.localstack))
globalstack = np.zeros(sum(stackNbs), dtype=flow.localstack.dtype)
comm.Allgatherv(sendbuf=[flow.localstack, mpi.INT],
recvbuf=[globalstack, (stackNbs, None), mpi.INT])
flow.stack = globalstack
if rank == 0 and verbose:
print " - send stack order for filled surface globally ", time.clock() - walltime
# Distribute local minima evenly across processors on the real surface
walltime = time.clock()
flow.localbase1 = np.array_split(flow.base1, size)[rank]
flow.ordered_node_array_elev()
if rank == 0 and verbose:
print " - compute stack order locally for real surface", time.clock() - walltime
walltime = time.clock()
stackNbs1 = comm.allgather(len(flow.localstack1))
globalstack1 = np.zeros(sum(stackNbs1), dtype=flow.localstack1.dtype)
comm.Allgatherv(sendbuf=[flow.localstack1, mpi.INT],
recvbuf=[globalstack1, (stackNbs1, None), mpi.INT])
flow.stack1 = globalstack1
if rank == 0 and verbose:
print " - send stack order for real surface globally ", time.clock() - walltime
# Compute a unique ID for each local depression and its downstream draining nodes
flow.compute_parameters_depression(fillH,elevation,FVmesh.control_volumes,force.sealevel)
# Compute discharge
walltime = time.clock()
flow.compute_flow(elevation, FVmesh.control_volumes, riverrain)
if rank == 0 and verbose:
print " - compute discharge ", time.clock() - walltime
return fillH, elevation
def sediment_flux(input, recGrid, hillslope, FVmesh, tMesh, flow, force, rain, lGIDs, applyDisp, straTIN, \
mapero, cumdiff, cumhill, fillH, disp, inGIDs, elevation, tNow, tEnd, verbose=False):
"""
Compute sediment fluxes.
"""
rank = mpi.COMM_WORLD.rank
size = mpi.COMM_WORLD.size
comm = mpi.COMM_WORLD
flow_time = time.clock()
#verbose = True
# Get active layer
if straTIN is not None:
walltime = time.clock()
flow.activelay[flow.activelay<1.] = 1.
flow.activelay[flow.activelay>straTIN.activeh] = straTIN.activeh
straTIN.get_active_layer(flow.activelay,verbose)
activelay = straTIN.alayR
flow.straTIN = 1
# Set the average erodibility based on rock types in the active layer
flow.erodibility = np.sum(straTIN.rockCk*activelay/flow.activelay.reshape(len(elevation),1),axis=1)
eroCk = straTIN.rockCk
if rank == 0 and verbose:
print " - Get active layer ", time.clock() - walltime
else:
activelay = None
eroCk = 0.
# Find border/inside nodes
if flow.domain is None:
ids = np.arange(len(FVmesh.control_volumes))
tmp1 = np.where(FVmesh.control_volumes>0.)[0]
xyMin = [recGrid.regX.min()-1., recGrid.regY.min()-1.]
xyMax = [recGrid.regX.max()+1., recGrid.regY.max()+1.]
flow.domain = path.Path([(xyMin[0],xyMin[1]),(xyMax[0],xyMin[1]), (xyMax[0],xyMax[1]), (xyMin[0],xyMax[1])])
tmp2 = flow.domain.contains_points(flow.xycoords)
flow.insideIDs = np.intersect1d(tmp1,ids[tmp2])
flow.borders = np.zeros(len(FVmesh.control_volumes),dtype=int)
flow.borders[flow.insideIDs] = 1
flow.outsideIDs = np.where(flow.borders==0)[0]
xyMin2 = [recGrid.regX.min()+recGrid.resEdges, recGrid.regY.min()+recGrid.resEdges]
xyMax2 = [recGrid.regX.max()-recGrid.resEdges, recGrid.regY.max()-recGrid.resEdges]
xyMin2 = [recGrid.regX.min()+1, recGrid.regY.min()+1]
xyMax2 = [recGrid.regX.max()-1, recGrid.regY.max()-1]
domain = path.Path([(xyMin2[0],xyMin2[1]),(xyMax2[0],xyMin2[1]), (xyMax2[0],xyMax2[1]), (xyMin2[0],xyMax2[1])])
tmp3 = domain.contains_points(flow.xycoords)
flow.insideIDs2 = ids[tmp3]
flow.borders2 = np.zeros(len(FVmesh.control_volumes),dtype=int)
flow.borders2[flow.insideIDs2] = 1
flow.outsideIDs2 = np.where(flow.borders2==0)[0]
# Compute CFL condition
walltime = time.clock()
if input.Hillslope and hillslope.updatedt == 0:
if hillslope.Sc == 0:
hillslope.dt_stability(FVmesh.edge_length[inGIDs,:tMesh.maxNgbh])
else:
hillslope.dt_stabilityCs(elevation, FVmesh.neighbours, FVmesh.edge_length,
lGIDs, flow.borders2)
if hillslope.CFL < input.minDT:
print 'Decrease your hillslope diffusion coefficients to ensure stability.'
sys.exit(0)
hillslope.dt_stability_ms(FVmesh.edge_length[inGIDs,:tMesh.maxNgbh])
elif hillslope.CFL is None:
hillslope.CFL = tEnd-tNow
flow.dt_stability(fillH, inGIDs)
CFLtime = min(flow.CFL, hillslope.CFL)
if CFLtime>1.:
CFLtime = float(round(CFLtime-0.5,0))
if rank == 0 and verbose:
print 'CFL for hillslope and flow ',hillslope.CFL,flow.CFL,CFLtime
CFLtime = min(CFLtime, tEnd - tNow)
CFLtime = max(input.minDT, CFLtime)
CFLtime = min(input.maxDT, CFLtime)
if rank == 0 and verbose:
print " - Get CFL time step ", time.clock() - walltime
# Compute sediment fluxes
if input.erolays >= 0:
oldelev = np.copy(elevation)
# Initial cumulative elevation change
walltime = time.clock()
timestep, sedchange, erosion, deposition = flow.compute_sedflux(FVmesh.control_volumes, elevation, rain, fillH,
CFLtime, activelay, eroCk, force.rivQs, force.sealevel, input.perc_dep,
input.slp_cr, FVmesh.neighbours, verbose=False)
if rank == 0 and verbose:
print " - Get stream fluxes ", time.clock() - walltime
ed = np.sum(sedchange,axis=1)
elevation += ed
cumdiff += ed
# Compute marine sediment diffusion
if hillslope.CDriver > 0.:
walltime = time.clock()
# Initialise marine sediments diffusion array
it = 0
sumdep = np.sum(deposition,axis=1)
maxth = 0.1
diffstep = timestep
diffcoeff = hillslope.sedfluxmarine(force.sealevel, elevation, FVmesh.control_volumes)
# Perform river related sediment diffusion
while diffstep > 0. and it < 1000:
# Define maximum time step
maxstep = min(hillslope.CFLms,diffstep)
# Compute maximum marine fluxes and maximum timestep to avoid excessive diffusion erosion
diffmarine, mindt = flow.compute_marine_diffusion(elevation, sumdep, FVmesh.neighbours, FVmesh.vor_edges,
FVmesh.edge_length, diffcoeff, lGIDs, force.sealevel, maxth, maxstep)
diffmarine[flow.outsideIDs] = 0.
maxstep = min(mindt,maxstep)
# if maxstep < input.minDT:
# print 'WARNING: marine diffusion time step is smaller than minimum timestep:',maxstep
# print 'You will need to decrease your diffusion coefficient for criver'
# stop
# Update diffusion time step and total diffused thicknesses
diffstep -= maxstep
# Distribute rock based on their respective proportions in the deposited columns
if straTIN is not None:
# Compute multi-rock diffusion
sedpropflux, difftot = flow.compute_sediment_marine(elevation, deposition, sumdep,
diffcoeff*maxstep, FVmesh.neighbours, force.sealevel,
maxth, FVmesh.vor_edges, FVmesh.edge_length, lGIDs)
difftot[flow.outsideIDs] = 0.
sedpropflux[flow.outsideIDs,:] = 0.
# Update deposition for each rock type
deposition += sedpropflux
deposition[deposition<0] = 0.
# Update elevation, erosion/deposition
sumdep += difftot
elevation += difftot
cumdiff += difftot
else:
# Update elevation, erosion/deposition
sumdep += diffmarine*maxstep
elevation += diffmarine*maxstep
cumdiff += diffmarine*maxstep
it += 1
if rank == 0 and verbose:
print " - Get river sediment marine fluxes ", time.clock() - walltime
# Compute hillslope processes
dtype = 1
if straTIN is None:
dtype = 0
walltime = time.clock()
area = np.copy(FVmesh.control_volumes)
area[flow.outsideIDs2] = 0.
diffcoeff = hillslope.sedflux(force.sealevel, elevation, FVmesh.control_volumes)
diffcoeff[flow.outsideIDs2] = 0.
diff_flux = flow.compute_hillslope_diffusion(elevation, FVmesh.neighbours, FVmesh.vor_edges,
FVmesh.edge_length, lGIDs, dtype, hillslope.Sc)
diff_flux[flow.outsideIDs2] = 0.
cdiff = diffcoeff*diff_flux*timestep
if straTIN is None:
if input.btype == 'outlet':
cdiff[flow.insideIDs[0]] = 0.
# Update dataset
elevation[flow.insideIDs] += cdiff[flow.insideIDs]
cumdiff[flow.insideIDs] += cdiff[flow.insideIDs]
cumhill[flow.insideIDs] += cdiff[flow.insideIDs]
else:
straTIN.update_layers(erosion, deposition, elevation, verbose)
# Get the active layer thickness to erode using diffusion
maxlayh = -cdiff
maxlayh[maxlayh<1.] = 1.
straTIN.get_active_layer(maxlayh)
# Compute multi-rock diffusion
tdiff, erosion, deposition = flow.compute_sediment_hillslope(elevation, straTIN.alayR,
diffcoeff*timestep, FVmesh.neighbours, FVmesh.vor_edges,
maxlayh, FVmesh.edge_length, lGIDs)
if input.btype == 'outlet':
tdiff[flow.insideIDs[0],:] = 0.
# # Update dataset
elevation += tdiff
cumdiff += tdiff
cumhill += tdiff
# Update active layer
straTIN.update_layers(erosion, deposition, elevation, verbose)
if input.btype == 'slope':
elevation[:len(flow.parentIDs)] = elevation[flow.parentIDs]-0.1
elif input.btype == 'flat':
elevation[:len(flow.parentIDs)] = elevation[flow.parentIDs]
elif input.btype == 'wall':
elevation[:len(flow.parentIDs)] = elevation[flow.parentIDs]+100.
elif input.btype == 'outlet':
elevation[1:len(flow.parentIDs)] = elevation[flow.parentIDs[1:]]+100.
elif input.btype == 'wall1':
elevation[:len(flow.parentIDs)] = elevation[flow.parentIDs]-0.1
elevation[:recGrid.nx+1] = elevation[flow.parentIDs[:recGrid.nx+1]]+100.
if rank == 0 and verbose:
print " - Get hillslope fluxes ", time.clock() - walltime
# Update erodibility values
if input.erolays >= 0:
mapero.getErodibility(elevation-oldelev)
flow.erodibility = mapero.erodibility
if applyDisp:
elevation += disp * timestep
tNow += timestep
if rank == 0 and verbose:
print " - Flow computation ", time.clock() - flow_time
return tNow,elevation,cumdiff,cumhill
| gpl-3.0 |
numenta/NAB | nab/detectors/random_cut_forest/random_cut_forest.py | 1 | 16737 | #! /usr/bin/env python
# ----------------------------------------------------------------------
# Copyright (C) 2018, Numenta, Inc. Unless you have an agreement
# with Numenta, Inc., for a separate license for this software code, the
# following terms and conditions apply:
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero Public License version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU Affero Public License for more details.
#
# You should have received a copy of the GNU Affero Public License
# along with this program. If not, see http://www.gnu.org/licenses.
#
# http://numenta.org/licenses/
# ----------------------------------------------------------------------
"""
Use this script to evaluate [Robust Random Cut Forest Based Anomaly Detection On
Streams][4] algorithm on NAB.
This script will create an [AWS Kinesis Analytics][2] application that
will use the [RANDOM_CUT_FOREST][3] function to detect anomalies on NAB data
files as they are streamed via AWS API ("boto3"). The application's output
stream with the anomaly scores will be stored in the "results" folder for
further processing using NAB standard tools ("run.py") to optimize, normalize
and score the results.
See [NAB Entry Points][1] "Option 2" for more information.
The following commands were used to calculate NAB scores using this script:
```
# Create results folders
python scripts/create_new_detector.py --detector randomCutForest
# Create kinesis application
python nab/detectors/random_cut_forest/random_cut_forest.py --create
# Stream all NAB data
python nab/detectors/random_cut_forest/random_cut_forest.py --stream
# Clean up
python nab/detectors/random_cut_forest/random_cut_forest.py --delete
# Compute NAB scores
python run.py -d randomCutForest --optimize --score --normalize
```
[1]: https://github.com/numenta/NAB/wiki/NAB-Entry-Points
[2]: https://aws.amazon.com/kinesis/data-analytics/
[3]: https://docs.aws.amazon.com/kinesisanalytics/latest/sqlref/sqlrf-random-cut-forest.html
[4]: http://proceedings.mlr.press/v48/guha16.pdf
"""
import os
import sys
import time
import argparse
import boto3
import pandas
from nab.corpus import Corpus
from nab.labeler import CorpusLabel
SCRIPT_PATH = os.path.abspath(os.path.dirname(__file__))
DATA_PATH = os.path.normpath(os.path.join(SCRIPT_PATH, os.path.pardir,
os.path.pardir, os.path.pardir,
"data"))
RESULTS_PATH = os.path.normpath(os.path.join(SCRIPT_PATH, os.path.pardir,
os.path.pardir, os.path.pardir,
"results"))
LABELS_FILE = os.path.normpath(os.path.join(SCRIPT_PATH, os.path.pardir,
os.path.pardir, os.path.pardir,
"labels", "combined_windows.json"))
APPLICATION_SOURCE_FILE = os.path.join(SCRIPT_PATH, "random_cut_forest.sql")
ROLE_TRUST_POLICY_FILE = os.path.join(SCRIPT_PATH, "role_trust_policy.json")
ROLE_PERMISSION_POLICY_FILE = os.path.join(SCRIPT_PATH,
"role_permission_policy.json")
DETECTOR_NAME = "randomCutForest"
APPLICATION_NAME = "nab_rcf"
OUTPUT_STREAM_NAME = "nab_output"
INPUT_STREAM_NAME = "nab_input"
# Configure kinesis analytics application input stream schema. This schema
# should match the SQL stream definition. See "random_cut_forest.sql"
INPUT_SCHEMA = {
"RecordColumns": [{
"Name": "COL_TIMESTAMP",
"SqlType": "TIMESTAMP"
}, {
"Name": "COL_VALUE",
"SqlType": "DOUBLE"
}],
"RecordFormat": {
"MappingParameters": {
"CSVMappingParameters": {
"RecordColumnDelimiter": ",",
"RecordRowDelimiter": "\n"
}
},
"RecordFormatType": "CSV"
}
}
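# Illustrative sketch only (not part of the original detector): a single CSV
# record matching the schema above would consist of COL_TIMESTAMP and COL_VALUE
# separated by the "," column delimiter and terminated by the "\n" row
# delimiter. The concrete timestamp/value pair below is hypothetical:
#
#   example_record = "2014-04-01 00:00:00,10844.0\n"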
def createStreams():
"""
Creates AWS Kinesis input and output streams
:return: dictionary with the newly created stream ARNs
"""
kinesis = boto3.client("kinesis")
result = {}
streams = [INPUT_STREAM_NAME, OUTPUT_STREAM_NAME]
for name in streams:
kinesis.create_stream(StreamName=name, ShardCount=1)
# Wait until all streams are created
waiter = kinesis.get_waiter('stream_exists')
for name in streams:
waiter.wait(StreamName=name)
response = kinesis.describe_stream(StreamName=name)
result[name] = response["StreamDescription"]["StreamARN"]
return result
def deleteStreams():
"""
Deletes AWS Kinesis streams created by "createStreams"
"""
kinesis = boto3.client("kinesis")
streams = [INPUT_STREAM_NAME, OUTPUT_STREAM_NAME]
for name in streams:
try:
kinesis.delete_stream(StreamName=name)
except kinesis.exceptions.ResourceNotFoundException:
pass
# Wait until all streams are deleted
waiter = kinesis.get_waiter('stream_not_exists')
for name in streams:
waiter.wait(StreamName=name)
def createRole(inputStream, outputStream):
"""
Creates a new AWS IAM Role with access to the input and output AWS Kinesis
streams created by "createStreams".
See "role_permission_policy.json" and "role_trust_policy.json"
:param inputStream: Kinesis input stream ARN
:param outputStream: Kinesis output stream ARN
:return: The Role ARN
"""
iam = boto3.client("iam")
roleName = "kinesis-analytics-service-{0}-role".format(APPLICATION_NAME)
# Create a new role and give it access to the input and output streams
trustPolicy = open(ROLE_TRUST_POLICY_FILE, "r").read()
response = iam.create_role(Path="/nab/", RoleName=roleName,
AssumeRolePolicyDocument=trustPolicy)
role = response["Role"]
permissionPolicy = open(ROLE_PERMISSION_POLICY_FILE, "r").read()
policyName = "kinesis-analytics-service-{0}-policy".format(APPLICATION_NAME)
iam.put_role_policy(RoleName=roleName, PolicyName=policyName,
PolicyDocument=permissionPolicy % {
"inputStream": inputStream,
"outputStream": outputStream})
# FIXME: Wait until IAM role policy update is propagated
time.sleep(20)
return role['Arn']
def deleteRole():
"""
Deletes the role created via "createRole" for the given application
"""
iam = boto3.client("iam")
roleName = "kinesis-analytics-service-{0}-role".format(APPLICATION_NAME)
policyName = "kinesis-analytics-service-{0}-policy".format(APPLICATION_NAME)
try:
iam.delete_role_policy(RoleName=roleName, PolicyName=policyName)
except iam.exceptions.NoSuchEntityException:
pass
try:
iam.delete_role(RoleName=roleName)
except iam.exceptions.NoSuchEntityException:
pass
def createApplication():
"""
Create a new AWS Kinesis Analytics Application used to provide anomaly
scores from NAB data files. See "random_cut_forest.sql"
"""
print("Creating kinesis streams")
streams = createStreams()
inputStream = streams[INPUT_STREAM_NAME]
outputStream = streams[OUTPUT_STREAM_NAME]
print("Creating IAM Role")
role = createRole(inputStream, outputStream)
print("Creating kinesis analytics application")
sourceCode = open(APPLICATION_SOURCE_FILE, "r").read()
kinesisAnalytics = boto3.client("kinesisanalytics")
kinesisAnalytics.create_application(
ApplicationName=APPLICATION_NAME,
ApplicationCode=sourceCode,
Inputs=[{
"NamePrefix": "SOURCE_SQL_STREAM",
"InputSchema": INPUT_SCHEMA,
"KinesisStreamsInput": {
"ResourceARN": inputStream,
"RoleARN": role
}
}],
Outputs=[{
"Name": "DESTINATION_SQL_STREAM",
"DestinationSchema": {
"RecordFormatType": "CSV"
},
"KinesisStreamsOutput": {
"ResourceARN": outputStream,
"RoleARN": role
}
}])
def startApplication():
"""
Starts the application created via "createApplication"
"""
kinesisAnalytics = boto3.client("kinesisanalytics")
response = kinesisAnalytics.describe_application(
ApplicationName=APPLICATION_NAME)
application = response["ApplicationDetail"]
inputId = application['InputDescriptions'][0]['InputId']
kinesisAnalytics.start_application(ApplicationName=APPLICATION_NAME,
InputConfigurations=[{
"Id": inputId,
"InputStartingPositionConfiguration": {
"InputStartingPosition": "NOW"
}
}])
# Wait until application starts running
response = kinesisAnalytics.describe_application(
ApplicationName=APPLICATION_NAME)
status = response["ApplicationDetail"]["ApplicationStatus"]
sys.stdout.write('Starting ')
while status != "RUNNING":
sys.stdout.write('.')
sys.stdout.flush()
time.sleep(1)
response = kinesisAnalytics.describe_application(
ApplicationName=APPLICATION_NAME)
status = response["ApplicationDetail"]["ApplicationStatus"]
sys.stdout.write(os.linesep)
def stopApplication():
"""
Stops the application created via "createApplication"
"""
kinesisAnalytics = boto3.client("kinesisanalytics")
kinesisAnalytics.stop_application(ApplicationName=APPLICATION_NAME)
# Wait until application stops running
response = kinesisAnalytics.describe_application(
ApplicationName=APPLICATION_NAME)
status = response["ApplicationDetail"]["ApplicationStatus"]
sys.stdout.write('Stopping ')
while status != "READY":
sys.stdout.write('.')
sys.stdout.flush()
time.sleep(1)
response = kinesisAnalytics.describe_application(
ApplicationName=APPLICATION_NAME)
status = response["ApplicationDetail"]["ApplicationStatus"]
sys.stdout.write(os.linesep)
def deleteApplication():
"""
Deletes the application created via "createApplication"
"""
print("Deleting IAM Role")
deleteRole()
print("Deleting kinesis streams")
deleteStreams()
print("Deleting kinesis analytics application")
kinesisAnalytics = boto3.client("kinesisanalytics")
try:
response = kinesisAnalytics.describe_application(
ApplicationName=APPLICATION_NAME)
kinesisAnalytics.delete_application(
ApplicationName=response["ApplicationDetail"]["ApplicationName"],
CreateTimestamp=response["ApplicationDetail"]["CreateTimestamp"])
except kinesisAnalytics.exceptions.ResourceNotFoundException:
pass
def streamFile(corpus, corpusLabel, resultsdir, name):
"""
Streams a single NAB data file to Kinesis Analytics Application saving the
results for further processing by NAB tools
:param corpus: NAB corpus created via "corpus = Corpus(datadir)"
:param corpusLabel: NAB corpus labels
:param resultsdir: Path to store the results. Make sure to run
'scripts/create_new_detector.py --detector randomCutForest'
first
:param name: NAB data file name (i.e. "realKnownCause/nyc_taxi.csv")
:return: The result file absolute path
"""
print("Streaming", name)
startApplication()
# Get latest position from the output stream before streaming new records
kinesis = boto3.client("kinesis")
response = kinesis.describe_stream(StreamName=OUTPUT_STREAM_NAME)
shardId = response["StreamDescription"]["Shards"][0]["ShardId"]
response = kinesis.get_shard_iterator(StreamName=OUTPUT_STREAM_NAME,
ShardId=shardId,
ShardIteratorType="LATEST")
shardIterator = response["ShardIterator"]
# Send NAB data as a single CSV file to the input stream
datafile = corpus.dataFiles[name]
total = datafile.data.shape[0]
kinesis.put_record(StreamName=INPUT_STREAM_NAME,
PartitionKey=name,
Data=datafile.data.to_csv(header=False, index=False))
# Make sure to read all records from output stream
rows = []
sys.stdout.write("\rProcessed 0/{} ".format(total))
sys.stdout.flush()
while len(rows) < total:
response = kinesis.get_records(ShardIterator=shardIterator)
records = response["Records"]
if len(records) > 0:
parsed_records = []
for rec in records:
parsed_record = str(rec["Data"], "utf-8")
parsed_record = parsed_record.strip('\n')
parsed_record = parsed_record.split(",")
parsed_records.append(parsed_record)
rows.extend(parsed_records)
shardIterator = response["NextShardIterator"]
sys.stdout.write("\rProcessed {}/{} ".format(len(rows), total))
sys.stdout.flush()
else:
# Back off until the application starts streaming the anomalies
sys.stdout.write(".")
sys.stdout.flush()
time.sleep(1)
sys.stdout.write(os.linesep)
# Streaming results may arrive out of order
rows.sort()
results = pandas.DataFrame(rows, columns=["timestamp", "value",
"anomaly_score"])
# Add NAB corpus labels
results["label"] = corpusLabel.labels[name]["label"]
# Save results
relativeDir, fileName = os.path.split(name)
resultFile = os.path.join(resultsdir, DETECTOR_NAME, relativeDir,
"{}_{}".format(DETECTOR_NAME, fileName))
results.to_csv(resultFile, index=False)
# Stop application after every data file to reset the algorithm
stopApplication()
return resultFile
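# Hedged usage sketch mirroring the module-level defaults defined above (the
# data file name is the example from the docstring and is assumed to exist
# under DATA_PATH):
#
#   corpus = Corpus(DATA_PATH)
#   corpusLabel = CorpusLabel(path=LABELS_FILE, corpus=corpus)
#   streamFile(corpus, corpusLabel, RESULTS_PATH, "realKnownCause/nyc_taxi.csv")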
def streamAll(corpus, corpusLabel, resultsdir):
"""
Streams all files in the NAB corpus
:param corpus: NAB corpus created via "corpus = Corpus(dataDir)"
:param corpusLabel: NAB corpus labels
:param resultsdir: Path to store the results. Make sure to run
'scripts/create_new_detector.py --detector randomCutForest'
first
"""
for name in list(corpus.dataFiles.keys()):
streamFile(corpus, corpusLabel, resultsdir, name)
def main(args):
if args.create:
createApplication()
if args.start:
startApplication()
if args.stop:
stopApplication()
if args.file:
corpus = Corpus(args.data)
labels = CorpusLabel(path=args.labels, corpus=corpus)
streamFile(corpus, labels, args.results, args.file)
if args.stream:
corpus = Corpus(args.data)
labels = CorpusLabel(path=args.labels, corpus=corpus)
streamAll(corpus, labels, args.results)
if args.delete:
deleteApplication()
if __name__ == "__main__":
parser = argparse.ArgumentParser(
description="Use this script to evaluate 'Robust Random Cut Forest Based "
"Anomaly Detection On Streams' algorithm on NAB.",
epilog="Make sure to run 'scripts/create_new_detector.py --detector "
"randomCutForest' before using this script. See README.md for "
"details",
formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser.add_argument("--data",
default=DATA_PATH,
help="Path to NAB data files.")
parser.add_argument("--labels",
default=LABELS_FILE,
help="JSON file containing ground truth labels for the "
"corpus.")
parser.add_argument("--results",
default=RESULTS_PATH,
help="Path to NAB results path.")
parser.add_argument("--create", "-c",
help="Create AWS Kinesis application",
default=False,
action="store_true")
parser.add_argument("--delete", "-d",
help="Delete AWS Kinesis application",
default=False,
action="store_true")
parser.add_argument("--start",
help="Start AWS Kinesis application",
default=False,
action="store_true")
parser.add_argument("--stop",
help="Stop AWS Kinesis application",
default=False,
action="store_true")
parser.add_argument("--stream", "-s",
default=False,
help="Stream all NAB data files to AWS Kinesis "
"application",
action="store_true")
parser.add_argument("--file", "-f",
help="Stream a single NAB data file name to AWS Kinesis "
"application")
args = parser.parse_args()
if args.create or args.start or args.file or args.stream or args.stop \
or args.delete:
main(args)
else:
parser.print_help()
| agpl-3.0 |
scizen9/kpy | SEDM/GUIhandle.py | 2 | 20745 | '''
The SegmentationMap class wraps several functions in the SEDM package and
provides a convenient way to handle SEDM data.
'''
import NPK.Atmosphere as Atm
import NPK.PlotHelp as PH
import datetime
import numpy as np
import json
import os
import pyfits
import scipy.io
import matplotlib.pyplot as pl
from matplotlib.backend_bases import KeyEvent
from matplotlib.backend_bases import PickEvent
from scipy.interpolate import interp1d
import Disp
import SegMap
reload(SegMap)
from SegMap import SegmentationMap
import Extract
from NPK.Standards import Standards
reload(Extract)
reload(Disp)
def clean_header(header):
todel = ['SIMPLE', 'BITPIX', 'NAXIS', 'NAXIS1', 'NAXIS2',
'NAXIS0', 'EXTEND', 'BZERO', 'BSCALE']
for d in todel:
try: del header[d]
except: pass
return header
def merge_headers(headers):
h = headers[0]
i = 1
for new in headers[1:]:
for key in new.keys():
if 'NAXIS' in key: continue
if key in h:
if h[key] != new[key]:
try: h[key + "_%2.2i" % i] = new[key]
except: pass
else:
try: h[key + "_%2.2i" % i] = new[key]
except: pass
return h
class PositionPicker(object):
'''Shows the IFU field and allows the user to select an object.
Takes:
A plan list composed of dictionaries, each of which includes:
infiles [N_spec]: Strings of paths to .mat files
name: String of object name
Optional:
outdir: path to write the results to; otherwise defaults to
outdir/[name]/[obsdate]/version#/....
shifts [N_spec]: Float of pixels to shift spectrum for flexure
'''
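# Illustrative sketch of a single plan entry; the path and values below are
# hypothetical, not taken from an actual observation plan:
#
# example_plan_entry = {
#     'name': 'some_target',
#     'infiles': ['/path/to/shrunk_some_target_ifu_SI.mat'],
#     'object_diam': 2,        # optional: extraction diameter
#     'sky_annulus': [4, 6],   # optional: inner/outer sky annulus diameters
# }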
index = 0 # index into the file list
spx_ix = 0 # index into spectrum list
plan = None
subtract_sky = False
picked = None # Spectrum position picked in IFU coordinates
olines = [372.7, 486.1, 500.7, 656.3]
tlines = [761.5, 589.0, 557.7, 435.8, 519.9, 630.0]
positions=None
qecurve = None
output_head = "/scr2/npk/sedm/reduced/"
show_calib = True
norm = None
def __init__(self,plan,positions=('OnSkyX','OnSkyY'),
qefunction=None, stdpaths=None, normpath=None):
print "Starting picker GUI"
self.plan = plan
self.positions=positions
self.stdpaths = stdpaths
self.load_stds()
if normpath is not None:
self.norm = np.load(normpath)
self.norm[self.norm < .5] = np.nan
self.norm[self.norm > 2] = np.nan
self.fig = pl.figure(1)
self.fig.canvas.mpl_connect("key_press_event", self)
self.fig.canvas.mpl_connect("pick_event", self)
self.fig.canvas.mpl_connect("button_press_event", self)
self.fig2 = pl.figure(2,figsize=(16,4.5))
self.fig2.canvas.mpl_connect("key_press_event", self)
self.fig2.canvas.mpl_connect("pick_event", self)
self.fig3 = pl.figure(3, figsize=(12,4))
self.fig3.canvas.mpl_connect("key_press_event", self)
self.fig3.canvas.mpl_connect("pick_event", self)
self.fig4 = pl.figure(4, figsize=(8,2.25))
self.fig4.canvas.mpl_connect("key_press_event", self)
self.fig4.canvas.mpl_connect("pick_event", self)
if qefunction is not None:
self.qecurve = qefunction
self.index = 0
self.check_plan()
self.load()
self.draw()
def check_plan(self):
'''Checks the plan to see if it makes sense and the files load'''
for el in self.plan:
print el['name']
for fname in el['infiles']:
if not os.path.exists(fname):
raise Exception("%s: file does not exist" % fname)
if 'object_diam' in el:
if not (0 < el['object_diam'] < 300):
raise Exception("%s: not appropriate object diam" %
el['object_diam'])
if 'sky_annulus' in el:
if len(el['sky_annulus']) != 2:
raise Exception("%s: not appropriate sky annulus" %
el["sky_annulus"])
def dump(self):
"""Write status to file"""
plan = self.plan[self.index]
dt = datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S")
if self.status == 'standard':
outfile = os.path.join(self.outdir,
"STD-%s.json" % plan['name'])
else:
outfile = os.path.join(self.outdir,
"%s.json" % plan['name'])
print "Writing to: %s" % outfile
str = json.dumps({"outfile": outfile,
"infiles": plan['infiles'],
"object_diam": plan['object_diam'],
"sky_annulus": plan['sky_annulus'],
"status": self.status,
"picked": self.picked,
"pixel_shift": self.pixel_shift,
"when": dt}, indent=4)
try:
f=open(outfile, "w")
f.write(str)
f.close()
except Exception as e:
raise("Could not write %s: %s" % (outfile, e))
outfile = os.path.join(self.outdir,
"%s_all" % plan['name'])
np.savez(outfile, self.all_obj_spec)
outfile = os.path.join(self.outdir,
"%s_sky" % plan['name'])
np.savez(outfile, self.all_sky_spec)
result = np.array([self.sky_spec[0], self.sky_spec[1],
self.obj_spec[1]-self.sky_spec[1]])
self.RESULTS[self.spx_ix] = result
header = merge_headers(self.headers)
pf = pyfits.PrimaryHDU(result,header=header)
name = self.plan[self.index]['name']
pf.header["OBJECT"] = name
pf.header["SPEC"] = self.plan[self.index]['infiles'][self.spx_ix]
outpath = os.path.join(self.outdir, "%s_%i.fits" % (name,
self.spx_ix))
try: os.remove(outpath)
except: pass
pf.writeto(outpath)
self.draw_res()
def draw_res(self):
''' Draw the resulting spectrum'''
pl.figure(3)
pl.clf()
pl.xlim([350, 950])
pl.ylim(-1000,4000)
allwav = allsky = allobj = None
headers = []
for i in xrange(len(self.RESULTS)):
res = self.RESULTS[i]
if res is None: continue
header = self.headers[i]
header = clean_header(header)
headers.append(header)
airmass = header['airmass']
wave, sky, obj = res.copy()
skyf = interp1d(wave,sky,fill_value=np.nan,bounds_error=False)
objf = interp1d(wave,obj,fill_value=np.nan,bounds_error=False)
if self.qecurve is None: correction = 1.0
else:
print "Applying correction"
ext = 10**(-Atm.ext(wave*10)*airmass/2.5)
correction = 1/self.qecurve(wave*10)*ext
correction=1.
pl.step(wave,obj*correction)
if allwav is None:
allwav=wave[:]
allsky=sky[:]
allobj=obj[:]
else:
allsky += skyf(allwav)
allobj += objf(allwav)
if self.qecurve is None: correction = 1.0
else: correction = 1/self.qecurve(allwav*10)
pl.step(allwav, allobj*correction, linewidth=3)
#pl.step(allwav, allobj, linewidth=3)
# Raw data
result = np.array([allwav, allsky, allobj])
pf = pyfits.PrimaryHDU(result, header=header)
name = self.plan[self.index]['name']
pf.header["REDNAME"] = name
outpath = os.path.join(self.outdir, "%s.fits" % (name))
try: os.remove(outpath)
except: pass
pf.writeto(outpath)
# Corrected
if self.qecurve is not None:
result = np.array([allwav, allsky*correction, allobj*correction])
pf = pyfits.PrimaryHDU(result, header=header)
name = self.plan[self.index]['name']
pf.header["REDNAME"] = name
outpath = os.path.join(self.outdir, "%s_corr.fits" % (name))
try: os.remove(outpath)
except: pass
pf.writeto(outpath)
def load(self):
"""Load the Segmentation map"""
self.spx_ix = 0
pl.figure(1)
pl.clf()
if self.index < 0: self.index =0
if self.index >= len(self.plan):
self.index = len(self.plan)-1
print "REACHED THE END"
cur_plan = self.plan[self.index]
nfiles = len(cur_plan['infiles'])
self.SM = []
self.headers = []
for i in xrange(nfiles):
self.SM.append(SegmentationMap(cur_plan['infiles'][i],
positions=self.positions, norm=self.norm))
fits = cur_plan['infiles'][i].replace("shrunk_","").rstrip('_SI.mat')
FF = pyfits.open(fits)
self.headers.append(FF[0].header)
self.status = ["unknown"] * len(self.SM)
self.picked = [None] * len(self.SM)
self.pixel_shift = [0.0] * len(self.SM)
self.RESULTS = [None] * len(self.SM)
fn = cur_plan['infiles'][0]
fname = fn.split("/")[-1]
print fname
if "shrunk" in fname: fname = fname.replace("shrunk","")
if "_crr" in fname: fname = fname.replace("_crr","")
if "_s" in fname: fname = fname.replace("_s","")
if "_b" in fname: fname = fname.replace("_b","")
if "b_" in fname: fname = fname.replace("b_","")
if "ifu" in fname: fname = fname.replace("_ifu","")
fname = fname.rstrip(".fits_SI.mat")
print fname
fn = fname
y,mon,d = (fn[0:4], fn[4:6], fn[6:8])
h,min,s = (fn[9:11], fn[12:14], fn[15:17])
month = ["none","jan", "feb", "mar", "apr", "may", "jun", "jul", "aug",
"sep", "oct", "nov", "dec"][int(mon)]
outprefix = os.path.join(self.output_head,
cur_plan['name'],
'%s_%s_%s_%s_%s_%s' % (y, month, d, h, min, s))
try:
os.makedirs(outprefix)
except:
pass
for i in xrange(99):
outdir = os.path.join(outprefix, "v%2.2i" % i)
if not os.path.exists(outdir):
break
if len(os.listdir(outdir)) == 0:
break
if not os.path.exists(outdir):
os.makedirs(outdir)
self.outdir = outdir
print ("Loaded. Outdir: %s" % (self.outdir))
def create_spectrum(self):
plan = self.plan[self.index]
if self.picked[self.spx_ix] is None: return
self.SM[self.spx_ix].pixel_shift = self.pixel_shift[self.spx_ix]
X,Y = self.picked[self.spx_ix]
header = self.headers[self.spx_ix]
header = clean_header(header)
exptime = header['exptime']
if "object_diam" in plan: D = plan['object_diam']
else:
D = 2
plan['object_diam'] = D
header['extdiam'] = (D, 'Extraction diameter')
if "sky_annulus" in plan: s1,s2 = plan['sky_annulus']
else:
s1,s2 = 4,6
plan['sky_annulus'] = [s1,s2]
header['sky1'] = (s1, 'Sky inner annulus diameter')
header['sky2'] = (s2, 'Sky outer annulus diameter')
header['ext_pos'] = ("%s,%s" % (X,Y), 'Extraction position')
print "Extracting at: %s,%s: diam %s, sky %s/%s" % (X,Y,D,s1,s2)
sky_spec, all_sky_spec = self.SM[self.spx_ix].spectrum_in_annulus(
X,Y,small=s1,
large=s2)
wave = sky_spec["wave_nm"]
obj_spec, all_obj_spec = self.SM[self.spx_ix].spectrum_near_position(X,Y,
distance=D, onto=wave)
sky = [wave, sky_spec["spec_adu"]/len(all_sky_spec)/exptime*len(all_obj_spec)]
object = [wave, obj_spec["spec_adu"]/exptime]
self.sky_spec = sky
self.obj_spec = object
self.all_sky_spec = all_sky_spec
self.all_obj_spec = all_obj_spec
def draw_spectrum(self):
''' Draw the spectrum in figure(2)
'''
pl.figure(2)
pl.clf()
pl.xlim(350,920)
if self.picked[self.spx_ix] is None:
return
pl.xlabel("wavelength [nm]")
obj_spec = self.obj_spec[:]
sky_spec = self.sky_spec[:]
wave = obj_spec[0]
pl.step(obj_spec[0], obj_spec[1], linewidth=1)
pl.step(sky_spec[0], sky_spec[1], linewidth=1)
pl.step(sky_spec[0], obj_spec[1]-sky_spec[1], linewidth=2)
obj_spec[1] -= sky_spec[1]
correction = 1.0
if self.qecurve is not None:
correction = 1/self.qecurve(wave*10)
correction /= np.median(correction)
bad = (wave < 400) | (wave > 920) | (correction < 0)
correction[bad] =np.nan
if self.show_calib:
pl.step(wave, obj_spec[1]*correction)
pl.step(wave, sky_spec[1]*correction)
PH.transparent_legend(['o', 's', 'o-s', 'c x (o-s)', 'c x s'])
pl.xlim(350,920)
for line in self.olines:
pl.axvline(line)
for line in self.tlines:
pl.axvline(line, color='r')
def handle_shift(self, xdata, ydata):
if (xdata < 360) or (xdata > 1000): return
lines = np.concatenate((self.olines, self.tlines))
delts = (lines - xdata)
ix = np.argmin(np.abs(delts))
print "Closest to %f" % lines[ix]
line = lines[ix]
delt = delts[ix]
wave = self.sky_spec[0]
wix = np.nanargmin(np.abs(wave-line))
dw = wave[wix]-wave[wix-1]
print "Delt: {0}, dw: {1}".format(delt, dw)
self.pixel_shift[self.spx_ix] += delt/dw
print "pixel shift is: {0}".format(self.pixel_shift[self.spx_ix])
self.draw_spectrum()
def __call__(self, event):
'''Event call handler for Picker gui.'''
if event.name == 'button_press_event':
print event.xdata, event.ydata
self.picked[self.spx_ix] = (event.xdata, event.ydata)
self.create_spectrum()
self.draw_spectrum()
self.draw_selection_circle()
elif event.name == 'key_press_event':
if event.key == '\\':
print "Shifting"
self.handle_shift(event.xdata, event.ydata)
if event.key == '.':
self.next_spec()
if event.key == ',':
self.spx_ix -= 1
if self.spx_ix < 0:
self.spx_ix = 0
else:
self.draw()
self.draw_spectrum()
if event.key == 'c':
print "toggle show calib"
self.show_calib = not self.show_calib
self.draw_spectrum()
if event.key == 'n':
print "next"
self.index += 1
self.load()
self.draw()
if event.key == 'p':
print "prev"
self.index -= 1
self.load()
self.draw()
if event.key == '-':
self.subtract_sky = not self.subtract_sky
print "Substract sky: %s" % self.subtract_sky
self.draw()
if event.key == "u":
self.status[self.spx_ix] = "unsure"
self.dump()
self.draw()
if event.key == "b":
self.status[self.spx_ix] = "bad"
self.dump()
self.draw()
if event.key == "o":
self.status[self.spx_ix] = "ok"
self.dump()
self.next_spec()
if event.key == "s":
self.status[self.spx_ix] = "standard"
self.dump()
self.next_spec()
if event.key == 'h':
print """Help---
n - next
p - prev
- - subtract sky
u - unsure: there are targets visible, not sure which is correct.
b - bad: nothing visible
o - ok: target visible
"""
print event.key
def draw(self):
if self.subtract_sky:
sky = self.SM[self.spx_ix].sky_median()
sky_spec = sky['wave_nm'], sky['spec_adu']
else:
sky_spec = None
x,y,v = Extract.segmap_to_img(self.SM[self.spx_ix].SegMap,
sky_spec=sky_spec,
minl=500, maxl=700,positions=self.positions)
self.Xs = x
self.Ys = y
self.Values = v
self.draw_selection_circle()
def load_stds(self):
if self.stdpaths is None:
return
STDS = []
for stdpath in self.stdpaths:
try:
print stdpath
FF = pyfits.open(stdpath)
except:
print "Ignoring %s" % stdpath
continue
name = FF[0].header['OBJECT']
pl.figure(4)
pl.clf()
for std in Standards.keys():
if name.lower() in std.lower():
print name, std
ang = Standards[std][:,0]
stdflux = Standards[std][:,1] * 1e-16 # to erg/s/cm2/A
stdf = interp1d(ang, stdflux, bounds_error = False)
# stdf in ADU
obsl, obss, obsf = FF[0].data[0,:], FF[0].data[1,:], FF[0].data[2,:]
exptime = FF[0].header['EXPTIME']
std = (obsf - obss)/exptime
print "Std exptime: %s" % exptime
to_fit = std/stdf(obsl*10)
zero = (obsl> 990)
to_fit /= np.median(to_fit)
to_fit[zero] = 0.0
ok = (np.isfinite(to_fit)) & (obsl<1100) & (obsl>350)
poly= np.polyfit(obsl[ok]*10, to_fit[ok], 25)
qef = np.poly1d(poly)
print poly
pl.xlim([350, 950])
pl.plot(obsl[ok], to_fit[ok],'.')
pl.plot(obsl[ok], qef(obsl[ok]*10))
pl.xlim([350, 950])
def qe_clean_functor(qef):
def to_return(lam):
v = qef(lam)
v[lam<3750] = np.nan
v[lam>9500] = np.nan
return v
return to_return
self.qecurve = qe_clean_functor(qef)
def draw_selection_circle(self):
pl.figure(1, figsize=(9,8))
pl.ion()
pl.clf()
name = self.plan[self.index]['name']
if self.positions[0] == 'OnSkyX': diam = 0.5
else: diam = 1
pl.title("{0}:{1}. {2} of {3}".format(name,
self.status[self.spx_ix],
self.spx_ix,
len(self.SM)-1))
if self.subtract_sky:
vals = self.Values[:]
cut=25
vals[vals<-cut/4] = -cut/4
vals[vals>cut] = cut
else: vals = self.Values
v = vals[:]
#v[v<-100] = -100
#v[v>50000] = 50000
pl.scatter(self.Xs,
self.Ys,
c=vals,
s=40,
picker=diam,
marker='h')
pl.xlim(-100,2048+100)
pl.ylim(-100,2048+100)
pl.colorbar()
if self.picked[self.spx_ix] is not None:
X,Y = self.picked[self.spx_ix]
s1,s2 = self.plan[self.index]['sky_annulus']
print("Adding circle at {0}/{1}".format(X,Y))
obj_radius = self.plan[self.index]['object_diam']/2.0
obj_circle = pl.Circle((X,Y), radius=obj_radius, fill=False,
color='black',linewidth=2)
s1_circle = pl.Circle((X,Y), radius=s1/2.0, fill=False,
color='black',linewidth=2)
s2_circle = pl.Circle((X,Y), radius=s2/2.0, fill=False,
color='black',linewidth=2)
fig = pl.figure(1)
ax = fig.add_subplot(1,1,1)
ax.add_patch(obj_circle)
ax.add_patch(s1_circle)
ax.add_patch(s2_circle)
def next_spec(self):
self.spx_ix += 1
max_spx_ix = len(self.SM)
if self.spx_ix >= max_spx_ix:
self.spx_ix = max_spx_ix-1
else:
self.draw()
self.draw_spectrum()
self.draw_selection_circle()
| gpl-2.0 |
bmroach/Audio_Effects_Suite | Working-Directory/Flanger/mainFlanger.py | 2 | 2083 | """
Filename: mainFlanger.py
See README.md
Developed under the Apache License 2.0
"""
#______________________________________________________________________________
#Header Imports
import array
import contextlib
import wave
import matplotlib.pyplot as plt
import numpy as np
import math
import copy
import sys
sys.path.append('../Utilities')
import utilities as ut
import vocoder as vo
#______________________________________________________________________________
#Start mainFlanger.py
# Global parameters
dirIn = "../../Original-Audio-Samples/"
dirOut = "../../Output-Audio-Samples/Flanger/"
numChannels = 1 # mono
sampleWidth = 2 # in bytes, a 16-bit short
sampleRate = 44100
mulFactor = sampleRate * 10
#______________________________________________________________________________
def flanger(signal, delay=20):
"""delay in milliseconds"""
delaySamples = int(44.1*delay)
length = len(signal)
signal1 = [x*.5 for x in signal]
signal2 = copy.deepcopy(signal1)
outputSignal = []
for i in range(length):
if i < length-delaySamples:
outputSignal += [ (signal1[i]+ signal2[i+delaySamples]) ]
else:
outputSignal += [ (signal1[i] * 2) ]
outputSignal = np.array(outputSignal)
outputSignal = vo.vocoder(outputSignal,P=.5)
outputSignal = np.ndarray.tolist(outputSignal)
outputSignal = [int(x) for x in outputSignal]
return outputSignal
def flangerDemo():
jfk = ut.readWaveFile(dirIn+"jfk.wav")
jfkFlanger = flanger(jfk)
ut.writeWaveFile(dirOut + "JFK_Flanger.wav", jfkFlanger)
piano = ut.readWaveFile(dirIn+"piano.wav")
pianoFlanger = flanger(piano)
ut.writeWaveFile(dirOut + "Piano_Flanger.wav", pianoFlanger)
violin = ut.readWaveFile(dirIn+"Violin2.wav")
violinFlanger = flanger(violin)
ut.writeWaveFile(dirOut + "Violin_Flanger.wav", violinFlanger)
print("Flanger Demo Complete.")
flangerDemo()
| apache-2.0 |
gcvalderrama/Palantir | Classification/trainer.py | 1 | 11921 | """
This script processes all the clean news articles to determine whether they fall into the 'policiales' (attack) or 'nonattack' category
"""
import os
import glob
import random
import pickle
import unicodedata
from nltk import NaiveBayesClassifier, FreqDist, classify
from nltk.corpus import stopwords, PlaintextCorpusReader
from nltk.classify.scikitlearn import SklearnClassifier
from nltk.tokenize import word_tokenize
from sklearn.feature_extraction.text import TfidfVectorizer, CountVectorizer
from sklearn.naive_bayes import MultinomialNB, GaussianNB, BernoulliNB
from sklearn.linear_model import LogisticRegression, SGDClassifier
from sklearn.svm import SVC, LinearSVC, NuSVC
from sklearn.pipeline import Pipeline
from string import punctuation
ADDITIONAL_STOPWORDS = ['Tags', 'MÁS', 'EN', '.+MÁS', '+Tags', '...', ',', '.', '[', ']', '"', '(',
')', '…', 'el', 'la', 'los', 'uno', 'una', '-', ':', '``', "''"]
ALL_STOPWORDS = set(stopwords.words('spanish') + ADDITIONAL_STOPWORDS)
def get_documents_words(news_files, corpus_news):
"""
Given a set of documents, return their words together with their
respective categories (taken from the file-name prefix before '--')
:param news_files: List of raw news file names
:param corpus_news: PlaintextCorpusReader object
:return: Shuffled list of (words, category) tuples
"""
root = corpus_news.root
news = []
for file in news_files:
category = file.split('/')[-1].split('--')[0]
file_name = file.replace(root, '', 1)
words = corpus_news.words(file_name[1:])
news.append((list(words), category))
random.shuffle(news)
return news
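# Example of the file-name convention assumed above (hypothetical name): a
# file such as "train/attack--some-news-article.txt" yields the category
# "attack" for the words read from that file.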
def find_features(document, word_features):
"""
Given a document and word_features as the universe of words,
return the document's feature dictionary
:param document: Sequence of words to generate the features for
:param word_features: Universe of words
:return: Dictionary mapping each feature word to True/False
"""
words = set(document)
features = {}
for w in word_features:
features[w] = (w in words)
return features
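# Minimal usage sketch (hypothetical words, for illustration only):
#
#   word_features = ['robo', 'policia', 'futbol']
#   find_features(['hubo', 'un', 'robo'], word_features)
#   # -> {'robo': True, 'policia': False, 'futbol': False}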
def vectorize_documents(root_folder, extension='txt'):
"""
Under construction: vectorize the documents in root_folder with TF-IDF
:param root_folder: Folder containing the news files
:param extension: File extension to search for
:return: None (prints the transformed training features)
"""
# pipeline = Pipeline([
# ('vectorizer', CountVectorizer()),
# ('tfidfTrans', TfidfVectorizer()),
# ('sgdclf', SGDClassifier(loss='modified_huber'))
# ])
#
# params = { 'vectorizer__max_df': (0.5, 1.0),
# 'vectorizer__ngram_range': [(1, 1), (1, 2)],
# 'tfidfTrans__use_idf': (True, False),
# 'sgdclf__alpha': (0.0001, 0.00001, 0.000001)
# }
vectorizer = TfidfVectorizer(sublinear_tf=True, max_df=0.5)
news_files = glob.glob(root_folder + "/*." + extension)
news_content = []
for file in news_files:
with open(file, 'r') as news_file:
content = news_file.read()
news_content.append(content)
features_train = news_content[:3]
features_test = news_content[150:]
fit = vectorizer.fit(features_train)
features_train_transformed = vectorizer.fit_transform(features_train)
features_test_transformed = vectorizer.transform(features_test)
# print(fit)
for w in features_train:
print(w)
# print(features_train)
print('TRAINED FEATURES: \n', features_train_transformed[0])
# print('TESTED FEATURES: \n', features_test_transformed[0])
def train_classifier(root_folder, train_folder, devtest_folder, files_extension='txt'):
"""
Generates .pickle file with trained classifier and words universe
:param root_folder: Folder that contains train and devtest folders
:param train_folder: Folder inside root_folder that contains the news to train with
:param devtest_folder: Folder inside root_folder that contains the news to test with
:param files_extension: File extension to search for
:return: void - empty
"""
train_news_files = glob.glob(root_folder + "/" + train_folder + "/*." + files_extension)
devtest_news_files = glob.glob(root_folder + "/" + devtest_folder + "/*." + files_extension)
corpus_news = PlaintextCorpusReader(root_folder, '.*\.' + files_extension)
words_train_docs = get_documents_words(train_news_files, corpus_news)
words_devtest_docs = get_documents_words(devtest_news_files, corpus_news)
all_words = FreqDist(word.lower() for word in corpus_news.words())
word_features = list(all_words.keys())
with open('word_features.pickle', 'wb') as words_saver:
pickle.dump(word_features, words_saver)
training_set = [(find_features(news, word_features), category) for (news, category) in words_train_docs]
with open('training_set.pickle', 'wb') as training_writer:
pickle.dump(training_set, training_writer)
testing_set = [(find_features(news, word_features), category) for (news, category) in words_devtest_docs]
with open('devtesting_set.pickle', 'wb') as devtesting_writer:
pickle.dump(testing_set, devtesting_writer)
classifier = NaiveBayesClassifier.train(training_set)
accuracy = classify.accuracy(classifier, testing_set)
print('Naive Bayes accuracy percent: ', (accuracy * 100))
classifier.show_most_informative_features(20)
# saves classifier progress to a pickle file
with open('naives_classifier.pickle', 'wb') as save_classifier:
pickle.dump(classifier, save_classifier)
def compare_classifiers_accuracy():
with open('naives_classifier.pickle', 'rb') as read_classifier:
naive_bayes_classifier = pickle.load(read_classifier)
with open('training_set.pickle', 'rb') as training_reader:
training_set = pickle.load(training_reader)
with open('devtesting_set.pickle', 'rb') as devtesting_reader:
devtesting_set = pickle.load(devtesting_reader)
accuracy = classify.accuracy(naive_bayes_classifier, devtesting_set)
print('Naive Bayes accuracy percent: ', (accuracy * 100))
mnb_classifier = SklearnClassifier(MultinomialNB())
mnb_classifier.train(training_set)
mnb_accuracy = classify.accuracy(mnb_classifier, devtesting_set)
print('MNB accuracy percent: ', (mnb_accuracy * 100))
bernoullinb_classifier = SklearnClassifier(BernoulliNB())
bernoullinb_classifier.train(training_set)
bernoullinb_accuracy = classify.accuracy(bernoullinb_classifier, devtesting_set)
print('BernoulliNB accuracy percent: ', (bernoullinb_accuracy * 100))
# gaussiannb_classifier = SklearnClassifier(GaussianNB())
# gaussiannb_classifier.train(training_set)
# gaussiannb_accuracy = classify.accuracy(gaussiannb_classifier, devtesting_set)
# print('GaussianNB accuracy percent: ', (gaussiannb_accuracy * 100))
logisticregression_classifier = SklearnClassifier(LogisticRegression())
logisticregression_classifier.train(training_set)
logisticregression_accuracy = classify.accuracy(logisticregression_classifier, devtesting_set)
print('LogisticRegression accuracy percent: ', (logisticregression_accuracy * 100))
sgdclassifier_classifier = SklearnClassifier(SGDClassifier())
sgdclassifier_classifier.train(training_set)
sgdclassifier_accuracy = classify.accuracy(sgdclassifier_classifier, devtesting_set)
print('SGDClassifier accuracy percent: ', (sgdclassifier_accuracy * 100))
svc_classifier = SklearnClassifier(SVC())
svc_classifier.train(training_set)
svc_accuracy = classify.accuracy(svc_classifier, devtesting_set)
print('SVC accuracy percent: ', (svc_accuracy * 100))
linearsvc_classifier = SklearnClassifier(LinearSVC())
linearsvc_classifier.train(training_set)
linearsvc_accuracy = classify.accuracy(linearsvc_classifier, devtesting_set)
print('LinearSVC accuracy percent: ', (linearsvc_accuracy * 100))
nusvc_classifier = SklearnClassifier(NuSVC())
nusvc_classifier.train(training_set)
nusvc_accuracy = classify.accuracy(nusvc_classifier, devtesting_set)
print('NuSVC accuracy percent: ', (nusvc_accuracy * 100))
def classify_document(file_name):
"""
Load the word features and Naive Bayes classifier from pickle files and classify a file
:param file_name: File name of the clean text
:return: Category predicted for the given text
"""
# load the pickle file with the classifier progress
with open('naives_classifier.pickle', 'rb') as read_classifier:
naive_bayes_classifier = pickle.load(read_classifier)
with open('word_features.pickle', 'rb') as words_reader:
word_features = pickle.load(words_reader)
with open(file_name, 'r') as file_text:
text = file_text.read()
text_feature = find_features(text.split(), word_features)  # split into tokens so features match the training representation
result = naive_bayes_classifier.classify(text_feature)
return result
def clean_tokenize(doc):
"""
Clean document, removing accents, punctuation and symbols
:param doc: string to clean
:return: string cleaned without punctuation and stop words
"""
doc = doc.replace('\n', ' ').replace('\r', '').replace('”', '').replace('“', '')
nfkd_form = unicodedata.normalize('NFKD', doc)
unicode_doc = u"".join([c for c in nfkd_form if not unicodedata.combining(c)]).lower()
clean_doc = unicode_doc.translate(punctuation)
words = word_tokenize(clean_doc)
clean = []
for word in words:
if word not in ALL_STOPWORDS:
clean.append(word)
return clean
def tokenize_files(source_folder, destination_folder):
"""
Search for all the txt files in source folder and clean them
:param source_folder: Source folder with news to clean
:param destination_folder: Destination folder where news will be created
:return: void - Generates all the destination files with clean text
"""
news = glob.glob(source_folder + "/*.txt")
for news_file in news:
file_name = news_file.split('/')[1]
with open(news_file, 'r') as original:
doc_text = original.read()
tokenize_cont = clean_tokenize(doc_text)
with open(destination_folder+"/"+file_name, 'w') as modified:
modified.write(' '.join(tokenize_cont))
def docs_tfidf(file, max_features=5000, ngram_range=(1, 1), max_df=.8):
"""
:param file: Value passed to TfidfVectorizer's `input` argument ('filename', 'file' or 'content')
:param max_features: Maximum vocabulary size
:param ngram_range: N-gram range used for tokenization
:param max_df: Ignore terms with a document frequency above this threshold
:return: Unfitted TfidfVectorizer configured with the given parameters
"""
vec = TfidfVectorizer(input=file,
max_features=max_features,
ngram_range=ngram_range,
max_df=max_df)
return vec
# X = vec.fit_transform(clean_articles)
# return X, vec
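# Hedged usage sketch (hypothetical strings; assumes input='content' so that
# fit_transform receives raw text rather than file names):
#
#   vec = docs_tfidf(file='content')
#   X = vec.fit_transform(['primer documento limpio', 'segundo documento limpio'])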
def rename_files(source_folder, extension='txt'):
"""
:param source_folder:
:param extension:
:return:
"""
news = glob.glob(source_folder + "/*." + extension)
for news_file in news:
if news_file.startswith(source_folder + "/policiales--"):
updated_name = source_folder + '/attack--' + news_file.split('/')[-1].split('--')[-1]
os.rename(news_file, updated_name)
elif not news_file.startswith(source_folder + "/attack--"):
updated_name = source_folder + '/nonattack--' + news_file.split('/')[-1].split('--')[-1]
os.rename(news_file, updated_name)
def remove_first_line(source_folder, extension):
"""
:param source_folder:
:param extension:
:return:
"""
files = glob.glob(source_folder + '/*.' + extension)
for file in files:
with open(file, 'r') as fin:
data = fin.read().splitlines(True)
with open(file, 'w') as fout:
fout.writelines(data[1:])
# remove_first_line('cleanNews', 'txt')
# tokenize_files('cleanNews', 'corporaNews')
# train_classifier('corpusnews', 'train', 'devtest')
compare_classifiers_accuracy()
# classify_document('corporaNews/nonattack--india-espera-compartir-experiencias-gobierno-ppk-noticia-1910298.txt')
# vectorize_documents('corporaNews','txt')
| bsd-2-clause |
iGEM-QSF/SimCircus | visualize.py | 1 | 7330 | #!/usr/bin/env python
import matplotlib
matplotlib.use('TkAgg')
#from numpy import arange, sin, pi
from matplotlib.backends.backend_tkagg import FigureCanvasTkAgg, \
NavigationToolbar2TkAgg
#from matplotlib import pyplot as plt
from matplotlib.figure import Figure
import sys
if sys.version_info[0] < 3:
import Tkinter as Tk
else:
import tkinter as Tk
class Visualization(object):
"""
Visualization is a class for visualizing any kind of time series data in
real time.
@requires:
simulation, object
@attrs
simulation.data, dictionary
Includes all the parameter names as keys and the time series
(list of values) as the values
simulation.timesteps
List of timesteps (float or integer),
same dimension as every parameter
simulation.blue_intensity / simulation.red_intensity, float
Light intensity parameters with values between 0.0 and 1.0 (set via the GUI sliders)
start()
Initializes the GUI, required to visualize the simulation
update()
Updates the data in the graph
start() has to be run before calling this function
Example 1:
vis = Visualization(simulation)
vis.start()
for i in range(1000):
simulation.iteration()
vis.update()
The previous example updates the simulation data in each timestep
Example 2:
import random
import threading
class FakeSimulation(threading.Thread):
def __init__(self):
#Threading actions, non threaded example may be feasible
#now that a new visualization version was made.
threading.Thread.__init__(self)
self.data = {
"Parameter 1": [],
"Parameter 2": []
}
self.timesteps = []
self.vis = Visualization(self)
self.vis.start()
def run(self):
for i in range(100):
print i
self.data["Parameter 1"].append(random.randint(1, 15))
self.data["Parameter 2"].append(random.randint(1, 15))
self.timesteps.append(i)
self.vis.update()
sim = FakeSimulation()
sim.start() #May not be necessary
The example 2 is a ready copy-and-paste demonstration of how this works.
"""
def __init__(self, simulation):
#Initialize drawing parameters
self.simulation = simulation
self.legend = simulation.data.keys()
self.toggle_list = [1 for i in simulation.data.keys()]
self.colors = ['b', 'g', 'r', 'c', 'm', 'y', 'k', 'm:', 'b:']
self.graph = 0
def start(self):
#Tkinter related actions
self.root = Tk.Tk()
self.root.wm_title('SimCircusVisualizer 3000 Ultra+ by Aalto-Helsinki')
button = Tk.Button(master=self.root, text='Quit', command=self._quit)
button.pack(side=Tk.BOTTOM)
# Creating the right sidebar
self.button_frame = Tk.Frame(self.root)
buttons = []
for ind, parameter in enumerate(self.simulation.data.keys()):
temp = Tk.Button(
master=self.button_frame,
text=parameter,
command=lambda ind=ind: self.toggle(ind))
temp.pack(side=Tk.TOP)
buttons.append(temp)
self.blue_intensity = Tk.DoubleVar()
scale_blue = Tk.Scale(
self.button_frame,
variable=self.blue_intensity,
label="Blue intensity",
from_=1.0,
to=0.0,
resolution=0.01,
command=self.set_blue_intensity)
scale_blue.pack(side=Tk.BOTTOM)
self.red_intensity = Tk.DoubleVar()
scale_red = Tk.Scale(
self.button_frame,
variable=self.red_intensity,
label="Red intensity",
from_=1.0,
to=0.0,
resolution=0.01,
command=self.set_red_intensity)
scale_red.pack(side=Tk.BOTTOM)
toggle = Tk.Button(
master=self.button_frame,
text="Graph",
command=self.toggle_graph)
toggle.pack(side=Tk.TOP)
self.button_frame.pack(side=Tk.RIGHT)
# DrawingArea
self.figure = Figure(figsize=(10, 8), dpi=100)
self.canvas = FigureCanvasTkAgg(self.figure, master=self.root)
self.canvas.show()
self.canvas.get_tk_widget().pack(side=Tk.TOP, fill=Tk.BOTH, expand=1)
self.toolbar = NavigationToolbar2TkAgg(self.canvas, self.root)
self.toolbar.update()
self.canvas._tkcanvas.pack(side=Tk.TOP, fill=Tk.BOTH, expand=1)
self.root.after(0, self.simulation.start)
self.root.mainloop()
def toggle_graph(self):
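        # Cycle through the three display modes used by update():
        #   0 -- full time course only, 1 -- last 50 timesteps only,
        #   2 -- both panels stacked vertically.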
self.graph += 1
if self.graph == 3:
self.graph = 0
def update(self):
'''
        Redraws the graph with the current simulation data
'''
active_legend = [legend for ind, legend in enumerate(self.legend)
if self.toggle_list[ind]]
self.figure.clf()
if self.graph in [0, 2]:
subplot = 111
if self.graph == 2:
subplot = 211
a = self.figure.add_subplot(
subplot,
xlabel="Time",
ylabel="Protein concentration")
for ind, parameter in enumerate(self.simulation.data.keys()):
if self.toggle_list[ind]:
a.plot(
self.simulation.timesteps,
self.simulation.data.get(parameter),
self.colors[ind])
a.set_xlim(0, max(self.simulation.timesteps))
if self.graph in [1, 2]:
subplot = 111
if self.graph == 2:
subplot = 212
a = self.figure.add_subplot(
subplot,
xlabel="Time",
ylabel="Protein concentration")
for ind, parameter in enumerate(self.simulation.data.keys()):
if self.toggle_list[ind]:
a.plot(
self.simulation.timesteps[-50:],
self.simulation.data.get(parameter)[-50:],
self.colors[ind])
a.set_xlim(
max([0, self.simulation.timesteps[max(
[0, len(self.simulation.timesteps) - 50]
)
]]),
max(self.simulation.timesteps)
)
        a.legend(active_legend, loc="upper left")
self.canvas.show()
self.toolbar.update()
def set_blue_intensity(self, current_value):
self.simulation.blue_intensity = float(current_value)
def set_red_intensity(self, current_value):
self.simulation.red_intensity = float(current_value)
def toggle(self, ind):
if self.toggle_list[ind]:
self.toggle_list[ind] = 0
else:
self.toggle_list[ind] = 1
self.update()
def _quit(self):
self.root.quit() # stops mainloop
self.root.destroy() # this is necessary on Windows to prevent
# Fatal Python Error: PyEval_RestoreThread: NULL tstate
| mit |
deepchem/deepchem | examples/kaggle/KAGGLE_rf_model.py | 6 | 3245 | """
Script that trains RF model on KAGGLE datasets.
"""
from __future__ import print_function
from __future__ import division
from __future__ import unicode_literals
import os
import numpy as np
import tempfile
import shutil
import deepchem as dc
from sklearn.ensemble import RandomForestRegressor
from deepchem.molnet import load_kaggle
###Load data###
np.random.seed(123)
shard_size = 2000
num_trials = 5
print("About to load KAGGLE data.")
KAGGLE_tasks, datasets, transformers = load_kaggle(shard_size=shard_size)
train_dataset, valid_dataset, test_dataset = datasets
print("Number of compounds in train set")
print(len(train_dataset))
print("Number of compounds in validation set")
print(len(valid_dataset))
print("Number of compounds in test set")
print(len(test_dataset))
num_features = train_dataset.get_data_shape()[0]
print("Num features: %d" % num_features)
metric = dc.metrics.Metric(dc.metrics.pearson_r2_score, task_averager=np.mean)
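# task_model_builder (below) constructs the per-task scikit-learn random forest
# wrapped as a DeepChem SklearnModel.  Note that n_estimators=1 appears to be a
# reduced setting, presumably to keep this script fast; the commented-out values
# (n_estimators=100, max_features=num_features/3) correspond to the full model.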
def task_model_builder(model_dir):
sklearn_model = RandomForestRegressor(
#n_estimators=100, max_features=int(num_features/3),
n_estimators=1,
max_features=int(num_features / 3),
min_samples_split=5,
n_jobs=-1)
return dc.models.SklearnModel(sklearn_model, model_dir)
all_results = []
for trial in range(num_trials):
print("Starting trial %d" % trial)
model = dc.models.SingletaskToMultitask(KAGGLE_tasks, task_model_builder)
print("Training model")
model.fit(train_dataset)
print("Evaluating models")
train_score, train_task_scores = model.evaluate(
train_dataset, [metric], transformers, per_task_metrics=True)
valid_score, valid_task_scores = model.evaluate(
valid_dataset, [metric], transformers, per_task_metrics=True)
test_score, test_task_scores = model.evaluate(
test_dataset, [metric], transformers, per_task_metrics=True)
all_results.append((train_score, train_task_scores, valid_score,
valid_task_scores, test_score, test_task_scores))
print("----------------------------------------------------------------")
print("Scores for trial %d" % trial)
print("----------------------------------------------------------------")
print("train_task_scores")
print(train_task_scores)
print("Mean Train score")
print(train_score)
print("valid_task_scores")
print(valid_task_scores)
print("Mean Validation score")
print(valid_score)
print("test_task_scores")
print(test_task_scores)
print("Mean Test score")
print(test_score)
print("####################################################################")
for trial in range(num_trials):
(train_score, train_task_scores, valid_score, valid_task_scores, test_score,
test_task_scores) = all_results[trial]
print("----------------------------------------------------------------")
print("Scores for trial %d" % trial)
print("----------------------------------------------------------------")
print("train_task_scores")
print(train_task_scores)
print("Mean Train score")
print(train_score)
print("valid_task_scores")
print(valid_task_scores)
print("Mean Validation score")
print(valid_score)
print("test_task_scores")
print(test_task_scores)
print("Mean Test score")
print(test_score)
| mit |
toastedcornflakes/scikit-learn | sklearn/feature_selection/tests/test_rfe.py | 33 | 10515 | """
Testing Recursive feature elimination
"""
import warnings
import numpy as np
from numpy.testing import assert_array_almost_equal, assert_array_equal
from nose.tools import assert_equal, assert_true
from scipy import sparse
from sklearn.feature_selection.rfe import RFE, RFECV
from sklearn.datasets import load_iris, make_friedman1
from sklearn.metrics import zero_one_loss
from sklearn.svm import SVC, SVR
from sklearn.ensemble import RandomForestClassifier
from sklearn.model_selection import cross_val_score
from sklearn.utils import check_random_state
from sklearn.utils.testing import ignore_warnings
from sklearn.utils.testing import assert_warns_message
from sklearn.utils.testing import assert_greater
from sklearn.metrics import make_scorer
from sklearn.metrics import get_scorer
class MockClassifier(object):
"""
Dummy classifier to test recursive feature elimination
"""
def __init__(self, foo_param=0):
self.foo_param = foo_param
def fit(self, X, Y):
assert_true(len(X) == len(Y))
self.coef_ = np.ones(X.shape[1], dtype=np.float64)
return self
def predict(self, T):
return T.shape[0]
predict_proba = predict
decision_function = predict
transform = predict
def score(self, X=None, Y=None):
if self.foo_param > 1:
score = 1.
else:
score = 0.
return score
def get_params(self, deep=True):
return {'foo_param': self.foo_param}
def set_params(self, **params):
return self
def test_rfe_features_importance():
generator = check_random_state(0)
iris = load_iris()
X = np.c_[iris.data, generator.normal(size=(len(iris.data), 6))]
y = iris.target
clf = RandomForestClassifier(n_estimators=20,
random_state=generator, max_depth=2)
rfe = RFE(estimator=clf, n_features_to_select=4, step=0.1)
rfe.fit(X, y)
assert_equal(len(rfe.ranking_), X.shape[1])
clf_svc = SVC(kernel="linear")
rfe_svc = RFE(estimator=clf_svc, n_features_to_select=4, step=0.1)
rfe_svc.fit(X, y)
# Check if the supports are equal
assert_array_equal(rfe.get_support(), rfe_svc.get_support())
def test_rfe():
generator = check_random_state(0)
iris = load_iris()
X = np.c_[iris.data, generator.normal(size=(len(iris.data), 6))]
X_sparse = sparse.csr_matrix(X)
y = iris.target
# dense model
clf = SVC(kernel="linear")
rfe = RFE(estimator=clf, n_features_to_select=4, step=0.1)
rfe.fit(X, y)
X_r = rfe.transform(X)
clf.fit(X_r, y)
assert_equal(len(rfe.ranking_), X.shape[1])
# sparse model
clf_sparse = SVC(kernel="linear")
rfe_sparse = RFE(estimator=clf_sparse, n_features_to_select=4, step=0.1)
rfe_sparse.fit(X_sparse, y)
X_r_sparse = rfe_sparse.transform(X_sparse)
assert_equal(X_r.shape, iris.data.shape)
assert_array_almost_equal(X_r[:10], iris.data[:10])
assert_array_almost_equal(rfe.predict(X), clf.predict(iris.data))
assert_equal(rfe.score(X, y), clf.score(iris.data, iris.target))
assert_array_almost_equal(X_r, X_r_sparse.toarray())
def test_rfe_mockclassifier():
generator = check_random_state(0)
iris = load_iris()
X = np.c_[iris.data, generator.normal(size=(len(iris.data), 6))]
y = iris.target
# dense model
clf = MockClassifier()
rfe = RFE(estimator=clf, n_features_to_select=4, step=0.1)
rfe.fit(X, y)
X_r = rfe.transform(X)
clf.fit(X_r, y)
assert_equal(len(rfe.ranking_), X.shape[1])
assert_equal(X_r.shape, iris.data.shape)
def test_rfecv():
generator = check_random_state(0)
iris = load_iris()
X = np.c_[iris.data, generator.normal(size=(len(iris.data), 6))]
y = list(iris.target) # regression test: list should be supported
# Test using the score function
rfecv = RFECV(estimator=SVC(kernel="linear"), step=1, cv=5)
rfecv.fit(X, y)
# non-regression test for missing worst feature:
assert_equal(len(rfecv.grid_scores_), X.shape[1])
assert_equal(len(rfecv.ranking_), X.shape[1])
X_r = rfecv.transform(X)
# All the noisy variable were filtered out
assert_array_equal(X_r, iris.data)
# same in sparse
rfecv_sparse = RFECV(estimator=SVC(kernel="linear"), step=1, cv=5)
X_sparse = sparse.csr_matrix(X)
rfecv_sparse.fit(X_sparse, y)
X_r_sparse = rfecv_sparse.transform(X_sparse)
assert_array_equal(X_r_sparse.toarray(), iris.data)
# Test using a customized loss function
scoring = make_scorer(zero_one_loss, greater_is_better=False)
rfecv = RFECV(estimator=SVC(kernel="linear"), step=1, cv=5,
scoring=scoring)
ignore_warnings(rfecv.fit)(X, y)
X_r = rfecv.transform(X)
assert_array_equal(X_r, iris.data)
# Test using a scorer
scorer = get_scorer('accuracy')
rfecv = RFECV(estimator=SVC(kernel="linear"), step=1, cv=5,
scoring=scorer)
rfecv.fit(X, y)
X_r = rfecv.transform(X)
assert_array_equal(X_r, iris.data)
# Test fix on grid_scores
def test_scorer(estimator, X, y):
return 1.0
rfecv = RFECV(estimator=SVC(kernel="linear"), step=1, cv=5,
scoring=test_scorer)
rfecv.fit(X, y)
assert_array_equal(rfecv.grid_scores_, np.ones(len(rfecv.grid_scores_)))
# Same as the first two tests, but with step=2
rfecv = RFECV(estimator=SVC(kernel="linear"), step=2, cv=5)
rfecv.fit(X, y)
assert_equal(len(rfecv.grid_scores_), 6)
assert_equal(len(rfecv.ranking_), X.shape[1])
X_r = rfecv.transform(X)
assert_array_equal(X_r, iris.data)
rfecv_sparse = RFECV(estimator=SVC(kernel="linear"), step=2, cv=5)
X_sparse = sparse.csr_matrix(X)
rfecv_sparse.fit(X_sparse, y)
X_r_sparse = rfecv_sparse.transform(X_sparse)
assert_array_equal(X_r_sparse.toarray(), iris.data)
def test_rfecv_mockclassifier():
generator = check_random_state(0)
iris = load_iris()
X = np.c_[iris.data, generator.normal(size=(len(iris.data), 6))]
y = list(iris.target) # regression test: list should be supported
# Test using the score function
rfecv = RFECV(estimator=MockClassifier(), step=1, cv=5)
rfecv.fit(X, y)
# non-regression test for missing worst feature:
assert_equal(len(rfecv.grid_scores_), X.shape[1])
assert_equal(len(rfecv.ranking_), X.shape[1])
def test_rfe_estimator_tags():
rfe = RFE(SVC(kernel='linear'))
assert_equal(rfe._estimator_type, "classifier")
# make sure that cross-validation is stratified
iris = load_iris()
score = cross_val_score(rfe, iris.data, iris.target)
assert_greater(score.min(), .7)
def test_rfe_min_step():
n_features = 10
X, y = make_friedman1(n_samples=50, n_features=n_features, random_state=0)
n_samples, n_features = X.shape
estimator = SVR(kernel="linear")
# Test when floor(step * n_features) <= 0
selector = RFE(estimator, step=0.01)
sel = selector.fit(X, y)
assert_equal(sel.support_.sum(), n_features // 2)
# Test when step is between (0,1) and floor(step * n_features) > 0
selector = RFE(estimator, step=0.20)
sel = selector.fit(X, y)
assert_equal(sel.support_.sum(), n_features // 2)
# Test when step is an integer
selector = RFE(estimator, step=5)
sel = selector.fit(X, y)
assert_equal(sel.support_.sum(), n_features // 2)
def test_number_of_subsets_of_features():
# In RFE, 'number_of_subsets_of_features'
# = the number of iterations in '_fit'
# = max(ranking_)
# = 1 + (n_features + step - n_features_to_select - 1) // step
# After optimization #4534, this number
# = 1 + np.ceil((n_features - n_features_to_select) / float(step))
# This test case is to test their equivalence, refer to #4534 and #3824
def formula1(n_features, n_features_to_select, step):
return 1 + ((n_features + step - n_features_to_select - 1) // step)
def formula2(n_features, n_features_to_select, step):
return 1 + np.ceil((n_features - n_features_to_select) / float(step))
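    # Hand-worked check of the two cases exercised below (added illustration,
    # not part of the original test):
    #   n_features=11, n_features_to_select=3, step=2:
    #     formula1 = 1 + (11 + 2 - 3 - 1) // 2 = 1 + 4 = 5
    #     formula2 = 1 + ceil((11 - 3) / 2.)   = 1 + 4 = 5
    #   n_features=11, n_features_to_select=3, step=3:
    #     formula1 = 1 + (11 + 3 - 3 - 1) // 3 = 1 + 3 = 4
    #     formula2 = 1 + ceil((11 - 3) / 3.)   = 1 + 3 = 4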
# RFE
# Case 1, n_features - n_features_to_select is divisible by step
# Case 2, n_features - n_features_to_select is not divisible by step
n_features_list = [11, 11]
n_features_to_select_list = [3, 3]
step_list = [2, 3]
for n_features, n_features_to_select, step in zip(
n_features_list, n_features_to_select_list, step_list):
generator = check_random_state(43)
X = generator.normal(size=(100, n_features))
y = generator.rand(100).round()
rfe = RFE(estimator=SVC(kernel="linear"),
n_features_to_select=n_features_to_select, step=step)
rfe.fit(X, y)
# this number also equals to the maximum of ranking_
assert_equal(np.max(rfe.ranking_),
formula1(n_features, n_features_to_select, step))
assert_equal(np.max(rfe.ranking_),
formula2(n_features, n_features_to_select, step))
# In RFECV, 'fit' calls 'RFE._fit'
# 'number_of_subsets_of_features' of RFE
# = the size of 'grid_scores' of RFECV
# = the number of iterations of the for loop before optimization #4534
# RFECV, n_features_to_select = 1
# Case 1, n_features - 1 is divisible by step
# Case 2, n_features - 1 is not divisible by step
n_features_to_select = 1
n_features_list = [11, 10]
step_list = [2, 2]
for n_features, step in zip(n_features_list, step_list):
generator = check_random_state(43)
X = generator.normal(size=(100, n_features))
y = generator.rand(100).round()
rfecv = RFECV(estimator=SVC(kernel="linear"), step=step, cv=5)
rfecv.fit(X, y)
assert_equal(rfecv.grid_scores_.shape[0],
formula1(n_features, n_features_to_select, step))
assert_equal(rfecv.grid_scores_.shape[0],
formula2(n_features, n_features_to_select, step))
def test_rfe_cv_n_jobs():
generator = check_random_state(0)
iris = load_iris()
X = np.c_[iris.data, generator.normal(size=(len(iris.data), 6))]
y = iris.target
rfecv = RFECV(estimator=SVC(kernel='linear'))
rfecv.fit(X, y)
rfecv_ranking = rfecv.ranking_
rfecv_grid_scores = rfecv.grid_scores_
rfecv.set_params(n_jobs=2)
rfecv.fit(X, y)
assert_array_almost_equal(rfecv.ranking_, rfecv_ranking)
assert_array_almost_equal(rfecv.grid_scores_, rfecv_grid_scores)
| bsd-3-clause |
losonczylab/Zaremba_NatNeurosci_2017 | losonczy_analysis_bundle/lab/figures/analysisFigures.py | 1 | 99631 | """Figure generating functions to accompany behavior_analysis,
used by automated scripts.
All functions should return either a figure or a list of figures.
"""
import matplotlib
import matplotlib.pyplot as plt
import numpy as np
import itertools as it
from scipy.misc import comb
try:
from bottleneck import nanmean, nanstd
except ImportError:
from numpy import nanmean, nanstd
from warnings import warn
import pandas as pd
import seaborn.apionly as sns
from collections import defaultdict
from copy import copy
import lab
from ..analysis import imaging_analysis as ia
from ..analysis import signals_analysis as sa
from ..analysis import reward_analysis as ra
from ..analysis import intervals as inter
from ..analysis import filters as af
from ..classes.classes import ExperimentGroup as eg
from .. import plotting
from .. import misc
from ..plotting import plot_metric, plot_paired_metrics, color_cycle
import lab.plotting.analysis_plotting as ap
def activityByExposureFigure(exptGrp, rasterized=False, **kwargs):
fig, axs = plt.subplots(2, 3, figsize=(15, 8))
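    # 2x3 grid of exposure plots -- top row: mean, responseMagnitude and
    # norm transient auc2; bottom row: amplitude, duration and frequency.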
ap.activityByExposure(
exptGrp, ax=axs[0][0], stat='mean', rasterized=rasterized, **kwargs)
ap.activityByExposure(
exptGrp, ax=axs[0][1], stat='responseMagnitude',
rasterized=rasterized, **kwargs)
ap.activityByExposure(
exptGrp, ax=axs[0][2], stat='norm transient auc2',
rasterized=rasterized, **kwargs)
ap.activityByExposure(
exptGrp, ax=axs[1][0], stat='amplitude', rasterized=rasterized,
**kwargs)
ap.activityByExposure(
exptGrp, ax=axs[1][1], stat='duration', rasterized=rasterized,
**kwargs)
ap.activityByExposure(
exptGrp, ax=axs[1][2], stat='frequency', rasterized=rasterized,
**kwargs)
return fig
def activityComparisonFigure(exptGrp, method='mean', rasterized=False):
nCols = 4
exposure = exptGrp.priorDaysOfExposure(ignoreContext=False)
pairs = it.combinations(exptGrp, 2)
nPairs = 0
valid_pairs = []
# A valid experiment pair is from the same mouse and either the same
# context or the same day of exposure
for pair in pairs:
if (pair[0].parent == pair[1].parent) \
and (pair[0].sameContext(pair[1]) or
(exposure[pair[0]] == exposure[pair[1]])):
valid_pairs.append(pair)
nPairs += 1
nFigs = int(np.ceil(nPairs / float(nCols)))
figs = []
axs = []
for f in range(nFigs):
fig, ax = plt.subplots(2, nCols, figsize=(15, 8), squeeze=False)
ax_pairs = [(ax[0][x], ax[1][x]) for x in range(nCols)]
axs.extend(ax_pairs)
figs.append(fig)
n_extras = (nFigs * nCols) - nPairs
if n_extras > 0:
for a in axs[-n_extras:]:
a[0].set_visible(False)
a[1].set_visible(False)
for pair, ax in it.izip(valid_pairs, axs):
grp = lab.ExperimentGroup(pair)
label1 = 'Day {}, Ctx {}'.format(
exposure[grp[0]] + 1, grp[0].get('environment'))
label2 = 'Day {}, Ctx {}'.format(
exposure[grp[1]] + 1, grp[1].get('environment'))
ap.activityComparisonPlot(
grp, method=method, ax=ax[0], mask1=None, mask2=None, label1=label1,
label2=label2, roiNamesToLabel=None, normalize=False,
rasterized=rasterized, dF='from_file')
grp2 = lab.ExperimentGroup(pair[::-1])
ap.activityComparisonPlot(
grp2, method=method, ax=ax[1], mask1=None, mask2=None, label1=label2,
label2=label1, roiNamesToLabel=None, normalize=False,
rasterized=rasterized, dF='from_file')
return figs
def salience_responses_figures(
exptGrp, stimuli, pre_time=None, post_time=None, channel='Ch2',
label=None, roi_filter=None, exclude_running=False, rasterized=False):
"""Plot each ROI's response to each stim in stimuli"""
if exclude_running:
stimuli = [stim for stim in stimuli if 'running' not in stim]
# Stims labeled 'off' just flip the tail of the responsive distribution
# but are actually the same PSTH as the 'on' version
# No need to plot both
stimuli = [stim for stim in stimuli if 'off' not in stim]
if not len(stimuli):
warn("No stimuli to analyze, aborting.")
return []
cmap = matplotlib.cm.get_cmap(name='Spectral')
color_cycle = [cmap(i) for i in np.linspace(0, 0.9, len(stimuli))]
psths = []
for stim in stimuli:
psth, rois, x_ranges = ia.PSTH(
exptGrp, stimulus=stim, channel=channel, label=label, roi_filter=roi_filter,
pre_time=pre_time, post_time=post_time,
exclude='running' if exclude_running else None)
psths.append(psth)
figs, axs, axs_to_label = plotting.layout_subplots(
n_plots=len(psths[0]) + 1, rows=3, cols=4, polar=False,
sharex=False, figsize=(15, 8), rasterized=rasterized)
for fig in figs:
fig.suptitle('Salience Responses: {}'.format(
'running excluded' if exclude_running else 'running included'))
for psth, color, stim in it.izip(psths, color_cycle, stimuli):
for ax, roi_psth, roi, x_range in it.izip(axs, psth, rois, x_ranges):
ax.plot(x_range, roi_psth, color=color)
ax.set_title(roi[0].get('mouseID') + ', ' + roi[1] + ', ' + roi[2])
ax.axvline(0, linestyle='dashed', color='k')
ax.set_xlim(x_range[0], x_range[-1])
ylims = np.round(ax.get_ylim(), 2)
if ylims[1] != 0:
ax.set_yticks([0, ylims[1]])
elif ylims[0] != 0:
ax.set_yticks([ylims[0], 0])
else:
ax.set_yticks([0])
if ax not in axs_to_label:
ax.tick_params(labelbottom=False)
# Last axis will just be for labels
axs[-1].plot([0, 1],
[-color_cycle.index(color), -color_cycle.index(color)],
color=color, label=stim)
axs[-1].set_xlim(0, 1)
axs[-1].set_ylim(-len(stimuli), 1)
axs[-1].tick_params(labelbottom=False, labelleft=False, bottom=False,
left=False, top=False, right=False)
axs[-1].legend()
for ax in axs_to_label:
ax.set_ylabel(r'Average $\Delta$F/F')
ax.set_xlabel('Time (s)')
return figs
def salience_expt_summary_figure(
expt, stimuli, method='responsiveness', pre_time=None, post_time=None,
channel='Ch2', label=None, roi_filter=None, exclude_running=False,
rasterized=False, n_processes=1):
"""Summary of salience responses.
Includes trialAverageHeatmap, psth of responsive ROIs and image overlay of
responsive ROIs.
"""
fig, axs = plt.subplots(3, len(stimuli), figsize=(15, 8), squeeze=False,
subplot_kw={'rasterized': rasterized})
fig.suptitle('Salience Experiment Summary: {}'.format(
'running excluded' if exclude_running else 'running included'))
frame_period = expt.frame_period()
pre_frames = None if pre_time is None else int(pre_time / frame_period)
post_frames = None if post_time is None else int(post_time / frame_period)
for stim_idx, stim in enumerate(stimuli):
expt.trialAverageHeatmap(
stimulus=stim, ax=axs[0, stim_idx], sort=False, smoothing=None,
window_length=5, channel=channel, label=label,
roi_filter=roi_filter, exclude_running=exclude_running)
axs[0, stim_idx].set_title(stim)
responsive_filter = af.identify_stim_responsive_cells(
expt, stimulus=stim, method=method, pre_frames=pre_frames,
post_frames=post_frames, data=None, ax=axs[1, stim_idx],
conf_level=95, sig_tail='upper', transients_conf_level=95,
plot_mean=True, exclude='running' if exclude_running else None,
channel=channel, label=label, roi_filter=roi_filter,
n_bootstraps=10000, save_to_expt=True, n_processes=n_processes)
rois = expt.roiVertices(
channel=channel, label=label, roi_filter=responsive_filter)
plotting.roiDataImageOverlay(
ax=axs[2, stim_idx],
background=expt.returnFinalPrototype(channel=channel),
rois=rois, values=None, vmin=0, vmax=.8)
return fig
def salience_exptGrp_summary_figure(
exptGrp, stimuli, method='responsiveness', pre_time=None,
post_time=None, channel='Ch2', label=None, roi_filter=None,
exclude_running=False, rasterized=False, save_data=False,
n_processes=1, n_bootstraps=10000):
STIMS_PER_FIG = 6
data_to_save = {}
if exclude_running:
stimuli = [stim for stim in stimuli if 'running' not in stim]
if not len(stimuli):
warn("No stimuli to analyze, aborting.")
return []
n_figs = int(np.ceil(len(stimuli) / float(STIMS_PER_FIG)))
figs, psth_axs, response_axs, fraction_axs, first_col_axs = \
[], [], [], [], []
for n in range(n_figs):
fig, axs = plt.subplots(
3, STIMS_PER_FIG, figsize=(15, 8), squeeze=False,
subplot_kw={'rasterized': rasterized})
fig.suptitle('Responsive ROIs summary: {}'.format(
'running excluded' if exclude_running else 'running included'))
figs.append(fig)
psth_axs.append(axs[0, :])
response_axs.append(axs[1, :])
fraction_axs.append(axs[2, :])
first_col_axs.append(axs[:, 0])
psth_axs = np.hstack(psth_axs)
response_axs = np.hstack(response_axs)
fraction_axs = np.hstack(fraction_axs)
first_col_axs = np.hstack(first_col_axs)
min_psth_y_lim = np.inf
max_psth_y_lim = -np.inf
responsive_cells = {}
for ax, stimulus in it.izip(psth_axs, stimuli):
responsive_cells[stimulus] = ia.identify_stim_responsive_cells(
exptGrp, stimulus=stimulus, method=method, ax=ax, pre_time=pre_time,
post_time=post_time, data=None, conf_level=95, sig_tail='upper',
plot_mean=True, exclude='running' if exclude_running else None,
channel=channel, label=label, roi_filter=roi_filter,
n_bootstraps=n_bootstraps, save_to_expt=True,
n_processes=n_processes)
ax.set_title(stimulus)
min_psth_y_lim = np.amin([min_psth_y_lim, ax.get_ylim()[0]])
max_psth_y_lim = np.amax([max_psth_y_lim, ax.get_ylim()[1]])
max_bar_y_lim = 0
n_responsive_rois = {}
data_to_save['responsive_responses'] = []
data_to_save['non_responsive_responses'] = []
for ax, stimulus in it.izip(response_axs, stimuli):
responses = ia.response_magnitudes(
exptGrp, stimulus, method=method, pre_time=pre_time, post_time=post_time,
data=None, exclude='running' if exclude_running else None,
channel=channel, label=label,
roi_filter=responsive_cells[stimulus])
data_to_save['responsive_responses'].append(
[stimulus] + ['{:f}'.format(val) for val in responses])
plotting.scatter_bar(
ax, [np.abs(responses)], labels=[''], jitter_x=True)
max_bar_y_lim = np.amax([max_bar_y_lim, ax.get_ylim()[1]])
ax.tick_params(bottom=False, labelbottom=False)
n_responsive_rois[stimulus] = len(responses)
non_responses = ia.response_magnitudes(
exptGrp, stimulus, method=method, pre_time=pre_time, post_time=post_time,
data=None, exclude='running' if exclude_running else None,
channel=channel, label=label,
roi_filter=misc.invert_filter(responsive_cells[stimulus]))
data_to_save['non_responsive_responses'].append(
[stimulus] + ['{:f}'.format(val) for val in non_responses])
fractions = []
n_rois = {}
for ax, stimulus in it.izip(fraction_axs, stimuli):
all_psths, _, _ = ia.PSTH(
exptGrp, stimulus=stimulus, pre_time=pre_time, post_time=post_time,
data=None, exclude='running' if exclude_running else None,
channel=channel, label=label, roi_filter=roi_filter)
# Find how many of the ROIs were imaged with the current stimulus
n_rois[stimulus] = np.sum(
[not np.all(np.isnan(psth)) for psth in all_psths])
# n_responsive_rois = len(responsive_psths[stimulus])
if n_rois[stimulus] > 0:
fractions.append(
n_responsive_rois[stimulus] / float(n_rois[stimulus]))
plotting.scatter_bar(
ax, [[fractions[-1]]],
labels=['{} / {}'.format(
n_responsive_rois[stimulus], n_rois[stimulus])],
jitter_x=False)
else:
fractions.append(np.nan)
ax.set_ylim(0, 1)
ax.tick_params(bottom=False)
for ax in set(psth_axs).difference(first_col_axs):
ax.set_xlabel('')
ax.set_ylabel('')
ax.tick_params(labelleft=False, labelbottom=False)
for ax in psth_axs:
ax.set_ylim(min_psth_y_lim, max_psth_y_lim)
for ax in set(response_axs).intersection(first_col_axs):
ax.set_ylabel('Stim response')
for ax in set(response_axs).difference(first_col_axs):
ax.tick_params(labelleft=False)
for ax in response_axs:
ax.set_ylim(0, max_bar_y_lim)
for ax in set(fraction_axs).intersection(first_col_axs):
ax.set_ylabel('Responsive cell fraction')
for ax in set(fraction_axs).difference(first_col_axs):
ax.tick_params(labelleft=False)
if len(stimuli) % STIMS_PER_FIG:
extra_axs = len(stimuli) - n_figs * STIMS_PER_FIG
for ax in it.chain(psth_axs[extra_axs:], response_axs[extra_axs:],
fraction_axs[extra_axs:]):
ax.set_visible(False)
if save_data:
# Need to update for multiple pages
        raise NotImplementedError
psths = {}
non_responsive_psths = {}
for stimulus in stimuli:
# Responders
psth, x_range = ia.PSTH(
exptGrp, stimulus=stimulus, pre_time=pre_time, post_time=post_time,
channel=channel, label=label,
roi_filter=responsive_cells[stimulus], return_full='norm',
exclude='running' if exclude_running else None)
psth_list = [x_range]
for roi in psth:
psth_list.append(['{:f}'.format(val) for val in roi])
label_strs = np.array(
['Time (s)'] + ['ROI ' + str(x) for x in range(psth.shape[0])])
psths[stimulus] = np.hstack([label_strs[:, None], psth_list])
# Non-responders
psth, x_range = ia.PSTH(
exptGrp, stimulus=stimulus, pre_time=pre_time, post_time=post_time,
channel=channel, label=label,
roi_filter=misc.invert_filter(responsive_cells[stimulus]),
return_full='norm', exclude='running' if exclude_running else None)
psth_list = [x_range]
for roi in psth:
psth_list.append(['{:f}'.format(val) for val in roi])
label_strs = np.array(
['Time (s)'] + ['ROI ' + str(x) for x in range(psth.shape[0])])
non_responsive_psths[stimulus] = np.hstack([label_strs[:, None], psth_list])
data_to_save['psths'] = psths
data_to_save['non_responsive_psths'] = non_responsive_psths
data_to_save['fractions'] = [stimuli, fractions]
data_to_save['n_responding'] = [
stimuli, [n_responsive_rois[stim] for stim in stimuli]]
data_to_save['n_rois'] = [stimuli, [n_rois[stim] for stim in stimuli]]
misc.save_data(
data_to_save, fig=fig, label='salience_summary', method=save_data)
return figs
def salience_expt_grp_dataframe_figure(
expt_grps, stimuli, plotby, method='responsiveness', pre_time=None,
post_time=None, channel='Ch2', label=None, roi_filters=None,
colors=None, exclude_running=False, rasterized=False, save_data=False,
n_bootstraps=10000, n_processes=1):
# data_to_save = {}
STIMS_PER_FIG = 4
if roi_filters is None:
roi_filters = [None] * len(expt_grps)
if exclude_running:
stimuli = [stim for stim in stimuli if 'running' not in stim]
if not len(stimuli):
warn("No stimuli to analyze, aborting.")
return []
n_figs = int(np.ceil(len(stimuli) / float(STIMS_PER_FIG)))
figs, response_axs, fraction_axs, first_col_axs = [], [], [], []
for n in range(n_figs):
fig, axs = plt.subplots(
2, STIMS_PER_FIG, figsize=(15, 8), squeeze=False,
subplot_kw={'rasterized': rasterized})
fig.suptitle('Responsive ROIs by {}: {}'.format(
plotby,
'running excluded' if exclude_running else 'running included'))
figs.append(fig)
response_axs.append(axs[0, :])
fraction_axs.append(axs[1, :])
first_col_axs.append(axs[:, 0])
response_axs = np.hstack(response_axs)
fraction_axs = np.hstack(fraction_axs)
first_col_axs = np.hstack(first_col_axs)
if method == 'responsiveness':
activity_label = 'Responsiveness (dF/F)'
elif method == 'peak':
activity_label = 'Peak responsiveness (dF/F)'
else:
raise ValueError("Unrecognized 'method' value")
responsive_cells = {}
responsive_dfs = {}
for stimulus in stimuli:
responsive_cells[stimulus] = []
responsive_dfs[stimulus] = []
stimulus_filters = {}
stimulus_dfs = {}
for expt_grp, roi_filter in it.izip(expt_grps, roi_filters):
stimulus_filters = []
stimulus_dfs = []
for key, grp in expt_grp.groupby(plotby):
stimulus_filters.append(
ia.identify_stim_responsive_cells(
grp, stimulus=stimulus, method=method, pre_time=pre_time,
post_time=post_time, data=None, conf_level=95,
sig_tail='upper',
exclude='running' if exclude_running else None,
channel=channel, label=label, roi_filter=roi_filter,
n_bootstraps=n_bootstraps, save_to_expt=True,
n_processes=n_processes))
df = ia.response_magnitudes(
grp, stimulus, method=method, pre_time=pre_time,
post_time=post_time, data=None,
exclude='running' if exclude_running else None,
channel=channel, label=label,
roi_filter=stimulus_filters[-1], return_df=True)
# Put the grouping info back in the dataframe
# For example:
# plotby = ['condition_day']
# keys will be ['A_0', 'A_1', 'B_0', etc...]
# So df['condition_day'] == 'A_0' for the first group, etc.
for key_value, grouping in zip(key, plotby):
df[grouping] = key_value
stimulus_dfs.append(df)
responsive_dfs[stimulus].append(pd.concat(
stimulus_dfs, ignore_index=True))
responsive_cells[stimulus].append(misc.filter_union(
stimulus_filters))
#
    # Plot mean PSTH for each stim/group (placeholder -- not yet implemented)
#
pass
#
# Plot the mean response of responsive cells
#
max_response_y_lim = 0
for ax, stimulus in it.izip(response_axs, stimuli):
plotting.plot_dataframe(
ax, responsive_dfs[stimulus],
labels=[expt_grp.label() for expt_grp in expt_grps],
activity_label=activity_label, groupby=None, plotby=plotby,
orderby=None, plot_method='line', plot_shuffle=False,
shuffle_plotby=False, pool_shuffle=False,
agg_fn=np.mean, colors=colors)
max_response_y_lim = np.amax([max_response_y_lim, ax.get_ylim()[1]])
ax.set_title(stimulus)
plt.setp(ax.get_xticklabels(), rotation='40',
horizontalalignment='right')
#
# Plot fraction of responsive ROIs
#
groupby = [['mouseID', 'uniqueLocationKey', 'roi_id'] + plotby,
['mouseID'] + plotby]
activity_kwargs = [
{'channel': channel, 'label': label, 'include_roi_filter': inc_filter}
for inc_filter in roi_filters]
for ax, stimulus in it.izip(fraction_axs, stimuli):
plot_metric(
ax, expt_grps, eg.filtered_rois, 'line',
roi_filters=responsive_cells[stimulus], groupby=groupby,
plotby=plotby, orderby=None, plot_shuffle=False,
shuffle_plotby=False, pool_shuffle=False, plot_abs=False,
activity_kwargs=activity_kwargs,
activity_label='Fraction responding', label_every_n=1,
rotate_labels=True, colors=colors)
# ax.set_ylim(0, 1)
ax.set_ylim(-0.05, 1.05)
ax.set_yticks([0, 0.2, 0.4, 0.6, 0.8, 1])
for ax in set(response_axs).difference(first_col_axs):
ax.set_ylabel('')
for ax in response_axs:
ax.set_ylim(0, max_response_y_lim)
ax.set_xlabel('')
for ax in set(fraction_axs).difference(first_col_axs):
ax.set_ylabel('')
ax.set_title('')
if len(stimuli) % STIMS_PER_FIG:
extra_axs = len(stimuli) - n_figs * STIMS_PER_FIG
for ax in it.chain(response_axs[extra_axs:], fraction_axs[extra_axs:]):
ax.set_visible(False)
return figs
def compare_psth_summary_figure(
expt_grps, stimuli, pre_time=None, post_time=None, channel='Ch2',
label=None, roi_filters=None, colors=None, exclude_running=False,
rasterized=False):
STIMS_PER_FIG = 6
if colors is None:
colors = sns.color_palette()
data_to_save = {}
if exclude_running:
stimuli = [stim for stim in stimuli if 'running' not in stim]
if not len(stimuli):
warn("No stimuli to analyze, aborting.")
return []
n_figs = int(np.ceil(len(stimuli) / float(STIMS_PER_FIG)))
figs, response_axs, first_col_axs = [], [], []
psth_axs = defaultdict(list)
for n in range(n_figs):
fig, axs = plt.subplots(
len(expt_grps) + 1, STIMS_PER_FIG, figsize=(15, 8), squeeze=False,
subplot_kw={'rasterized': rasterized})
fig.suptitle('All ROIs summary: {}'.format(
'running excluded' if exclude_running else 'running included'))
figs.append(fig)
for expt_grp, grp_axs in zip(expt_grps, axs):
psth_axs[expt_grp].append(grp_axs)
plotting.right_label(grp_axs[-1], expt_grp.label())
response_axs.append(axs[-1, :])
first_col_axs.append(axs[:, 0])
for expt_grp in expt_grps:
psth_axs[expt_grp] = np.hstack(psth_axs[expt_grp])
response_axs = np.hstack(response_axs)
first_col_axs = np.hstack(first_col_axs)
min_psth_y_lim = np.inf
max_psth_y_lim = -np.inf
for expt_grp, roi_filter, color in zip(expt_grps, roi_filters, colors):
for ax, stimulus in it.izip(psth_axs[expt_grp], stimuli):
ia.PSTH(
expt_grp, stimulus, ax=ax, pre_time=pre_time, post_time=post_time,
exclude='running' if exclude_running else None, data=None,
shade_ste=False, plot_mean=True, channel=channel, label=label,
roi_filter=roi_filter, color=color)
ax.set_title(stimulus)
min_psth_y_lim = np.amin([min_psth_y_lim, ax.get_ylim()[0]])
max_psth_y_lim = np.amax([max_psth_y_lim, ax.get_ylim()[1]])
max_bar_y_lim = 0
data_to_save['responses'] = {expt_grp.label(): [] for expt_grp in expt_grps}
for ax, stimulus in it.izip(response_axs, stimuli):
responses = []
for expt_grp, roi_filter, color in zip(expt_grps, roi_filters, colors):
responses.append(ia.response_magnitudes(
expt_grp, stimulus, method='responsiveness', pre_time=pre_time,
post_time=post_time, data=None,
exclude='running' if exclude_running else None,
channel=channel, label=label, roi_filter=roi_filter))
data_to_save['responses'][expt_grp.label()].append(
[stimulus] + ['{:f}'.format(val) for val in responses[-1]])
plotting.grouped_bar(
ax, values=[[np.abs(r)] for r in responses], cluster_labels=[''],
condition_labels=[expt_grp.label() for expt_grp in expt_grps],
bar_colors=colors, scatter_points=True, jitter_x=True, s=20)
max_bar_y_lim = np.amax([max_bar_y_lim, ax.get_ylim()[1]])
ax.tick_params(bottom=False, labelbottom=False)
for ax in set(it.chain(*psth_axs.itervalues())).difference(first_col_axs):
ax.set_xlabel('')
ax.set_ylabel('')
ax.tick_params(labelleft=False, labelbottom=False)
for ax in it.chain(*psth_axs.itervalues()):
ax.set_ylim(min_psth_y_lim, max_psth_y_lim)
for ax in set(response_axs).intersection(first_col_axs):
ax.set_ylabel('Stim response')
for ax in set(response_axs).difference(first_col_axs):
ax.tick_params(labelleft=False)
legend = ax.get_legend()
if legend is not None:
legend.set_visible(False)
for ax in response_axs:
ax.set_ylim(0, max_bar_y_lim)
# for ax in set(fraction_axs).intersection(first_col_axs):
# ax.set_ylabel('Responsive cell fraction')
# for ax in set(fraction_axs).difference(first_col_axs):
# ax.tick_params(labelleft=False)
if len(stimuli) % STIMS_PER_FIG:
extra_axs = len(stimuli) - n_figs * STIMS_PER_FIG
for ax in it.chain(response_axs[extra_axs:], *[
grp_axs[extra_axs:] for grp_axs in psth_axs.itervalues()]):
ax.set_visible(False)
return figs
def plotRoisOverlay(expt, channel='Ch2', label=None, roi_filter=None,
rasterized=False):
"""Generate a figure of the imaging location with all ROIs overlaid"""
figs = []
background_image = expt.returnFinalPrototype(channel=channel)
roiVerts = expt.roiVertices(
channel=channel, label=label, roi_filter=roi_filter)
labels = expt.roi_ids(channel=channel, label=label, roi_filter=roi_filter)
imaging_parameters = expt.imagingParameters()
aspect_ratio = imaging_parameters['pixelsPerLine'] \
/ imaging_parameters['linesPerFrame']
for plane in xrange(background_image.shape[0]):
fig = plt.figure()
ax = fig.add_subplot(111, rasterized=rasterized)
roi_inds = [i for i, v in enumerate(roiVerts) if v[0][0][2] == plane]
# plane_verts = np.array(roiVerts)[roi_inds].tolist()
plane_verts = [roiVerts[x] for x in roi_inds]
twoD_verts = []
for roi in plane_verts:
roi_polys = []
for poly in roi:
roi_polys.append(poly[:, :2])
twoD_verts.append(roi_polys)
plotting.roiDataImageOverlay(
ax, background_image[plane, :, :], twoD_verts, values=None,
vmin=0, vmax=1, labels=np.array(labels)[roi_inds].tolist(),
cax=None, alpha=0.2, aspect=aspect_ratio)
ax.set_title('{}_{}: plane {}'.format(
expt.parent.get('mouseID'), expt.get('startTime'), plane))
figs.append(fig)
return figs
def trial_responses(
exptGrp, stimuli, channel='Ch2', label=None, roi_filter=None,
exclude_running=False, rasterized=False, plot_mean=False,
gray_traces=False, **psth_kwargs):
"""Plots the response to each stim in 'stimuli' for all rois and trials in
'exptGrp'
"""
if exclude_running:
stimuli = [stim for stim in stimuli if 'running' not in stim]
if not len(stimuli):
warn("No stimuli to analyze, aborting.")
return
# Stims labeled 'off' just flip the tail of the responsive distribution
# but are actually the same PSTH as the 'on' version
# No need to plot both
stimuli = [stim for stim in stimuli if 'off' not in stim]
psths = {}
for stimulus in stimuli:
psths[stimulus], rois, x_range = ia.PSTH(
exptGrp, stimulus, channel=channel, label=label, roi_filter=roi_filter,
return_full=True, exclude='running' if exclude_running else None,
**psth_kwargs)
figs, axs, axs_to_label = plotting.layout_subplots(
n_plots=len(rois) * len(stimuli), rows=4, cols=len(stimuli),
polar=False, sharex=False, figsize=(15, 8), rasterized=rasterized)
for fig in figs:
fig.suptitle('Trial Responses: {}'.format(
'running excluded' if exclude_running else 'running included'))
for ax in axs_to_label:
ax.set_ylabel(r'Average $\Delta$F/F')
ax.set_xlabel('Time (s)')
ax_idx = 0
for roi_idx in xrange(len(rois)):
for stimulus in stimuli:
ax = axs[ax_idx]
# If there are no trial psths for this roi, just move along
if psths[stimulus][roi_idx].shape[1] > 0:
if gray_traces:
ax.plot(x_range[roi_idx], psths[stimulus][roi_idx],
color='0.8')
else:
ax.plot(x_range[roi_idx], psths[stimulus][roi_idx])
if plot_mean:
ax.plot(
x_range[roi_idx],
np.nanmean(psths[stimulus][roi_idx], axis=1),
lw=2, color='k')
ax.axvline(0, linestyle='dashed', color='k')
ax.set_xlim(x_range[roi_idx][0], x_range[roi_idx][-1])
ylims = np.round(ax.get_ylim(), 2)
if ylims[1] != 0:
ax.set_yticks([0, ylims[1]])
elif ylims[0] != 0:
ax.set_yticks([ylims[0], 0])
else:
ax.set_yticks([0])
ax_geometry = ax.get_geometry()
# If ax is in top row add a stim title
if ax_geometry[2] <= ax_geometry[1]:
ax.set_title(stimulus)
# If ax is in last column add an roi label
if ax_geometry[2] % ax_geometry[1] == 0:
roi_label = rois[roi_idx][0].get('mouseID') + '\n' + \
rois[roi_idx][1] + '\n' + rois[roi_idx][2]
# Bbox = ax.figbox
# ax.figure.text(Bbox.p1[0] + 0.02,
# (Bbox.p1[1] + Bbox.p0[1]) / 2,
# roi_label, rotation='vertical',
# verticalalignment='center')
plotting.right_label(
ax, roi_label, rotation='vertical',
verticalalignment='center', horizontalalignment='center')
# Remove extra labels
if ax not in axs_to_label:
ax.tick_params(labelbottom=False)
ax_idx += 1
if np.mod(roi_idx, 4) == 3:
yield figs[roi_idx / 4]
def compare_stim_responses(
exptGrp, stimuli, channel='Ch2', label=None, roi_filter=None,
exclude_running=False, rasterized=False, plot_method='scatter',
z_score=True, **kwargs):
"""Plot of each pair of stims in stimuli against each other."""
if exclude_running:
stimuli = [stim for stim in stimuli if 'running' not in stim]
if not len(stimuli):
warn("No stimuli to analyze, aborting.")
return []
figs, axs, _ = plotting.layout_subplots(
comb(len(stimuli), 2), rows=2, cols=4, figsize=(15, 8),
sharex=False, rasterized=rasterized)
rois = {}
means = {}
stds = {}
for stimulus in stimuli:
means[stimulus], stds[stimulus], _, rois[stimulus], _ = \
ia.response_magnitudes(
exptGrp, stimulus, channel=channel, label=label, roi_filter=roi_filter,
return_full=True, z_score=z_score,
exclude='running' if exclude_running else None, **kwargs)
for ax, (stim1, stim2) in zip(axs, it.combinations(stimuli, 2)):
if plot_method == 'ellipse':
            raise NotImplementedError
means_1 = []
means_2 = []
stds_1 = []
stds_2 = []
all_rois = rois[stim1] + rois[stim2]
for roi in set(all_rois):
if roi in rois[stim1] and roi in rois[stim2]:
idx_1 = rois[stim1].index(roi)
idx_2 = rois[stim2].index(roi)
means_1.append(means[stim1][idx_1])
means_2.append(means[stim2][idx_2])
stds_1.append(stds[stim1][idx_1])
stds_2.append(stds[stim2][idx_2])
max_x = np.nanmax(np.array(means_1) + np.array(stds_1))
x_std = 4 * nanstd(means_1)
max_x = min([max_x, nanmean(means_1) + x_std])
max_y = np.nanmax(np.array(means_2) + np.array(stds_2))
y_std = 4 * nanstd(means_2)
max_y = min([max_y, nanmean(means_2) + y_std])
min_x = np.nanmin(np.array(means_1) - np.array(stds_1))
min_x = max([min_x, nanmean(means_1) - x_std])
min_y = np.nanmin(np.array(means_2) - np.array(stds_2))
min_y = max([min_y, nanmean(means_2) - y_std])
finite_means = np.isfinite(means_1) & np.isfinite(means_2)
if not np.any(finite_means):
continue
plotting.ellipsePlot(ax, means_1, means_2, stds_1, stds_2,
axesCenter=False, print_stats=True)
ax.set_xlim(min_x, max_x)
ax.set_ylim(min_y, max_y)
ax.axvline(0, linestyle=':', color='k')
ax.axhline(0, linestyle=':', color='k')
ax.set_xlabel(stim1)
ax.set_ylabel(stim2)
elif plot_method == 'scatter':
means_1 = []
means_2 = []
all_rois = rois[stim1] + rois[stim2]
for roi in set(all_rois):
if roi in rois[stim1] and roi in rois[stim2]:
idx_1 = rois[stim1].index(roi)
idx_2 = rois[stim2].index(roi)
means_1.append(means[stim1][idx_1])
means_2.append(means[stim2][idx_2])
finite_means = np.isfinite(means_1) & np.isfinite(means_2)
if not np.any(finite_means):
continue
plotting.scatterPlot(
ax, [means_1, means_2], [stim1, stim2], s=1.5,
print_stats=True)
ax.axvline(0, linestyle=':', color='k')
ax.axhline(0, linestyle=':', color='k')
else:
raise ValueError
for fig in figs:
fig.suptitle('Stim response {}comparison: {}'.format(
'z-score ' if z_score else '',
'running excluded' if exclude_running else 'running included'))
return figs
def quantify_multi_responses(
exptGrp, stimuli, method='responsiveness', channel='Ch2', label=None,
roi_filter=None, pre_time=None, post_time=None, rasterized=False,
n_processes=1, n_bootstraps=10000):
"""Quantifies the number of stimuli that each ROI responds to,
plots as a histogram"""
fig, axs = plt.subplots(1, 2, subplot_kw={'rasterized': rasterized},
figsize=(15, 8))
ia.plot_number_of_stims_responsive(
exptGrp, axs[0], stimuli, method=method, pre_time=pre_time,
post_time=post_time, exclude=None, channel=channel, label=label,
roi_filter=roi_filter, n_processes=n_processes,
n_bootstraps=n_bootstraps)
ia.plot_number_of_stims_responsive(
exptGrp, axs[1], stimuli, method=method, pre_time=pre_time,
post_time=post_time, exclude='running', channel=channel, label=label,
roi_filter=roi_filter, n_processes=n_processes,
n_bootstraps=n_bootstraps)
axs[0].set_title('Running included')
axs[1].set_title('Running excluded')
return fig
def response_linearity(
exptGrp, paired_stimuli, channel='Ch2', label=None, roi_filter=None,
exclude_running=False, responsive_method=None, rasterized=False,
plot_method='ellipse', **kwargs):
"""Histogram of response linearities
Calculated as combined_response / (single_response_1 + single_response_2)
Parameters
----------
paired_stimuli : list of paired stimuli to analyze
responsive_method : None, to include all rois, or a method for identifying
stim responsive rois
"""
paired_stimuli = [stim for stim in paired_stimuli if 'Paired' in stim]
if not paired_stimuli:
return []
figs, axs, _ = plotting.layout_subplots(
len(paired_stimuli), rows=2, cols=4, figsize=(15, 8),
sharex=False, rasterized=rasterized)
for stimulus, ax in zip(paired_stimuli, axs):
stims = stimulus.split()[1:]
if responsive_method:
stimulus_filter = ia.identify_stim_responsive_cells(
exptGrp, stimulus=stimulus, method=responsive_method,
channel=channel, label=label, roi_filter=roi_filter,
exclude='running' if exclude_running else None,
**kwargs)
else:
stimulus_filter = roi_filter
psth1, rois_1, x_ranges1 = ia.PSTH(
exptGrp, stims[0], channel=channel, label=label, roi_filter=stimulus_filter,
return_full=True, exclude='running' if exclude_running else None,
**kwargs)
responses_1 = []
for roi_psth, roi_x_range in zip(psth1, x_ranges1):
responses_1.append(nanmean(roi_psth[roi_x_range > 0], axis=0) -
nanmean(roi_psth[roi_x_range < 0], axis=0))
psth2, rois_2, x_ranges2 = ia.PSTH(
exptGrp, stims[1], channel=channel, label=label, roi_filter=stimulus_filter,
return_full=True, exclude='running' if exclude_running else None,
**kwargs)
responses_2 = []
for roi_psth, roi_x_range in zip(psth2, x_ranges2):
responses_2.append(nanmean(roi_psth[roi_x_range > 0], axis=0) -
nanmean(roi_psth[roi_x_range < 0], axis=0))
psth_combo, rois_combo, x_ranges_combo = ia.PSTH(
exptGrp, stimulus, channel=channel, label=label, roi_filter=stimulus_filter,
return_full=True, exclude='running' if exclude_running else None,
**kwargs)
responses_combo = []
for roi_psth, roi_x_range in zip(psth_combo, x_ranges_combo):
responses_combo.append(
nanmean(roi_psth[roi_x_range > 0], axis=0) -
nanmean(roi_psth[roi_x_range < 0], axis=0))
shared_rois = set(rois_1).intersection(rois_2).intersection(rois_combo)
combined_mean = []
combined_std = []
summed_mean = []
summed_std = []
linearity_ratios = []
for roi in shared_rois:
combo = responses_combo[rois_combo.index(roi)]
stim1 = responses_1[rois_1.index(roi)]
stim2 = responses_2[rois_2.index(roi)]
combined_mean.append(nanmean(combo))
combined_std.append(nanstd(combo))
summed_mean.append(nanmean(stim1) + nanmean(stim2))
# Propagate summed std
summed_std.append(
np.sqrt(nanstd(stim1) ** 2 + nanstd(stim2) ** 2))
linearity_ratios.append(combined_mean[-1] / summed_mean[-1])
if np.all(np.isnan(linearity_ratios)):
ax.set_visible(False)
continue
if plot_method == 'hist':
linearity_ratios = [ratio for ratio in linearity_ratios
if not np.isnan(ratio)]
if len(linearity_ratios) == 0:
return []
plotting.histogram(ax, linearity_ratios, bins=10, plot_mean=True)
ax.set_title(stimulus)
ax.set_xlabel('combined / (stim1 + stim2)')
ax.set_ylabel('Number')
elif plot_method == 'ellipse':
plotting.ellipsePlot(
ax, summed_mean, combined_mean, summed_std, combined_std,
axesCenter=False, print_stats=True)
ax.set_title(stimulus)
ax.set_xlabel('stim1 + stim2')
ax.set_ylabel('combined')
combined_mean = np.array(combined_mean)
combined_std = np.array(combined_std)
summed_mean = np.array(summed_mean)
summed_std = np.array(summed_std)
max_x = np.nanmax(summed_mean + summed_std)
x_std = 4 * nanstd(summed_mean)
max_x = min([max_x, nanmean(summed_mean) + x_std])
max_y = np.nanmax(combined_mean + combined_std)
y_std = 4 * nanstd(combined_mean)
max_y = min([max_y, nanmean(combined_mean) + y_std])
min_x = np.nanmin(summed_mean - summed_std)
min_x = max([min_x, nanmean(summed_mean) - x_std])
min_y = np.nanmin(combined_mean - combined_std)
min_y = max([min_y, nanmean(combined_mean) - y_std])
ax.set_xlim(min_x, max_x)
ax.set_ylim(min_y, max_y)
ax.axvline(0, linestyle=':', color='k')
ax.axhline(0, linestyle=':', color='k')
elif plot_method == 'scatter':
plotting.scatterPlot(
ax, [summed_mean, combined_mean],
['stim1 + stim2', 'combined'], s=1, print_stats=True)
ax.axvline(0, linestyle=':', color='k')
ax.axhline(0, linestyle=':', color='k')
ax.set_title(stimulus)
else:
raise ValueError(
'Unrecognized plot method: {}'.format(plot_method))
for fig in figs:
fig.suptitle('Stim response linearity, {}: {}'.format(
'all ROIs' if responsive_method is None else 'responsive ROIs only',
'running excluded' if exclude_running else 'running included'))
return figs
def run_duration_responsiveness(
exptGrp, channel='Ch2', label=None, roi_filter=None, rasterized=False,
method='responsiveness', **psth_kwargs):
"""Create figure comparing the magnitude of running responses versus
duration of running bout.
"""
figs = []
fig, axs = plt.subplots(2, 2, subplot_kw={'rasterized': rasterized},
figsize=(15, 8))
ia.compare_run_response_by_running_duration(
exptGrp, axs[0, 0], run_intervals='running_start',
response_method='responsiveness', plot_method='scatter',
channel=channel, label=label, roi_filter=roi_filter,
responsive_method=method, **psth_kwargs)
ia.compare_run_response_by_running_duration(
exptGrp, axs[0, 1], run_intervals='running_stop',
response_method='responsiveness', plot_method='scatter',
channel=channel, label=label, roi_filter=roi_filter,
responsive_method=method, **psth_kwargs)
# ia.compare_run_response_by_running_duration(
# exptGrp, axs[1, 0], run_intervals='running_stim',
# response_method='responsiveness', plot_method='scatter',
# channel=channel, label=label, roi_filter=roi_filter,
# responsive_method=method, **psth_kwargs)
# ia.compare_run_response_by_running_duration(
# exptGrp, axs[1, 1], run_intervals='running_no_stim',
# response_method='responsiveness', plot_method='scatter',
# channel=channel, label=label, roi_filter=roi_filter,
# responsive_method=method, **psth_kwargs)
# figs.append(fig)
# fig, axs = plt.subplots(2, 2, subplot_kw={'rasterized': rasterized},
# figsize=(15, 8))
ia.compare_run_response_by_running_duration(
exptGrp, axs[1, 0], run_intervals='running_start',
response_method='mean', plot_method='scatter',
channel=channel, label=label, roi_filter=roi_filter,
responsive_method=method, **psth_kwargs)
ia.compare_run_response_by_running_duration(
exptGrp, axs[1, 1], run_intervals='running_stop',
response_method='mean', plot_method='scatter',
channel=channel, label=label, roi_filter=roi_filter,
responsive_method=method, **psth_kwargs)
# ia.compare_run_response_by_running_duration(
# exptGrp, axs[1, 0], run_intervals='running_stim',
# response_method='mean', plot_method='scatter',
# channel=channel, label=label, roi_filter=roi_filter,
# responsive_method=method, **psth_kwargs)
# ia.compare_run_response_by_running_duration(
# exptGrp, axs[1, 1], run_intervals='running_no_stim',
# response_method='mean', plot_method='scatter',
# channel=channel, label=label, roi_filter=roi_filter,
# responsive_method=method, **psth_kwargs)
figs.append(fig)
return figs
def imaging_and_behavior_summary(
exptGrp, channel='Ch2', label=None, roi_filter=None):
"""Creates a summary figure of imaging data and behavior data"""
nTrials = sum([len(expt.findall('trial')) for expt in exptGrp])
figs, axs, _ = plotting.layout_subplots(
nTrials, rows=1, cols=2, figsize=(15, 8), sharex=False)
for ax, trial in it.izip(
axs, it.chain(*[expt.findall('trial') for expt in exptGrp])):
if isinstance(trial.parent, lab.classes.SalienceExperiment):
stim = trial.get('stimulus')
if stim == 'air':
stim = 'airpuff'
stim_time = trial.parent.stimulusTime()
if 'Paired' in stim:
keys = stim.split(' ')[1:] + ['running', 'licking']
else:
keys = [stim, 'running', 'licking']
ap.plot_imaging_and_behavior(
trial, ax, keys=keys, channel=channel, label=label,
roi_filter=roi_filter, include_empty=True)
ax.axvline(stim_time, linestyle='dashed', color='k')
ax.set_xticklabels(ax.get_xticks() - stim_time)
ax.set_title('{}_{}: {}'.format(
trial.parent.parent.get('mouseID'),
trial.parent.get('uniqueLocationKey'), trial.get('time')))
else:
ap.plot_imaging_and_behavior(
trial, ax, channel=channel, label=label, roi_filter=roi_filter,
include_empty=False)
return figs
def response_cdfs(
exptGrp, stimuli, method='responsiveness', pre_time=None,
post_time=None, channel='Ch2', label=None, roi_filter=None,
rasterized=False):
"""Plot cdfs across all rois for each stim in stimuli.
Plots all stims except running/licking, all running/licking stims, and
all stims with running excluded"""
fig, axs = plt.subplots(
1, 3, figsize=(15, 8), subplot_kw={'rasterized': rasterized})
cmap = matplotlib.cm.get_cmap(name='Spectral')
#
# Plot all stims except running/licking
#
axs[0].set_title('All stims (except running/licking)')
stims = [stim for stim in stimuli
if 'running' not in stim and 'licking' not in stim]
colors = [cmap(i) for i in np.linspace(0, 0.9, len(stims))]
for stim, color in zip(stims, colors):
responses = ia.response_magnitudes(
exptGrp, stim, method=method, pre_time=pre_time, post_time=post_time,
channel=channel, label=label, roi_filter=roi_filter,
return_full=False, exclude=None)
non_nan_responses = responses[np.isfinite(responses)]
if len(non_nan_responses):
plotting.cdf(axs[0], non_nan_responses, bins='exact', color=color)
axs[0].legend(stims, loc='lower right')
#
# Plot running/licking stims
#
axs[1].set_title('Running/licking responses')
stims = [stim for stim in stimuli
if 'running' in stim or 'licking' in stim]
colors = [cmap(i) for i in np.linspace(0, 0.9, len(stims))]
for stim, color in zip(stims, colors):
responses = ia.response_magnitudes(
exptGrp, stim, method=method, pre_time=pre_time, post_time=post_time,
channel=channel, label=label, roi_filter=roi_filter,
return_full=False, exclude=None)
non_nan_responses = responses[np.isfinite(responses)]
if len(non_nan_responses):
plotting.cdf(axs[1], non_nan_responses, bins='exact', color=color)
axs[1].legend(stims, loc='lower right')
#
# Plot all stims with running excluded
#
axs[2].set_title('All stims, running excluded')
stims = [stim for stim in stimuli if 'running' not in stim]
colors = [cmap(i) for i in np.linspace(0, 0.9, len(stims))]
for stim, color in zip(stims, colors):
responses = ia.response_magnitudes(
exptGrp, stim, method=method, pre_time=pre_time, post_time=post_time,
channel=channel, label=label, roi_filter=roi_filter,
return_full=False, exclude='running')
non_nan_responses = responses[np.isfinite(responses)]
if len(non_nan_responses):
plotting.cdf(axs[2], non_nan_responses, bins='exact', color=color)
axs[2].legend(stims, loc='lower right')
for ax in axs:
ax.set_xlabel('Responsiveness')
return fig
def paired_stims_response_heatmaps(
exptGrp, stimuli, exclude_running=False, rasterized=False,
**response_kwargs):
"""Plot heatmaps of response magnitude of paired stims versus
single stims
"""
paired_stims = [stim for stim in stimuli if 'Paired' in stim]
fig, axs = plt.subplots(
1, len(paired_stims), subplot_kw={'rasterized': rasterized})
fig.subplots_adjust(wspace=0.5)
for ax, paired_stim in it.izip(axs, paired_stims):
stims_in_pair = paired_stim.split()[1:]
stims_to_plot = [paired_stim] + stims_in_pair + \
[stim for stim in exptGrp.stimuli()
if 'Paired' not in stim and stim not in stims_in_pair]
ap.stim_response_heatmap(
exptGrp, ax, stims_to_plot, sort_by=paired_stim,
exclude='running' if exclude_running else None,
aspect_ratio=0.2, **response_kwargs)
ax.axvline(0.5, linewidth=3, color='k')
ax.axvline(2.5, linewidth=3, color='k')
for label in ax.get_yticklabels():
label.set_fontsize(7)
x_labels = []
for label in ax.get_xticklabels():
label.set_fontsize(5)
x_labels.append(''.join([s[0] for s in label.get_text().split()]))
ax.set_xticklabels(x_labels)
title = fig.suptitle(
'Paired stim heatmap, sort by paired stim, running {}'.format(
'excluded' if exclude_running else 'included'))
title.set_fontsize(7)
yield fig
fig, axs = plt.subplots(
1, len(paired_stims), subplot_kw={'rasterized': rasterized})
fig.subplots_adjust(wspace=0.5)
for ax, paired_stim in it.izip(axs, paired_stims):
stims_in_pair = paired_stim.split()[1:]
stims_to_plot = [paired_stim] + stims_in_pair + \
[stim for stim in exptGrp.stimuli()
if 'Paired' not in stim and stim not in stims_in_pair]
ap.stim_response_heatmap(
exptGrp, ax, stims_to_plot, sort_by=stims_in_pair,
exclude='running' if exclude_running else None,
aspect_ratio=0.2, **response_kwargs)
ax.axvline(0.5, linewidth=3, color='k')
ax.axvline(2.5, linewidth=3, color='k')
for label in ax.get_yticklabels():
label.set_fontsize(7)
x_labels = []
for label in ax.get_xticklabels():
label.set_fontsize(5)
x_labels.append(''.join([s[0] for s in label.get_text().split()]))
ax.set_xticklabels(x_labels)
title = fig.suptitle(
'Paired stim heatmap, sort by single stims in pair, running {}'.format(
'excluded' if exclude_running else 'included'))
title.set_fontsize(7)
yield fig
def compare_bouton_response_figure(
exptGrp, stimuli, plot_method='cdf', save_data=False, rasterized=False,
**response_kwargs):
"""Figure to compare different types of boutons"""
fig, axs = plt.subplots(2, 3, subplot_kw={'rasterized': rasterized})
data_to_save = {}
data_to_save['angle'] = ap.compare_bouton_responses(
exptGrp, axs[0, 0], stimuli, comp_method='angle', plot_method=plot_method,
**response_kwargs)
data_to_save['abs angle'] = ap.compare_bouton_responses(
exptGrp, axs[1, 0], stimuli, comp_method='abs angle', plot_method=plot_method,
**response_kwargs)
data_to_save['corr'] = ap.compare_bouton_responses(
exptGrp, axs[0, 1], stimuli, comp_method='corr', plot_method=plot_method,
**response_kwargs)
data_to_save['abs corr'] = ap.compare_bouton_responses(
exptGrp, axs[1, 1], stimuli, comp_method='abs corr', plot_method=plot_method,
**response_kwargs)
data_to_save['mean diff'] = ap.compare_bouton_responses(
exptGrp, axs[0, 2], stimuli, comp_method='mean diff', plot_method=plot_method,
**response_kwargs)
for line_idx, line in enumerate(axs[0, 2].lines):
axs[1, 2].axhline(
line_idx, color=line.get_color(), label=line.get_label())
axs[1, 2].set_ylim(-1, len(axs[0, 2].lines))
axs[1, 2].tick_params(labelbottom=False, labelleft=False, bottom=False,
left=False, top=False, right=False)
axs[1, 2].legend()
if save_data:
misc.save_data(data_to_save, fig=fig, label='compare_bouton_responses',
method=save_data)
return fig
def hidden_rewards_learning_summary(
exptGrps, save_data=False, rasterized=False, groupby=None, plotby=None,
orderby=None, colors=None, label_every_n=1):
"""Generates a summary figure of hidden reward analysis plots"""
if groupby is None:
groupby = [['expt', 'condition_day_session']]
if plotby is None:
plotby = ['condition_day_session']
data_to_save = {}
figs = []
fig, axs = plt.subplots(
2, 4, figsize=(15, 8), subplot_kw={'rasterized': rasterized})
fig.subplots_adjust(hspace=0.3)
data_to_save['time_per_lap'] = plot_metric(
axs[0, 0], exptGrps, metric_fn=eg.time_per_lap,
groupby=groupby, plotby=plotby, orderby=orderby, colors=colors,
plot_method='line', activity_label='Time per lap (sec)',
label_every_n=label_every_n, label_groupby=False)
data_to_save['fraction_rewarded_laps'] = plot_metric(
axs[0, 1], exptGrps, metric_fn=ra.fraction_of_laps_rewarded,
groupby=groupby, plotby=plotby, orderby=orderby, colors=colors,
plot_method='line', activity_label='Fraction of laps rewarded',
label_every_n=label_every_n, label_groupby=False)
data_to_save['rewards_per_lap'] = plot_metric(
axs[0, 2], exptGrps, metric_fn=eg.stims_per_lap,
groupby=groupby, plotby=plotby, orderby=orderby, colors=colors,
plot_method='line', activity_label='Number of rewards per lap',
activity_kwargs={'stimulus': 'water'},
label_every_n=label_every_n, label_groupby=False)
data_to_save['n_laps'] = plot_metric(
axs[0, 3], exptGrps, metric_fn=eg.number_of_laps,
groupby=groupby, plotby=plotby, orderby=orderby, colors=colors,
plot_method='line', activity_label='Number of laps',
label_every_n=label_every_n, label_groupby=False)
data_to_save['water_rate'] = plot_metric(
axs[1, 0], exptGrps, metric_fn=ra.rate_of_water_obtained,
groupby=groupby, plotby=plotby, orderby=orderby, colors=colors,
plot_method='line', activity_label='Rate of water obtained (ms/min)',
label_every_n=label_every_n, label_groupby=False)
data_to_save['rewarded_lick_duration'] = plot_metric(
axs[1, 1], exptGrps, metric_fn=eg.lick_bout_duration,
groupby=groupby, plotby=plotby, orderby=orderby, colors=colors,
activity_kwargs={'bouts_to_include': 'rewarded', 'threshold': 0.5},
activity_label='Duration of rewarded lick bouts (s)',
plot_method='line', label_every_n=label_every_n, label_groupby=False)
data_to_save['n_licks'] = plot_metric(
axs[1, 2], exptGrps, metric_fn=eg.behavior_dataframe,
groupby=groupby, plotby=plotby, orderby=orderby, colors=colors,
activity_kwargs={'key': 'licking'}, activity_label='Number of licks',
plot_method='line', agg_fn=np.sum, label_every_n=label_every_n,
label_groupby=False)
fig.suptitle('groupby = {}'.format(groupby))
figs.append(fig)
if save_data:
misc.save_data(data_to_save, fig=figs, method=save_data,
label='hidden_rewards_behavior_1')
data_to_save = {}
fig, axs = plt.subplots(
2, 4, figsize=(15, 8), subplot_kw={'rasterized': rasterized})
fig.subplots_adjust(hspace=0.3)
data_to_save['rewarded_lick_intervals'] = plot_metric(
axs[0, 0], exptGrps, metric_fn=ra.fraction_rewarded_lick_intervals,
groupby=groupby, plotby=plotby, orderby=orderby, plot_method='line',
activity_label='Fraction of lick intervals rewarded', colors=colors,
activity_kwargs={'threshold': 0.5}, label_every_n=label_every_n,
label_groupby=False)
data_to_save['licks_in_rewarded_intervals'] = plot_metric(
axs[1, 0], exptGrps,
metric_fn=ra.fraction_licks_in_rewarded_intervals,
groupby=groupby, plotby=plotby, orderby=orderby, plot_method='line',
activity_label='Fraction of licks in rewarded intervals',
colors=colors, activity_kwargs={'threshold': 0.5},
label_every_n=label_every_n, label_groupby=False)
data_to_save['licks_in_reward_zone'] = plot_metric(
axs[0, 1], exptGrps, metric_fn=ra.fraction_licks_in_reward_zone,
groupby=groupby, plotby=plotby, orderby=orderby, plot_method='line',
activity_label='Fraction of licks in reward zone', colors=colors,
label_every_n=label_every_n, label_groupby=False)
data_to_save['licks_near_rewards'] = plot_metric(
axs[1, 1], exptGrps, metric_fn=ra.fraction_licks_near_rewards,
groupby=groupby, plotby=plotby, orderby=orderby, plot_method='line',
activity_label='Fraction of licks near rewards', colors=colors,
label_every_n=label_every_n, label_groupby=False)
data_to_save['licking_spatial_information'] = plot_metric(
axs[0, 2], exptGrps, metric_fn=ra.licking_spatial_information,
groupby=groupby, plotby=plotby, orderby=orderby, plot_method='line',
activity_label='Licking spatial information (bits/sec)', colors=colors,
label_every_n=label_every_n, label_groupby=False)
# Licking circular variance
data_to_save['lick_to_reward_distance'] = plot_metric(
axs[0, 3], exptGrps, metric_fn=ra.lick_to_reward_distance,
groupby=groupby, plotby=plotby, orderby=orderby, plot_method='line',
activity_label='Lick distance to reward (norm units)', colors=colors,
label_every_n=label_every_n, label_groupby=False)
data_to_save['licks_outside_reward_vicinity'] = plot_metric(
axs[1, 2], exptGrps, metric_fn=ra.licks_outside_reward_vicinity,
groupby=groupby, plotby=plotby, orderby=orderby, plot_method='line',
activity_label='Fraction of licks outside reward vicinity', colors=colors,
label_every_n=label_every_n, label_groupby=False)
# data_to_save['anticipatory_licks'] = plot_metric(
# axs[1, 3], exptGrps, metric_fn=ra.anticipatory_licking,
# groupby=groupby, plotby=plotby, orderby=orderby, plot_method='line',
# activity_label='Anticipatory licking', colors=colors,
# label_every_n=label_every_n, label_groupby=False)
data_to_save['anticipatory_lick_fraction'] = plot_metric(
axs[1, 3], exptGrps, metric_fn=ra.fraction_licks_near_rewards,
groupby=groupby, plotby=plotby, orderby=orderby, plot_method='line',
activity_label='Anticipatory lick fraction', colors=colors,
label_every_n=label_every_n, label_groupby=False,
activity_kwargs={'pre_window_cm': 5, 'exclude_reward': True})
fig.suptitle('groupby = {}'.format(groupby))
figs.append(fig)
if save_data:
misc.save_data(data_to_save, fig=figs, method=save_data,
label='hidden_rewards_behavior_2')
return figs
def hidden_reward_behavior_control_summary(
exptGrps, save_data=False, rasterized=False, groupby=None, plotby=None,
orderby=None, colors=None, label_every_n=1):
"""Generate a control figure for hidden rewards behavior experiments."""
if groupby is None:
groupby = [['expt', 'condition_day_session']]
if plotby is None:
plotby = ['condition_day_session']
data_to_save = {}
fig, axs = plt.subplots(
2, 2, figsize=(15, 8), subplot_kw={'rasterized': rasterized})
fig.subplots_adjust(hspace=0.3)
# Grouping by expt, trial, or mouse defeats the purpose of n_sessions plot
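    # e.g. a groupby of [['expt', 'condition_day_session']] is reduced to
    # [['condition_day_session']] so that sessions are aggregated rather than
    # plotted one point per experiment.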
n_ses_groupby = []
for group in groupby:
new_groupby = filter(
lambda x: x not in ['expt', 'trial', 'mouseID'], group)
if len(new_groupby):
n_ses_groupby.append(new_groupby)
if not len(n_ses_groupby):
n_ses_groupby = None
data_to_save['n_sessions'] = plot_metric(
axs[0, 0], exptGrps, metric_fn=eg.dataframe,
groupby=n_ses_groupby, plotby=plotby, orderby=orderby, colors=colors,
plot_method='line', activity_label='Total number of sessions',
label_every_n=label_every_n, agg_fn=np.sum)
data_to_save['n_laps'] = plot_metric(
axs[0, 1], exptGrps, metric_fn=eg.number_of_laps,
groupby=groupby, plotby=plotby, orderby=orderby, colors=colors,
plot_method='line', activity_label='Number of laps',
label_every_n=label_every_n)
data_to_save['reward_windows_per_lap'] = plot_metric(
axs[1, 0], exptGrps, metric_fn=eg.stims_per_lap,
groupby=groupby, plotby=plotby, orderby=orderby, colors=colors,
plot_method='line', activity_label='Number of reward windows per lap',
activity_kwargs={'stimulus': 'reward'},
label_every_n=label_every_n)
data_to_save['reward_position'] = plot_metric(
axs[1, 1], exptGrps, metric_fn=eg.stim_position,
groupby=groupby, plotby=plotby, orderby=orderby, colors=colors,
plot_method='line', activity_label='Mean reward location',
activity_kwargs={'stimulus': 'reward', 'normalized': False},
label_every_n=label_every_n)
try:
expected_positions = [expt.rewardPositions(units=None)
for exptGrp in exptGrps for expt in exptGrp]
expected_positions = set(it.chain(*expected_positions))
except AttributeError:
pass
else:
for position in expected_positions:
axs[1, 1].axhline(position, color='red')
if save_data:
misc.save_data(data_to_save, fig=fig, method=save_data,
label='hidden_rewards_control')
return fig
def hidden_rewards_move_rewards_learning(
exptGrps, groupby=None, plotby=None, orderby=None, colors=None,
label_every_n=1, rasterized=False, save_data=False,
rewards='combined', by_condition=False):
if groupby is None:
groupby = [['expt', 'condition_day_session']]
if plotby is None:
plotby = ['condition_day_session']
data_to_save = {}
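    # 'combined': pool reward positions (or condition labels when
    # by_condition) across all groups into one sorted list; 'separate': keep a
    # per-group collection so each group is split only by its own positions.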
if rewards == 'combined':
reward_positions = set()
for exptGrp in exptGrps:
if by_condition:
conditions, _ = exptGrp.condition_label(by_mouse=True)
reward_positions = reward_positions.union(conditions.values())
else:
for expt in exptGrp:
for pos in expt.rewardPositions(units=None):
reward_positions.add(pos)
reward_positions = sorted(reward_positions)
elif rewards == 'separate':
reward_positions = {}
for exptGrp in exptGrps:
reward_positions[exptGrp] = set()
if by_condition:
conditions, _ = exptGrp.condition_label(by_mouse=True)
                reward_positions[exptGrp] = reward_positions[exptGrp].union(
                    conditions.values())
else:
for expt in exptGrp:
for pos in expt.rewardPositions(units=None):
reward_positions[exptGrp].add(pos)
reward_positions[exptGrp] = sorted(reward_positions[exptGrp])
if colors is None:
if rewards == 'combined':
colors = sns.color_palette(
"Paired", len(exptGrps) * len(reward_positions))
        if rewards == 'separate':
            colors = sns.color_palette(
                "Paired",
                len(exptGrps) * sum(map(len, reward_positions.values())))
else:
# Lightest is too light, so add an extra color that we'll ignore
colors = [sns.light_palette(
color, len(reward_positions) + 1,
reverse=True)[:len(reward_positions)] for color in colors]
colors = list(it.chain(*colors))
new_exptGrps = []
activity_kwargs = []
for exptGrp in exptGrps:
if rewards == 'combined':
pos_iter = reward_positions
elif rewards == 'separate':
pos_iter = reward_positions[exptGrp]
for pos in pos_iter:
new_exptGrp = lab.classes.HiddenRewardExperimentGroup(exptGrp)
if by_condition:
new_exptGrp.label(exptGrp.label() + '_{}'.format(pos))
activity_kwargs.append({'rewardPositions': pos})
else:
new_exptGrp.label(exptGrp.label() + '_{:0.1f}'.format(pos))
activity_kwargs.append({'rewardPositions': [pos]})
new_exptGrps.append(new_exptGrp)
fig, axs = plt.subplots(
1, 3, figsize=(15, 8), subplot_kw={'rasterized': rasterized},
squeeze=False)
fig.subplots_adjust(hspace=0.3)
data_to_save['lick_to_reward_distance'] = plot_metric(
axs[0, 0], new_exptGrps, metric_fn=ra.lick_to_reward_distance,
groupby=groupby, plotby=plotby, orderby=orderby, colors=colors,
plot_method='line', activity_kwargs=activity_kwargs,
activity_label='lick distance to reward (norm units)',
label_every_n=label_every_n, label_groupby=False)
data_to_save['licks_near_rewards'] = plot_metric(
axs[0, 1], new_exptGrps, metric_fn=ra.fraction_licks_near_rewards,
groupby=groupby, plotby=plotby, orderby=orderby, colors=colors,
plot_method='line', activity_kwargs=activity_kwargs,
activity_label='fraction of licks near rewards',
label_every_n=label_every_n, label_groupby=False)
data_to_save['fraction_laps_licking'] = plot_metric(
axs[0, 2], new_exptGrps,
metric_fn=ra.fraction_of_laps_with_licking_near_reward,
groupby=groupby, plotby=plotby, orderby=orderby, colors=colors,
plot_method='line', activity_kwargs=activity_kwargs,
activity_label='fraction of laps w/ licks near rewards',
label_every_n=label_every_n, label_groupby=False)
fig.suptitle('groupby = {}'.format(groupby))
if save_data:
misc.save_data(data_to_save, fig=fig, method=save_data,
label='hidden_rewards_behavior')
return fig
def stim_response_summary(
expt_grp, stimuli, pre_time=None, post_time=None, channel='Ch2',
label=None, roi_filter=None):
fig, axs = plt.subplots(2, len(stimuli), figsize=(15, 8))
for stim, ax_pair in zip(stimuli, axs.T):
ia.PSTH(
expt_grp, stim, ax=ax_pair[0], pre_time=pre_time, post_time=post_time,
shade_ste=False, plot_mean=True, channel=channel, label=label,
roi_filter=roi_filter, gray_traces=True)
ia.PSTH(
expt_grp, stim, ax=ax_pair[1], pre_time=pre_time, post_time=post_time,
shade_ste='sem', plot_mean=True, channel=channel, label=label,
roi_filter=roi_filter)
ax_pair[0].set_title(stim)
ax_pair[0].set_xlabel('')
ax_pair[0].tick_params(axis='x', labelbottom=False)
min_y, max_y = np.inf, -np.inf
for ax in axs[0, :]:
min_y = np.amin([min_y, ax.get_ylim()[0]])
max_y = np.amax([max_y, ax.get_ylim()[1]])
for ax in axs[0, :]:
ax.set_ylim(min_y, max_y)
min_y, max_y = np.inf, -np.inf
for ax in axs[1, :]:
min_y = np.amin([min_y, ax.get_ylim()[0]])
max_y = np.amax([max_y, ax.get_ylim()[1]])
for ax in axs[1, :]:
ax.set_ylim(min_y, max_y)
for ax_row in axs[:, 1:]:
for ax in ax_row:
ax.set_ylabel('')
return fig
def licktogram_summary(expt_grps, rasterized=False, polar=False):
"""Plots licktograms for every condition/day by mouse"""
dataframes = [expt_grp.dataframe(
expt_grp, include_columns=['mouseID', 'expt', 'condition', 'session'])
for expt_grp in expt_grps]
dataframe = pd.concat(dataframes)
mouse_grp_dict = {
mouse: expt_grp.label() for expt_grp in expt_grps for mouse in
set(expt.parent.get('mouseID') for expt in expt_grp)}
fig_dict = {}
for mouse_id, df in dataframe.groupby('mouseID'):
n_rows = len(set(df['condition']))
n_cols = df['session'].max() + 1
fig, axs = plt.subplots(
n_rows, n_cols, figsize=(15, 8), sharey=not polar,
subplot_kw={'rasterized': rasterized, 'polar': polar},
squeeze=False)
for c_idx, condition in enumerate(sorted(set(df['condition']))):
for session in range(n_cols):
df_slice = df[(df['condition'] == condition) &
(df['session'] == session)]
if len(df_slice) == 1:
expt = df_slice['expt'].iloc[0]
if polar:
expt.polar_lick_plot(ax=axs[c_idx, session])
else:
expt.licktogram(
ax=axs[c_idx, session], plot_belt=False)
else:
axs[c_idx, session].set_visible(False)
for ax, condition in zip(axs[:, -1], sorted(set(df['condition']))):
plotting.right_label(ax, condition)
for ax, session in zip(axs[0, :], range(1, n_cols + 1)):
ax.set_title('Session {}'.format(session))
for ax in axs[:, 1:].flat:
ax.set_ylabel('')
for ax in axs[1:, :].flat:
ax.set_title('')
for ax in axs[:-1, :].flat:
ax.set_xlabel('')
fig.suptitle('{}: {}'.format(mouse_grp_dict[mouse_id], mouse_id))
fig_dict[mouse_id] = fig
return [fig_dict[mouse] for mouse in sorted(fig_dict.keys())]
def behavior_cross_correlation(
expt_grps, roi_filters, behavior_key, channel='Ch2', label=None,
rasterized=False, max_lag=10, thresh=0.5, colors=None):
if colors is None:
colors = sns.color_palette()
fig, axs = plt.subplots(
3, len(expt_grps) + 1, squeeze=False,
subplot_kw={'rasterized': rasterized}, figsize=(15, 8))
fig.suptitle('Imaging-behavior cross-correlation: {}'.format(behavior_key))
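    # Layout: one column per group plus a final comparison column; row 0 shows
    # the cross-correlation traces, row 1 the zero-lag values, and row 2 the
    # lag of the peak absolute correlation.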
corrs = {}
zero_lag, peak_offset = [], []
for grp_axs, color, expt_grp, roi_filter in zip(
axs.T, colors, expt_grps, roi_filters):
corr = sa.xcorr_imaging_behavior(
expt_grp, behavior_key, max_lag=max_lag, thresh=thresh,
return_full=False, channel=channel, label=label,
roi_filter=roi_filter)
assert 0. in corr.index
corrs[expt_grp] = corr
zero_lag.append([np.array(corr[corr.index == 0])[0]])
peak_offset.append([np.array(
[corr.index[i] for i in np.argmax(
np.abs(np.array(corr)), axis=0)])])
light_color = sns.light_palette(color)[1]
grp_axs[0].plot(corr.index, corr, color=light_color)
grp_axs[0].plot(corr.index, corr.mean(1), color=color)
grp_axs[0].set_xlim(corr.index[0], corr.index[-1])
grp_axs[0].set_title(expt_grp.label())
grp_axs[0].set_xlabel('Lag (s)')
grp_axs[0].set_ylabel('Cross-correlation')
plotting.histogram(
grp_axs[1], zero_lag[-1][0], bins=10,
range=(-1, 1), color=color, normed=False,
plot_mean=True, label=None, orientation='vertical', filled=True,
mean_kwargs=None)
grp_axs[1].set_xlabel('zero-lag cross-correlation')
grp_axs[1].set_ylabel('ROIs')
plotting.histogram(
grp_axs[2], peak_offset[-1][0], bins=10,
range=(-max_lag, max_lag), color=color, normed=False,
plot_mean=True, label=None, orientation='vertical', filled=True,
mean_kwargs=None)
grp_axs[2].set_xlabel('Time to peak (s)')
grp_axs[2].set_ylabel('ROIs')
#
# Directly compare
#
for expt_grp, color in zip(expt_grps, colors):
corr = corrs[expt_grp]
axs[0, -1].plot(
corr.index, corr.mean(1), color=color, label=expt_grp.label())
axs[0, -1].fill_between(
corr.index, corr.mean(1) - corr.sem(1), corr.mean(1) + corr.sem(1),
color=color, alpha=0.5)
axs[0, -1].set_xlim(corr.index[0], corr.index[-1])
axs[0, -1].set_xlabel('Lag (s)')
axs[0, -1].set_ylabel('Cross-correlation')
    min_y, max_y = np.inf, -np.inf
for ax in axs[0, :]:
min_y = min(min_y, ax.get_ylim()[0])
max_y = max(max_y, ax.get_ylim()[1])
for ax in axs[0, :]:
ax.set_ylim(min_y, max_y)
axs[0, -1].legend(frameon=False, loc='best')
plotting.grouped_bar(
axs[1, -1], values=zero_lag, cluster_labels=[''],
condition_labels=[expt_grp.label() for expt_grp in expt_grps],
bar_colors=colors, scatter_points=True, jitter_x=True, s=20)
axs[1, -1].set_ylabel('zero-lag cross-correlation')
plotting.grouped_bar(
axs[2, -1], values=peak_offset, cluster_labels=[''],
condition_labels=[expt_grp.label() for expt_grp in expt_grps],
bar_colors=colors, scatter_points=True, jitter_x=True, s=20)
axs[2, -1].set_ylabel('Time to peak (s)')
return fig
def plotControlSummary(
exptGrps, roi_filters=None, channel='Ch2', label=None,
rasterized=False, groupby=None, plotby=None, **plot_kwargs):
"""Plot a series of potentially control analysis, looking at similarity of
data over time.
"""
fig, axs = plt.subplots(
2, 3, figsize=(15, 8), subplot_kw={'rasterized': rasterized})
fig.subplots_adjust(hspace=0.3)
base_kwargs = {'channel': channel, 'label': label}
activity_kwargs = base_kwargs.copy()
activity_kwargs.update({'stat': 'mean'})
plot_metric(
ax=axs[0, 0], exptGrps=exptGrps, roi_filters=roi_filters,
metric_fn=ia.population_activity, plot_method='line',
groupby=groupby, plotby=plotby, activity_kwargs=activity_kwargs,
activity_label="Mean dF/F", **plot_kwargs)
activity_kwargs = base_kwargs.copy()
activity_kwargs.update({'stat': 'amplitude'})
plot_metric(
ax=axs[0, 1], exptGrps=exptGrps, roi_filters=roi_filters,
metric_fn=ia.population_activity, plot_method='line',
groupby=groupby, plotby=plotby, activity_kwargs=activity_kwargs,
activity_label="Mean transient amplitude", **plot_kwargs)
activity_kwargs = base_kwargs.copy()
activity_kwargs.update({'stat': 'duration'})
plot_metric(
ax=axs[1, 0], exptGrps=exptGrps, roi_filters=roi_filters,
metric_fn=ia.population_activity, plot_method='line',
groupby=groupby, plotby=plotby, activity_kwargs=activity_kwargs,
activity_label="Mean transient duration", **plot_kwargs)
activity_kwargs = base_kwargs.copy()
activity_kwargs.update({'stat': 'frequency'})
plot_metric(
ax=axs[1, 1], exptGrps=exptGrps, roi_filters=roi_filters,
metric_fn=ia.population_activity, plot_method='line',
groupby=groupby, plotby=plotby, activity_kwargs=activity_kwargs,
activity_label="Mean transient frequency", **plot_kwargs)
activity_kwargs = base_kwargs.copy()
plot_metric(
ax=axs[0, 2], exptGrps=exptGrps, roi_filters=roi_filters,
metric_fn=ia.trace_sigma, plot_method='line',
groupby=groupby, plotby=plotby, activity_kwargs=activity_kwargs,
activity_label='Trace sigma', **plot_kwargs)
activity_kwargs = base_kwargs.copy()
plot_metric(
ax=axs[1, 2], exptGrps=exptGrps, roi_filters=roi_filters,
metric_fn=ia.mean_fluorescence, plot_method='line',
groupby=groupby, plotby=plotby, activity_kwargs=activity_kwargs,
activity_label='Mean raw fluorescence', **plot_kwargs)
return fig
def plot_calcium_dynamics_summary(
expt_grps, roi_filters=None, channel='Ch2', label=None,
rasterized=False, groupby=None, plotby=None, plot_method='cdf',
**plot_kwargs):
"""A set of control plots designed to compare baseline calcium properties
between genotypes.
"""
fig, axs = plt.subplots(
2, 3, figsize=(15, 8), subplot_kw={'rasterized': rasterized})
fig.subplots_adjust(hspace=0.3)
# Trans psth
# base_kwargs = {'channel': channel, 'label': label}
base_kwargs = []
for expt_grp in expt_grps:
grp_kwargs = {}
try:
grp_kwargs['channel'] = expt_grp.args['channel']
except KeyError:
grp_kwargs['channel'] = channel
try:
grp_kwargs['label'] = expt_grp.args['imaging_label']
        except KeyError:
            grp_kwargs['label'] = label
        base_kwargs.append(grp_kwargs)
activity_kwargs = [dict(bkw.items() + [('stat', 'amplitude')])
for bkw in base_kwargs]
plot_metric(
ax=axs[0, 1], exptGrps=expt_grps, roi_filters=roi_filters,
metric_fn=ia.population_activity, plot_method=plot_method,
groupby=groupby, plotby=plotby, activity_kwargs=activity_kwargs,
activity_label="Mean transient amplitude", **plot_kwargs)
activity_kwargs = [dict(bkw.items() + [('stat', 'duration')])
for bkw in base_kwargs]
plot_metric(
ax=axs[1, 0], exptGrps=expt_grps, roi_filters=roi_filters,
metric_fn=ia.population_activity, plot_method=plot_method,
groupby=groupby, plotby=plotby, activity_kwargs=activity_kwargs,
activity_label="Mean transient duration", **plot_kwargs)
activity_kwargs = [dict(bkw.items() + [('stat', 'frequency')])
for bkw in base_kwargs]
plot_metric(
ax=axs[1, 1], exptGrps=expt_grps, roi_filters=roi_filters,
metric_fn=ia.population_activity, plot_method=plot_method,
groupby=groupby, plotby=plotby, activity_kwargs=activity_kwargs,
activity_label="Mean transient frequency", **plot_kwargs)
activity_kwargs = base_kwargs
plot_metric(
ax=axs[0, 2], exptGrps=expt_grps, roi_filters=roi_filters,
metric_fn=ia.trace_sigma, plot_method=plot_method,
groupby=groupby, plotby=plotby, activity_kwargs=activity_kwargs,
activity_label='Trace sigma', **plot_kwargs)
activity_kwargs = base_kwargs
plot_metric(
ax=axs[1, 2], exptGrps=expt_grps, roi_filters=roi_filters,
metric_fn=ia.mean_fluorescence, plot_method=plot_method,
groupby=groupby, plotby=plotby, activity_kwargs=activity_kwargs,
activity_label='Mean raw fluorescence', **plot_kwargs)
return fig
def transient_summary(
expt_grps, plot_method, intervals='running', roi_filters=None,
groupby=None, plotby=None, label_every_n=1, save_data=False,
rasterized=False, interval_kwargs=None, channel='Ch2', label=None,
**plot_kwargs):
"""Generate a summary plot of place field transient statistics."""
if interval_kwargs is None:
interval_kwargs = {}
if roi_filters is None:
roi_filters = [None] * len(expt_grps)
if intervals == 'running':
kwargs = {}
kwargs.update(interval_kwargs)
in_intervals = [inter.running_intervals(
expt_grp, **kwargs) for expt_grp in expt_grps]
out_intervals = [~ints for ints in in_intervals]
elif intervals == 'place field':
kwargs = {}
kwargs.update(interval_kwargs)
in_intervals = [inter.place_fields(
expt_grp, roi_filter=roi_filter, **kwargs) for
expt_grp, roi_filter in zip(expt_grps, roi_filters)]
out_intervals = [~ints for ints in in_intervals]
elif intervals == 'reward':
kwargs = {'nearness': 0.1}
kwargs.update(interval_kwargs)
in_intervals = [inter.near_rewards(
expt_grp, **kwargs) for expt_grp in expt_grps]
out_intervals = [~ints for ints in in_intervals]
else:
raise ValueError("Unrecognized value for 'intervals' argument")
data_to_save = {}
fig, axs = plt.subplots(
3, 5, figsize=(15, 8), subplot_kw={'rasterized': rasterized},
sharey='col')
fig.subplots_adjust(hspace=0.3)
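    # Grid layout: columns are transient statistics (amplitude, duration,
    # responseMagnitude, norm transient auc2, frequency); rows are all
    # transients, transients within the intervals, and transients outside.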
activity_kwargs = {'stat': 'amplitude', 'interval': None, 'channel': channel, 'label': label}
data_to_save['amplitude_all'] = plot_metric(
axs[0, 0], expt_grps, metric_fn=ia.population_activity_new,
plot_method=plot_method, roi_filters=roi_filters, groupby=groupby,
plotby=plotby, activity_kwargs=activity_kwargs, activity_label='',
label_every_n=label_every_n, **plot_kwargs)
axs[0, 0].set_title('amplitude')
activity_kwargs = [
{'stat': 'amplitude', 'interval': grp_interval, 'channel': channel, 'label': label}
for grp_interval in in_intervals]
data_to_save['amplitude_in'] = plot_metric(
axs[1, 0], expt_grps, metric_fn=ia.population_activity_new,
plot_method=plot_method, roi_filters=roi_filters, groupby=groupby,
plotby=plotby, activity_kwargs=activity_kwargs, activity_label='',
label_every_n=label_every_n, **plot_kwargs)
activity_kwargs = [
{'stat': 'amplitude', 'interval': grp_interval, 'channel': channel, 'label': label}
for grp_interval in out_intervals]
data_to_save['amplitude_out'] = plot_metric(
axs[2, 0], expt_grps, metric_fn=ia.population_activity_new,
plot_method=plot_method, roi_filters=roi_filters, groupby=groupby,
plotby=plotby, activity_kwargs=activity_kwargs, activity_label='',
label_every_n=label_every_n, **plot_kwargs)
activity_kwargs = {'stat': 'duration', 'interval': None, 'channel': channel, 'label': label}
data_to_save['duration_all'] = plot_metric(
axs[0, 1], expt_grps, metric_fn=ia.population_activity_new,
plot_method=plot_method, roi_filters=roi_filters, groupby=groupby,
plotby=plotby, activity_kwargs=activity_kwargs, activity_label='',
label_every_n=label_every_n, **plot_kwargs)
axs[0, 1].set_title('duration')
activity_kwargs = [
{'stat': 'duration', 'interval': grp_interval, 'channel': channel, 'label': label}
for grp_interval in in_intervals]
data_to_save['duration_in'] = plot_metric(
axs[1, 1], expt_grps, metric_fn=ia.population_activity_new,
plot_method=plot_method, roi_filters=roi_filters, groupby=groupby,
plotby=plotby, activity_kwargs=activity_kwargs, activity_label='',
label_every_n=label_every_n, **plot_kwargs)
activity_kwargs = [
{'stat': 'duration', 'interval': grp_interval, 'channel': channel, 'label': label}
for grp_interval in out_intervals]
data_to_save['duration_out'] = plot_metric(
axs[2, 1], expt_grps, metric_fn=ia.population_activity_new,
plot_method=plot_method, roi_filters=roi_filters, groupby=groupby,
plotby=plotby, activity_kwargs=activity_kwargs, activity_label='',
label_every_n=label_every_n, **plot_kwargs)
activity_kwargs = {'stat': 'responseMagnitude', 'interval': None, 'channel': channel, 'label': label}
data_to_save['magnitude_all'] = plot_metric(
axs[0, 2], expt_grps, metric_fn=ia.population_activity_new,
plot_method=plot_method, roi_filters=roi_filters, groupby=groupby,
plotby=plotby, activity_kwargs=activity_kwargs, activity_label='',
label_every_n=label_every_n, **plot_kwargs)
axs[0, 2].set_title('responseMagnitude')
activity_kwargs = [
{'stat': 'responseMagnitude', 'interval': grp_interval, 'channel': channel, 'label': label}
for grp_interval in in_intervals]
data_to_save['magnitude_in'] = plot_metric(
axs[1, 2], expt_grps, metric_fn=ia.population_activity_new,
plot_method=plot_method, roi_filters=roi_filters, groupby=groupby,
plotby=plotby, activity_kwargs=activity_kwargs, activity_label='',
label_every_n=label_every_n, **plot_kwargs)
activity_kwargs = [
{'stat': 'responseMagnitude', 'interval': grp_interval, 'channel': channel, 'label': label}
for grp_interval in out_intervals]
data_to_save['magnitude_out'] = plot_metric(
axs[2, 2], expt_grps, metric_fn=ia.population_activity_new,
plot_method=plot_method, roi_filters=roi_filters, groupby=groupby,
plotby=plotby, activity_kwargs=activity_kwargs, activity_label='',
label_every_n=label_every_n, **plot_kwargs)
activity_kwargs = {'stat': 'norm transient auc2', 'interval': None, 'channel': channel, 'label': label}
data_to_save['auc_all'] = plot_metric(
axs[0, 3], expt_grps, metric_fn=ia.population_activity_new,
plot_method=plot_method, roi_filters=roi_filters, groupby=groupby,
plotby=plotby, activity_kwargs=activity_kwargs, activity_label='',
label_every_n=label_every_n, **plot_kwargs)
axs[0, 3].set_title('norm transient auc2')
activity_kwargs = [
{'stat': 'norm transient auc2', 'interval': grp_interval, 'channel': channel, 'label': label}
for grp_interval in in_intervals]
data_to_save['auc_in'] = plot_metric(
axs[1, 3], expt_grps, metric_fn=ia.population_activity_new,
plot_method=plot_method, roi_filters=roi_filters, groupby=groupby,
plotby=plotby, activity_kwargs=activity_kwargs, activity_label='',
label_every_n=label_every_n, **plot_kwargs)
activity_kwargs = [
{'stat': 'norm transient auc2', 'interval': grp_interval, 'channel': channel, 'label': label}
for grp_interval in out_intervals]
data_to_save['auc_out'] = plot_metric(
axs[2, 3], expt_grps, metric_fn=ia.population_activity_new,
plot_method=plot_method, roi_filters=roi_filters, groupby=groupby,
plotby=plotby, activity_kwargs=activity_kwargs, activity_label='',
label_every_n=label_every_n, **plot_kwargs)
activity_kwargs = {'stat': 'frequency', 'interval': None, 'channel': channel, 'label': label}
data_to_save['frequency_all'] = plot_metric(
axs[0, 4], expt_grps, metric_fn=ia.population_activity_new,
plot_method=plot_method, roi_filters=roi_filters, groupby=groupby,
plotby=plotby, activity_kwargs=activity_kwargs, activity_label='',
label_every_n=label_every_n, **plot_kwargs)
axs[0, 4].set_title('frequency')
activity_kwargs = [
{'stat': 'frequency', 'interval': grp_interval, 'channel': channel, 'label': label}
for grp_interval in in_intervals]
data_to_save['frequency_in'] = plot_metric(
axs[1, 4], expt_grps, metric_fn=ia.population_activity_new,
plot_method=plot_method, roi_filters=roi_filters, groupby=groupby,
plotby=plotby, activity_kwargs=activity_kwargs, activity_label='',
label_every_n=label_every_n, **plot_kwargs)
activity_kwargs = [
{'stat': 'frequency', 'interval': grp_interval, 'channel': channel, 'label': label}
for grp_interval in out_intervals]
data_to_save['frequency_out'] = plot_metric(
axs[2, 4], expt_grps, metric_fn=ia.population_activity_new,
plot_method=plot_method, roi_filters=roi_filters, groupby=groupby,
plotby=plotby, activity_kwargs=activity_kwargs, activity_label='',
label_every_n=label_every_n, **plot_kwargs)
# Remove extra labels
for ax in axs[:2, :].flat:
ax.set_xlabel('')
for ax in axs[:, 1:].flat:
ax.set_ylabel('')
for ax in axs[1:, :].flat:
ax.set_title('')
plotting.right_label(axs[0, -1], 'all trans')
plotting.right_label(axs[1, -1], 'trans in')
plotting.right_label(axs[2, -1], 'trans out')
fig.suptitle('Activity by {}\ngroupby={}'.format(intervals, groupby))
if save_data:
misc.save_data(data_to_save, fig=fig, label='transient_summary',
method=save_data)
return fig
def thresholded_metric_vs_metric_figure(
exptGrps, x_metric, y_metric, filter_metric, thresholds, roi_filters=None,
x_metric_kwargs=None, y_metric_kwargs=None, filter_metric_kwargs=None,
xlabel=None, ylabel=None, plot_method='scatter', groupby=None,
colorby=None, filter_on=('roi',), title='', save_data=None, filter_fn=None,
**plot_kwargs):
fig, axs = plt.subplots(3, len(thresholds), figsize=(15, 8))
data_to_save = {}
if xlabel is None:
xlabel = 'Metric 1'
if ylabel is None:
ylabel = 'Metric 2'
    filter_fns = [
        misc.df_filter_intersection([None, filter_fn]),
        misc.df_filter_intersection(
            [lambda df: df['filter_metric_value'] < threshold, filter_fn]),
        misc.df_filter_intersection(
            [lambda df: df['filter_metric_value'] > threshold, filter_fn])]
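    # Note: `threshold` inside the lambdas is resolved late, when the filters
    # are applied inside the loop below, so each column uses its own threshold.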
filter_labels = ['all', 'less_than', 'greater_than']
for col, threshold in enumerate(thresholds):
for row, filter_fn, filter_label in zip(
it.count(), filter_fns, filter_labels):
label = '{}_{}'.format(filter_label, threshold)
data_to_save[label] = plot_paired_metrics(
exptGrps, roi_filters=roi_filters, ax=axs[row, col],
first_metric_fn=x_metric, second_metric_fn=y_metric,
first_metric_kwargs=x_metric_kwargs,
second_metric_kwargs=y_metric_kwargs,
first_metric_label=xlabel,
second_metric_label=ylabel,
plot_method=plot_method,
groupby=groupby,
colorby=colorby,
filter_metric_fn=filter_metric,
filter_metric_merge_on=filter_on,
filter_metric_fn_kwargs=filter_metric_kwargs,
filter_fn=filter_fn, **plot_kwargs)
axs[0, col].set_title('Threshold = {}'.format(threshold))
for ax, label in zip(axs[:, -1], filter_labels):
plotting.right_label(ax, label)
for ax in axs[:, 1:].flat:
ax.set_ylabel('')
for ax in axs[:-1, :].flat:
ax.set_xlabel('')
fig.suptitle(title)
if save_data:
misc.save_data(data_to_save, fig=fig,
label='thresholded_metric_vs_metric', method=save_data)
return fig
def hidden_rewards_number_of_licks(
expt_grps, rasterized=False, groupby=None, plotby=None,
label_every_n=1, **plot_kwargs):
"""Plots the total number of licks in vs out of reward per mouse"""
if groupby is None:
groupby = [['expt', 'condition_day_session']]
if plotby is None:
plotby = ['condition_day_session']
mice = {}
max_mice = -1
for expt_grp in expt_grps:
mice[expt_grp] = {expt.parent for expt in expt_grp}
max_mice = max(max_mice, len(mice[expt_grp]))
fig, axs = plt.subplots(
len(expt_grps), max_mice, figsize=(15, 8), squeeze=False,
subplot_kw={'rasterized': rasterized})
fig.subplots_adjust(hspace=0.3)
for expt_grp, grp_axs in zip(expt_grps, axs):
for mouse, ax in zip(sorted(mice[expt_grp]), grp_axs):
mouse_expt_grp = expt_grp.subGroup(
[expt for expt in expt_grp if expt.parent == mouse],
label='near')
colors = color_cycle()
plot_metric(
ax, [mouse_expt_grp],
metric_fn=ra.number_licks_near_rewards,
plot_method='line', groupby=groupby, plotby=plotby,
label_every_n=label_every_n, colors=[colors.next()],
activity_label='Number of licks', **plot_kwargs)
mouse_expt_grp.label('away')
plot_metric(
ax, [mouse_expt_grp],
metric_fn=ra.number_licks_away_rewards,
plot_method='line', groupby=groupby, plotby=plotby,
label_every_n=label_every_n, colors=[colors.next()],
activity_label='Number of licks', **plot_kwargs)
ax.set_title(mouse.get('mouseID'))
for ax in axs[:, 1:].flat:
ax.set_ylabel('')
ax.tick_params(labelleft=False)
for ax in axs.flat:
ax.set_xlabel('')
ax.tick_params(top=False)
max_licks = -np.inf
for ax in axs.flat:
max_licks = max(max_licks, ax.get_ylim()[1])
for ax in axs.flat:
ax.set_ylim(top=max_licks)
for ax in list(axs.flat)[1:]:
legend = ax.get_legend()
if legend is not None:
legend.set_visible(False)
for expt_grp, ax in zip(expt_grps, axs[:, -1]):
plotting.right_label(ax, expt_grp.label())
fig.suptitle(
'Number of licks near/away from reward\ngroupby = {}'.format(groupby))
return fig
def salience_responsiveness_figure_by_cell(
expt_grp, stimuli, plotby, method='responsiveness', pre_time=None,
post_time=None, channel='Ch2', label=None, roi_filter=None,
exclude_running=False, rasterized=False, save_data=False,
n_bootstraps=10000, n_processes=1):
"""Plots the stimulus responsiveness versus the 'plotby'. For example, the
response to water rewards over days of exposure.
Yields 1 figure per ROI with a grid of plots, 1 per stimulus in 'stimuli'.
Parameters
----------
expt_grp, channel, label, roi_filter
Standard analysis arguments.
stimuli : list
List of stimuli.
plotby : list
List of keys that will determine the x-axis of the plot.
See lab.plotting.plotting_helpers.prepare_dataframe
method : 'responsiveness' or 'peak'
Method to determine the response to the stimuli.
pre_time, post_time : float
Duration of baseline (pre_time) and response time (post_time).
Yields
------
mpl.pyplot.Figure
"""
# data_to_save = {}
    N_COLS = 4
    if exclude_running:
        stimuli = [stim for stim in stimuli if 'running' not in stim]
        if not len(stimuli):
            warn("No stimuli to analyze, aborting.")
            return
    # Size the grid from the (possibly filtered) stimulus list
    n_rows = int(np.ceil(len(stimuli) / float(N_COLS)))
    n_extra_axs = N_COLS * n_rows - len(stimuli)
if method == 'responsiveness':
activity_label = 'Responsiveness (dF/F)'
elif method == 'peak':
activity_label = 'Peak responsiveness (dF/F)'
else:
raise ValueError("Unrecognized 'method' value")
responsiveness = {}
all_roi_tuples = set()
for stimulus in stimuli:
stimulus_dfs = []
for key, grp in expt_grp.groupby(plotby):
df = ia.response_magnitudes(
grp, stimulus, method=method, pre_time=pre_time,
post_time=post_time, data=None,
exclude='running' if exclude_running else None,
channel=channel, label=label,
roi_filter=roi_filter, return_df=True)
# Put the grouping info back in the dataframe
# For example:
# plotby = ['condition_day']
# keys will be ['A_0', 'A_1', 'B_0', etc...]
# So df['condition_day'] == 'A_0' for the first group, etc.
for key_value, grouping in zip(key, plotby):
df[grouping] = key_value
stimulus_dfs.append(df)
joined_df = pd.concat(
stimulus_dfs, ignore_index=True)
joined_df['roi_tuple'] = zip(
joined_df['mouse'].apply(lambda mouse: mouse.get('mouseID')),
joined_df['uniqueLocationKey'],
joined_df['roi_id'])
responsiveness[stimulus] = joined_df
all_roi_tuples = all_roi_tuples.union(joined_df['roi_tuple'])
for roi_tuple in sorted(all_roi_tuples):
fig, axs = plt.subplots(
n_rows, N_COLS, figsize=(15, 8), squeeze=False,
subplot_kw={'rasterized': rasterized})
fig.subplots_adjust(hspace=0.3)
first_col_axs = axs[:, 0]
fig.suptitle(roi_tuple)
min_response_y_lim, max_response_y_lim = np.inf, -np.inf
for ax, stimulus in it.izip(axs.flat, stimuli):
data = responsiveness[stimulus]
data = data[data['roi_tuple'].apply(lambda val: val == roi_tuple)]
plotting.plot_dataframe(
ax, [data],
activity_label=activity_label, groupby=None, plotby=plotby,
orderby=None, plot_method='line', plot_shuffle=False,
shuffle_plotby=False, pool_shuffle=False,
agg_fn=np.mean)
min_response_y_lim = np.amin([min_response_y_lim, ax.get_ylim()[0]])
max_response_y_lim = np.amax([max_response_y_lim, ax.get_ylim()[1]])
ax.set_title(stimulus)
plt.setp(ax.get_xticklabels(), rotation='40',
horizontalalignment='right')
if n_extra_axs:
for ax in np.array(axs.flat)[-n_extra_axs:]:
ax.set_visible(False)
for ax in set(axs.flat).difference(first_col_axs):
ax.set_ylabel('')
for ax in axs.flat:
ax.set_ylim(min_response_y_lim, max_response_y_lim)
ax.set_xlabel('')
legend = ax.get_legend()
if legend is not None:
legend.set_visible(False)
yield fig
def behavior_psth_figure(
expt_grps, stimulus_key, data_key, groupby, rasterized=False,
**behaviorPSTH_kwargs):
"""Returns a figure of behavior data PSTHS of experiment subgroups.
Figure will be an array of plots, n_expt_grps x n_groupby_groups.
"""
all_expts = lab.ExperimentGroup([expt for expt in it.chain(*expt_grps)])
n_groupbys = len(list(all_expts.groupby(groupby)))
fig, axs = plt.subplots(
len(expt_grps), n_groupbys, figsize=(15, 8), squeeze=False,
subplot_kw={'rasterized': rasterized})
for grp_axs, (grp_label, subgrp) in zip(axs.T, all_expts.groupby(groupby)):
for ax, expt_grp in zip(grp_axs, expt_grps):
expt_grp_subgrp = copy(expt_grp)
expt_grp_subgrp.filter(lambda expt: expt in subgrp)
if not len(expt_grp_subgrp):
ax.set_visible(False)
continue
lab.analysis.behavior_analysis.plotBehaviorPSTH(
expt_grp_subgrp, stimulus_key, data_key, ax=ax,
**behaviorPSTH_kwargs)
grp_axs[0].set_title(str(grp_label))
for ax, expt_grp in zip(axs[:, -1], expt_grps):
plotting.right_label(ax, expt_grp.label())
fig.suptitle('{} triggered {} PSTH\ngroupby={}'.format(
stimulus_key, data_key, groupby))
return fig
| mit |
trondeau/gnuradio | gnuradio-runtime/apps/evaluation_random_numbers.py | 26 | 5155 | #!/usr/bin/env python
#
# Copyright 2015 Free Software Foundation, Inc.
#
# This file is part of GNU Radio
#
# GNU Radio is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3, or (at your option)
# any later version.
#
# GNU Radio is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with GNU Radio; see the file COPYING. If not, write to
# the Free Software Foundation, Inc., 51 Franklin Street,
# Boston, MA 02110-1301, USA.
#
from gnuradio import gr
import numpy as np
from scipy.stats import norm, laplace, rayleigh
from matplotlib import pyplot as plt
# NOTE: scipy and matplotlib are optional packages and not included in the default gnuradio dependencies
#*** SETUP ***#
# Number of realisations per histogram
num_tests = 1000000
# Set number of bins in histograms
uniform_num_bins = 31
gauss_num_bins = 31
rayleigh_num_bins = 31
laplace_num_bins = 31
rndm = gr.random() # instance of gnuradio random class (gr::random)
print 'All histograms contain',num_tests,'realisations.'
#*** GENERATE DATA ***#
uniform_values = np.zeros(num_tests)
gauss_values = np.zeros(num_tests)
rayleigh_values = np.zeros(num_tests)
laplace_values = np.zeros(num_tests)
for k in range(num_tests):
uniform_values[k] = rndm.ran1()
gauss_values[k] = rndm.gasdev()
rayleigh_values[k] = rndm.rayleigh()
laplace_values[k] = rndm.laplacian()
#*** HISTOGRAM DATA AND CALCULATE EXPECTED COUNTS ***#
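# Expected counts follow from each reference CDF: for bin k with edges
# (b_k, b_{k+1}), E[count] = num_tests * (CDF(b_{k+1}) - CDF(b_k)); the
# uniform case reduces to num_tests / (number of bins).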
uniform_bins = np.linspace(0,1,uniform_num_bins)
gauss_bins = np.linspace(-8,8,gauss_num_bins)
laplace_bins = np.linspace(-8,8,laplace_num_bins)
rayleigh_bins = np.linspace(0,10,rayleigh_num_bins)
uniform_hist = np.histogram(uniform_values,uniform_bins)
gauss_hist = np.histogram(gauss_values,gauss_bins)
rayleigh_hist = np.histogram(rayleigh_values,rayleigh_bins)
laplace_hist = np.histogram(laplace_values,laplace_bins)
uniform_expected = np.zeros(uniform_num_bins-1)
gauss_expected = np.zeros(gauss_num_bins-1)
rayleigh_expected = np.zeros(rayleigh_num_bins-1)
laplace_expected = np.zeros(laplace_num_bins-1)
for k in range(len(uniform_hist[0])):
uniform_expected[k] = num_tests/float(uniform_num_bins-1)
for k in range(len(gauss_hist[0])):
gauss_expected[k] = float(norm.cdf(gauss_hist[1][k+1])-norm.cdf(gauss_hist[1][k]))*num_tests
for k in range(len(rayleigh_hist[0])):
rayleigh_expected[k] = float(rayleigh.cdf(rayleigh_hist[1][k+1])-rayleigh.cdf(rayleigh_hist[1][k]))*num_tests
for k in range(len(laplace_hist[0])):
laplace_expected[k] = float(laplace.cdf(laplace_hist[1][k+1])-laplace.cdf(laplace_hist[1][k]))*num_tests
#*** PLOT HISTOGRAMS AND EXPECTATIONS TAKEN FROM SCIPY ***#
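# Bin centers (left edge plus half a bin width) are used as x positions so the
# measured and expected counts line up in the plots.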
uniform_bins_center = uniform_bins[0:-1]+(uniform_bins[1]-uniform_bins[0])/2.0
gauss_bins_center = gauss_bins[0:-1]+(gauss_bins[1]-gauss_bins[0])/2.0
rayleigh_bins_center = rayleigh_bins[0:-1]+(rayleigh_bins[1]-rayleigh_bins[0])/2.0
laplace_bins_center = laplace_bins[0:-1]+(laplace_bins[1]-laplace_bins[0])/2.0
plt.figure(1)
plt.subplot(2,1,1)
plt.plot(uniform_bins_center,uniform_hist[0],'s--',uniform_bins_center,uniform_expected,'o:')
plt.xlabel('Bins'), plt.ylabel('Count'), plt.title('Uniform: Distribution')
plt.legend(['histogram gr::random','calculation scipy'],loc=1)
plt.subplot(2,1,2)
plt.plot(uniform_bins_center,uniform_hist[0]/uniform_expected,'rs--')
plt.xlabel('Bins'), plt.ylabel('Relative deviation'), plt.title('Uniform: Relative deviation to scipy')
plt.figure(2)
plt.subplot(2,1,1)
plt.plot(gauss_bins_center,gauss_hist[0],'s--',gauss_bins_center,gauss_expected,'o:')
plt.xlabel('Bins'), plt.ylabel('Count'), plt.title('Gauss: Distribution')
plt.legend(['histogram gr::random','calculation scipy'],loc=1)
plt.subplot(2,1,2)
plt.plot(gauss_bins_center,gauss_hist[0]/gauss_expected,'rs--')
plt.xlabel('Bins'), plt.ylabel('Relative deviation'), plt.title('Gauss: Relative deviation to scipy')
plt.figure(3)
plt.subplot(2,1,1)
plt.plot(rayleigh_bins_center,rayleigh_hist[0],'s--',rayleigh_bins_center,rayleigh_expected,'o:')
plt.xlabel('Bins'), plt.ylabel('Count'), plt.title('Rayleigh: Distribution')
plt.legend(['histogram gr::random','calculation scipy'],loc=1)
plt.subplot(2,1,2)
plt.plot(rayleigh_bins_center,rayleigh_hist[0]/rayleigh_expected,'rs--')
plt.xlabel('Bins'), plt.ylabel('Relative deviation'), plt.title('Rayleigh: Relative deviation to scipy')
plt.figure(4)
plt.subplot(2,1,1)
plt.plot(laplace_bins_center,laplace_hist[0],'s--',laplace_bins_center,laplace_expected,'o:')
plt.xlabel('Bins'), plt.ylabel('Count'), plt.title('Laplace: Distribution')
plt.legend(['histogram gr::random','calculation scipy'],loc=1)
plt.subplot(2,1,2)
plt.plot(laplace_bins_center,laplace_hist[0]/laplace_expected,'rs--')
plt.xlabel('Bins'), plt.ylabel('Relative deviation'), plt.title('Laplace: Relative deviation to scipy')
plt.show()
| gpl-3.0 |
jungla/ICOM-fluidity-toolbox | Detectors/plot_traj_v.py | 1 | 2677 | #!~/python
import fluidity_tools
import matplotlib as mpl
mpl.use('ps')
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
import myfun
import numpy as np
import pyvtk
import vtktools
import copy
import os
exp = 'm_250_8c_str'
filename = '/nethome/jmensa/fluidity-exp/'+exp+'/mli_checkpoint.detectors'
day = '400'
try: os.stat('./plot/'+exp)
except OSError: os.mkdir('./plot/'+exp)
print 'reading detectors'
det = fluidity_tools.stat_parser(filename)
keys = det.keys() # particles
print 'done.'
tt = 80
pt = 450000
step = 1
# dimensions particles
lat = 15000
lon = 7500
depth = -50
nlat = 100
nlon = 100
ndepth = 5
depth_ml = -30
y = range(100, lat+100, nlat)
x = range(100, lon+100, nlon)
z = np.linspace(0, depth, ndepth)
[Xf,Yf,Zf] = myfun.meshgrid2(x,y,z)
Yf = np.reshape(Yf,np.size(Yf,))
Xf = np.reshape(Xf,np.size(Xf,))
Zf = np.reshape(Zf,np.size(Zf,))
# dimensions archives
xstep = 150
ystep = 150
zstep = -1
Xlist = np.arange(0.0,lon+xstep,xstep)# x co-ordinates of the desired array shape
Ylist = np.arange(0.0,lat+ystep,ystep)# y co-ordinates of the desired array shape
Zlist = np.arange(0.0,depth_ml+zstep,zstep)# y co-ordinates of the desired array shape
[X,Y,Z] = myfun.meshgrid2(Xlist,Ylist,Zlist)
Y = np.reshape(Y,(np.size(Y),))
X = np.reshape(X,(np.size(X),))
Z = np.reshape(Z,(np.size(Z),))
par = np.zeros((pt,3,tt))
time = det['ElapsedTime']['value']
# read particles
for d in range(pt):
temp = det['Particles_'+myfun.digit(d+1,6)]['position']
par[d,:,:] = temp[:,0:tt]
# read ML depth from file
Tref = [0.2,0.3]
mld = []
for i in range(len(Tref)):
Data = pyvtk.VtkData('/nethome/jmensa/scripts_fluidity/2D/ML/output/'+exp+'/ML_'+myfun.digit(Tref[i],3)+'_'+exp+'_'+day+'.vtk')
mld.append(np.reshape(Data.point_data.data[0].scalars,[len(Ylist),len(Xlist)]))
# read T from archive
data = vtktools.vtu('/tamay/mensa/fluidity/'+exp+'/mli_'+day+'.pvtu')
Ts = np.reshape(data.ProbeData(vtktools.arr(zip(X,Y,Z)), 'Temperature'),[len(Zlist),len(Ylist),len(Xlist)])
# for future plotting
Yf, Zf = np.meshgrid(Ylist,Zlist)
Y, Z = np.meshgrid(y,z)
plt.figure()
# plt.gca().set_aspect('equal')
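# `fsler` (the FSLE field contoured below) is not computed in this script; it
# is assumed to be provided/precomputed elsewhere.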
plt.contourf(Y,Z,np.flipud(np.rot90(np.mean(fsler,axis=1))),10,cmap='jet')
plt.colorbar()
plt.contour(Yf,Zf,np.mean(Ts,axis=2),20,colors='White',linewidth=4.0)
# plt.contourf(np.rot90(fsler[:,nlon/2,:]))
for i in range(len(Tref)):
mmld = np.mean(mld[i],axis=1)
plt.plot(Ylist,mmld,color='k',linewidth=4.0)
# plt.text(1000*(i+1),mmld[1]+1,myfun.digit(Tref[i],3))
for d in range(pt):
    plt.plot(par[d,1,:],par[d,2,:])  # particle trajectory in the y-z plane
plt.savefig('./plot/'+exp+'/traj_'+exp+'_'+day+'.eps',bbox_inches='tight')
plt.close()
| gpl-2.0 |
Odingod/mne-python | mne/tests/test_source_estimate.py | 12 | 28321 | from __future__ import print_function
import os.path as op
from nose.tools import assert_true, assert_raises
import warnings
from copy import deepcopy
import numpy as np
from numpy.testing import (assert_array_almost_equal, assert_array_equal,
assert_allclose, assert_equal)
from scipy.fftpack import fft
from mne.datasets import testing
from mne import (stats, SourceEstimate, VolSourceEstimate, Label,
read_source_spaces, MixedSourceEstimate)
from mne import read_source_estimate, morph_data, extract_label_time_course
from mne.source_estimate import (spatio_temporal_tris_connectivity,
spatio_temporal_src_connectivity,
compute_morph_matrix, grade_to_vertices,
grade_to_tris)
from mne.minimum_norm import read_inverse_operator
from mne.label import read_labels_from_annot, label_sign_flip
from mne.utils import (_TempDir, requires_pandas, requires_sklearn,
requires_h5py, run_tests_if_main, slow_test)
warnings.simplefilter('always') # enable b/c these tests throw warnings
data_path = testing.data_path(download=False)
subjects_dir = op.join(data_path, 'subjects')
fname_inv = op.join(data_path, 'MEG', 'sample',
'sample_audvis_trunc-meg-eeg-oct-6-meg-inv.fif')
fname_t1 = op.join(data_path, 'subjects', 'sample', 'mri', 'T1.mgz')
fname_src = op.join(data_path, 'MEG', 'sample',
'sample_audvis_trunc-meg-eeg-oct-6-fwd.fif')
fname_stc = op.join(data_path, 'MEG', 'sample', 'sample_audvis_trunc-meg')
fname_smorph = op.join(data_path, 'MEG', 'sample',
'sample_audvis_trunc-meg')
fname_fmorph = op.join(data_path, 'MEG', 'sample',
'fsaverage_audvis_trunc-meg')
fname_vol = op.join(data_path, 'MEG', 'sample',
'sample_audvis_trunc-grad-vol-7-fwd-sensmap-vol.w')
fname_vsrc = op.join(data_path, 'MEG', 'sample',
'sample_audvis_trunc-meg-vol-7-fwd.fif')
@slow_test
@testing.requires_testing_data
def test_volume_stc():
"""Test volume STCs
"""
tempdir = _TempDir()
N = 100
data = np.arange(N)[:, np.newaxis]
datas = [data, data, np.arange(2)[:, np.newaxis]]
vertno = np.arange(N)
vertnos = [vertno, vertno[:, np.newaxis], np.arange(2)[:, np.newaxis]]
vertno_reads = [vertno, vertno, np.arange(2)]
for data, vertno, vertno_read in zip(datas, vertnos, vertno_reads):
stc = VolSourceEstimate(data, vertno, 0, 1)
fname_temp = op.join(tempdir, 'temp-vl.stc')
stc_new = stc
for _ in range(2):
stc_new.save(fname_temp)
stc_new = read_source_estimate(fname_temp)
assert_true(isinstance(stc_new, VolSourceEstimate))
assert_array_equal(vertno_read, stc_new.vertices)
assert_array_almost_equal(stc.data, stc_new.data)
# now let's actually read a MNE-C processed file
stc = read_source_estimate(fname_vol, 'sample')
assert_true(isinstance(stc, VolSourceEstimate))
assert_true('sample' in repr(stc))
stc_new = stc
assert_raises(ValueError, stc.save, fname_vol, ftype='whatever')
for _ in range(2):
fname_temp = op.join(tempdir, 'temp-vol.w')
stc_new.save(fname_temp, ftype='w')
stc_new = read_source_estimate(fname_temp)
assert_true(isinstance(stc_new, VolSourceEstimate))
assert_array_equal(stc.vertices, stc_new.vertices)
assert_array_almost_equal(stc.data, stc_new.data)
# save the stc as a nifti file and export
try:
import nibabel as nib
with warnings.catch_warnings(record=True):
warnings.simplefilter('always')
src = read_source_spaces(fname_vsrc)
vol_fname = op.join(tempdir, 'stc.nii.gz')
stc.save_as_volume(vol_fname, src,
dest='surf', mri_resolution=False)
with warnings.catch_warnings(record=True): # nib<->numpy
img = nib.load(vol_fname)
assert_true(img.shape == src[0]['shape'] + (len(stc.times),))
with warnings.catch_warnings(record=True): # nib<->numpy
t1_img = nib.load(fname_t1)
stc.save_as_volume(op.join(tempdir, 'stc.nii.gz'), src,
dest='mri', mri_resolution=True)
with warnings.catch_warnings(record=True): # nib<->numpy
img = nib.load(vol_fname)
assert_true(img.shape == t1_img.shape + (len(stc.times),))
assert_array_almost_equal(img.get_affine(), t1_img.get_affine(),
decimal=5)
# export without saving
img = stc.as_volume(src, dest='mri', mri_resolution=True)
assert_true(img.shape == t1_img.shape + (len(stc.times),))
assert_array_almost_equal(img.get_affine(), t1_img.get_affine(),
decimal=5)
except ImportError:
print('Save as nifti test skipped, needs NiBabel')
@testing.requires_testing_data
def test_expand():
"""Test stc expansion
"""
stc = read_source_estimate(fname_stc, 'sample')
assert_true('sample' in repr(stc))
labels_lh = read_labels_from_annot('sample', 'aparc', 'lh',
subjects_dir=subjects_dir)
new_label = labels_lh[0] + labels_lh[1]
stc_limited = stc.in_label(new_label)
stc_new = stc_limited.copy()
stc_new.data.fill(0)
for label in labels_lh[:2]:
stc_new += stc.in_label(label).expand(stc_limited.vertices)
assert_raises(TypeError, stc_new.expand, stc_limited.vertices[0])
assert_raises(ValueError, stc_new.expand, [stc_limited.vertices[0]])
# make sure we can't add unless vertno agree
assert_raises(ValueError, stc.__add__, stc.in_label(labels_lh[0]))
def _fake_stc(n_time=10):
verts = [np.arange(10), np.arange(90)]
return SourceEstimate(np.random.rand(100, n_time), verts, 0, 1e-1, 'foo')
def test_io_stc():
"""Test IO for STC files
"""
tempdir = _TempDir()
stc = _fake_stc()
stc.save(op.join(tempdir, "tmp.stc"))
stc2 = read_source_estimate(op.join(tempdir, "tmp.stc"))
assert_array_almost_equal(stc.data, stc2.data)
assert_array_almost_equal(stc.tmin, stc2.tmin)
assert_equal(len(stc.vertices), len(stc2.vertices))
for v1, v2 in zip(stc.vertices, stc2.vertices):
assert_array_almost_equal(v1, v2)
assert_array_almost_equal(stc.tstep, stc2.tstep)
@requires_h5py
def test_io_stc_h5():
"""Test IO for STC files using HDF5
"""
tempdir = _TempDir()
stc = _fake_stc()
assert_raises(ValueError, stc.save, op.join(tempdir, 'tmp'), ftype='foo')
out_name = op.join(tempdir, 'tmp')
stc.save(out_name, ftype='h5')
stc3 = read_source_estimate(out_name)
stc4 = read_source_estimate(out_name + '-stc.h5')
assert_raises(RuntimeError, read_source_estimate, out_name, subject='bar')
for stc_new in stc3, stc4:
assert_equal(stc_new.subject, stc.subject)
assert_array_equal(stc_new.data, stc.data)
assert_array_equal(stc_new.tmin, stc.tmin)
assert_array_equal(stc_new.tstep, stc.tstep)
assert_equal(len(stc_new.vertices), len(stc.vertices))
for v1, v2 in zip(stc_new.vertices, stc.vertices):
assert_array_equal(v1, v2)
def test_io_w():
"""Test IO for w files
"""
tempdir = _TempDir()
stc = _fake_stc(n_time=1)
w_fname = op.join(tempdir, 'fake')
stc.save(w_fname, ftype='w')
src = read_source_estimate(w_fname)
src.save(op.join(tempdir, 'tmp'), ftype='w')
src2 = read_source_estimate(op.join(tempdir, 'tmp-lh.w'))
assert_array_almost_equal(src.data, src2.data)
assert_array_almost_equal(src.lh_vertno, src2.lh_vertno)
assert_array_almost_equal(src.rh_vertno, src2.rh_vertno)
def test_stc_arithmetic():
"""Test arithmetic for STC files
"""
stc = _fake_stc()
data = stc.data.copy()
out = list()
for a in [data, stc]:
a = a + a * 3 + 3 * a - a ** 2 / 2
a += a
a -= a
with warnings.catch_warnings(record=True):
warnings.simplefilter('always')
a /= 2 * a
a *= -a
a += 2
a -= 1
a *= -1
a /= 2
b = 2 + a
b = 2 - a
b = +a
assert_array_equal(b.data, a.data)
with warnings.catch_warnings(record=True):
warnings.simplefilter('always')
a **= 3
out.append(a)
assert_array_equal(out[0], out[1].data)
assert_array_equal(stc.sqrt().data, np.sqrt(stc.data))
stc_mean = stc.mean()
assert_array_equal(stc_mean.data, np.mean(stc.data, 1)[:, None])
@slow_test
@testing.requires_testing_data
def test_stc_methods():
"""Test stc methods lh_data, rh_data, bin(), center_of_mass(), resample()
"""
stc = read_source_estimate(fname_stc)
# lh_data / rh_data
assert_array_equal(stc.lh_data, stc.data[:len(stc.lh_vertno)])
assert_array_equal(stc.rh_data, stc.data[len(stc.lh_vertno):])
# bin
bin = stc.bin(.12)
a = np.array((1,), dtype=stc.data.dtype)
a[0] = np.mean(stc.data[0, stc.times < .12])
assert a[0] == bin.data[0, 0]
assert_raises(ValueError, stc.center_of_mass, 'sample')
stc.lh_data[:] = 0
vertex, hemi, t = stc.center_of_mass('sample', subjects_dir=subjects_dir)
assert_true(hemi == 1)
# XXX Should design a fool-proof test case, but here were the results:
assert_equal(vertex, 124791)
assert_equal(np.round(t, 2), 0.12)
stc = read_source_estimate(fname_stc)
stc.subject = 'sample'
label_lh = read_labels_from_annot('sample', 'aparc', 'lh',
subjects_dir=subjects_dir)[0]
label_rh = read_labels_from_annot('sample', 'aparc', 'rh',
subjects_dir=subjects_dir)[0]
label_both = label_lh + label_rh
for label in (label_lh, label_rh, label_both):
assert_true(isinstance(stc.shape, tuple) and len(stc.shape) == 2)
stc_label = stc.in_label(label)
if label.hemi != 'both':
if label.hemi == 'lh':
verts = stc_label.vertices[0]
else: # label.hemi == 'rh':
verts = stc_label.vertices[1]
n_vertices_used = len(label.get_vertices_used(verts))
assert_equal(len(stc_label.data), n_vertices_used)
stc_lh = stc.in_label(label_lh)
assert_raises(ValueError, stc_lh.in_label, label_rh)
label_lh.subject = 'foo'
assert_raises(RuntimeError, stc.in_label, label_lh)
stc_new = deepcopy(stc)
o_sfreq = 1.0 / stc.tstep
# note that using no padding for this STC reduces edge ringing...
stc_new.resample(2 * o_sfreq, npad=0, n_jobs=2)
assert_true(stc_new.data.shape[1] == 2 * stc.data.shape[1])
assert_true(stc_new.tstep == stc.tstep / 2)
stc_new.resample(o_sfreq, npad=0)
assert_true(stc_new.data.shape[1] == stc.data.shape[1])
assert_true(stc_new.tstep == stc.tstep)
assert_array_almost_equal(stc_new.data, stc.data, 5)
@testing.requires_testing_data
def test_extract_label_time_course():
"""Test extraction of label time courses from stc
"""
n_stcs = 3
n_times = 50
src = read_inverse_operator(fname_inv)['src']
vertices = [src[0]['vertno'], src[1]['vertno']]
n_verts = len(vertices[0]) + len(vertices[1])
# get some labels
labels_lh = read_labels_from_annot('sample', hemi='lh',
subjects_dir=subjects_dir)
labels_rh = read_labels_from_annot('sample', hemi='rh',
subjects_dir=subjects_dir)
labels = list()
labels.extend(labels_lh[:5])
labels.extend(labels_rh[:4])
n_labels = len(labels)
label_means = np.arange(n_labels)[:, None] * np.ones((n_labels, n_times))
label_maxs = np.arange(n_labels)[:, None] * np.ones((n_labels, n_times))
# compute the mean with sign flip
label_means_flipped = np.zeros_like(label_means)
for i, label in enumerate(labels):
label_means_flipped[i] = i * np.mean(label_sign_flip(label, src))
# generate some stc's with known data
stcs = list()
for i in range(n_stcs):
data = np.zeros((n_verts, n_times))
# set the value of the stc within each label
for j, label in enumerate(labels):
if label.hemi == 'lh':
idx = np.intersect1d(vertices[0], label.vertices)
idx = np.searchsorted(vertices[0], idx)
elif label.hemi == 'rh':
idx = np.intersect1d(vertices[1], label.vertices)
idx = len(vertices[0]) + np.searchsorted(vertices[1], idx)
data[idx] = label_means[j]
this_stc = SourceEstimate(data, vertices, 0, 1)
stcs.append(this_stc)
# test some invalid inputs
assert_raises(ValueError, extract_label_time_course, stcs, labels,
src, mode='notamode')
# have an empty label
empty_label = labels[0].copy()
empty_label.vertices += 1000000
assert_raises(ValueError, extract_label_time_course, stcs, empty_label,
src, mode='mean')
# but this works:
tc = extract_label_time_course(stcs, empty_label, src, mode='mean',
allow_empty=True)
for arr in tc:
assert_true(arr.shape == (1, n_times))
assert_array_equal(arr, np.zeros((1, n_times)))
# test the different modes
modes = ['mean', 'mean_flip', 'pca_flip', 'max']
for mode in modes:
label_tc = extract_label_time_course(stcs, labels, src, mode=mode)
label_tc_method = [stc.extract_label_time_course(labels, src,
mode=mode) for stc in stcs]
assert_true(len(label_tc) == n_stcs)
assert_true(len(label_tc_method) == n_stcs)
for tc1, tc2 in zip(label_tc, label_tc_method):
assert_true(tc1.shape == (n_labels, n_times))
assert_true(tc2.shape == (n_labels, n_times))
assert_true(np.allclose(tc1, tc2, rtol=1e-8, atol=1e-16))
if mode == 'mean':
assert_array_almost_equal(tc1, label_means)
if mode == 'mean_flip':
assert_array_almost_equal(tc1, label_means_flipped)
if mode == 'max':
assert_array_almost_equal(tc1, label_maxs)
# test label with very few vertices (check SVD conditionals)
label = Label(vertices=src[0]['vertno'][:2], hemi='lh')
x = label_sign_flip(label, src)
assert_true(len(x) == 2)
label = Label(vertices=[], hemi='lh')
x = label_sign_flip(label, src)
assert_true(x.size == 0)
@slow_test
@testing.requires_testing_data
def test_morph_data():
"""Test morphing of data
"""
tempdir = _TempDir()
subject_from = 'sample'
subject_to = 'fsaverage'
stc_from = read_source_estimate(fname_smorph, subject='sample')
stc_to = read_source_estimate(fname_fmorph)
# make sure we can specify grade
stc_from.crop(0.09, 0.1) # for faster computation
stc_to.crop(0.09, 0.1) # for faster computation
assert_raises(ValueError, stc_from.morph, subject_to, grade=3, smooth=-1,
subjects_dir=subjects_dir)
stc_to1 = stc_from.morph(subject_to, grade=3, smooth=12, buffer_size=1000,
subjects_dir=subjects_dir)
stc_to1.save(op.join(tempdir, '%s_audvis-meg' % subject_to))
# make sure we can specify vertices
vertices_to = grade_to_vertices(subject_to, grade=3,
subjects_dir=subjects_dir)
stc_to2 = morph_data(subject_from, subject_to, stc_from,
grade=vertices_to, smooth=12, buffer_size=1000,
subjects_dir=subjects_dir)
# make sure we can use different buffer_size
stc_to3 = morph_data(subject_from, subject_to, stc_from,
grade=vertices_to, smooth=12, buffer_size=3,
subjects_dir=subjects_dir)
# make sure we get a warning about # of steps
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter('always')
morph_data(subject_from, subject_to, stc_from,
grade=vertices_to, smooth=1, buffer_size=3,
subjects_dir=subjects_dir)
assert_equal(len(w), 2)
assert_array_almost_equal(stc_to.data, stc_to1.data, 5)
assert_array_almost_equal(stc_to1.data, stc_to2.data)
assert_array_almost_equal(stc_to1.data, stc_to3.data)
# make sure precomputed morph matrices work
morph_mat = compute_morph_matrix(subject_from, subject_to,
stc_from.vertices, vertices_to,
smooth=12, subjects_dir=subjects_dir)
stc_to3 = stc_from.morph_precomputed(subject_to, vertices_to, morph_mat)
assert_array_almost_equal(stc_to1.data, stc_to3.data)
assert_raises(ValueError, stc_from.morph_precomputed,
subject_to, vertices_to, 'foo')
assert_raises(ValueError, stc_from.morph_precomputed,
subject_to, [vertices_to[0]], morph_mat)
assert_raises(ValueError, stc_from.morph_precomputed,
subject_to, [vertices_to[0][:-1], vertices_to[1]], morph_mat)
assert_raises(ValueError, stc_from.morph_precomputed, subject_to,
vertices_to, morph_mat, subject_from='foo')
# steps warning
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter('always')
compute_morph_matrix(subject_from, subject_to,
stc_from.vertices, vertices_to,
smooth=1, subjects_dir=subjects_dir)
assert_equal(len(w), 2)
mean_from = stc_from.data.mean(axis=0)
mean_to = stc_to1.data.mean(axis=0)
assert_true(np.corrcoef(mean_to, mean_from).min() > 0.999)
# make sure we can fill by morphing
stc_to5 = morph_data(subject_from, subject_to, stc_from, grade=None,
smooth=12, buffer_size=3, subjects_dir=subjects_dir)
assert_true(stc_to5.data.shape[0] == 163842 + 163842)
# Morph sparse data
# Make a sparse stc
stc_from.vertices[0] = stc_from.vertices[0][[100, 500]]
stc_from.vertices[1] = stc_from.vertices[1][[200]]
stc_from._data = stc_from._data[:3]
assert_raises(RuntimeError, stc_from.morph, subject_to, sparse=True,
grade=5, subjects_dir=subjects_dir)
stc_to_sparse = stc_from.morph(subject_to, grade=None, sparse=True,
subjects_dir=subjects_dir)
assert_array_almost_equal(np.sort(stc_from.data.sum(axis=1)),
np.sort(stc_to_sparse.data.sum(axis=1)))
assert_equal(len(stc_from.rh_vertno), len(stc_to_sparse.rh_vertno))
assert_equal(len(stc_from.lh_vertno), len(stc_to_sparse.lh_vertno))
assert_equal(stc_to_sparse.subject, subject_to)
assert_equal(stc_from.tmin, stc_from.tmin)
assert_equal(stc_from.tstep, stc_from.tstep)
stc_from.vertices[0] = np.array([], dtype=np.int64)
stc_from._data = stc_from._data[:1]
stc_to_sparse = stc_from.morph(subject_to, grade=None, sparse=True,
subjects_dir=subjects_dir)
assert_array_almost_equal(np.sort(stc_from.data.sum(axis=1)),
np.sort(stc_to_sparse.data.sum(axis=1)))
assert_equal(len(stc_from.rh_vertno), len(stc_to_sparse.rh_vertno))
assert_equal(len(stc_from.lh_vertno), len(stc_to_sparse.lh_vertno))
assert_equal(stc_to_sparse.subject, subject_to)
assert_equal(stc_from.tmin, stc_from.tmin)
assert_equal(stc_from.tstep, stc_from.tstep)
def _my_trans(data):
"""FFT that adds an additional dimension by repeating result"""
data_t = fft(data)
data_t = np.concatenate([data_t[:, :, None], data_t[:, :, None]], axis=2)
return data_t, None
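# _my_trans returns an array of shape (n_signals, n_times, 2) -- the FFT result
# duplicated along a new trailing axis -- plus a second value that the tests
# below do not use.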
def test_transform_data():
"""Test applying linear (time) transform to data"""
# make up some data
n_sensors, n_vertices, n_times = 10, 20, 4
kernel = np.random.randn(n_vertices, n_sensors)
sens_data = np.random.randn(n_sensors, n_times)
vertices = np.arange(n_vertices)
data = np.dot(kernel, sens_data)
for idx, tmin_idx, tmax_idx in\
zip([None, np.arange(n_vertices // 2, n_vertices)],
[None, 1], [None, 3]):
if idx is None:
idx_use = slice(None, None)
else:
idx_use = idx
data_f, _ = _my_trans(data[idx_use, tmin_idx:tmax_idx])
for stc_data in (data, (kernel, sens_data)):
stc = VolSourceEstimate(stc_data, vertices=vertices,
tmin=0., tstep=1.)
stc_data_t = stc.transform_data(_my_trans, idx=idx,
tmin_idx=tmin_idx,
tmax_idx=tmax_idx)
assert_allclose(data_f, stc_data_t)
def test_transform():
"""Test applying linear (time) transform to data"""
# make up some data
n_verts_lh, n_verts_rh, n_times = 10, 10, 10
vertices = [np.arange(n_verts_lh), n_verts_lh + np.arange(n_verts_rh)]
data = np.random.randn(n_verts_lh + n_verts_rh, n_times)
stc = SourceEstimate(data, vertices=vertices, tmin=-0.1, tstep=0.1)
# data_t.ndim > 2 & copy is True
stcs_t = stc.transform(_my_trans, copy=True)
assert_true(isinstance(stcs_t, list))
assert_array_equal(stc.times, stcs_t[0].times)
assert_equal(stc.vertices, stcs_t[0].vertices)
data = np.concatenate((stcs_t[0].data[:, :, None],
stcs_t[1].data[:, :, None]), axis=2)
data_t = stc.transform_data(_my_trans)
assert_array_equal(data, data_t) # check against stc.transform_data()
# data_t.ndim > 2 & copy is False
assert_raises(ValueError, stc.transform, _my_trans, copy=False)
# data_t.ndim = 2 & copy is True
tmp = deepcopy(stc)
stc_t = stc.transform(np.abs, copy=True)
assert_true(isinstance(stc_t, SourceEstimate))
assert_array_equal(stc.data, tmp.data) # xfrm doesn't modify original?
# data_t.ndim = 2 & copy is False
times = np.round(1000 * stc.times)
verts = np.arange(len(stc.lh_vertno),
len(stc.lh_vertno) + len(stc.rh_vertno), 1)
verts_rh = stc.rh_vertno
t_idx = [np.where(times >= -50)[0][0], np.where(times <= 500)[0][-1]]
data_t = stc.transform_data(np.abs, idx=verts, tmin_idx=t_idx[0],
tmax_idx=t_idx[-1])
stc.transform(np.abs, idx=verts, tmin=-50, tmax=500, copy=False)
assert_true(isinstance(stc, SourceEstimate))
assert_true((stc.tmin == 0.) & (stc.times[-1] == 0.5))
assert_true(len(stc.vertices[0]) == 0)
assert_equal(stc.vertices[1], verts_rh)
assert_array_equal(stc.data, data_t)
times = np.round(1000 * stc.times)
t_idx = [np.where(times >= 0)[0][0], np.where(times <= 250)[0][-1]]
data_t = stc.transform_data(np.abs, tmin_idx=t_idx[0], tmax_idx=t_idx[-1])
stc.transform(np.abs, tmin=0, tmax=250, copy=False)
assert_true((stc.tmin == 0.) & (stc.times[-1] == 0.2))
assert_array_equal(stc.data, data_t)
@requires_sklearn
def test_spatio_temporal_tris_connectivity():
"""Test spatio-temporal connectivity from triangles"""
tris = np.array([[0, 1, 2], [3, 4, 5]])
connectivity = spatio_temporal_tris_connectivity(tris, 2)
x = [1, 1, 0, 0, 0, 0, 1, 0, 0, 0, 0, 1]
components = stats.cluster_level._get_components(np.array(x), connectivity)
# _get_components works differently now...
old_fmt = [0, 0, -2, -2, -2, -2, 0, -2, -2, -2, -2, 1]
new_fmt = np.array(old_fmt)
new_fmt = [np.nonzero(new_fmt == v)[0]
for v in np.unique(new_fmt[new_fmt >= 0])]
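    # i.e. keep only the non-negative labels and convert the old label-vector
    # output into a list of index arrays, one array per connected component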
    assert_equal(len(new_fmt), len(components))
for c, n in zip(components, new_fmt):
assert_array_equal(c, n)
@testing.requires_testing_data
def test_spatio_temporal_src_connectivity():
"""Test spatio-temporal connectivity from source spaces"""
tris = np.array([[0, 1, 2], [3, 4, 5]])
src = [dict(), dict()]
connectivity = spatio_temporal_tris_connectivity(tris, 2)
src[0]['use_tris'] = np.array([[0, 1, 2]])
src[1]['use_tris'] = np.array([[0, 1, 2]])
src[0]['vertno'] = np.array([0, 1, 2])
src[1]['vertno'] = np.array([0, 1, 2])
connectivity2 = spatio_temporal_src_connectivity(src, 2)
assert_array_equal(connectivity.todense(), connectivity2.todense())
# add test for dist connectivity
src[0]['dist'] = np.ones((3, 3)) - np.eye(3)
src[1]['dist'] = np.ones((3, 3)) - np.eye(3)
src[0]['vertno'] = [0, 1, 2]
src[1]['vertno'] = [0, 1, 2]
connectivity3 = spatio_temporal_src_connectivity(src, 2, dist=2)
assert_array_equal(connectivity.todense(), connectivity3.todense())
# add test for source space connectivity with omitted vertices
inverse_operator = read_inverse_operator(fname_inv)
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter('always')
src_ = inverse_operator['src']
connectivity = spatio_temporal_src_connectivity(src_, n_times=2)
assert len(w) == 1
a = connectivity.shape[0] / 2
b = sum([s['nuse'] for s in inverse_operator['src']])
assert_true(a == b)
assert_equal(grade_to_tris(5).shape, [40960, 3])
@requires_pandas
def test_to_data_frame():
"""Test stc Pandas exporter"""
n_vert, n_times = 10, 5
vertices = [np.arange(n_vert, dtype=np.int), np.empty(0, dtype=np.int)]
data = np.random.randn(n_vert, n_times)
stc_surf = SourceEstimate(data, vertices=vertices, tmin=0, tstep=1,
subject='sample')
stc_vol = VolSourceEstimate(data, vertices=vertices[0], tmin=0, tstep=1,
subject='sample')
for stc in [stc_surf, stc_vol]:
assert_raises(ValueError, stc.to_data_frame, index=['foo', 'bar'])
for ncat, ind in zip([1, 0], ['time', ['subject', 'time']]):
df = stc.to_data_frame(index=ind)
            assert_true(df.index.names == (ind if isinstance(ind, list)
                                           else [ind]))
assert_array_equal(df.values.T[ncat:], stc.data)
            # test that non-indexed data are present as categorical variables
assert_true(all([c in ['time', 'subject'] for c in
df.reset_index().columns][:2]))
def test_get_peak():
"""Test peak getter
"""
n_vert, n_times = 10, 5
vertices = [np.arange(n_vert, dtype=np.int), np.empty(0, dtype=np.int)]
data = np.random.randn(n_vert, n_times)
stc_surf = SourceEstimate(data, vertices=vertices, tmin=0, tstep=1,
subject='sample')
stc_vol = VolSourceEstimate(data, vertices=vertices[0], tmin=0, tstep=1,
subject='sample')
for ii, stc in enumerate([stc_surf, stc_vol]):
assert_raises(ValueError, stc.get_peak, tmin=-100)
assert_raises(ValueError, stc.get_peak, tmax=90)
assert_raises(ValueError, stc.get_peak, tmin=0.002, tmax=0.001)
vert_idx, time_idx = stc.get_peak()
vertno = np.concatenate(stc.vertices) if ii == 0 else stc.vertices
assert_true(vert_idx in vertno)
assert_true(time_idx in stc.times)
ch_idx, time_idx = stc.get_peak(vert_as_index=True,
time_as_index=True)
        assert_true(ch_idx < stc.data.shape[0])
assert_true(time_idx < len(stc.times))
@testing.requires_testing_data
def test_mixed_stc():
"""Test source estimate from mixed source space
"""
N = 90 # number of sources
T = 2 # number of time points
S = 3 # number of source spaces
data = np.random.randn(N, T)
vertno = S * [np.arange(N // S)]
# make sure error is raised if vertices are not a list of length >= 2
assert_raises(ValueError, MixedSourceEstimate, data=data,
vertices=[np.arange(N)])
stc = MixedSourceEstimate(data, vertno, 0, 1)
vol = read_source_spaces(fname_vsrc)
# make sure error is raised for plotting surface with volume source
assert_raises(ValueError, stc.plot_surface, src=vol)
run_tests_if_main()
| bsd-3-clause |
fabianp/scikit-learn | sklearn/utils/validation.py | 66 | 23629 | """Utilities for input validation"""
# Authors: Olivier Grisel
# Gael Varoquaux
# Andreas Mueller
# Lars Buitinck
# Alexandre Gramfort
# Nicolas Tresegnie
# License: BSD 3 clause
import warnings
import numbers
import numpy as np
import scipy.sparse as sp
from ..externals import six
from inspect import getargspec
FLOAT_DTYPES = (np.float64, np.float32, np.float16)
class DataConversionWarning(UserWarning):
"""A warning on implicit data conversions happening in the code"""
pass
warnings.simplefilter("always", DataConversionWarning)
class NonBLASDotWarning(UserWarning):
"""A warning on implicit dispatch to numpy.dot"""
class NotFittedError(ValueError, AttributeError):
"""Exception class to raise if estimator is used before fitting
This class inherits from both ValueError and AttributeError to help with
exception handling and backward compatibility.
"""
# Silenced by default to reduce verbosity. Turn on at runtime for
# performance profiling.
warnings.simplefilter('ignore', NonBLASDotWarning)
def _assert_all_finite(X):
"""Like assert_all_finite, but only for ndarray."""
X = np.asanyarray(X)
# First try an O(n) time, O(1) space solution for the common case that
# everything is finite; fall back to O(n) space np.isfinite to prevent
# false positives from overflow in sum method.
if (X.dtype.char in np.typecodes['AllFloat'] and not np.isfinite(X.sum())
and not np.isfinite(X).all()):
raise ValueError("Input contains NaN, infinity"
" or a value too large for %r." % X.dtype)
def assert_all_finite(X):
"""Throw a ValueError if X contains NaN or infinity.
Input MUST be an np.ndarray instance or a scipy.sparse matrix."""
_assert_all_finite(X.data if sp.issparse(X) else X)
def as_float_array(X, copy=True, force_all_finite=True):
"""Converts an array-like to an array of floats
The new dtype will be np.float32 or np.float64, depending on the original
type. The function can create a copy or modify the argument depending
    on the ``copy`` argument.
Parameters
----------
X : {array-like, sparse matrix}
copy : bool, optional
If True, a copy of X will be created. If False, a copy may still be
returned if X's dtype is not a floating point type.
force_all_finite : boolean (default=True)
Whether to raise an error on np.inf and np.nan in X.
Returns
-------
XT : {array, sparse matrix}
An array of type np.float
"""
if isinstance(X, np.matrix) or (not isinstance(X, np.ndarray)
and not sp.issparse(X)):
return check_array(X, ['csr', 'csc', 'coo'], dtype=np.float64,
copy=copy, force_all_finite=force_all_finite,
ensure_2d=False)
elif sp.issparse(X) and X.dtype in [np.float32, np.float64]:
return X.copy() if copy else X
elif X.dtype in [np.float32, np.float64]: # is numpy array
return X.copy('F' if X.flags['F_CONTIGUOUS'] else 'C') if copy else X
else:
return X.astype(np.float32 if X.dtype == np.int32 else np.float64)
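# Minimal usage sketch (doctest-style; values chosen only for illustration):
#
#   >>> import numpy as np
#   >>> as_float_array(np.array([1, 2, 3], dtype=np.int64)).dtype
#   dtype('float64')
#   >>> X = np.array([1., 2.], dtype=np.float32)
#   >>> as_float_array(X, copy=False) is X    # already floating point, no copy
#   True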
def _is_arraylike(x):
"""Returns whether the input is array-like"""
return (hasattr(x, '__len__') or
hasattr(x, 'shape') or
hasattr(x, '__array__'))
def _num_samples(x):
"""Return number of samples in array-like x."""
if hasattr(x, 'fit'):
# Don't get num_samples from an ensembles length!
raise TypeError('Expected sequence or array-like, got '
'estimator %s' % x)
if not hasattr(x, '__len__') and not hasattr(x, 'shape'):
if hasattr(x, '__array__'):
x = np.asarray(x)
else:
raise TypeError("Expected sequence or array-like, got %s" %
type(x))
if hasattr(x, 'shape'):
if len(x.shape) == 0:
raise TypeError("Singleton array %r cannot be considered"
" a valid collection." % x)
return x.shape[0]
else:
return len(x)
def _shape_repr(shape):
"""Return a platform independent reprensentation of an array shape
Under Python 2, the `long` type introduces an 'L' suffix when using the
default %r format for tuples of integers (typically used to store the shape
of an array).
Under Windows 64 bit (and Python 2), the `long` type is used by default
in numpy shapes even when the integer dimensions are well below 32 bit.
The platform specific type causes string messages or doctests to change
from one platform to another which is not desirable.
Under Python 3, there is no more `long` type so the `L` suffix is never
introduced in string representation.
>>> _shape_repr((1, 2))
'(1, 2)'
>>> one = 2 ** 64 / 2 ** 64 # force an upcast to `long` under Python 2
>>> _shape_repr((one, 2 * one))
'(1, 2)'
>>> _shape_repr((1,))
'(1,)'
>>> _shape_repr(())
'()'
"""
if len(shape) == 0:
return "()"
joined = ", ".join("%d" % e for e in shape)
if len(shape) == 1:
# special notation for singleton tuples
joined += ','
return "(%s)" % joined
def check_consistent_length(*arrays):
"""Check that all arrays have consistent first dimensions.
Checks whether all objects in arrays have the same shape or length.
Parameters
----------
*arrays : list or tuple of input objects.
Objects that will be checked for consistent length.
"""
uniques = np.unique([_num_samples(X) for X in arrays if X is not None])
if len(uniques) > 1:
raise ValueError("Found arrays with inconsistent numbers of samples: "
"%s" % str(uniques))
def indexable(*iterables):
"""Make arrays indexable for cross-validation.
Checks consistent length, passes through None, and ensures that everything
can be indexed by converting sparse matrices to csr and converting
    non-iterable objects to arrays.
Parameters
----------
*iterables : lists, dataframes, arrays, sparse matrices
List of objects to ensure sliceability.
"""
result = []
for X in iterables:
if sp.issparse(X):
result.append(X.tocsr())
elif hasattr(X, "__getitem__") or hasattr(X, "iloc"):
result.append(X)
elif X is None:
result.append(X)
else:
result.append(np.array(X))
check_consistent_length(*result)
return result
def _ensure_sparse_format(spmatrix, accept_sparse, dtype, copy,
force_all_finite):
"""Convert a sparse matrix to a given format.
Checks the sparse format of spmatrix and converts if necessary.
Parameters
----------
spmatrix : scipy sparse matrix
Input to validate and convert.
accept_sparse : string, list of string or None (default=None)
String[s] representing allowed sparse matrix formats ('csc',
'csr', 'coo', 'dok', 'bsr', 'lil', 'dia'). None means that sparse
matrix input will raise an error. If the input is sparse but not in
the allowed format, it will be converted to the first listed format.
    dtype : string, type or None (default=None)
Data type of result. If None, the dtype of the input is preserved.
copy : boolean (default=False)
Whether a forced copy will be triggered. If copy=False, a copy might
be triggered by a conversion.
force_all_finite : boolean (default=True)
Whether to raise an error on np.inf and np.nan in X.
Returns
-------
spmatrix_converted : scipy sparse matrix.
Matrix that is ensured to have an allowed type.
"""
if accept_sparse in [None, False]:
raise TypeError('A sparse matrix was passed, but dense '
'data is required. Use X.toarray() to '
'convert to a dense numpy array.')
if dtype is None:
dtype = spmatrix.dtype
changed_format = False
if (isinstance(accept_sparse, (list, tuple))
and spmatrix.format not in accept_sparse):
# create new with correct sparse
spmatrix = spmatrix.asformat(accept_sparse[0])
changed_format = True
if dtype != spmatrix.dtype:
# convert dtype
spmatrix = spmatrix.astype(dtype)
elif copy and not changed_format:
# force copy
spmatrix = spmatrix.copy()
if force_all_finite:
if not hasattr(spmatrix, "data"):
warnings.warn("Can't check %s sparse matrix for nan or inf."
% spmatrix.format)
else:
_assert_all_finite(spmatrix.data)
return spmatrix
def check_array(array, accept_sparse=None, dtype="numeric", order=None,
copy=False, force_all_finite=True, ensure_2d=True,
allow_nd=False, ensure_min_samples=1, ensure_min_features=1,
warn_on_dtype=False, estimator=None):
"""Input validation on an array, list, sparse matrix or similar.
    By default, the input is converted to an at least 2D numpy array.
If the dtype of the array is object, attempt converting to float,
raising on failure.
Parameters
----------
array : object
Input object to check / convert.
accept_sparse : string, list of string or None (default=None)
String[s] representing allowed sparse matrix formats, such as 'csc',
'csr', etc. None means that sparse matrix input will raise an error.
If the input is sparse but not in the allowed format, it will be
converted to the first listed format.
dtype : string, type, list of types or None (default="numeric")
Data type of result. If None, the dtype of the input is preserved.
If "numeric", dtype is preserved unless array.dtype is object.
        If dtype is a list of types, conversion to the first type is
        performed only if the dtype of the input is not in the list.
order : 'F', 'C' or None (default=None)
Whether an array will be forced to be fortran or c-style.
copy : boolean (default=False)
Whether a forced copy will be triggered. If copy=False, a copy might
be triggered by a conversion.
force_all_finite : boolean (default=True)
Whether to raise an error on np.inf and np.nan in X.
ensure_2d : boolean (default=True)
Whether to make X at least 2d.
allow_nd : boolean (default=False)
Whether to allow X.ndim > 2.
ensure_min_samples : int (default=1)
Make sure that the array has a minimum number of samples in its first
axis (rows for a 2D array). Setting to 0 disables this check.
ensure_min_features : int (default=1)
Make sure that the 2D array has some minimum number of features
(columns). The default value of 1 rejects empty datasets.
This check is only enforced when the input data has effectively 2
dimensions or is originally 1D and ``ensure_2d`` is True. Setting to 0
disables this check.
warn_on_dtype : boolean (default=False)
Raise DataConversionWarning if the dtype of the input data structure
does not match the requested dtype, causing a memory copy.
estimator : str or estimator instance (default=None)
If passed, include the name of the estimator in warning messages.
Returns
-------
X_converted : object
The converted and validated X.
"""
if isinstance(accept_sparse, str):
accept_sparse = [accept_sparse]
# store whether originally we wanted numeric dtype
dtype_numeric = dtype == "numeric"
dtype_orig = getattr(array, "dtype", None)
if not hasattr(dtype_orig, 'kind'):
# not a data type (e.g. a column named dtype in a pandas DataFrame)
dtype_orig = None
if dtype_numeric:
if dtype_orig is not None and dtype_orig.kind == "O":
# if input is object, convert to float.
dtype = np.float64
else:
dtype = None
if isinstance(dtype, (list, tuple)):
if dtype_orig is not None and dtype_orig in dtype:
# no dtype conversion required
dtype = None
else:
# dtype conversion required. Let's select the first element of the
# list of accepted types.
dtype = dtype[0]
if sp.issparse(array):
array = _ensure_sparse_format(array, accept_sparse, dtype, copy,
force_all_finite)
else:
if ensure_2d:
array = np.atleast_2d(array)
array = np.array(array, dtype=dtype, order=order, copy=copy)
# make sure we actually converted to numeric:
if dtype_numeric and array.dtype.kind == "O":
array = array.astype(np.float64)
if not allow_nd and array.ndim >= 3:
raise ValueError("Found array with dim %d. Expected <= 2" %
array.ndim)
if force_all_finite:
_assert_all_finite(array)
shape_repr = _shape_repr(array.shape)
if ensure_min_samples > 0:
n_samples = _num_samples(array)
if n_samples < ensure_min_samples:
raise ValueError("Found array with %d sample(s) (shape=%s) while a"
" minimum of %d is required."
% (n_samples, shape_repr, ensure_min_samples))
if ensure_min_features > 0 and array.ndim == 2:
n_features = array.shape[1]
if n_features < ensure_min_features:
raise ValueError("Found array with %d feature(s) (shape=%s) while"
" a minimum of %d is required."
% (n_features, shape_repr, ensure_min_features))
if warn_on_dtype and dtype_orig is not None and array.dtype != dtype_orig:
msg = ("Data with input dtype %s was converted to %s"
% (dtype_orig, array.dtype))
if estimator is not None:
if not isinstance(estimator, six.string_types):
estimator = estimator.__class__.__name__
msg += " by %s" % estimator
warnings.warn(msg, DataConversionWarning)
return array
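# Usage sketch (only keywords documented above; expected results in comments):
#
#   check_array([[1, 2], [3, 4]])            # -> 2D ndarray, numeric dtype kept
#   check_array([1, 2, 3], ensure_2d=False)  # -> 1D ndarray, 2D not enforced
#   check_array(sp.csr_matrix(np.eye(3)), accept_sparse=['csr', 'csc'])
#                                            # -> CSR input accepted unchanged
#   check_array(np.ones((2, 3, 4)))          # raises ValueError: dim 3, allow_nd=False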
def check_X_y(X, y, accept_sparse=None, dtype="numeric", order=None, copy=False,
force_all_finite=True, ensure_2d=True, allow_nd=False,
multi_output=False, ensure_min_samples=1,
ensure_min_features=1, y_numeric=False,
warn_on_dtype=False, estimator=None):
"""Input validation for standard estimators.
Checks X and y for consistent length, enforces X 2d and y 1d.
Standard input checks are only applied to y. For multi-label y,
set multi_output=True to allow 2d and sparse y.
If the dtype of X is object, attempt converting to float,
raising on failure.
Parameters
----------
X : nd-array, list or sparse matrix
Input data.
y : nd-array, list or sparse matrix
Labels.
accept_sparse : string, list of string or None (default=None)
String[s] representing allowed sparse matrix formats, such as 'csc',
'csr', etc. None means that sparse matrix input will raise an error.
If the input is sparse but not in the allowed format, it will be
converted to the first listed format.
dtype : string, type, list of types or None (default="numeric")
Data type of result. If None, the dtype of the input is preserved.
If "numeric", dtype is preserved unless array.dtype is object.
        If dtype is a list of types, conversion to the first type is
        performed only if the dtype of the input is not in the list.
order : 'F', 'C' or None (default=None)
Whether an array will be forced to be fortran or c-style.
copy : boolean (default=False)
Whether a forced copy will be triggered. If copy=False, a copy might
be triggered by a conversion.
force_all_finite : boolean (default=True)
Whether to raise an error on np.inf and np.nan in X.
ensure_2d : boolean (default=True)
Whether to make X at least 2d.
allow_nd : boolean (default=False)
Whether to allow X.ndim > 2.
multi_output : boolean (default=False)
Whether to allow 2-d y (array or sparse matrix). If false, y will be
validated as a vector.
ensure_min_samples : int (default=1)
Make sure that X has a minimum number of samples in its first
axis (rows for a 2D array).
ensure_min_features : int (default=1)
Make sure that the 2D array has some minimum number of features
(columns). The default value of 1 rejects empty datasets.
This check is only enforced when X has effectively 2 dimensions or
is originally 1D and ``ensure_2d`` is True. Setting to 0 disables
this check.
y_numeric : boolean (default=False)
Whether to ensure that y has a numeric type. If dtype of y is object,
it is converted to float64. Should only be used for regression
algorithms.
warn_on_dtype : boolean (default=False)
Raise DataConversionWarning if the dtype of the input data structure
does not match the requested dtype, causing a memory copy.
estimator : str or estimator instance (default=None)
If passed, include the name of the estimator in warning messages.
Returns
-------
X_converted : object
The converted and validated X.
"""
X = check_array(X, accept_sparse, dtype, order, copy, force_all_finite,
ensure_2d, allow_nd, ensure_min_samples,
ensure_min_features, warn_on_dtype, estimator)
if multi_output:
y = check_array(y, 'csr', force_all_finite=True, ensure_2d=False,
dtype=None)
else:
y = column_or_1d(y, warn=True)
_assert_all_finite(y)
if y_numeric and y.dtype.kind == 'O':
y = y.astype(np.float64)
check_consistent_length(X, y)
return X, y
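# Usage sketch:
#
#   X = [[1., 2.], [3., 4.], [5., 6.]]
#   y = [0, 1, 0]
#   X_checked, y_checked = check_X_y(X, y)   # X -> 2D array, y -> 1D array
#   check_X_y(X, [0, 1])                     # raises ValueError: inconsistent
#                                            # numbers of samples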
def column_or_1d(y, warn=False):
""" Ravel column or 1d numpy array, else raises an error
Parameters
----------
y : array-like
warn : boolean, default False
To control display of warnings.
Returns
-------
y : array
"""
shape = np.shape(y)
if len(shape) == 1:
return np.ravel(y)
if len(shape) == 2 and shape[1] == 1:
if warn:
warnings.warn("A column-vector y was passed when a 1d array was"
" expected. Please change the shape of y to "
"(n_samples, ), for example using ravel().",
DataConversionWarning, stacklevel=2)
return np.ravel(y)
raise ValueError("bad input shape {0}".format(shape))
def check_random_state(seed):
"""Turn seed into a np.random.RandomState instance
If seed is None, return the RandomState singleton used by np.random.
If seed is an int, return a new RandomState instance seeded with seed.
If seed is already a RandomState instance, return it.
Otherwise raise ValueError.
"""
if seed is None or seed is np.random:
return np.random.mtrand._rand
if isinstance(seed, (numbers.Integral, np.integer)):
return np.random.RandomState(seed)
if isinstance(seed, np.random.RandomState):
return seed
raise ValueError('%r cannot be used to seed a numpy.random.RandomState'
' instance' % seed)
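# Usage sketch:
#
#   check_random_state(None)        # -> the global numpy RandomState singleton
#   check_random_state(0)           # -> a new RandomState seeded with 0
#   rng = np.random.RandomState(42)
#   check_random_state(rng) is rng  # -> True, instances are passed through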
def has_fit_parameter(estimator, parameter):
"""Checks whether the estimator's fit method supports the given parameter.
Examples
--------
>>> from sklearn.svm import SVC
>>> has_fit_parameter(SVC(), "sample_weight")
True
"""
return parameter in getargspec(estimator.fit)[0]
def check_symmetric(array, tol=1E-10, raise_warning=True,
raise_exception=False):
"""Make sure that array is 2D, square and symmetric.
If the array is not symmetric, then a symmetrized version is returned.
Optionally, a warning or exception is raised if the matrix is not
symmetric.
Parameters
----------
array : nd-array or sparse matrix
Input object to check / convert. Must be two-dimensional and square,
otherwise a ValueError will be raised.
tol : float
Absolute tolerance for equivalence of arrays. Default = 1E-10.
raise_warning : boolean (default=True)
If True then raise a warning if conversion is required.
raise_exception : boolean (default=False)
If True then raise an exception if array is not symmetric.
Returns
-------
array_sym : ndarray or sparse matrix
Symmetrized version of the input array, i.e. the average of array
and array.transpose(). If sparse, then duplicate entries are first
summed and zeros are eliminated.
"""
if (array.ndim != 2) or (array.shape[0] != array.shape[1]):
raise ValueError("array must be 2-dimensional and square. "
"shape = {0}".format(array.shape))
if sp.issparse(array):
diff = array - array.T
# only csr, csc, and coo have `data` attribute
if diff.format not in ['csr', 'csc', 'coo']:
diff = diff.tocsr()
symmetric = np.all(abs(diff.data) < tol)
else:
symmetric = np.allclose(array, array.T, atol=tol)
if not symmetric:
if raise_exception:
raise ValueError("Array must be symmetric")
if raise_warning:
warnings.warn("Array is not symmetric, and will be converted "
"to symmetric by average with its transpose.")
if sp.issparse(array):
conversion = 'to' + array.format
array = getattr(0.5 * (array + array.T), conversion)()
else:
array = 0.5 * (array + array.T)
return array
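# Usage sketch:
#
#   A = np.array([[0., 1.], [2., 0.]])
#   check_symmetric(A)                        # warns, returns 0.5 * (A + A.T)
#   check_symmetric(A, raise_exception=True)  # raises ValueError instead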
def check_is_fitted(estimator, attributes, msg=None, all_or_any=all):
"""Perform is_fitted validation for estimator.
Checks if the estimator is fitted by verifying the presence of
"all_or_any" of the passed attributes and raises a NotFittedError with the
given message.
Parameters
----------
estimator : estimator instance.
estimator instance for which the check is performed.
attributes : attribute name(s) given as string or a list/tuple of strings
Eg. : ["coef_", "estimator_", ...], "coef_"
msg : string
The default error message is, "This %(name)s instance is not fitted
yet. Call 'fit' with appropriate arguments before using this method."
For custom messages if "%(name)s" is present in the message string,
it is substituted for the estimator name.
Eg. : "Estimator, %(name)s, must be fitted before sparsifying".
all_or_any : callable, {all, any}, default all
Specify whether all or any of the given attributes must exist.
"""
if msg is None:
msg = ("This %(name)s instance is not fitted yet. Call 'fit' with "
"appropriate arguments before using this method.")
if not hasattr(estimator, 'fit'):
raise TypeError("%s is not an estimator instance." % (estimator))
if not isinstance(attributes, (list, tuple)):
attributes = [attributes]
if not all_or_any([hasattr(estimator, attr) for attr in attributes]):
raise NotFittedError(msg % {'name': type(estimator).__name__})
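if __name__ == "__main__":
    # Self-check sketch: exercise check_is_fitted with a toy estimator that
    # follows the trailing-underscore attribute convention described above.
    class _ToyEstimator(object):
        def fit(self, X, y=None):
            self.coef_ = np.zeros(np.asarray(X).shape[1])
            return self
    est = _ToyEstimator()
    try:
        check_is_fitted(est, 'coef_')
    except NotFittedError as exc:
        print("before fit: %s" % exc)
    est.fit(np.ones((2, 3)))
    check_is_fitted(est, 'coef_')
    print("after fit: 'coef_' found, estimator considered fitted")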
| bsd-3-clause |
sarahgrogan/scikit-learn | examples/bicluster/plot_spectral_biclustering.py | 403 | 2011 | """
=============================================
A demo of the Spectral Biclustering algorithm
=============================================
This example demonstrates how to generate a checkerboard dataset and
bicluster it using the Spectral Biclustering algorithm.
The data is generated with the ``make_checkerboard`` function, then
shuffled and passed to the Spectral Biclustering algorithm. The rows
and columns of the shuffled matrix are rearranged to show the
biclusters found by the algorithm.
The outer product of the row and column label vectors shows a
representation of the checkerboard structure.
"""
print(__doc__)
# Author: Kemal Eren <[email protected]>
# License: BSD 3 clause
import numpy as np
from matplotlib import pyplot as plt
from sklearn.datasets import make_checkerboard
from sklearn.datasets import samples_generator as sg
from sklearn.cluster.bicluster import SpectralBiclustering
from sklearn.metrics import consensus_score
n_clusters = (4, 3)
data, rows, columns = make_checkerboard(
shape=(300, 300), n_clusters=n_clusters, noise=10,
shuffle=False, random_state=0)
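# `rows` and `columns` are boolean bicluster-membership indicator matrices (one
# row per bicluster); consensus_score below compares them with the indicators
# recovered by the fitted model.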
plt.matshow(data, cmap=plt.cm.Blues)
plt.title("Original dataset")
data, row_idx, col_idx = sg._shuffle(data, random_state=0)
plt.matshow(data, cmap=plt.cm.Blues)
plt.title("Shuffled dataset")
model = SpectralBiclustering(n_clusters=n_clusters, method='log',
random_state=0)
model.fit(data)
score = consensus_score(model.biclusters_,
(rows[:, row_idx], columns[:, col_idx]))
print("consensus score: {:.1f}".format(score))
fit_data = data[np.argsort(model.row_labels_)]
fit_data = fit_data[:, np.argsort(model.column_labels_)]
plt.matshow(fit_data, cmap=plt.cm.Blues)
plt.title("After biclustering; rearranged to show biclusters")
plt.matshow(np.outer(np.sort(model.row_labels_) + 1,
np.sort(model.column_labels_) + 1),
cmap=plt.cm.Blues)
plt.title("Checkerboard structure of rearranged data")
plt.show()
| bsd-3-clause |
datapythonista/pandas | pandas/tests/frame/methods/test_convert_dtypes.py | 2 | 1129 | import numpy as np
import pytest
import pandas as pd
import pandas._testing as tm
class TestConvertDtypes:
@pytest.mark.parametrize(
"convert_integer, expected", [(False, np.dtype("int32")), (True, "Int32")]
)
def test_convert_dtypes(self, convert_integer, expected):
# Specific types are tested in tests/series/test_dtypes.py
# Just check that it works for DataFrame here
df = pd.DataFrame(
{
"a": pd.Series([1, 2, 3], dtype=np.dtype("int32")),
"b": pd.Series(["x", "y", "z"], dtype=np.dtype("O")),
}
)
result = df.convert_dtypes(True, True, convert_integer, False)
expected = pd.DataFrame(
{
"a": pd.Series([1, 2, 3], dtype=expected),
"b": pd.Series(["x", "y", "z"], dtype="string"),
}
)
tm.assert_frame_equal(result, expected)
def test_convert_empty(self):
# Empty DataFrame can pass convert_dtypes, see GH#40393
empty_df = pd.DataFrame()
tm.assert_frame_equal(empty_df, empty_df.convert_dtypes())
| bsd-3-clause |
mahak/spark | python/pyspark/sql/tests/test_pandas_map.py | 20 | 4612 | #
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import os
import time
import unittest
from pyspark.testing.sqlutils import ReusedSQLTestCase, have_pandas, have_pyarrow, \
pandas_requirement_message, pyarrow_requirement_message
if have_pandas:
import pandas as pd
@unittest.skipIf(
not have_pandas or not have_pyarrow,
pandas_requirement_message or pyarrow_requirement_message) # type: ignore[arg-type]
class MapInPandasTests(ReusedSQLTestCase):
@classmethod
def setUpClass(cls):
ReusedSQLTestCase.setUpClass()
# Synchronize default timezone between Python and Java
cls.tz_prev = os.environ.get("TZ", None) # save current tz if set
tz = "America/Los_Angeles"
os.environ["TZ"] = tz
time.tzset()
cls.sc.environment["TZ"] = tz
cls.spark.conf.set("spark.sql.session.timeZone", tz)
@classmethod
def tearDownClass(cls):
del os.environ["TZ"]
if cls.tz_prev is not None:
os.environ["TZ"] = cls.tz_prev
time.tzset()
ReusedSQLTestCase.tearDownClass()
def test_map_partitions_in_pandas(self):
def func(iterator):
for pdf in iterator:
assert isinstance(pdf, pd.DataFrame)
assert pdf.columns == ['id']
yield pdf
df = self.spark.range(10)
actual = df.mapInPandas(func, 'id long').collect()
expected = df.collect()
self.assertEqual(actual, expected)
def test_multiple_columns(self):
data = [(1, "foo"), (2, None), (3, "bar"), (4, "bar")]
df = self.spark.createDataFrame(data, "a int, b string")
def func(iterator):
for pdf in iterator:
assert isinstance(pdf, pd.DataFrame)
assert [d.name for d in list(pdf.dtypes)] == ['int32', 'object']
yield pdf
actual = df.mapInPandas(func, df.schema).collect()
expected = df.collect()
self.assertEqual(actual, expected)
def test_different_output_length(self):
def func(iterator):
for _ in iterator:
yield pd.DataFrame({'a': list(range(100))})
df = self.spark.range(10)
actual = df.repartition(1).mapInPandas(func, 'a long').collect()
self.assertEqual(set((r.a for r in actual)), set(range(100)))
def test_empty_iterator(self):
def empty_iter(_):
return iter([])
self.assertEqual(
self.spark.range(10).mapInPandas(empty_iter, 'a int, b string').count(), 0)
def test_empty_rows(self):
def empty_rows(_):
return iter([pd.DataFrame({'a': []})])
self.assertEqual(
self.spark.range(10).mapInPandas(empty_rows, 'a int').count(), 0)
def test_chain_map_partitions_in_pandas(self):
def func(iterator):
for pdf in iterator:
assert isinstance(pdf, pd.DataFrame)
assert pdf.columns == ['id']
yield pdf
df = self.spark.range(10)
actual = df.mapInPandas(func, 'id long').mapInPandas(func, 'id long').collect()
expected = df.collect()
self.assertEqual(actual, expected)
def test_self_join(self):
# SPARK-34319: self-join with MapInPandas
df1 = self.spark.range(10)
df2 = df1.mapInPandas(lambda iter: iter, 'id long')
actual = df2.join(df2).collect()
expected = df1.join(df1).collect()
self.assertEqual(sorted(actual), sorted(expected))
if __name__ == "__main__":
from pyspark.sql.tests.test_pandas_map import * # noqa: F401
try:
import xmlrunner # type: ignore[import]
testRunner = xmlrunner.XMLTestRunner(output='target/test-reports', verbosity=2)
except ImportError:
testRunner = None
unittest.main(testRunner=testRunner, verbosity=2)
| apache-2.0 |
ssaeger/scikit-learn | examples/decomposition/plot_pca_vs_lda.py | 176 | 2027 | """
=======================================================
Comparison of LDA and PCA 2D projection of Iris dataset
=======================================================
The Iris dataset represents 3 kinds of Iris flowers (Setosa, Versicolour
and Virginica) with 4 attributes: sepal length, sepal width, petal length
and petal width.
Principal Component Analysis (PCA) applied to this data identifies the
combination of attributes (principal components, or directions in the
feature space) that account for the most variance in the data. Here we
plot the different samples on the first 2 principal components.
Linear Discriminant Analysis (LDA) tries to identify attributes that
account for the most variance *between classes*. In particular,
LDA, in contrast to PCA, is a supervised method, using known class labels.
"""
print(__doc__)
import matplotlib.pyplot as plt
from sklearn import datasets
from sklearn.decomposition import PCA
from sklearn.discriminant_analysis import LinearDiscriminantAnalysis
iris = datasets.load_iris()
X = iris.data
y = iris.target
target_names = iris.target_names
pca = PCA(n_components=2)
X_r = pca.fit(X).transform(X)
lda = LinearDiscriminantAnalysis(n_components=2)
X_r2 = lda.fit(X, y).transform(X)
# Percentage of variance explained by each component
print('explained variance ratio (first two components): %s'
% str(pca.explained_variance_ratio_))
plt.figure()
colors = ['navy', 'turquoise', 'darkorange']
lw = 2
for color, i, target_name in zip(colors, [0, 1, 2], target_names):
plt.scatter(X_r[y == i, 0], X_r[y == i, 1], color=color, alpha=.8, lw=lw,
label=target_name)
plt.legend(loc='best', shadow=False, scatterpoints=1)
plt.title('PCA of IRIS dataset')
plt.figure()
for color, i, target_name in zip(colors, [0, 1, 2], target_names):
plt.scatter(X_r2[y == i, 0], X_r2[y == i, 1], alpha=.8, color=color,
label=target_name)
plt.legend(loc='best', shadow=False, scatterpoints=1)
plt.title('LDA of IRIS dataset')
plt.show()
| bsd-3-clause |
dingocuster/scikit-learn | sklearn/utils/tests/test_multiclass.py | 128 | 12853 |
from __future__ import division
import numpy as np
import scipy.sparse as sp
from itertools import product
from sklearn.externals.six.moves import xrange
from sklearn.externals.six import iteritems
from scipy.sparse import issparse
from scipy.sparse import csc_matrix
from scipy.sparse import csr_matrix
from scipy.sparse import coo_matrix
from scipy.sparse import dok_matrix
from scipy.sparse import lil_matrix
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_false
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_raises_regex
from sklearn.utils.multiclass import unique_labels
from sklearn.utils.multiclass import is_multilabel
from sklearn.utils.multiclass import type_of_target
from sklearn.utils.multiclass import class_distribution
class NotAnArray(object):
"""An object that is convertable to an array. This is useful to
simulate a Pandas timeseries."""
def __init__(self, data):
self.data = data
def __array__(self):
return self.data
EXAMPLES = {
'multilabel-indicator': [
        # valid when the data is formatted as sparse or dense, identified
# by CSR format when the testing takes place
csr_matrix(np.random.RandomState(42).randint(2, size=(10, 10))),
csr_matrix(np.array([[0, 1], [1, 0]])),
csr_matrix(np.array([[0, 1], [1, 0]], dtype=np.bool)),
csr_matrix(np.array([[0, 1], [1, 0]], dtype=np.int8)),
csr_matrix(np.array([[0, 1], [1, 0]], dtype=np.uint8)),
csr_matrix(np.array([[0, 1], [1, 0]], dtype=np.float)),
csr_matrix(np.array([[0, 1], [1, 0]], dtype=np.float32)),
csr_matrix(np.array([[0, 0], [0, 0]])),
csr_matrix(np.array([[0, 1]])),
# Only valid when data is dense
np.array([[-1, 1], [1, -1]]),
np.array([[-3, 3], [3, -3]]),
NotAnArray(np.array([[-3, 3], [3, -3]])),
],
'multiclass': [
[1, 0, 2, 2, 1, 4, 2, 4, 4, 4],
np.array([1, 0, 2]),
np.array([1, 0, 2], dtype=np.int8),
np.array([1, 0, 2], dtype=np.uint8),
np.array([1, 0, 2], dtype=np.float),
np.array([1, 0, 2], dtype=np.float32),
np.array([[1], [0], [2]]),
NotAnArray(np.array([1, 0, 2])),
[0, 1, 2],
['a', 'b', 'c'],
np.array([u'a', u'b', u'c']),
np.array([u'a', u'b', u'c'], dtype=object),
np.array(['a', 'b', 'c'], dtype=object),
],
'multiclass-multioutput': [
np.array([[1, 0, 2, 2], [1, 4, 2, 4]]),
np.array([[1, 0, 2, 2], [1, 4, 2, 4]], dtype=np.int8),
np.array([[1, 0, 2, 2], [1, 4, 2, 4]], dtype=np.uint8),
np.array([[1, 0, 2, 2], [1, 4, 2, 4]], dtype=np.float),
np.array([[1, 0, 2, 2], [1, 4, 2, 4]], dtype=np.float32),
np.array([['a', 'b'], ['c', 'd']]),
np.array([[u'a', u'b'], [u'c', u'd']]),
np.array([[u'a', u'b'], [u'c', u'd']], dtype=object),
np.array([[1, 0, 2]]),
NotAnArray(np.array([[1, 0, 2]])),
],
'binary': [
[0, 1],
[1, 1],
[],
[0],
np.array([0, 1, 1, 1, 0, 0, 0, 1, 1, 1]),
np.array([0, 1, 1, 1, 0, 0, 0, 1, 1, 1], dtype=np.bool),
np.array([0, 1, 1, 1, 0, 0, 0, 1, 1, 1], dtype=np.int8),
np.array([0, 1, 1, 1, 0, 0, 0, 1, 1, 1], dtype=np.uint8),
np.array([0, 1, 1, 1, 0, 0, 0, 1, 1, 1], dtype=np.float),
np.array([0, 1, 1, 1, 0, 0, 0, 1, 1, 1], dtype=np.float32),
np.array([[0], [1]]),
NotAnArray(np.array([[0], [1]])),
[1, -1],
[3, 5],
['a'],
['a', 'b'],
['abc', 'def'],
np.array(['abc', 'def']),
[u'a', u'b'],
np.array(['abc', 'def'], dtype=object),
],
'continuous': [
[1e-5],
[0, .5],
np.array([[0], [.5]]),
np.array([[0], [.5]], dtype=np.float32),
],
'continuous-multioutput': [
np.array([[0, .5], [.5, 0]]),
np.array([[0, .5], [.5, 0]], dtype=np.float32),
np.array([[0, .5]]),
],
'unknown': [
[[]],
[()],
        # sequence of sequences that weren't supported even before deprecation
np.array([np.array([]), np.array([1, 2, 3])], dtype=object),
[np.array([]), np.array([1, 2, 3])],
[set([1, 2, 3]), set([1, 2])],
[frozenset([1, 2, 3]), frozenset([1, 2])],
# and also confusable as sequences of sequences
[{0: 'a', 1: 'b'}, {0: 'a'}],
# empty second dimension
np.array([[], []]),
# 3d
np.array([[[0, 1], [2, 3]], [[4, 5], [6, 7]]]),
]
}
NON_ARRAY_LIKE_EXAMPLES = [
set([1, 2, 3]),
{0: 'a', 1: 'b'},
{0: [5], 1: [5]},
'abc',
frozenset([1, 2, 3]),
None,
]
MULTILABEL_SEQUENCES = [
[[1], [2], [0, 1]],
[(), (2), (0, 1)],
np.array([[], [1, 2]], dtype='object'),
NotAnArray(np.array([[], [1, 2]], dtype='object'))
]
def test_unique_labels():
# Empty iterable
assert_raises(ValueError, unique_labels)
# Multiclass problem
assert_array_equal(unique_labels(xrange(10)), np.arange(10))
assert_array_equal(unique_labels(np.arange(10)), np.arange(10))
assert_array_equal(unique_labels([4, 0, 2]), np.array([0, 2, 4]))
# Multilabel indicator
assert_array_equal(unique_labels(np.array([[0, 0, 1],
[1, 0, 1],
[0, 0, 0]])),
np.arange(3))
assert_array_equal(unique_labels(np.array([[0, 0, 1],
[0, 0, 0]])),
np.arange(3))
# Several arrays passed
assert_array_equal(unique_labels([4, 0, 2], xrange(5)),
np.arange(5))
assert_array_equal(unique_labels((0, 1, 2), (0,), (2, 1)),
np.arange(3))
# Border line case with binary indicator matrix
assert_raises(ValueError, unique_labels, [4, 0, 2], np.ones((5, 5)))
assert_raises(ValueError, unique_labels, np.ones((5, 4)), np.ones((5, 5)))
assert_array_equal(unique_labels(np.ones((4, 5)), np.ones((5, 5))),
np.arange(5))
def test_unique_labels_non_specific():
# Test unique_labels with a variety of collected examples
# Smoke test for all supported format
for format in ["binary", "multiclass", "multilabel-indicator"]:
for y in EXAMPLES[format]:
unique_labels(y)
# We don't support those format at the moment
for example in NON_ARRAY_LIKE_EXAMPLES:
assert_raises(ValueError, unique_labels, example)
for y_type in ["unknown", "continuous", 'continuous-multioutput',
'multiclass-multioutput']:
for example in EXAMPLES[y_type]:
assert_raises(ValueError, unique_labels, example)
def test_unique_labels_mixed_types():
# Mix with binary or multiclass and multilabel
mix_clf_format = product(EXAMPLES["multilabel-indicator"],
EXAMPLES["multiclass"] +
EXAMPLES["binary"])
for y_multilabel, y_multiclass in mix_clf_format:
assert_raises(ValueError, unique_labels, y_multiclass, y_multilabel)
assert_raises(ValueError, unique_labels, y_multilabel, y_multiclass)
assert_raises(ValueError, unique_labels, [[1, 2]], [["a", "d"]])
assert_raises(ValueError, unique_labels, ["1", 2])
assert_raises(ValueError, unique_labels, [["1", 2], [1, 3]])
assert_raises(ValueError, unique_labels, [["1", "2"], [2, 3]])
def test_is_multilabel():
for group, group_examples in iteritems(EXAMPLES):
if group in ['multilabel-indicator']:
dense_assert_, dense_exp = assert_true, 'True'
else:
dense_assert_, dense_exp = assert_false, 'False'
for example in group_examples:
# Only mark explicitly defined sparse examples as valid sparse
# multilabel-indicators
if group == 'multilabel-indicator' and issparse(example):
sparse_assert_, sparse_exp = assert_true, 'True'
else:
sparse_assert_, sparse_exp = assert_false, 'False'
if (issparse(example) or
(hasattr(example, '__array__') and
np.asarray(example).ndim == 2 and
np.asarray(example).dtype.kind in 'biuf' and
np.asarray(example).shape[1] > 0)):
examples_sparse = [sparse_matrix(example)
for sparse_matrix in [coo_matrix,
csc_matrix,
csr_matrix,
dok_matrix,
lil_matrix]]
for exmpl_sparse in examples_sparse:
sparse_assert_(is_multilabel(exmpl_sparse),
msg=('is_multilabel(%r)'
' should be %s')
% (exmpl_sparse, sparse_exp))
# Densify sparse examples before testing
if issparse(example):
example = example.toarray()
dense_assert_(is_multilabel(example),
msg='is_multilabel(%r) should be %s'
% (example, dense_exp))
def test_type_of_target():
for group, group_examples in iteritems(EXAMPLES):
for example in group_examples:
assert_equal(type_of_target(example), group,
msg=('type_of_target(%r) should be %r, got %r'
% (example, group, type_of_target(example))))
for example in NON_ARRAY_LIKE_EXAMPLES:
        msg_regex = r'Expected array-like \(array or non-string sequence\).*'
assert_raises_regex(ValueError, msg_regex, type_of_target, example)
for example in MULTILABEL_SEQUENCES:
msg = ('You appear to be using a legacy multi-label data '
'representation. Sequence of sequences are no longer supported;'
' use a binary array or sparse matrix instead.')
assert_raises_regex(ValueError, msg, type_of_target, example)
def test_class_distribution():
y = np.array([[1, 0, 0, 1],
[2, 2, 0, 1],
[1, 3, 0, 1],
[4, 2, 0, 1],
[2, 0, 0, 1],
[1, 3, 0, 1]])
# Define the sparse matrix with a mix of implicit and explicit zeros
data = np.array([1, 2, 1, 4, 2, 1, 0, 2, 3, 2, 3, 1, 1, 1, 1, 1, 1])
indices = np.array([0, 1, 2, 3, 4, 5, 0, 1, 2, 3, 5, 0, 1, 2, 3, 4, 5])
indptr = np.array([0, 6, 11, 11, 17])
y_sp = sp.csc_matrix((data, indices, indptr), shape=(6, 4))
classes, n_classes, class_prior = class_distribution(y)
classes_sp, n_classes_sp, class_prior_sp = class_distribution(y_sp)
classes_expected = [[1, 2, 4],
[0, 2, 3],
[0],
[1]]
n_classes_expected = [3, 3, 1, 1]
class_prior_expected = [[3/6, 2/6, 1/6],
[1/3, 1/3, 1/3],
[1.0],
[1.0]]
for k in range(y.shape[1]):
assert_array_almost_equal(classes[k], classes_expected[k])
assert_array_almost_equal(n_classes[k], n_classes_expected[k])
assert_array_almost_equal(class_prior[k], class_prior_expected[k])
assert_array_almost_equal(classes_sp[k], classes_expected[k])
assert_array_almost_equal(n_classes_sp[k], n_classes_expected[k])
assert_array_almost_equal(class_prior_sp[k], class_prior_expected[k])
# Test again with explicit sample weights
(classes,
n_classes,
class_prior) = class_distribution(y, [1.0, 2.0, 1.0, 2.0, 1.0, 2.0])
(classes_sp,
n_classes_sp,
class_prior_sp) = class_distribution(y, [1.0, 2.0, 1.0, 2.0, 1.0, 2.0])
class_prior_expected = [[4/9, 3/9, 2/9],
[2/9, 4/9, 3/9],
[1.0],
[1.0]]
for k in range(y.shape[1]):
assert_array_almost_equal(classes[k], classes_expected[k])
assert_array_almost_equal(n_classes[k], n_classes_expected[k])
assert_array_almost_equal(class_prior[k], class_prior_expected[k])
assert_array_almost_equal(classes_sp[k], classes_expected[k])
assert_array_almost_equal(n_classes_sp[k], n_classes_expected[k])
assert_array_almost_equal(class_prior_sp[k], class_prior_expected[k])
| bsd-3-clause |
aflaxman/scikit-learn | sklearn/utils/tests/test_seq_dataset.py | 79 | 2497 | # Author: Tom Dupre la Tour <[email protected]>
#
# License: BSD 3 clause
import numpy as np
from numpy.testing import assert_array_equal
import scipy.sparse as sp
from sklearn.utils.seq_dataset import ArrayDataset, CSRDataset
from sklearn.datasets import load_iris
from sklearn.utils.testing import assert_equal
iris = load_iris()
X = iris.data.astype(np.float64)
y = iris.target.astype(np.float64)
X_csr = sp.csr_matrix(X)
sample_weight = np.arange(y.size, dtype=np.float64)
def assert_csr_equal(X, Y):
X.eliminate_zeros()
Y.eliminate_zeros()
assert_equal(X.shape[0], Y.shape[0])
assert_equal(X.shape[1], Y.shape[1])
assert_array_equal(X.data, Y.data)
assert_array_equal(X.indices, Y.indices)
assert_array_equal(X.indptr, Y.indptr)
def test_seq_dataset():
dataset1 = ArrayDataset(X, y, sample_weight, seed=42)
dataset2 = CSRDataset(X_csr.data, X_csr.indptr, X_csr.indices,
y, sample_weight, seed=42)
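    # dataset1 wraps the dense array and dataset2 the same data in CSR layout;
    # with a common seed both must yield identical (x, y, weight, index)
    # samples, which is what the loops below verify.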
for dataset in (dataset1, dataset2):
for i in range(5):
# next sample
xi_, yi, swi, idx = dataset._next_py()
xi = sp.csr_matrix((xi_), shape=(1, X.shape[1]))
assert_csr_equal(xi, X_csr[idx])
assert_equal(yi, y[idx])
assert_equal(swi, sample_weight[idx])
# random sample
xi_, yi, swi, idx = dataset._random_py()
xi = sp.csr_matrix((xi_), shape=(1, X.shape[1]))
assert_csr_equal(xi, X_csr[idx])
assert_equal(yi, y[idx])
assert_equal(swi, sample_weight[idx])
def test_seq_dataset_shuffle():
dataset1 = ArrayDataset(X, y, sample_weight, seed=42)
dataset2 = CSRDataset(X_csr.data, X_csr.indptr, X_csr.indices,
y, sample_weight, seed=42)
# not shuffled
for i in range(5):
_, _, _, idx1 = dataset1._next_py()
_, _, _, idx2 = dataset2._next_py()
assert_equal(idx1, i)
assert_equal(idx2, i)
for i in range(5):
_, _, _, idx1 = dataset1._random_py()
_, _, _, idx2 = dataset2._random_py()
assert_equal(idx1, idx2)
seed = 77
dataset1._shuffle_py(seed)
dataset2._shuffle_py(seed)
for i in range(5):
_, _, _, idx1 = dataset1._next_py()
_, _, _, idx2 = dataset2._next_py()
assert_equal(idx1, idx2)
_, _, _, idx1 = dataset1._random_py()
_, _, _, idx2 = dataset2._random_py()
assert_equal(idx1, idx2)
| bsd-3-clause |
mjgrav2001/scikit-learn | examples/mixture/plot_gmm_selection.py | 248 | 3223 | """
=================================
Gaussian Mixture Model Selection
=================================
This example shows that model selection can be performed with
Gaussian Mixture Models using information-theoretic criteria (BIC).
Model selection concerns both the covariance type
and the number of components in the model.
In that case, AIC also provides the right result (not shown to save time),
but BIC is better suited if the problem is to identify the right model.
Unlike Bayesian procedures, such inferences are prior-free.
In that case, the model with 2 components and full covariance
(which corresponds to the true generative model) is selected.
"""
print(__doc__)
import itertools
import numpy as np
from scipy import linalg
import matplotlib.pyplot as plt
import matplotlib as mpl
from sklearn import mixture
# Number of samples per component
n_samples = 500
# Generate random sample, two components
np.random.seed(0)
C = np.array([[0., -0.1], [1.7, .4]])
X = np.r_[np.dot(np.random.randn(n_samples, 2), C),
.7 * np.random.randn(n_samples, 2) + np.array([-6, 3])]
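# The first block of samples is a zero-mean Gaussian sheared by C (anisotropic
# component); the second is an isotropic Gaussian with standard deviation 0.7
# centered on (-6, 3).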
lowest_bic = np.infty
bic = []
n_components_range = range(1, 7)
cv_types = ['spherical', 'tied', 'diag', 'full']
for cv_type in cv_types:
for n_components in n_components_range:
# Fit a mixture of Gaussians with EM
gmm = mixture.GMM(n_components=n_components, covariance_type=cv_type)
gmm.fit(X)
bic.append(gmm.bic(X))
if bic[-1] < lowest_bic:
lowest_bic = bic[-1]
best_gmm = gmm
bic = np.array(bic)
color_iter = itertools.cycle(['k', 'r', 'g', 'b', 'c', 'm', 'y'])
clf = best_gmm
bars = []
# Plot the BIC scores
spl = plt.subplot(2, 1, 1)
for i, (cv_type, color) in enumerate(zip(cv_types, color_iter)):
xpos = np.array(n_components_range) + .2 * (i - 2)
bars.append(plt.bar(xpos, bic[i * len(n_components_range):
(i + 1) * len(n_components_range)],
width=.2, color=color))
plt.xticks(n_components_range)
plt.ylim([bic.min() * 1.01 - .01 * bic.max(), bic.max()])
plt.title('BIC score per model')
xpos = np.mod(bic.argmin(), len(n_components_range)) + .65 +\
.2 * np.floor(bic.argmin() / len(n_components_range))
plt.text(xpos, bic.min() * 0.97 + .03 * bic.max(), '*', fontsize=14)
spl.set_xlabel('Number of components')
spl.legend([b[0] for b in bars], cv_types)
# Plot the winner
splot = plt.subplot(2, 1, 2)
Y_ = clf.predict(X)
for i, (mean, covar, color) in enumerate(zip(clf.means_, clf.covars_,
color_iter)):
v, w = linalg.eigh(covar)
if not np.any(Y_ == i):
continue
plt.scatter(X[Y_ == i, 0], X[Y_ == i, 1], .8, color=color)
# Plot an ellipse to show the Gaussian component
angle = np.arctan2(w[0][1], w[0][0])
angle = 180 * angle / np.pi # convert to degrees
v *= 4
ell = mpl.patches.Ellipse(mean, v[0], v[1], 180 + angle, color=color)
ell.set_clip_box(splot.bbox)
ell.set_alpha(.5)
splot.add_artist(ell)
plt.xlim(-10, 10)
plt.ylim(-3, 6)
plt.xticks(())
plt.yticks(())
plt.title('Selected GMM: full model, 2 components')
plt.subplots_adjust(hspace=.35, bottom=.02)
plt.show()
| bsd-3-clause |
tjlane/thor | src/python/plot.py | 1 | 4165 |
"""
A library for plotting pretty images of all kinds.
"""
import logging
logging.basicConfig()
logger = logging.getLogger(__name__)
import numpy as np
import matplotlib.pyplot as plt
from matplotlib import cm  # needed by plot_polar_intensities (cmap=cm.hsv)
class InteractiveImshow(object):
"""
A brief extension to matplotlib's imshow that puts a colorbar next to
the image that you can click on to scale the maximum numeric value
displayed.
Based on code from pyana_misc by Ingrid Ofte.
Parameters
----------
inarr : np.ndarray
The array to imshow()
filename : {str, None}
The filename to call the file if it is saved. If `None`, disable saving
ability.
"""
def __init__(self, inarr, filename=None, fig=None, ax=None):
"""
Parameters
----------
inarr : np.ndarray
The array to imshow()
filename : {str, None}
The filename to call the file if it is saved. If `None`, disable saving
ability.
fig : pyplot.figure
A figure object to draw on.
ax : pyplot.axes
An axes canvas to draw on.
"""
self.inarr = inarr
self.filename = filename
self.fig = fig
self.ax = ax
self.cmax = self.inarr.max()
self.cmin = self.inarr.min()
self._draw_img()
def _on_keypress(self, event):
if event.key == 's':
if not self.filename:
self.filename = input('Saving. Enter filename: ')
plt.savefig(self.filename)
logger.info("Saved image: %s" % self.filename)
elif event.key == 'r':
logger.info("Reset plot")
colmin, colmax = self.orglims
plt.clim(colmin, colmax)
plt.draw()
def _on_click(self, event):
if event.inaxes:
lims = self.im.get_clim()
colmin = lims[0]
colmax = lims[1]
rng = colmax - colmin
value = colmin + event.ydata * rng
            if event.button == 1:
if value > colmin and value < colmax :
colmax = value
            elif event.button == 2:
colmin, colmax = self.orglims
            elif event.button == 3:
if value > colmin and value < colmax:
                    colmin = value
self.im.set_clim(colmin, colmax)
plt.draw()
def _on_scroll(self, event):
lims = self.im.get_clim()
speed = 1.1
if event.button == 'up':
colmax = lims[1] / speed
elif event.button == 'down':
colmax = lims[1] * speed
self.im.set_clim(lims[0], colmax)
plt.draw()
def _draw_img(self):
if not self.fig:
self.fig = plt.figure()
cid1 = self.fig.canvas.mpl_connect('key_press_event', self._on_keypress)
cid2 = self.fig.canvas.mpl_connect('button_press_event', self._on_click)
cid3 = self.fig.canvas.mpl_connect('scroll_event', self._on_scroll)
if not self.ax:
self.ax = self.fig.add_subplot(111)
self.im = self.ax.imshow(self.inarr, vmax=self.cmax, origin='lower')
self.colbar = plt.colorbar(self.im, pad=0.01)
self.orglims = self.im.get_clim()
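# A hedged usage sketch for InteractiveImshow (not part of the original
# module; `img` is a hypothetical 2D array):
#
#   img = np.random.rand(128, 128)
#   viewer = InteractiveImshow(img, filename='frame.png')
#   plt.show()
#
# Left-clicking the colorbar lowers the displayed maximum, pressing 'r'
# resets the color limits, and 's' saves the figure to `filename`.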
def plot_polar_intensities(shot, output_file=None):
"""
Plot an intensity map in polar coordinates.
Parameters
----------
shot : thor.xray.Shot
A shot to plot.
output_file : str
The filename to write. If `None`, will display the image on screen and
not save.
"""
pi = shot.polar_grid
colors = shot.polar_intensities # color by intensity
ax = plt.subplot(111, polar=True)
c = plt.scatter(pi[:,1], pi[:,0], c=colors, cmap=cm.hsv)
c.set_alpha(0.75)
if output_file:
plt.savefig(output_file)
logger.info("Saved: %s" % output_file)
else:
plt.show()
return | gpl-2.0 |
ScienceStacks/SciSheets | mysite/scisheets/core/table.py | 2 | 27421 | '''
Implements the table class for SciSheets.
'''
from mysite import settings
import CommonUtil.util as ut
from Files.data_capture import DataCapture
from FileVersion.versioned_file import VersionedFile
from helpers.formula_statement import FormulaStatement
from CommonUtil.is_null import isNull
from column import Column
from column_container import ColumnContainer
from table_evaluator import TableEvaluator
from helpers.serialize_deserialize import deserialize
import errors as er
import json
import numpy as np
import os
import pandas as pd
import random
NAME_COLUMN_STR = "row"
NAME_COLUMN_IDX = 0
CUR_DIR = os.path.dirname(__file__)
PROLOGUE_FILEPATH = os.path.join(CUR_DIR, "table.prologue")
EPILOGUE_FILEPATH = os.path.join(CUR_DIR,"table.epilogue")
PROLOGUE_NAME = "Prologue"
EPILOGUE_NAME = "Epilogue"
class Row(dict):
"""
Container of values for a row
"""
pass
# pylint: disable=R0904
class Table(ColumnContainer):
"""
Implements full table functionality.
Feature 1: Maintains consistency
between columns as to column lengths
column names are unique
Feature 2: Knows about rows
add rows
delete rows
rows have a name as specified in the row column
The primary object for referencing a column is the column object.
The primary object for referencing a row is the row index
"""
def __init__(self, name):
super(Table, self).__init__(name)
self._namespace = {} # Namespace for formula evaluation
self._createNameColumn()
if self.getParent() is None:
self._prologue = self._formulaStatementFromFile(PROLOGUE_FILEPATH,
PROLOGUE_NAME)
self._epilogue = self._formulaStatementFromFile(EPILOGUE_FILEPATH,
EPILOGUE_NAME)
else:
self._prologue = None
self._epilogue = None
self._is_evaluate_formulas = True
@classmethod
def createRandomTable(cls, name, nrow, ncol, ncolstr=0,
low_int=0, hi_int=100, table_cls=None):
"""
Creates a table with random integers as values
Input: name - name of the table
nrow - number of rows
ncol - number of columns
ncolstr - number of columns with strings
low_int - smallest integer
hi_int - largest integer
table_cls - Table class to use; default is Table
"""
if table_cls is None:
table_cls = cls
ncol = int(ncol)
nrow = int(nrow)
table = cls(name)
ncolstr = min(ncol, ncolstr)
ncolint = ncol - ncolstr
c_list = range(ncol)
random.shuffle(c_list)
for n in range(ncol):
column = Column("Col_" + str(n))
if c_list[n] <= ncolint - 1:
values = np.random.randint(low_int, hi_int, nrow)
values_ext = values.tolist()
else:
values_ext = ut.randomWords(nrow)
#values_ext.append(None)
column.addCells(np.array(values_ext))
table.addColumn(column)
table.setFilepath(settings.SCISHEETS_DEFAULT_TABLEFILE)
return table
@classmethod
def createRandomHierarchicalTable(cls, name, nrow, num_nodes,
prob_child, ncolstr=0, low_int=0, hi_int=100, prob_detach=0,
table_cls=None):
"""
Creates a table with random integers as values
:param str name: name of the table
:param int nrow: number of rows
:param float prob_child: probability that next node is a child
:param str ncolstr: number of columns with strings
:param int low_int: smallest integer
:param int hi_int: largest integer
:param float prob_detach: probability that a subtree is detached
:parm Type table_cls: Table class to use; default is Table
:return table_cls:
"""
if table_cls is None:
table_cls = cls
# Create the schema for the Hierarchical Table
htable = super(Table, cls).createRandomNamedTree(num_nodes,
prob_child, leaf_cls=Column, prob_detach=prob_detach,
nonleaf_cls=table_cls)
leaves = [c for c in htable.getLeaves()
if c.getName(is_global_name=False) != NAME_COLUMN_STR]
num_leaves = len(htable.getLeaves()) -1 # Don't include the name column
# Create the values for the leaves of the Hierarchical Table
flat_table = Table.createRandomTable(name, nrow, num_leaves, ncolstr=ncolstr,
low_int=low_int, hi_int=hi_int, table_cls=table_cls)
data_columns = flat_table.getDataColumns()
pairs = zip(leaves, data_columns)
# Populate the leaves of the Hierarchical Table
[l.getParent().addCells(l, d.getCells(), replace=True) for l, d in pairs]
# Validate the table
if NAME_COLUMN_STR in \
[n.getName(is_global_name=False) for n in htable.getNonLeaves()]:
import pdb; pdb.set_trace()
htable.setFilepath(settings.SCISHEETS_DEFAULT_TABLEFILE)
return htable
def getSerializationDict(self, class_variable):
"""
:param str class_variable: key to use for the class name
:return dict: dictionary encoding the Table object and its columns
"""
serialization_dict = {}
serialization_dict[class_variable] = str(self.__class__)
filepath = self.getFilepath()
if self.getFilepath() is not None:
if ut.getFileExtension(self.getFilepath()) != settings.SCISHEETS_EXT:
filepath = ut.changeFileExtension(self.getFilepath(),
settings.SCISHEETS_EXT)
more_dict = {
"_name": self.getName(is_global_name=False),
"_prologue_formula": self.getPrologue().getFormula(),
"_epilogue_formula": self.getEpilogue().getFormula(),
"_is_evaluate_formulas": self.getIsEvaluateFormulas(),
"_filepath": filepath,
"_attached": self.isAttached(),
}
serialization_dict.update(more_dict)
_children = []
for child in self.getChildren():
if not Table.isNameColumn(child):
_children.append(child.getSerializationDict(class_variable))
serialization_dict["_children"] = _children
return serialization_dict
@classmethod
def deserialize(cls, serialization_dict, instance=None):
"""
Deserializes a table object and does fix ups.
:param dict serialization_dict: container of parameters for deserialization
:return Table:
"""
if instance is None:
table = Table(serialization_dict["_name"])
else:
table = instance
if serialization_dict["_filepath"] is not None:
table.setFilepath(serialization_dict["_filepath"])
table.setPrologue(serialization_dict["_prologue_formula"])
table.setEpilogue(serialization_dict["_epilogue_formula"])
table.setIsEvaluateFormulas(serialization_dict["_is_evaluate_formulas"])
if "_attached" in serialization_dict.keys():
table.setIsAttached(serialization_dict["_attached"])
if "_children" in serialization_dict.keys():
child_dicts = serialization_dict["_children"]
elif "_columns" in serialization_dict.keys():
child_dicts = serialization_dict["_columns"]
else:
raise ValueError("Cannot find children for %s" % table.getName())
for child_dict in child_dicts:
# Handle older serializations
if not child_dict['_name'] == NAME_COLUMN_STR:
new_child = deserialize(json.dumps(child_dict))
table.addChild(new_child)
table.adjustColumnLength()
return table
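  # Hedged round-trip sketch ("_cls" is an arbitrary key name chosen for the
  # example, not a required value):
  #
  #   d = table.getSerializationDict("_cls")
  #   restored = Table.deserialize(d)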
# The following methods are used in debugging
def d(self):
return [(c.getName(), c.getCells()) for c
in self.getLeaves()]
def f(self):
return [(c.getName(), c.getFormula())
for c in self.getColumns(is_attached=False)]
def setCapture(self, filename, data):
dc = DataCapture(filename)
dc.setData(data)
def getIsEvaluateFormulas(self):
return self._is_evaluate_formulas
# Internal and other methods
# TODO: Tests with multiple levels of subtable
def _updateNameColumn(self, nrows_table=None):
"""
Changes the cells in the name column of the table
to be consecutive ints.
    :param int nrows_table: Number of rows in the table
"""
if nrows_table is None:
nrows_table = self.numRows()
names = []
for row_num in range(nrows_table):
names.append(Table._rowNameFromIndex(row_num))
for column in self.getLeaves(is_attached=True):
if Table.isNameColumn(column):
column.addCells(list(names), replace=True)
def _formulaStatementFromFile(self, filepath, name):
"""
Reads the file contents and creates the FormulaStatement object.
:param str filepath: path to file to read
:param str name: name of the formula
    :returns FormulaStatement: statement created from the file contents
"""
with open(filepath, 'r') as f:
lines = f.readlines()
statements = ''.join(lines)
return FormulaStatement(statements, name)
# Data columns are those that have user data. The "row" column is excluded.
def getDataColumns(self, is_recursive=True, is_attached=True):
"""
Returns the columns other than the name column
"""
return [c for c in self.getColumns(is_recursive=is_recursive,
is_attached=is_attached) if not Table.isNameColumn(c)]
def getNameColumn(self):
"""
Gets the name column for this table.
:return Column:
"""
columns = [c for c in self.getColumns()
if Table.isNameColumn(c) and c.getParent() == self]
if len(columns) != 1:
raise RuntimeError("Should have exactly one name column")
return columns[0]
def getData(self):
"""
:return dict: keys are global column names
"""
return {c.getName(): list(c.getCells()) for c in self.getColumns()
if not Table.isNameColumn(c)}
def getEpilogue(self):
"""
:return FormulaStatement:
"""
return self._epilogue
def getFormulaColumns(self):
"""
:return list-of-Column:
"""
result = [c for c in self.getColumns(is_attached=False)
if c.getFormula() is not None]
return result
def getRow(self, row_index=None):
"""
:param row_index: row desired
if None, then a row of None is returned
:return: Row object
"""
row = Row()
for column in self.getColumns():
if row_index is None:
if column.isFloats():
row[column.getName()] = np.nan # pylint: disable=E1101
else:
row[column.getName()] = None
else:
row[column.getName()] = column.getCells()[row_index]
return row
def getNamespace(self):
return self._namespace
def getPrologue(self):
"""
:return FormulaStatement:
"""
return self._prologue
# TODO: Verify the index
@staticmethod
def _rowNameFromIndex(index):
"""
Create the row name from its index
"""
return str(index + 1)
def _coerceNameColumnToStr(self):
"""
Makes sure that row names are strings
"""
column = self.columnFromName(NAME_COLUMN_STR, is_relative=False)
if column is None:
import pdb; pdb.set_trace()
values = [str(v) for v in column.getCells()]
column.replaceCells(values)
# TODO: Verify the index
@staticmethod
def _rowNamesFromSize(size):
"""
:param size: number of rows
:return: array of names
"""
return [str(n) for n in range(1, size+1)]
def _createNameColumn(self):
"""
Creates the name column for the table
"""
column = Column(NAME_COLUMN_STR, asis=True)
self.addColumn(column)
def adjustColumnLength(self):
"""
Inserts values of None or np.nan so that column
has the same length as the table
"""
none_array = np.array([None])
num_rows = self.numRows()
for column in self.getColumns():
adj_rows = num_rows - column.numCells()
if adj_rows > 0:
if column.isFloats():
column.addCells(np.repeat(np.nan, adj_rows)) # pylint:disable=E1101
else:
column.addCells(np.repeat(none_array, adj_rows))
self._updateNameColumn(nrows_table=num_rows)
def _validateTable(self):
"""
Checks that the table is internally consistent
Verify that there is at least one column
"""
if len(self.getColumns()) < 1:
raise er.InternalError("Table %s has no columns." % self._name)
# Verify that all columns have the same number of cells
try:
name_column = [c for c in self.getChildren()
if c.getName(is_global_name=False) == NAME_COLUMN_STR][0]
except Exception as e:
import pdb; pdb.set_trace()
if name_column is None:
import pdb; pdb.set_trace()
num_rows = self.numRows()
for column in self.getColumns():
if column.numCells() != num_rows:
import pdb; pdb.set_trace()
msg = "In Table %s, Column %s differs in its number of rows." \
% (self.getName(), column.getName())
raise er.InternalError(msg)
# Verify that the first Column is the Name Column
if self.getChildAtPosition(0).getName(is_global_name=False) != NAME_COLUMN_STR:
msg = "In Table %s, first column is not 'row' column" % self.getName()
raise er.InternalError(msg)
# Verify that names are unique
if self.validateTree() is not None:
raise RuntimeError(self.validateTree())
# Verify the sequence of row names
for nrow in range(self.numRows()):
expected_row_name = Table._rowNameFromIndex(nrow)
actual_row_name = \
self.getChildAtPosition(NAME_COLUMN_IDX).getCells()[nrow]
if actual_row_name != expected_row_name:
import pdb; pdb.set_trace()
msg = "In Table %s, invalid row name at index %d: %s" % \
(self.getName(), nrow, actual_row_name)
raise er.InternalError(msg)
# Verify that the name columns are identical
for column in self.getColumns():
if Table.isNameColumn(column):
if not column.getCells() == name_column.getCells():
raise RuntimeError("%s is not a consistent name column" % column.getName())
def addCells(self, column, cells, replace=False):
"""
Adds to the column
:param Column column:
:param list cells:
"""
column.addCells(cells, replace=replace)
self.adjustColumnLength()
self._validateTable()
def addColumn(self, column, index=None):
"""
Adds a column to the table.
Adjusts the Column length to that of the table
:param column: column object
:param int index: position for the new column
:return: error text if there is a problem with the column
None if no problem
Notes: (1) A new column may have either no cells
or the same number as the existing table
"""
error = None
# Check for problems with this column
is_ok = all([c.getName(is_global_name=False)
!= column.getName(is_global_name=False)
for c in self.getChildren()])
if not is_ok:
error = "**%s is a duplicate name" % column.getName()
return error
else:
error = Column.isPermittedName( \
column.getName(is_global_name=False))
if error is not None:
return error
if index is None:
index = len(self.getColumns(is_attached=False))
# Handle the different cases of adding a column
self.addChild(column, position=index)
# Case 1: First column after name column
if self.numColumns() == 1:
self._updateNameColumn()
# Case 2: Subsequent columns
else:
self.adjustColumnLength()
self._validateTable()
def addRow(self, row, row_index=None):
"""
:param Row row: Row to add
:param int row_index: index where Row is added, may be a float
if None, then appended
"""
# Determine the actual desired name
if row_index is None:
proposed_name = Table._rowNameFromIndex(self.numRows())
else:
proposed_name = Table._rowNameFromIndex(row_index)
# Assign values to the last row of each column cells
for column in self.getColumns():
if column.getName(is_global_name=False) != NAME_COLUMN_STR:
cur_name = column.getName()
if cur_name in row:
column.insertCell(row[cur_name])
else:
column.insertCell(None)
else:
column.insertCell(None)
last_index = self.numRows() - 1
self.renameRow(last_index, proposed_name) # put the row in the right place
self._validateTable()
def copy(self, instance=None):
"""
Returns a copy of this object
:param Table instance:
"""
# Create an object if none provided
if instance is None:
instance = Table(self.getName(is_global_name=False))
name_column = instance.columnFromName(NAME_COLUMN_STR,
is_relative=False)
name_column.removeTree() # Avoid duplicate
# Copy everything required from inherited classes
super(Table, self).copy(instance=instance)
instance._coerceNameColumnToStr()
# Set properties specific to this class
instance.setPrologue(self.getPrologue().getFormula())
instance.setEpilogue(self.getEpilogue().getFormula())
instance.setIsEvaluateFormulas(self.getIsEvaluateFormulas())
self.adjustColumnLength()
return instance
def deleteRows(self, indicies):
"""
Deletes rows
    :param indicies: indices of the rows to delete
"""
indicies.sort()
indicies.reverse()
for column in self.getColumns():
column.deleteCells(indicies)
self._updateNameColumn()
def export(self, **kwargs):
"""
Exports the table to a python program
:return: error - string from the file export
"""
table_evaluator = TableEvaluator(self)
error = table_evaluator.export(**kwargs)
return error
def evaluate(self, user_directory=None):
"""
Evaluates formulas in the table
:param user_directory: full directory path where user modules
are placed
:return: error from table evaluation or None
"""
evaluator = TableEvaluator(self)
error = evaluator.evaluate(user_directory=user_directory)
return error
def isColumnPresent(self, column_name):
"""
:param str column_name: local column name
:return bool: True if column is present
"""
return any([c.getName(is_global_name=False) == column_name
for c in self.getColumns(is_attached=False)])
def isEquivalent(self, other_table, is_exception=False):
"""
Checks that the tables have the same values of their properties,
excluding the VersionedFile.
:param Table other_table:
:param bool is_exception: generate an AssertionError if false
:returns bool:
"""
msg = None
if not isinstance(other_table, self.__class__):
msg = "Table is not equivalent to a non-table."
elif not (self.getName(is_global_name=False) == other_table.getName(is_global_name=False)):
msg = "Table has a different name."
elif not (self.numColumns() == other_table.numColumns()):
msg = "Table has a different number of columns."
elif not (self.getPrologue().isEquivalent(other_table.getPrologue())):
msg = "Table has a different Prologue."
elif not (self.getEpilogue().isEquivalent(other_table.getEpilogue())):
msg = "Table has a different Epilogue."
elif not super(Table, self).isEquivalent(other_table,
is_exception=is_exception):
msg = "Differs because of ancestor of Table."
if msg is None:
return True
elif is_exception:
raise AssertionError(msg)
else:
return False
@staticmethod
def isNameColumn(column):
"""
Determines if this is a name column
:param Column column:
:return bool: True if name column
"""
path = column.pathFromGlobalName(column.getName())
return path[-1] == NAME_COLUMN_STR
@classmethod
def isTable(cls, child):
"""
:param NamedTree child:
:return bool: True if is a Column
"""
return isinstance(child, Table)
def insertRow(self, row, index=None):
"""
Inserts the row in the desired index in the table and
assigns the value of the NAME_COLUMN
:param row: a Row
:param index: index in the table where the row is inserted
"""
idx = index
if idx is None:
idx = self.numRows()
for child in self.getLeaves(is_attached=True):
if ColumnContainer.isColumn(child):
name = child.getName(is_global_name=False)
if name in row.keys():
child.insertCell(row[name], idx)
else:
child.insertCell(None, idx)
self._updateNameColumn()
def moveRow(self, index1, index2):
"""
Moves the row at index1 to index2
"""
row = self.getRow(row_index=index1)
self.deleteRows([index1])
self.insertRow(row, index2)
self._updateNameColumn()
def numRows(self):
"""
Returns the number of rows in the table
"""
attached_leaves = self.getAttachedNodes(self.getColumns())
return max([c.numCells() for c in attached_leaves])
# TODO: This won't work with nested columns
def refactorColumn(self, cur_colnm, new_colnm):
"""
Changes the column name and its occurrences in formulas in the table.
:param str cur_colnm: Current name of the column
:param str new_colnm: New name of the column
:returns list-of-str changed_columns:
:raises ValueError: column name is unknown
"""
def changeFormula(formula_statement):
"""
Changes the formula by replacing occurrences of
cur_colnm with new_colnm
      :param FormulaStatement formula_statement:
:returns str/None: new formula or None
"""
formula = formula_statement.getFormula()
if cur_colnm in formula:
return formula.replace(cur_colnm, new_colnm)
else:
return None
column = self.childFromName(cur_colnm, is_relative=True)
if column is None:
raise ValueError("Column %s does not exist." % cur_colnm)
column.setName(new_colnm)
columns = self.getColumns(is_attached=False)
changed_columns = []
try:
# Do the Columns
for col in self.getFormulaColumns():
new_formula = changeFormula(col.getFormulaStatementObject())
if new_formula is not None:
col.setFormula(new_formula)
changed_columns.append(col.getName())
# Handle Prologue
new_formula = changeFormula(self.getPrologue())
if new_formula is not None:
self.setPrologue(new_formula)
changed_columns.append(PROLOGUE_NAME)
# Handle Epilogue
new_formula = changeFormula(self.getEpilogue())
if new_formula is not None:
self.setEpilogue(new_formula)
        changed_columns.append(EPILOGUE_NAME)
except Exception as err:
msg = '''Changing column name from %s to %s.
Encountered error %s.
Changed formulas in columns %s.''' % (cur_colnm, new_colnm,
str(err), ' '.join(changed_columns))
return changed_columns
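  # Hedged sketch: renaming "Col_A" to "Col_B" also rewrites any formula,
  # Prologue or Epilogue that mentions "Col_A" (column names are examples):
  #
  #   changed = table.refactorColumn("Col_A", "Col_B")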
@staticmethod
def rowIndexFromName(name):
"""
Returns the row index for the row name
"""
return int(name) - 1
def renameColumn(self, column, proposed_name):
"""
Renames the column, checking for a duplicate
:param column: column object
:param proposed_name: str, proposed name
:return: Boolean indicating success or failure
"""
names = [c.getName(is_global_name=False) for c in self.getChildren()]
bool_test = all([name != proposed_name for name in names])
if bool_test:
column.setName(proposed_name)
return bool_test
def renameRow(self, row_index, proposed_name):
"""
Renames the row so that it is an integer value
that creates the row ordering desired.
:param row_index: index of the row to change
:param proposed_name: string of a number
"""
name_column = self.childFromName(NAME_COLUMN_STR,
is_relative=True)
names = name_column.getCells()
try:
names[row_index] = str(proposed_name)
except:
import pdb; pdb.set_trace()
try:
float_names = [float(x) for x in names]
except:
import pdb; pdb.set_trace()
sel_index = np.argsort(float_names)
new_names = Table._rowNamesFromSize(len(names))
for column in self.getChildren(is_recursive=True):
if Table.isNameColumn(column):
column.replaceCells(list(new_names))
self._updateNameColumn()
# Update the order of values in each column
for column in self.getLeaves(is_attached=True):
if not Table.isNameColumn(column):
data = column.getCells()
new_data = [data[n] for n in sel_index]
column.replaceCells(new_data)
def setNamespace(self, namespace):
self._namespace = namespace
def setIsEvaluateFormulas(self, setting):
self._is_evaluate_formulas = setting
def setEpilogue(self, epilogue_formula):
"""
:param str epilogue_formula: New value for the Epilogue formula
:return str: Error or None
"""
self._epilogue = FormulaStatement(epilogue_formula, EPILOGUE_NAME)
return self._epilogue.do()
def setPrologue(self, prologue_formula):
"""
:param str prologue_formula: New value for the Prologue formula
"""
self._prologue = FormulaStatement(prologue_formula, PROLOGUE_NAME)
return self._prologue.do()
def tableFromName(self, name, is_relative=True):
"""
Finds the table with the specified name or None.
Note that Columns must be leaves in the Tree.
    :param str name: name of the table
:return NamedTree:
"""
leaf = self.childFromName(name, is_relative=is_relative)
if Table.isTable(leaf):
return leaf
def trimRows(self):
"""
    Removes consecutive rows at the end of the table
    whose data columns contain only None values
"""
num_rows = self.numRows()
row_indexes = range(num_rows)
row_indexes.sort(reverse=True)
for index in row_indexes:
row = self.getRow(row_index=index)
# Delete all of the name columns
for colnm in row.keys():
column = self.childFromName(colnm, is_relative=False)
if column is None:
import pdb; pdb.set_trace()
if Table.isNameColumn(column):
del row[column.getName()]
delete_row = True
for name in row.keys():
column = self.childFromName(name, is_relative=False)
if not isNull(row[name]):
delete_row = False
if delete_row:
self.deleteRows([index])
else:
break
def updateCell(self, value, row_index, column_id):
"""
Changes the value of the identified cell
:param obj value: new value for the cell
:param int row_index: 0-based index of the row
:param int/str column_id: 0-based index of the column or its name
"""
if isinstance(column_id, int):
column = self.columnFromIndex(column_id)
else:
column = self.childFromName(column_id, is_relative=False)
column.updateCell(value, row_index)
def updateColumn(self, column, cells):
"""
Replaces the cells in the column with those provided
:param column: column to update
:param cells: cells to change
"""
column.addCells(cells, replace=True)
self.adjustColumnLength()
self._validateTable()
def updateRow(self, row, index):
"""
    Updates the row in place. Only changes values
    that are specified in row.
    Assigns the value of the NAME_COLUMN.
:param row: Row
:param index: index of row to change
"""
row[NAME_COLUMN_STR] = Table._rowNameFromIndex(index)
for name in row:
column = self.childFromName(name, is_relative=False)
if not Table.isNameColumn(column):
column.updateCell(row[name], index)
self.adjustColumnLength()
| apache-2.0 |
dpgoetz/swift | swift/common/middleware/x_profile/html_viewer.py | 15 | 21038 | # Copyright (c) 2010-2012 OpenStack, LLC.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import cgi
import os
import random
import re
import string
import tempfile
from swift import gettext_ as _
from exceptions import PLOTLIBNotInstalled, ODFLIBNotInstalled,\
NotFoundException, MethodNotAllowed, DataLoadFailure, ProfileException
from profile_model import Stats2
PLOTLIB_INSTALLED = True
try:
import matplotlib
# use agg backend for writing to file, not for rendering in a window.
# otherwise some platform will complain "no display name and $DISPLAY
# environment variable"
matplotlib.use('agg')
import matplotlib.pyplot as plt
except ImportError:
PLOTLIB_INSTALLED = False
empty_description = """
The default profile of current process or the profile you requested is
empty. <input type="submit" name="refresh" value="Refresh"/>
"""
profile_tmpl = """
<select name="profile">
<option value="current">current</option>
<option value="all">all</option>
${profile_list}
</select>
"""
sort_tmpl = """
<select name="sort">
<option value="time">time</option>
<option value="cumulative">cumulative</option>
<option value="calls">calls</option>
<option value="pcalls">pcalls</option>
<option value="name">name</option>
<option value="file">file</option>
<option value="module">module</option>
<option value="line">line</option>
<option value="nfl">nfl</option>
<option value="stdname">stdname</option>
</select>
"""
limit_tmpl = """
<select name="limit">
<option value="-1">all</option>
<option value="0.1">10%</option>
<option value="0.2">20%</option>
<option value="0.3">30%</option>
<option value="10">10</option>
<option value="20">20</option>
<option value="30">30</option>
<option value="50">50</option>
<option value="100">100</option>
<option value="200">200</option>
<option value="300">300</option>
<option value="400">400</option>
<option value="500">500</option>
</select>
"""
fulldirs_tmpl = """
<input type="checkbox" name="fulldirs" value="1"
${fulldir_checked}/>
"""
mode_tmpl = """
<select name="mode">
<option value="stats">stats</option>
<option value="callees">callees</option>
<option value="callers">callers</option>
</select>
"""
nfl_filter_tmpl = """
<input type="text" name="nfl_filter" value="${nfl_filter}"
placeholder="filename part" />
"""
formelements_tmpl = """
<div>
<table>
<tr>
<td>
<strong>Profile</strong>
<td>
<strong>Sort</strong>
</td>
<td>
<strong>Limit</strong>
</td>
<td>
<strong>Full Path</strong>
</td>
<td>
<strong>Filter</strong>
</td>
<td>
</td>
<td>
<strong>Plot Metric</strong>
</td>
<td>
<strong>Plot Type</strong>
<td>
</td>
<td>
<strong>Format</strong>
</td>
<td>
<td>
</td>
<td>
</td>
</tr>
<tr>
<td>
${profile}
<td>
${sort}
</td>
<td>
${limit}
</td>
<td>
${fulldirs}
</td>
<td>
${nfl_filter}
</td>
<td>
<input type="submit" name="query" value="query"/>
</td>
<td>
<select name='metric'>
<option value='nc'>call count</option>
<option value='cc'>primitive call count</option>
<option value='tt'>total time</option>
<option value='ct'>cumulative time</option>
</select>
</td>
<td>
<select name='plottype'>
<option value='bar'>bar</option>
<option value='pie'>pie</option>
</select>
<td>
<input type="submit" name="plot" value="plot"/>
</td>
<td>
<select name='format'>
<option value='default'>binary</option>
<option value='json'>json</option>
<option value='csv'>csv</option>
<option value='ods'>ODF.ods</option>
</select>
</td>
<td>
<input type="submit" name="download" value="download"/>
</td>
<td>
<input type="submit" name="clear" value="clear"/>
</td>
</tr>
</table>
</div>
"""
index_tmpl = """
<html>
<head>
<title>profile results</title>
<style>
<!--
tr.normal { background-color: #ffffff }
tr.hover { background-color: #88eeee }
//-->
</style>
</head>
<body>
<form action="${action}" method="POST">
<div class="form-text">
${description}
</div>
<hr />
${formelements}
</form>
<pre>
${profilehtml}
</pre>
</body>
</html>
"""
class HTMLViewer(object):
format_dict = {'default': 'application/octet-stream',
'json': 'application/json',
'csv': 'text/csv',
'ods': 'application/vnd.oasis.opendocument.spreadsheet',
'python': 'text/html'}
def __init__(self, app_path, profile_module, profile_log):
self.app_path = app_path
self.profile_module = profile_module
self.profile_log = profile_log
def _get_param(self, query_dict, key, default=None, multiple=False):
value = query_dict.get(key, default)
if value is None or value == '':
return default
if multiple:
return value
if isinstance(value, list):
return eval(value[0]) if isinstance(default, int) else value[0]
else:
return value
def render(self, url, method, path_entry, query_dict, clear_callback):
plot = self._get_param(query_dict, 'plot', None)
download = self._get_param(query_dict, 'download', None)
clear = self._get_param(query_dict, 'clear', None)
action = plot or download or clear
profile_id = self._get_param(query_dict, 'profile', 'current')
sort = self._get_param(query_dict, 'sort', 'time')
limit = self._get_param(query_dict, 'limit', -1)
fulldirs = self._get_param(query_dict, 'fulldirs', 0)
nfl_filter = self._get_param(query_dict, 'nfl_filter', '').strip()
metric_selected = self._get_param(query_dict, 'metric', 'cc')
plot_type = self._get_param(query_dict, 'plottype', 'bar')
download_format = self._get_param(query_dict, 'format', 'default')
content = ''
# GET /__profile, POST /__profile
if len(path_entry) == 2 and method in ['GET', 'POST']:
log_files = self.profile_log.get_logfiles(profile_id)
if action == 'plot':
content, headers = self.plot(log_files, sort, limit,
nfl_filter, metric_selected,
plot_type)
elif action == 'download':
content, headers = self.download(log_files, sort, limit,
nfl_filter, download_format)
else:
if action == 'clear':
self.profile_log.clear(profile_id)
clear_callback and clear_callback()
content, headers = self.index_page(log_files, sort, limit,
fulldirs, nfl_filter,
profile_id, url)
# GET /__profile__/all
# GET /__profile__/current
# GET /__profile__/profile_id
# GET /__profile__/profile_id/
# GET /__profile__/profile_id/account.py:50(GETorHEAD)
# GET /__profile__/profile_id/swift/proxy/controllers
# /account.py:50(GETorHEAD)
# with QUERY_STRING: ?format=[default|json|csv|ods]
elif len(path_entry) > 2 and method == 'GET':
profile_id = path_entry[2]
log_files = self.profile_log.get_logfiles(profile_id)
pids = self.profile_log.get_all_pids()
# return all profiles in a json format by default.
# GET /__profile__/
if profile_id == '':
content = '{"profile_ids": ["' + '","'.join(pids) + '"]}'
headers = [('content-type', self.format_dict['json'])]
else:
if len(path_entry) > 3 and path_entry[3] != '':
nfl_filter = '/'.join(path_entry[3:])
if path_entry[-1].find(':0') == -1:
nfl_filter = '/' + nfl_filter
content, headers = self.download(log_files, sort, -1,
nfl_filter, download_format)
headers.append(('Access-Control-Allow-Origin', '*'))
else:
raise MethodNotAllowed(_('method %s is not allowed.') % method)
return content, headers
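    # Hedged example of the routing above: a request such as
    #   GET /__profile__/all?format=json
    # goes through the download() branch and returns the combined profile
    # statistics for all collected pids as JSON.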
def index_page(self, log_files=None, sort='time', limit=-1,
fulldirs=0, nfl_filter='', profile_id='current', url='#'):
headers = [('content-type', 'text/html')]
if len(log_files) == 0:
return empty_description, headers
try:
stats = Stats2(*log_files)
except (IOError, ValueError):
raise DataLoadFailure(_('Can not load profile data from %s.')
% log_files)
if not fulldirs:
stats.strip_dirs()
stats.sort_stats(sort)
nfl_filter_esc =\
nfl_filter.replace('(', '\(').replace(')', '\)')
amount = [nfl_filter_esc, limit] if nfl_filter_esc else [limit]
profile_html = self.generate_stats_html(stats, self.app_path,
profile_id, *amount)
description = "Profiling information is generated by using\
'%s' profiler." % self.profile_module
sort_repl = '<option value="%s">' % sort
sort_selected = '<option value="%s" selected>' % sort
sort = sort_tmpl.replace(sort_repl, sort_selected)
plist = ''.join(['<option value="%s">%s</option>' % (p, p)
for p in self.profile_log.get_all_pids()])
profile_element = string.Template(profile_tmpl).substitute(
{'profile_list': plist})
profile_repl = '<option value="%s">' % profile_id
profile_selected = '<option value="%s" selected>' % profile_id
profile_element = profile_element.replace(profile_repl,
profile_selected)
limit_repl = '<option value="%s">' % limit
limit_selected = '<option value="%s" selected>' % limit
limit = limit_tmpl.replace(limit_repl, limit_selected)
fulldirs_checked = 'checked' if fulldirs else ''
fulldirs_element = string.Template(fulldirs_tmpl).substitute(
{'fulldir_checked': fulldirs_checked})
nfl_filter_element = string.Template(nfl_filter_tmpl).\
substitute({'nfl_filter': nfl_filter})
form_elements = string.Template(formelements_tmpl).substitute(
{'description': description,
'action': url,
'profile': profile_element,
'sort': sort,
'limit': limit,
'fulldirs': fulldirs_element,
'nfl_filter': nfl_filter_element,
}
)
content = string.Template(index_tmpl).substitute(
{'formelements': form_elements,
'action': url,
'description': description,
'profilehtml': profile_html,
})
return content, headers
def download(self, log_files, sort='time', limit=-1, nfl_filter='',
output_format='default'):
if len(log_files) == 0:
raise NotFoundException(_('no log file found'))
try:
nfl_esc = nfl_filter.replace('(', '\(').replace(')', '\)')
# remove the slash that is intentionally added in the URL
# to avoid failure of filtering stats data.
if nfl_esc.startswith('/'):
nfl_esc = nfl_esc[1:]
stats = Stats2(*log_files)
stats.sort_stats(sort)
if output_format == 'python':
data = self.format_source_code(nfl_filter)
elif output_format == 'json':
data = stats.to_json(nfl_esc, limit)
elif output_format == 'csv':
data = stats.to_csv(nfl_esc, limit)
elif output_format == 'ods':
data = stats.to_ods(nfl_esc, limit)
else:
data = stats.print_stats()
return data, [('content-type', self.format_dict[output_format])]
except ODFLIBNotInstalled as ex:
raise ex
except Exception as ex:
raise ProfileException(_('Data download error: %s') % ex)
def plot(self, log_files, sort='time', limit=10, nfl_filter='',
metric_selected='cc', plot_type='bar'):
if not PLOTLIB_INSTALLED:
raise PLOTLIBNotInstalled(_('python-matplotlib not installed.'))
if len(log_files) == 0:
raise NotFoundException(_('no log file found'))
try:
stats = Stats2(*log_files)
stats.sort_stats(sort)
stats_dict = stats.stats
__, func_list = stats.get_print_list([nfl_filter, limit])
nfls = []
performance = []
names = {'nc': 'Total Call Count', 'cc': 'Primitive Call Count',
'tt': 'Total Time', 'ct': 'Cumulative Time'}
for func in func_list:
cc, nc, tt, ct, __ = stats_dict[func]
metric = {'cc': cc, 'nc': nc, 'tt': tt, 'ct': ct}
nfls.append(func[2])
performance.append(metric[metric_selected])
y_pos = range(len(nfls))
error = [random.random() for __ in y_pos]
plt.clf()
if plot_type == 'pie':
plt.pie(x=performance, explode=None, labels=nfls,
autopct='%1.1f%%')
else:
plt.barh(y_pos, performance, xerr=error, align='center',
alpha=0.4)
plt.yticks(y_pos, nfls)
plt.xlabel(names[metric_selected])
plt.title('Profile Statistics (by %s)' % names[metric_selected])
#plt.gcf().tight_layout(pad=1.2)
with tempfile.TemporaryFile() as profile_img:
plt.savefig(profile_img, format='png', dpi=300)
profile_img.seek(0)
data = profile_img.read()
            return data, [('content-type', 'image/png')]
except Exception as ex:
raise ProfileException(_('plotting results failed due to %s') % ex)
def format_source_code(self, nfl):
nfls = re.split('[:()]', nfl)
file_path = nfls[0]
try:
lineno = int(nfls[1])
except (TypeError, ValueError, IndexError):
lineno = 0
        # For security reasons, this needs to be fixed.
if not file_path.endswith('.py'):
return _('The file type are forbidden to access!')
try:
data = []
i = 0
with open(file_path) as f:
lines = f.readlines()
max_width = str(len(str(len(lines))))
fmt = '<span id="L%d" rel="#L%d">%' + max_width\
+ 'd|<code>%s</code></span>'
for line in lines:
l = cgi.escape(line, quote=None)
i = i + 1
if i == lineno:
fmt2 = '<span id="L%d" style="background-color: \
rgb(127,255,127)">%' + max_width +\
'd|<code>%s</code></span>'
data.append(fmt2 % (i, i, l))
else:
data.append(fmt % (i, i, i, l))
data = ''.join(data)
except Exception:
return _('Can not access the file %s.') % file_path
return '<pre>%s</pre>' % data
def generate_stats_html(self, stats, app_path, profile_id, *selection):
html = []
for filename in stats.files:
html.append('<p>%s</p>' % filename)
try:
for func in stats.top_level:
html.append('<p>%s</p>' % func[2])
html.append('%s function calls' % stats.total_calls)
if stats.total_calls != stats.prim_calls:
html.append("(%d primitive calls)" % stats.prim_calls)
html.append('in %.3f seconds' % stats.total_tt)
if stats.fcn_list:
stat_list = stats.fcn_list[:]
msg = "<p>Ordered by: %s</p>" % stats.sort_type
else:
stat_list = stats.stats.keys()
msg = '<p>Random listing order was used</p>'
for sel in selection:
stat_list, msg = stats.eval_print_amount(sel, stat_list, msg)
html.append(msg)
html.append('<table style="border-width: 1px">')
if stat_list:
html.append('<tr><th>#</th><th>Call Count</th>\
<th>Total Time</th><th>Time/Call</th>\
<th>Cumulative Time</th>\
<th>Cumulative Time/Call</th>\
<th>Filename:Lineno(Function)</th>\
<th>JSON</th>\
</tr>')
count = 0
for func in stat_list:
count = count + 1
html.append('<tr onMouseOver="this.className=\'hover\'"\
onMouseOut="this.className=\'normal\'">\
<td>%d)</td>' % count)
cc, nc, tt, ct, __ = stats.stats[func]
c = str(nc)
if nc != cc:
c = c + '/' + str(cc)
html.append('<td>%s</td>' % c)
html.append('<td>%f</td>' % tt)
if nc == 0:
html.append('<td>-</td>')
else:
html.append('<td>%f</td>' % (float(tt) / nc))
html.append('<td>%f</td>' % ct)
if cc == 0:
html.append('<td>-</td>')
else:
html.append('<td>%f</td>' % (float(ct) / cc))
nfls = cgi.escape(stats.func_std_string(func))
if nfls.split(':')[0] not in ['', 'profile'] and\
os.path.isfile(nfls.split(':')[0]):
html.append('<td><a href="%s/%s%s?format=python#L%d">\
%s</a></td>' % (app_path, profile_id,
nfls, func[1], nfls))
else:
html.append('<td>%s</td>' % nfls)
if not nfls.startswith('/'):
nfls = '/' + nfls
html.append('<td><a href="%s/%s%s?format=json">\
--></a></td></tr>' % (app_path,
profile_id, nfls))
except Exception as ex:
html.append("Exception:" % ex.message)
return ''.join(html)
| apache-2.0 |
williamdjones/protein_binding | convert_csv_to_h5.py | 1 | 3637 | '''
The purpose of this script is to take a csv file containing protein_binding data and create an h5 with a specified proportion
of examples to hold as a testing set.
by: Derek Jones
'''
import time
import os
import h5py
import pandas as pd
import numpy as np
import argparse
from tqdm import tqdm
random_state = np.random.RandomState(0)
from sklearn.model_selection import train_test_split
parser = argparse.ArgumentParser()
parser.add_argument("-i", type=str, help="path to old csv file")
parser.add_argument("-o", type=str, help="path to new h5 file")
parser.add_argument("-c", type=str, help="prefix for compound lists")
parser.add_argument("--a", type=float, help="ratio of examples to hold out for test set", default=0.2) #create 80/20 split
args = parser.parse_args()
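# Hedged example invocation (file and prefix names are hypothetical):
#   python convert_csv_to_h5.py -i binding.csv -o binding.h5 -c run1 --a 0.2
# This writes /train/<receptor> and /test/<receptor> groups to the h5 file and
# run1_training_compounds.csv / run1_testing_compounds.csv compound lists.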
def save_to_hdf5(data_frame, output_name):
output_file = h5py.File(output_name, "w", libver='latest')
column_names = list(data_frame.columns.values)
group_names = list(set(data_frame['receptor']))
output_file.create_group("train")
output_file.create_group("test")
for group_name in tqdm(group_names):
grp_data = data_frame[data_frame['receptor'] == group_name]
labels = data_frame[data_frame['receptor'] == group_name]["label"]
idxs = np.arange(0, len(labels))
train_idxs, test_idxs = train_test_split(idxs, stratify=labels, test_size=args.a, random_state=random_state)
data_frame.iloc[train_idxs][["receptor","drugID","label","vina_score"]].to_csv(args.c+"_training_compounds.csv")
data_frame.iloc[test_idxs][["receptor", "drugID", "label", "vina_score"]].to_csv(args.c + "_testing_compounds.csv")
output_file['/train'].create_group(group_name)
output_file['/test'].create_group(group_name)
for feature in iter(column_names):
# print(feature)
if feature == "label":
output_file['train/'+str(group_name)].require_dataset(str(feature), [grp_data.iloc[train_idxs].shape[0], 1],
data=np.asarray(grp_data.iloc[train_idxs][feature]),
dtype=np.int8)
output_file['test/'+str(group_name)].require_dataset(str(feature), [grp_data.iloc[test_idxs].shape[0], 1], data=np.asarray(grp_data.iloc[test_idxs][feature]),
dtype=np.int8)
elif feature in ['receptor','drugID','Filename']:
output_file['train/'+str(group_name)].require_dataset(str(feature), [grp_data.iloc[train_idxs].shape[0], 1],
data=np.asarray(grp_data.iloc[train_idxs][feature]),
dtype=h5py.special_dtype(vlen=str))
output_file['test/'+str(group_name)].require_dataset(str(feature), [grp_data.iloc[test_idxs].shape[0], 1],
data=np.asarray(grp_data.iloc[test_idxs][feature]),
dtype=h5py.special_dtype(vlen=str))
else:
output_file['train/'+str(group_name)+"/"+str(feature)] = np.asarray(pd.to_numeric(grp_data.iloc[train_idxs][feature]),dtype=np.float16)
output_file['test/'+str(group_name)+"/"+str(feature)] = np.asarray(pd.to_numeric(grp_data.iloc[test_idxs][feature]),dtype=np.float16)
output_file.close()
t0 = time.clock()
save_to_hdf5(pd.read_csv(args.i, keep_default_na=False,na_values=[np.nan, 'na']).convert_objects(convert_numeric=True), args.o)
t1 = time.clock()
print(args.i, "converted to .h5 in", (t1-t0), "seconds.")
| mit |
clemkoa/scikit-learn | examples/neighbors/plot_kde_1d.py | 60 | 5120 | """
===================================
Simple 1D Kernel Density Estimation
===================================
This example uses the :class:`sklearn.neighbors.KernelDensity` class to
demonstrate the principles of Kernel Density Estimation in one dimension.
The first plot shows one of the problems with using histograms to visualize
the density of points in 1D. Intuitively, a histogram can be thought of as a
scheme in which a unit "block" is stacked above each point on a regular grid.
As the top two panels show, however, the choice of gridding for these blocks
can lead to wildly divergent ideas about the underlying shape of the density
distribution. If we instead center each block on the point it represents, we
get the estimate shown in the bottom left panel. This is a kernel density
estimation with a "top hat" kernel. This idea can be generalized to other
kernel shapes: the bottom-right panel of the first figure shows a Gaussian
kernel density estimate over the same distribution.
Scikit-learn implements efficient kernel density estimation using either
a Ball Tree or KD Tree structure, through the
:class:`sklearn.neighbors.KernelDensity` estimator. The available kernels
are shown in the second figure of this example.
The third figure compares kernel density estimates for a distribution of 100
samples in 1 dimension. Though this example uses 1D distributions, kernel
density estimation is easily and efficiently extensible to higher dimensions
as well.
"""
# Author: Jake Vanderplas <[email protected]>
#
import numpy as np
import matplotlib.pyplot as plt
from scipy.stats import norm
from sklearn.neighbors import KernelDensity
#----------------------------------------------------------------------
# Plot the progression of histograms to kernels
np.random.seed(1)
N = 20
X = np.concatenate((np.random.normal(0, 1, int(0.3 * N)),
np.random.normal(5, 1, int(0.7 * N))))[:, np.newaxis]
X_plot = np.linspace(-5, 10, 1000)[:, np.newaxis]
bins = np.linspace(-5, 10, 10)
fig, ax = plt.subplots(2, 2, sharex=True, sharey=True)
fig.subplots_adjust(hspace=0.05, wspace=0.05)
# histogram 1
ax[0, 0].hist(X[:, 0], bins=bins, fc='#AAAAFF', normed=True)
ax[0, 0].text(-3.5, 0.31, "Histogram")
# histogram 2
ax[0, 1].hist(X[:, 0], bins=bins + 0.75, fc='#AAAAFF', normed=True)
ax[0, 1].text(-3.5, 0.31, "Histogram, bins shifted")
# tophat KDE
kde = KernelDensity(kernel='tophat', bandwidth=0.75).fit(X)
log_dens = kde.score_samples(X_plot)
ax[1, 0].fill(X_plot[:, 0], np.exp(log_dens), fc='#AAAAFF')
ax[1, 0].text(-3.5, 0.31, "Tophat Kernel Density")
# Gaussian KDE
kde = KernelDensity(kernel='gaussian', bandwidth=0.75).fit(X)
log_dens = kde.score_samples(X_plot)
ax[1, 1].fill(X_plot[:, 0], np.exp(log_dens), fc='#AAAAFF')
ax[1, 1].text(-3.5, 0.31, "Gaussian Kernel Density")
for axi in ax.ravel():
axi.plot(X[:, 0], np.zeros(X.shape[0]) - 0.01, '+k')
axi.set_xlim(-4, 9)
axi.set_ylim(-0.02, 0.34)
for axi in ax[:, 0]:
axi.set_ylabel('Normalized Density')
for axi in ax[1, :]:
axi.set_xlabel('x')
#----------------------------------------------------------------------
# Plot all available kernels
X_plot = np.linspace(-6, 6, 1000)[:, None]
X_src = np.zeros((1, 1))
fig, ax = plt.subplots(2, 3, sharex=True, sharey=True)
fig.subplots_adjust(left=0.05, right=0.95, hspace=0.05, wspace=0.05)
def format_func(x, loc):
if x == 0:
return '0'
elif x == 1:
return 'h'
elif x == -1:
return '-h'
else:
return '%ih' % x
for i, kernel in enumerate(['gaussian', 'tophat', 'epanechnikov',
'exponential', 'linear', 'cosine']):
axi = ax.ravel()[i]
log_dens = KernelDensity(kernel=kernel).fit(X_src).score_samples(X_plot)
axi.fill(X_plot[:, 0], np.exp(log_dens), '-k', fc='#AAAAFF')
axi.text(-2.6, 0.95, kernel)
axi.xaxis.set_major_formatter(plt.FuncFormatter(format_func))
axi.xaxis.set_major_locator(plt.MultipleLocator(1))
axi.yaxis.set_major_locator(plt.NullLocator())
axi.set_ylim(0, 1.05)
axi.set_xlim(-2.9, 2.9)
ax[0, 1].set_title('Available Kernels')
#----------------------------------------------------------------------
# Plot a 1D density example
N = 100
np.random.seed(1)
X = np.concatenate((np.random.normal(0, 1, int(0.3 * N)),
np.random.normal(5, 1, int(0.7 * N))))[:, np.newaxis]
X_plot = np.linspace(-5, 10, 1000)[:, np.newaxis]
true_dens = (0.3 * norm(0, 1).pdf(X_plot[:, 0])
+ 0.7 * norm(5, 1).pdf(X_plot[:, 0]))
fig, ax = plt.subplots()
ax.fill(X_plot[:, 0], true_dens, fc='black', alpha=0.2,
label='input distribution')
for kernel in ['gaussian', 'tophat', 'epanechnikov']:
kde = KernelDensity(kernel=kernel, bandwidth=0.5).fit(X)
log_dens = kde.score_samples(X_plot)
ax.plot(X_plot[:, 0], np.exp(log_dens), '-',
label="kernel = '{0}'".format(kernel))
ax.text(6, 0.38, "N={0} points".format(N))
ax.legend(loc='upper left')
ax.plot(X[:, 0], -0.005 - 0.01 * np.random.random(X.shape[0]), '+k')
ax.set_xlim(-4, 9)
ax.set_ylim(-0.02, 0.4)
plt.show()
| bsd-3-clause |
metpy/MetPy | metpy/testing.py | 1 | 6677 | # Copyright (c) 2015,2016,2018 MetPy Developers.
# Distributed under the terms of the BSD 3-Clause License.
# SPDX-License-Identifier: BSD-3-Clause
r"""Collection of utilities for testing.
This includes:
* unit-aware test functions
* code for testing matplotlib figures
"""
from __future__ import absolute_import
import functools
import numpy as np
import numpy.testing
from pint import DimensionalityError
import pytest
import xarray as xr
from metpy.calc import wind_components
from metpy.cbook import get_test_data
from metpy.deprecation import MetpyDeprecationWarning
from .units import units
def get_upper_air_data(date, station):
"""Get upper air observations from the test data cache.
Parameters
----------
time : datetime
The date and time of the desired observation.
station : str
The three letter ICAO identifier of the station for which data should be
downloaded.
Returns
-------
dict : upper air data
"""
sounding_key = '{0:%Y-%m-%dT%HZ}_{1:}'.format(date, station)
sounding_files = {'2016-05-22T00Z_DDC': 'may22_sounding.txt',
'2013-01-20T12Z_OUN': 'jan20_sounding.txt',
'1999-05-04T00Z_OUN': 'may4_sounding.txt',
'2002-11-11T00Z_BNA': 'nov11_sounding.txt',
'2010-12-09T12Z_BOI': 'dec9_sounding.txt'}
fname = sounding_files[sounding_key]
fobj = get_test_data(fname)
def to_float(s):
# Remove all whitespace and replace empty values with NaN
if not s.strip():
s = 'nan'
return float(s)
# Skip dashes, column names, units, and more dashes
for _ in range(4):
fobj.readline()
# Initiate lists for variables
arr_data = []
# Read all lines of data and append to lists only if there is some data
for row in fobj:
level = to_float(row[0:7])
values = (to_float(row[7:14]), to_float(row[14:21]), to_float(row[21:28]),
to_float(row[42:49]), to_float(row[49:56]))
if any(np.invert(np.isnan(values[1:]))):
arr_data.append((level,) + values)
p, z, t, td, direc, spd = np.array(arr_data).T
p = p * units.hPa
z = z * units.meters
t = t * units.degC
td = td * units.degC
direc = direc * units.degrees
spd = spd * units.knots
u, v = wind_components(spd, direc)
return {'pressure': p, 'height': z, 'temperature': t,
'dewpoint': td, 'direction': direc, 'speed': spd, 'u_wind': u, 'v_wind': v}
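# Hedged usage sketch, using one of the cached soundings listed above:
#   from datetime import datetime
#   data = get_upper_air_data(datetime(1999, 5, 4, 0), 'OUN')
#   data['pressure'], data['temperature']   # pint quantities in hPa and degC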
def check_and_drop_units(actual, desired):
r"""Check that the units on the passed in arrays are compatible; return the magnitudes.
Parameters
----------
actual : `pint.Quantity` or array-like
desired : `pint.Quantity` or array-like
Returns
-------
actual, desired
array-like versions of `actual` and `desired` once they have been
coerced to compatible units.
Raises
------
AssertionError
If the units on the passed in objects are not compatible.
"""
try:
# If the desired result has units, add dimensionless units if necessary, then
# ensure that this is compatible to the desired result.
if hasattr(desired, 'units'):
if not hasattr(actual, 'units'):
actual = units.Quantity(actual, 'dimensionless')
actual = actual.to(desired.units)
# Otherwise, the desired result has no units. Convert the actual result to
# dimensionless units if it is a united quantity.
else:
if hasattr(actual, 'units'):
actual = actual.to('dimensionless')
except DimensionalityError:
raise AssertionError('Units are not compatible: {} should be {}'.format(
actual.units, getattr(desired, 'units', 'dimensionless')))
except AttributeError:
pass
if hasattr(actual, 'magnitude'):
actual = actual.magnitude
if hasattr(desired, 'magnitude'):
desired = desired.magnitude
return actual, desired
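# Hedged example: compatible units are coerced before the magnitudes are
# returned, e.g.
#   check_and_drop_units(1000. * units.m, 1 * units.km)   # -> (1.0, 1)
# while mixing incompatible units (metres vs. seconds) raises AssertionError.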
def assert_nan(value, units):
"""Check for nan with proper units."""
value, _ = check_and_drop_units(value, np.nan * units)
assert np.isnan(value)
def assert_almost_equal(actual, desired, decimal=7):
"""Check that values are almost equal, including units.
Wrapper around :func:`numpy.testing.assert_almost_equal`
"""
actual, desired = check_and_drop_units(actual, desired)
numpy.testing.assert_almost_equal(actual, desired, decimal)
def assert_array_almost_equal(actual, desired, decimal=7):
"""Check that arrays are almost equal, including units.
Wrapper around :func:`numpy.testing.assert_array_almost_equal`
"""
actual, desired = check_and_drop_units(actual, desired)
numpy.testing.assert_array_almost_equal(actual, desired, decimal)
def assert_array_equal(actual, desired):
"""Check that arrays are equal, including units.
Wrapper around :func:`numpy.testing.assert_array_equal`
"""
actual, desired = check_and_drop_units(actual, desired)
numpy.testing.assert_array_equal(actual, desired)
def assert_xarray_allclose(actual, desired):
"""Check that the xarrays are almost equal, including coordinates and attributes."""
xr.testing.assert_allclose(actual, desired)
assert desired.metpy.coordinates_identical(actual)
assert desired.attrs == actual.attrs
@pytest.fixture(scope='module', autouse=True)
def set_agg_backend():
"""Fixture to ensure the Agg backend is active."""
import matplotlib.pyplot as plt
prev_backend = plt.get_backend()
try:
plt.switch_backend('agg')
yield
finally:
plt.switch_backend(prev_backend)
@pytest.fixture(autouse=True)
def patch_round(monkeypatch):
"""Fixture to patch builtin round using numpy's.
This works around the fact that built-in round changed between Python 2 and 3. This
is probably not needed once we're testing on matplotlib 2.0, which has been updated
to use numpy's throughout.
"""
monkeypatch.setitem(__builtins__, 'round', np.round)
def check_and_silence_deprecation(func):
"""Decorate a function to swallow metpy deprecation warnings, making sure they are present.
    This should be used on tests of deprecated functions so that the deprecation warnings
    are not printed during the test run, while still checking that the warning is emitted
    and that the function works as intended.
"""
@functools.wraps(func)
def wrapper(*args, **kwargs):
with pytest.warns(MetpyDeprecationWarning):
return func(*args, **kwargs)
return wrapper
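# Illustrative usage of check_and_silence_deprecation (sketch; `old_function` is a
# hypothetical deprecated routine, not something defined in this module):
#   @check_and_silence_deprecation
#   def test_old_function():
#       old_function(1, 2)
# The wrapped test passes only if MetpyDeprecationWarning is actually emitted.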
| bsd-3-clause |
jpinedaf/pyspeckit | pyspeckit/spectrum/models/formaldehyde.py | 3 | 23644 | """
===========================
Formaldehyde cm-line fitter
===========================
This is a formaldehyde 1_11-1_10 / 2_12-2_11 fitter. It includes hyperfine
components of the formaldehyde lines and has both LTE and RADEX LVG based
models
Module API
^^^^^^^^^^
"""
from __future__ import print_function
import numpy as np
from ...mpfit import mpfit
from .. import units
from . import fitter,model,modelgrid
import matplotlib.cbook as mpcb
import copy
from . import hyperfine
from ...specwarnings import warn
from six.moves import xrange
try: # for model grid reading
import astropy.io.fits as pyfits
except ImportError:
import pyfits
try:
import scipy.interpolate
import scipy.ndimage
scipyOK = True
except ImportError:
scipyOK=False
line_names = ['oneone','twotwo','threethree']
line_names = ['oneone_f10', 'oneone_f01', 'oneone_f22', 'oneone_f21',
'oneone_f12', 'oneone_f11', 'twotwo_f11', 'twotwo_f12',
'twotwo_f21', 'twotwo_f32', 'twotwo_f33', 'twotwo_f22',
'twotwo_f23']
# http://adsabs.harvard.edu/abs/1971ApJ...169..429T has the most accurate freqs
# http://adsabs.harvard.edu/abs/1972ApJ...174..463T [twotwo]
central_freq_dict = {
'oneone': 4.82965996e9,
'twotwo': 14.48847881e9,
'threethree': 28.97480e9,
}
line_strength_dict={
'oneone_f10': 4.,
'oneone_f01': 4.,
'oneone_f22': 15.,
'oneone_f21': 5.,
'oneone_f12': 5.,
'oneone_f11': 3.,
'twotwo_f11': 15.,
'twotwo_f12': 5.,
'twotwo_f21': 5.,
'twotwo_f32': 5.19,
'twotwo_f33': 41.48,
'twotwo_f22': 23.15,
'twotwo_f23': 5.19,
'threethree_f22':1,
'threethree_f44':1,
'threethree_f33':1,
}
relative_strength_total_degeneracy={
'oneone_f10': 36.,
'oneone_f01': 36.,
'oneone_f22': 36.,
'oneone_f21': 36.,
'oneone_f12': 36.,
'oneone_f11': 36.,
'twotwo_f11': 100.01,
'twotwo_f12': 100.01,
'twotwo_f21': 100.01,
'twotwo_f32': 100.01,
'twotwo_f33': 100.01,
'twotwo_f22': 100.01,
'twotwo_f23': 100.01,
'threethree_f22':3.0,
'threethree_f44':3.0,
'threethree_f33':3.0,
}
hf_freq_dict={
'oneone_f10':4.82965996e9 - 18.53e3,
'oneone_f01':4.82965996e9 - 1.34e3,
'oneone_f22':4.82965996e9 - 0.35e3,
'oneone_f21':4.82965996e9 + 4.05e3,
'oneone_f12':4.82965996e9 + 6.48e3,
'oneone_f11':4.82965996e9 + 11.08e3,
'twotwo_f11':14.48847881e9 - 19.97e3,
'twotwo_f12':14.48847881e9 - 7.03e3,
'twotwo_f21':14.48847881e9 - 2.20e3,
'twotwo_f32':14.48847881e9 + 0.12e3,
'twotwo_f33':14.48847881e9 + 0.89e3,
'twotwo_f22':14.48847881e9 + 10.74e3,
'twotwo_f23':14.48847881e9 + 11.51e3,
'threethree_f22':28.97478e9,
'threethree_f44':28.97480e9,
'threethree_f33':28.97481e9,
}
freq_dict = copy.copy(hf_freq_dict)
freq_dict.update(central_freq_dict)
aval_dict = {
'oneone': 10**-8.44801, #64*!pi**4/(3*h*c**3)*nu11**3*mu0**2*(1/2.)
'twotwo': 10**-7.49373, #64*!pi**4/(3*h*c**3)*nu22**3*mu0**2*(2/3.)
'threethree': 10**-6.89179, #64*!pi**4/(3*h*c**3)*nu33**3*mu0**2*(3/4.)
}
hf_aval_dict={
'oneone_f10':10**-8.92509,
'oneone_f01':10**-8.44797,
'oneone_f22':10**-8.57294,
'oneone_f21':10**-9.05004,
'oneone_f12':10**-8.82819,
'oneone_f11':10**-9.05009,
'twotwo_f11':10**-7.61876,
'twotwo_f12':10**-8.09586,
'twotwo_f21':10**-8.31771,
'twotwo_f32':10**-8.44804,
'twotwo_f33':10**-7.54494,
'twotwo_f22':10**-7.65221,
'twotwo_f23':10**-8.30191,
'threethree_f22':10**-6.94294,
'threethree_f44':10**-6.91981,
'threethree_f33':10**-6.96736,
}
ortho_dict = {
'oneone': False,
'twotwo': False,
'threethree': False,
}
n_ortho = np.arange(0,28,3) # 0..3..27
n_para = np.array([x for x in range(28) if x % 3 != 0])
voff_lines_dict = {
'oneone': [(hf_freq_dict[f]-freq_dict['oneone'])/freq_dict['oneone']*units.speedoflight_ms for f in hf_freq_dict.keys() if "oneone" in f],
'twotwo': [(hf_freq_dict[f]-freq_dict['twotwo'])/freq_dict['twotwo']*units.speedoflight_ms for f in hf_freq_dict.keys() if "twotwo" in f],
'threethree': [(hf_freq_dict[f]-freq_dict['threethree'])/freq_dict['threethree']*units.speedoflight_ms for f in hf_freq_dict.keys() if "threethree" in f],
}
voff_lines_dict={ # opposite signs of freq offset
'oneone_f10': + 18.53e3/freq_dict['oneone'] * units.speedoflight_ms / 1000.0,
'oneone_f01': + 1.34e3 /freq_dict['oneone'] * units.speedoflight_ms / 1000.0,
'oneone_f22': + 0.35e3 /freq_dict['oneone'] * units.speedoflight_ms / 1000.0,
'oneone_f21': - 4.05e3 /freq_dict['oneone'] * units.speedoflight_ms / 1000.0,
'oneone_f12': - 6.48e3 /freq_dict['oneone'] * units.speedoflight_ms / 1000.0,
'oneone_f11': - 11.08e3/freq_dict['oneone'] * units.speedoflight_ms / 1000.0,
'twotwo_f11': + 19.97e3/freq_dict['twotwo'] * units.speedoflight_ms / 1000.0,
'twotwo_f12': + 7.03e3/freq_dict['twotwo'] * units.speedoflight_ms / 1000.0,
'twotwo_f21': + 2.20e3/freq_dict['twotwo'] * units.speedoflight_ms / 1000.0,
'twotwo_f32': - 0.12e3/freq_dict['twotwo'] * units.speedoflight_ms / 1000.0,
'twotwo_f33': - 0.89e3/freq_dict['twotwo'] * units.speedoflight_ms / 1000.0,
'twotwo_f22': - 10.74e3/freq_dict['twotwo'] * units.speedoflight_ms / 1000.0,
'twotwo_f23': - 11.51e3/freq_dict['twotwo'] * units.speedoflight_ms / 1000.0,
'threethree_f22':28.97478e9,
'threethree_f44':28.97480e9,
'threethree_f33':28.97481e9,
}
formaldehyde_vtau = hyperfine.hyperfinemodel(line_names, voff_lines_dict,
freq_dict, line_strength_dict,
relative_strength_total_degeneracy)
formaldehyde_vtau_fitter = formaldehyde_vtau.fitter
formaldehyde_vtau_vheight_fitter = formaldehyde_vtau.vheight_fitter
formaldehyde_vtau_tbg_fitter = formaldehyde_vtau.background_fitter
def formaldehyde_radex(xarr, density=4, column=13, xoff_v=0.0, width=1.0,
grid_vwidth=1.0, grid_vwidth_scale=False, texgrid=None,
taugrid=None, hdr=None, path_to_texgrid='',
path_to_taugrid='', temperature_gridnumber=3,
debug=False, verbose=False, **kwargs):
"""
Use a grid of RADEX-computed models to make a model line spectrum
The RADEX models have to be available somewhere.
OR they can be passed as arrays. If as arrays, the form should be:
texgrid = ((minfreq1,maxfreq1,texgrid1),(minfreq2,maxfreq2,texgrid2))
xarr must be a SpectroscopicAxis instance
xoff_v, width are both in km/s
grid_vwidth is the velocity assumed when computing the grid in km/s
this is important because tau = modeltau / width (see, e.g.,
Draine 2011 textbook pgs 219-230)
grid_vwidth_scale is True or False: False for LVG, True for Sphere
"""
if texgrid is None and taugrid is None:
if path_to_texgrid == '' or path_to_taugrid=='':
raise IOError("Must specify model grids to use.")
else:
taugrid = [pyfits.getdata(path_to_taugrid)]
texgrid = [pyfits.getdata(path_to_texgrid)]
hdr = pyfits.getheader(path_to_taugrid)
yinds,xinds = np.indices(taugrid[0].shape[1:])
densityarr = (xinds+hdr['CRPIX1']-1)*hdr['CD1_1']+hdr['CRVAL1'] # log density
columnarr = (yinds+hdr['CRPIX2']-1)*hdr['CD2_2']+hdr['CRVAL2'] # log column
minfreq = (4.8,)
maxfreq = (5.0,)
elif len(taugrid)==len(texgrid) and hdr is not None:
minfreq,maxfreq,texgrid = zip(*texgrid)
minfreq,maxfreq,taugrid = zip(*taugrid)
yinds,xinds = np.indices(taugrid[0].shape[1:])
densityarr = (xinds+hdr['CRPIX1']-1)*hdr['CD1_1']+hdr['CRVAL1'] # log density
columnarr = (yinds+hdr['CRPIX2']-1)*hdr['CD2_2']+hdr['CRVAL2'] # log column
else:
raise Exception
    # Convert X-axis units to frequency in Hz
xarr = xarr.as_unit('Hz', quiet=True)
tau_nu_cumul = np.zeros(len(xarr))
gridval1 = np.interp(density, densityarr[0,:], xinds[0,:])
gridval2 = np.interp(column, columnarr[:,0], yinds[:,0])
if np.isnan(gridval1) or np.isnan(gridval2):
raise ValueError("Invalid column/density")
if scipyOK:
slices = [temperature_gridnumber] + [slice(int(np.floor(gv)),int(np.floor(gv))+2) for gv in (gridval2,gridval1)]
tau = [scipy.ndimage.map_coordinates(tg[slices],np.array([[gridval2%1],[gridval1%1]]),order=1) for tg in taugrid]
tex = [scipy.ndimage.map_coordinates(tg[slices],np.array([[gridval2%1],[gridval1%1]]),order=1) for tg in texgrid]
else:
raise ImportError("Couldn't import scipy, therefore cannot interpolate")
#tau = modelgrid.line_params_2D(gridval1,gridval2,densityarr,columnarr,taugrid[temperature_gridnumber,:,:])
#tex = modelgrid.line_params_2D(gridval1,gridval2,densityarr,columnarr,texgrid[temperature_gridnumber,:,:])
if verbose:
print("density %20.12g column %20.12g: tau %20.12g tex %20.12g" % (density, column, tau, tex))
if debug:
import pdb; pdb.set_trace()
spec = np.sum([(formaldehyde_vtau(xarr,Tex=float(tex[ii]),tau=float(tau[ii]),xoff_v=xoff_v,width=width, **kwargs)
* (xarr.as_unit('GHz')>minfreq[ii]) * (xarr.as_unit('GHz')<maxfreq[ii])) for ii in xrange(len(tex))],
axis=0)
return spec
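# Illustrative call (a sketch only; the FITS grid filenames are placeholders and
# are not shipped with pyspeckit, and xarr is assumed to be a SpectroscopicAxis
# covering the 4.8 GHz line):
#   model = formaldehyde_radex(xarr, density=4.5, column=13.2, xoff_v=0.0,
#                              width=1.0, path_to_texgrid='h2co_tex_grid.fits',
#                              path_to_taugrid='h2co_tau_grid.fits',
#                              temperature_gridnumber=3)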
def formaldehyde_radex_orthopara_temp(xarr, density=4, column=13,
orthopara=1.0, temperature=15.0,
xoff_v=0.0, width=1.0,
Tbackground1=2.73,
Tbackground2=2.73,
grid_vwidth=1.0,
grid_vwidth_scale=False, texgrid=None,
taugrid=None, hdr=None,
path_to_texgrid='', path_to_taugrid='',
debug=False, verbose=False,
getpars=False, **kwargs):
"""
Use a grid of RADEX-computed models to make a model line spectrum
The RADEX models have to be available somewhere.
OR they can be passed as arrays. If as arrays, the form should be:
texgrid = ((minfreq1,maxfreq1,texgrid1),(minfreq2,maxfreq2,texgrid2))
xarr must be a SpectroscopicAxis instance
xoff_v, width are both in km/s
grid_vwidth is the velocity assumed when computing the grid in km/s
this is important because tau = modeltau / width (see, e.g.,
Draine 2011 textbook pgs 219-230)
grid_vwidth_scale is True or False: False for LVG, True for Sphere
"""
if texgrid is None and taugrid is None:
if path_to_texgrid == '' or path_to_taugrid=='':
raise IOError("Must specify model grids to use.")
else:
taugrid = [pyfits.getdata(path_to_taugrid)]
texgrid = [pyfits.getdata(path_to_texgrid)]
hdr = pyfits.getheader(path_to_taugrid)
minfreq = (4.8,)
maxfreq = (5.0,)
elif len(taugrid)==len(texgrid) and hdr is not None:
minfreq,maxfreq,texgrid = zip(*texgrid)
minfreq,maxfreq,taugrid = zip(*taugrid)
else:
raise Exception
densityarr = (np.arange(taugrid[0].shape[3])+hdr['CRPIX1']-1)*hdr['CD1_1']+hdr['CRVAL1'] # log density
columnarr = (np.arange(taugrid[0].shape[2])+hdr['CRPIX2']-1)*hdr['CD2_2']+hdr['CRVAL2'] # log column
temparr = (np.arange(taugrid[0].shape[1])+hdr['CRPIX3']-1)*hdr['CDELT3']+hdr['CRVAL3'] # temperature
oprarr = (np.arange(taugrid[0].shape[0])+hdr['CRPIX4']-1)*hdr['CDELT4']+hdr['CRVAL4'] # log ortho/para ratio
gridval1 = np.interp(density, densityarr, np.arange(len(densityarr)))
gridval2 = np.interp(column, columnarr, np.arange(len(columnarr)))
gridval3 = np.interp(temperature, temparr, np.arange(len(temparr)))
gridval4 = np.interp(orthopara, oprarr, np.arange(len(oprarr)))
if np.isnan(gridval1) or np.isnan(gridval2):
raise ValueError("Invalid column/density")
if scipyOK:
slices = [slice(int(np.floor(gv)),int(np.floor(gv)+2))
for gv in (gridval4,gridval3,gridval2,gridval1)]
tau = [scipy.ndimage.map_coordinates(tg[slices],
np.array([[gridval4 % 1],
[gridval3 % 1],
[gridval2 % 1],
[gridval1 % 1]]),
order=1, prefilter=False)
for tg in taugrid]
tex = [scipy.ndimage.map_coordinates(tg[slices],
np.array([[gridval4 % 1],
[gridval3 % 1],
[gridval2 % 1],
[gridval1 % 1]]),
order=1,prefilter=False)
for tg in texgrid]
else:
raise ImportError("Couldn't import scipy, therefore cannot interpolate")
#tau = modelgrid.line_params_2D(gridval1,gridval2,densityarr,columnarr,taugrid[temperature_gridnumber,:,:])
#tex = modelgrid.line_params_2D(gridval1,gridval2,densityarr,columnarr,texgrid[temperature_gridnumber,:,:])
# there can be different background temperatures at each frequency
tbg = [Tbackground1,Tbackground2]
if verbose:
print("density %20.12g column: %20.12g temperature: %20.12g opr: %20.12g xoff_v: %20.12g width: %20.12g" % (density, column, temperature, orthopara, xoff_v, width))
print("tau: ",tau," tex: ",tex)
print("minfreq: ",minfreq," maxfreq: ",maxfreq)
print("tbg: ",tbg)
if debug > 1:
import pdb; pdb.set_trace()
if getpars:
return tau,tex
spec = np.sum([(formaldehyde_vtau(xarr.as_unit('Hz', quiet=True),
Tex=float(tex[ii]), tau=float(tau[ii]),
Tbackground=tbg[ii], xoff_v=xoff_v,
width=width, **kwargs)
* (xarr.as_unit('GHz')>minfreq[ii])
* (xarr.as_unit('GHz')<maxfreq[ii]))
for ii in xrange(len(tex))],
axis=0)
return spec
def formaldehyde(xarr, amp=1.0, xoff_v=0.0, width=1.0,
return_hyperfine_components=False, texscale=0.01, tau=0.01, **kwargs):
"""
Generate a model Formaldehyde spectrum based on simple gaussian parameters
the "amplitude" is an essentially arbitrary parameter; we therefore define
it to be Tex given tau=0.01 when passing to the fitter
The final spectrum is then rescaled to that value
"""
mdl = formaldehyde_vtau(xarr, Tex=amp*texscale, tau=tau, xoff_v=xoff_v,
width=width,
return_tau=True,
return_hyperfine_components=return_hyperfine_components, **kwargs)
if return_hyperfine_components:
mdlpeak = np.abs(mdl).squeeze().sum(axis=0).max()
else:
mdlpeak = np.abs(mdl).max()
if mdlpeak > 0:
mdl *= amp/mdlpeak
return mdl
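# Illustrative sketch of the simple Gaussian-parameter wrapper above (xarr is
# assumed to be a SpectroscopicAxis in frequency units; the parameter values are
# arbitrary):
#   mdl = formaldehyde(xarr, amp=0.5, xoff_v=3.0, width=1.2)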
def formaldehyde_pyradex(xarr, density=4, column=13, temperature=20,
xoff_v=0.0, opr=1.0, width=1.0, tbackground=2.73,
grid_vwidth=1.0, debug=False, verbose=False,
**kwargs):
"""
Use a grid of RADEX-computed models to make a model line spectrum
The RADEX models have to be available somewhere.
OR they can be passed as arrays. If as arrays, the form should be:
texgrid = ((minfreq1,maxfreq1,texgrid1),(minfreq2,maxfreq2,texgrid2))
xarr must be a SpectroscopicAxis instance
xoff_v, width are both in km/s
grid_vwidth is the velocity assumed when computing the grid in km/s
this is important because tau = modeltau / width (see, e.g.,
Draine 2011 textbook pgs 219-230)
"""
raise NotImplementedError("Not done yet.")
import pyradex
    # Convert X-axis units to frequency in Hz
xarr = xarr.as_unit('Hz', quiet=True)
tb_nu_cumul = np.zeros(len(xarr))
R = pyradex.Radex(molecule='oh2co-h2', column=column,
temperature=temperature, density=10**density,
tbackground=tbackground,)
spec = np.sum([(formaldehyde_vtau(xarr,Tex=float(tex[ii]),tau=float(tau[ii]),xoff_v=xoff_v,width=width, **kwargs)
* (xarr.as_unit('GHz')>minfreq[ii]) * (xarr.as_unit('GHz')<maxfreq[ii])) for ii in xrange(len(tex))],
axis=0)
return spec
class formaldehyde_model(model.SpectralModel):
def formaldehyde_integral(self, modelpars, linename='oneone'):
"""
Return the integral of the individual components (ignoring height)
"""
raise NotImplementedError("Not implemented, but the integral is just amplitude * width * sqrt(2*pi)")
# produced by directly computing the integral of gaussians and formaldehydeians as a function of
# line width and then fitting that with a broken logarithmic power law
# The errors are <0.5% for all widths
formaldehyde_to_gaussian_ratio_coefs = {
'lt0.1_oneone': np.array([ -5.784020,-40.058798,-111.172706,-154.256411,-106.593122,-28.933119]),
'gt0.1_oneone': np.array([ 0.038548, -0.071162, -0.045710, 0.183828, -0.145429, 0.040039]),
'lt0.1_twotwo': np.array([ 1.156561, 6.638570, 11.782065, -0.429536,-24.860297,-27.902274, -9.510288]),
'gt0.1_twotwo': np.array([ -0.090646, 0.078204, 0.123181, -0.175590, 0.089506, -0.034687, 0.008676]),
}
integ = 0
if len(modelpars) % 3 == 0:
for amp,cen,width in np.reshape(modelpars,[len(modelpars)/3,3]):
gaussint = amp*width*np.sqrt(2.0*np.pi)
cftype = "gt0.1_"+linename if width > 0.1 else "lt0.1_"+linename
correction_factor = 10**np.polyval(formaldehyde_to_gaussian_ratio_coefs[cftype], np.log10(width) )
# debug statement print("Two components of the integral: amp %g, width %g, gaussint %g, correction_factor %g " % (amp,width,gaussint,correction_factor))
integ += gaussint*correction_factor
return integ
formaldehyde_fitter = formaldehyde_model(formaldehyde, 3,
parnames=['amp','center','width'],
parlimited=[(False,False),(False,False), (True,False)],
parlimits=[(0,0), (0,0), (0,0)],
shortvarnames=("A","v","\\sigma"), # specify the parameter names (TeX is OK)
fitunit='Hz' )
formaldehyde_vheight_fitter = formaldehyde_model(fitter.vheightmodel(formaldehyde), 4,
parnames=['height','amp','center','width'],
parlimited=[(False,False),(False,False),(False,False), (True,False)],
parlimits=[(0,0), (0,0), (0,0), (0,0)],
shortvarnames=("H","A","v","\\sigma"), # specify the parameter names (TeX is OK)
fitunit='Hz' )
# Create a tau-only fit:
def formaldehyde_radex_tau(xarr, density=4, column=13, xoff_v=0.0, width=1.0,
grid_vwidth=1.0, grid_vwidth_scale=False,
taugrid=None, hdr=None, path_to_taugrid='',
temperature_gridnumber=3, debug=False,
verbose=False, return_hyperfine_components=False,
**kwargs):
"""
Use a grid of RADEX-computed models to make a model line spectrum
* uses hyperfine components
* assumes *tau* varies but *tex* does not!
The RADEX models have to be available somewhere.
OR they can be passed as arrays. If as arrays, the form should be:
texgrid = ((minfreq1,maxfreq1,texgrid1),(minfreq2,maxfreq2,texgrid2))
xarr must be a SpectroscopicAxis instance
xoff_v, width are both in km/s
grid_vwidth is the velocity assumed when computing the grid in km/s
this is important because tau = modeltau / width (see, e.g.,
Draine 2011 textbook pgs 219-230)
grid_vwidth_scale is True or False: False for LVG, True for Sphere
"""
if verbose:
print("Parameters: dens=%f, column=%f, xoff=%f, width=%f" % (density, column, xoff_v, width))
if taugrid is None:
if path_to_taugrid=='':
raise IOError("Must specify model grids to use.")
else:
taugrid = [pyfits.getdata(path_to_taugrid)]
hdr = pyfits.getheader(path_to_taugrid)
yinds,xinds = np.indices(taugrid[0].shape[1:])
densityarr = (xinds+hdr['CRPIX1']-1)*hdr['CD1_1']+hdr['CRVAL1'] # log density
columnarr = (yinds+hdr['CRPIX2']-1)*hdr['CD2_2']+hdr['CRVAL2'] # log column
minfreq = (4.8,)
maxfreq = (5.0,)
elif hdr is not None:
minfreq,maxfreq,taugrid = zip(*taugrid)
yinds,xinds = np.indices(taugrid[0].shape[1:])
densityarr = (xinds+hdr['CRPIX1']-1)*hdr['CD1_1']+hdr['CRVAL1'] # log density
columnarr = (yinds+hdr['CRPIX2']-1)*hdr['CD2_2']+hdr['CRVAL2'] # log column
else:
raise Exception
    # Convert X-axis units to frequency in Hz
xarr = xarr.as_unit('Hz', quiet=True)
gridval1 = np.interp(density, densityarr[0,:], xinds[0,:])
gridval2 = np.interp(column, columnarr[:,0], yinds[:,0])
if np.isnan(gridval1) or np.isnan(gridval2):
raise ValueError("Invalid column/density")
if scipyOK:
        slices = [temperature_gridnumber] + [slice(int(np.floor(gv)),int(np.floor(gv))+2) for gv in (gridval2,gridval1)]
tau = [scipy.ndimage.map_coordinates(tg[slices],np.array([[gridval2%1],[gridval1%1]]),order=1) for tg in taugrid]
else:
raise ImportError("Couldn't import scipy, therefore cannot interpolate")
# let the hyperfine module determine the hyperfine components, and pass all of them here
spec_components = [(formaldehyde_vtau(xarr.as_unit('Hz', quiet=True),
tau=float(tau[ii]), xoff_v=xoff_v, width=width,
return_tau=True, return_hyperfine_components=True, **kwargs) *
(xarr.as_unit('GHz')>minfreq[ii]) *
(xarr.as_unit('GHz')<maxfreq[ii]))
for ii in xrange(len(tau))]
# get an array of [n_lines, n_hyperfine, len(xarr)]
if return_hyperfine_components:
return np.array(spec_components).sum(axis=0)
else:
return np.sum(spec_components, axis=0).sum(axis=0)
try:
import pymodelfit
class pmfFormaldehydeModel(pymodelfit.FunctionModel1DAuto):
def f(self, x, amp0=1.0, xoff_v0=0.0,width0=1.0):
return formaldehyde(x,
amp=amp0,
xoff_v=xoff_v0,width=width0)
class pmfFormaldehydeModelVtau(pymodelfit.FunctionModel1DAuto):
def f(self, x, Tex0=1.0, tau0=0.01, xoff_v0=0.0, width0=1.0):
return formaldehyde_vtau(x,
Tex=Tex0, tau=tau0,
xoff_v=xoff_v0,width=width0)
except ImportError:
pass
| mit |
davek44/Basset | src/dev/basset_anchor.py | 1 | 12462 | #!/usr/bin/env python
from optparse import OptionParser
import copy
import math
import os
import random
import subprocess
import sys
import h5py
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
from scipy.stats import spearmanr
import seaborn as sns
from sklearn.metrics import roc_auc_score, roc_curve
sns_colors = sns.color_palette('deep')
from dna_io import one_hot_set, vecs2dna
################################################################################
# basset_anchor.py
#
# Anchor a motif in the center of a set of sequences.
################################################################################
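# Example invocation (illustrative sketch; the motif string, Torch model file and
# HDF5 test file below are placeholders, not files shipped with the repository):
#   basset_anchor.py -s 1000 -t 0,5 -o anchor_out CGCG model_cnn.th encode_test.h5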
################################################################################
# main
################################################################################
def main():
usage = 'usage: %prog [options] <motif> <model_file> <test_hdf5_file>'
parser = OptionParser(usage)
parser.add_option('-d', dest='model_hdf5_file', default=None, help='Pre-computed model output as HDF5.')
parser.add_option('-f', dest='filters', default=None, help='Filters to plot length analysis [Default: %default]')
parser.add_option('-o', dest='out_dir', default='.')
parser.add_option('-p', dest='pool', default=False, action='store_true', help='Take representation after pooling [Default: %default]')
parser.add_option('-s', dest='sample', default=None, type='int', help='Sequences to sample [Default: %default]')
parser.add_option('-t', dest='targets', default=None, help='Comma-separated list of targets to analyze in more depth [Default: %default]')
(options,args) = parser.parse_args()
if len(args) != 3:
parser.error('Must provide motif, Basset model file, and test data in HDF5 format.')
else:
motif = args[0]
model_file = args[1]
test_hdf5_file = args[2]
random.seed(2)
if not os.path.isdir(options.out_dir):
os.mkdir(options.out_dir)
#################################################################
# load data
#################################################################
# load sequences
test_hdf5_in = h5py.File(test_hdf5_file, 'r')
seq_vecs = np.array(test_hdf5_in['test_in'])
seq_targets = np.array(test_hdf5_in['test_out'])
seq_headers = np.array(test_hdf5_in['test_headers'])
target_labels = np.array(test_hdf5_in['target_labels'])
test_hdf5_in.close()
#################################################################
# sample
#################################################################
if options.sample is not None and options.sample < seq_vecs.shape[0]:
# choose sampled indexes
sample_i = np.array(random.sample(xrange(seq_vecs.shape[0]), options.sample))
# filter
seq_vecs = seq_vecs[sample_i]
seq_targets = seq_targets[sample_i]
seq_headers = seq_headers[sample_i]
# create a new HDF5 file
sample_hdf5_file = '%s/sample.h5' % options.out_dir
sample_hdf5_out = h5py.File(sample_hdf5_file, 'w')
sample_hdf5_out.create_dataset('test_in', data=seq_vecs)
sample_hdf5_out.close()
# update test HDF5
test_hdf5_file = sample_hdf5_file
#################################################################
# write in motif
#################################################################
# this code must match the Torch code
seq_len = seq_vecs.shape[3]
seq_mid = math.floor(seq_len/2.0 - len(motif)/2.0) - 1
for si in range(seq_vecs.shape[0]):
for pi in range(len(motif)):
one_hot_set(seq_vecs[si], seq_mid+pi, motif[pi])
# get fasta
seq_dna = vecs2dna(seq_vecs)
#################################################################
# Torch predict
#################################################################
if options.model_hdf5_file is None:
pool_str = ''
if options.pool:
pool_str = '-pool'
options.model_hdf5_file = '%s/model_out.h5' % options.out_dir
torch_cmd = 'basset_anchor_predict.lua %s %s %s %s %s' % (pool_str, motif, model_file, test_hdf5_file, options.model_hdf5_file)
print torch_cmd
subprocess.call(torch_cmd, shell=True)
# load model output
model_hdf5_in = h5py.File(options.model_hdf5_file, 'r')
pre_preds = np.array(model_hdf5_in['pre_preds'])
preds = np.array(model_hdf5_in['preds'])
scores = np.array(model_hdf5_in['scores'])
seq_filter_outs = np.array(model_hdf5_in['filter_outs'])
pre_seq_filter_outs = np.array(model_hdf5_in['pre_filter_outs'])
model_hdf5_in.close()
# pre-process
seq_filter_means = seq_filter_outs.mean(axis=2)
filter_means = seq_filter_means.mean(axis=0)
filter_msds = seq_filter_means.std(axis=0) + 1e-6
num_seqs = seq_filter_means.shape[0]
num_filters = seq_filter_means.shape[1]
num_targets = len(target_labels)
if options.filters is None:
options.filters = range(num_filters)
else:
options.filters = [int(fi) for fi in options.filters.split(',')]
if options.targets is None:
options.targets = range(num_targets)
else:
options.targets = [int(ti) for ti in options.targets.split(',')]
#################################################################
# scatter plot prediction changes
#################################################################
sns.set(style='ticks', font_scale=1.5)
lim_eps = 0.02
for ti in options.targets:
if num_seqs > 500:
isample = np.array(random.sample(range(num_seqs), 500))
else:
isample = np.array(range(num_seqs))
plt.figure(figsize=(8,8))
g = sns.jointplot(pre_preds[isample,ti], preds[isample,ti], color='black', stat_func=None, alpha=0.5, space=0)
ax = g.ax_joint
ax.plot([0,1], [0,1], c='black', linewidth=1, linestyle='--')
ax.set_xlim((0-lim_eps, 1+lim_eps))
ax.set_ylim((0-lim_eps, 1+lim_eps))
ax.set_xlabel('Pre-insertion accessibility')
ax.set_ylabel('Post-insertion accessibility')
ax.grid(True, linestyle=':')
ax_x = g.ax_marg_x
ax_x.set_title(target_labels[ti])
plt.savefig('%s/scatter_t%d.pdf' % (options.out_dir, ti))
plt.close()
#################################################################
# plot sequences
#################################################################
for ti in options.targets:
# sort sequences by score
seqsi = np.argsort(scores[:,ti])[::-1]
# print a fasta file with uniformly sampled sequences
unif_i = np.array([int(sp) for sp in np.arange(0,num_seqs,num_seqs/200.0)])
seqsi_uniform = seqsi[unif_i]
fasta_out = open('%s/seqs_t%d.fa' % (options.out_dir,ti), 'w')
for si in seqsi_uniform:
print >> fasta_out, '>%s_gc%.2f_p%.2f\n%s' % (seq_headers[si], gc(seq_dna[si]), preds[si,ti], seq_dna[si])
fasta_out.close()
# print their filter/pos activations to a table
# this is slow and big, and I only need it when I'm trying
# to find a specific example.
table_out = open('%s/seqs_t%d_table.txt' % (options.out_dir, ti), 'w')
for si in seqsi_uniform:
for fi in range(num_filters):
for pi in range(seq_filter_outs.shape[2]):
cols = (seq_headers[si], fi, pi, seq_filter_outs[si,fi,pi])
print >> table_out, '%-25s %3d %3d %5.2f' % cols
table_out.close()
# sample fewer for heat map
unif_i = np.array([int(sp) for sp in np.arange(0,num_seqs,num_seqs/200.0)])
seqsi_uniform = seqsi[unif_i]
''' these kinda suck
# plot heat map
plt.figure()
n = 20
ax_sf = plt.subplot2grid((1,n), (0,0), colspan=n-1)
ax_ss = plt.subplot2grid((1,n), (0,n-1))
# filter heat
sf_norm = seq_filter_means[seqsi_uniform,:] - filter_means
# sf_norm = np.divide(seq_filter_means[seqsi_uniform,:] - filter_means, filter_msds)
sns.heatmap(sf_norm, vmin=-.04, vmax=.04, xticklabels=False, yticklabels=False, ax=ax_sf)
# scores heat
sns.heatmap(scores[seqsi_uniform,ti].reshape(-1,1), xticklabels=False, yticklabels=False, ax=ax_ss)
# this crashed the program, and I don't know why
# plt.tight_layout()
plt.savefig('%s/seqs_t%d.pdf' % (options.out_dir, ti))
plt.close()
'''
#################################################################
# filter mean correlations
#################################################################
# compute and print
table_out = open('%s/table.txt' % options.out_dir, 'w')
filter_target_cors = np.zeros((num_filters,num_targets))
for fi in range(num_filters):
for ti in range(num_targets):
cor, p = spearmanr(seq_filter_means[:,fi], scores[:,ti])
cols = (fi, ti, cor, p)
print >> table_out, '%-3d %3d %6.3f %6.1e' % cols
if np.isnan(cor):
cor = 0
filter_target_cors[fi,ti] = cor
table_out.close()
# plot
ftc_df = pd.DataFrame(filter_target_cors, columns=target_labels)
plt.figure()
g = sns.clustermap(ftc_df)
for tick in g.ax_heatmap.get_xticklabels():
tick.set_rotation(-45)
tick.set_horizontalalignment('left')
tick.set_fontsize(3)
for tick in g.ax_heatmap.get_yticklabels():
tick.set_fontsize(3)
plt.savefig('%s/filters_targets.pdf' % options.out_dir)
plt.close()
#################################################################
# filter position correlation
#################################################################
sns.set(style='ticks', font_scale=1.7)
table_out = open('%s/filter_pos.txt' % options.out_dir, 'w')
for fi in options.filters:
for ti in options.targets:
print 'Plotting f%d versus t%d' % (fi,ti)
# compute correlations
pos_cors = []
pos_cors_pre = []
nans = 0
for pi in range(seq_filter_outs.shape[2]):
# motif correlation
cor, p = spearmanr(seq_filter_outs[:,fi,pi], preds[:,ti])
if np.isnan(cor):
cor = 0
p = 1
nans += 1
pos_cors.append(cor)
# pre correlation
cor_pre, p_pre = spearmanr(pre_seq_filter_outs[:,fi,pi], pre_preds[:,ti])
if np.isnan(cor_pre):
cor_pre = 0
p_pre = 1
pos_cors_pre.append(cor_pre)
cols = (fi, pi, ti, cor, p, cor_pre, p_pre)
print >> table_out, '%-3d %3d %3d %6.3f %6.1e %6.3f %6.1e' % cols
if nans < 50:
# plot
# df_pc = pd.DataFrame({'Position':range(len(pos_cors)), 'Correlation':pos_cors})
plt.figure(figsize=(9,6))
plt.title(target_labels[ti])
# sns.regplot(x='Position', y='Correlation', data=df_pc, lowess=True)
plt.scatter(range(len(pos_cors)), pos_cors_pre, c=sns_colors[2], alpha=0.8, linewidths=0, label='Before motif insertion')
plt.scatter(range(len(pos_cors)), pos_cors, c=sns_colors[1], alpha=0.8, linewidths=0, label='After motif insertion')
plt.axhline(y=0, linestyle='--', c='grey', linewidth=1)
ax = plt.gca()
ax.set_xlim(0, len(pos_cors))
ax.set_xlabel('Position')
ax.set_ylabel('Activation vs Prediction Correlation')
ax.grid(True, linestyle=':')
sns.despine()
plt.legend()
plt.tight_layout()
plt.savefig('%s/f%d_t%d.pdf' % (options.out_dir,fi,ti))
plt.close()
table_out.close()
def gc(seq):
''' Return GC% '''
gc_count = 0
for nt in seq:
if nt == 'C' or nt == 'G':
gc_count += 1
return gc_count/float(len(seq))
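# Quick illustrative check: gc('ACGC') returns 0.75, since three of the four
# bases are G or C.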
################################################################################
# __main__
################################################################################
if __name__ == '__main__':
main()
| mit |
miloharper/neural-network-animation | matplotlib/backends/backend_wxagg.py | 11 | 5902 | from __future__ import (absolute_import, division, print_function,
unicode_literals)
import six
import matplotlib
from matplotlib.figure import Figure
from .backend_agg import FigureCanvasAgg
from . import backend_wx # already uses wxversion.ensureMinimal('2.8')
from .backend_wx import FigureManagerWx, FigureCanvasWx, \
FigureFrameWx, DEBUG_MSG, NavigationToolbar2Wx, error_msg_wx, \
draw_if_interactive, show, Toolbar, backend_version
import wx
class FigureFrameWxAgg(FigureFrameWx):
def get_canvas(self, fig):
return FigureCanvasWxAgg(self, -1, fig)
def _get_toolbar(self, statbar):
if matplotlib.rcParams['toolbar']=='toolbar2':
toolbar = NavigationToolbar2WxAgg(self.canvas)
toolbar.set_status_bar(statbar)
else:
toolbar = None
return toolbar
class FigureCanvasWxAgg(FigureCanvasAgg, FigureCanvasWx):
"""
The FigureCanvas contains the figure and does event handling.
In the wxPython backend, it is derived from wxPanel, and (usually)
lives inside a frame instantiated by a FigureManagerWx. The parent
window probably implements a wxSizer to control the displayed
control size - but we give a hint as to our preferred minimum
size.
"""
def draw(self, drawDC=None):
"""
Render the figure using agg.
"""
DEBUG_MSG("draw()", 1, self)
FigureCanvasAgg.draw(self)
self.bitmap = _convert_agg_to_wx_bitmap(self.get_renderer(), None)
self._isDrawn = True
self.gui_repaint(drawDC=drawDC)
def blit(self, bbox=None):
"""
Transfer the region of the agg buffer defined by bbox to the display.
If bbox is None, the entire buffer is transferred.
"""
if bbox is None:
self.bitmap = _convert_agg_to_wx_bitmap(self.get_renderer(), None)
self.gui_repaint()
return
l, b, w, h = bbox.bounds
r = l + w
t = b + h
x = int(l)
y = int(self.bitmap.GetHeight() - t)
srcBmp = _convert_agg_to_wx_bitmap(self.get_renderer(), None)
srcDC = wx.MemoryDC()
srcDC.SelectObject(srcBmp)
destDC = wx.MemoryDC()
destDC.SelectObject(self.bitmap)
destDC.BeginDrawing()
destDC.Blit(x, y, int(w), int(h), srcDC, x, y)
destDC.EndDrawing()
destDC.SelectObject(wx.NullBitmap)
srcDC.SelectObject(wx.NullBitmap)
self.gui_repaint()
filetypes = FigureCanvasAgg.filetypes
def print_figure(self, filename, *args, **kwargs):
# Use pure Agg renderer to draw
FigureCanvasAgg.print_figure(self, filename, *args, **kwargs)
# Restore the current view; this is needed because the
        # artist contains methods that rely on particular attributes
# of the rendered figure for determining things like
# bounding boxes.
if self._isDrawn:
self.draw()
class NavigationToolbar2WxAgg(NavigationToolbar2Wx):
def get_canvas(self, frame, fig):
return FigureCanvasWxAgg(frame, -1, fig)
def new_figure_manager(num, *args, **kwargs):
"""
Create a new figure manager instance
"""
# in order to expose the Figure constructor to the pylab
# interface we need to create the figure here
DEBUG_MSG("new_figure_manager()", 3, None)
backend_wx._create_wx_app()
FigureClass = kwargs.pop('FigureClass', Figure)
fig = FigureClass(*args, **kwargs)
return new_figure_manager_given_figure(num, fig)
def new_figure_manager_given_figure(num, figure):
"""
Create a new figure manager instance for the given figure.
"""
frame = FigureFrameWxAgg(num, figure)
figmgr = frame.get_figure_manager()
if matplotlib.is_interactive():
figmgr.frame.Show()
return figmgr
#
# agg/wxPython image conversion functions (wxPython >= 2.8)
#
def _convert_agg_to_wx_image(agg, bbox):
"""
Convert the region of the agg buffer bounded by bbox to a wx.Image. If
bbox is None, the entire buffer is converted.
Note: agg must be a backend_agg.RendererAgg instance.
"""
if bbox is None:
# agg => rgb -> image
image = wx.EmptyImage(int(agg.width), int(agg.height))
image.SetData(agg.tostring_rgb())
return image
else:
# agg => rgba buffer -> bitmap => clipped bitmap => image
return wx.ImageFromBitmap(_WX28_clipped_agg_as_bitmap(agg, bbox))
def _convert_agg_to_wx_bitmap(agg, bbox):
"""
Convert the region of the agg buffer bounded by bbox to a wx.Bitmap. If
bbox is None, the entire buffer is converted.
Note: agg must be a backend_agg.RendererAgg instance.
"""
if bbox is None:
# agg => rgba buffer -> bitmap
return wx.BitmapFromBufferRGBA(int(agg.width), int(agg.height),
agg.buffer_rgba())
else:
# agg => rgba buffer -> bitmap => clipped bitmap
return _WX28_clipped_agg_as_bitmap(agg, bbox)
def _WX28_clipped_agg_as_bitmap(agg, bbox):
"""
Convert the region of a the agg buffer bounded by bbox to a wx.Bitmap.
Note: agg must be a backend_agg.RendererAgg instance.
"""
l, b, width, height = bbox.bounds
r = l + width
t = b + height
srcBmp = wx.BitmapFromBufferRGBA(int(agg.width), int(agg.height),
agg.buffer_rgba())
srcDC = wx.MemoryDC()
srcDC.SelectObject(srcBmp)
destBmp = wx.EmptyBitmap(int(width), int(height))
destDC = wx.MemoryDC()
destDC.SelectObject(destBmp)
destDC.BeginDrawing()
x = int(l)
y = int(int(agg.height) - t)
destDC.Blit(0, 0, int(width), int(height), srcDC, x, y)
destDC.EndDrawing()
srcDC.SelectObject(wx.NullBitmap)
destDC.SelectObject(wx.NullBitmap)
return destBmp
FigureCanvas = FigureCanvasWxAgg
FigureManager = FigureManagerWx
| mit |
untom/scikit-learn | benchmarks/bench_sample_without_replacement.py | 397 | 8008 | """
Benchmarks for sampling without replacement of integer.
"""
from __future__ import division
from __future__ import print_function
import gc
import sys
import optparse
from datetime import datetime
import operator
import matplotlib.pyplot as plt
import numpy as np
import random
from sklearn.externals.six.moves import xrange
from sklearn.utils.random import sample_without_replacement
def compute_time(t_start, delta):
mu_second = 0.0 + 10 ** 6 # number of microseconds in a second
return delta.seconds + delta.microseconds / mu_second
def bench_sample(sampling, n_population, n_samples):
gc.collect()
# start time
t_start = datetime.now()
sampling(n_population, n_samples)
delta = (datetime.now() - t_start)
# stop time
time = compute_time(t_start, delta)
return time
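# Illustrative sketch (not part of the benchmark run below): timing a single
# numpy permutation-based draw of 10 samples from a population of 1000.
#   t = bench_sample(lambda n_pop, n_samp: np.random.permutation(n_pop)[:n_samp],
#                    1000, 10)
#   print("sampled in %.6f s" % t)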
if __name__ == "__main__":
###########################################################################
# Option parser
###########################################################################
op = optparse.OptionParser()
op.add_option("--n-times",
dest="n_times", default=5, type=int,
help="Benchmark results are average over n_times experiments")
op.add_option("--n-population",
dest="n_population", default=100000, type=int,
help="Size of the population to sample from.")
op.add_option("--n-step",
dest="n_steps", default=5, type=int,
help="Number of step interval between 0 and n_population.")
default_algorithms = "custom-tracking-selection,custom-auto," \
"custom-reservoir-sampling,custom-pool,"\
"python-core-sample,numpy-permutation"
op.add_option("--algorithm",
dest="selected_algorithm",
default=default_algorithms,
type=str,
help="Comma-separated list of transformer to benchmark. "
"Default: %default. \nAvailable: %default")
# op.add_option("--random-seed",
# dest="random_seed", default=13, type=int,
# help="Seed used by the random number generators.")
(opts, args) = op.parse_args()
if len(args) > 0:
op.error("this script takes no arguments.")
sys.exit(1)
selected_algorithm = opts.selected_algorithm.split(',')
for key in selected_algorithm:
if key not in default_algorithms.split(','):
raise ValueError("Unknown sampling algorithm \"%s\" not in (%s)."
% (key, default_algorithms))
###########################################################################
# List sampling algorithm
###########################################################################
# We assume that sampling algorithm has the following signature:
# sample(n_population, n_sample)
#
sampling_algorithm = {}
###########################################################################
# Set Python core input
sampling_algorithm["python-core-sample"] = \
lambda n_population, n_sample: \
random.sample(xrange(n_population), n_sample)
###########################################################################
# Set custom automatic method selection
sampling_algorithm["custom-auto"] = \
lambda n_population, n_samples, random_state=None: \
sample_without_replacement(n_population,
n_samples,
method="auto",
random_state=random_state)
###########################################################################
# Set custom tracking based method
sampling_algorithm["custom-tracking-selection"] = \
lambda n_population, n_samples, random_state=None: \
sample_without_replacement(n_population,
n_samples,
method="tracking_selection",
random_state=random_state)
###########################################################################
# Set custom reservoir based method
sampling_algorithm["custom-reservoir-sampling"] = \
lambda n_population, n_samples, random_state=None: \
sample_without_replacement(n_population,
n_samples,
method="reservoir_sampling",
random_state=random_state)
###########################################################################
    # Set custom pool based method
sampling_algorithm["custom-pool"] = \
lambda n_population, n_samples, random_state=None: \
sample_without_replacement(n_population,
n_samples,
method="pool",
random_state=random_state)
###########################################################################
# Numpy permutation based
sampling_algorithm["numpy-permutation"] = \
lambda n_population, n_sample: \
np.random.permutation(n_population)[:n_sample]
###########################################################################
# Remove unspecified algorithm
sampling_algorithm = dict((key, value)
for key, value in sampling_algorithm.items()
if key in selected_algorithm)
###########################################################################
# Perform benchmark
###########################################################################
time = {}
n_samples = np.linspace(start=0, stop=opts.n_population,
num=opts.n_steps).astype(np.int)
ratio = n_samples / opts.n_population
print('Benchmarks')
print("===========================")
for name in sorted(sampling_algorithm):
print("Perform benchmarks for %s..." % name, end="")
time[name] = np.zeros(shape=(opts.n_steps, opts.n_times))
for step in xrange(opts.n_steps):
for it in xrange(opts.n_times):
time[name][step, it] = bench_sample(sampling_algorithm[name],
opts.n_population,
n_samples[step])
print("done")
print("Averaging results...", end="")
for name in sampling_algorithm:
time[name] = np.mean(time[name], axis=1)
print("done\n")
# Print results
###########################################################################
print("Script arguments")
print("===========================")
arguments = vars(opts)
print("%s \t | %s " % ("Arguments".ljust(16),
"Value".center(12),))
print(25 * "-" + ("|" + "-" * 14) * 1)
for key, value in arguments.items():
print("%s \t | %s " % (str(key).ljust(16),
str(value).strip().center(12)))
print("")
print("Sampling algorithm performance:")
print("===============================")
print("Results are averaged over %s repetition(s)." % opts.n_times)
print("")
fig = plt.figure('scikit-learn sample w/o replacement benchmark results')
plt.title("n_population = %s, n_times = %s" %
(opts.n_population, opts.n_times))
ax = fig.add_subplot(111)
for name in sampling_algorithm:
ax.plot(ratio, time[name], label=name)
ax.set_xlabel('ratio of n_sample / n_population')
ax.set_ylabel('Time (s)')
ax.legend()
# Sort legend labels
handles, labels = ax.get_legend_handles_labels()
hl = sorted(zip(handles, labels), key=operator.itemgetter(1))
handles2, labels2 = zip(*hl)
ax.legend(handles2, labels2, loc=0)
plt.show()
| bsd-3-clause |
CVML/scikit-learn | examples/decomposition/plot_ica_vs_pca.py | 306 | 3329 | """
==========================
FastICA on 2D point clouds
==========================
This example visually compares, in the feature space, the results of two
different component analysis techniques:
:ref:`ICA` vs :ref:`PCA`.
Representing ICA in the feature space gives the view of 'geometric ICA':
ICA is an algorithm that finds directions in the feature space
corresponding to projections with high non-Gaussianity. These directions
need not be orthogonal in the original feature space, but they are
orthogonal in the whitened feature space, in which all directions
correspond to the same variance.
PCA, on the other hand, finds orthogonal directions in the raw feature
space that correspond to directions accounting for maximum variance.
Here we simulate independent sources using a highly non-Gaussian
process, two Student's t distributions with a low number of degrees of freedom (top left
figure). We mix them to create observations (top right figure).
In this raw observation space, directions identified by PCA are
represented by orange vectors. We represent the signal in the PCA space,
after whitening by the variance corresponding to the PCA vectors (lower
left). Running ICA corresponds to finding a rotation in this space to
identify the directions of largest non-Gaussianity (lower right).
"""
print(__doc__)
# Authors: Alexandre Gramfort, Gael Varoquaux
# License: BSD 3 clause
import numpy as np
import matplotlib.pyplot as plt
from sklearn.decomposition import PCA, FastICA
###############################################################################
# Generate sample data
rng = np.random.RandomState(42)
S = rng.standard_t(1.5, size=(20000, 2))
S[:, 0] *= 2.
# Mix data
A = np.array([[1, 1], [0, 2]]) # Mixing matrix
X = np.dot(S, A.T) # Generate observations
pca = PCA()
S_pca_ = pca.fit(X).transform(X)
ica = FastICA(random_state=rng)
S_ica_ = ica.fit(X).transform(X) # Estimate the sources
S_ica_ /= S_ica_.std(axis=0)
###############################################################################
# Plot results
def plot_samples(S, axis_list=None):
plt.scatter(S[:, 0], S[:, 1], s=2, marker='o', zorder=10,
color='steelblue', alpha=0.5)
if axis_list is not None:
colors = ['orange', 'red']
for color, axis in zip(colors, axis_list):
axis /= axis.std()
x_axis, y_axis = axis
# Trick to get legend to work
plt.plot(0.1 * x_axis, 0.1 * y_axis, linewidth=2, color=color)
plt.quiver(0, 0, x_axis, y_axis, zorder=11, width=0.01, scale=6,
color=color)
plt.hlines(0, -3, 3)
plt.vlines(0, -3, 3)
plt.xlim(-3, 3)
plt.ylim(-3, 3)
plt.xlabel('x')
plt.ylabel('y')
plt.figure()
plt.subplot(2, 2, 1)
plot_samples(S / S.std())
plt.title('True Independent Sources')
axis_list = [pca.components_.T, ica.mixing_]
plt.subplot(2, 2, 2)
plot_samples(X / np.std(X), axis_list=axis_list)
legend = plt.legend(['PCA', 'ICA'], loc='upper right')
legend.set_zorder(100)
plt.title('Observations')
plt.subplot(2, 2, 3)
plot_samples(S_pca_ / np.std(S_pca_, axis=0))
plt.title('PCA recovered signals')
plt.subplot(2, 2, 4)
plot_samples(S_ica_ / np.std(S_ica_))
plt.title('ICA recovered signals')
plt.subplots_adjust(0.09, 0.04, 0.94, 0.94, 0.26, 0.36)
plt.show()
| bsd-3-clause |
BigTone2009/sms-tools | lectures/07-Sinusoidal-plus-residual-model/plots-code/stochasticModelFrame.py | 22 | 3298 | import numpy as np
import matplotlib.pyplot as plt
from scipy.signal import hamming, hanning, triang, blackmanharris, resample
import math
import sys, os, time
from scipy.fftpack import fft, ifft
sys.path.append(os.path.join(os.path.dirname(os.path.realpath(__file__)), '../../../software/models/'))
import utilFunctions as UF
def stochasticModelFrame(x, w, N, stocf) :
# x: input array sound, w: analysis window, N: FFT size,
# stocf: decimation factor of mag spectrum for stochastic analysis
hN = N/2+1 # size of positive spectrum
hM = (w.size)/2 # half analysis window size
pin = hM # initialize sound pointer in middle of analysis window
fftbuffer = np.zeros(N) # initialize buffer for FFT
yw = np.zeros(w.size) # initialize output sound frame
w = w / sum(w) # normalize analysis window
#-----analysis-----
xw = x[pin-hM:pin+hM] * w # window the input sound
X = fft(xw) # compute FFT
mX = 20 * np.log10( abs(X[:hN]) ) # magnitude spectrum of positive frequencies
mXenv = resample(np.maximum(-200, mX), mX.size*stocf) # decimate the mag spectrum
pX = np.angle(X[:hN])
#-----synthesis-----
mY = resample(mXenv, hN) # interpolate to original size
pY = 2*np.pi*np.random.rand(hN) # generate phase random values
Y = np.zeros(N, dtype = complex)
Y[:hN] = 10**(mY/20) * np.exp(1j*pY) # generate positive freq.
Y[hN:] = 10**(mY[-2:0:-1]/20) * np.exp(-1j*pY[-2:0:-1]) # generate negative freq.
fftbuffer = np.real( ifft(Y) ) # inverse FFT
y = fftbuffer*N/2
return mX, pX, mY, pY, y
# example call of stochasticModel function
if __name__ == '__main__':
(fs, x) = UF.wavread('../../../sounds/ocean.wav')
w = np.hanning(1024)
N = 1024
stocf = 0.2
maxFreq = 10000.0
lastbin = N*maxFreq/fs
first = 1000
last = first+w.size
mX, pX, mY, pY, y = stochasticModelFrame(x[first:last], w, N, stocf)
plt.figure(1, figsize=(9, 7))
plt.subplot(4,1,1)
plt.plot(np.arange(first, last)/float(fs), x[first:last])
plt.axis([first/float(fs), last/float(fs), min(x[first:last]), max(x[first:last])])
plt.title('x (ocean.wav)')
plt.subplot(4,1,2)
plt.plot(float(fs)*np.arange(mX.size)/N, mX, 'r', lw=1.5, label="mX")
plt.plot(float(fs)*np.arange(mY.size)/N, mY, 'k', lw=1.5, label="mY")
plt.legend()
plt.axis([0, maxFreq, -80, max(mX)+3])
plt.title('mX + mY (stochastic approximation)')
plt.subplot(4,1,3)
plt.plot(float(fs)*np.arange(pX.size)/N, pX, 'c', lw=1.5, label="pX")
plt.plot(float(fs)*np.arange(pY.size)/N, pY-np.pi, 'k', lw=1.5, label="pY")
plt.axis([0, maxFreq, -np.pi, np.pi])
plt.legend()
plt.title('pX + pY (random phases)')
plt.subplot(4,1,4)
plt.plot(np.arange(first, last)/float(fs), y, 'b', lw=1.5)
plt.axis([first/float(fs), last/float(fs), min(y), max(y)])
plt.title('y')
plt.tight_layout()
plt.savefig('stochasticModelFrame.png')
plt.show()
| agpl-3.0 |
darinbaumgartel/dGlimpse | dGlimpse.py | 1 | 11321 | import os
import pprint
import random
import sys
import wx
import csv
import io
# The recommended way to use wx with mpl is with the WXAgg
# backend.
#
import matplotlib
matplotlib.use('WXAgg')
import numpy as np
import pylab
from matplotlib.backends.backend_wxagg import FigureCanvasWxAgg as FigureCanvas
from matplotlib.backends.backend_wx import NavigationToolbar2Wx
from sklearn.svm import SVC,LinearSVC
from matplotlib.figure import Figure
import wx.lib.scrolledpanel
class ImportDatafile:
""" Getting columns of data from csv file and build a dictionary
"""
def __init__(self,infile):
self.infile = infile
def FromVerticalCSV(self):
DataSet = {}
titleset = []
ftest = open(self.infile,'r')
_n = 0
strftest = ''
for line in ftest:
strftest += line
_n += 1
if _n>1:
break
strftest = strftest.split('\n')
strftest = strftest[:-1]
if len(strftest)<2:
raise Exception("Data file "+self.infile+" is too small for analysis!")
_posdelimiters = ',;:|\t '
gooddelimiter=';'
for p in _posdelimiters:
if len(strftest[0].split(p))==len(strftest[1].split(p)):
if len(strftest[0].split(p))!=1:
gooddelimiter= p
DataSet = np.loadtxt(open(self.infile,"rb"),delimiter=gooddelimiter,skiprows=1)
DataSet = DataSet.transpose()
titleset=strftest[0].split(gooddelimiter)
return [DataSet,titleset]
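# Illustrative usage (sketch; 'measurements.csv' is a placeholder filename):
#   data, titles = ImportDatafile('measurements.csv').FromVerticalCSV()
#   # data is a (n_columns, n_rows) numpy array; titles holds the header strings.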
########################################################################
class RandomPanel(wx.Panel):
""""""
#----------------------------------------------------------------------
def __init__(self, parent, color):
"""Constructor"""
wx.Panel.__init__(self, parent)
self.SetBackgroundColour(color)
########################################################################
class MainPanel(wx.Panel):
""""""
#----------------------------------------------------------------------
def __init__(self, parent):
"""Constructor"""
wx.Panel.__init__(self, parent)
topSplitter = wx.SplitterWindow(self)
hSplitter = wx.SplitterWindow(topSplitter)
bSplitter = wx.SplitterWindow(topSplitter)
self.panelOne = RandomPanel(hSplitter, "gray")
self.panelTwo = RandomPanel(hSplitter, "gray")
hSplitter.SplitVertically(self.panelOne, self.panelTwo,-650)
# hSplitter.SetSashGravity(0.5)
self.panelThree = RandomPanel(bSplitter, "gray")
self.panelFour = RandomPanel(bSplitter, "blue")
bSplitter.SplitVertically(self.panelThree,self.panelFour,-650)
topSplitter.SplitHorizontally(hSplitter, bSplitter,500)
# topSplitter.SetSashGravity(0.5)
self.fbutton = wx.Button(self.panelOne, -1, "Import 1st .csv",pos=(15,30),size=(120,30))
self.fbutton.Bind(wx.EVT_BUTTON, self.OnButton)
self.varlistbox = wx.ListBox(choices=[], id=wx.NewId(), name='varlistbox', parent=self.panelOne, pos=(10, 110), size=wx.Size(140, 260), style=0)
self.varlistbox.Bind(wx.EVT_LISTBOX, self.OnSelect)
self.fbutton2 = wx.Button(self.panelOne, -1, "Import 2nd .csv",pos=(15,64),size=(120,30))
self.fbutton2.Bind(wx.EVT_BUTTON, self.OnButton2)
self.varlistbox2 = wx.ListBox(choices=[], id=wx.NewId(), name='varlistbox2', parent=self.panelOne, pos=(160, 110), size=wx.Size(184, 260), style=0)
self.varlistbox2.Bind(wx.EVT_LISTBOX, self.OnSelect2)
self.possible_kernels = ['Kernel = '+k for k in ['rbf','linear']]
self.kernelchoicebox = wx.ComboBox(value=self.possible_kernels[0],choices=self.possible_kernels, id=wx.NewId(), name='kernelchoicebox', parent=self.panelThree, pos=(160, 110), size=wx.DefaultSize)
self.kernelchoicebox.Bind(wx.EVT_COMBOBOX, self.OnSelectKernel)
self.chosenkernel = self.possible_kernels[0].split('=')[-1].replace(' ','')
self.possible_cvalues = ['C = '+str((.00001*(10**_c))) for _c in range(11)]
self.cvaluechoicebox = wx.ComboBox(value=self.possible_cvalues[0],choices=self.possible_cvalues, id=wx.NewId(), name='cvaluechoicebox', parent=self.panelThree, pos=(160, 140), size=wx.DefaultSize)
self.cvaluechoicebox.Bind(wx.EVT_COMBOBOX, self.OnSelectCvalue)
self.chosencvalue = self.possible_cvalues[0].split('=')[-1].replace(' ','')
self.varlistbox3 = wx.ListBox(choices=[], id=wx.NewId(), name='varlistbox3', parent=self.panelThree, pos=(10, 110), size=wx.Size(140, 260), style=wx.LB_MULTIPLE)
self.varlistbox3.Bind(wx.EVT_LISTBOX, self.OnSelect3)
self.selindex = 0
self.compindex = 0
self.selected_variables = []
sizer = wx.BoxSizer(wx.VERTICAL)
sizer.Add(topSplitter, 1, wx.EXPAND)
self.SetSizer(sizer)
self.dataset2 = None
self.infile=None
self.infile2=None
def OnButton(self, evt):
# print ' OnButton selection '
dlg = wx.FileDialog(
self, message="Choose a file",
defaultDir=os.getcwd(),
defaultFile="",
style=wx.OPEN | wx.MULTIPLE | wx.CHANGE_DIR
)
self.infile = None
if dlg.ShowModal() == wx.ID_OK:
paths = dlg.GetPaths()
infile = str(paths[0])
self.infile=infile
self.fbutton.SetBackgroundColour('lightblue')
[self.dataset,self.titleset] = ImportDatafile(infile).FromVerticalCSV()
self.varlistbox.Clear()
for vartitle in self.titleset:
self.varlistbox.Append(vartitle)
self.varlistbox3.Append(vartitle)
def OnButton2(self, evt):
print ' OnButton selection '
dlg = wx.FileDialog(
self, message="Choose a file",
defaultDir=os.getcwd(),
defaultFile="",
style=wx.OPEN | wx.MULTIPLE | wx.CHANGE_DIR
)
self.infile2 = None
if dlg.ShowModal() == wx.ID_OK:
paths = dlg.GetPaths()
self.infile2 = str(paths[0])
self.fbutton2.SetBackgroundColour('pink')
[self.dataset2,self.titleset2] = ImportDatafile(self.infile2).FromVerticalCSV()
def OnSelect(self, evt):
self.selindex = evt.GetSelection()
self.varlistbox2.Clear()
for vartitle in self.titleset:
if vartitle != self.titleset[self.selindex]:
self.varlistbox2.Append('Plot vs: '+vartitle)
else:
self.varlistbox2.Append('Histogram '+self.titleset[self.selindex])
def OnSelect2(self, evt):
self.figure = Figure(figsize=(6, 4.5), dpi=100, facecolor='w', edgecolor='k')
self.mainaxis = self.figure.add_subplot(1,1,1)
self.canvas = FigureCanvas(self.panelTwo, -1, self.figure)
self.compindex = evt.GetSelection()
if self.selindex==self.compindex:
self.mainaxis.hist(self.dataset[self.selindex], 50, edgecolor='blue',alpha=0.75,label=(self.infile).split('.')[0].split('/')[-1],histtype='step')
self.mainaxis.set_xlabel(self.titleset[self.selindex])
self.mainaxis.set_ylabel('Probability')
if self.infile2!=None:
self.mainaxis.hist(self.dataset2[self.selindex], 50,edgecolor='red',alpha=0.75,label=(self.infile2).split('.')[0].split('/')[-1],histtype='step')
else:
self.mainaxis.scatter(self.dataset[self.selindex],self.dataset[self.compindex],marker='.',s=1,facecolor='0.5', edgecolor='0.5',label=(self.infile).split('.')[0].split('/')[-1])
self.mainaxis.set_xlabel(self.titleset[self.selindex])
self.mainaxis.set_ylabel(self.titleset[self.compindex])
fit_1 = np.polyfit(self.dataset[self.selindex],self.dataset[self.compindex],1)
coef_1 = np.poly1d(fit_1)
fit_2 = np.polyfit(self.dataset[self.selindex],self.dataset[self.compindex],2)
coef_2 = np.poly1d(fit_2)
x_min = min(self.dataset[self.selindex])
x_max = max(self.dataset[self.selindex])
xspace = [x_min + 0.01*(x_max-x_min)*interval for interval in range(100)]
y_1 = coef_1(xspace)
y_2 = coef_2(xspace)
# self.mainaxis.set_grid(True)
self.mainaxis.plot(xspace,y_1,'red', label='Linear Fit')
self.mainaxis.plot(xspace,y_2,'blue', label='Quadratic Fit')
legend = self.mainaxis.legend(loc='upper left', shadow=True)
for alabel in legend.get_texts():
alabel.set_fontsize('small')
self.canvas.draw()
self.canvas.resize(10,50)
def DoMVA(self):
self.figure2 = Figure(figsize=(6, 4.5), dpi=100, facecolor='w', edgecolor='k')
self.mainaxis2 = self.figure2.add_subplot(1,1,1)
self.canvas2 = FigureCanvas(self.panelFour, -1, self.figure2)
selected_variables = self.selected_variables
_S= self.dataset[selected_variables,:1000]
_B= self.dataset2[selected_variables,:1000]
v = selected_variables[0]
		if self.dataset2 is not None or _S.shape[0]<=1:
if len(selected_variables)==1:
v = selected_variables[0]
self.mainaxis2.hist(_S[0,:], 50,alpha=0.75,label=('Training Subset: '+self.infile.split('/')[-1].split('.')[0]),histtype='step')
self.mainaxis2.hist(_B[0,:], 50,edgecolor='red',alpha=0.75,label=('Training Subset: '+self.infile2.split('/')[-1].split('.')[0]),histtype='step')
self.mainaxis2.set_xlabel(self.titleset[self.selindex])
self.mainaxis2.set_ylabel('Probability')
legend2 = self.mainaxis2.legend(loc='upper left', shadow=True)
for alabel in legend2.get_texts():
alabel.set_fontsize('small')
if _S.shape[0]>1:
_ST = np.ones((_S.shape[-1]))
_BT = -1*np.ones((_B.shape[-1]))
_X = np.concatenate((_S,_B),axis=1)
_Y = np.concatenate((_ST,_BT),axis=0)
_X=_X.transpose()
print 'USING:',self.chosenkernel
svm = SVC(C = float(self.chosencvalue), kernel = self.chosenkernel)
svm.fit(_X,_Y)
_S_TrainHist = svm.decision_function(_S.transpose()).transpose()[0]
_B_TrainHist = svm.decision_function(_B.transpose()).transpose()[0]
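				# decision_function gives the signed distance to the SVM separating
				# hyperplane: positive values are classified with the first dataset
				# (label +1), negative values with the second (label -1), which is
				# why the shaded regions below are split at x = 0.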
self.mainaxis2.hist(_S_TrainHist, 50,alpha=0.75,label=(self.infile.split('/')[-1].split('.')[0]),histtype='step')
self.mainaxis2.hist(_B_TrainHist, 50,edgecolor='red',alpha=0.75,label=(self.infile2.split('/')[-1].split('.')[0]),histtype='step')
self.mainaxis2.axvline(x=0,color='black',linestyle='--',alpha=0.2)
_xmax = 1.2*max([_S_TrainHist.max(),_B_TrainHist.max()])
_xmin = -1.2*(abs(1.0*min([_S_TrainHist.min(),_B_TrainHist.min()])))
self.mainaxis2.axvspan(0.0, _xmax, color='blue',alpha=0.08)
self.mainaxis2.axvspan(_xmin,0.0, color='red',alpha=0.08)
self.mainaxis2.set_xlim(_xmin,_xmax )
self.mainaxis2.set_xlabel('Machine Learning Classifier')
self.mainaxis2.set_ylabel('Probability')
legend2 = self.mainaxis2.legend(loc='upper left', shadow=True)
for alabel in legend2.get_texts():
alabel.set_fontsize('small')
self.canvas2.draw()
self.canvas2.resize(10,50)
def OnSelectKernel(self,evt):
# print "Rechoosing Kernel"
self.chosenkernel = self.possible_kernels[evt.GetSelection()].split('=')[-1].replace(' ','')
# print self.chosenkernel
if len(self.selected_variables)>0:
self.DoMVA()
def OnSelectCvalue(self,evt):
self.chosencvalue = self.possible_cvalues[evt.GetSelection()].split('=')[-1].replace(' ','')
# print self.chosenkernel
if len(self.selected_variables)>0:
self.DoMVA()
def OnSelect3(self, evt):
self.selected_variables = []
for varindex in range(len(self.titleset)):
if self.varlistbox3.IsSelected(varindex):
self.selected_variables.append(varindex)
if len(self.selected_variables)>0:
self.DoMVA()
########################################################################
class MainFrame(wx.Frame):
""""""
#----------------------------------------------------------------------
def __init__(self):
"""Constructor"""
wx.Frame.__init__(self, None, title="Nested Splitters",
size=(1000,1000))
panel = MainPanel(self)
self.Show()
#----------------------------------------------------------------------
if __name__ == "__main__":
app = wx.App(False)
frame = MainFrame()
app.MainLoop() | gpl-2.0 |
platinhom/ManualHom | Coding/Python/scipy-html-0.16.1/generated/scipy-signal-fftconvolve-1.py | 1 | 1225 | # Autocorrelation of white noise is an impulse. (This is at least 100 times
# as fast as `convolve`.)
from scipy import signal
sig = np.random.randn(1000)
autocorr = signal.fftconvolve(sig, sig[::-1], mode='full')
import matplotlib.pyplot as plt
fig, (ax_orig, ax_mag) = plt.subplots(2, 1)
ax_orig.plot(sig)
ax_orig.set_title('White noise')
ax_mag.plot(np.arange(-len(sig)+1,len(sig)), autocorr)
ax_mag.set_title('Autocorrelation')
fig.tight_layout()
fig.show()
# Gaussian blur implemented using FFT convolution. Notice the dark borders
# around the image, due to the zero-padding beyond its boundaries.
# The `convolve2d` function allows for other types of image boundaries,
# but is far slower.
from scipy import misc
lena = misc.lena()
kernel = np.outer(signal.gaussian(70, 8), signal.gaussian(70, 8))
blurred = signal.fftconvolve(lena, kernel, mode='same')
fig, (ax_orig, ax_kernel, ax_blurred) = plt.subplots(1, 3)
ax_orig.imshow(lena, cmap='gray')
ax_orig.set_title('Original')
ax_orig.set_axis_off()
ax_kernel.imshow(kernel, cmap='gray')
ax_kernel.set_title('Gaussian kernel')
ax_kernel.set_axis_off()
ax_blurred.imshow(blurred, cmap='gray')
ax_blurred.set_title('Blurred')
ax_blurred.set_axis_off()
fig.show()
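# Added sketch (not part of the original docs example): a rough timing
# comparison backing the "at least 100 times as fast" claim above. The exact
# ratio depends on the signal length and the machine.
import timeit
t_fft = timeit.timeit(lambda: signal.fftconvolve(sig, sig[::-1], mode='full'),
                      number=10)
t_direct = timeit.timeit(lambda: np.convolve(sig, sig[::-1], mode='full'),
                         number=10)
print('fftconvolve: %.4f s  np.convolve: %.4f s' % (t_fft, t_direct))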
| gpl-2.0 |
chengsoonong/crowdastro | crowdastro/experiment/experiment_probability_vs_magnitude.py | 1 | 4019 | """Plots the magnitude features against predicted probability.
Matthew Alger
The Australian National University
2016
"""
import argparse
import collections
import logging
import h5py
import matplotlib.mlab
import matplotlib.pyplot as plt
import numpy
import scipy.interpolate
import sklearn.linear_model
import sklearn.preprocessing
def main(crowdastro_h5_path, training_h5_path):
with h5py.File(training_h5_path, 'r') as training_h5:
features = training_h5['features'].value
labels = training_h5['labels'].value
lr = sklearn.linear_model.LogisticRegression(class_weight='balanced')
lr.fit(features, labels)
with h5py.File(crowdastro_h5_path, 'r') as crowdastro_h5:
probs = crowdastro_h5['/wise/cdfs/norris_labels'].value
# probs = lr.predict_proba(features)[:, 1]
features[:, 4] = -2.5 * numpy.log10(features[:, 4])
features[:, 5] = -2.5 * numpy.log10(features[:, 5])
# Downsample.
indices = numpy.arange(features.shape[0])
numpy.random.shuffle(indices)
indices = indices[:len(indices) // 4]
w1_w2 = features[indices, 4]
w2_w3 = features[indices, 5]
xy_to_z = {}
xy_to_n = collections.defaultdict(int)
for x, y, z in zip(w2_w3, w1_w2, probs):
if (x, y) in xy_to_z:
xy_to_z[x, y] = (xy_to_z[x, y] * xy_to_n[x, y] + z) / (
xy_to_n[x, y] + 1)
xy_to_n[x, y] += 1
else:
xy_to_z[x, y] = z
xy_to_n[x, y] = 1
xs, ys, zs = [], [], []
for (x, y), z in xy_to_z.items():
xs.append(x)
ys.append(y)
zs.append(z)
x_min, x_max = numpy.percentile(xs, (2, 98))
y_min, y_max = numpy.percentile(ys, (2, 98))
res = 100
xi = numpy.linspace(x_min, x_max, res)
yi = numpy.linspace(y_min, y_max, res)
xi, yi = numpy.meshgrid(xi, yi)
rbf = scipy.interpolate.Rbf(xs, ys, zs, function='linear')
zi = rbf(xi, yi)
plt.pcolormesh(xi, yi, zi)
plt.show()
# plt.subplot(2, 2, 1)
# plt.scatter(features[labels == 0, 4], probs[labels == 0],
# color='red', marker='+')
# plt.scatter(features[labels == 1, 4], probs[labels == 1],
# color='blue', marker='+')
# plt.xlabel('w1 - w2')
# plt.ylabel('$p(z \\mid x)$')
# plt.ylim((0, 1))
# plt.subplot(2, 2, 2)
# plt.scatter(features[labels == 0, 5], probs[labels == 0],
# color='red', marker='+')
# plt.scatter(features[labels == 1, 5], probs[labels == 1],
# color='blue', marker='+')
# plt.xlabel('w2 - w3')
# plt.ylabel('$p(z \\mid x)$')
# plt.ylim((0, 1))
# plt.subplot(2, 2, 3)
# plt.scatter(features[labels == 0, 6], probs[labels == 0],
# color='red', marker='+')
# plt.scatter(features[labels == 1, 6], probs[labels == 1],
# color='blue', marker='+')
# plt.xlabel('Distance')
# plt.ylabel('$p(z \\mid x)$')
# plt.ylim((0, 1))
# plt.subplot(2, 2, 4)
# plt.scatter(features[labels == 0, 8], probs[labels == 0],
# color='red', marker='+')
# plt.scatter(features[labels == 1, 8], probs[labels == 1],
# color='blue', marker='+')
# plt.xlabel('CNN2')
# plt.ylabel('$p(z \\mid x)$')
# plt.ylim((0, 1))
# plt.show()
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('--crowdastro', default='data/crowdastro.h5',
help='HDF5 crowdastro data file')
parser.add_argument('--training', default='data/training.h5',
help='HDF5 training data file')
args = parser.parse_args()
logging.root.setLevel(logging.INFO)
main(args.crowdastro, args.training)
| mit |
AlexRobson/scikit-learn | examples/model_selection/plot_confusion_matrix.py | 244 | 2496 | """
================
Confusion matrix
================
Example of confusion matrix usage to evaluate the quality
of the output of a classifier on the iris data set. The
diagonal elements represent the number of points for which
the predicted label is equal to the true label, while
off-diagonal elements are those that are mislabeled by the
classifier. The higher the diagonal values of the confusion
matrix the better, indicating many correct predictions.
The figures show the confusion matrix with and without
normalization by class support size (number of elements
in each class). This kind of normalization can be
interesting in case of class imbalance to have a more
visual interpretation of which class is being misclassified.
Here the results are not as good as they could be as our
choice for the regularization parameter C was not the best.
In real life applications this parameter is usually chosen
using :ref:`grid_search`.
"""
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from sklearn import svm, datasets
from sklearn.cross_validation import train_test_split
from sklearn.metrics import confusion_matrix
# import some data to play with
iris = datasets.load_iris()
X = iris.data
y = iris.target
# Split the data into a training set and a test set
X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=0)
# Run classifier, using a model that is too regularized (C too low) to see
# the impact on the results
classifier = svm.SVC(kernel='linear', C=0.01)
y_pred = classifier.fit(X_train, y_train).predict(X_test)
def plot_confusion_matrix(cm, title='Confusion matrix', cmap=plt.cm.Blues):
plt.imshow(cm, interpolation='nearest', cmap=cmap)
plt.title(title)
plt.colorbar()
tick_marks = np.arange(len(iris.target_names))
plt.xticks(tick_marks, iris.target_names, rotation=45)
plt.yticks(tick_marks, iris.target_names)
plt.tight_layout()
plt.ylabel('True label')
plt.xlabel('Predicted label')
# Compute confusion matrix
cm = confusion_matrix(y_test, y_pred)
np.set_printoptions(precision=2)
print('Confusion matrix, without normalization')
print(cm)
plt.figure()
plot_confusion_matrix(cm)
# Normalize the confusion matrix by row (i.e by the number of samples
# in each class)
cm_normalized = cm.astype('float') / cm.sum(axis=1)[:, np.newaxis]
print('Normalized confusion matrix')
print(cm_normalized)
plt.figure()
plot_confusion_matrix(cm_normalized, title='Normalized confusion matrix')
plt.show()
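# Added worked example (not in the original script): row normalization divides
# each row of the confusion matrix by that class's support, e.g. the toy 2x2
# matrix [[5, 1], [2, 2]] becomes [[0.83, 0.17], [0.50, 0.50]].
toy_cm = np.array([[5, 1], [2, 2]])
print(toy_cm.astype('float') / toy_cm.sum(axis=1)[:, np.newaxis])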
| bsd-3-clause |
lseman/pylspm | pylspm/tabu2.py | 1 | 3175 | # PLS-PM tabu search clustering
# Author: Laio Oriel Seman
# Brownlee, J. (2011). Clever Algorithms. Retrieved from
# http://www.cleveralgorithms.com
from random import randint, uniform
from copy import deepcopy
import numpy as np
from numpy import inf
import pandas as pd
import random
from .pylspm import PyLSpm
from .boot import PyLSboot
def stochasticTwoOpt(perm):
result = perm[:] # make a copy
size = len(result)
p1, p2 = random.randrange(0, size), random.randrange(0, size)
exclude = set([p1])
if p1 == 0:
exclude.add(size - 1)
else:
exclude.add(p1 - 1)
if p1 == size - 1:
exclude.add(0)
else:
exclude.add(p1 + 1)
while p2 in exclude:
p2 = random.randrange(0, size)
if p2 < p1:
p1, p2 = p2, p1
result[p1:p2] = reversed(result[p1:p2])
return result
def locateBestCandidate(candidates):
candidates.sort(key=lambda c: c[1])
best = candidates[0]
return best
def generateCandidates(best, tabuList):
check_tabu = None
permutation = stochasticTwoOpt(best[0])
    while check_tabu is None:
if permutation in tabuList:
permutation = stochasticTwoOpt(best[0])
else:
check_tabu = 1
return permutation
def tabu(tabu_size, n_children, n_clusters, iterations, data_,
lvmodel, mvmodel, scheme, regression):
node = []
for i in range(len(data_)):
node.append(random.randrange(n_clusters))
best_ = PyLSboot(1, 8, data_, lvmodel,
mvmodel, scheme, regression, 0, 100, nclusters=n_clusters, population=[node])
best = best_.tabu()[0]
tabuList = []
for i in range(0, iterations):
print("Iteration %s" % (i + 1))
candidates = []
for index in range(0, n_children):
candidates.append(generateCandidates(best, tabuList))
fit_ = PyLSboot(len(candidates), 8, data_, lvmodel,
mvmodel, scheme, regression, 0, 100, nclusters=n_clusters, population=candidates)
fit = fit_.tabu()
bestCandidate = locateBestCandidate(fit)
if bestCandidate[1] < best[1]:
best = [bestCandidate[0], bestCandidate[1]]
tabuList.append(bestCandidate[0])
if len(tabuList) > tabu_size:
            print('deleted oldest tabu entry')
del tabuList[0]
print("\nFitness = %s" % best[1])
print(best[0])
output = pd.DataFrame(best[0])
output.columns = ['Split']
dataSplit = pd.concat([data_, output], axis=1)
# return best clusters path matrix
results = []
for i in range(n_clusters):
dataSplited = (dataSplit.loc[dataSplit['Split']
== i]).drop('Split', axis=1)
dataSplited.index = range(len(dataSplited))
results.append(PyLSpm(dataSplited, lvmodel, mvmodel, scheme,
regression, 0, 100, HOC='true'))
print(results[i].path_matrix)
print(results[i].gof())
print(results[i].residuals()[3])
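# Hedged usage sketch (added): the keyword names mirror the signature of `tabu`
# above; `my_data`, `my_lv_model` and `my_mv_model` are placeholders for the
# pandas DataFrame and PLS-PM model matrices that PyLSpm expects, and the
# scheme/regression strings are illustrative only.
#
#     tabu(tabu_size=10, n_children=5, n_clusters=2, iterations=20,
#          data_=my_data, lvmodel=my_lv_model, mvmodel=my_mv_model,
#          scheme='path', regression='ols')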
| mit |
kohr-h/odl | examples/solvers/douglas_rachford_pd_heron.py | 2 | 2172 | r"""Solves the generalized Heron problem using the Douglas-Rachford solver.
The generalized Heron problem is defined as
min_{x in R^2} sum_i d(x, Omega_i),
where d(x, Omega_i) is the distance from x to the set Omega_i. Here, the
Omega_i are given by three rectangles.
This uses the infimal convolution option of the Douglas-Rachford solver since
the problem can be written as:
min_{x in R^2} sum_i inf_{z \in Omega_i} ||x - z||.
"""
import matplotlib.pyplot as plt
import numpy as np
import odl
# Create the solution space
space = odl.rn(2)
# Define the rectangles by [minimum_corner, maximum_corner]
rectangles = [[[0, 0], [1, 1]],
[[0, 2], [1, 3]],
[[2, 2], [3, 3]]]
# The L operators are simply the identity in this case
lin_ops = [odl.IdentityOperator(space)] * len(rectangles)
# The function f in the douglas rachford solver is not needed so we set it
# to the zero function
f = odl.solvers.ZeroFunctional(space)
# g is the distance function `d(x, Omega_i)`. Here, the l2 distance.
g = [odl.solvers.L2Norm(space)] * len(rectangles)
# l are the indicator functions on the rectangles.
l = [odl.solvers.IndicatorBox(space, minp, maxp) for minp, maxp in rectangles]
# Select step size
tau = 1.0 / len(rectangles)
sigma = [1.0] * len(rectangles)
# The lam parameter can be used to accelerate the convergence rate
def lam(n):
return 1.0 + 1.0 / (n + 1)
def print_objective(x):
"""Calculate the objective value and prints it."""
value = 0
for minp, maxp in rectangles:
x_proj = np.minimum(np.maximum(x, minp), maxp)
value += (x - x_proj).norm()
print('Point = [{:.4f}, {:.4f}], Value = {:.4f}'.format(x[0], x[1], value))
# Solve
x = space.zero()
odl.solvers.douglas_rachford_pd(x, f, g, lin_ops,
tau=tau, sigma=sigma, niter=20, lam=lam,
callback=print_objective, l=l)
# plot the result
for minp, maxp in rectangles:
xp = [minp[0], maxp[0], maxp[0], minp[0], minp[0]]
yp = [minp[1], minp[1], maxp[1], maxp[1], minp[1]]
plt.plot(xp, yp)
plt.scatter(x[0], x[1])
plt.xlim(-1, 4)
plt.ylim(-1, 4)
plt.show()
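# Optional sanity check (added, not in the original example): brute-force the
# objective on a coarse grid and compare with the solver's final point. The
# grid bounds and resolution are arbitrary choices.
grid = np.linspace(-1, 4, 101)
best_val, best_pt = np.inf, None
for gx in grid:
    for gy in grid:
        pt = np.array([gx, gy])
        val = sum(np.linalg.norm(pt - np.minimum(np.maximum(pt, minp), maxp))
                  for minp, maxp in rectangles)
        if val < best_val:
            best_val, best_pt = val, (gx, gy)
print('brute-force optimum ~ {}, value ~ {:.4f}'.format(best_pt, best_val))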
| mpl-2.0 |
Natetempid/nearfield | GUI_Layout_1/GUI_Layout_1/frame_daq_measure.py | 1 | 8813 | from PyDAQmx import *
from daq9211 import *
import Tkinter as tk
import tkFileDialog
import tkMessageBox
from matplotlib.backends.backend_tkagg import FigureCanvasTkAgg
from matplotlib.figure import Figure
from matplotlib import pyplot as plt
import matplotlib.animation as animation
import datetime
import time
import ttk
import Queue as q
class daq_measure_frame(tk.Frame):
def __init__(self, master, controller, daq9211):
tk.Frame.__init__(self, master)
self.grid_rowconfigure(1,weight=1)
self.grid_columnconfigure(0,weight=1)
self.daq9211 = daq9211
#self.test_btn = ttk.Button(self, text = 'Test', command = lambda: self.test())
#self.test_btn.pack()
self.measurement_running = False
self.plot_running = False
self.tasksetup_run = False
self.callback = None
#data queues
self.channeltime_list = [np.array([]), np.array([]), np.array([]), np.array([])]
self.channeldata_list = [np.array([]), np.array([]), np.array([]), np.array([])] #4 channels each appending data to an np.ndarray
#Header frame that includes all controls
self.headerframe = tk.Frame(self, borderwidth = 5, relief = tk.GROOVE)
self.headerframe.grid(row = 0, column = 0, sticky = 'nsew')
self.headerframe.grid_rowconfigure(0,weight = 1)
self.headerframe.grid_columnconfigure(3,weight=1)
self.headerframe.grid_columnconfigure(4,weight=1)
self.headerframe.grid_columnconfigure(5,weight=1)
#canvas for indicator
self.indicator_canvas = tk.Canvas(self.headerframe, width = 50, height = 50)
self.indicator_canvas.grid(row = 0, column = 0, sticky = 'ns')
self.indicator = self.indicator_canvas.create_oval(5,5,40,40, fill = 'red4')
self.indicatorstr = tk.StringVar()
self.indicatorstr.set('DAQ not measuring')
self.indicatorlbl = tk.Label(self.headerframe, textvariable = self.indicatorstr, font = ('tkDefaultFont', 12))
self.indicatorlbl.grid(row = 0, column = 1, sticky = 'nsew')
#Time interval
self.intervalframe = tk.Frame(self.headerframe,borderwidth = 5)
self.intervalframe.grid(row = 0, column = 2, sticky = 'nsew')
self.intervalframe.grid_rowconfigure(0,weight = 1)
self.intervalframe.grid_columnconfigure(0, weight = 1)
self.interval_lbl = tk.Label(self.intervalframe, text = 'Measurement Time Step (s)')
self.interval_lbl.grid(row = 0, column = 0)
self.intervalstr = tk.StringVar()
self.intervalstr.set('1')
self.interval = tk.Entry(self.intervalframe, textvariable = self.intervalstr, width=5)
self.interval.grid(row = 1, column = 0, sticky = 'nsew')
#Measure Button
self.measure_btn = ttk.Button(self.headerframe, text = 'Start Measurement', command = lambda:self.measure_click())
self.measure_btn.grid(row = 0, column = 3, sticky = 'nsew')
#Plot Button
self.measure_and_plot_btn = ttk.Button(self.headerframe, text = 'Start Measurement & Plot', command = lambda: self.measure_and_plot_click())
self.measure_and_plot_btn.grid(row = 0, column = 4, sticky = 'nsew')
#Reset Plot Button
self.resetbtn = ttk.Button(self.headerframe, text = 'Reset Graphs', command = lambda: self.reset_graphs())
self.resetbtn.grid(row = 0, column = 5, sticky = 'nsew')
#Plotting
self.fig = plt.Figure(figsize=(5,5))
#make plots depend on number of configured channels of the daq9211
self.axs = []
self.lines = []
for i in range(0,4):
self.axs.append(self.fig.add_subplot(2,2,i+1))
line, = self.axs[i].plot([], [], lw=2, label = 'A', color = 'b')
self.lines.append(line)
#self.axs[i].legend(bbox_to_anchor=(0, 0.02, -.102, -0.102), loc=2, ncol = 2, borderaxespad=0)
self.axs[i].set_title('Channel %d' % i)
self.canvas = FigureCanvasTkAgg(self.fig,self)
self.canvas.show()
self.canvas.get_tk_widget().pack(side=tk.TOP, fill=tk.BOTH, expand=True)
self.canvas._tkcanvas.grid(row = 1, column = 0, sticky = 'nsew')#pack(side=tk.TOP, fill=tk.BOTH, expand=True)
#Click Methods
def measure_click(self):
if self.measurement_running: #then user wants to stop the Measurement
self.stop_instrument()
self.stop_graph() #stopping the measurement also stops the graph
else: #then user wants to start the measurement without running the graph
self.start_instrument()
def measure_and_plot_click(self):
if self.measurement_running and not self.plot_running: #then user has started measuring and wants to graph
self.start_graph()
elif self.measurement_running and self.plot_running: #then user wants to stop the graph but keep the measurement going
self.stop_graph()
elif not self.measurement_running and not self.plot_running: #then user wants to start the measurement and start the graph
self.start_instrument()
self.start_graph()
#note the user cannot stop the measurement from the measure_and_plot button. To stop the Measurement, the user must click the measure_btn
def start_instrument(self):
if not self.tasksetup_run: #then I need to setup the daq channel tasks
self.run_tasksetup()
self.tasksetup_run = True
self.daq9211.measureAll(float(self.intervalstr.get()))
#change indicator
self.indicator_canvas.itemconfig(self.indicator, fill = "green2")
self.indicatorstr.set('DAQ measuring...')
#change measurement button
self.measure_btn.config(text = 'Stop Measurement & Plot')
#change measurement running state
self.measurement_running = True
def stop_instrument(self):
self.daq9211.stop_event.set()
#change indicator
self.indicator_canvas.itemconfig(self.indicator, fill = "red4")
self.indicatorstr.set('DAQ not measuring')
#change measurement button
self.measure_btn.config(text = 'Start Measurement')
while self.daq9211.thread_active:
time.sleep(0.002) #wait for the measurement to stop
#clear the measurement queue
self.daq9211.clear_queues()
#change measurement running state
self.measurement_running = False
def start_graph(self):
#change plot running state
self.plot_running = True
#change measure and plot button
self.measure_and_plot_btn.config(text = 'Stop Plot')
#disable reset button
self.resetbtn.config(state = tk.DISABLED)
#update the graph
self.update_graph()
def stop_graph(self):
#change plot running state
self.plot_running = False
#change measure and plot button
self.measure_and_plot_btn.config(text = 'Start Measurement & Plot')
#enable reset button
self.resetbtn.config(state = tk.NORMAL)
#print self.callback
if self.callback is not None:
self.after_cancel(self.callback)
def reset_graphs(self):
self.channeltime_list = [np.array([]), np.array([]), np.array([]), np.array([])]
self.channeldata_list = [np.array([]), np.array([]), np.array([]), np.array([])]
for k in range(0,4):
self.lines[k].set_data(self.channeltime_list[k], self.channeldata_list[k])
self.canvas.draw_idle()
def update_graph(self):#,i):\
#try:
def totalseconds(x):
return (x - datetime.datetime(1970,1,1)).total_seconds()
totalseconds = np.vectorize(totalseconds)
#plot by channel ID
for k in range(0,len(self.daq9211.channels)):
ID = self.daq9211.channels[k].ID #data in channel # ID is plotted on the graph # ID
while (not self.daq9211.channels[k].dataq.empty()):
data = self.daq9211.channels[k].dataq.get()
time_val = data[0]
val = data[1]
self.channeltime_list[k] = np.append(self.channeltime_list[k], time_val)
self.channeldata_list[k] = np.append(self.channeldata_list[k], val)
self.lines[ID].set_data(totalseconds(self.channeltime_list[k]), self.channeldata_list[k])
self.axs[ID].relim()
self.axs[ID].autoscale_view()
self.canvas.draw_idle()
self.callback = self.after(100, self.update_graph)
def run_tasksetup(self):
for i in range(0,len(self.daq9211.channels)):
self.daq9211.channels[i].setup_task() #setup task in each channel
#self.daq9211.data[i]] = [] #initialize data dictionary
| gpl-3.0 |
astropy/astropy | astropy/visualization/tests/test_norm.py | 7 | 11838 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
from packaging.version import Version
import pytest
import numpy as np
from numpy import ma
from numpy.testing import assert_allclose, assert_equal
from astropy.utils.exceptions import AstropyDeprecationWarning
from astropy.visualization.mpl_normalize import ImageNormalize, simple_norm, imshow_norm
from astropy.visualization.interval import ManualInterval, PercentileInterval
from astropy.visualization.stretch import LogStretch, PowerStretch, SqrtStretch
from astropy.utils.compat.optional_deps import HAS_MATPLOTLIB, HAS_PLT # noqa
if HAS_MATPLOTLIB:
import matplotlib
MATPLOTLIB_LT_32 = Version(matplotlib.__version__) < Version('3.2')
DATA = np.linspace(0., 15., 6)
DATA2 = np.arange(3)
DATA2SCL = 0.5 * DATA2
DATA3 = np.linspace(-3., 3., 7)
STRETCHES = (SqrtStretch(), PowerStretch(0.5), LogStretch())
INVALID = (None, -np.inf, -1)
@pytest.mark.skipif('HAS_MATPLOTLIB')
def test_normalize_error_message():
with pytest.raises(ImportError) as exc:
ImageNormalize()
assert (exc.value.args[0] == "matplotlib is required in order to use "
"this class.")
@pytest.mark.skipif('not HAS_MATPLOTLIB')
class TestNormalize:
def test_invalid_interval(self):
with pytest.raises(TypeError):
ImageNormalize(vmin=2., vmax=10., interval=ManualInterval,
clip=True)
def test_invalid_stretch(self):
with pytest.raises(TypeError):
ImageNormalize(vmin=2., vmax=10., stretch=SqrtStretch,
clip=True)
def test_stretch_none(self):
with pytest.raises(ValueError):
ImageNormalize(vmin=2., vmax=10., stretch=None)
def test_scalar(self):
norm = ImageNormalize(vmin=2., vmax=10., stretch=SqrtStretch(),
clip=True)
norm2 = ImageNormalize(data=6, interval=ManualInterval(2, 10),
stretch=SqrtStretch(), clip=True)
assert_allclose(norm(6), 0.70710678)
assert_allclose(norm(6), norm2(6))
def test_clip(self):
norm = ImageNormalize(vmin=2., vmax=10., stretch=SqrtStretch(),
clip=True)
norm2 = ImageNormalize(DATA, interval=ManualInterval(2, 10),
stretch=SqrtStretch(), clip=True)
output = norm(DATA)
expected = [0., 0.35355339, 0.70710678, 0.93541435, 1., 1.]
assert_allclose(output, expected)
assert_allclose(output.mask, [0, 0, 0, 0, 0, 0])
assert_allclose(output, norm2(DATA))
def test_noclip(self):
norm = ImageNormalize(vmin=2., vmax=10., stretch=SqrtStretch(),
clip=False, invalid=None)
norm2 = ImageNormalize(DATA, interval=ManualInterval(2, 10),
stretch=SqrtStretch(), clip=False,
invalid=None)
output = norm(DATA)
expected = [np.nan, 0.35355339, 0.70710678, 0.93541435, 1.11803399,
1.27475488]
assert_allclose(output, expected)
assert_allclose(output.mask, [0, 0, 0, 0, 0, 0])
assert_allclose(norm.inverse(norm(DATA))[1:], DATA[1:])
assert_allclose(output, norm2(DATA))
def test_implicit_autoscale(self):
norm = ImageNormalize(vmin=None, vmax=10., stretch=SqrtStretch(),
clip=False)
norm2 = ImageNormalize(DATA, interval=ManualInterval(None, 10),
stretch=SqrtStretch(), clip=False)
output = norm(DATA)
assert norm.vmin == np.min(DATA)
assert norm.vmax == 10.
assert_allclose(output, norm2(DATA))
norm = ImageNormalize(vmin=2., vmax=None, stretch=SqrtStretch(),
clip=False)
norm2 = ImageNormalize(DATA, interval=ManualInterval(2, None),
stretch=SqrtStretch(), clip=False)
output = norm(DATA)
assert norm.vmin == 2.
assert norm.vmax == np.max(DATA)
assert_allclose(output, norm2(DATA))
def test_call_clip(self):
"""Test that the clip keyword is used when calling the object."""
data = np.arange(5)
norm = ImageNormalize(vmin=1., vmax=3., clip=False)
output = norm(data, clip=True)
assert_equal(output.data, [0, 0, 0.5, 1.0, 1.0])
assert np.all(~output.mask)
output = norm(data, clip=False)
assert_equal(output.data, [-0.5, 0, 0.5, 1.0, 1.5])
assert np.all(~output.mask)
def test_masked_clip(self):
mdata = ma.array(DATA, mask=[0, 0, 1, 0, 0, 0])
norm = ImageNormalize(vmin=2., vmax=10., stretch=SqrtStretch(),
clip=True)
norm2 = ImageNormalize(mdata, interval=ManualInterval(2, 10),
stretch=SqrtStretch(), clip=True)
output = norm(mdata)
expected = [0., 0.35355339, 1., 0.93541435, 1., 1.]
assert_allclose(output.filled(-10), expected)
assert_allclose(output.mask, [0, 0, 0, 0, 0, 0])
assert_allclose(output, norm2(mdata))
def test_masked_noclip(self):
mdata = ma.array(DATA, mask=[0, 0, 1, 0, 0, 0])
norm = ImageNormalize(vmin=2., vmax=10., stretch=SqrtStretch(),
clip=False, invalid=None)
norm2 = ImageNormalize(mdata, interval=ManualInterval(2, 10),
stretch=SqrtStretch(), clip=False,
invalid=None)
output = norm(mdata)
expected = [np.nan, 0.35355339, -10, 0.93541435, 1.11803399,
1.27475488]
assert_allclose(output.filled(-10), expected)
assert_allclose(output.mask, [0, 0, 1, 0, 0, 0])
assert_allclose(norm.inverse(norm(DATA))[1:], DATA[1:])
assert_allclose(output, norm2(mdata))
def test_invalid_data(self):
data = np.arange(25.).reshape((5, 5))
data[2, 2] = np.nan
data[1, 2] = np.inf
percent = 85.0
interval = PercentileInterval(percent)
# initialized without data
norm = ImageNormalize(interval=interval)
norm(data) # sets vmin/vmax
assert_equal((norm.vmin, norm.vmax), (1.65, 22.35))
# initialized with data
norm2 = ImageNormalize(data, interval=interval)
assert_equal((norm2.vmin, norm2.vmax), (norm.vmin, norm.vmax))
norm3 = simple_norm(data, 'linear', percent=percent)
assert_equal((norm3.vmin, norm3.vmax), (norm.vmin, norm.vmax))
assert_allclose(norm(data), norm2(data))
assert_allclose(norm(data), norm3(data))
norm4 = ImageNormalize()
norm4(data) # sets vmin/vmax
assert_equal((norm4.vmin, norm4.vmax), (0, 24))
norm5 = ImageNormalize(data)
assert_equal((norm5.vmin, norm5.vmax), (norm4.vmin, norm4.vmax))
@pytest.mark.parametrize('stretch', STRETCHES)
def test_invalid_keyword(self, stretch):
norm1 = ImageNormalize(stretch=stretch, vmin=-1, vmax=1, clip=False,
invalid=None)
norm2 = ImageNormalize(stretch=stretch, vmin=-1, vmax=1, clip=False)
norm3 = ImageNormalize(DATA3, stretch=stretch, vmin=-1, vmax=1,
clip=False, invalid=-1.)
result1 = norm1(DATA3)
result2 = norm2(DATA3)
result3 = norm3(DATA3)
assert_equal(result1[0:2], (np.nan, np.nan))
assert_equal(result2[0:2], (-1., -1.))
assert_equal(result1[2:], result2[2:])
assert_equal(result2, result3)
@pytest.mark.skipif('not HAS_MATPLOTLIB')
class TestImageScaling:
def test_linear(self):
"""Test linear scaling."""
norm = simple_norm(DATA2, stretch='linear')
assert_allclose(norm(DATA2), DATA2SCL, atol=0, rtol=1.e-5)
def test_sqrt(self):
"""Test sqrt scaling."""
norm1 = simple_norm(DATA2, stretch='sqrt')
assert_allclose(norm1(DATA2), np.sqrt(DATA2SCL), atol=0, rtol=1.e-5)
@pytest.mark.parametrize('invalid', INVALID)
def test_sqrt_invalid_kw(self, invalid):
stretch = SqrtStretch()
norm1 = simple_norm(DATA3, stretch='sqrt', min_cut=-1, max_cut=1,
clip=False, invalid=invalid)
norm2 = ImageNormalize(stretch=stretch, vmin=-1, vmax=1, clip=False,
invalid=invalid)
assert_equal(norm1(DATA3), norm2(DATA3))
def test_power(self):
"""Test power scaling."""
power = 3.0
norm = simple_norm(DATA2, stretch='power', power=power)
assert_allclose(norm(DATA2), DATA2SCL ** power, atol=0, rtol=1.e-5)
def test_log(self):
"""Test log10 scaling."""
norm = simple_norm(DATA2, stretch='log')
ref = np.log10(1000 * DATA2SCL + 1.0) / np.log10(1001.0)
assert_allclose(norm(DATA2), ref, atol=0, rtol=1.e-5)
def test_log_with_log_a(self):
"""Test log10 scaling with a custom log_a."""
log_a = 100
norm = simple_norm(DATA2, stretch='log', log_a=log_a)
ref = np.log10(log_a * DATA2SCL + 1.0) / np.log10(log_a + 1)
assert_allclose(norm(DATA2), ref, atol=0, rtol=1.e-5)
def test_asinh(self):
"""Test asinh scaling."""
norm = simple_norm(DATA2, stretch='asinh')
ref = np.arcsinh(10 * DATA2SCL) / np.arcsinh(10)
assert_allclose(norm(DATA2), ref, atol=0, rtol=1.e-5)
def test_asinh_with_asinh_a(self):
"""Test asinh scaling with a custom asinh_a."""
asinh_a = 0.5
norm = simple_norm(DATA2, stretch='asinh', asinh_a=asinh_a)
ref = np.arcsinh(DATA2SCL / asinh_a) / np.arcsinh(1. / asinh_a)
assert_allclose(norm(DATA2), ref, atol=0, rtol=1.e-5)
def test_min(self):
"""Test linear scaling."""
norm = simple_norm(DATA2, stretch='linear', min_cut=1., clip=True)
assert_allclose(norm(DATA2), [0., 0., 1.], atol=0, rtol=1.e-5)
def test_percent(self):
"""Test percent keywords."""
norm = simple_norm(DATA2, stretch='linear', percent=99., clip=True)
assert_allclose(norm(DATA2), DATA2SCL, atol=0, rtol=1.e-5)
norm2 = simple_norm(DATA2, stretch='linear', min_percent=0.5,
max_percent=99.5, clip=True)
assert_allclose(norm(DATA2), norm2(DATA2), atol=0, rtol=1.e-5)
def test_invalid_stretch(self):
"""Test invalid stretch keyword."""
with pytest.raises(ValueError):
simple_norm(DATA2, stretch='invalid')
@pytest.mark.skipif('not HAS_PLT')
def test_imshow_norm():
import matplotlib.pyplot as plt
image = np.random.randn(10, 10)
ax = plt.subplot(label='test_imshow_norm')
imshow_norm(image, ax=ax)
with pytest.raises(ValueError):
# X and data are the same, can't give both
imshow_norm(image, X=image, ax=ax)
with pytest.raises(ValueError):
# illegal to manually pass in normalization since that defeats the point
imshow_norm(image, ax=ax, norm=ImageNormalize())
imshow_norm(image, ax=ax, vmin=0, vmax=1)
with pytest.warns(AstropyDeprecationWarning):
# Note that the following is deprecated in Matplotlib 3.2
if MATPLOTLIB_LT_32:
# vmin/vmax "shadow" the MPL versions, so imshow_only_kwargs allows direct-setting
imshow_norm(image, ax=ax, imshow_only_kwargs=dict(vmin=0, vmax=1))
# but it should fail for an argument that is not in ImageNormalize
with pytest.raises(ValueError):
imshow_norm(image, ax=ax, imshow_only_kwargs=dict(cmap='jet'))
# make sure the pyplot version works
imres, norm = imshow_norm(image, ax=None)
assert isinstance(norm, ImageNormalize)
plt.close('all')
| bsd-3-clause |
CFIS-Octarine/octarine | src/ossos_tests/test_ossos/test_fitsviewer/test_displayable.py | 2 | 7722 | __author__ = "David Rusk <[email protected]>"
import unittest
from astropy.io.fits.hdu.hdulist import HDUList
import matplotlib.pyplot as plt
from hamcrest import assert_that, close_to, equal_to, none
from mock import Mock, MagicMock
from ossos import astrom
from ossos.downloads.cutouts.source import SourceCutout
from ossos.downloads.cutouts.grid import CutoutGrid
from ossos.fitsviewer.colormap import clip
from ossos.fitsviewer.displayable import (DisplayableImageSinglet,
DisplayableImageTriplet,
ImageSinglet)
from ossos.fitsviewer import displayable
class ImageSingletTest(unittest.TestCase):
def setUp(self):
mainhdu = Mock()
mainhdu.data.shape = (100, 100)
self.hdulist = [mainhdu]
fig = plt.figure()
self.displayable = ImageSinglet(self.hdulist, fig, [0, 0, 1, 1])
def test_draw_one_circle(self):
assert_that(self.displayable.marker, none())
cx = 1
cy = 2
cr = 3
self.displayable.place_marker(cx, cy, cr)
assert_that(self.displayable.marker.center, equal_to((cx, cy)))
assert_that(self.displayable.marker.radius, equal_to(cr))
def test_draw_second_circle_removes_first(self):
c1x = 1
c1y = 2
c1r = 3
self.displayable.place_marker(c1x, c1y, c1r)
assert_that(self.displayable.marker.center, equal_to((c1x, c1y)))
assert_that(self.displayable.marker.radius, equal_to(c1r))
c2x = 4
c2y = 5
c2r = 6
self.displayable.place_marker(c2x, c2y, c2r)
assert_that(self.displayable.marker.center, equal_to((c2x, c2y)))
assert_that(self.displayable.marker.radius, equal_to(c2r))
def test_toggle_reticule_notifies_display_changed(self):
self.displayable.place_marker(10, 10, 10)
refresh_handler = Mock()
self.displayable.display_changed.connect(refresh_handler)
self.displayable.toggle_reticule()
refresh_handler.assert_called_once_with()
class DisplayableImageSingletTest(unittest.TestCase):
def setUp(self):
self.singlet = DisplayableImageSinglet(MagicMock())
def mock_image_singlet(self):
image_singlet = Mock(spec=ImageSinglet)
self.singlet.image_singlet = image_singlet
return image_singlet
def test_reset_colormap(self):
image_singlet = self.mock_image_singlet()
self.singlet.reset_colormap()
image_singlet.reset_colormap.assert_called_once_with()
def test_toggle_reticule(self):
image_singlet = self.mock_image_singlet()
self.singlet.toggle_reticule()
image_singlet.toggle_reticule.assert_called_once_with()
class DisplayableImageTripletTest(unittest.TestCase):
def setUp(self):
source = Mock(spec=astrom.Source)
source.num_readings.return_value = 3
def mock_hdulist():
return MagicMock(spec=HDUList)
grid = CutoutGrid(source)
self.hdulist00 = mock_hdulist()
self.hdulist01 = mock_hdulist()
self.hdulist02 = mock_hdulist()
self.hdulist10 = mock_hdulist()
self.hdulist11 = mock_hdulist()
self.hdulist12 = mock_hdulist()
self.hdulist20 = mock_hdulist()
self.hdulist21 = mock_hdulist()
self.hdulist22 = mock_hdulist()
def mock_cutout(hdulist):
cutout = Mock(spec=SourceCutout)
cutout.hdulist = hdulist
return cutout
grid.add_cutout(mock_cutout(self.hdulist00), 0, 0)
grid.add_cutout(mock_cutout(self.hdulist01), 0, 1)
grid.add_cutout(mock_cutout(self.hdulist02), 0, 2)
grid.add_cutout(mock_cutout(self.hdulist10), 1, 0)
grid.add_cutout(mock_cutout(self.hdulist11), 1, 1)
grid.add_cutout(mock_cutout(self.hdulist12), 1, 2)
grid.add_cutout(mock_cutout(self.hdulist20), 2, 0)
grid.add_cutout(mock_cutout(self.hdulist21), 2, 1)
grid.add_cutout(mock_cutout(self.hdulist22), 2, 2)
self.grid = grid
def test_frames_have_correct_hdulists(self):
displayable = DisplayableImageTriplet(self.grid)
def get_hdulist(frame_index, time_index):
return displayable.get_singlet(frame_index, time_index).hdulist
assert_that(get_hdulist(0, 0), equal_to(self.hdulist00))
assert_that(get_hdulist(0, 1), equal_to(self.hdulist01))
assert_that(get_hdulist(0, 2), equal_to(self.hdulist02))
assert_that(get_hdulist(1, 0), equal_to(self.hdulist10))
assert_that(get_hdulist(1, 1), equal_to(self.hdulist11))
assert_that(get_hdulist(1, 2), equal_to(self.hdulist12))
assert_that(get_hdulist(2, 0), equal_to(self.hdulist20))
assert_that(get_hdulist(2, 1), equal_to(self.hdulist21))
assert_that(get_hdulist(2, 2), equal_to(self.hdulist22))
class UtilityTest(unittest.TestCase):
def assert_close(self, expected, actual):
assert_that(expected, close_to(actual, 0.0001))
def test_clip_in_range(self):
assert_that(clip(0.5, 0, 1), equal_to(0.5))
def test_clip_below_range(self):
assert_that(clip(-0.5, 0, 1), equal_to(0.0))
def test_clip_above_range(self):
assert_that(clip(1.5, 0, 1), equal_to(1.0))
def test_get_rect_first_frame_first_time_top_left(self):
[left, bottom, width, height] = displayable.get_rect((3, 3), 0, 0,
border=0, spacing=0)
assert_that(left, equal_to(0))
assert_that(bottom, equal_to(2./3))
assert_that(width, equal_to(1./3))
assert_that(height, equal_to(1./3))
def test_get_rect_last_frame_last_time_bottom_right(self):
[left, bottom, width, height] = displayable.get_rect((3, 3), 2, 2,
border=0, spacing=0)
assert_that(left, equal_to(2./3))
assert_that(bottom, equal_to(0))
assert_that(width, equal_to(1./3))
assert_that(height, equal_to(1./3))
def test_get_rect_last_frame_first_time_bottom_left(self):
[left, bottom, width, height] = displayable.get_rect((3, 3), 2, 0,
border=0, spacing=0)
assert_that(left, equal_to(0))
assert_that(bottom, equal_to(0))
assert_that(width, equal_to(1./3))
assert_that(height, equal_to(1./3))
def test_get_rect_last_frame_first_time_with_border(self):
[left, bottom, width, height] = displayable.get_rect((3, 3), 2, 0,
border=0.05, spacing=0)
assert_that(left, equal_to(0.05))
assert_that(bottom, equal_to(0.05))
assert_that(width, equal_to(0.3))
assert_that(height, equal_to(0.3))
def test_get_rect_mid_frame_first_time_with_spacing(self):
[left, bottom, width, height] = displayable.get_rect((3, 3), 1, 0,
border=0, spacing=0.05)
assert_that(left, equal_to(0))
assert_that(bottom, equal_to(0.35))
assert_that(width, equal_to(0.3))
assert_that(height, equal_to(0.3))
def test_get_rect_mid_frame_first_time_with_spacing_and_border(self):
[left, bottom, width, height] = displayable.get_rect((3, 3), 1, 0,
border=0.025, spacing=0.025)
self.assert_close(left, 0.025)
self.assert_close(bottom, 0.35)
self.assert_close(width, 0.3)
self.assert_close(height, 0.3)
if __name__ == '__main__':
unittest.main()
| gpl-3.0 |
d-mittal/pystruct | pystruct/learners/subgradient_latent_ssvm.py | 4 | 10991 | ######################
# (c) 2012 Andreas Mueller <[email protected]>
# License: BSD 3-clause
#
from time import time
import numpy as np
from sklearn.externals.joblib import Parallel, delayed, cpu_count
from sklearn.utils import gen_even_slices
from .subgradient_ssvm import SubgradientSSVM
from ..utils import find_constraint_latent
class SubgradientLatentSSVM(SubgradientSSVM):
"""Latent Variable Structured SVM solver using subgradient descent.
    Implements margin rescaling with an l1 slack penalty.
By default, a constant learning rate is used.
It is also possible to use the adaptive learning rate found by AdaGrad.
This class implements online subgradient descent. If n_jobs != 1,
small batches of size n_jobs are used to exploit parallel inference.
If inference is fast, use n_jobs=1.
Parameters
----------
model : StructuredModel
Object containing model structure. Has to implement
`loss`, `inference` and `loss_augmented_inference`.
max_iter : int, default=100
Maximum number of passes over dataset to find constraints and perform
updates.
C : float, default=1.
Regularization parameter
verbose : int, default=0
Verbosity.
learning_rate : float or 'auto', default='auto'
Learning rate used in subgradient descent. If 'auto', the pegasos
schedule is used, which starts with ``learning_rate = n_samples * C``.
momentum : float, default=0.0
Momentum used in subgradient descent.
n_jobs : int, default=1
Number of parallel jobs for inference. -1 means as many as cpus.
show_loss_every : int, default=0
        Controls how often the hamming loss is computed (for monitoring
        purposes). Zero means never, otherwise it will be computed every
        show_loss_every'th epoch.
decay_exponent : float, default=1
Exponent for decaying learning rate. Effective learning rate is
``learning_rate / (decay_t0 + t)** decay_exponent``. Zero means no decay.
decay_t0 : float, default=10
Offset for decaying learning rate. Effective learning rate is
``learning_rate / (decay_t0 + t)** decay_exponent``.
break_on_no_constraints : bool, default=True
Break when there are no new constraints found.
averaging : string, default=None
Whether and how to average weights. Possible options are 'linear', 'squared' and None.
The string reflects the weighting of the averaging:
- linear: ``w_avg ~ w_1 + 2 * w_2 + ... + t * w_t``
- squared: ``w_avg ~ w_1 + 4 * w_2 + ... + t**2 * w_t``
Uniform averaging is not implemented as it is worse than linear
weighted averaging or no averaging.
Attributes
----------
w : nd-array, shape=(model.size_joint_feature,)
The learned weights of the SVM.
``loss_curve_`` : list of float
List of loss values if show_loss_every > 0.
``objective_curve_`` : list of float
Primal objective after each pass through the dataset.
``timestamps_`` : list of int
Total training time stored before each iteration.
"""
def __init__(self, model, max_iter=100, C=1.0, verbose=0, momentum=0.,
learning_rate='auto', n_jobs=1,
show_loss_every=0, decay_exponent=1, decay_t0=10,
break_on_no_constraints=True, logger=None, averaging=None):
SubgradientSSVM.__init__(
self, model, max_iter, C, verbose=verbose, n_jobs=n_jobs,
show_loss_every=show_loss_every, decay_exponent=decay_exponent,
momentum=momentum, learning_rate=learning_rate,
break_on_no_constraints=break_on_no_constraints, logger=logger,
decay_t0=decay_t0, averaging=averaging)
def fit(self, X, Y, H_init=None, warm_start=False, initialize=True):
"""Learn parameters using subgradient descent.
Parameters
----------
X : iterable
            Training instances. Contains the structured input objects.
No requirement on the particular form of entries of X is made.
Y : iterable
            Training labels. Contains the structured labels for inputs in X.
Needs to have the same length as X.
constraints : None
Discarded. Only for API compatibility currently.
warm_start : boolean, default=False
Whether to restart a previous fit.
initialize : boolean, default=True
Whether to initialize the model for the data.
Leave this true except if you really know what you are doing.
"""
if self.verbose > 0:
print("Training latent subgradient structural SVM")
if initialize:
self.model.initialize(X, Y)
self.grad_old = np.zeros(self.model.size_joint_feature)
if not warm_start:
self.w = getattr(self, "w", np.random.normal(
0, 1, size=self.model.size_joint_feature))
self.timestamps_ = [time()]
self.objective_curve_ = []
if self.learning_rate == "auto":
self.learning_rate_ = self.C * len(X)
else:
self.learning_rate_ = self.learning_rate
else:
# hackety hack
self.timestamps_[0] = time() - self.timestamps_[-1]
w = self.w.copy()
n_samples = len(X)
try:
# catch ctrl+c to stop training
for iteration in range(self.max_iter):
self.timestamps_.append(time() - self.timestamps_[0])
positive_slacks = 0
objective = 0.
#verbose = max(0, self.verbose - 3)
if self.n_jobs == 1:
# online learning
for x, y in zip(X, Y):
h = self.model.latent(x, y, w)
h_hat = self.model.loss_augmented_inference(
x, h, w, relaxed=True)
delta_joint_feature = (
self.model.joint_feature(x, h)
- self.model.joint_feature(x, h_hat))
slack = (-np.dot(delta_joint_feature, w)
+ self.model.loss(h, h_hat))
objective += np.maximum(slack, 0)
if slack > 0:
positive_slacks += 1
w = self._solve_subgradient(delta_joint_feature, n_samples, w)
else:
#generate batches of size n_jobs
#to speed up inference
if self.n_jobs == -1:
n_jobs = cpu_count()
else:
                        n_jobs = self.n_jobs
n_batches = int(np.ceil(float(len(X)) / n_jobs))
slices = gen_even_slices(n_samples, n_batches)
for batch in slices:
X_b = X[batch]
Y_b = Y[batch]
verbose = self.verbose - 1
candidate_constraints = Parallel(
n_jobs=self.n_jobs,
verbose=verbose)(delayed(find_constraint_latent)(
self.model, x, y, w)
for x, y in zip(X_b, Y_b))
djoint_feature = np.zeros(self.model.size_joint_feature)
for x, y, constraint in zip(X_b, Y_b,
candidate_constraints):
y_hat, delta_joint_feature, slack, loss = constraint
objective += slack
djoint_feature += delta_joint_feature
if slack > 0:
positive_slacks += 1
djoint_feature /= float(len(X_b))
w = self._solve_subgradient(djoint_feature, n_samples, w)
# some statistics
objective *= self.C
objective += np.sum(self.w ** 2) / 2.
if positive_slacks == 0:
print("No additional constraints")
if self.break_on_no_constraints:
break
if self.verbose > 0:
print(self)
print("iteration %d" % iteration)
print("positive slacks: %d, "
"objective: %f" %
(positive_slacks, objective))
self.objective_curve_.append(objective)
if self.verbose > 2:
print(self.w)
self._compute_training_loss(X, Y, iteration)
if self.logger is not None:
self.logger(self, iteration)
except KeyboardInterrupt:
pass
self.timestamps_.append(time() - self.timestamps_[0])
self.objective_curve_.append(self._objective(X, Y))
if self.logger is not None:
self.logger(self, 'final')
if self.verbose:
if self.objective_curve_:
print("final objective: %f" % self.objective_curve_[-1])
if self.verbose and self.n_jobs == 1:
print("calls to inference: %d" % self.model.inference_calls)
return self
def predict(self, X):
prediction = SubgradientSSVM.predict(self, X)
return [self.model.label_from_latent(h) for h in prediction]
def predict_latent(self, X):
return SubgradientSSVM.predict(self, X)
def score(self, X, Y):
"""Compute score as 1 - loss over whole data set.
Returns the average accuracy (in terms of model.loss)
over X and Y.
Parameters
----------
X : iterable
Evaluation data.
Y : iterable
True labels.
Returns
-------
score : float
Average of 1 - loss over training examples.
"""
if hasattr(self.model, 'batch_loss'):
losses = self.model.batch_loss(
Y, self.model.batch_inference(X, self.w))
else:
losses = [self.model.loss(y, self.model.inference(y, self.w))
for y, y_pred in zip(Y, self.predict(X))]
max_losses = [self.model.max_loss(y) for y in Y]
return 1. - np.sum(losses) / float(np.sum(max_losses))
def _objective(self, X, Y):
constraints = Parallel(
n_jobs=self.n_jobs,
verbose=self.verbose - 1)(delayed(find_constraint_latent)(
self.model, x, y, self.w)
for x, y in zip(X, Y))
slacks = list(zip(*constraints))[2]
slacks = np.maximum(slacks, 0)
objective = np.sum(slacks) * self.C + np.sum(self.w ** 2) / 2.
return objective
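# Hedged usage sketch (added): LatentGridCRF is one pystruct model with latent
# states; X_train / Y_train stand in for data in whatever format that model
# expects. This only illustrates the scikit-learn-style API described in the
# class docstring.
#
#     from pystruct.models import LatentGridCRF
#     crf = LatentGridCRF(n_labels=2, n_states_per_label=2)
#     svm = SubgradientLatentSSVM(model=crf, max_iter=50, C=10.)
#     svm.fit(X_train, Y_train)
#     Y_pred = svm.predict(X_train)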
| bsd-2-clause |
shangwuhencc/scikit-learn | examples/linear_model/plot_ols.py | 220 | 1940 | #!/usr/bin/python
# -*- coding: utf-8 -*-
"""
=========================================================
Linear Regression Example
=========================================================
This example uses only the first feature of the `diabetes` dataset, in
order to illustrate a two-dimensional plot of this regression technique. The
straight line can be seen in the plot, showing how linear regression attempts
to draw a straight line that will best minimize the residual sum of squares
between the observed responses in the dataset, and the responses predicted by
the linear approximation.
The coefficients, the residual sum of squares and the variance score are also
calculated.
"""
print(__doc__)
# Code source: Jaques Grobler
# License: BSD 3 clause
import matplotlib.pyplot as plt
import numpy as np
from sklearn import datasets, linear_model
# Load the diabetes dataset
diabetes = datasets.load_diabetes()
# Use only one feature
diabetes_X = diabetes.data[:, np.newaxis, 2]
# Split the data into training/testing sets
diabetes_X_train = diabetes_X[:-20]
diabetes_X_test = diabetes_X[-20:]
# Split the targets into training/testing sets
diabetes_y_train = diabetes.target[:-20]
diabetes_y_test = diabetes.target[-20:]
# Create linear regression object
regr = linear_model.LinearRegression()
# Train the model using the training sets
regr.fit(diabetes_X_train, diabetes_y_train)
# The coefficients
print('Coefficients: \n', regr.coef_)
# The mean squared error
print("Mean squared error: %.2f"
% np.mean((regr.predict(diabetes_X_test) - diabetes_y_test) ** 2))
# Explained variance score: 1 is perfect prediction
print('Variance score: %.2f' % regr.score(diabetes_X_test, diabetes_y_test))
# Plot outputs
plt.scatter(diabetes_X_test, diabetes_y_test, color='black')
plt.plot(diabetes_X_test, regr.predict(diabetes_X_test), color='blue',
linewidth=3)
plt.xticks(())
plt.yticks(())
plt.show()
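# Added cross-check (not part of the original example): ordinary least squares
# has a closed form, so np.polyfit on the same single feature should reproduce
# the fitted slope and intercept up to rounding.
slope, intercept = np.polyfit(diabetes_X_train[:, 0], diabetes_y_train, 1)
print('np.polyfit slope/intercept: %.2f / %.2f' % (slope, intercept))
print('sklearn   slope/intercept: %.2f / %.2f' % (regr.coef_[0], regr.intercept_))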
| bsd-3-clause |
gagneurlab/concise | concise/data/attract.py | 2 | 1756 | """Helper functions for loading data from the attract db
"""
from concise.utils.pwm import PWM, load_motif_db
import pandas as pd
from pkg_resources import resource_filename
ATTRACT_METADTA = resource_filename('concise', 'resources/attract_metadata.txt')
ATTRACT_PWM = resource_filename('concise', 'resources/attract_pwm.txt')
def get_metadata():
"""
Get pandas.DataFrame with metadata about the Attract PWM's. Columns:
- PWM_id (id of the PWM - pass to get_pwm_list() for getting the pwm
- Gene_name
- Gene_id
- Mutated (if the target gene is mutated)
- Organism
    - Motif (consensus motif)
    - Len (length of the motif)
- Experiment_description(when available)
- Database (Database from where the motifs were extracted PDB: Protein data bank, C: Cisbp-RNA, R:RBPDB, S: Spliceaid-F, AEDB:ASD)
- Pubmed (pubmed ID)
- Experiment (type of experiment; short description)
- Family (domain)
- Score (Qscore refer to the paper)
"""
dt = pd.read_table(ATTRACT_METADTA)
dt.rename(columns={"Matrix_id": "PWM_id"}, inplace=True)
    # put PWM_id in first place
cols = ['PWM_id'] + [col for col in dt if col != 'PWM_id']
# rename Matrix_id to PWM_id
return dt[cols]
def get_pwm_list(pwm_id_list, pseudocountProb=0.0001):
"""Get a list of Attract PWM's.
# Arguments
pwm_id_list: List of id's from the `PWM_id` column in `get_metadata()` table
pseudocountProb: Added pseudocount probabilities to the PWM
# Returns
List of `concise.utils.pwm.PWM` instances.
"""
l = load_motif_db(ATTRACT_PWM)
l = {k.split()[0]: v for k, v in l.items()}
pwm_list = [PWM(l[str(m)] + pseudocountProb, name=m) for m in pwm_id_list]
return pwm_list
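# Hedged usage sketch (added): look up the PWMs recorded for one gene and load
# them. Column names follow get_metadata() above; the gene symbol used here is
# only an illustrative placeholder.
#
#     meta = get_metadata()
#     ids = meta.loc[meta.Gene_name == "HNRNPA1", "PWM_id"].unique()
#     pwms = get_pwm_list(ids)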
| mit |
kaz-Anova/ensemble_amazon | XGBoostClassifier.py | 1 | 8099 | # -*- coding: utf-8 -*-
"""
Created on oct 20 23:15:24 2015
@author: marios
Script that makes Xgboost scikit-like.
The initial version of the script came from Guido Tapia (or such is his kaggle name!). I have modified it quite a bit though.
The GitHub repository it was retrieved from is: https://github.com/gatapia/py_ml_utils
He has done an excellent job in making many commonly used algorithms scikit-like.
"""
from sklearn.base import BaseEstimator, ClassifierMixin
import sys
from sklearn.cross_validation import StratifiedKFold
import xgboost as xgb
import numpy as np
from scipy.sparse import csr_matrix
class XGBoostClassifier(BaseEstimator, ClassifierMixin):
def __init__(self, silent=True,
use_buffer=True, num_round=10,num_parallel_tree=1, ntree_limit=0,
nthread=None, booster='gbtree',
eta=0.3, gamma=0.01,
max_depth=6, min_child_weight=1, subsample=1,
colsample_bytree=1,
l=0, alpha=0, lambda_bias=0, objective='reg:linear',
eval_metric='logloss', seed=0, num_class=None,
max_delta_step=0,classes_=None ,
colsample_bylevel=1.0 , sketch_eps=0.1 , sketch_ratio=2.0 ,
opt_dense_col=1, size_leaf_vector=0.0, min_split_loss=0.0,
cache_opt=1, default_direction =0 , k_folds=0 ,early_stopping_rounds=200
):
assert booster in ['gbtree', 'gblinear']
assert objective in ['reg:linear', 'reg:logistic',
'binary:logistic', 'binary:logitraw', 'multi:softmax',
'multi:softprob', 'rank:pairwise','count:poisson']
assert eval_metric in [ 'rmse', 'mlogloss', 'logloss', 'error',
'merror', 'auc', 'ndcg', 'map', 'ndcg@n', 'map@n', 'kappa']
if eval_metric=='kappa':
booster='gblinear'
self.silent = silent
self.use_buffer = use_buffer
self.num_round = num_round
self.ntree_limit = ntree_limit
self.nthread = nthread
self.booster = booster
# Parameter for Tree Booster
self.eta=eta
self.gamma=gamma
self.max_depth=max_depth
self.min_child_weight=min_child_weight
self.subsample=subsample
self.colsample_bytree=colsample_bytree
self.colsample_bylevel=colsample_bylevel
self.max_delta_step=max_delta_step
self.num_parallel_tree=num_parallel_tree
self.min_split_loss=min_split_loss
self.size_leaf_vector=size_leaf_vector
self.default_direction=default_direction
self.opt_dense_col=opt_dense_col
self.sketch_eps=sketch_eps
self.sketch_ratio=sketch_ratio
self.k_folds=k_folds
self.k_models=[]
self.early_stopping_rounds=early_stopping_rounds
# Parameter for Linear Booster
self.l=l
self.alpha=alpha
self.lambda_bias=lambda_bias
# Misc
self.objective=objective
self.eval_metric=eval_metric
self.seed=seed
self.num_class = num_class
self.n_classes_ =num_class
self.classes_=classes_
def set_params(self,random_state=1):
self.seed=random_state
def build_matrix(self, X, opt_y=None, weighting=None):
        if opt_y is None:
            if weighting is None:
return xgb.DMatrix(csr_matrix(X), missing =-999.0)
else :
#scale weight
sumtotal=float(X.shape[0])
sumweights=np.sum(weighting)
for s in range(0,len(weighting)):
weighting[s]*=sumtotal/sumweights
return xgb.DMatrix(csr_matrix(X), missing =-999.0, weight=weighting)
else:
            if weighting is None:
return xgb.DMatrix(csr_matrix(X), label=np.array(opt_y), missing =-999.0)
else :
sumtotal=float(X.shape[0])
sumweights=np.sum(weighting)
for s in range(0,len(weighting)):
weighting[s]*=sumtotal/sumweights
return xgb.DMatrix(csr_matrix(X), label=np.array(opt_y), missing =-999.0, weight=weighting)
def fit(self, X, y,sample_weight=None):
self.k_models=[]
X1 = self.build_matrix(X, y,weighting= sample_weight)#sample_weight)
param = {}
param['booster']=self.booster
param['objective'] = self.objective
param['bst:eta'] = self.eta
param['seed']= self.seed
param['bst:max_depth'] = self.max_depth
if self.eval_metric!='kappa':
param['eval_metric'] = self.eval_metric
param['bst:min_child_weight']= self.min_child_weight
param['silent'] = 1
param['nthread'] = self.nthread
param['bst:subsample'] = self.subsample
param['gamma'] = self.gamma
param['colsample_bytree']= self.colsample_bytree
param['num_parallel_tree']= self.num_parallel_tree
param['colsample_bylevel']= self.colsample_bylevel
#param['min_split_loss']=self.min_split_loss
param['default_direction']=self.default_direction
param['opt_dense_col']=self.opt_dense_col
param['sketch_eps']=self.sketch_eps
param['sketch_ratio']=self.sketch_ratio
param['size_leaf_vector']=self.size_leaf_vector
if self.num_class is not None:
param['num_class']= self.num_class
if self.k_folds <2:
self.bst = xgb.train(param.items(), X1, self.num_round)
else :
number_of_folds=self.k_folds
kfolder2=StratifiedKFold(y, n_folds=number_of_folds,shuffle=True, random_state=self.seed)
## we split 64-16 5 times to make certain all the data has been use in modelling at least once
for train_indexnew, test_indexnew in kfolder2:
                if sample_weight is None:
dtrain = xgb.DMatrix(X[train_indexnew], label=y[train_indexnew])
dtvalid = xgb.DMatrix(X[test_indexnew], label=y[test_indexnew])
else :
dtrain = xgb.DMatrix(X[train_indexnew], label=y[train_indexnew], weight=sample_weight[train_indexnew])
dtvalid = xgb.DMatrix(X[test_indexnew], label=y[test_indexnew], weight=sample_weight[test_indexnew])
watchlist = [(dtrain, 'train'), (dtvalid, 'valid')]
gbdt = xgb.train(param.items(), dtrain, self.num_round, watchlist, verbose_eval=False, early_stopping_rounds=self.early_stopping_rounds)#, verbose_eval=250) #, early_stopping_rounds=250, verbose_eval=250)
#predsnew = gbdt.predict(dtest, ntree_limit=gbdt.best_iteration)
self.k_models.append(gbdt)
return self
def predict(self, X):
if self.k_models!=None and len(self.k_models)<2:
X1 = self.build_matrix(X)
return self.bst.predict(X1)
else :
dtest = xgb.DMatrix(X)
            preds = [0.0 for k in range(X.shape[0])]
for gbdt in self.k_models:
predsnew = gbdt.predict(dtest, ntree_limit=(gbdt.best_iteration+1)*self.num_parallel_tree)
for g in range (0, predsnew.shape[0]):
preds[g]+=predsnew[g]
for g in range (0, len(preds)):
                preds[g] /= float(len(self.k_models))
            return preds
def predict_proba(self, X):
try:
rows=(X.shape[0])
except:
rows=len(X)
X1 = self.build_matrix(X)
if self.k_models!=None and len(self.k_models)<2:
predictions = self.bst.predict(X1)
else :
dtest = xgb.DMatrix(X)
predictions= None
for gbdt in self.k_models:
predsnew = gbdt.predict(dtest, ntree_limit=(gbdt.best_iteration+1)*self.num_parallel_tree)
                if predictions is None:
predictions=predsnew
else:
for g in range (0, predsnew.shape[0]):
predictions[g]+=predsnew[g]
for g in range (0, len(predictions)):
predictions[g]/=float(len(self.k_models))
predictions=np.array(predictions)
if self.objective == 'multi:softprob': return predictions.reshape( rows, self.num_class)
return np.vstack([1 - predictions, predictions]).T
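# Hedged usage sketch (added): because the wrapper is scikit-like it can be fed
# plain numpy arrays; the synthetic data and hyper-parameters below are
# arbitrary and only illustrate the API.
if __name__ == '__main__':
    from sklearn.datasets import make_classification
    X_demo, y_demo = make_classification(n_samples=200, n_features=10,
                                         random_state=1)
    clf = XGBoostClassifier(num_round=20, eta=0.1, max_depth=4,
                            objective='binary:logistic',
                            eval_metric='logloss')
    clf.fit(X_demo, y_demo.astype(float))
    print(clf.predict_proba(X_demo)[:5])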
| apache-2.0 |
kastnerkyle/dagbldr | examples/sine_rnn/sine_rnn.py | 2 | 3646 | #!/usr/bin/env python
import numpy as np
import theano
from theano import tensor
from dagbldr.nodes import linear
from dagbldr.nodes import simple_fork
from dagbldr.nodes import simple
from dagbldr import get_params
from dagbldr.utils import create_checkpoint_dict
from dagbldr.optimizers import sgd
from dagbldr.training import TrainingLoop
from dagbldr.datasets import minibatch_iterator
def make_sines(n_timesteps, n_offsets, harmonic=False, square=False):
# Generate sinewaves offset in phase
n_full = n_timesteps
d1 = 3 * np.arange(n_full) / (2 * np.pi)
d2 = 3 * np.arange(n_offsets) / (2 * np.pi)
full_sines = np.sin(np.array([d1] * n_offsets).T + d2).astype("float32")
# Uncomment to add harmonics
if harmonic:
full_sines += np.sin(np.array([1.7 * d1] * n_offsets).T + d2)
full_sines += np.sin(np.array([7.362 * d1] * n_offsets).T + d2)
if square:
full_sines[full_sines <= 0] = 0
full_sines[full_sines > 0] = 1
full_sines = full_sines[:, :, None]
return full_sines
n_timesteps = 50
minibatch_size = 4
full_sines = make_sines(10 * n_timesteps, minibatch_size)
all_sines = full_sines[:n_timesteps]
n_full = 10 * n_timesteps
X = all_sines[:-1]
y = all_sines[1:]
n_in = 1
n_hid = 20
n_out = 1
X_sym = tensor.tensor3()
y_sym = tensor.tensor3()
h0 = tensor.fmatrix()
random_state = np.random.RandomState(1999)
X_fork = simple_fork([X_sym], [n_in], n_hid, name="h1",
random_state=random_state)
def step(in_t, h_tm1):
h_t = simple(in_t, h_tm1, n_hid, name="rec", random_state=random_state)
return h_t
h, _ = theano.scan(step,
sequences=[X_fork],
outputs_info=[h0])
y_pred = linear([h], [n_hid], n_out, name="h2", random_state=random_state)
cost = ((y_sym - y_pred) ** 2).sum()
params = list(get_params().values())
params = params
grads = tensor.grad(cost, params)
learning_rate = 0.001
opt = sgd(params, learning_rate)
updates = opt.updates(params, grads)
fit_function = theano.function([X_sym, y_sym, h0], [cost, h], updates=updates)
cost_function = theano.function([X_sym, y_sym, h0], [cost, h])
predict_function = theano.function([X_sym, h0], [y_pred, h])
train_itr = minibatch_iterator([X, y], minibatch_size, axis=1)
valid_itr = minibatch_iterator([X, y], minibatch_size, axis=1)
h_init = np.zeros((minibatch_size, n_hid)).astype("float32")
def train_loop(itr):
X_mb, y_mb = next(itr)
cost, _ = fit_function(X_mb, y_mb, h_init)
return [cost]
def valid_loop(itr):
X_mb, y_mb = next(itr)
cost, _ = cost_function(X_mb, y_mb, h_init)
return [cost]
checkpoint_dict = create_checkpoint_dict(locals())
TL = TrainingLoop(train_loop, train_itr,
valid_loop, valid_itr,
n_epochs=2000,
checkpoint_every_n_epochs=1000,
checkpoint_dict=checkpoint_dict,
skip_minimums=True)
epoch_results = TL.run()
# Run on self generations
n_seed = n_timesteps // 4
X_grow = X[:n_seed]
for i in range(n_timesteps // 4, n_full):
p, _ = predict_function(X_grow, h_init)
# take last prediction only
X_grow = np.concatenate((X_grow, p[-1][None]))
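# At this point X_grow has been grown autoregressively from the n_timesteps // 4
# seed steps up to n_full steps: each pass feeds the sequence generated so far
# back through predict_function and appends only the last predicted step.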
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
f, axarr1 = plt.subplots(minibatch_size, 3)
for i in range(minibatch_size):
# -1 to have the same dims
axarr1[i, 0].plot(full_sines[:-1, i, 0], color="steelblue")
axarr1[i, 1].plot(X_grow[:, i, 0], color="darkred")
axarr1[i, 2].plot(np.abs(X_grow[:-1, i, 0] - full_sines[:-1, i, 0]),
color="darkgreen")
plt.savefig('out.png')
| bsd-3-clause |
DGrady/pandas | asv_bench/benchmarks/io_bench.py | 5 | 6778 | from .pandas_vb_common import *
from pandas import concat, Timestamp, compat
try:
from StringIO import StringIO
except ImportError:
from io import StringIO
import timeit
class frame_to_csv(object):
goal_time = 0.2
def setup(self):
self.df = DataFrame(np.random.randn(3000, 30))
def time_frame_to_csv(self):
self.df.to_csv('__test__.csv')
class frame_to_csv2(object):
goal_time = 0.2
def setup(self):
self.df = DataFrame({'A': range(50000), })
self.df['B'] = (self.df.A + 1.0)
self.df['C'] = (self.df.A + 2.0)
self.df['D'] = (self.df.A + 3.0)
def time_frame_to_csv2(self):
self.df.to_csv('__test__.csv')
class frame_to_csv_date_formatting(object):
goal_time = 0.2
def setup(self):
self.rng = date_range('1/1/2000', periods=1000)
self.data = DataFrame(self.rng, index=self.rng)
def time_frame_to_csv_date_formatting(self):
self.data.to_csv('__test__.csv', date_format='%Y%m%d')
class frame_to_csv_mixed(object):
goal_time = 0.2
def setup(self):
self.df_float = DataFrame(np.random.randn(5000, 5), dtype='float64', columns=self.create_cols('float'))
self.df_int = DataFrame(np.random.randn(5000, 5), dtype='int64', columns=self.create_cols('int'))
self.df_bool = DataFrame(True, index=self.df_float.index, columns=self.create_cols('bool'))
self.df_object = DataFrame('foo', index=self.df_float.index, columns=self.create_cols('object'))
self.df_dt = DataFrame(Timestamp('20010101'), index=self.df_float.index, columns=self.create_cols('date'))
self.df_float.ix[30:500, 1:3] = np.nan
self.df = concat([self.df_float, self.df_int, self.df_bool, self.df_object, self.df_dt], axis=1)
def time_frame_to_csv_mixed(self):
self.df.to_csv('__test__.csv')
def create_cols(self, name):
return [('%s%03d' % (name, i)) for i in range(5)]
class read_csv_infer_datetime_format_custom(object):
goal_time = 0.2
def setup(self):
self.rng = date_range('1/1/2000', periods=1000)
self.data = '\n'.join(self.rng.map((lambda x: x.strftime('%m/%d/%Y %H:%M:%S.%f'))))
def time_read_csv_infer_datetime_format_custom(self):
read_csv(StringIO(self.data), header=None, names=['foo'], parse_dates=['foo'], infer_datetime_format=True)
class read_csv_infer_datetime_format_iso8601(object):
goal_time = 0.2
def setup(self):
self.rng = date_range('1/1/2000', periods=1000)
self.data = '\n'.join(self.rng.map((lambda x: x.strftime('%Y-%m-%d %H:%M:%S'))))
def time_read_csv_infer_datetime_format_iso8601(self):
read_csv(StringIO(self.data), header=None, names=['foo'], parse_dates=['foo'], infer_datetime_format=True)
class read_csv_infer_datetime_format_ymd(object):
goal_time = 0.2
def setup(self):
self.rng = date_range('1/1/2000', periods=1000)
self.data = '\n'.join(self.rng.map((lambda x: x.strftime('%Y%m%d'))))
def time_read_csv_infer_datetime_format_ymd(self):
read_csv(StringIO(self.data), header=None, names=['foo'], parse_dates=['foo'], infer_datetime_format=True)
class read_csv_skiprows(object):
goal_time = 0.2
def setup(self):
self.index = tm.makeStringIndex(20000)
self.df = DataFrame({'float1': randn(20000), 'float2': randn(20000), 'string1': (['foo'] * 20000), 'bool1': ([True] * 20000), 'int1': np.random.randint(0, 200000, size=20000), }, index=self.index)
self.df.to_csv('__test__.csv')
def time_read_csv_skiprows(self):
read_csv('__test__.csv', skiprows=10000)
class read_csv_standard(object):
goal_time = 0.2
def setup(self):
self.index = tm.makeStringIndex(10000)
self.df = DataFrame({'float1': randn(10000), 'float2': randn(10000), 'string1': (['foo'] * 10000), 'bool1': ([True] * 10000), 'int1': np.random.randint(0, 100000, size=10000), }, index=self.index)
self.df.to_csv('__test__.csv')
def time_read_csv_standard(self):
read_csv('__test__.csv')
class read_parse_dates_iso8601(object):
goal_time = 0.2
def setup(self):
self.rng = date_range('1/1/2000', periods=1000)
self.data = '\n'.join(self.rng.map((lambda x: x.strftime('%Y-%m-%d %H:%M:%S'))))
def time_read_parse_dates_iso8601(self):
read_csv(StringIO(self.data), header=None, names=['foo'], parse_dates=['foo'])
class read_uint64_integers(object):
goal_time = 0.2
def setup(self):
self.na_values = [2**63 + 500]
self.arr1 = np.arange(10000).astype('uint64') + 2**63
self.data1 = '\n'.join(map(lambda x: str(x), self.arr1))
self.arr2 = self.arr1.copy().astype(object)
self.arr2[500] = -1
self.data2 = '\n'.join(map(lambda x: str(x), self.arr2))
def time_read_uint64(self):
read_csv(StringIO(self.data1), header=None)
def time_read_uint64_neg_values(self):
read_csv(StringIO(self.data2), header=None)
def time_read_uint64_na_values(self):
read_csv(StringIO(self.data1), header=None, na_values=self.na_values)
class write_csv_standard(object):
goal_time = 0.2
def setup(self):
self.index = tm.makeStringIndex(10000)
self.df = DataFrame({'float1': randn(10000), 'float2': randn(10000), 'string1': (['foo'] * 10000), 'bool1': ([True] * 10000), 'int1': np.random.randint(0, 100000, size=10000), }, index=self.index)
def time_write_csv_standard(self):
self.df.to_csv('__test__.csv')
class read_csv_from_s3(object):
# Make sure that we can read part of a file from S3 without
# needing to download the entire thing. Use the timeit.default_timer
# to measure wall time instead of CPU time -- we want to see
# how long it takes to download the data.
timer = timeit.default_timer
params = ([None, "gzip", "bz2"], ["python", "c"])
param_names = ["compression", "engine"]
def setup(self, compression, engine):
if compression == "bz2" and engine == "c" and compat.PY2:
# The Python 2 C parser can't read bz2 from open files.
raise NotImplementedError
try:
import s3fs
except ImportError:
            # Skip these benchmarks if `s3fs` is not installed.
raise NotImplementedError
self.big_fname = "s3://pandas-test/large_random.csv"
def time_read_nrows(self, compression, engine):
# Read a small number of rows from a huge (100,000 x 50) table.
ext = ""
if compression == "gzip":
ext = ".gz"
elif compression == "bz2":
ext = ".bz2"
pd.read_csv(self.big_fname + ext, nrows=10,
compression=compression, engine=engine)
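# These classes follow the airspeed velocity (asv) conventions used throughout
# this benchmark suite: `setup` builds the fixtures and every `time_*` method
# is timed against `goal_time`. A typical local invocation (illustrative) is:
#
#   asv run --bench io_bench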
| bsd-3-clause |
rajegannathan/grasp-lift-eeg-cat-dog-solution-updated | python-packages/mne-python-0.10/mne/viz/circle.py | 13 | 15446 | """Functions to plot on circle as for connectivity
"""
from __future__ import print_function
# Authors: Alexandre Gramfort <[email protected]>
# Denis Engemann <[email protected]>
# Martin Luessi <[email protected]>
#
# License: Simplified BSD
from itertools import cycle
from functools import partial
import numpy as np
from ..externals.six import string_types
from ..fixes import tril_indices, normalize_colors
def circular_layout(node_names, node_order, start_pos=90, start_between=True,
group_boundaries=None, group_sep=10):
"""Create layout arranging nodes on a circle.
Parameters
----------
node_names : list of str
Node names.
node_order : list of str
List with node names defining the order in which the nodes are
        arranged. Must have the same elements as node_names but the order can be
different. The nodes are arranged clockwise starting at "start_pos"
degrees.
start_pos : float
Angle in degrees that defines where the first node is plotted.
start_between : bool
If True, the layout starts with the position between the nodes. This is
the same as adding "180. / len(node_names)" to start_pos.
group_boundaries : None | array-like
        List of boundaries between groups at which point a "group_sep" will
be inserted. E.g. "[0, len(node_names) / 2]" will create two groups.
group_sep : float
Group separation angle in degrees. See "group_boundaries".
Returns
-------
node_angles : array, shape=(len(node_names,))
Node angles in degrees.
"""
n_nodes = len(node_names)
if len(node_order) != n_nodes:
raise ValueError('node_order has to be the same length as node_names')
if group_boundaries is not None:
boundaries = np.array(group_boundaries, dtype=np.int)
if np.any(boundaries >= n_nodes) or np.any(boundaries < 0):
raise ValueError('"group_boundaries" has to be between 0 and '
'n_nodes - 1.')
if len(boundaries) > 1 and np.any(np.diff(boundaries) <= 0):
raise ValueError('"group_boundaries" must have non-decreasing '
'values.')
n_group_sep = len(group_boundaries)
else:
n_group_sep = 0
boundaries = None
# convert it to a list with indices
node_order = [node_order.index(name) for name in node_names]
node_order = np.array(node_order)
if len(np.unique(node_order)) != n_nodes:
raise ValueError('node_order has repeated entries')
node_sep = (360. - n_group_sep * group_sep) / n_nodes
if start_between:
start_pos += node_sep / 2
if boundaries is not None and boundaries[0] == 0:
# special case when a group separator is at the start
start_pos += group_sep / 2
boundaries = boundaries[1:] if n_group_sep > 1 else None
node_angles = np.ones(n_nodes, dtype=np.float) * node_sep
node_angles[0] = start_pos
if boundaries is not None:
node_angles[boundaries] += group_sep
node_angles = np.cumsum(node_angles)[node_order]
return node_angles
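# Example (illustrative):
#
#   >>> names = ['a', 'b', 'c', 'd']
#   >>> circular_layout(names, names, group_boundaries=[0, 2], group_sep=10)
#
# returns four angles in degrees, evenly spaced around the circle except for a
# "group_sep" gap inserted at each group boundary.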
def _plot_connectivity_circle_onpick(event, fig=None, axes=None, indices=None,
n_nodes=0, node_angles=None,
ylim=[9, 10]):
"""Isolates connections around a single node when user left clicks a node.
On right click, resets all connections."""
if event.inaxes != axes:
return
if event.button == 1: # left click
# click must be near node radius
if not ylim[0] <= event.ydata <= ylim[1]:
return
# all angles in range [0, 2*pi]
node_angles = node_angles % (np.pi * 2)
node = np.argmin(np.abs(event.xdata - node_angles))
patches = event.inaxes.patches
for ii, (x, y) in enumerate(zip(indices[0], indices[1])):
patches[ii].set_visible(node in [x, y])
fig.canvas.draw()
elif event.button == 3: # right click
patches = event.inaxes.patches
for ii in range(np.size(indices, axis=1)):
patches[ii].set_visible(True)
fig.canvas.draw()
def plot_connectivity_circle(con, node_names, indices=None, n_lines=None,
node_angles=None, node_width=None,
node_colors=None, facecolor='black',
textcolor='white', node_edgecolor='black',
linewidth=1.5, colormap='hot', vmin=None,
vmax=None, colorbar=True, title=None,
colorbar_size=0.2, colorbar_pos=(-0.3, 0.1),
fontsize_title=12, fontsize_names=8,
fontsize_colorbar=8, padding=6.,
fig=None, subplot=111, interactive=True,
node_linewidth=2., show=True):
"""Visualize connectivity as a circular graph.
Note: This code is based on the circle graph example by Nicolas P. Rougier
http://www.labri.fr/perso/nrougier/coding/.
Parameters
----------
con : array
Connectivity scores. Can be a square matrix, or a 1D array. If a 1D
array is provided, "indices" has to be used to define the connection
indices.
node_names : list of str
Node names. The order corresponds to the order in con.
indices : tuple of arrays | None
Two arrays with indices of connections for which the connections
        strengths are defined in con. Only needed if con is a 1D array.
n_lines : int | None
If not None, only the n_lines strongest connections (strength=abs(con))
are drawn.
node_angles : array, shape=(len(node_names,)) | None
Array with node positions in degrees. If None, the nodes are equally
spaced on the circle. See mne.viz.circular_layout.
node_width : float | None
Width of each node in degrees. If None, the minimum angle between any
two nodes is used as the width.
node_colors : list of tuples | list of str
List with the color to use for each node. If fewer colors than nodes
are provided, the colors will be repeated. Any color supported by
matplotlib can be used, e.g., RGBA tuples, named colors.
facecolor : str
Color to use for background. See matplotlib.colors.
textcolor : str
Color to use for text. See matplotlib.colors.
node_edgecolor : str
Color to use for lines around nodes. See matplotlib.colors.
linewidth : float
Line width to use for connections.
colormap : str
Colormap to use for coloring the connections.
vmin : float | None
Minimum value for colormap. If None, it is determined automatically.
vmax : float | None
Maximum value for colormap. If None, it is determined automatically.
colorbar : bool
Display a colorbar or not.
title : str
The figure title.
colorbar_size : float
Size of the colorbar.
colorbar_pos : 2-tuple
Position of the colorbar.
fontsize_title : int
Font size to use for title.
fontsize_names : int
Font size to use for node names.
fontsize_colorbar : int
Font size to use for colorbar.
padding : float
Space to add around figure to accommodate long labels.
fig : None | instance of matplotlib.pyplot.Figure
The figure to use. If None, a new figure with the specified background
color will be created.
subplot : int | 3-tuple
Location of the subplot when creating figures with multiple plots. E.g.
121 or (1, 2, 1) for 1 row, 2 columns, plot 1. See
matplotlib.pyplot.subplot.
interactive : bool
When enabled, left-click on a node to show only connections to that
node. Right-click shows all connections.
node_linewidth : float
        Line width for nodes.
show : bool
Show figure if True.
Returns
-------
fig : instance of matplotlib.pyplot.Figure
The figure handle.
axes : instance of matplotlib.axes.PolarAxesSubplot
The subplot handle.
"""
import matplotlib.pyplot as plt
import matplotlib.path as m_path
import matplotlib.patches as m_patches
n_nodes = len(node_names)
if node_angles is not None:
if len(node_angles) != n_nodes:
raise ValueError('node_angles has to be the same length '
'as node_names')
# convert it to radians
node_angles = node_angles * np.pi / 180
else:
# uniform layout on unit circle
node_angles = np.linspace(0, 2 * np.pi, n_nodes, endpoint=False)
if node_width is None:
# widths correspond to the minimum angle between two nodes
dist_mat = node_angles[None, :] - node_angles[:, None]
dist_mat[np.diag_indices(n_nodes)] = 1e9
node_width = np.min(np.abs(dist_mat))
else:
node_width = node_width * np.pi / 180
if node_colors is not None:
if len(node_colors) < n_nodes:
node_colors = cycle(node_colors)
else:
# assign colors using colormap
node_colors = [plt.cm.spectral(i / float(n_nodes))
for i in range(n_nodes)]
# handle 1D and 2D connectivity information
if con.ndim == 1:
if indices is None:
raise ValueError('indices has to be provided if con.ndim == 1')
elif con.ndim == 2:
if con.shape[0] != n_nodes or con.shape[1] != n_nodes:
raise ValueError('con has to be 1D or a square matrix')
# we use the lower-triangular part
indices = tril_indices(n_nodes, -1)
con = con[indices]
else:
raise ValueError('con has to be 1D or a square matrix')
# get the colormap
if isinstance(colormap, string_types):
colormap = plt.get_cmap(colormap)
# Make figure background the same colors as axes
if fig is None:
fig = plt.figure(figsize=(8, 8), facecolor=facecolor)
# Use a polar axes
if not isinstance(subplot, tuple):
subplot = (subplot,)
axes = plt.subplot(*subplot, polar=True, axisbg=facecolor)
# No ticks, we'll put our own
plt.xticks([])
plt.yticks([])
    # Set y axes limit, add additional space if requested
plt.ylim(0, 10 + padding)
# Remove the black axes border which may obscure the labels
axes.spines['polar'].set_visible(False)
# Draw lines between connected nodes, only draw the strongest connections
if n_lines is not None and len(con) > n_lines:
con_thresh = np.sort(np.abs(con).ravel())[-n_lines]
else:
con_thresh = 0.
# get the connections which we are drawing and sort by connection strength
# this will allow us to draw the strongest connections first
con_abs = np.abs(con)
con_draw_idx = np.where(con_abs >= con_thresh)[0]
con = con[con_draw_idx]
con_abs = con_abs[con_draw_idx]
indices = [ind[con_draw_idx] for ind in indices]
# now sort them
sort_idx = np.argsort(con_abs)
con_abs = con_abs[sort_idx]
con = con[sort_idx]
indices = [ind[sort_idx] for ind in indices]
# Get vmin vmax for color scaling
if vmin is None:
vmin = np.min(con[np.abs(con) >= con_thresh])
if vmax is None:
vmax = np.max(con)
vrange = vmax - vmin
# We want to add some "noise" to the start and end position of the
# edges: We modulate the noise with the number of connections of the
# node and the connection strength, such that the strongest connections
# are closer to the node center
nodes_n_con = np.zeros((n_nodes), dtype=np.int)
for i, j in zip(indices[0], indices[1]):
nodes_n_con[i] += 1
nodes_n_con[j] += 1
    # initialize random number generator so plot is reproducible
rng = np.random.mtrand.RandomState(seed=0)
n_con = len(indices[0])
noise_max = 0.25 * node_width
start_noise = rng.uniform(-noise_max, noise_max, n_con)
end_noise = rng.uniform(-noise_max, noise_max, n_con)
nodes_n_con_seen = np.zeros_like(nodes_n_con)
for i, (start, end) in enumerate(zip(indices[0], indices[1])):
nodes_n_con_seen[start] += 1
nodes_n_con_seen[end] += 1
start_noise[i] *= ((nodes_n_con[start] - nodes_n_con_seen[start]) /
float(nodes_n_con[start]))
end_noise[i] *= ((nodes_n_con[end] - nodes_n_con_seen[end]) /
float(nodes_n_con[end]))
# scale connectivity for colormap (vmin<=>0, vmax<=>1)
con_val_scaled = (con - vmin) / vrange
# Finally, we draw the connections
for pos, (i, j) in enumerate(zip(indices[0], indices[1])):
# Start point
t0, r0 = node_angles[i], 10
# End point
t1, r1 = node_angles[j], 10
# Some noise in start and end point
t0 += start_noise[pos]
t1 += end_noise[pos]
verts = [(t0, r0), (t0, 5), (t1, 5), (t1, r1)]
codes = [m_path.Path.MOVETO, m_path.Path.CURVE4, m_path.Path.CURVE4,
m_path.Path.LINETO]
path = m_path.Path(verts, codes)
color = colormap(con_val_scaled[pos])
# Actual line
patch = m_patches.PathPatch(path, fill=False, edgecolor=color,
linewidth=linewidth, alpha=1.)
axes.add_patch(patch)
# Draw ring with colored nodes
height = np.ones(n_nodes) * 1.0
bars = axes.bar(node_angles, height, width=node_width, bottom=9,
edgecolor=node_edgecolor, lw=node_linewidth,
facecolor='.9', align='center')
for bar, color in zip(bars, node_colors):
bar.set_facecolor(color)
# Draw node labels
angles_deg = 180 * node_angles / np.pi
for name, angle_rad, angle_deg in zip(node_names, node_angles, angles_deg):
if angle_deg >= 270:
ha = 'left'
else:
# Flip the label, so text is always upright
angle_deg += 180
ha = 'right'
axes.text(angle_rad, 10.4, name, size=fontsize_names,
rotation=angle_deg, rotation_mode='anchor',
horizontalalignment=ha, verticalalignment='center',
color=textcolor)
if title is not None:
plt.title(title, color=textcolor, fontsize=fontsize_title,
axes=axes)
if colorbar:
norm = normalize_colors(vmin=vmin, vmax=vmax)
sm = plt.cm.ScalarMappable(cmap=colormap, norm=norm)
sm.set_array(np.linspace(vmin, vmax))
cb = plt.colorbar(sm, ax=axes, use_gridspec=False,
shrink=colorbar_size,
anchor=colorbar_pos)
cb_yticks = plt.getp(cb.ax.axes, 'yticklabels')
cb.ax.tick_params(labelsize=fontsize_colorbar)
plt.setp(cb_yticks, color=textcolor)
# Add callback for interaction
if interactive:
callback = partial(_plot_connectivity_circle_onpick, fig=fig,
axes=axes, indices=indices, n_nodes=n_nodes,
node_angles=node_angles)
fig.canvas.mpl_connect('button_press_event', callback)
if show:
plt.show()
return fig, axes
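# Minimal usage sketch (illustrative; the connectivity values are made up):
#
#   import numpy as np
#   con = np.random.rand(5, 5)
#   names = ['node%d' % ii for ii in range(5)]
#   fig, axes = plot_connectivity_circle(con, names, n_lines=10, show=False)
#
# Only the lower-triangular part of a square `con` is used, as handled above.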
| bsd-3-clause |
AlexCatarino/Lean | PythonToolbox/setup.py | 4 | 1322 | # -*- coding: utf-8 -*-
# QUANTCONNECT.COM - Democratizing Finance, Empowering Individuals.
# Lean Algorithmic Trading Engine v2.0. Copyright 2014 QuantConnect Corporation.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from setuptools import setup, find_packages
# https://github.com/QuantConnect/Lean/blob/master/LICENSE
with open('../LICENSE') as f:
license = f.read()
with open('README.rst') as f:
readme = f.read()
setup(
name='quantconnect',
version='0.2',
description = 'QuantConnect API',
long_description=readme,
author = 'QuantConnect Python Team',
author_email = '[email protected]',
url='https://www.quantconnect.com/',
license=license,
packages = find_packages(exclude=('tests', 'docs')),
install_requires=['matplotlib', 'pandas', 'requests']
) | apache-2.0 |
USDA-ARS-NWRC/AWSF | awsm/framework/framework.py | 1 | 31367 | import copy
import logging
import os
import sys
from datetime import datetime
import coloredlogs
import pandas as pd
import pytz
from inicheck.config import MasterConfig, UserConfig
from inicheck.output import print_config_report, generate_config
from inicheck.tools import get_user_config, check_config, cast_all_variables
from smrf.utils import utils
import smrf
from spatialnc.topo import topo as mytopo
import smrf.framework.logger as logger
from awsm.convertFiles import convertFiles as cvf
from awsm.data.init_model import modelInit
from awsm.framework import ascii_art
from awsm.interface import interface as smin, smrf_ipysnobal as smrf_ipy, \
ingest_data
from awsm.utils import utilities as awsm_utils
class AWSM():
"""
Args:
configFile (str): path to configuration file.
Returns:
AWSM class instance.
Attributes:
"""
def __init__(self, config):
"""
Initialize the model, read config file, start and end date, and logging
Args:
config: string path to the config file or inicheck UserConfig instance
"""
# read the config file and store
awsm_mcfg = MasterConfig(modules='awsm')
smrf_mcfg = MasterConfig(modules='smrf')
if isinstance(config, str):
if not os.path.isfile(config):
raise Exception('Configuration file does not exist --> {}'
.format(config))
configFile = config
try:
combined_mcfg = MasterConfig(modules=['smrf', 'awsm'])
# Read in the original users config
self.ucfg = get_user_config(configFile, mcfg=combined_mcfg)
self.configFile = configFile
except UnicodeDecodeError as e:
print(e)
raise Exception(('The configuration file is not encoded in '
'UTF-8, please change and retry'))
elif isinstance(config, UserConfig):
self.ucfg = config
configFile = ''
else:
raise Exception(
'Config passed to AWSM is neither file name nor UserConfig instance')
# get the git version
self.gitVersion = awsm_utils.getgitinfo()
# create blank log and error log because logger is not initialized yet
self.tmp_log = []
self.tmp_err = []
self.tmp_warn = []
# Check the user config file for errors and report issues if any
self.tmp_log.append("Checking config file for issues...")
warnings, errors = check_config(self.ucfg)
print_config_report(warnings, errors)
self.config = self.ucfg.cfg
# Exit AWSM if config file has errors
if len(errors) > 0:
print("Errors in the config file. "
"See configuration status report above.")
# sys.exit()
# ################## Decide which modules to run #####################
self.do_smrf = self.config['awsm master']['run_smrf']
#self.do_isnobal = self.config['awsm master']['run_isnobal']
self.model_type = self.config['awsm master']['model_type']
# self.do_smrf_ipysnobal = \
# self.config['awsm master']['run_smrf_ipysnobal']
# self.do_ipysnobal = self.config['awsm master']['run_ipysnobal']
self.do_forecast = False
if 'gridded' in self.config and self.do_smrf:
self.do_forecast = self.config['gridded']['hrrr_forecast_flag']
# WARNING: The value here is inferred in SMRF.data.loadGrid. A
# change here requires a change there
self.n_forecast_hours = 18
# Options for converting files
self.do_make_in = self.config['awsm master']['make_in']
self.do_make_nc = self.config['awsm master']['make_nc']
# do report?
# self.do_report = self.config['awsm master']['do_report']
self.snowav_config = self.config['awsm master']['snowav_config']
# options for masking isnobal
self.mask_isnobal = self.config['awsm master']['mask_isnobal']
# prompt for making directories
self.prompt_dirs = self.config['awsm master']['prompt_dirs']
# store smrf version if running smrf
self.smrf_version = smrf.__version__
# ################ Time information ##################
self.start_date = pd.to_datetime(self.config['time']['start_date'])
self.end_date = pd.to_datetime(self.config['time']['end_date'])
self.time_step = self.config['time']['time_step']
self.tmz = self.config['time']['time_zone']
self.tzinfo = pytz.timezone(self.config['time']['time_zone'])
# date to use for finding wy
tmp_date = self.start_date.replace(tzinfo=self.tzinfo)
tmp_end_date = self.end_date.replace(tzinfo=self.tzinfo)
# find water year hour of start and end date
self.start_wyhr = int(utils.water_day(tmp_date)[0]*24)
self.end_wyhr = int(utils.water_day(tmp_end_date)[0]*24)
# find start of water year
tmpwy = utils.water_day(tmp_date)[1] - 1
self.wy_start = pd.to_datetime('{:d}-10-01'.format(tmpwy))
# ################ Store some paths from config file ##################
# path to the base drive (i.e. /data/blizzard)
if self.config['paths']['path_dr'] is not None:
self.path_dr = os.path.abspath(self.config['paths']['path_dr'])
else:
print('No base path to drive given. Exiting now!')
sys.exit()
# name of your basin (i.e. Tuolumne)
self.basin = self.config['paths']['basin']
# water year of run
self.wy = utils.water_day(tmp_date)[1]
# if the run is operational or not
self.isops = self.config['paths']['isops']
# name of project if not an operational run
self.proj = self.config['paths']['proj']
# check for project description
self.desc = self.config['paths']['desc']
# find style for folder date stamp
self.folder_date_style = self.config['paths']['folder_date_style']
        # setting to output in separate daily folders
self.daily_folders = self.config['awsm system']['daily_folders']
        if self.daily_folders and self.model_type != 'smrf_ipysnobal':
            raise ValueError('Cannot run daily_folders with anything other'
                             ' than model_type smrf_ipysnobal')
if self.do_forecast:
self.tmp_log.append('Forecasting set to True')
# self.fp_forecastdata = self.config['gridded']['wrf_file']
# if self.fp_forecastdata is None:
# self.tmp_err.append('Forecast set to true, '
# 'but no grid file given')
# print("Errors in the config file. See configuration "
# "status report above.")
# print(self.tmp_err)
# sys.exit()
if self.config['system']['threading']:
# Can't run threaded smrf if running forecast_data
self.tmp_err.append('Cannot run SMRF threaded with'
' gridded input data')
print(self.tmp_err)
sys.exit()
# Time step mass thresholds for iSnobal
self.mass_thresh = []
self.mass_thresh.append(self.config['grid']['thresh_normal'])
self.mass_thresh.append(self.config['grid']['thresh_medium'])
self.mass_thresh.append(self.config['grid']['thresh_small'])
# threads for running iSnobal
self.ithreads = self.config['awsm system']['ithreads']
# how often to output form iSnobal
self.output_freq = self.config['awsm system']['output_frequency']
        # number of timesteps to run if you don't want to run the whole thing
self.run_for_nsteps = self.config['awsm system']['run_for_nsteps']
# pysnobal output variables
self.pysnobal_output_vars = self.config['awsm system']['variables']
self.pysnobal_output_vars = [wrd.lower()
for wrd in self.pysnobal_output_vars]
# snow and emname
self.snow_name = self.config['awsm system']['snow_name']
self.em_name = self.config['awsm system']['em_name']
# options for restarting iSnobal
self.restart_crash = False
if self.config['isnobal restart']['restart_crash']:
self.restart_crash = True
# self.new_init = self.config['isnobal restart']['new_init']
self.depth_thresh = self.config['isnobal restart']['depth_thresh']
self.restart_hr = \
int(self.config['isnobal restart']['wyh_restart_output'])
self.restart_folder = self.config['isnobal restart']['output_folders']
# iSnobal active layer
self.active_layer = self.config['grid']['active_layer']
# if we are going to run ipysnobal with smrf
if self.model_type in ['ipysnobal', 'smrf_ipysnobal']:
self.ipy_threads = self.ithreads
self.ipy_init_type = \
self.config['files']['init_type']
self.forcing_data_type = \
self.config['ipysnobal']['forcing_data_type']
# parameters needed for restart procedure
self.restart_run = False
if self.config['isnobal restart']['restart_crash']:
self.restart_run = True
# find restart hour datetime
reset_offset = pd.to_timedelta(self.restart_hr, unit='h')
# set a new start date for this run
self.restart_date = self.wy_start + reset_offset
            self.tmp_log.append('Restart date is {}'.format(self.restart_date))
# read in update depth parameters
self.update_depth = False
if 'update depth' in self.config:
self.update_depth = self.config['update depth']['update']
if self.update_depth:
self.update_file = self.config['update depth']['update_file']
self.update_buffer = self.config['update depth']['buffer']
self.flight_numbers = self.config['update depth']['flight_numbers']
# if flights to use is not list, make it a list
if self.flight_numbers is not None:
if not isinstance(self.flight_numbers, list):
self.flight_numbers = [self.flight_numbers]
# list of sections releated to AWSM
# These will be removed for smrf config
self.sec_awsm = awsm_mcfg.cfg.keys()
self.sec_smrf = smrf_mcfg.cfg.keys()
# Make rigid directory structure
self.mk_directories()
# ################ Topo data for iSnobal ##################
# get topo stats
self.csys = self.config['grid']['csys'].upper()
self.nbits = int(self.config['grid']['nbits'])
self.soil_temp = self.config['soil_temp']['temp']
# get topo class
self.topo = mytopo(self.config['topo'], self.mask_isnobal,
self.model_type, self.csys, self.pathdd)
# ################ Generate config backup ##################
# if self.config['output']['input_backup']:
# set location for backup and output backup of awsm sections
config_backup_location = \
os.path.join(self.pathdd, 'awsm_config_backup.ini')
generate_config(self.ucfg, config_backup_location)
# create log now that directory structure is done
self.createLog()
# if we have a model, initialize it
if self.model_type is not None:
self.myinit = modelInit(self._logger, self.config, self.topo,
self.start_wyhr, self.pathro, self.pathrr,
self.pathinit, self.wy_start)
def createLog(self):
'''
Now that the directory structure is done, create log file and print out
saved logging statements.
'''
level_styles = {'info': {'color': 'white'},
'notice': {'color': 'magenta'},
'verbose': {'color': 'blue'},
'success': {'color': 'green', 'bold': True},
'spam': {'color': 'green', 'faint': True},
'critical': {'color': 'red', 'bold': True},
'error': {'color': 'red'},
'debug': {'color': 'green'},
'warning': {'color': 'yellow'}}
field_styles = {'hostname': {'color': 'magenta'},
'programname': {'color': 'cyan'},
'name': {'color': 'white'},
'levelname': {'color': 'white', 'bold': True},
'asctime': {'color': 'green'}}
# start logging
loglevel = self.config['awsm system']['log_level'].upper()
numeric_level = getattr(logging, loglevel, None)
if not isinstance(numeric_level, int):
raise ValueError('Invalid log level: %s' % loglevel)
# setup the logging
logfile = None
if self.config['awsm system']['log_to_file']:
if self.config['isnobal restart']['restart_crash']:
logfile = \
os.path.join(self.pathll,
'log_restart_{}.out'.format(self.restart_hr))
elif self.do_forecast:
logfile = \
os.path.join(self.pathll,
'log_forecast_'
'{}.out'.format(self.folder_date_stamp))
else:
logfile = \
os.path.join(self.pathll,
'log_{}.out'.format(self.folder_date_stamp))
# let user know
print('Logging to file: {}'.format(logfile))
self.config['awsm system']['log_file'] = logfile
logger.SMRFLogger(self.config['awsm system'])
self._logger = logging.getLogger(__name__)
self._logger.info(ascii_art.MOUNTAIN)
self._logger.info(ascii_art.TITLE)
# dump saved logs
if len(self.tmp_log) > 0:
for l in self.tmp_log:
self._logger.info(l)
if len(self.tmp_warn) > 0:
for l in self.tmp_warn:
self._logger.warning(l)
if len(self.tmp_err) > 0:
for l in self.tmp_err:
self._logger.error(l)
def runSmrf(self):
"""
Run smrf. Calls :mod: `awsm.interface.interface.smrfMEAS`
"""
# modify config and run smrf
smin.smrfMEAS(self)
def nc2ipw(self, runtype):
"""
Convert ipw smrf output to isnobal inputs
"""
cvf.nc2ipw_mea(self, runtype)
def ipw2nc(self, runtype):
"""
Convert ipw output to netcdf files. Calls
:mod: `awsm.convertFiles.convertFiles.ipw2nc_mea`
"""
cvf.ipw2nc_mea(self, runtype)
def run_isnobal(self, offset=None):
"""
Run isnobal. Calls :mod: `awsm.interface.interface.run_isnobal`
"""
smin.run_isnobal(self, offset=offset)
def run_isnobal_update(self):
"""
Run iSnobal with update procedure
"""
# initialize updater
updater = ingest_data.StateUpdater(self)
# run iSnobal with updates
updater.run_update_procedure_isnobal(self)
def run_smrf_ipysnobal(self):
"""
Run smrf and pass inputs to ipysnobal in memory.
Calls :mod: `awsm.interface.smrf_ipysnobal.run_smrf_ipysnobal`
"""
smrf_ipy.run_smrf_ipysnobal(self)
def run_awsm_daily(self):
"""
This function runs :mod: `awsm.interface.smrf_ipysnobal.run_smrf_ipysnobal`
on an hourly output from Pysnobal, outputting to daily folders, similar
to the HRRR froecast.
"""
smin.run_awsm_daily(self)
def run_ipysnobal(self):
"""
Run PySnobal from previously run smrf forcing data
Calls :mod: `awsm.interface.smrf_ipysnobal.run_ipysnobal`
"""
smrf_ipy.run_ipysnobal(self)
def mk_directories(self):
"""
Create all needed directories starting from the working drive
"""
# rigid directory work
self.tmp_log.append('AWSM creating directories')
        # string to append to folders indicating run start and end
if self.folder_date_style == 'wyhr':
self.folder_date_stamp = '{:04d}_{:04d}'.format(self.start_wyhr,
self.end_wyhr)
elif self.folder_date_style == 'day':
self.folder_date_stamp = \
'{}'.format(self.start_date.strftime("%Y%m%d"))
elif self.folder_date_style == 'start_end':
self.folder_date_stamp = \
'{}_{}'.format(self.start_date.strftime("%Y%m%d"),
self.end_date.strftime("%Y%m%d"))
# make basin path
self.path_ba = os.path.join(self.path_dr, self.basin)
# check if ops or dev
if self.isops:
opsdev = 'ops'
else:
opsdev = 'devel'
        # assign paths accordingly
self.path_od = os.path.join(self.path_ba, opsdev)
self.path_wy = os.path.join(self.path_od, 'wy{}'.format(self.wy))
self.path_wy = os.path.join(self.path_wy, self.proj)
        # specific folders containing the data and the runs
self.pathd = os.path.join(self.path_wy, 'data')
self.pathr = os.path.join(self.path_wy, 'runs')
# log folders
self.pathlog = os.path.join(self.path_wy, 'logs')
self.pathll = os.path.join(self.pathlog,
'log{}'.format(self.folder_date_stamp))
# name of temporary smrf file to write out
self.smrfini = os.path.join(self.path_wy, 'tmp_smrf_config.ini')
self.forecastini = os.path.join(self.path_wy,
'tmp_smrf_forecast_config.ini')
# if not self.do_forecast:
# assign path names for isnobal, path_names_att will be used
# to create necessary directories
path_names_att = ['pathdd', 'pathrr', 'pathi',
'pathinit', 'pathro', 'paths', 'path_ppt']
self.pathdd = \
os.path.join(self.pathd,
'data{}'.format(self.folder_date_stamp))
self.pathrr = \
os.path.join(self.pathr,
'run{}'.format(self.folder_date_stamp))
self.pathi = os.path.join(self.pathdd, 'input/')
self.pathinit = os.path.join(self.pathdd, 'init/')
self.pathro = os.path.join(self.pathrr, 'output/')
self.paths = os.path.join(self.pathdd, 'smrfOutputs')
self.ppt_desc = \
os.path.join(self.pathdd,
'ppt_desc{}.txt'.format(self.folder_date_stamp))
self.path_ppt = os.path.join(self.pathdd, 'ppt_4b')
        # used to check if data directory exists
check_if_data = not os.path.exists(self.pathdd)
# else:
# path_names_att = ['pathdd', 'pathrr', 'pathi',
# 'pathinit', 'pathro', 'paths', 'path_ppt']
# self.pathdd = \
# os.path.join(self.pathd,
# 'forecast{}'.format(self.folder_date_stamp))
# self.pathrr = \
# os.path.join(self.pathr,
# 'forecast{}'.format(self.folder_date_stamp))
# self.pathi = os.path.join(self.pathdd, 'input/')
# self.pathinit = os.path.join(self.pathdd, 'init/')
# self.pathro = os.path.join(self.pathrr, 'output/')
# self.paths = os.path.join(self.pathdd, 'smrfOutputs')
# self.ppt_desc = \
# os.path.join(self.pathdd,
# 'ppt_desc{}.txt'.format(self.folder_date_stamp))
# self.path_ppt = os.path.join(self.pathdd, 'ppt_4b')
#
# # used to check if data direcotry exists
# check_if_data = not os.path.exists(self.pathdd)
# add log path to create directory
path_names_att.append('pathll')
# always check paths
check_if_data = True
# Only start if your drive exists
if os.path.exists(self.path_dr):
# If the specific path to your WY does not exist,
# create it and following directories/
# If the working path specified in the config file does not exist
if not os.path.exists(self.path_wy):
y_n = 'a' # set a funny value to y_n
# while it is not y or n (for yes or no)
while y_n not in ['y', 'n']:
if self.prompt_dirs:
y_n = input('Directory %s does not exist. Create base '
'directory and all subdirectories? '
'(y n): ' % self.path_wy)
else:
y_n = 'y'
if y_n == 'n':
self.tmp_err.append('Please fix the base directory'
' (path_wy) in your config file.')
print(self.tmp_err)
sys.exit()
elif y_n == 'y':
self.make_rigid_directories(path_names_att)
# If WY exists, but not this exact run for the dates, create it
elif check_if_data:
y_n = 'a'
while y_n not in ['y', 'n']:
if self.prompt_dirs:
y_n = input('Directory %s does not exist. Create base '
'directory and all subdirectories? '
'(y n): ' % self.pathdd)
else:
y_n = 'y'
if y_n == 'n':
self.tmp_err.append('Please fix the base directory'
' (path_wy) in your config file.')
print(self.tmp_err)
sys.exit()
elif y_n == 'y':
self.make_rigid_directories(path_names_att)
else:
self.tmp_warn.append('Directory structure leading to '
'{} already exists.'.format(self.pathdd))
# make sure runs exists
if not os.path.exists(os.path.join(self.path_wy, 'runs/')):
os.makedirs(os.path.join(self.path_wy, 'runs/'))
# if we're not running forecast, make sure path to outputs exists
if not os.path.exists(self.pathro):
os.makedirs(self.pathro)
# find where to write file
fp_desc = os.path.join(self.path_wy, 'projectDescription.txt')
if not os.path.isfile(fp_desc):
# look for description or prompt for one
if self.desc is not None:
pass
else:
self.desc = input('\nNo description for project. '
'Enter one now, but do not use '
'any punctuation:\n')
f = open(fp_desc, 'w')
f.write(self.desc)
f.close()
else:
self.tmp_log.append('Description file already exists\n')
else:
self.tmp_err.append('Base directory did not exist, '
'not safe to continue. Make sure base '
'directory exists before running.')
print(self.tmp_err)
sys.exit()
def make_rigid_directories(self, path_name):
"""
Creates rigid directory structure from list of relative bases and
extensions from the base
"""
# loop through lists
for idp, pn in enumerate(path_name):
# get attribute of path
path = getattr(self, pn)
if not os.path.exists(path):
os.makedirs(path)
else:
                self.tmp_log.append('Directory --{}-- exists, not creating.\n'.format(path))
def run_report(self):
try:
import snowav
self._logger.info('AWSM finished run, starting report')
snowav.framework.framework.snowav(config_file=self.snowav_config)
except ModuleNotFoundError:
print('Library snowav not installed - skip reporting')
def __enter__(self):
self.start_time = datetime.now()
return self
def __exit__(self, exc_type, exc_value, traceback):
"""
Provide some logging info about when AWSM was closed
"""
self._logger.info(
'AWSM finished in: {}'.format(datetime.now() - self.start_time)
)
self._logger.info('AWSM closed --> %s' % datetime.now())
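# Sketch of direct use of the class above (run_awsm below is the canonical
# entry point; the config path here is hypothetical):
#
#   with AWSM('path/to/awsm_config.ini') as a:
#       if a.do_smrf:
#           a.runSmrf()
#
# The context manager logs the total elapsed time on exit.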
def run_awsm_daily_ops(config_file):
"""
    Run each day separately. Calls run_awsm
"""
# define some formats
fmt_day = '%Y%m%d'
fmt_cfg = '%Y-%m-%d %H:%M'
add_day = pd.to_timedelta(24, unit='h')
# get config instance
config = get_user_config(config_file,
modules=['smrf', 'awsm'])
# copy the config and get total start and end
# config = deepcopy(base_config)
# set naming style
config.raw_cfg['paths']['folder_date_style'] = 'day'
config.apply_recipes()
config = cast_all_variables(config, config.mcfg)
# get the water year
cfg_start_date = pd.to_datetime(config.cfg['time']['start_date'])
tzinfo = pytz.timezone(config.cfg['time']['time_zone'])
wy = utils.water_day(cfg_start_date.replace(tzinfo=tzinfo))[1]
# find the model start depending on restart
if config.cfg['isnobal restart']['restart_crash']:
offset_wyhr = int(config.cfg['isnobal restart']['wyh_restart_output'])
wy_start = pd.to_datetime('{:d}-10-01'.format(wy - 1))
model_start = wy_start + pd.to_timedelta(offset_wyhr, unit='h')
else:
model_start = config.cfg['time']['start_date']
model_end = config.cfg['time']['end_date']
isops = config.cfg['paths']['isops']
if isops:
devops = 'ops'
else:
devops = 'devel'
# find output location for previous output
paths = config.cfg['paths']
prev_out_base = os.path.join(paths['path_dr'],
paths['basin'],
devops,
'wy{}'.format(wy),
paths['proj'],
'runs')
prev_data_base = os.path.join(paths['path_dr'],
paths['basin'],
devops,
'wy{}'.format(wy),
paths['proj'],
'data')
# find day of start and end
start_day = pd.to_datetime(model_start.strftime(fmt_day))
end_day = pd.to_datetime(model_end.strftime(fmt_day))
# find total range of run
ndays = int((end_day-start_day).days) + 1
date_list = [start_day +
pd.to_timedelta(x, unit='D') for x in range(0, ndays)]
# loop through daily runs and run awsm
for idd, sd in enumerate(date_list):
new_config = copy.deepcopy(config)
if idd > 0:
new_config.raw_cfg['isnobal restart']['restart_crash'] = False
new_config.raw_cfg['grid']['thresh_normal'] = 60
new_config.raw_cfg['grid']['thresh_medium'] = 10
new_config.raw_cfg['grid']['thresh_small'] = 1
# get the end of the day
ed = sd + add_day
# make sure we're in the model date range
if sd < model_start:
sd = model_start
if ed > model_end:
ed = model_end
# set the start and end dates
new_config.raw_cfg['time']['start_date'] = sd.strftime(fmt_cfg)
new_config.raw_cfg['time']['end_date'] = ed.strftime(fmt_cfg)
# reset the initialization
if idd > 0:
# find previous output file
prev_day = sd - pd.to_timedelta(1, unit='D')
prev_out = os.path.join(prev_out_base,
'run{}'.format(prev_day.strftime(fmt_day)),
'snow.nc')
# reset if running the model
if new_config.cfg['awsm master']['model_type'] is not None:
new_config.raw_cfg['files']['init_type'] = 'netcdf_out'
new_config.raw_cfg['files']['init_file'] = prev_out
# if we have a previous storm day file, use it
prev_storm = os.path.join(prev_data_base,
'data{}'.format(
prev_day.strftime(fmt_day)),
'smrfOutputs', 'storm_days.nc')
if os.path.isfile(prev_storm):
new_config.raw_cfg['precip']['storm_days_restart'] = prev_storm
# apply recipes with new settings
new_config.apply_recipes()
new_config = cast_all_variables(new_config, new_config.mcfg)
# run awsm for the day
run_awsm(new_config)
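# Note on the daily loop above: after the first day each run is initialized
# from the previous day's 'snow.nc' output (init_type 'netcdf_out') and, when
# present, the previous day's 'storm_days.nc', so the season is advanced one
# day at a time with model state carried forward.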
def run_awsm(config):
"""
Function that runs awsm how it should be operate for full runs.
Args:
config: string path to the config file or inicheck UserConfig instance
"""
with AWSM(config) as a:
if a.do_forecast:
runtype = 'forecast'
else:
runtype = 'smrf'
if not a.config['isnobal restart']['restart_crash']:
# distribute data by running smrf
if a.do_smrf:
a.runSmrf()
# convert smrf output to ipw for iSnobal
if a.do_make_in:
a.nc2ipw(runtype)
if a.model_type == 'isnobal':
# run iSnobal
if a.update_depth:
a.run_isnobal_update()
else:
a.run_isnobal()
elif a.model_type == 'ipysnobal':
# run iPySnobal
a.run_ipysnobal()
# convert ipw back to netcdf for processing
if a.do_make_nc:
a.ipw2nc(runtype)
# if restart
else:
if a.model_type == 'isnobal':
# restart iSnobal from crash
if a.update_depth:
a.run_isnobal_update()
else:
a.run_isnobal()
# convert ipw back to netcdf for processing
elif a.model_type == 'ipysnobal':
# run iPySnobal
a.run_ipysnobal()
if a.do_make_nc:
a.ipw2nc(runtype)
# Run iPySnobal from SMRF in memory
if a.model_type == 'smrf_ipysnobal':
if a.daily_folders:
a.run_awsm_daily()
else:
a.run_smrf_ipysnobal()
# create report
if a.snowav_config is not None:
a.run_report()
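# Example entry point (illustrative; the config path is hypothetical):
#
#   if __name__ == '__main__':
#       run_awsm('/path/to/awsm_config.ini')
#
# run_awsm also accepts an inicheck UserConfig instance, as handled in
# AWSM.__init__ above.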
| gpl-3.0 |
hariseldon99/archives | eth_question/scripts/vec_binplot.py | 1 | 1126 | #!/usr/bin/python
"""
* - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
* Rigol Lattice: postprocessing of Petsc Vector data
* Copyright (c) 2013 Analabha Roy ([email protected])
*
* This is free software: you can redistribute it and/or modify it under the
* terms of version 3 of the GNU Lesser General Public License as published by
* the Free Software Foundation.
* - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
"""
"""
Python program to
read a petsc binary vector using numpy
and plot by index
Usage: vec_binplot.py <PetSc binary vector>
"""
import numpy as np
import matplotlib.pyplot as plt
import sys,os
sys.path.append(os.path.join(os.environ['PETSC_DIR'],'bin','pythonscripts'))
import PetscBinaryIO
if __name__ == '__main__':
petsc = PetscBinaryIO.PetscBinaryIO()
vec = petsc.readBinaryFile(sys.argv[1])
vec = list(vec)[0]
x = range(len(vec))
#np.ndarray.sort(vec)
plt.plot(x,vec)
plt.xlim((0,x[-1]))
plt.show()
# Uncomment for remote systems
# plt.savefig('vec.svg')
# print "Output in svg file"
| gpl-2.0 |
IntelLabs/hpat | examples/series/rolling/series_rolling_mean.py | 1 | 1813 | # *****************************************************************************
# Copyright (c) 2020, Intel Corporation All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
#
# Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
# THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
# OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
# WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
# OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
# EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
# *****************************************************************************
import pandas as pd
from numba import njit
@njit
def series_rolling_mean():
series = pd.Series([4, 3, 5, 2, 6]) # Series of 4, 3, 5, 2, 6
out_series = series.rolling(3).mean()
return out_series # Expect series of NaN, NaN, 4.000000, 3.333333, 4.333333
print(series_rolling_mean())
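# Expected printed output (see the comment in the function): the first two
# values are NaN because a 3-element rolling window needs three observations
# by default; the remaining entries are 3-point moving averages.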
| bsd-2-clause |
petosegan/scikit-learn | benchmarks/bench_plot_lasso_path.py | 301 | 4003 | """Benchmarks of Lasso regularization path computation using Lars and CD
The input data is mostly low rank but is a fat infinite tail.
"""
from __future__ import print_function
from collections import defaultdict
import gc
import sys
from time import time
import numpy as np
from sklearn.linear_model import lars_path
from sklearn.linear_model import lasso_path
from sklearn.datasets.samples_generator import make_regression
def compute_bench(samples_range, features_range):
it = 0
results = defaultdict(lambda: [])
max_it = len(samples_range) * len(features_range)
for n_samples in samples_range:
for n_features in features_range:
it += 1
print('====================')
print('Iteration %03d of %03d' % (it, max_it))
print('====================')
dataset_kwargs = {
'n_samples': n_samples,
'n_features': n_features,
'n_informative': n_features / 10,
'effective_rank': min(n_samples, n_features) / 10,
#'effective_rank': None,
'bias': 0.0,
}
print("n_samples: %d" % n_samples)
print("n_features: %d" % n_features)
X, y = make_regression(**dataset_kwargs)
gc.collect()
print("benchmarking lars_path (with Gram):", end='')
sys.stdout.flush()
tstart = time()
G = np.dot(X.T, X) # precomputed Gram matrix
Xy = np.dot(X.T, y)
lars_path(X, y, Xy=Xy, Gram=G, method='lasso')
delta = time() - tstart
print("%0.3fs" % delta)
results['lars_path (with Gram)'].append(delta)
gc.collect()
print("benchmarking lars_path (without Gram):", end='')
sys.stdout.flush()
tstart = time()
lars_path(X, y, method='lasso')
delta = time() - tstart
print("%0.3fs" % delta)
results['lars_path (without Gram)'].append(delta)
gc.collect()
print("benchmarking lasso_path (with Gram):", end='')
sys.stdout.flush()
tstart = time()
lasso_path(X, y, precompute=True)
delta = time() - tstart
print("%0.3fs" % delta)
results['lasso_path (with Gram)'].append(delta)
gc.collect()
print("benchmarking lasso_path (without Gram):", end='')
sys.stdout.flush()
tstart = time()
lasso_path(X, y, precompute=False)
delta = time() - tstart
print("%0.3fs" % delta)
results['lasso_path (without Gram)'].append(delta)
return results
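# The timings collected above compare four strategies on the same data: LARS
# with and without a precomputed Gram matrix (G = X.T X, Xy = X.T y) and
# coordinate-descent lasso_path with precompute=True/False. Precomputing the
# Gram matrix trades an up-front O(n_samples * n_features**2) cost for cheaper
# iterations, which typically pays off when n_samples exceeds n_features.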
if __name__ == '__main__':
from mpl_toolkits.mplot3d import axes3d # register the 3d projection
import matplotlib.pyplot as plt
samples_range = np.linspace(10, 2000, 5).astype(np.int)
features_range = np.linspace(10, 2000, 5).astype(np.int)
results = compute_bench(samples_range, features_range)
max_time = max(max(t) for t in results.values())
fig = plt.figure('scikit-learn Lasso path benchmark results')
i = 1
for c, (label, timings) in zip('bcry', sorted(results.items())):
ax = fig.add_subplot(2, 2, i, projection='3d')
X, Y = np.meshgrid(samples_range, features_range)
Z = np.asarray(timings).reshape(samples_range.shape[0],
features_range.shape[0])
# plot the actual surface
ax.plot_surface(X, Y, Z.T, cstride=1, rstride=1, color=c, alpha=0.8)
# dummy point plot to stick the legend to since surface plot do not
# support legends (yet?)
#ax.plot([1], [1], [1], color=c, label=label)
ax.set_xlabel('n_samples')
ax.set_ylabel('n_features')
ax.set_zlabel('Time (s)')
ax.set_zlim3d(0.0, max_time * 1.1)
ax.set_title(label)
#ax.legend()
i += 1
plt.show()
| bsd-3-clause |
I2Cvb/data_balancing | pipeline/feature-classification/classification_melanoma_random_forest.py | 1 | 7763 | #title :classiciation_imbalanced_study.py
#description :Melanoma classification with random forests under several class-balancing strategies
#author :Guillaume Lemaitre, Mojdeh Rastgoo
#date :2016/01/19
#version :0.1
#notes :
#python_version :2.7.6
#==============================================================================
# Import the needed libraries
# Numpy library
import numpy as np
import pandas as pd
import h5py
# Joblib library
### Module to performed parallel processing
from joblib import Parallel, delayed
### Module to performed parallel processing
import multiprocessing
# OS library
import os
from os.path import join, isdir, isfile
# sys library
import sys
# Scikit-learn library
from sklearn.datasets import make_classification
from sklearn.cross_validation import KFold
from sklearn.cross_validation import StratifiedKFold
from sklearn.preprocessing import MinMaxScaler
from protoclass.classification.classification import Classify
# Initialization to the data paths
dataPath = sys.argv[1]
path_to_save = sys.argv[2]
#fread = pd.read_csv(dataPath.__add__('feature.csv'))
fread = pd.read_csv(join(dataPath, 'feature.csv'))
FeatureLists = fread.values
FeatureLists = FeatureLists[:,0]
#f= h5py.File(dataPath.__add__('PH2_Train_Test_80_20.mat'), 'r')
f = h5py.File(join(dataPath, 'PH2_Train_Test_80_20.mat'), 'r')
#CVIdx = sio.loadmat(datapath.__add__('TrainTestIndex_117_39_80.mat'))
trainIdx = np.asmatrix(f.get('trainingIdx'))
trainIdx = trainIdx.T
trainIdx = trainIdx - 1.
testIdx = np.asmatrix(f.get('testingIdx'))
testIdx = testIdx.T
testIdx = testIdx - 1.
Labels= np.asmatrix(f.get('BinaryLabels'))
Labels = Labels.T
ntree = 100
config = [{'classifier_str' : 'random-forest', 'n_estimators' : 100},
{'classifier_str' : 'random-forest', 'n_estimators' : 100,
'balancing_criterion' : 'random-over-sampling'},
{'classifier_str' : 'random-forest', 'n_estimators' : 100,
'balancing_criterion' : 'smote', 'kind_smote' : 'regular'},
#{'classifier_str' : 'random-forest', 'n_estimators' : 100,
#'balancing_criterion' : 'smote', 'kind_smote' : 'borderline1'},
#{'classifier_str' : 'random-forest', 'n_estimators' : 100,
#'balancing_criterion' : 'smote', 'kind_smote' : 'borderline2'},
#{'classifier_str' : 'random-forest', 'n_estimators' : 100,
#'balancing_criterion' : 'smote', 'kind_smote' : 'svm'},
{'classifier_str' : 'random-forest', 'n_estimators' : 100,
'balancing_criterion' : 'random-under-sampling', 'replacement' : True},
{'classifier_str' : 'random-forest', 'n_estimators' : 100,
'balancing_criterion' : 'tomek_links'},
{'classifier_str' : 'random-forest', 'n_estimators' : 100,
'balancing_criterion' : 'clustering'},
{'classifier_str' : 'random-forest', 'n_estimators' : 100,
'balancing_criterion' : 'nearmiss', 'version_nearmiss' : 1, 'size_ngh': 3},
{'classifier_str' : 'random-forest', 'n_estimators' : 100,
'balancing_criterion' : 'nearmiss', 'version_nearmiss' : 2, 'size_ngh': 3},
{'classifier_str' : 'random-forest', 'n_estimators' : 100,
'balancing_criterion' : 'nearmiss', 'version_nearmiss' : 3, 'size_ngh': 3, 'ver3_samp_ngh' : 3},
#{'classifier_str' : 'random-forest', 'n_estimators' : 100,
#'balancing_criterion' : 'cnn', 'size_ngh' : 3, 'n_seeds_S' :1},
#{'classifier_str' : 'random-forest', 'n_estimators' : 100,
#'balancing_criterion' : 'one-sided-selection', 'size_ngh' : 1, 'n_seeds_S' :1},
{'classifier_str' : 'random-forest', 'n_estimators' : 100,
'balancing_criterion' : 'ncr', 'size_ngh' : 3},
{'classifier_str' : 'random-forest', 'n_estimators' : 100,
'balancing_criterion' : 'easy-ensemble', 'n_subsets' : 10},
{'classifier_str' : 'random-forest', 'n_estimators' : 100,
'balancing_criterion' : 'balance-cascade', 'n_max_subset' : 100,
'balancing_classifier' : 'knn', 'bootstrap' : True},
{'classifier_str' : 'random-forest', 'n_estimators' : 100,
'balancing_criterion' : 'smote-enn', 'size_ngh' : 3},
{'classifier_str' : 'random-forest', 'n_estimators' : 100,
'balancing_criterion' : 'smote-tomek'}]
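# Each dict above is passed as keyword arguments to protoclass' Classify()
# inside the cross-validation loop below; 'balancing_criterion' selects the
# over-/under-sampling or ensemble strategy applied before the 100-tree
# random forest is trained and evaluated.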
FeaturesIdx = np.array([[1,0,0,0,0,0], [0,1,0,0,0,0], [0,0,1,0,0,0], [0,0,0,1,0,0], [0,0,0,0,1,0], [0,0,0,0,0,1],\
[1,1,0,0,0,0], [1,0,1,0,0,0], [1,0,0,1,0,0], [0,1,1,0,0,0], [0,1,0,1,0,0], [0,0,1,1,0,0],\
[0,0,0,0,1,1], [1,1,1,1,0,0], [1,0,0,0,1,1], [0,1,0,0,1,1], [0,0,1,0,1,1], [0,0,0,1,1,1],\
[1,1,0,0,1,1], [1,0,1,0,1,1], [1,0,0,1,1,1], [0,1,1,0,1,1], [0,1,0,1,1,1], [0,0,1,1,1,1]])
#[0,1,0,0],[0,0,1,0],[0,0,0,1] , [0,0,1,1], [1,1,0,0],[1,0,1,1],[0,1,1,1],[1,1,1,1]])
for I in range (0, FeaturesIdx.shape[0]):
NonzeroIdx = np.ravel(np.nonzero(FeaturesIdx[I]))
FVcombined = np.empty(shape = [193, 0])
for PIdx in range (0, NonzeroIdx.shape[0]):
f= h5py.File(join(dataPath,FeatureLists[NonzeroIdx[PIdx]]), 'r')
#f = sio.loadmat(join(featurePath, FeatureLists[NonzeroIdx[PIdx]]))
FV =np.asmatrix(f.get('FV'))
FV =FV.T
FVcombined = np.append(FVcombined, FV, axis = 1)
del FV
FV = FVcombined
rocs = []
gt_labels = []
pred_labels = []
pred_probs = []
# Apply the classification for each fold
n_jobs = -5
for CV in range (0, trainIdx.shape[1]):
print 'Iteration #{}'.format(CV)
# Extract the data
### Training
train_data = FV[np.ravel(trainIdx[:,CV].astype(int)), :]
train_label = np.ravel(Labels[np.ravel(trainIdx[:,CV].astype(int))])
### Testing
test_data = FV[np.ravel(testIdx[:,CV].astype(int)), :]
test_label = np.ravel(Labels[np.ravel(testIdx[:,CV].astype(int))])
config_roc = []
config_pred_label = []
config_pred_prob = []
config_gt_label = []
for c in config:
print c
pred_label, pred_prob, roc = Classify(train_data, train_label, test_data, test_label, gs_n_jobs=n_jobs, **c)
config_roc.append(roc)
config_pred_label.append(pred_label)
config_pred_prob.append(pred_prob)
config_gt_label.append(test_label)
rocs.append(config_roc)
pred_labels.append(config_pred_label)
pred_probs.append(config_pred_prob)
gt_labels.append(config_gt_label)
    # Convert the collected results to numpy arrays
rocs = np.array(rocs)
pred_labels = np.array(pred_labels)
pred_probs = np.array(pred_probs)
gt_labels = np.array(gt_labels)
    # Reshape the arrays so that the first index corresponds to the
    # configuration, the second to the k-fold iteration,
    # and the last to the data themselves.
rocs = np.swapaxes(rocs, 0, 1)
pred_labels = np.swapaxes(pred_labels, 0, 1)
pred_probs = np.swapaxes(pred_probs, 0, 1)
gt_labels = np.swapaxes(gt_labels, 0, 1)
    # Save the results to disk (both as a numpy .npz archive and a MATLAB .mat file)
if not os.path.exists(path_to_save):
os.makedirs(path_to_save)
saving_filename = 'melanoma_imbalanced_80_20_' + str(ntree) + '_' + str(I)
saving_path = join(path_to_save, saving_filename)
np.savez(saving_path, gt_labels=gt_labels, pred_labels=pred_labels, pred_probs=pred_probs, rocs=rocs)
tosave={}
tosave['rocs'] = rocs
tosave['pred_labels'] = pred_labels
tosave['pred_probs'] = pred_probs
tosave['gt_labels'] = gt_labels
saving_path = join(path_to_save, saving_filename)
from scipy.io import savemat
savemat(saving_path, tosave)
| mit |
AustereCuriosity/astropy | astropy/table/tests/test_table.py | 1 | 68562 | # -*- coding: utf-8 -*-
# Licensed under a 3-clause BSD style license - see LICENSE.rst
# TEST_UNICODE_LITERALS
import copy
import gc
import sys
from collections import OrderedDict
import numpy as np
from numpy.testing import assert_allclose
from ...extern import six
from ...io import fits
from ...tests.helper import (pytest, assert_follows_unicode_guidelines,
ignore_warnings, catch_warnings)
from ...utils.data import get_pkg_data_filename
from ... import table
from ... import units as u
from .conftest import MaskedTable
from ...extern.six.moves import zip, range, cStringIO as StringIO
try:
with ignore_warnings(DeprecationWarning):
# Ignore DeprecationWarning on pandas import in Python 3.5--see
# https://github.com/astropy/astropy/issues/4380
import pandas # pylint: disable=W0611
except ImportError:
HAS_PANDAS = False
else:
HAS_PANDAS = True
class SetupData(object):
def _setup(self, table_types):
self._table_type = table_types.Table
self._column_type = table_types.Column
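    # The column and table properties below are created lazily so that each
    # test gets fresh fixtures matching the parametrized table/column type.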
@property
def a(self):
if self._column_type is not None:
if not hasattr(self, '_a'):
self._a = self._column_type(
[1, 2, 3], name='a', format='%d',
meta={'aa': [0, 1, 2, 3, 4]})
return self._a
@property
def b(self):
if self._column_type is not None:
if not hasattr(self, '_b'):
self._b = self._column_type(
[4, 5, 6], name='b', format='%d', meta={'aa': 1})
return self._b
@property
def c(self):
if self._column_type is not None:
if not hasattr(self, '_c'):
self._c = self._column_type([7, 8, 9], 'c')
return self._c
@property
def d(self):
if self._column_type is not None:
if not hasattr(self, '_d'):
self._d = self._column_type([7, 8, 7], 'd')
return self._d
@property
def obj(self):
if self._column_type is not None:
if not hasattr(self, '_obj'):
self._obj = self._column_type([1, 'string', 3], 'obj', dtype='O')
return self._obj
@property
def t(self):
if self._table_type is not None:
if not hasattr(self, '_t'):
self._t = self._table_type([self.a, self.b])
return self._t
@pytest.mark.usefixtures('table_types')
class TestSetTableColumn(SetupData):
def test_set_row(self, table_types):
"""Set a row from a tuple of values"""
self._setup(table_types)
t = table_types.Table([self.a, self.b])
t[1] = (20, 21)
assert t['a'][0] == 1
assert t['a'][1] == 20
assert t['a'][2] == 3
assert t['b'][0] == 4
assert t['b'][1] == 21
assert t['b'][2] == 6
def test_set_row_existing(self, table_types):
"""Set a row from another existing row"""
self._setup(table_types)
t = table_types.Table([self.a, self.b])
t[0] = t[1]
assert t[0][0] == 2
assert t[0][1] == 5
def test_set_row_fail_1(self, table_types):
"""Set a row from an incorrectly-sized or typed set of values"""
self._setup(table_types)
t = table_types.Table([self.a, self.b])
with pytest.raises(ValueError):
t[1] = (20, 21, 22)
with pytest.raises(TypeError):
t[1] = 0
def test_set_row_fail_2(self, table_types):
"""Set a row from an incorrectly-typed tuple of values"""
self._setup(table_types)
t = table_types.Table([self.a, self.b])
with pytest.raises(ValueError):
t[1] = ('abc', 'def')
def test_set_new_col_new_table(self, table_types):
"""Create a new column in empty table using the item access syntax"""
self._setup(table_types)
t = table_types.Table()
t['aa'] = self.a
# Test that the new column name is 'aa' and that the values match
assert np.all(t['aa'] == self.a)
assert t.colnames == ['aa']
def test_set_new_col_new_table_quantity(self, table_types):
"""Create a new column (from a quantity) in empty table using the item access syntax"""
self._setup(table_types)
t = table_types.Table()
t['aa'] = np.array([1, 2, 3]) * u.m
assert np.all(t['aa'] == np.array([1, 2, 3]))
assert t['aa'].unit == u.m
t['bb'] = 3 * u.m
assert np.all(t['bb'] == 3)
assert t['bb'].unit == u.m
def test_set_new_col_existing_table(self, table_types):
"""Create a new column in an existing table using the item access syntax"""
self._setup(table_types)
t = table_types.Table([self.a])
# Add a column
t['bb'] = self.b
assert np.all(t['bb'] == self.b)
assert t.colnames == ['a', 'bb']
assert t['bb'].meta == self.b.meta
assert t['bb'].format == self.b.format
# Add another column
t['c'] = t['a']
assert np.all(t['c'] == t['a'])
assert t.colnames == ['a', 'bb', 'c']
assert t['c'].meta == t['a'].meta
assert t['c'].format == t['a'].format
# Add a multi-dimensional column
t['d'] = table_types.Column(np.arange(12).reshape(3, 2, 2))
assert t['d'].shape == (3, 2, 2)
assert t['d'][0, 0, 1] == 1
# Add column from a list
t['e'] = ['hello', 'the', 'world']
assert np.all(t['e'] == np.array(['hello', 'the', 'world']))
# Make sure setting existing column still works
t['e'] = ['world', 'hello', 'the']
assert np.all(t['e'] == np.array(['world', 'hello', 'the']))
# Add a column via broadcasting
t['f'] = 10
assert np.all(t['f'] == 10)
# Add a column from a Quantity
t['g'] = np.array([1, 2, 3]) * u.m
assert np.all(t['g'].data == np.array([1, 2, 3]))
assert t['g'].unit == u.m
# Add a column from a (scalar) Quantity
t['g'] = 3 * u.m
assert np.all(t['g'].data == 3)
assert t['g'].unit == u.m
def test_set_new_unmasked_col_existing_table(self, table_types):
"""Create a new column in an existing table using the item access syntax"""
self._setup(table_types)
t = table_types.Table([self.a]) # masked or unmasked
b = table.Column(name='b', data=[1, 2, 3]) # unmasked
t['b'] = b
assert np.all(t['b'] == b)
def test_set_new_masked_col_existing_table(self, table_types):
"""Create a new column in an existing table using the item access syntax"""
self._setup(table_types)
t = table_types.Table([self.a]) # masked or unmasked
b = table.MaskedColumn(name='b', data=[1, 2, 3]) # masked
t['b'] = b
assert np.all(t['b'] == b)
def test_set_new_col_existing_table_fail(self, table_types):
"""Generate failure when creating a new column using the item access syntax"""
self._setup(table_types)
t = table_types.Table([self.a])
# Wrong size
with pytest.raises(ValueError):
t['b'] = [1, 2]
@pytest.mark.usefixtures('table_types')
class TestEmptyData():
def test_1(self, table_types):
t = table_types.Table()
t.add_column(table_types.Column(name='a', dtype=int, length=100))
assert len(t['a']) == 100
def test_2(self, table_types):
t = table_types.Table()
t.add_column(table_types.Column(name='a', dtype=int, shape=(3, ), length=100))
assert len(t['a']) == 100
def test_3(self, table_types):
t = table_types.Table() # length is not given
t.add_column(table_types.Column(name='a', dtype=int))
assert len(t['a']) == 0
def test_4(self, table_types):
t = table_types.Table() # length is not given
t.add_column(table_types.Column(name='a', dtype=int, shape=(3, 4)))
assert len(t['a']) == 0
def test_5(self, table_types):
t = table_types.Table()
t.add_column(table_types.Column(name='a')) # dtype is not specified
assert len(t['a']) == 0
def test_add_via_setitem_and_slice(self, table_types):
"""Test related to #3023 where a MaskedColumn is created with name=None
and then gets changed to name='a'. After PR #2790 this test fails
without the #3023 fix."""
t = table_types.Table()
t['a'] = table_types.Column([1, 2, 3])
t2 = t[:]
assert t2.colnames == t.colnames
@pytest.mark.usefixtures('table_types')
class TestNewFromColumns():
def test_simple(self, table_types):
cols = [table_types.Column(name='a', data=[1, 2, 3]),
table_types.Column(name='b', data=[4, 5, 6], dtype=np.float32)]
t = table_types.Table(cols)
assert np.all(t['a'].data == np.array([1, 2, 3]))
assert np.all(t['b'].data == np.array([4, 5, 6], dtype=np.float32))
assert type(t['b'][1]) is np.float32
def test_from_np_array(self, table_types):
cols = [table_types.Column(name='a', data=np.array([1, 2, 3], dtype=np.int64),
dtype=np.float64),
table_types.Column(name='b', data=np.array([4, 5, 6], dtype=np.float32))]
t = table_types.Table(cols)
assert np.all(t['a'] == np.array([1, 2, 3], dtype=np.float64))
assert np.all(t['b'] == np.array([4, 5, 6], dtype=np.float32))
assert type(t['a'][1]) is np.float64
assert type(t['b'][1]) is np.float32
def test_size_mismatch(self, table_types):
cols = [table_types.Column(name='a', data=[1, 2, 3]),
table_types.Column(name='b', data=[4, 5, 6, 7])]
with pytest.raises(ValueError):
table_types.Table(cols)
def test_name_none(self, table_types):
"""Column with name=None can init a table whether or not names are supplied"""
c = table_types.Column(data=[1, 2], name='c')
d = table_types.Column(data=[3, 4])
t = table_types.Table([c, d], names=(None, 'd'))
assert t.colnames == ['c', 'd']
t = table_types.Table([c, d])
assert t.colnames == ['c', 'col1']
@pytest.mark.usefixtures('table_types')
class TestReverse():
def test_reverse(self, table_types):
t = table_types.Table([[1, 2, 3],
['a', 'b', 'cc']])
t.reverse()
assert np.all(t['col0'] == np.array([3, 2, 1]))
assert np.all(t['col1'] == np.array(['cc', 'b', 'a']))
t2 = table_types.Table(t, copy=False)
assert np.all(t2['col0'] == np.array([3, 2, 1]))
assert np.all(t2['col1'] == np.array(['cc', 'b', 'a']))
t2 = table_types.Table(t, copy=True)
assert np.all(t2['col0'] == np.array([3, 2, 1]))
assert np.all(t2['col1'] == np.array(['cc', 'b', 'a']))
t2.sort('col0')
assert np.all(t2['col0'] == np.array([1, 2, 3]))
assert np.all(t2['col1'] == np.array(['a', 'b', 'cc']))
def test_reverse_big(self, table_types):
x = np.arange(10000)
y = x + 1
t = table_types.Table([x, y], names=('x', 'y'))
t.reverse()
assert np.all(t['x'] == x[::-1])
assert np.all(t['y'] == y[::-1])
@pytest.mark.usefixtures('table_types')
class TestColumnAccess():
def test_1(self, table_types):
t = table_types.Table()
with pytest.raises(KeyError):
t['a']
def test_2(self, table_types):
t = table_types.Table()
t.add_column(table_types.Column(name='a', data=[1, 2, 3]))
assert np.all(t['a'] == np.array([1, 2, 3]))
with pytest.raises(KeyError):
t['b'] # column does not exist
def test_itercols(self, table_types):
names = ['a', 'b', 'c']
t = table_types.Table([[1], [2], [3]], names=names)
for name, col in zip(names, t.itercols()):
assert name == col.name
assert isinstance(col, table_types.Column)
@pytest.mark.usefixtures('table_types')
class TestAddLength(SetupData):
def test_right_length(self, table_types):
self._setup(table_types)
t = table_types.Table([self.a])
t.add_column(self.b)
def test_too_long(self, table_types):
self._setup(table_types)
t = table_types.Table([self.a])
with pytest.raises(ValueError):
t.add_column(table_types.Column(name='b', data=[4, 5, 6, 7])) # data too long
def test_too_short(self, table_types):
self._setup(table_types)
t = table_types.Table([self.a])
with pytest.raises(ValueError):
t.add_column(table_types.Column(name='b', data=[4, 5])) # data too short
@pytest.mark.usefixtures('table_types')
class TestAddPosition(SetupData):
def test_1(self, table_types):
self._setup(table_types)
t = table_types.Table()
t.add_column(self.a, 0)
def test_2(self, table_types):
self._setup(table_types)
t = table_types.Table()
t.add_column(self.a, 1)
def test_3(self, table_types):
self._setup(table_types)
t = table_types.Table()
t.add_column(self.a, -1)
def test_5(self, table_types):
self._setup(table_types)
t = table_types.Table()
with pytest.raises(ValueError):
t.index_column('b')
def test_6(self, table_types):
self._setup(table_types)
t = table_types.Table()
t.add_column(self.a)
t.add_column(self.b)
assert t.columns.keys() == ['a', 'b']
def test_7(self, table_types):
self._setup(table_types)
t = table_types.Table([self.a])
t.add_column(self.b, t.index_column('a'))
assert t.columns.keys() == ['b', 'a']
def test_8(self, table_types):
self._setup(table_types)
t = table_types.Table([self.a])
t.add_column(self.b, t.index_column('a') + 1)
assert t.columns.keys() == ['a', 'b']
def test_9(self, table_types):
self._setup(table_types)
t = table_types.Table()
t.add_column(self.a)
t.add_column(self.b, t.index_column('a') + 1)
t.add_column(self.c, t.index_column('b'))
assert t.columns.keys() == ['a', 'c', 'b']
def test_10(self, table_types):
self._setup(table_types)
t = table_types.Table()
t.add_column(self.a)
ia = t.index_column('a')
t.add_column(self.b, ia + 1)
t.add_column(self.c, ia)
assert t.columns.keys() == ['c', 'a', 'b']
@pytest.mark.usefixtures('table_types')
class TestAddName(SetupData):
def test_override_name(self, table_types):
self._setup(table_types)
t = table_types.Table()
# Check that we can override the name of the input column in the Table
t.add_column(self.a, name='b')
t.add_column(self.b, name='a')
assert t.columns.keys() == ['b', 'a']
# Check that we did not change the name of the input column
assert self.a.info.name == 'a'
assert self.b.info.name == 'b'
# Now test with an input column from another table
t2 = table_types.Table()
t2.add_column(t['a'], name='c')
assert t2.columns.keys() == ['c']
# Check that we did not change the name of the input column
assert t.columns.keys() == ['b', 'a']
# Check that we can give a name if none was present
col = table_types.Column([1, 2, 3])
t.add_column(col, name='c')
assert t.columns.keys() == ['b', 'a', 'c']
def test_default_name(self, table_types):
t = table_types.Table()
col = table_types.Column([1, 2, 3])
t.add_column(col)
assert t.columns.keys() == ['col0']
@pytest.mark.usefixtures('table_types')
class TestInitFromTable(SetupData):
def test_from_table_cols(self, table_types):
"""Ensure that using cols from an existing table gives
a clean copy.
"""
self._setup(table_types)
t = self.t
cols = t.columns
# Construct Table with cols via Table._new_from_cols
t2a = table_types.Table([cols['a'], cols['b'], self.c])
# Construct with add_column
t2b = table_types.Table()
t2b.add_column(cols['a'])
t2b.add_column(cols['b'])
t2b.add_column(self.c)
t['a'][1] = 20
t['b'][1] = 21
for t2 in [t2a, t2b]:
t2['a'][2] = 10
t2['b'][2] = 11
t2['c'][2] = 12
t2.columns['a'].meta['aa'][3] = 10
assert np.all(t['a'] == np.array([1, 20, 3]))
assert np.all(t['b'] == np.array([4, 21, 6]))
assert np.all(t2['a'] == np.array([1, 2, 10]))
assert np.all(t2['b'] == np.array([4, 5, 11]))
assert np.all(t2['c'] == np.array([7, 8, 12]))
assert t2['a'].name == 'a'
assert t2.columns['a'].meta['aa'][3] == 10
assert t.columns['a'].meta['aa'][3] == 3
@pytest.mark.usefixtures('table_types')
class TestAddColumns(SetupData):
def test_add_columns1(self, table_types):
self._setup(table_types)
t = table_types.Table()
t.add_columns([self.a, self.b, self.c])
assert t.colnames == ['a', 'b', 'c']
def test_add_columns2(self, table_types):
self._setup(table_types)
t = table_types.Table([self.a, self.b])
t.add_columns([self.c, self.d])
assert t.colnames == ['a', 'b', 'c', 'd']
assert np.all(t['c'] == np.array([7, 8, 9]))
def test_add_columns3(self, table_types):
self._setup(table_types)
t = table_types.Table([self.a, self.b])
t.add_columns([self.c, self.d], indexes=[1, 0])
assert t.colnames == ['d', 'a', 'c', 'b']
def test_add_columns4(self, table_types):
self._setup(table_types)
t = table_types.Table([self.a, self.b])
t.add_columns([self.c, self.d], indexes=[0, 0])
assert t.colnames == ['c', 'd', 'a', 'b']
def test_add_columns5(self, table_types):
self._setup(table_types)
t = table_types.Table([self.a, self.b])
t.add_columns([self.c, self.d], indexes=[2, 2])
assert t.colnames == ['a', 'b', 'c', 'd']
def test_add_columns6(self, table_types):
"""Check that we can override column names."""
self._setup(table_types)
t = table_types.Table()
t.add_columns([self.a, self.b, self.c], names=['b', 'c', 'a'])
assert t.colnames == ['b', 'c', 'a']
def test_add_columns7(self, table_types):
"""Check that default names are used when appropriate."""
t = table_types.Table()
col0 = table_types.Column([1, 2, 3])
col1 = table_types.Column([4, 5, 3])
t.add_columns([col0, col1])
assert t.colnames == ['col0', 'col1']
def test_add_duplicate_column(self, table_types):
self._setup(table_types)
t = table_types.Table()
t.add_column(self.a)
with pytest.raises(ValueError):
t.add_column(table_types.Column(name='a', data=[0, 1, 2]))
t.add_column(table_types.Column(name='a', data=[0, 1, 2]),
rename_duplicate=True)
t.add_column(self.b)
t.add_column(self.c)
assert t.colnames == ['a', 'a_1', 'b', 'c']
t.add_column(table_types.Column(name='a', data=[0, 1, 2]),
rename_duplicate=True)
assert t.colnames == ['a', 'a_1', 'b', 'c', 'a_2']
# test adding column from a separate Table
t1 = table_types.Table()
t1.add_column(self.a)
with pytest.raises(ValueError):
t.add_column(t1['a'])
t.add_column(t1['a'], rename_duplicate=True)
t1['a'][0] = 100 # Change original column
assert t.colnames == ['a', 'a_1', 'b', 'c', 'a_2', 'a_3']
assert t1.colnames == ['a']
# Check new column didn't change (since name conflict forced a copy)
assert t['a_3'][0] == self.a[0]
def test_add_duplicate_columns(self, table_types):
self._setup(table_types)
t = table_types.Table([self.a, self.b, self.c])
with pytest.raises(ValueError):
t.add_columns([table_types.Column(name='a', data=[0, 1, 2]), table_types.Column(name='b', data=[0, 1, 2])])
t.add_columns([table_types.Column(name='a', data=[0, 1, 2]),
table_types.Column(name='b', data=[0, 1, 2])],
rename_duplicate=True)
t.add_column(self.d)
assert t.colnames == ['a', 'b', 'c', 'a_1', 'b_1', 'd']
@pytest.mark.usefixtures('table_types')
class TestAddRow(SetupData):
@property
def b(self):
if self._column_type is not None:
if not hasattr(self, '_b'):
self._b = self._column_type(name='b', data=[4.0, 5.1, 6.2])
return self._b
@property
def c(self):
if self._column_type is not None:
if not hasattr(self, '_c'):
self._c = self._column_type(name='c', data=['7', '8', '9'])
return self._c
@property
def d(self):
if self._column_type is not None:
if not hasattr(self, '_d'):
self._d = self._column_type(name='d', data=[[1, 2], [3, 4], [5, 6]])
return self._d
@property
def t(self):
if self._table_type is not None:
if not hasattr(self, '_t'):
self._t = self._table_type([self.a, self.b, self.c])
return self._t
def test_add_none_to_empty_table(self, table_types):
self._setup(table_types)
t = table_types.Table(names=('a', 'b', 'c'), dtype=('(2,)i', 'S4', 'O'))
t.add_row()
assert np.all(t['a'][0] == [0, 0])
assert t['b'][0] == ''
assert t['c'][0] == 0
t.add_row()
assert np.all(t['a'][1] == [0, 0])
assert t['b'][1] == ''
assert t['c'][1] == 0
def test_add_stuff_to_empty_table(self, table_types):
self._setup(table_types)
t = table_types.Table(names=('a', 'b', 'obj'), dtype=('(2,)i', 'S8', 'O'))
t.add_row([[1, 2], 'hello', 'world'])
assert np.all(t['a'][0] == [1, 2])
assert t['b'][0] == 'hello'
assert t['obj'][0] == 'world'
# Make sure it is not repeating last row but instead
# adding zeros (as documented)
t.add_row()
assert np.all(t['a'][1] == [0, 0])
assert t['b'][1] == ''
assert t['obj'][1] == 0
def test_add_table_row(self, table_types):
self._setup(table_types)
t = self.t
t['d'] = self.d
t2 = table_types.Table([self.a, self.b, self.c, self.d])
t.add_row(t2[0])
assert len(t) == 4
assert np.all(t['a'] == np.array([1, 2, 3, 1]))
assert np.allclose(t['b'], np.array([4.0, 5.1, 6.2, 4.0]))
assert np.all(t['c'] == np.array(['7', '8', '9', '7']))
assert np.all(t['d'] == np.array([[1, 2], [3, 4], [5, 6], [1, 2]]))
def test_add_table_row_obj(self, table_types):
self._setup(table_types)
t = table_types.Table([self.a, self.b, self.obj])
t.add_row([1, 4.0, [10]])
assert len(t) == 4
assert np.all(t['a'] == np.array([1, 2, 3, 1]))
assert np.allclose(t['b'], np.array([4.0, 5.1, 6.2, 4.0]))
assert np.all(t['obj'] == np.array([1, 'string', 3, [10]], dtype='O'))
def test_add_qtable_row_multidimensional(self):
q = [[1, 2], [3, 4]] * u.m
qt = table.QTable([q])
qt.add_row(([5, 6] * u.km,))
assert np.all(qt['col0'] == [[1, 2], [3, 4], [5000, 6000]] * u.m)
def test_add_with_tuple(self, table_types):
self._setup(table_types)
t = self.t
t.add_row((4, 7.2, '1'))
assert len(t) == 4
assert np.all(t['a'] == np.array([1, 2, 3, 4]))
assert np.allclose(t['b'], np.array([4.0, 5.1, 6.2, 7.2]))
assert np.all(t['c'] == np.array(['7', '8', '9', '1']))
def test_add_with_list(self, table_types):
self._setup(table_types)
t = self.t
t.add_row([4, 7.2, '10'])
assert len(t) == 4
assert np.all(t['a'] == np.array([1, 2, 3, 4]))
assert np.allclose(t['b'], np.array([4.0, 5.1, 6.2, 7.2]))
assert np.all(t['c'] == np.array(['7', '8', '9', '1']))
def test_add_with_dict(self, table_types):
self._setup(table_types)
t = self.t
t.add_row({'a': 4, 'b': 7.2})
assert len(t) == 4
assert np.all(t['a'] == np.array([1, 2, 3, 4]))
assert np.allclose(t['b'], np.array([4.0, 5.1, 6.2, 7.2]))
if t.masked:
assert np.all(t['c'] == np.array(['7', '8', '9', '7']))
else:
assert np.all(t['c'] == np.array(['7', '8', '9', '']))
def test_add_with_none(self, table_types):
self._setup(table_types)
t = self.t
t.add_row()
assert len(t) == 4
assert np.all(t['a'].data == np.array([1, 2, 3, 0]))
assert np.allclose(t['b'], np.array([4.0, 5.1, 6.2, 0.0]))
assert np.all(t['c'].data == np.array(['7', '8', '9', '']))
def test_add_missing_column(self, table_types):
self._setup(table_types)
t = self.t
with pytest.raises(ValueError):
t.add_row({'bad_column': 1})
def test_wrong_size_tuple(self, table_types):
self._setup(table_types)
t = self.t
with pytest.raises(ValueError):
t.add_row((1, 2))
def test_wrong_vals_type(self, table_types):
self._setup(table_types)
t = self.t
with pytest.raises(TypeError):
t.add_row(1)
def test_add_row_failures(self, table_types):
self._setup(table_types)
t = self.t
t_copy = table_types.Table(t, copy=True)
# Wrong number of columns
try:
t.add_row([1, 2, 3, 4])
except ValueError:
pass
assert len(t) == 3
assert np.all(t.as_array() == t_copy.as_array())
# Wrong data type
try:
t.add_row(['one', 2, 3])
except ValueError:
pass
assert len(t) == 3
assert np.all(t.as_array() == t_copy.as_array())
def test_insert_table_row(self, table_types):
"""
Light testing of Table.insert_row() method. The deep testing is done via
the add_row() tests which calls insert_row(index=len(self), ...), so
here just test that the added index parameter is handled correctly.
"""
self._setup(table_types)
row = (10, 40.0, 'x', [10, 20])
for index in range(-3, 4):
indices = np.insert(np.arange(3), index, 3)
t = table_types.Table([self.a, self.b, self.c, self.d])
t2 = t.copy()
t.add_row(row) # By now we know this works
t2.insert_row(index, row)
for name in t.colnames:
if t[name].dtype.kind == 'f':
assert np.allclose(t[name][indices], t2[name])
else:
assert np.all(t[name][indices] == t2[name])
for index in (-4, 4):
t = table_types.Table([self.a, self.b, self.c, self.d])
with pytest.raises(IndexError):
t.insert_row(index, row)
@pytest.mark.usefixtures('table_types')
class TestTableColumn(SetupData):
def test_column_view(self, table_types):
self._setup(table_types)
t = self.t
a = t.columns['a']
a[2] = 10
assert t['a'][2] == 10
@pytest.mark.usefixtures('table_types')
class TestArrayColumns(SetupData):
def test_1d(self, table_types):
self._setup(table_types)
b = table_types.Column(name='b', dtype=int, shape=(2, ), length=3)
t = table_types.Table([self.a])
t.add_column(b)
assert t['b'].shape == (3, 2)
assert t['b'][0].shape == (2, )
def test_2d(self, table_types):
self._setup(table_types)
b = table_types.Column(name='b', dtype=int, shape=(2, 4), length=3)
t = table_types.Table([self.a])
t.add_column(b)
assert t['b'].shape == (3, 2, 4)
assert t['b'][0].shape == (2, 4)
def test_3d(self, table_types):
self._setup(table_types)
t = table_types.Table([self.a])
b = table_types.Column(name='b', dtype=int, shape=(2, 4, 6), length=3)
t.add_column(b)
assert t['b'].shape == (3, 2, 4, 6)
assert t['b'][0].shape == (2, 4, 6)
@pytest.mark.usefixtures('table_types')
class TestRemove(SetupData):
@property
def t(self):
if self._table_type is not None:
if not hasattr(self, '_t'):
self._t = self._table_type([self.a])
return self._t
@property
def t2(self):
if self._table_type is not None:
if not hasattr(self, '_t2'):
self._t2 = self._table_type([self.a, self.b, self.c])
return self._t2
def test_1(self, table_types):
self._setup(table_types)
self.t.remove_columns('a')
assert self.t.columns.keys() == []
assert self.t.as_array() is None
def test_2(self, table_types):
self._setup(table_types)
self.t.add_column(self.b)
self.t.remove_columns('a')
assert self.t.columns.keys() == ['b']
assert self.t.dtype.names == ('b',)
assert np.all(self.t['b'] == np.array([4, 5, 6]))
def test_3(self, table_types):
"""Check remove_columns works for a single column with a name of
more than one character. Regression test against #2699"""
self._setup(table_types)
self.t['new_column'] = self.t['a']
assert 'new_column' in self.t.columns.keys()
self.t.remove_columns('new_column')
assert 'new_column' not in self.t.columns.keys()
def test_remove_nonexistent_row(self, table_types):
self._setup(table_types)
with pytest.raises(IndexError):
self.t.remove_row(4)
def test_remove_row_0(self, table_types):
self._setup(table_types)
self.t.add_column(self.b)
self.t.add_column(self.c)
self.t.remove_row(0)
assert self.t.colnames == ['a', 'b', 'c']
assert np.all(self.t['b'] == np.array([5, 6]))
def test_remove_row_1(self, table_types):
self._setup(table_types)
self.t.add_column(self.b)
self.t.add_column(self.c)
self.t.remove_row(1)
assert self.t.colnames == ['a', 'b', 'c']
assert np.all(self.t['a'] == np.array([1, 3]))
def test_remove_row_2(self, table_types):
self._setup(table_types)
self.t.add_column(self.b)
self.t.add_column(self.c)
self.t.remove_row(2)
assert self.t.colnames == ['a', 'b', 'c']
assert np.all(self.t['c'] == np.array([7, 8]))
def test_remove_row_slice(self, table_types):
self._setup(table_types)
self.t.add_column(self.b)
self.t.add_column(self.c)
self.t.remove_rows(slice(0, 2, 1))
assert self.t.colnames == ['a', 'b', 'c']
assert np.all(self.t['c'] == np.array([9]))
def test_remove_row_list(self, table_types):
self._setup(table_types)
self.t.add_column(self.b)
self.t.add_column(self.c)
self.t.remove_rows([0, 2])
assert self.t.colnames == ['a', 'b', 'c']
assert np.all(self.t['c'] == np.array([8]))
def test_remove_row_preserves_meta(self, table_types):
self._setup(table_types)
self.t.add_column(self.b)
self.t.remove_rows([0, 2])
assert self.t['a'].meta == {'aa': [0, 1, 2, 3, 4]}
assert self.t.dtype == np.dtype([(str('a'), 'int'),
(str('b'), 'int')])
def test_delitem1(self, table_types):
self._setup(table_types)
del self.t['a']
assert self.t.columns.keys() == []
assert self.t.as_array() is None
def test_delitem2(self, table_types):
self._setup(table_types)
del self.t2['b']
assert self.t2.colnames == ['a', 'c']
def test_delitems(self, table_types):
self._setup(table_types)
del self.t2['a', 'b']
assert self.t2.colnames == ['c']
def test_delitem_fail(self, table_types):
self._setup(table_types)
with pytest.raises(KeyError):
del self.t['d']
@pytest.mark.usefixtures('table_types')
class TestKeep(SetupData):
def test_1(self, table_types):
self._setup(table_types)
t = table_types.Table([self.a, self.b])
t.keep_columns([])
assert t.columns.keys() == []
assert t.as_array() is None
def test_2(self, table_types):
self._setup(table_types)
t = table_types.Table([self.a, self.b])
t.keep_columns('b')
assert t.columns.keys() == ['b']
assert t.dtype.names == ('b',)
assert np.all(t['b'] == np.array([4, 5, 6]))
@pytest.mark.usefixtures('table_types')
class TestRename(SetupData):
def test_1(self, table_types):
self._setup(table_types)
t = table_types.Table([self.a])
t.rename_column('a', 'b')
assert t.columns.keys() == ['b']
assert t.dtype.names == ('b',)
assert np.all(t['b'] == np.array([1, 2, 3]))
def test_2(self, table_types):
self._setup(table_types)
t = table_types.Table([self.a, self.b])
t.rename_column('a', 'c')
t.rename_column('b', 'a')
assert t.columns.keys() == ['c', 'a']
assert t.dtype.names == ('c', 'a')
if t.masked:
assert t.mask.dtype.names == ('c', 'a')
assert np.all(t['c'] == np.array([1, 2, 3]))
assert np.all(t['a'] == np.array([4, 5, 6]))
def test_rename_by_attr(self, table_types):
self._setup(table_types)
t = table_types.Table([self.a, self.b])
t['a'].name = 'c'
t['b'].name = 'a'
assert t.columns.keys() == ['c', 'a']
assert t.dtype.names == ('c', 'a')
assert np.all(t['c'] == np.array([1, 2, 3]))
assert np.all(t['a'] == np.array([4, 5, 6]))
@pytest.mark.usefixtures('table_types')
class TestSort():
def test_single(self, table_types):
t = table_types.Table()
t.add_column(table_types.Column(name='a', data=[2, 1, 3]))
t.add_column(table_types.Column(name='b', data=[6, 5, 4]))
t.add_column(table_types.Column(name='c', data=[(1, 2), (3, 4), (4, 5)]))
assert np.all(t['a'] == np.array([2, 1, 3]))
assert np.all(t['b'] == np.array([6, 5, 4]))
t.sort('a')
assert np.all(t['a'] == np.array([1, 2, 3]))
assert np.all(t['b'] == np.array([5, 6, 4]))
assert np.all(t['c'] == np.array([[3, 4],
[1, 2],
[4, 5]]))
t.sort('b')
assert np.all(t['a'] == np.array([3, 1, 2]))
assert np.all(t['b'] == np.array([4, 5, 6]))
assert np.all(t['c'] == np.array([[4, 5],
[3, 4],
[1, 2]]))
def test_single_big(self, table_types):
"""Sort a big-ish table with a non-trivial sort order"""
x = np.arange(10000)
y = np.sin(x)
t = table_types.Table([x, y], names=('x', 'y'))
t.sort('y')
idx = np.argsort(y)
assert np.all(t['x'] == x[idx])
assert np.all(t['y'] == y[idx])
def test_empty(self, table_types):
t = table_types.Table([[], []], dtype=['f4', 'U1'])
t.sort('col1')
def test_multiple(self, table_types):
t = table_types.Table()
t.add_column(table_types.Column(name='a', data=[2, 1, 3, 2, 3, 1]))
t.add_column(table_types.Column(name='b', data=[6, 5, 4, 3, 5, 4]))
assert np.all(t['a'] == np.array([2, 1, 3, 2, 3, 1]))
assert np.all(t['b'] == np.array([6, 5, 4, 3, 5, 4]))
t.sort(['a', 'b'])
assert np.all(t['a'] == np.array([1, 1, 2, 2, 3, 3]))
assert np.all(t['b'] == np.array([4, 5, 3, 6, 4, 5]))
t.sort(['b', 'a'])
assert np.all(t['a'] == np.array([2, 1, 3, 1, 3, 2]))
assert np.all(t['b'] == np.array([3, 4, 4, 5, 5, 6]))
t.sort(('a', 'b'))
assert np.all(t['a'] == np.array([1, 1, 2, 2, 3, 3]))
assert np.all(t['b'] == np.array([4, 5, 3, 6, 4, 5]))
def test_multiple_with_bytes(self, table_types):
t = table_types.Table()
t.add_column(table_types.Column(name='firstname', data=[b"Max", b"Jo", b"John"]))
t.add_column(table_types.Column(name='name', data=[b"Miller", b"Miller", b"Jackson"]))
t.add_column(table_types.Column(name='tel', data=[12, 15, 19]))
t.sort(['name', 'firstname'])
assert np.all([t['firstname'] == np.array([b"John", b"Jo", b"Max"])])
assert np.all([t['name'] == np.array([b"Jackson", b"Miller", b"Miller"])])
assert np.all([t['tel'] == np.array([19, 15, 12])])
def test_multiple_with_unicode(self, table_types):
# Before Numpy 1.6.2, sorting with multiple column names
# failed when a unicode column was present.
t = table_types.Table()
t.add_column(table_types.Column(
name='firstname',
data=[six.text_type(x) for x in ["Max", "Jo", "John"]]))
t.add_column(table_types.Column(
name='name',
data=[six.text_type(x) for x in ["Miller", "Miller", "Jackson"]]))
t.add_column(table_types.Column(name='tel', data=[12, 15, 19]))
t.sort(['name', 'firstname'])
assert np.all([t['firstname'] == np.array(
[six.text_type(x) for x in ["John", "Jo", "Max"]])])
assert np.all([t['name'] == np.array(
[six.text_type(x) for x in ["Jackson", "Miller", "Miller"]])])
assert np.all([t['tel'] == np.array([19, 15, 12])])
def test_argsort(self, table_types):
t = table_types.Table()
t.add_column(table_types.Column(name='a', data=[2, 1, 3, 2, 3, 1]))
t.add_column(table_types.Column(name='b', data=[6, 5, 4, 3, 5, 4]))
assert np.all(t.argsort() == t.as_array().argsort())
i0 = t.argsort('a')
i1 = t.as_array().argsort(order=['a'])
assert np.all(t['a'][i0] == t['a'][i1])
i0 = t.argsort(['a', 'b'])
i1 = t.as_array().argsort(order=['a', 'b'])
assert np.all(t['a'][i0] == t['a'][i1])
assert np.all(t['b'][i0] == t['b'][i1])
def test_argsort_bytes(self, table_types):
t = table_types.Table()
t.add_column(table_types.Column(name='firstname', data=[b"Max", b"Jo", b"John"]))
t.add_column(table_types.Column(name='name', data=[b"Miller", b"Miller", b"Jackson"]))
t.add_column(table_types.Column(name='tel', data=[12, 15, 19]))
assert np.all(t.argsort(['name', 'firstname']) == np.array([2, 1, 0]))
def test_argsort_unicode(self, table_types):
# Before Numpy 1.6.2, sorting with multiple column names
# failed when a unicode column was present.
t = table_types.Table()
t.add_column(table_types.Column(
name='firstname',
data=[six.text_type(x) for x in ["Max", "Jo", "John"]]))
t.add_column(table_types.Column(
name='name',
data=[six.text_type(x) for x in ["Miller", "Miller", "Jackson"]]))
t.add_column(table_types.Column(name='tel', data=[12, 15, 19]))
assert np.all(t.argsort(['name', 'firstname']) == np.array([2, 1, 0]))
def test_rebuild_column_view_then_rename(self, table_types):
"""
Issue #2039 where renaming fails after any method that calls
_rebuild_table_column_view (this includes sort and add_row).
"""
t = table_types.Table([[1]], names=('a',))
assert t.colnames == ['a']
assert t.dtype.names == ('a',)
t.add_row((2,))
assert t.colnames == ['a']
assert t.dtype.names == ('a',)
t.rename_column('a', 'b')
assert t.colnames == ['b']
assert t.dtype.names == ('b',)
t.sort('b')
assert t.colnames == ['b']
assert t.dtype.names == ('b',)
t.rename_column('b', 'c')
assert t.colnames == ['c']
assert t.dtype.names == ('c',)
@pytest.mark.usefixtures('table_types')
class TestIterator():
def test_iterator(self, table_types):
d = np.array([(2, 1),
(3, 6),
(4, 5)], dtype=[(str('a'), 'i4'), (str('b'), 'i4')])
t = table_types.Table(d)
if t.masked:
with pytest.raises(ValueError):
t[0] == d[0]
else:
for row, np_row in zip(t, d):
assert np.all(row == np_row)
@pytest.mark.usefixtures('table_types')
class TestSetMeta():
def test_set_meta(self, table_types):
d = table_types.Table(names=('a', 'b'))
d.meta['a'] = 1
d.meta['b'] = 1
d.meta['c'] = 1
d.meta['d'] = 1
assert list(d.meta.keys()) == ['a', 'b', 'c', 'd']
@pytest.mark.usefixtures('table_types')
class TestConvertNumpyArray():
def test_convert_numpy_array(self, table_types):
d = table_types.Table([[1, 2], [3, 4]], names=('a', 'b'))
np_data = np.array(d)
if table_types.Table is not MaskedTable:
assert np.all(np_data == d.as_array())
assert np_data is not d.as_array()
assert d.colnames == list(np_data.dtype.names)
np_data = np.array(d, copy=False)
if table_types.Table is not MaskedTable:
assert np.all(np_data == d.as_array())
assert d.colnames == list(np_data.dtype.names)
with pytest.raises(ValueError):
np_data = np.array(d, dtype=[(str('c'), 'i8'), (str('d'), 'i8')])
def test_as_array_byteswap(self, table_types):
"""Test for https://github.com/astropy/astropy/pull/4080"""
byte_orders = ('>', '<')
native_order = byte_orders[sys.byteorder == 'little']
for order in byte_orders:
col = table_types.Column([1.0, 2.0], name='a', dtype=order + 'f8')
t = table_types.Table([col])
arr = t.as_array()
assert arr['a'].dtype.byteorder in (native_order, '=')
arr = t.as_array(keep_byteorder=True)
if order == native_order:
assert arr['a'].dtype.byteorder in (order, '=')
else:
assert arr['a'].dtype.byteorder == order
def test_byteswap_fits_array(self, table_types):
"""
Test for https://github.com/astropy/astropy/pull/4080, demonstrating
that FITS tables are converted to native byte order.
"""
non_native_order = ('>', '<')[sys.byteorder != 'little']
filename = get_pkg_data_filename('data/tb.fits',
'astropy.io.fits.tests')
t = table_types.Table.read(filename)
arr = t.as_array()
for idx in range(len(arr.dtype)):
assert arr.dtype[idx].byteorder != non_native_order
with fits.open(filename) as hdul:
data = hdul[1].data
for colname in data.columns.names:
assert np.all(data[colname] == arr[colname])
arr2 = t.as_array(keep_byteorder=True)
for colname in data.columns.names:
assert (data[colname].dtype.byteorder ==
arr2[colname].dtype.byteorder)
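# Helper for the copy tests below: checks that two tables agree on column
# names, data and meta, and that a deep copy shares no memory with the original.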
def _assert_copies(t, t2, deep=True):
assert t.colnames == t2.colnames
np.testing.assert_array_equal(t.as_array(), t2.as_array())
assert t.meta == t2.meta
for col, col2 in zip(t.columns.values(), t2.columns.values()):
if deep:
assert not np.may_share_memory(col, col2)
else:
assert np.may_share_memory(col, col2)
def test_copy():
t = table.Table([[1, 2, 3], [2, 3, 4]], names=['x', 'y'])
t2 = t.copy()
_assert_copies(t, t2)
def test_copy_masked():
t = table.Table([[1, 2, 3], [2, 3, 4]], names=['x', 'y'], masked=True,
meta={'name': 'test'})
    t['x'].mask = [True, False, True]
t2 = t.copy()
_assert_copies(t, t2)
def test_copy_protocol():
t = table.Table([[1, 2, 3], [2, 3, 4]], names=['x', 'y'])
t2 = copy.copy(t)
t3 = copy.deepcopy(t)
_assert_copies(t, t2, deep=False)
_assert_copies(t, t3)
def test_disallow_inequality_comparisons():
"""
Regression test for #828 - disallow comparison operators on whole Table
"""
t = table.Table()
with pytest.raises(TypeError):
t > 2
with pytest.raises(TypeError):
t < 1.1
with pytest.raises(TypeError):
t >= 5.5
with pytest.raises(TypeError):
t <= -1.1
def test_equality():
t = table.Table.read([' a b c d',
' 2 c 7.0 0',
' 2 b 5.0 1',
' 2 b 6.0 2',
' 2 a 4.0 3',
' 0 a 0.0 4',
' 1 b 3.0 5',
' 1 a 2.0 6',
' 1 a 1.0 7',
], format='ascii')
# All rows are equal
assert np.all(t == t)
# Assert no rows are different
assert not np.any(t != t)
# Check equality result for a given row
assert np.all((t == t[3]) == np.array([0, 0, 0, 1, 0, 0, 0, 0], dtype=bool))
# Check inequality result for a given row
assert np.all((t != t[3]) == np.array([1, 1, 1, 0, 1, 1, 1, 1], dtype=bool))
t2 = table.Table.read([' a b c d',
' 2 c 7.0 0',
' 2 b 5.0 1',
' 3 b 6.0 2',
' 2 a 4.0 3',
' 0 a 1.0 4',
' 1 b 3.0 5',
' 1 c 2.0 6',
' 1 a 1.0 7',
], format='ascii')
# In the above cases, Row.__eq__ gets called, but now need to make sure
# Table.__eq__ also gets called.
assert np.all((t == t2) == np.array([1, 1, 0, 1, 0, 1, 0, 1], dtype=bool))
assert np.all((t != t2) == np.array([0, 0, 1, 0, 1, 0, 1, 0], dtype=bool))
# Check that comparing to a structured array works
assert np.all((t == t2.as_array()) == np.array([1, 1, 0, 1, 0, 1, 0, 1], dtype=bool))
assert np.all((t.as_array() == t2) == np.array([1, 1, 0, 1, 0, 1, 0, 1], dtype=bool))
def test_equality_masked():
t = table.Table.read([' a b c d',
' 2 c 7.0 0',
' 2 b 5.0 1',
' 2 b 6.0 2',
' 2 a 4.0 3',
' 0 a 0.0 4',
' 1 b 3.0 5',
' 1 a 2.0 6',
' 1 a 1.0 7',
], format='ascii')
# Make into masked table
t = table.Table(t, masked=True)
# All rows are equal
assert np.all(t == t)
# Assert no rows are different
assert not np.any(t != t)
# Check equality result for a given row
assert np.all((t == t[3]) == np.array([0, 0, 0, 1, 0, 0, 0, 0], dtype=bool))
# Check inequality result for a given row
assert np.all((t != t[3]) == np.array([1, 1, 1, 0, 1, 1, 1, 1], dtype=bool))
t2 = table.Table.read([' a b c d',
' 2 c 7.0 0',
' 2 b 5.0 1',
' 3 b 6.0 2',
' 2 a 4.0 3',
' 0 a 1.0 4',
' 1 b 3.0 5',
' 1 c 2.0 6',
' 1 a 1.0 7',
], format='ascii')
# In the above cases, Row.__eq__ gets called, but now need to make sure
# Table.__eq__ also gets called.
assert np.all((t == t2) == np.array([1, 1, 0, 1, 0, 1, 0, 1], dtype=bool))
assert np.all((t != t2) == np.array([0, 0, 1, 0, 1, 0, 1, 0], dtype=bool))
# Check that masking a value causes the row to differ
t.mask['a'][0] = True
assert np.all((t == t2) == np.array([0, 1, 0, 1, 0, 1, 0, 1], dtype=bool))
assert np.all((t != t2) == np.array([1, 0, 1, 0, 1, 0, 1, 0], dtype=bool))
# Check that comparing to a structured array works
assert np.all((t == t2.as_array()) == np.array([0, 1, 0, 1, 0, 1, 0, 1], dtype=bool))
@pytest.mark.xfail
def test_equality_masked_bug():
"""
This highlights a Numpy bug. Once it works, it can be moved into the
test_equality_masked test. Related Numpy bug report:
https://github.com/numpy/numpy/issues/3840
"""
t = table.Table.read([' a b c d',
' 2 c 7.0 0',
' 2 b 5.0 1',
' 2 b 6.0 2',
' 2 a 4.0 3',
' 0 a 0.0 4',
' 1 b 3.0 5',
' 1 a 2.0 6',
' 1 a 1.0 7',
], format='ascii')
t = table.Table(t, masked=True)
t2 = table.Table.read([' a b c d',
' 2 c 7.0 0',
' 2 b 5.0 1',
' 3 b 6.0 2',
' 2 a 4.0 3',
' 0 a 1.0 4',
' 1 b 3.0 5',
' 1 c 2.0 6',
' 1 a 1.0 7',
], format='ascii')
assert np.all((t.as_array() == t2) == np.array([0, 1, 0, 1, 0, 1, 0, 1], dtype=bool))
# Check that the meta descriptor is working as expected. The MetaBaseTest class
# takes care of defining all the tests, and we simply have to define the class
# and any minimal set of args to pass.
from ...utils.tests.test_metadata import MetaBaseTest
class TestMetaTable(MetaBaseTest):
test_class = table.Table
args = ()
def test_unicode_column_names(table_types):
"""
Test that unicode column names are accepted. Only do this for
Python 2 since strings are unicode already in Python 3.
"""
if six.PY2:
t = table_types.Table([[1]], names=(six.text_type('a'),))
assert t.colnames == ['a']
t[six.text_type('b')] = 0.0
assert t.colnames == ['a', 'b']
def test_unicode_content():
# If we don't have unicode literals then return
if isinstance('', bytes):
return
# Define unicode literals
string_a = 'астрономическая питона'
string_b = 'миллиарды световых лет'
a = table.Table(
[[string_a, 2],
[string_b, 3]],
names=('a', 'b'))
assert string_a in six.text_type(a)
# This only works because the coding of this file is utf-8, which
# matches the default encoding of Table.__str__
assert string_a.encode('utf-8') in bytes(a)
def test_unicode_policy():
t = table.Table.read([' a b c d',
' 2 c 7.0 0',
' 2 b 5.0 1',
' 2 b 6.0 2',
' 2 a 4.0 3',
' 0 a 0.0 4',
' 1 b 3.0 5',
' 1 a 2.0 6',
' 1 a 1.0 7',
], format='ascii')
assert_follows_unicode_guidelines(t)
def test_unicode_bytestring_conversion(table_types):
t = table_types.Table([['abc'], ['def'], [1]], dtype=('S', 'U', 'i'))
assert t['col0'].dtype.kind == 'S'
assert t['col1'].dtype.kind == 'U'
assert t['col2'].dtype.kind == 'i'
t1 = t.copy()
t1.convert_unicode_to_bytestring()
assert t1['col0'].dtype.kind == 'S'
assert t1['col1'].dtype.kind == 'S'
assert t1['col2'].dtype.kind == 'i'
assert t1['col0'][0] == 'abc'
assert t1['col1'][0] == 'def'
assert t1['col2'][0] == 1
t1 = t.copy()
t1.convert_bytestring_to_unicode()
assert t1['col0'].dtype.kind == 'U'
assert t1['col1'].dtype.kind == 'U'
assert t1['col2'].dtype.kind == 'i'
assert t1['col0'][0] == six.text_type('abc')
assert t1['col1'][0] == six.text_type('def')
assert t1['col2'][0] == 1
def test_table_deletion():
"""
Regression test for the reference cycle discussed in
https://github.com/astropy/astropy/issues/2877
"""
deleted = set()
# A special table subclass which leaves a record when it is finalized
class TestTable(table.Table):
def __del__(self):
deleted.add(id(self))
t = TestTable({'a': [1, 2, 3]})
the_id = id(t)
assert t['a'].parent_table is t
del t
# Cleanup
gc.collect()
assert the_id in deleted
def test_nested_iteration():
"""
Regression test for issue 3358 where nested iteration over a single table fails.
"""
t = table.Table([[0, 1]], names=['a'])
out = []
for r1 in t:
for r2 in t:
out.append((r1['a'], r2['a']))
assert out == [(0, 0), (0, 1), (1, 0), (1, 1)]
def test_table_init_from_degenerate_arrays(table_types):
t = table_types.Table(np.array([]))
assert len(t.columns) == 0
with pytest.raises(ValueError):
t = table_types.Table(np.array(0))
t = table_types.Table(np.array([1, 2, 3]))
assert len(t.columns) == 3
@pytest.mark.skipif('not HAS_PANDAS')
class TestPandas(object):
def test_simple(self):
t = table.Table()
for endian in ['<', '>']:
for kind in ['f', 'i']:
for byte in ['2', '4', '8']:
dtype = np.dtype(endian + kind + byte)
x = np.array([1, 2, 3], dtype=dtype)
t[endian + kind + byte] = x
t['u'] = ['a', 'b', 'c']
t['s'] = ['a', 'b', 'c']
d = t.to_pandas()
for column in t.columns:
if column == 'u':
assert np.all(t['u'] == np.array(['a', 'b', 'c']))
assert d[column].dtype == np.dtype("O") # upstream feature of pandas
elif column == 's':
assert np.all(t['s'] == np.array(['a', 'b', 'c']))
assert d[column].dtype == np.dtype("O") # upstream feature of pandas
else:
# We should be able to compare exact values here
assert np.all(t[column] == d[column])
if t[column].dtype.byteorder in ('=', '|'):
assert d[column].dtype == t[column].dtype
else:
assert d[column].dtype == t[column].byteswap().newbyteorder().dtype
# Regression test for astropy/astropy#1156 - the following code gave a
# ValueError: Big-endian buffer not supported on little-endian
# compiler. We now automatically swap the endian-ness to native order
# upon adding the arrays to the data frame.
d[['<i4', '>i4']]
d[['<f4', '>f4']]
t2 = table.Table.from_pandas(d)
for column in t.columns:
if column in ('u', 's'):
assert np.all(t[column] == t2[column])
else:
assert_allclose(t[column], t2[column])
if t[column].dtype.byteorder in ('=', '|'):
assert t[column].dtype == t2[column].dtype
else:
assert t[column].byteswap().newbyteorder().dtype == t2[column].dtype
def test_2d(self):
t = table.Table()
t['a'] = [1, 2, 3]
t['b'] = np.ones((3, 2))
with pytest.raises(ValueError) as exc:
t.to_pandas()
assert exc.value.args[0] == "Cannot convert a table with multi-dimensional columns to a pandas DataFrame"
def test_mixin(self):
from ...coordinates import SkyCoord
t = table.Table()
t['c'] = SkyCoord([1, 2, 3], [4, 5, 6], unit='deg')
with pytest.raises(ValueError) as exc:
t.to_pandas()
assert exc.value.args[0] == "Cannot convert a table with mixin columns to a pandas DataFrame"
def test_masking(self):
t = table.Table(masked=True)
t['a'] = [1, 2, 3]
t['a'].mask = [True, False, True]
t['b'] = [1., 2., 3.]
t['b'].mask = [False, False, True]
t['u'] = ['a', 'b', 'c']
t['u'].mask = [False, True, False]
t['s'] = ['a', 'b', 'c']
t['s'].mask = [False, True, False]
d = t.to_pandas()
t2 = table.Table.from_pandas(d)
for name, column in t.columns.items():
assert np.all(column.data == t2[name].data)
assert np.all(column.mask == t2[name].mask)
# Masked integer type comes back as float. Nothing we can do about this.
if column.dtype.kind == 'i':
assert t2[name].dtype.kind == 'f'
else:
if column.dtype.byteorder in ('=', '|'):
assert column.dtype == t2[name].dtype
else:
assert column.byteswap().newbyteorder().dtype == t2[name].dtype
@pytest.mark.usefixtures('table_types')
class TestReplaceColumn(SetupData):
def test_fail_replace_column(self, table_types):
"""Raise exception when trying to replace column via table.columns object"""
self._setup(table_types)
t = table_types.Table([self.a, self.b])
with pytest.raises(ValueError):
t.columns['a'] = [1, 2, 3]
with pytest.raises(ValueError):
t.replace_column('not there', [1, 2, 3])
def test_replace_column(self, table_types):
"""Replace existing column with a new column"""
self._setup(table_types)
t = table_types.Table([self.a, self.b])
ta = t['a']
tb = t['b']
vals = [1.2, 3.4, 5.6]
for col in (vals,
table_types.Column(vals),
table_types.Column(vals, name='a'),
table_types.Column(vals, name='b')):
t.replace_column('a', col)
assert np.all(t['a'] == vals)
assert t['a'] is not ta # New a column
assert t['b'] is tb # Original b column unchanged
assert t.colnames == ['a', 'b']
assert t['a'].meta == {}
assert t['a'].format is None
def test_replace_index_column(self, table_types):
"""Replace index column and generate expected exception"""
self._setup(table_types)
t = table_types.Table([self.a, self.b])
t.add_index('a')
with pytest.raises(ValueError) as err:
t.replace_column('a', [1, 2, 3])
assert err.value.args[0] == 'cannot replace a table index column'
class Test__Astropy_Table__():
"""
Test initializing a Table subclass from a table-like object that
implements the __astropy_table__ interface method.
"""
class SimpleTable(object):
def __init__(self):
self.columns = [[1, 2, 3],
[4, 5, 6],
[7, 8, 9] * u.m]
self.names = ['a', 'b', 'c']
self.meta = OrderedDict([('a', 1), ('b', 2)])
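        # Table(st) detects the __astropy_table__ method below and delegates
        # column construction to it, passing the target class, the copy flag
        # and any extra keyword arguments supplied to the Table initializer.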
def __astropy_table__(self, cls, copy, **kwargs):
a, b, c = self.columns
c.info.name = 'c'
cols = [table.Column(a, name='a'),
table.MaskedColumn(b, name='b'),
c]
names = [col.info.name for col in cols]
return cls(cols, names=names, copy=copy, meta=kwargs or self.meta)
def test_simple_1(self):
"""Make a SimpleTable and convert to Table, QTable with copy=False, True"""
for table_cls in (table.Table, table.QTable):
col_c_class = u.Quantity if table_cls is table.QTable else table.MaskedColumn
for cpy in (False, True):
st = self.SimpleTable()
# Test putting in a non-native kwarg `extra_meta` to Table initializer
t = table_cls(st, copy=cpy, extra_meta='extra!')
assert t.colnames == ['a', 'b', 'c']
assert t.meta == {'extra_meta': 'extra!'}
assert np.all(t['a'] == st.columns[0])
assert np.all(t['b'] == st.columns[1])
vals = t['c'].value if table_cls is table.QTable else t['c']
assert np.all(st.columns[2].value == vals)
assert isinstance(t['a'], table.MaskedColumn)
assert isinstance(t['b'], table.MaskedColumn)
assert isinstance(t['c'], col_c_class)
assert t['c'].unit is u.m
assert type(t) is table_cls
# Copy being respected?
t['a'][0] = 10
                assert st.columns[0][0] == (1 if cpy else 10)
def test_simple_2(self):
"""Test converting a SimpleTable and changing column names and types"""
st = self.SimpleTable()
dtypes = [np.int32, np.float32, np.float16]
names = ['a', 'b', 'c']
t = table.Table(st, dtype=dtypes, names=names, meta=OrderedDict([('c', 3)]))
assert t.colnames == names
assert all(col.dtype.type is dtype
for col, dtype in zip(t.columns.values(), dtypes))
# The supplied meta is ignored. This is consistent with current
# behavior when initializing from an existing astropy Table.
assert t.meta == st.meta
def test_kwargs_exception(self):
"""If extra kwargs provided but without initializing with a table-like
object, exception is raised"""
with pytest.raises(TypeError) as err:
table.Table([[1]], extra_meta='extra!')
assert '__init__() got unexpected keyword argument' in str(err)
def test_replace_column_qtable():
"""Replace existing Quantity column with a new column in a QTable"""
a = [1, 2, 3] * u.m
b = [4, 5, 6]
t = table.QTable([a, b], names=['a', 'b'])
ta = t['a']
tb = t['b']
ta.info.meta = {'aa': [0, 1, 2, 3, 4]}
ta.info.format = '%f'
t.replace_column('a', a.to('cm'))
assert np.all(t['a'] == ta)
assert t['a'] is not ta # New a column
assert t['b'] is tb # Original b column unchanged
assert t.colnames == ['a', 'b']
assert t['a'].info.meta is None
assert t['a'].info.format is None
def test_replace_update_column_via_setitem():
"""
Test table update like ``t['a'] = value``. This leverages off the
already well-tested ``replace_column`` and in-place update
``t['a'][:] = value``, so this testing is fairly light.
"""
a = [1, 2] * u.m
b = [3, 4]
t = table.QTable([a, b], names=['a', 'b'])
assert isinstance(t['a'], u.Quantity)
# Inplace update
ta = t['a']
t['a'] = 5 * u.m
assert np.all(t['a'] == [5, 5] * u.m)
assert t['a'] is ta
# Replace
t['a'] = [5, 6]
assert np.all(t['a'] == [5, 6])
assert isinstance(t['a'], table.Column)
assert t['a'] is not ta
def test_replace_update_column_via_setitem_warnings_normal():
"""
Test warnings related to table replace change in #5556:
Normal warning-free replace
"""
t = table.Table([[1, 2, 3], [4, 5, 6]], names=['a', 'b'])
with catch_warnings() as w:
with table.conf.set_temp('replace_warnings',
['refcount', 'attributes', 'slice']):
t['a'] = 0 # in-place update
assert len(w) == 0
t['a'] = [10, 20, 30] # replace column
assert len(w) == 0
def test_replace_update_column_via_setitem_warnings_slice():
"""
Test warnings related to table replace change in #5556:
Replace a slice, one warning.
"""
t = table.Table([[1, 2, 3], [4, 5, 6]], names=['a', 'b'])
with catch_warnings() as w:
with table.conf.set_temp('replace_warnings',
['refcount', 'attributes', 'slice']):
t2 = t[:2]
t2['a'] = 0 # in-place slice update
assert np.all(t['a'] == [0, 0, 3])
assert len(w) == 0
t2['a'] = [10, 20] # replace slice
assert len(w) == 1
assert "replaced column 'a' which looks like an array slice" in str(w[0].message)
def test_replace_update_column_via_setitem_warnings_attributes():
"""
Test warnings related to table replace change in #5556:
Lost attributes.
"""
t = table.Table([[1, 2, 3], [4, 5, 6]], names=['a', 'b'])
t['a'].unit = 'm'
with catch_warnings() as w:
with table.conf.set_temp('replace_warnings',
['refcount', 'attributes', 'slice']):
t['a'] = [10, 20, 30]
assert len(w) == 1
assert "replaced column 'a' and column attributes ['unit']" in str(w[0].message)
def test_replace_update_column_via_setitem_warnings_refcount():
"""
Test warnings related to table replace change in #5556:
Reference count changes.
"""
t = table.Table([[1, 2, 3], [4, 5, 6]], names=['a', 'b'])
ta = t['a'] # Generate an extra reference to original column
with catch_warnings() as w:
with table.conf.set_temp('replace_warnings',
['refcount', 'attributes', 'slice']):
t['a'] = [10, 20, 30]
assert len(w) == 1
assert "replaced column 'a' and the number of references" in str(w[0].message)
def test_replace_update_column_via_setitem_warnings_always():
"""
Test warnings related to table replace change in #5556:
Test 'always' setting that raises warning for any replace.
"""
t = table.Table([[1, 2, 3], [4, 5, 6]], names=['a', 'b'])
with catch_warnings() as w:
with table.conf.set_temp('replace_warnings', ['always']):
t['a'] = 0 # in-place slice update
assert len(w) == 0
from inspect import currentframe, getframeinfo
frameinfo = getframeinfo(currentframe())
t['a'] = [10, 20, 30] # replace column
assert len(w) == 1
assert "replaced column 'a'" == str(w[0].message)
# Make sure the warning points back to the user code line
assert w[0].lineno == frameinfo.lineno + 1
assert w[0].category is table.TableReplaceWarning
assert 'test_table' in w[0].filename
def test_replace_update_column_via_setitem_replace_inplace():
"""
Test the replace_inplace config option related to #5556. In this
case no replace is done.
"""
t = table.Table([[1, 2, 3], [4, 5, 6]], names=['a', 'b'])
ta = t['a']
t['a'].unit = 'm'
with catch_warnings() as w:
with table.conf.set_temp('replace_inplace', True):
with table.conf.set_temp('replace_warnings',
['always', 'refcount', 'attributes', 'slice']):
t['a'] = 0 # in-place update
assert len(w) == 0
assert ta is t['a']
t['a'] = [10, 20, 30] # normally replaces column, but not now
assert len(w) == 0
assert ta is t['a']
assert np.all(t['a'] == [10, 20, 30])
def test_primary_key_is_inherited():
"""Test whether a new Table inherits the primary_key attribute from
its parent Table. Issue #4672"""
t = table.Table([(2, 3, 2, 1), (8, 7, 6, 5)], names=('a', 'b'))
t.add_index('a')
original_key = t.primary_key
# can't test if tuples are equal, so just check content
    assert original_key[0] == 'a'
t2 = t[:]
t3 = t.copy()
t4 = table.Table(t)
# test whether the reference is the same in the following
assert original_key == t2.primary_key
assert original_key == t3.primary_key
assert original_key == t4.primary_key
# just test one element, assume rest are equal if assert passes
assert t.loc[1] == t2.loc[1]
assert t.loc[1] == t3.loc[1]
assert t.loc[1] == t4.loc[1]
def test_qtable_read_for_ipac_table_with_char_columns():
'''Test that a char column of a QTable is assigned no unit and not
a dimensionless unit, otherwise conversion of reader output to
QTable fails.'''
t1 = table.QTable([["A"]], names="B")
out = StringIO()
t1.write(out, format="ascii.ipac")
t2 = table.QTable.read(out.getvalue(), format="ascii.ipac", guess=False)
assert t2["B"].unit is None
| bsd-3-clause |
alexeyum/scikit-learn | examples/linear_model/plot_ridge_path.py | 55 | 2138 | """
===========================================================
Plot Ridge coefficients as a function of the regularization
===========================================================
Shows the effect of collinearity in the coefficients of an estimator.
.. currentmodule:: sklearn.linear_model
:class:`Ridge` Regression is the estimator used in this example.
Each color represents a different feature of the
coefficient vector, and this is displayed as a function of the
regularization parameter.
This example also shows the usefulness of applying Ridge regression
to highly ill-conditioned matrices. For such matrices, a slight
change in the target variable can cause huge variances in the
calculated weights. In such cases, it is useful to set a certain
regularization (alpha) to reduce this variation (noise).
When alpha is very large, the regularization effect dominates the
squared loss function and the coefficients tend to zero.
At the end of the path, as alpha tends toward zero
and the solution tends towards the ordinary least squares, coefficients
exhibit big oscillations. In practice it is necessary to tune alpha
in such a way that a balance is maintained between both.
"""
# Author: Fabian Pedregosa -- <[email protected]>
# License: BSD 3 clause
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from sklearn import linear_model
# X is the 10x10 Hilbert matrix
X = 1. / (np.arange(1, 11) + np.arange(0, 10)[:, np.newaxis])
y = np.ones(10)
###############################################################################
# Compute paths
n_alphas = 200
alphas = np.logspace(-10, -2, n_alphas)
clf = linear_model.Ridge(fit_intercept=False)
coefs = []
for a in alphas:
clf.set_params(alpha=a)
clf.fit(X, y)
coefs.append(clf.coef_)
###############################################################################
# Display results
ax = plt.gca()
ax.plot(alphas, coefs)
ax.set_xscale('log')
ax.set_xlim(ax.get_xlim()[::-1]) # reverse axis
plt.xlabel('alpha')
plt.ylabel('weights')
plt.title('Ridge coefficients as a function of the regularization')
plt.axis('tight')
plt.show()
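###############################################################################
# Hedged addition (not part of the original example): the docstring notes that
# alpha must be tuned so that data fit and shrinkage stay balanced. A minimal
# sketch of doing that with cross-validated ridge regression on the same data,
# reusing the alpha grid defined above (RidgeCV and its alphas/fit_intercept
# arguments are standard scikit-learn API):
ridge_cv = linear_model.RidgeCV(alphas=alphas, fit_intercept=False)
ridge_cv.fit(X, y)
print("alpha selected by RidgeCV: %g" % ridge_cv.alpha_)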
| bsd-3-clause |
peterfpeterson/mantid | qt/applications/workbench/workbench/plotting/mantidfigurecanvas.py | 3 | 2785 | # Mantid Repository : https://github.com/mantidproject/mantid
#
# Copyright © 2020 ISIS Rutherford Appleton Laboratory UKRI,
# NScD Oak Ridge National Laboratory, European Spallation Source,
# Institut Laue - Langevin & CSNS, Institute of High Energy Physics, CAS
# SPDX - License - Identifier: GPL - 3.0 +
# This file is part of the mantid workbench.
"""
Qt-based matplotlib canvas
"""
from qtpy.QtCore import Qt
from qtpy.QtGui import QPen
from matplotlib.backends.backend_qt5agg import ( # noqa: F401
FigureCanvasQTAgg, draw_if_interactive, show)
from mantid.plots.mantidimage import MantidImage, ImageIntensity
class MantidFigureCanvas(FigureCanvasQTAgg):
def __init__(self, figure):
super().__init__(figure=figure)
self._pen_color = Qt.black
self._pen_thickness = 1.5
# options controlling the pen used by tools that manipulate the graph - e.g the zoom box
@property
def pen_color(self):
return self._pen_color
@pen_color.setter
def pen_color(self, color):
self._pen_color = color
@property
def pen_thickness(self):
return self._pen_thickness
@pen_thickness.setter
def pen_thickness(self, thickness):
self._pen_thickness = thickness
# Method used by the zoom box tool on the matplotlib toolbar
def drawRectangle(self, rect):
self.update_pen_color()
# Draw the zoom rectangle to the QPainter. _draw_rect_callback needs
# to be called at the end of paintEvent.
if rect is not None:
def _draw_rect_callback(painter):
pen = QPen(self.pen_color, self.pen_thickness / self._dpi_ratio, Qt.DotLine)
painter.setPen(pen)
painter.drawRect(*(pt / self._dpi_ratio for pt in rect))
else:
def _draw_rect_callback(painter):
return
self._draw_rect_callback = _draw_rect_callback
self.update()
def update_pen_color(self):
"""Update the pen color used to draw tool in the matplotlib toolbar, e.g
the zoombox. The color is automatically determined
by considering how dark, or light the image is and setting a pen appropriately.
Only works if the figure contains a MantidImage.
"""
for ax in self.figure.get_axes():
for img in ax.get_images():
if (not isinstance(img, MantidImage)):
continue
intensity = img.calculate_greyscale_intensity()
if intensity == ImageIntensity.DARK:
color = Qt.white
else:
color = Qt.black
self.pen_color = color
# break after we find the first MantidImage
break
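# Usage sketch (illustrative addition, not executed): given a matplotlib
# Figure `fig` inside a running Qt application, the canvas is created and its
# zoom-box pen adjusted through the properties defined above, e.g.
#
#     canvas = MantidFigureCanvas(fig)
#     canvas.pen_thickness = 2.0     # thicker zoom rectangle outline
#     canvas.pen_color = Qt.red      # update_pen_color() may override this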
| gpl-3.0 |
kedz/cuttsum | wp-scripts/make-lm-dev.py | 1 | 6784 | import corenlp as cnlp
import re
import os
import gzip
import cuttsum.events
import cuttsum.judgements
import pandas as pd
from collections import defaultdict
matches_df = cuttsum.judgements.get_merged_dataframe()
def heal_text(sent_text):
sent_text = sent_text.decode("utf-8")
sent_text = re.sub(
ur"[a-z ]+, [a-z][a-z ]+\( [a-z]+ \) [-\u2014_]+ ",
r"", sent_text)
sent_text = re.sub(
ur"^.*?[a-z ]+, [a-z][a-z]+ [-\u2014_]+ ",
r"", sent_text)
sent_text = re.sub(
ur"^.*?[a-z ]+\([^\)]+\) [-\u2014_]+ ",
r"", sent_text)
sent_text = re.sub(
ur"^.*?[a-z]+ +[-\u2014_]+ ",
r"", sent_text)
sent_text = re.sub(r"\([^)]+\)", r" ", sent_text)
sent_text = re.sub(ur"^ *[-\u2014_]+", r"", sent_text)
sent_text = re.sub(u" ([,.;?!]+)([\"\u201c\u201d'])", r"\1\2", sent_text)
sent_text = re.sub(r" ([:-]) ", r"\1", sent_text)
sent_text = re.sub(r"([^\d]\d{1,3}) , (\d\d\d)([^\d]|$)", r"\1,\2\3", sent_text)
sent_text = re.sub(r"^(\d{1,3}) , (\d\d\d)([^\d]|$)", r"\1,\2\3", sent_text)
sent_text = re.sub(ur" ('|\u2019) ([a-z]|ll|ve|re)( |$)", r"\1\2 ", sent_text)
sent_text = re.sub(r" ([',.;?!]+) ", r"\1 ", sent_text)
sent_text = re.sub(r" ([',.;?!]+)$", r"\1", sent_text)
sent_text = re.sub(r"(\d\.) (\d)", r"\1\2", sent_text)
sent_text = re.sub(r"(a|p)\. m\.", r"\1.m.", sent_text)
sent_text = re.sub(r"u\. (s|n)\.", r"u.\1.", sent_text)
sent_text = re.sub(
ur"\u201c ([^\s])",
ur"\u201c\1", sent_text)
sent_text = re.sub(
ur"([^\s]) \u201d",
ur"\1\u201d", sent_text)
sent_text = re.sub(
ur"\u2018 ([^\s])",
ur"\u2018\1", sent_text)
sent_text = re.sub(
ur"([^\s]) \u2019",
ur"\1\u2019", sent_text)
sent_text = re.sub(
ur"\u00e2",
ur"'", sent_text)
sent_text = re.sub(
r"^photo:reuters|^photo:ap",
r"", sent_text)
sent_text = sent_text.replace("\n", " ")
return sent_text.encode("utf-8")
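# Illustrative note (added): heal_text is intended to be mapped element-wise
# over a pandas Series of raw update strings, stripping newswire datelines and
# parentheticals and re-attaching punctuation, e.g.
#
#     updates["text"] = updates["text"].apply(heal_text)
#
# which mirrors the commented-out call and the .apply(heal_text) usage below.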
nuggets = cuttsum.judgements.get_nuggets()
updates = pd.concat([
cuttsum.judgements.get_2013_updates(),
cuttsum.judgements.get_2014_sampled_updates()
])
#updates["text"] = updates["text"].apply(heal_text)
dom2type = {
"accidents": set(["accident"]),
"natural-disasters": set(["earthquake", "storm", "impact event"]),
"social-unrest": set(["protest", "riot"]),
"terrorism": set(["shooting", "bombing", "conflict", "hostage"]),
}
def tokenize(docs, norm, stop, ne, central_per=None, central_loc=None, central_org=None):
if stop:
with open("stopwords.txt", "r") as f:
sw = set([word.strip().decode("utf-8").lower() for word in f])
if norm == "stem":
from nltk.stem.porter import PorterStemmer
stemmer = PorterStemmer()
all_toks = []
for doc in docs:
toks = []
for sent in doc:
if norm == "lemma":
stoks = [unicode(tok.lem).lower() for tok in sent]
elif norm == "stem":
stoks = [stemmer.stem(unicode(tok).lower())
for tok in sent]
else:
stoks = [unicode(tok).lower() for tok in sent]
if stop:
toks.extend([tok for tok in stoks if tok not in sw])
else:
toks.extend(stoks)
toks = [tok for tok in toks if len(tok) < 50]
#if len(toks) == 0: continue
string = u" ".join(toks).encode("utf-8")
#print string
all_toks.append(string)
return all_toks
def find_central_nes(docs):
per_counts = defaultdict(int)
org_counts = defaultdict(int)
loc_counts = defaultdict(int)
for doc in docs:
for sent in doc:
for tok in sent:
if tok.ne == "PERSON":
per_counts[unicode(tok.lem).lower()] += 1
elif tok.ne == "LOCATION":
loc_counts[unicode(tok.lem).lower()] += 1
elif tok.ne == "ORGANIZATION":
org_counts[unicode(tok.lem).lower()] += 1
if len(per_counts) > 0:
        central_per = max(per_counts.items(), key=lambda x: x[1])[0]
else:
central_per = None
if len(org_counts) > 0:
        central_org = max(org_counts.items(), key=lambda x: x[1])[0]
else:
central_org = None
if len(loc_counts) > 0:
        central_loc = max(loc_counts.items(), key=lambda x: x[1])[0]
else:
central_loc = None
return central_per, central_loc, central_org
def main(output_path, norm, stop):
dirname, fname = os.path.split(output_path)
if dirname != "" and not os.path.exists(dirname):
os.makedirs(dirname)
output_path = os.path.join(
dirname,
"{}.norm-{}{}.spl.gz".format(
fname, norm, ".stop" if stop else ""))
print "Domain: {}".format(fname)
print "Output Path: {}".format(output_path)
events = [event for event in cuttsum.events.get_events()
if event.type in dom2type[fname] and event.query_num < 26]
ne = False
#if ne is True:
# annotators = ["tokenize", "ssplit", "pos", "lemma", "ner"]
if norm == "lemma":
annotators = ["tokenize", "ssplit", "pos", "lemma"]
else:
annotators = ["tokenize", "ssplit"]
with cnlp.Server(annotators=annotators, mem="6G",
port=2001, max_message_len=1000000) as client, \
gzip.open(output_path, "w") as f:
query_ids = set([event.query_id for event in events])
updates = matches_df[matches_df["query id"].apply(lambda x: x in query_ids)]
texts = updates.drop_duplicates(subset='update id')["update text"].apply(heal_text).tolist()
central_per = None
central_loc = None
central_org = None
print "processing update text"
docs = [client.annotate(text) for text in texts]
for doc in docs[:10]:
print doc
print "tokenizing"
X_upd_txt = tokenize(docs, norm, stop, ne,
central_per=central_per, central_loc=central_loc,
central_org=central_org)
print "writing"
for line in X_upd_txt:
f.write(line + "\n")
if __name__ == u"__main__":
import argparse
parser = argparse.ArgumentParser()
parser.add_argument("--output", type=str, required=False, default=None)
parser.add_argument("--norm", choices=["stem", "lemma", "none"], type=str, required=True)
parser.add_argument("--stop", action="store_true", default=False)
args = parser.parse_args()
main(args.output, args.norm, args.stop)
| apache-2.0 |
gVallverdu/pymatgen | pymatgen/io/lammps/tests/test_outputs.py | 4 | 6962 | # coding: utf-8
# Copyright (c) Pymatgen Development Team.
# Distributed under the terms of the MIT License.
import unittest
import os
import json
import numpy as np
import pandas as pd
from pymatgen.io.lammps.outputs import LammpsDump, parse_lammps_dumps, \
parse_lammps_log
test_dir = os.path.join(os.path.dirname(__file__), "..", "..", "..", "..",
"test_files", "lammps")
class LammpsDumpTest(unittest.TestCase):
@classmethod
def setUpClass(cls):
with open(os.path.join(test_dir, "dump.rdx_wc.100")) as f:
rdx_str = f.read()
cls.rdx = LammpsDump.from_string(string=rdx_str)
with open(os.path.join(test_dir, "dump.tatb")) as f:
tatb_str = f.read()
cls.tatb = LammpsDump.from_string(string=tatb_str)
def test_from_string(self):
self.assertEqual(self.rdx.timestep, 100)
self.assertEqual(self.rdx.natoms, 21)
np.testing.assert_array_equal(self.rdx.box.bounds,
np.array([(35, 48)] * 3))
np.testing.assert_array_equal(self.rdx.data.columns,
["id", "type", "xs", "ys", "zs"])
rdx_data = self.rdx.data.iloc[-1]
rdx_data_target = [19, 2, 0.42369, 0.47347, 0.555425]
np.testing.assert_array_almost_equal(rdx_data, rdx_data_target)
self.assertEqual(self.tatb.timestep, 0)
self.assertEqual(self.tatb.natoms, 384)
bounds = [[0, 13.624], [0, 17.1149153805], [0, 15.1826391451]]
np.testing.assert_array_almost_equal(self.tatb.box.bounds, bounds)
tilt = [-5.75315630927, -6.325466, 7.4257288]
np.testing.assert_array_almost_equal(self.tatb.box.tilt, tilt)
np.testing.assert_array_equal(self.tatb.data.columns,
["id", "type", "q", "x", "y", "z"])
tatb_data = self.tatb.data.iloc[-1]
tatb_data_target = [356, 3, -0.482096, 2.58647, 12.9577, 14.3143]
np.testing.assert_array_almost_equal(tatb_data, tatb_data_target)
def test_json_dict(self):
encoded = json.dumps(self.rdx.as_dict())
decoded = json.loads(encoded)
rdx = LammpsDump.from_dict(decoded)
self.assertEqual(rdx.timestep, 100)
self.assertEqual(rdx.natoms, 21)
np.testing.assert_array_equal(rdx.box.bounds,
np.array([(35, 48)] * 3))
pd.testing.assert_frame_equal(rdx.data, self.rdx.data)
class FuncTest(unittest.TestCase):
def test_parse_lammps_dumps(self):
# gzipped
rdx_10_pattern = os.path.join(test_dir, "dump.rdx.gz")
rdx_10 = list(parse_lammps_dumps(file_pattern=rdx_10_pattern))
timesteps_10 = [d.timestep for d in rdx_10]
np.testing.assert_array_equal(timesteps_10, np.arange(0, 101, 10))
self.assertTupleEqual(rdx_10[-1].data.shape, (21, 5))
# wildcard
rdx_25_pattern = os.path.join(test_dir, "dump.rdx_wc.*")
rdx_25 = list(parse_lammps_dumps(file_pattern=rdx_25_pattern))
timesteps_25 = [d.timestep for d in rdx_25]
np.testing.assert_array_equal(timesteps_25, np.arange(0, 101, 25))
self.assertTupleEqual(rdx_25[-1].data.shape, (21, 5))
def test_parse_lammps_log(self):
comb_file = "log.5Oct16.comb.Si.elastic.g++.1"
comb = parse_lammps_log(filename=os.path.join(test_dir, comb_file))
self.assertEqual(len(comb), 6)
# first comb run
comb0 = comb[0]
np.testing.assert_array_equal(["Step", "Temp", "TotEng", "PotEng",
"E_vdwl", "E_coul"], comb0.columns)
self.assertEqual(len(comb0), 6)
comb0_data = [[0, 1, -4.6295947, -4.6297237, -4.6297237, 0],
[5, 1, -4.6295965, -4.6297255, -4.6297255, 0]]
np.testing.assert_array_almost_equal(comb0.iloc[[0, -1]], comb0_data)
# final comb run
comb_1 = comb[-1]
np.testing.assert_array_equal(["Step", "Lx", "Ly", "Lz",
"Xy", "Xz", "Yz",
"c_fxy[1]", "c_fxy[2]", "c_fxy[3]",
"c_fxy[4]", "c_fxy[5]", "c_fxy[6]"],
comb_1.columns)
self.assertEqual(len(comb_1), 11)
comb_1_data = [[36, 5.1293854e-06], [46, 2192.8256]]
np.testing.assert_array_almost_equal(comb_1.iloc[[0, -1], [0, -3]],
comb_1_data)
ehex_file = "log.13Oct16.ehex.g++.8"
ehex = parse_lammps_log(filename=os.path.join(test_dir, ehex_file))
self.assertEqual(len(ehex), 3)
ehex0, ehex1, ehex2 = ehex
# ehex run #1
np.testing.assert_array_equal(["Step", "Temp", "E_pair", "E_mol",
"TotEng", "Press"], ehex0.columns)
self.assertEqual(len(ehex0), 11)
ehex0_data = [[0, 1.35, -4.1241917, 0, -2.0994448, -3.1961612],
[1000, 1.3732017, -3.7100044, 0,
-1.6504594, 0.83982701]]
np.testing.assert_array_almost_equal(ehex0.iloc[[0, -1]], ehex0_data)
# ehex run #2
np.testing.assert_array_equal(["Step", "Temp", "c_Thot", "c_Tcold"],
ehex1.columns)
self.assertEqual(len(ehex1), 11)
ehex1_data = [[1000, 1.35, 1.431295, 1.2955644],
[11000, 1.3794051, 1.692299, 1.0515688]]
np.testing.assert_array_almost_equal(ehex1.iloc[[0, -1]], ehex1_data)
# ehex run #3
np.testing.assert_array_equal(["Step", "Temp", "c_Thot", "c_Tcold",
"v_tdiff", "f_ave"], ehex2.columns)
self.assertEqual(len(ehex2), 21)
ehex2_data = [[11000, 1.3794051, 1.6903393, 1.0515688, 0, 0],
[31000, 1.3822489, 1.8220413, 1.0322271, -0.7550338,
-0.76999077]]
np.testing.assert_array_almost_equal(ehex2.iloc[[0, -1]], ehex2_data)
peptide_file = "log.5Oct16.peptide.g++.1"
peptide = parse_lammps_log(filename=os.path.join(test_dir,
peptide_file))
peptide0 = peptide[0]
np.testing.assert_array_equal(["Step", "TotEng", "KinEng", "Temp",
"PotEng", "E_bond", "E_angle",
"E_dihed", "E_impro", "E_vdwl",
"E_coul", "E_long", "Press"],
peptide0.columns)
self.assertEqual(len(peptide0), 7)
peptide0_select = peptide0.loc[[0, 6], ["Step", "TotEng", "Press"]]
peptide0_data = [[0, -5237.4580, -837.0112],
[300, -5251.3637, -471.5505]]
np.testing.assert_array_almost_equal(peptide0_select, peptide0_data)
if __name__ == "__main__":
unittest.main()
| mit |
fegonda/icon_demo | code/model/deleteme/cnn_model.py | 1 | 4305 | #---------------------------------------------------------------------------
# Utility.py
#
# Author : Felix Gonda
# Date : July 10, 2015
# School : Harvard University
#
# Project : Master Thesis
# An Interactive Deep Learning Toolkit for
# Automatic Segmentation of Images
#
# Summary : This file contains utility functions for reading, writing, and
# processing images.
#---------------------------------------------------------------------------
import os
import sys
import time
import ConfigParser
import pandas as pd
import numpy as np
import theano
import theano.tensor as T
import cPickle
theano.config.floatX = 'float32'
base_path = os.path.dirname(__file__)
sys.path.insert(1,os.path.join(base_path, '../external'))
sys.path.insert(2,os.path.join(base_path, '../common'))
sys.path
from logistic_sgd import LogisticRegression
from mlp import HiddenLayer
from mlp_model import MLP_Model
from lenet import LeNetConvPoolLayer
from activation_functions import rectified_linear
class CNN_Model(object):
def __init__(self,
input,
batch_size,
patchSize,
rng,
nkerns,
kernelSizes,
hiddenSizes,
fileName=None,
activation=rectified_linear):
self.convLayers = []
self.trainingCost = []
self.validationError = []
self.nkerns = nkerns
self.kernelSizes = kernelSizes
self.hiddenSizes = hiddenSizes
self.patchSize = patchSize
self.batch_size = batch_size
input = input.reshape((self.batch_size, 1, self.patchSize, self.patchSize))
self.layer0_input = input
self.params = []
input_next = input
numberOfFeatureMaps = 1
featureMapSize = patchSize
for i in range(len(nkerns)):
layer = LeNetConvPoolLayer(
rng,
input=input_next,
image_shape=(batch_size, numberOfFeatureMaps, featureMapSize, featureMapSize),
filter_shape=(nkerns[i], numberOfFeatureMaps, kernelSizes[i], kernelSizes[i]),
poolsize=(2, 2)
)
input_next = layer.output
numberOfFeatureMaps = nkerns[i]
featureMapSize = np.int16(np.floor((featureMapSize - kernelSizes[i]+1) / 2))
self.params += layer.params
self.convLayers.append(layer)
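        # Shape bookkeeping sketch (hypothetical numbers, added for clarity):
        # with patchSize=28 and kernelSizes=[5, 5], featureMapSize evolves as
        # 28 -> floor((28-5+1)/2) = 12 -> floor((12-5+1)/2) = 4, so the MLP
        # below receives nkerns[-1] * 4**2 inputs per example.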
# the 2 is there to preserve the batchSize
mlp_input = self.convLayers[-1].output.flatten(2)
self.mlp = MLP_Model(
rng=rng,
input=mlp_input,
n_in=nkerns[-1] * (featureMapSize ** 2),
n_hidden=hiddenSizes,
n_out=2,
activation=rectified_linear
)
self.params += self.mlp.params
self.cost = self.mlp.negative_log_likelihood
self.errors = self.mlp.errors
self.p_y_given_x = self.mlp.p_y_given_x
self.y_pred = self.mlp.y_pred
self.debug_x = self.p_y_given_x
if not fileName is None:
with open(fileName, 'r') as file:
                (saved_convLayers,
                 saved_hiddenLayers,
                 saved_logRegressionLayer,
                 self.trainingCost,
                 self.validationError,
                 saved_nkerns,
                 saved_kernelSizes,
                 saved_batch_size,
                 saved_patchSize,
                 saved_hiddenSizes) = cPickle.load(file)
for s_cl, cl in zip(saved_convLayers, self.convLayers):
cl.W.set_value(s_cl.W.get_value())
cl.b.set_value(s_cl.b.get_value())
for s_hl, hl in zip(saved_hiddenLayers, self.mlp.hiddenLayers):
hl.W.set_value(np.float32(s_hl.W.eval()))
hl.b.set_value(s_hl.b.get_value())
self.mlp.logRegressionLayer.W.set_value(np.float32(saved_logRegressionLayer.W.eval()))
self.mlp.logRegressionLayer.b.set_value(saved_logRegressionLayer.b.get_value())
def save(self, filename):
with open(filename, 'wb') as file:
cPickle.dump((self.convLayers,
self.mlp.hiddenLayers,
self.mlp.logRegressionLayer,
self.trainingCost,
self.validationError,
self.nkerns,
self.kernelSizes,
self.batch_size,
self.patchSize,
self.hiddenSizes), file)
| mit |
bzero/statsmodels | statsmodels/graphics/tests/test_mosaicplot.py | 17 | 18878 | from __future__ import division
from statsmodels.compat.python import iterkeys, zip, lrange, iteritems, range
from numpy.testing import assert_, assert_raises, dec
from numpy.testing import run_module_suite
# utilities for the tests
from statsmodels.compat.collections import OrderedDict
from statsmodels.api import datasets
import numpy as np
from itertools import product
try:
import matplotlib.pyplot as pylab
have_matplotlib = True
except:
have_matplotlib = False
import pandas
pandas_old = int(pandas.__version__.split('.')[1]) < 9
# the main drawing function
from statsmodels.graphics.mosaicplot import mosaic
# other functions to be tested for accuracy
from statsmodels.graphics.mosaicplot import _hierarchical_split
from statsmodels.graphics.mosaicplot import _reduce_dict
from statsmodels.graphics.mosaicplot import _key_splitting
from statsmodels.graphics.mosaicplot import _normalize_split
from statsmodels.graphics.mosaicplot import _split_rect
@dec.skipif(not have_matplotlib or pandas_old)
def test_data_conversion():
# It will not reorder the elements
# so the dictionary will look odd
    # as its key order has the c and b
# keys swapped
import pandas
fig, ax = pylab.subplots(4, 4)
data = {'ax': 1, 'bx': 2, 'cx': 3}
mosaic(data, ax=ax[0, 0], title='basic dict', axes_label=False)
data = pandas.Series(data)
mosaic(data, ax=ax[0, 1], title='basic series', axes_label=False)
data = [1, 2, 3]
mosaic(data, ax=ax[0, 2], title='basic list', axes_label=False)
data = np.asarray(data)
mosaic(data, ax=ax[0, 3], title='basic array', axes_label=False)
data = {('ax', 'cx'): 1, ('bx', 'cx'): 2, ('ax', 'dx'): 3, ('bx', 'dx'): 4}
mosaic(data, ax=ax[1, 0], title='compound dict', axes_label=False)
mosaic(data, ax=ax[2, 0], title='inverted keys dict', index=[1, 0], axes_label=False)
data = pandas.Series(data)
mosaic(data, ax=ax[1, 1], title='compound series', axes_label=False)
mosaic(data, ax=ax[2, 1], title='inverted keys series', index=[1, 0])
data = [[1, 2], [3, 4]]
mosaic(data, ax=ax[1, 2], title='compound list', axes_label=False)
mosaic(data, ax=ax[2, 2], title='inverted keys list', index=[1, 0])
data = np.array([[1, 2], [3, 4]])
mosaic(data, ax=ax[1, 3], title='compound array', axes_label=False)
mosaic(data, ax=ax[2, 3], title='inverted keys array', index=[1, 0], axes_label=False)
gender = ['male', 'male', 'male', 'female', 'female', 'female']
pet = ['cat', 'dog', 'dog', 'cat', 'dog', 'cat']
data = pandas.DataFrame({'gender': gender, 'pet': pet})
mosaic(data, ['gender'], ax=ax[3, 0], title='dataframe by key 1', axes_label=False)
mosaic(data, ['pet'], ax=ax[3, 1], title='dataframe by key 2', axes_label=False)
mosaic(data, ['gender', 'pet'], ax=ax[3, 2], title='both keys', axes_label=False)
mosaic(data, ['pet', 'gender'], ax=ax[3, 3], title='keys inverted', axes_label=False)
pylab.suptitle('testing data conversion (plot 1 of 4)')
#pylab.show()
@dec.skipif(not have_matplotlib)
def test_mosaic_simple():
    # display a simple plot of 4 categories of data, split into four
# levels with increasing size for each group
# creation of the levels
key_set = (['male', 'female'], ['old', 'adult', 'young'],
['worker', 'unemployed'], ['healty', 'ill'])
# the cartesian product of all the categories is
# the complete set of categories
keys = list(product(*key_set))
data = OrderedDict(zip(keys, range(1, 1 + len(keys))))
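    # For reference: product(*key_set) yields 2 * 3 * 2 * 2 = 24 keys, the
    # first being ('male', 'old', 'worker', 'healty'), so `data` maps each of
    # the 24 category combinations to the values 1..24.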
# which colours should I use for the various categories?
# put it into a dict
props = {}
#males and females in blue and red
props[('male',)] = {'color': 'b'}
props[('female',)] = {'color': 'r'}
# all the groups corresponding to ill groups have a different color
for key in keys:
if 'ill' in key:
if 'male' in key:
props[key] = {'color': 'BlueViolet' , 'hatch': '+'}
else:
props[key] = {'color': 'Crimson' , 'hatch': '+'}
# mosaic of the data, with given gaps and colors
mosaic(data, gap=0.05, properties=props, axes_label=False)
    pylab.suptitle('synthetic data, 4 categories (plot 2 of 4)')
#pylab.show()
@dec.skipif(not have_matplotlib or pandas_old)
def test_mosaic():
# make the same analysis on a known dataset
# load the data and clean it a bit
affairs = datasets.fair.load_pandas()
datas = affairs.exog
# any time greater than 0 is cheating
datas['cheated'] = affairs.endog > 0
# sort by the marriage quality and give meaningful name
# [rate_marriage, age, yrs_married, children,
# religious, educ, occupation, occupation_husb]
datas = datas.sort(['rate_marriage', 'religious'])
num_to_desc = {1: 'awful', 2: 'bad', 3: 'intermediate',
4: 'good', 5: 'wonderful'}
datas['rate_marriage'] = datas['rate_marriage'].map(num_to_desc)
num_to_faith = {1: 'non religious', 2: 'poorly religious', 3: 'religious',
4: 'very religious'}
datas['religious'] = datas['religious'].map(num_to_faith)
num_to_cheat = {False: 'faithful', True: 'cheated'}
datas['cheated'] = datas['cheated'].map(num_to_cheat)
# finished cleaning
fig, ax = pylab.subplots(2, 2)
mosaic(datas, ['rate_marriage', 'cheated'], ax=ax[0, 0],
title='by marriage happiness')
mosaic(datas, ['religious', 'cheated'], ax=ax[0, 1],
title='by religiosity')
mosaic(datas, ['rate_marriage', 'religious', 'cheated'], ax=ax[1, 0],
title='by both', labelizer=lambda k:'')
ax[1, 0].set_xlabel('marriage rating')
ax[1, 0].set_ylabel('religion status')
mosaic(datas, ['religious', 'rate_marriage'], ax=ax[1, 1],
title='inter-dependence', axes_label=False)
pylab.suptitle("extramarital affairs (plot 3 of 4)")
#pylab.show()
@dec.skipif(not have_matplotlib)
def test_mosaic_very_complex():
# make a scattermatrix of mosaic plots to show the correlations between
    # each pair of variables in a dataset. Could be easily converted into a
# new function that does this automatically based on the type of data
key_name = ['gender', 'age', 'health', 'work']
key_base = (['male', 'female'], ['old', 'young'],
['healty', 'ill'], ['work', 'unemployed'])
keys = list(product(*key_base))
data = OrderedDict(zip(keys, range(1, 1 + len(keys))))
props = {}
props[('male', 'old')] = {'color': 'r'}
props[('female',)] = {'color': 'pink'}
L = len(key_base)
fig, axes = pylab.subplots(L, L)
for i in range(L):
for j in range(L):
m = set(range(L)).difference(set((i, j)))
if i == j:
axes[i, i].text(0.5, 0.5, key_name[i],
ha='center', va='center')
axes[i, i].set_xticks([])
axes[i, i].set_xticklabels([])
axes[i, i].set_yticks([])
axes[i, i].set_yticklabels([])
else:
ji = max(i, j)
ij = min(i, j)
temp_data = OrderedDict([((k[ij], k[ji]) + tuple(k[r] for r in m), v)
for k, v in iteritems(data)])
keys = list(iterkeys(temp_data))
for k in keys:
value = _reduce_dict(temp_data, k[:2])
temp_data[k[:2]] = value
del temp_data[k]
mosaic(temp_data, ax=axes[i, j], axes_label=False,
properties=props, gap=0.05, horizontal=i > j)
pylab.suptitle('old males should look bright red, (plot 4 of 4)')
#pylab.show()
@dec.skipif(not have_matplotlib)
def test_axes_labeling():
from numpy.random import rand
key_set = (['male', 'female'], ['old', 'adult', 'young'],
['worker', 'unemployed'], ['yes', 'no'])
# the cartesian product of all the categories is
# the complete set of categories
keys = list(product(*key_set))
data = OrderedDict(zip(keys, rand(len(keys))))
lab = lambda k: ''.join(s[0] for s in k)
fig, (ax1, ax2) = pylab.subplots(1, 2, figsize=(16, 8))
mosaic(data, ax=ax1, labelizer=lab, horizontal=True, label_rotation=45)
mosaic(data, ax=ax2, labelizer=lab, horizontal=False,
label_rotation=[0, 45, 90, 0])
#fig.tight_layout()
fig.suptitle("correct alignment of the axes labels")
#pylab.show()
@dec.skipif(not have_matplotlib or pandas_old)
def test_mosaic_empty_cells():
# SMOKE test see #2286
import pandas as pd
mydata = pd.DataFrame({'id2': {64: 'Angelica',
65: 'DXW_UID', 66: 'casuid01',
67: 'casuid01', 68: 'EC93_uid',
69: 'EC93_uid', 70: 'EC93_uid',
60: 'DXW_UID', 61: 'AtmosFox',
62: 'DXW_UID', 63: 'DXW_UID'},
'id1': {64: 'TGP',
65: 'Retention01', 66: 'default',
67: 'default', 68: 'Musa_EC_9_3',
69: 'Musa_EC_9_3', 70: 'Musa_EC_9_3',
60: 'default', 61: 'default',
62: 'default', 63: 'default'}})
ct = pd.crosstab(mydata.id1, mydata.id2)
fig, vals = mosaic(ct.T.unstack())
fig, vals = mosaic(mydata, ['id1','id2'])
eq = lambda x, y: assert_(np.allclose(x, y))
def test_recursive_split():
keys = list(product('mf'))
data = OrderedDict(zip(keys, [1] * len(keys)))
res = _hierarchical_split(data, gap=0)
assert_(list(iterkeys(res)) == keys)
res[('m',)] = (0.0, 0.0, 0.5, 1.0)
res[('f',)] = (0.5, 0.0, 0.5, 1.0)
keys = list(product('mf', 'yao'))
data = OrderedDict(zip(keys, [1] * len(keys)))
res = _hierarchical_split(data, gap=0)
assert_(list(iterkeys(res)) == keys)
res[('m', 'y')] = (0.0, 0.0, 0.5, 1 / 3)
res[('m', 'a')] = (0.0, 1 / 3, 0.5, 1 / 3)
res[('m', 'o')] = (0.0, 2 / 3, 0.5, 1 / 3)
res[('f', 'y')] = (0.5, 0.0, 0.5, 1 / 3)
res[('f', 'a')] = (0.5, 1 / 3, 0.5, 1 / 3)
res[('f', 'o')] = (0.5, 2 / 3, 0.5, 1 / 3)
def test__reduce_dict():
data = OrderedDict(zip(list(product('mf', 'oy', 'wn')), [1] * 8))
eq(_reduce_dict(data, ('m',)), 4)
eq(_reduce_dict(data, ('m', 'o')), 2)
eq(_reduce_dict(data, ('m', 'o', 'w')), 1)
data = OrderedDict(zip(list(product('mf', 'oy', 'wn')), lrange(8)))
eq(_reduce_dict(data, ('m',)), 6)
eq(_reduce_dict(data, ('m', 'o')), 1)
eq(_reduce_dict(data, ('m', 'o', 'w')), 0)
def test__key_splitting():
# subdivide starting with an empty tuple
base_rect = {tuple(): (0, 0, 1, 1)}
res = _key_splitting(base_rect, ['a', 'b'], [1, 1], tuple(), True, 0)
assert_(list(iterkeys(res)) == [('a',), ('b',)])
eq(res[('a',)], (0, 0, 0.5, 1))
eq(res[('b',)], (0.5, 0, 0.5, 1))
# subdivide a in two sublevel
res_bis = _key_splitting(res, ['c', 'd'], [1, 1], ('a',), False, 0)
assert_(list(iterkeys(res_bis)) == [('a', 'c'), ('a', 'd'), ('b',)])
eq(res_bis[('a', 'c')], (0.0, 0.0, 0.5, 0.5))
eq(res_bis[('a', 'd')], (0.0, 0.5, 0.5, 0.5))
eq(res_bis[('b',)], (0.5, 0, 0.5, 1))
# starting with a non empty tuple and uneven distribution
base_rect = {('total',): (0, 0, 1, 1)}
res = _key_splitting(base_rect, ['a', 'b'], [1, 2], ('total',), True, 0)
assert_(list(iterkeys(res)) == [('total',) + (e,) for e in ['a', 'b']])
eq(res[('total', 'a')], (0, 0, 1 / 3, 1))
eq(res[('total', 'b')], (1 / 3, 0, 2 / 3, 1))
def test_proportion_normalization():
# extremes should give the whole set, as well
# as if 0 is inserted
eq(_normalize_split(0.), [0.0, 0.0, 1.0])
eq(_normalize_split(1.), [0.0, 1.0, 1.0])
eq(_normalize_split(2.), [0.0, 1.0, 1.0])
# negative values should raise ValueError
assert_raises(ValueError, _normalize_split, -1)
assert_raises(ValueError, _normalize_split, [1., -1])
assert_raises(ValueError, _normalize_split, [1., -1, 0.])
# if everything is zero it will complain
assert_raises(ValueError, _normalize_split, [0.])
assert_raises(ValueError, _normalize_split, [0., 0.])
# one-element array should return the whole interval
eq(_normalize_split([0.5]), [0.0, 1.0])
eq(_normalize_split([1.]), [0.0, 1.0])
eq(_normalize_split([2.]), [0.0, 1.0])
# simple division should give two pieces
for x in [0.3, 0.5, 0.9]:
eq(_normalize_split(x), [0., x, 1.0])
# multiple division should split as the sum of the components
for x, y in [(0.25, 0.5), (0.1, 0.8), (10., 30.)]:
eq(_normalize_split([x, y]), [0., x / (x + y), 1.0])
for x, y, z in [(1., 1., 1.), (0.1, 0.5, 0.7), (10., 30., 40)]:
eq(_normalize_split(
[x, y, z]), [0., x / (x + y + z), (x + y) / (x + y + z), 1.0])
def test_false_split():
# if you ask it to be divided in only one piece, just return the original
# one
pure_square = [0., 0., 1., 1.]
conf_h = dict(proportion=[1], gap=0.0, horizontal=True)
conf_v = dict(proportion=[1], gap=0.0, horizontal=False)
eq(_split_rect(*pure_square, **conf_h), pure_square)
eq(_split_rect(*pure_square, **conf_v), pure_square)
conf_h = dict(proportion=[1], gap=0.5, horizontal=True)
conf_v = dict(proportion=[1], gap=0.5, horizontal=False)
eq(_split_rect(*pure_square, **conf_h), pure_square)
eq(_split_rect(*pure_square, **conf_v), pure_square)
# identity on a void rectangle should not give anything strange
null_square = [0., 0., 0., 0.]
conf = dict(proportion=[1], gap=0.0, horizontal=True)
eq(_split_rect(*null_square, **conf), null_square)
conf = dict(proportion=[1], gap=1.0, horizontal=True)
eq(_split_rect(*null_square, **conf), null_square)
# splitting a negative rectangle should raise error
neg_square = [0., 0., -1., 0.]
conf = dict(proportion=[1], gap=0.0, horizontal=True)
assert_raises(ValueError, _split_rect, *neg_square, **conf)
conf = dict(proportion=[1, 1], gap=0.0, horizontal=True)
assert_raises(ValueError, _split_rect, *neg_square, **conf)
conf = dict(proportion=[1], gap=0.5, horizontal=True)
assert_raises(ValueError, _split_rect, *neg_square, **conf)
conf = dict(proportion=[1, 1], gap=0.5, horizontal=True)
assert_raises(ValueError, _split_rect, *neg_square, **conf)
def test_rect_pure_split():
pure_square = [0., 0., 1., 1.]
# division in two equal pieces from the perfect square
h_2split = [(0.0, 0.0, 0.5, 1.0), (0.5, 0.0, 0.5, 1.0)]
conf_h = dict(proportion=[1, 1], gap=0.0, horizontal=True)
eq(_split_rect(*pure_square, **conf_h), h_2split)
v_2split = [(0.0, 0.0, 1.0, 0.5), (0.0, 0.5, 1.0, 0.5)]
conf_v = dict(proportion=[1, 1], gap=0.0, horizontal=False)
eq(_split_rect(*pure_square, **conf_v), v_2split)
# division in two non-equal pieces from the perfect square
h_2split = [(0.0, 0.0, 1 / 3, 1.0), (1 / 3, 0.0, 2 / 3, 1.0)]
conf_h = dict(proportion=[1, 2], gap=0.0, horizontal=True)
eq(_split_rect(*pure_square, **conf_h), h_2split)
v_2split = [(0.0, 0.0, 1.0, 1 / 3), (0.0, 1 / 3, 1.0, 2 / 3)]
conf_v = dict(proportion=[1, 2], gap=0.0, horizontal=False)
eq(_split_rect(*pure_square, **conf_v), v_2split)
# division in three equal pieces from the perfect square
h_2split = [(0.0, 0.0, 1 / 3, 1.0), (1 / 3, 0.0, 1 / 3, 1.0), (2 / 3, 0.0,
1 / 3, 1.0)]
conf_h = dict(proportion=[1, 1, 1], gap=0.0, horizontal=True)
eq(_split_rect(*pure_square, **conf_h), h_2split)
v_2split = [(0.0, 0.0, 1.0, 1 / 3), (0.0, 1 / 3, 1.0, 1 / 3), (0.0, 2 / 3,
1.0, 1 / 3)]
conf_v = dict(proportion=[1, 1, 1], gap=0.0, horizontal=False)
eq(_split_rect(*pure_square, **conf_v), v_2split)
# division in three non-equal pieces from the perfect square
h_2split = [(0.0, 0.0, 1 / 4, 1.0), (1 / 4, 0.0, 1 / 2, 1.0), (3 / 4, 0.0,
1 / 4, 1.0)]
conf_h = dict(proportion=[1, 2, 1], gap=0.0, horizontal=True)
eq(_split_rect(*pure_square, **conf_h), h_2split)
v_2split = [(0.0, 0.0, 1.0, 1 / 4), (0.0, 1 / 4, 1.0, 1 / 2), (0.0, 3 / 4,
1.0, 1 / 4)]
conf_v = dict(proportion=[1, 2, 1], gap=0.0, horizontal=False)
eq(_split_rect(*pure_square, **conf_v), v_2split)
# splitting on a void rectangle should give multiple void
null_square = [0., 0., 0., 0.]
conf = dict(proportion=[1, 1], gap=0.0, horizontal=True)
eq(_split_rect(*null_square, **conf), [null_square, null_square])
conf = dict(proportion=[1, 2], gap=1.0, horizontal=True)
eq(_split_rect(*null_square, **conf), [null_square, null_square])
def test_rect_deformed_split():
non_pure_square = [1., -1., 1., 0.5]
# division in two equal pieces from the perfect square
h_2split = [(1.0, -1.0, 0.5, 0.5), (1.5, -1.0, 0.5, 0.5)]
conf_h = dict(proportion=[1, 1], gap=0.0, horizontal=True)
eq(_split_rect(*non_pure_square, **conf_h), h_2split)
v_2split = [(1.0, -1.0, 1.0, 0.25), (1.0, -0.75, 1.0, 0.25)]
conf_v = dict(proportion=[1, 1], gap=0.0, horizontal=False)
eq(_split_rect(*non_pure_square, **conf_v), v_2split)
# division in two non-equal pieces from the perfect square
h_2split = [(1.0, -1.0, 1 / 3, 0.5), (1 + 1 / 3, -1.0, 2 / 3, 0.5)]
conf_h = dict(proportion=[1, 2], gap=0.0, horizontal=True)
eq(_split_rect(*non_pure_square, **conf_h), h_2split)
v_2split = [(1.0, -1.0, 1.0, 1 / 6), (1.0, 1 / 6 - 1, 1.0, 2 / 6)]
conf_v = dict(proportion=[1, 2], gap=0.0, horizontal=False)
eq(_split_rect(*non_pure_square, **conf_v), v_2split)
def test_gap_split():
pure_square = [0., 0., 1., 1.]
# null split
conf_h = dict(proportion=[1], gap=1.0, horizontal=True)
eq(_split_rect(*pure_square, **conf_h), pure_square)
# equal split
h_2split = [(0.0, 0.0, 0.25, 1.0), (0.75, 0.0, 0.25, 1.0)]
conf_h = dict(proportion=[1, 1], gap=1.0, horizontal=True)
eq(_split_rect(*pure_square, **conf_h), h_2split)
# disequal split
h_2split = [(0.0, 0.0, 1 / 6, 1.0), (0.5 + 1 / 6, 0.0, 1 / 3, 1.0)]
conf_h = dict(proportion=[1, 2], gap=1.0, horizontal=True)
eq(_split_rect(*pure_square, **conf_h), h_2split)
def test_default_arg_index():
# 2116
import pandas as pd
df = pd.DataFrame({'size' : ['small', 'large', 'large', 'small', 'large',
'small'],
'length' : ['long', 'short', 'short', 'long', 'long',
'short']})
assert_raises(ValueError, mosaic, data=df, title='foobar')
if __name__ == '__main__':
run_module_suite()
| bsd-3-clause |
mengli/MachineLearning | kaggle/TalkingData/talking_data.py | 2 | 12465 | import pandas as pd
import time
import numpy as np
import lightgbm as lgb
import gc
import os
def do_count(df, group_cols, agg_name, agg_type='uint32', show_max=False, show_agg=True):
if show_agg:
print("Aggregating by ", group_cols, '...')
    gp = df[group_cols].groupby(group_cols).size().rename(agg_name).to_frame().reset_index()
df = df.merge(gp, on=group_cols, how='left')
del gp
if show_max:
print(agg_name + " max value = ", df[agg_name].max())
df[agg_name] = df[agg_name].astype(agg_type)
gc.collect()
return (df)
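# Toy illustration (added, not part of the pipeline): for a frame whose 'ip'
# column is [1, 1, 2], do_count(df, ['ip'], 'ip_count') merges back a column
# ip_count = [2, 2, 1], i.e. how many rows share each ip value; the
# do_countuniq / do_cumcount / do_mean / do_var helpers below follow the same
# group-aggregate-merge pattern with different aggregations.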
def do_countuniq(df, group_cols, counted, agg_name, agg_type='uint32', show_max=False, show_agg=True):
if show_agg:
print("Counting unqiue ", counted, " by ", group_cols, '...')
gp = df[group_cols + [counted]].groupby(group_cols)[counted].nunique().reset_index().rename(
columns={counted: agg_name})
df = df.merge(gp, on=group_cols, how='left')
del gp
if show_max:
print(agg_name + " max value = ", df[agg_name].max())
df[agg_name] = df[agg_name].astype(agg_type)
gc.collect()
return (df)
def do_cumcount(df, group_cols, counted, agg_name, agg_type='uint32', show_max=False, show_agg=True):
if show_agg:
print("Cumulative count by ", group_cols, '...')
gp = df[group_cols + [counted]].groupby(group_cols)[counted].cumcount()
df[agg_name] = gp.values
del gp
if show_max:
print(agg_name + " max value = ", df[agg_name].max())
df[agg_name] = df[agg_name].astype(agg_type)
gc.collect()
return (df)
def do_mean(df, group_cols, counted, agg_name, agg_type='float32', show_max=False, show_agg=True):
if show_agg:
print("Calculating mean of ", counted, " by ", group_cols, '...')
gp = df[group_cols + [counted]].groupby(group_cols)[counted].mean().reset_index().rename(
columns={counted: agg_name})
df = df.merge(gp, on=group_cols, how='left')
del gp
if show_max:
print(agg_name + " max value = ", df[agg_name].max())
df[agg_name] = df[agg_name].astype(agg_type)
gc.collect()
return (df)
def do_var(df, group_cols, counted, agg_name, agg_type='float32', show_max=False, show_agg=True):
if show_agg:
print("Calculating variance of ", counted, " by ", group_cols, '...')
gp = df[group_cols + [counted]].groupby(group_cols)[counted].var().reset_index().rename(columns={counted: agg_name})
df = df.merge(gp, on=group_cols, how='left')
del gp
if show_max:
print(agg_name + " max value = ", df[agg_name].max())
df[agg_name] = df[agg_name].astype(agg_type)
gc.collect()
return (df)
def lgb_modelfit_nocv(params, dtrain, dvalid, predictors, target='target', objective='binary', metrics='auc',
feval=None, early_stopping_rounds=20, num_boost_round=3000, verbose_eval=10,
categorical_features=None):
lgb_params = {
'boosting_type': 'gbdt',
'objective': objective,
'metric': metrics,
'learning_rate': 0.2,
'num_leaves': 31, # we should let it be smaller than 2^(max_depth)
'max_depth': -1, # -1 means no limit
        'min_child_samples': 20,  # Minimum number of data needed in a child (min_data_in_leaf)
'max_bin': 255, # Number of bucketed bin for feature values
'subsample': 0.6, # Subsample ratio of the training instance.
        'subsample_freq': 0,  # frequency of subsample; <= 0 means disabled
'colsample_bytree': 0.3, # Subsample ratio of columns when constructing each tree.
'min_child_weight': 5, # Minimum sum of instance weight(hessian) needed in a child(leaf)
'subsample_for_bin': 200000, # Number of samples for constructing bin
'min_split_gain': 0, # lambda_l1, lambda_l2 and min_gain_to_split to regularization
'reg_alpha': 0, # L1 regularization term on weights
'reg_lambda': 0, # L2 regularization term on weights
'nthread': 4,
'verbose': 0,
}
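    # Note on the leaf/depth relation above (added): LightGBM grows trees
    # leaf-wise, so num_leaves is normally kept at or below 2**max_depth - 1;
    # the DO() call further down follows this with max_depth=3, num_leaves=7.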
lgb_params.update(params)
print("preparing validation datasets")
xgtrain = lgb.Dataset(dtrain[predictors].values, label=dtrain[target].values,
feature_name=predictors,
categorical_feature=categorical_features
)
xgvalid = lgb.Dataset(dvalid[predictors].values, label=dvalid[target].values,
feature_name=predictors,
categorical_feature=categorical_features
)
evals_results = {}
bst1 = lgb.train(lgb_params,
xgtrain,
valid_sets=[xgtrain, xgvalid],
valid_names=['train', 'valid'],
evals_result=evals_results,
num_boost_round=num_boost_round,
early_stopping_rounds=early_stopping_rounds,
verbose_eval=10,
feval=feval)
print("\nModel Report")
print("bst1.best_iteration: ", bst1.best_iteration)
print(metrics + ":", evals_results['valid'][metrics][bst1.best_iteration - 1])
return (bst1, bst1.best_iteration)
def DO(frm, to, fileno):
dtypes = {
'ip': 'uint32',
'app': 'uint16',
'device': 'uint16',
'os': 'uint16',
'channel': 'uint16',
'is_attributed': 'uint8',
'click_id': 'uint32',
}
print('loading train data...', frm, to)
train_df = pd.read_csv("./train.csv", parse_dates=['click_time'], skiprows=range(1, frm), nrows=to - frm,
dtype=dtypes,
usecols=['ip', 'app', 'device', 'os', 'channel', 'click_time', 'is_attributed'])
print('loading test data...')
test_df = pd.read_csv("./test.csv", parse_dates=['click_time'], dtype=dtypes,
usecols=['ip', 'app', 'device', 'os', 'channel', 'click_time', 'click_id'])
len_train = len(train_df)
train_df = train_df.append(test_df)
del test_df
gc.collect()
print('Extracting new features...')
train_df['hour'] = pd.to_datetime(train_df.click_time).dt.hour.astype('uint8')
train_df['day'] = pd.to_datetime(train_df.click_time).dt.day.astype('uint8')
gc.collect()
train_df = do_countuniq(train_df, ['ip'], 'channel', 'X0', 'uint8', show_max=True)
gc.collect()
train_df = do_cumcount(train_df, ['ip', 'device', 'os'], 'app', 'X1', show_max=True)
gc.collect()
train_df = do_countuniq(train_df, ['ip', 'day'], 'hour', 'X2', 'uint8', show_max=True)
gc.collect()
train_df = do_countuniq(train_df, ['ip'], 'app', 'X3', 'uint8', show_max=True)
gc.collect()
train_df = do_countuniq(train_df, ['ip', 'app'], 'os', 'X4', 'uint8', show_max=True)
gc.collect()
train_df = do_countuniq(train_df, ['ip'], 'device', 'X5', 'uint16', show_max=True)
gc.collect()
train_df = do_countuniq(train_df, ['app'], 'channel', 'X6', show_max=True)
gc.collect()
train_df = do_cumcount(train_df, ['ip'], 'os', 'X7', show_max=True)
gc.collect()
train_df = do_countuniq(train_df, ['ip', 'device', 'os'], 'app', 'X8', show_max=True)
gc.collect()
train_df = do_count(train_df, ['ip', 'day', 'hour'], 'ip_tcount', show_max=True)
gc.collect()
train_df = do_count(train_df, ['ip', 'app'], 'ip_app_count', show_max=True)
gc.collect()
train_df = do_count(train_df, ['ip', 'app', 'os'], 'ip_app_os_count', 'uint16', show_max=True)
gc.collect()
train_df = do_var(train_df, ['ip', 'day', 'channel'], 'hour', 'ip_tchan_count', show_max=True)
gc.collect()
train_df = do_var(train_df, ['ip', 'app', 'os'], 'hour', 'ip_app_os_var', show_max=True)
gc.collect()
train_df = do_var(train_df, ['ip', 'app', 'channel'], 'day', 'ip_app_channel_var_day', show_max=True)
gc.collect()
train_df = do_mean(train_df, ['ip', 'app', 'channel'], 'hour', 'ip_app_channel_mean_hour', show_max=True)
gc.collect()
print('doing nextClick')
predictors = []
new_feature = 'nextClick'
filename = 'nextClick_%d_%d.csv' % (frm, to)
if os.path.exists(filename):
print('loading from save file')
QQ = pd.read_csv(filename).values
else:
D = 2 ** 26
train_df['category'] = (train_df['ip'].astype(str) + "_" + train_df['app'].astype(str) + "_" + train_df[
'device'].astype(str) + "_" + train_df['os'].astype(str)).apply(hash) % D
click_buffer = np.full(D, 3000000000, dtype=np.uint32)
train_df['epochtime'] = train_df['click_time'].astype(np.int64) // 10 ** 9
next_clicks = []
for category, t in zip(reversed(train_df['category'].values), reversed(train_df['epochtime'].values)):
next_clicks.append(click_buffer[category] - t)
click_buffer[category] = t
del (click_buffer)
QQ = list(reversed(next_clicks))
print('saving')
pd.DataFrame(QQ).to_csv(filename, index=False)
train_df.drop(['epochtime', 'category', 'click_time'], axis=1, inplace=True)
train_df[new_feature] = pd.Series(QQ).astype('float32')
predictors.append(new_feature)
train_df[new_feature + '_shift'] = train_df[new_feature].shift(+1).values
predictors.append(new_feature + '_shift')
del QQ
gc.collect()
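    # How nextClick works (explanatory note, added): scanning clicks in
    # reverse with a buffer keyed by hash(ip_app_device_os), each click gets
    # the number of seconds until the *next* click from the same combination.
    # E.g. one combination clicked at t = 10, 25, 100 yields
    # [25-10, 100-25, 3000000000-100] = [15, 75, 2999999900], where the large
    # sentinel value means "no later click observed".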
print("vars and data type: ")
train_df.info()
train_df['ip_tcount'] = train_df['ip_tcount'].astype('uint16')
train_df['ip_app_count'] = train_df['ip_app_count'].astype('uint16')
train_df['ip_app_os_count'] = train_df['ip_app_os_count'].astype('uint16')
target = 'is_attributed'
predictors.extend(['app', 'device', 'os', 'channel', 'hour', 'day',
'ip_tcount', 'ip_tchan_count', 'ip_app_count',
'ip_app_os_count', 'ip_app_os_var',
'ip_app_channel_var_day', 'ip_app_channel_mean_hour',
'X0', 'X1', 'X2', 'X3', 'X4', 'X5', 'X6', 'X7', 'X8'])
categorical = ['app', 'device', 'os', 'channel', 'hour', 'day']
print('predictors', predictors)
test_df = train_df[len_train:]
val_df = train_df[(len_train - val_size):len_train]
train_df = train_df[:(len_train - val_size)]
print("train size: ", len(train_df))
print("valid size: ", len(val_df))
print("test size : ", len(test_df))
sub = pd.DataFrame()
sub['click_id'] = test_df['click_id'].astype('int')
gc.collect()
print("Training...")
start_time = time.time()
params = {
'learning_rate': 0.20,
'num_leaves': 7, # 2^max_depth - 1
'max_depth': 3, # -1 means no limit
        'min_child_samples': 100,  # Minimum number of data needed in a child (min_data_in_leaf)
'max_bin': 100, # Number of bucketed bin for feature values
'subsample': 0.7, # Subsample ratio of the training instance.
        'subsample_freq': 1,  # frequency of subsample; <= 0 means disabled
'colsample_bytree': 0.9, # Subsample ratio of columns when constructing each tree.
'min_child_weight': 0, # Minimum sum of instance weight(hessian) needed in a child(leaf)
'scale_pos_weight': 200 # because training data is extremely unbalanced
}
(bst, best_iteration) = lgb_modelfit_nocv(params,
train_df,
val_df,
predictors,
target,
objective='binary',
metrics='auc',
early_stopping_rounds=30,
verbose_eval=True,
num_boost_round=1000,
categorical_features=categorical)
print('[{}]: model training time'.format(time.time() - start_time))
del train_df
del val_df
gc.collect()
print("Predicting...")
sub['is_attributed'] = bst.predict(test_df[predictors], num_iteration=best_iteration)
print("writing...")
sub.to_csv('sub_it%d.csv' % (fileno), index=False, float_format='%.9f')
print("done...")
return sub
nrows = 184903891 - 1
nchunk = 25000000
val_size = 2500000
frm = nrows - 75000000
to = frm + nchunk
sub = DO(frm, to, 0)
| apache-2.0 |
annayqho/TheCannon | code/lamost/xcalib_5labels/paper_plots/plot_survey_coverage_unknown.py | 1 | 5470 | #!/usr/bin/env python
import numpy as np
import healpy as hp
import matplotlib.pyplot as plt
from matplotlib import cm
from matplotlib import rc
from matplotlib import rcParams
from matplotlib.colors import LogNorm
plt.rc('text', usetex=True)
plt.rc('font', family='serif')
import pyfits
# import the data
hdulist = pyfits.open("../make_lamost_catalog/lamost_catalog_full.fits")
tbdata = hdulist[1].data
# cols = hdulist[1].columns
# cols.names
ra_lamost = tbdata.field('ra')
dec_lamost = tbdata.field('dec')
am_lamost = tbdata.field("cannon_a_k")
rmag_lamost = tbdata.field("mag3")
hdulist.close()
hdulist = pyfits.open("/home/annaho/aida41040/annaho/TheCannon/examples/example_DR12/allStar-v603.fits")
tbdata = hdulist[1].data
ra_apogee_all = tbdata['RA']
dec_apogee_all = tbdata['DEC']
am_apogee_all = tbdata['PARAM_ALPHA_M']
am_apogee_all = tbdata['AK_WISE']
good_coords = np.logical_and(ra_apogee_all > -90, dec_apogee_all > -90)
good = np.logical_and(good_coords, am_apogee_all > -90)
ra_apogee = ra_apogee_all[good]
dec_apogee = dec_apogee_all[good]
am_apogee = am_apogee_all[good]
hdulist.close()
ra_both = np.hstack((ra_apogee, ra_lamost))
dec_both = np.hstack((dec_apogee, dec_lamost))
am_all = np.hstack((am_apogee, am_lamost))
# create a RA and Dec grid
ra_all = []
dec_all = []
for ra in np.arange(0, 360, 0.5):
for dec in np.arange(-90, 90, 0.5):
ra_all.append(ra)
dec_all.append(dec)
ra = np.array(ra_all)
dec = np.array(dec_all)
# convert RA and Dec to phi and theta coordinates
def toPhiTheta(ra, dec):
phi = ra * np.pi/180.
theta = (90.0 - dec) * np.pi / 180.
return phi, theta
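# Worked example (added): RA = 180 deg, Dec = 0 deg maps to phi = pi
# (longitude in radians) and theta = pi/2 (colatitude, the celestial equator),
# which is the (theta, phi) convention healpy's ang2pix expects below.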
phi, theta = toPhiTheta(ra, dec)
phi_lamost, theta_lamost = toPhiTheta(ra_lamost, dec_lamost)
phi_apogee, theta_apogee = toPhiTheta(ra_apogee, dec_apogee)
phi_all, theta_all = toPhiTheta(ra_both, dec_both)
# to just plot all points, do
#hp.visufunc.projplot(theta, phi, 'bo')
#hp.visufunc.projplot(theta_lamost, phi_lamost, 'bo')
#hp.visufunc.graticule() # just the bare background w/ lines
# more examples are here
# https://healpy.readthedocs.org/en/latest/generated/healpy.visufunc.projplot.html#healpy.visufunc.projplot
## to plot a 2D histogram in the Mollweide projection
# define the HEALPIX level
# NSIDE = 32 # defines the resolution of the map
NSIDE = 128
# find the pixel ID for each point
# pix = hp.pixelfunc.ang2pix(NSIDE, theta, phi)
pix_lamost = hp.pixelfunc.ang2pix(NSIDE, theta_lamost, phi_lamost)
pix_apogee = hp.pixelfunc.ang2pix(NSIDE, theta_apogee, phi_apogee)
pix_all = hp.pixelfunc.ang2pix(NSIDE, theta_all, phi_all)
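# Resolution note (added): a HEALPix map has 12 * NSIDE**2 pixels, so
# NSIDE = 128 gives hp.nside2npix(128) = 196608 pixels, the length of the
# m_lamost / m_apogee / m_all arrays allocated below.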
# pix is in the order of ra and dec
# prepare the map array
m_lamost = hp.ma(np.zeros(hp.nside2npix(NSIDE), dtype='float'))
mask_lamost = np.zeros(hp.nside2npix(NSIDE), dtype='bool')
for pix_val in np.unique(pix_lamost):
choose = np.where(pix_lamost==pix_val)[0]
if len(choose) == 1:
#m_lamost[pix_val] = rmag_lamost[choose[0]]
m_lamost[pix_val] = am_lamost[choose[0]]
else:
#m_lamost[pix_val] = np.median(rmag_lamost[choose])
m_lamost[pix_val] = np.median(am_lamost[choose])
mask_lamost[np.setdiff1d(np.arange(len(m_lamost)), pix_lamost)] = 1
m_lamost.mask = mask_lamost
m_apogee= hp.ma(np.zeros(hp.nside2npix(NSIDE), dtype='float'))
mask_apogee= np.zeros(hp.nside2npix(NSIDE), dtype='bool')
for pix_val in np.unique(pix_apogee):
choose = np.where(pix_apogee==pix_val)[0]
if len(choose) == 1:
m_apogee[pix_val] = am_apogee[choose[0]]
else:
m_apogee[pix_val] = np.median(am_apogee[choose])
mask_apogee[np.setdiff1d(np.arange(len(m_apogee)), pix_apogee)] = 1
m_apogee.mask = mask_apogee
m_all = hp.ma(np.zeros(hp.nside2npix(NSIDE), dtype='float'))
mask_all= np.zeros(hp.nside2npix(NSIDE), dtype='bool')
for pix_val in np.unique(pix_all):
choose = np.where(pix_all==pix_val)[0]
if len(choose) == 1:
m_all[pix_val] = am_all[choose[0]]
else:
m_all[pix_val] = np.median(am_all[choose])
mask_all[np.setdiff1d(np.arange(len(m_all)), pix_all)] = 1
m_all.mask = mask_all
# perceptually uniform: inferno, viridis, plasma, magma
cmap=cm.magma
cmap.set_under('w')
# composite map
# plot map ('C' means the input coordinates were in the equatorial system)
rcParams.update({'font.size':16})
#hp.visufunc.mollview(m_apogee, coord=['C','G'], rot=(150, 0, 0), flip='astro',
# notext=True, title=r'$\alpha$/M for APOGEE DR12', cbar=True,
# norm=None, min=-0.07, max=0.3, cmap=cmap, unit = r'$\alpha$/M [dex]')
#hp.visufunc.mollview(m_lamost, coord=['C','G'], rot=(150, 0, 0), flip='astro',
# notext=True, title=r'$\alpha$/M for 500,000 LAMOST giants', cbar=True,
# norm=None, min=-0.07, max=0.3, cmap=cmap, unit = r'$\alpha$/M [dex]')
#notext=True, title="r-band magnitude for 500,000 LAMOST giants", cbar=True,
#norm=None, min=11, max=17, cmap=cmap, unit = r"r-band magnitude [mag]")
hp.visufunc.mollview(m_all, coord=['C','G'], rot=(150, 0, 0), flip='astro',
notext=True, title=r'$\alpha$/M for APOGEE DR12 + 500,000 LAMOST giants', cbar=True,
norm=None, min=0.00, max=0.4, cmap=cmap, unit = r'$\alpha$/M [dex]')
hp.visufunc.graticule()
#plt.show()
plt.savefig("full_ak_map.png")
#plt.savefig("apogee_am_map.png")
#plt.savefig("lamost_am_map_magma.png")
#plt.savefig("lamost_rmag_map.png")
| mit |
sonnyhu/scipy | scipy/interpolate/tests/test_rbf.py | 41 | 4367 | #!/usr/bin/env python
# Created by John Travers, Robert Hetland, 2007
""" Test functions for rbf module """
from __future__ import division, print_function, absolute_import
import numpy as np
from numpy.testing import (assert_, assert_array_almost_equal,
assert_almost_equal, run_module_suite)
from numpy import linspace, sin, random, exp, allclose
from scipy.interpolate.rbf import Rbf
FUNCTIONS = ('multiquadric', 'inverse multiquadric', 'gaussian',
'cubic', 'quintic', 'thin-plate', 'linear')
def check_rbf1d_interpolation(function):
# Check that the Rbf function interpolates through the nodes (1D)
x = linspace(0,10,9)
y = sin(x)
rbf = Rbf(x, y, function=function)
yi = rbf(x)
assert_array_almost_equal(y, yi)
assert_almost_equal(rbf(float(x[0])), y[0])
def check_rbf2d_interpolation(function):
# Check that the Rbf function interpolates through the nodes (2D).
x = random.rand(50,1)*4-2
y = random.rand(50,1)*4-2
z = x*exp(-x**2-1j*y**2)
rbf = Rbf(x, y, z, epsilon=2, function=function)
zi = rbf(x, y)
zi.shape = x.shape
assert_array_almost_equal(z, zi)
def check_rbf3d_interpolation(function):
# Check that the Rbf function interpolates through the nodes (3D).
x = random.rand(50, 1)*4 - 2
y = random.rand(50, 1)*4 - 2
z = random.rand(50, 1)*4 - 2
d = x*exp(-x**2 - y**2)
rbf = Rbf(x, y, z, d, epsilon=2, function=function)
di = rbf(x, y, z)
di.shape = x.shape
assert_array_almost_equal(di, d)
def test_rbf_interpolation():
for function in FUNCTIONS:
yield check_rbf1d_interpolation, function
yield check_rbf2d_interpolation, function
yield check_rbf3d_interpolation, function
def check_rbf1d_regularity(function, atol):
# Check that the Rbf function approximates a smooth function well away
# from the nodes.
x = linspace(0, 10, 9)
y = sin(x)
rbf = Rbf(x, y, function=function)
xi = linspace(0, 10, 100)
yi = rbf(xi)
# import matplotlib.pyplot as plt
# plt.figure()
# plt.plot(x, y, 'o', xi, sin(xi), ':', xi, yi, '-')
# plt.plot(x, y, 'o', xi, yi-sin(xi), ':')
# plt.title(function)
# plt.show()
msg = "abs-diff: %f" % abs(yi - sin(xi)).max()
assert_(allclose(yi, sin(xi), atol=atol), msg)
def test_rbf_regularity():
tolerances = {
'multiquadric': 0.1,
'inverse multiquadric': 0.15,
'gaussian': 0.15,
'cubic': 0.15,
'quintic': 0.1,
'thin-plate': 0.1,
'linear': 0.2
}
for function in FUNCTIONS:
yield check_rbf1d_regularity, function, tolerances.get(function, 1e-2)
def check_rbf1d_stability(function):
# Check that the Rbf function with default epsilon is not subject
# to overshoot. Regression for issue #4523.
#
# Generate some data (fixed random seed hence deterministic)
np.random.seed(1234)
x = np.linspace(0, 10, 50)
z = x + 4.0 * np.random.randn(len(x))
rbf = Rbf(x, z, function=function)
xi = np.linspace(0, 10, 1000)
yi = rbf(xi)
# subtract the linear trend and make sure there no spikes
assert_(np.abs(yi-xi).max() / np.abs(z-x).max() < 1.1)
def test_rbf_stability():
for function in FUNCTIONS:
yield check_rbf1d_stability, function
def test_default_construction():
# Check that the Rbf class can be constructed with the default
# multiquadric basis function. Regression test for ticket #1228.
x = linspace(0,10,9)
y = sin(x)
rbf = Rbf(x, y)
yi = rbf(x)
assert_array_almost_equal(y, yi)
def test_function_is_callable():
# Check that the Rbf class can be constructed with function=callable.
x = linspace(0,10,9)
y = sin(x)
linfunc = lambda x:x
rbf = Rbf(x, y, function=linfunc)
yi = rbf(x)
assert_array_almost_equal(y, yi)
def test_two_arg_function_is_callable():
# Check that the Rbf class can be constructed with a two argument
# function=callable.
def _func(self, r):
return self.epsilon + r
x = linspace(0,10,9)
y = sin(x)
rbf = Rbf(x, y, function=_func)
yi = rbf(x)
assert_array_almost_equal(y, yi)
def test_rbf_epsilon_none():
x = linspace(0, 10, 9)
y = sin(x)
rbf = Rbf(x, y, epsilon=None)
if __name__ == "__main__":
run_module_suite()
| bsd-3-clause |
hipstas/audio-tagging-toolkit | attk/excerpt_class.py | 2 | 4744 | #!/usr/bin/python
import os
import sys, getopt
import time, datetime
import subprocess
#here = os.path.abspath(os.path.dirname(__file__))
#sys.path.append(here)
def excerpt_class(media_path,csv_path,out_dir='',class_to_excerpt=1):
try:
basename=media_path.split('/')[-1][:-4]
except:
return("*** Problem loading basenames ***")
try:
from pydub import AudioSegment
import pandas as pd
import numpy as np
tag_data = pd.read_csv(csv_path,header=None)
except:
print("*** Empty or missing tag CSV. ***")
return("*** Empty or missing tag CSV. ***")
try:
includes_label=False
if len(tag_data.iloc[0])==3:
tag_data.columns=["Start","Class","Duration"]
elif len(tag_data.iloc[0])==4:
tag_data.columns=["Start","Class","Duration","Label"]
includes_label=True
if os.path.exists(media_path):
if media_path.lower()[-4:].lower() in ('.wav','.mp3','.mp4'):
if out_dir=='':
out_dir='/'.join(media_path.split('/')[:-1])+'/'
basename = media_path.split('/')[-1][:-4]
tag_data_relevant=tag_data[tag_data['Class']==class_to_excerpt]
tag_data_relevant.reset_index(inplace=True)
audio_source=True
if media_path.lower()[-4:]=='.mp4': # Creates a temporary WAV
audio_source=False # if input is MP4
temp_filename=media_path.split('/')[-1]+'_temp.wav'
audio_path='/var/tmp/'+temp_filename # Pathname for temp WAV
subprocess.call(['ffmpeg', '-y', '-i', media_path, audio_path]) # '-y' option overwrites existing file if present
else:
audio_path=media_path
song=None
try:
if media_path[-4:].lower()=='.mp3':
song = AudioSegment.from_mp3(audio_path)
else:
song = AudioSegment.from_wav(audio_path)
except:
print("Error loading audio with pyDub.")
return("Error loading audio with pyDub.")
#### Batch extracting specified WAV clips ###
for i in range(len(tag_data_relevant)):
#print("*** Extracting file "+str(i)+" of "+str(len(tag_data_relevant))+". ***\n")
#create_tag_excerpt(tag_data_relevant.iloc[i],audio_path,song,basename,out_dirincludes_label)
row=tag_data_relevant.iloc[i]
start = row['Start']
duration = row['Duration']
start_msec = float(start) * 1000.0
duration_msec = float(duration) * 1000
if includes_label==False:
clip_pathname=os.path.join(out_dir, basename+"_start_"+str(start)[:6]+"_dur_"+str(duration)[:6]+'_class_'+str(class_to_excerpt)+'.wav')
else:
clip_pathname=os.path.join(out_dir+basename+"_start_"+str(start)[:6]+"_dur_"+str(duration)[:6]+'_class_'+str(class_to_excerpt)+'_label_'+str(row['Label'])+'.wav')
if not os.path.exists(clip_pathname):
clip_data = song[start_msec:start_msec+duration_msec]
#clip_data=clip_data.set_channels(1)
clip_data.export(clip_pathname, format="wav")
if audio_source==False:
os.remove(audio_path)
#print("*** All segments extracted! ***")
else: print("\n**Error: Not an acceptable media format. **\n")
else: print("\n**Error: Audio file does not exist. **\n")
except Exception as e: print(e)
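# Illustrative direct call of excerpt_class() outside the CLI wrapper below;
# the file names and output directory are hypothetical placeholders, not files
# shipped with the toolkit:
# excerpt_class('interview.wav', 'interview_tags.csv', out_dir='clips/',
#               class_to_excerpt=1)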
def main(argv):
media_path = ''
class_to_excerpt = 1
csv_path=''
out_dir=''
audio_source=True
try:
opts, args = getopt.getopt(argv[1:],"hi:t:e:o:",["ifile="])
except getopt.GetoptError:
print("")
sys.exit(2)
for opt, arg in opts:
if opt == '-h':
print("")
sys.exit()
elif opt in ("-i", "--ifile"):
media_path = arg
#print(arg)
elif opt in ("-e", "--excerptclass"):
class_to_excerpt = int(arg)
#print(arg)
elif opt in ("-t", "--tags"):
csv_path=arg
#print(csv_path)
elif opt in ("-o", "--outdir"):
out_dir=arg
print("*** Audio output directory: "+out_dir)
excerpt_class(media_path,csv_path,out_dir,class_to_excerpt)
if __name__ == "__main__":
main(sys.argv)
| mit |
JPFrancoia/scikit-learn | sklearn/feature_extraction/tests/test_dict_vectorizer.py | 110 | 3768 | # Authors: Lars Buitinck
# Dan Blanchard <[email protected]>
# License: BSD 3 clause
from random import Random
import numpy as np
import scipy.sparse as sp
from numpy.testing import assert_array_equal
from sklearn.utils.testing import (assert_equal, assert_in,
assert_false, assert_true)
from sklearn.feature_extraction import DictVectorizer
from sklearn.feature_selection import SelectKBest, chi2
def test_dictvectorizer():
D = [{"foo": 1, "bar": 3},
{"bar": 4, "baz": 2},
{"bar": 1, "quux": 1, "quuux": 2}]
for sparse in (True, False):
for dtype in (int, np.float32, np.int16):
for sort in (True, False):
for iterable in (True, False):
v = DictVectorizer(sparse=sparse, dtype=dtype, sort=sort)
X = v.fit_transform(iter(D) if iterable else D)
assert_equal(sp.issparse(X), sparse)
assert_equal(X.shape, (3, 5))
assert_equal(X.sum(), 14)
assert_equal(v.inverse_transform(X), D)
if sparse:
# CSR matrices can't be compared for equality
assert_array_equal(X.A, v.transform(iter(D) if iterable
else D).A)
else:
assert_array_equal(X, v.transform(iter(D) if iterable
else D))
if sort:
assert_equal(v.feature_names_,
sorted(v.feature_names_))
def test_feature_selection():
# make two feature dicts with two useful features and a bunch of useless
# ones, in terms of chi2
d1 = dict([("useless%d" % i, 10) for i in range(20)],
useful1=1, useful2=20)
d2 = dict([("useless%d" % i, 10) for i in range(20)],
useful1=20, useful2=1)
for indices in (True, False):
v = DictVectorizer().fit([d1, d2])
X = v.transform([d1, d2])
sel = SelectKBest(chi2, k=2).fit(X, [0, 1])
v.restrict(sel.get_support(indices=indices), indices=indices)
assert_equal(v.get_feature_names(), ["useful1", "useful2"])
def test_one_of_k():
D_in = [{"version": "1", "ham": 2},
{"version": "2", "spam": .3},
{"version=3": True, "spam": -1}]
v = DictVectorizer()
X = v.fit_transform(D_in)
assert_equal(X.shape, (3, 5))
D_out = v.inverse_transform(X)
assert_equal(D_out[0], {"version=1": 1, "ham": 2})
names = v.get_feature_names()
assert_true("version=2" in names)
assert_false("version" in names)
def test_unseen_or_no_features():
D = [{"camelot": 0, "spamalot": 1}]
for sparse in [True, False]:
v = DictVectorizer(sparse=sparse).fit(D)
X = v.transform({"push the pram a lot": 2})
if sparse:
X = X.toarray()
assert_array_equal(X, np.zeros((1, 2)))
X = v.transform({})
if sparse:
X = X.toarray()
assert_array_equal(X, np.zeros((1, 2)))
try:
v.transform([])
except ValueError as e:
assert_in("empty", str(e))
def test_deterministic_vocabulary():
# Generate equal dictionaries with different memory layouts
items = [("%03d" % i, i) for i in range(1000)]
rng = Random(42)
d_sorted = dict(items)
rng.shuffle(items)
d_shuffled = dict(items)
# check that the memory layout does not impact the resulting vocabulary
v_1 = DictVectorizer().fit([d_sorted])
v_2 = DictVectorizer().fit([d_shuffled])
assert_equal(v_1.vocabulary_, v_2.vocabulary_)
| bsd-3-clause |
lanselin/pysal | pysal/weights/tests/test_Distance.py | 5 | 11160 | from pysal.weights import Distance as d
from pysal.weights.util import get_points_array
from pysal.weights import Contiguity as c
from pysal.common import RTOL, ATOL
from pysal.cg.kdtree import KDTree
import numpy as np
import pysal as ps
import unittest as ut
PANDAS_EXTINCT = ps.common.pandas is None
# All instances should test these four methods, and define their own functional
# tests based on common codepaths/estimated weights use cases.
class Distance_Mixin(object):
polygon_path = ps.examples.get_path('columbus.shp')
arc_path = ps.examples.get_path('stl_hom.shp')
points = [(10, 10), (20, 10), (40, 10),
(15, 20), (30, 20), (30, 30)]
euclidean_kdt = ps.cg.KDTree(points, distance_metric='euclidean')
polygon_f = ps.open(polygon_path) # our file handler
poly_centroids = get_points_array(polygon_f) # our iterable
polygon_f.seek(0) #go back to head of file
arc_f = ps.open(arc_path)
ps.cg.sphere.arcdist
arc_points = get_points_array(arc_f)
arc_f.seek(0)
arc_kdt = ps.cg.KDTree(arc_points, distance_metric='Arc',
radius=ps.cg.sphere.RADIUS_EARTH_KM)
cls = object # class constructor
known_wi = None #index of known w entry to compare
known_w = dict() #actual w entry
known_name = known_wi
def setUp(self):
self.__dict__.update({k:v for k,v in Distance_Mixin.__dict__.items()
if not k.startswith('_')})
def test_init(self):
# test vanilla, named
raise NotImplementedError('You need to implement this test '
'before this module will pass')
def test_from_shapefile(self):
# test vanilla, named, sparse
raise NotImplementedError('You need to implement this test '
'before this module will pass')
def test_from_array(self):
# test named, sparse
raise NotImplementedError('You need to implement this test '
'before this module will pass')
def test_from_dataframe(self):
# test named, columnar, default
raise NotImplementedError('You need to implement this test '
'before this module will pass')
class Test_KNN(ut.TestCase, Distance_Mixin):
def setUp(self):
Distance_Mixin.setUp(self)
self.known_wi0 = 7
self.known_w0 = [3, 6, 12, 11]
self.known_wi1 = 0
self.known_w1 = [2, 1, 3 ,7]
self.known_wi2 = 4
self.known_w2 = [1, 3, 9, 12]
self.known_wi3 = 40
self.known_w3 = [31, 38, 45, 49]
##########################
# Classmethod tests #
##########################
def test_init(self):
w = d.KNN(self.euclidean_kdt, k=2)
self.assertEqual(w.neighbors[0], [1,3])
@ut.skipIf(PANDAS_EXTINCT, 'Missing pandas')
def test_from_dataframe(self):
df = ps.pdio.read_files(self.polygon_path)
w = d.KNN.from_dataframe(df, k=4)
self.assertEqual(w.neighbors[self.known_wi0], self.known_w0)
self.assertEqual(w.neighbors[self.known_wi1], self.known_w1)
def test_from_array(self):
w = d.KNN.from_array(self.poly_centroids, k=4)
self.assertEqual(w.neighbors[self.known_wi0], self.known_w0)
self.assertEqual(w.neighbors[self.known_wi1], self.known_w1)
def test_from_shapefile(self):
w = d.KNN.from_shapefile(self.polygon_path, k=4)
self.assertEqual(w.neighbors[self.known_wi0], self.known_w0)
self.assertEqual(w.neighbors[self.known_wi1], self.known_w1)
##########################
# Function/User tests #
##########################
def test_reweight(self):
w = d.KNN(self.points, k=2)
new_point = [(21,21)]
wnew = w.reweight(k=4, p=1, new_data=new_point, inplace=False)
self.assertEqual(wnew[0], {1: 1.0, 3: 1.0, 4: 1.0, 6: 1.0})
class Test_DistanceBand(ut.TestCase, Distance_Mixin):
def setUp(self):
Distance_Mixin.setUp(self)
self.grid_path = ps.examples.get_path('lattice10x10.shp')
self.grid_rook_w = c.Rook.from_shapefile(self.grid_path)
self.grid_f = ps.open(self.grid_path)
self.grid_points = get_points_array(self.grid_f)
self.grid_f.seek(0)
self.grid_kdt = KDTree(self.grid_points)
##########################
# Classmethod tests #
##########################
def test_init(self):
w = d.DistanceBand(self.grid_kdt, 1)
for k,v in w:
self.assertEquals(v, self.grid_rook_w[k])
def test_from_shapefile(self):
w = d.DistanceBand.from_shapefile(self.grid_path, 1)
for k,v in w:
self.assertEquals(v, self.grid_rook_w[k])
def test_from_array(self):
w = d.DistanceBand.from_array(self.grid_points, 1)
for k,v in w:
self.assertEquals(v, self.grid_rook_w[k])
@ut.skipIf(PANDAS_EXTINCT, 'Missing pandas')
def test_from_dataframe(self):
import pandas as pd
geom_series = ps.pdio.shp.shp2series(self.grid_path)
random_data = np.random.random(size=len(geom_series))
df = pd.DataFrame({'obs':random_data, 'geometry':geom_series})
w = d.DistanceBand.from_dataframe(df, 1)
for k,v in w:
self.assertEquals(v, self.grid_rook_w[k])
##########################
# Function/User tests #
##########################
def test_integers(self):
"""
see issue #126
"""
grid_integers = [tuple(map(int, poly.vertices[0]))
for poly in self.grid_f]
self.grid_f.seek(0)
grid_dbw = d.DistanceBand(grid_integers, 1)
for k,v in grid_dbw:
self.assertEquals(v, self.grid_rook_w[k])
def test_arcdist(self):
arc = ps.cg.sphere.arcdist
kdt = KDTree(self.arc_points, distance_metric='Arc',
radius=ps.cg.sphere.RADIUS_EARTH_KM)
npoints = self.arc_points.shape[0]
full = np.matrix([[arc(self.arc_points[i], self.arc_points[j])
for j in xrange(npoints)]
for i in xrange(npoints)])
maxdist = full.max()
w = d.DistanceBand(kdt, maxdist, binary=False, alpha=1.0)
np.testing.assert_allclose(w.sparse.todense(), full)
def test_dense(self):
w_rook = ps.weights.Rook.from_shapefile(
ps.examples.get_path('lattice10x10.shp'))
polys = ps.open(ps.examples.get_path('lattice10x10.shp'))
centroids = [p.centroid for p in polys]
w_db = d.DistanceBand(centroids, 1, build_sp=False)
for k in w_db.id_order:
np.testing.assert_equal(w_db[k], w_rook[k])
class Test_Kernel(ut.TestCase, Distance_Mixin):
def setUp(self):
Distance_Mixin.setUp(self)
self.known_wi0 = 0
self.known_w0 = {0: 1, 1: 0.500000049999995, 3: 0.4409830615267465}
self.known_wi1 = 0
self.known_w1 = {0: 1.0, 1: 0.33333333333333337,
3: 0.2546440075000701}
self.known_w1_bw = 15.
self.known_wi2 = 0
self.known_w2 = {0: 1.0, 1: 0.59999999999999998,
3: 0.55278640450004202, 4: 0.10557280900008403}
self.known_w2_bws = [25.0, 15.0, 25.0, 16.0, 14.5, 25.0]
self.known_wi3 = 0
self.known_w3 = [1.0, 0.10557289844279438, 9.9999990066379496e-08]
self.known_w3_abws =[[11.180341005532938], [11.180341005532938],
[20.000002000000002], [11.180341005532938],
[14.142137037944515], [18.027758180095585]]
self.known_wi4 = 0
self.known_w4 = {0: 0.3989422804014327,
1: 0.26741902915776961,
3: 0.24197074871621341}
self.known_w4_abws = self.known_w3_abws
self.known_wi5 = 1
self.known_w5 = {4: 0.0070787731484506233,
2: 0.2052478782400463,
3: 0.23051223027663237,
1: 1.0}
self.known_wi6 = 0
self.known_w6 = {0: 1.0, 2: 0.03178906767736345,
1: 9.9999990066379496e-08}
#stick answers & params here
##########################
# Classmethod tests #
##########################
def test_init(self):
w = d.Kernel(self.euclidean_kdt)
for k,v in w[self.known_wi0].items():
np.testing.assert_allclose(v, self.known_w0[k], rtol=RTOL)
def test_from_shapefile(self):
w = d.Kernel.from_shapefile(self.polygon_path, idVariable='POLYID')
for k,v in w[self.known_wi5].items():
np.testing.assert_allclose((k,v), (k,self.known_w5[k]), rtol=RTOL)
w = d.Kernel.from_shapefile(self.polygon_path, fixed=False)
for k,v in w[self.known_wi6].items():
np.testing.assert_allclose((k,v), (k,self.known_w6[k]), rtol=RTOL)
def test_from_array(self):
w = d.Kernel.from_array(self.points)
for k,v in w[self.known_wi0].items():
np.testing.assert_allclose(v, self.known_w0[k], rtol=RTOL)
@ut.skipIf(PANDAS_EXTINCT, 'Missing pandas')
def test_from_dataframe(self):
df = ps.pdio.read_files(self.polygon_path)
w = d.Kernel.from_dataframe(df)
for k,v in w[self.known_wi5-1].items():
np.testing.assert_allclose(v, self.known_w5[k+1], rtol=RTOL)
##########################
# Function/User tests #
##########################
def test_fixed_bandwidth(self):
w = d.Kernel(self.points, bandwidth=15.0)
for k,v in w[self.known_wi1].items():
np.testing.assert_allclose((k,v), (k, self.known_w1[k]))
np.testing.assert_allclose(np.ones((w.n,1))*15, w.bandwidth)
w = d.Kernel(self.points, bandwidth=self.known_w2_bws)
for k,v in w[self.known_wi2].items():
np.testing.assert_allclose((k,v), (k, self.known_w2[k]), rtol=RTOL)
for i in range(w.n):
np.testing.assert_allclose(w.bandwidth[i], self.known_w2_bws[i], rtol=RTOL)
def test_adaptive_bandwidth(self):
w = d.Kernel(self.points, fixed=False)
np.testing.assert_allclose(sorted(w[self.known_wi3].values()),
sorted(self.known_w3), rtol=RTOL)
bws = w.bandwidth.tolist()
np.testing.assert_allclose(bws, self.known_w3_abws, rtol=RTOL)
w = d.Kernel(self.points, fixed=False, function='gaussian')
for k,v in w[self.known_wi4].items():
np.testing.assert_allclose((k,v), (k, self.known_w4[k]), rtol=RTOL)
bws = w.bandwidth.tolist()
np.testing.assert_allclose(bws, self.known_w4_abws, rtol=RTOL)
knn = ut.TestLoader().loadTestsFromTestCase(Test_KNN)
kern = ut.TestLoader().loadTestsFromTestCase(Test_Kernel)
db = ut.TestLoader().loadTestsFromTestCase(Test_DistanceBand)
suite = ut.TestSuite([knn, kern, db])
if __name__ == '__main__':
runner = ut.TextTestRunner()
runner.run(suite)
| bsd-3-clause |
lancezlin/ml_template_py | lib/python2.7/site-packages/matplotlib/tests/test_table.py | 7 | 3776 | from __future__ import (absolute_import, division, print_function,
unicode_literals)
from matplotlib.externals import six
import matplotlib.pyplot as plt
import numpy as np
from matplotlib.testing.decorators import image_comparison
from matplotlib.table import CustomCell
from matplotlib.path import Path
from nose.tools import assert_equal
@image_comparison(baseline_images=['table_zorder'],
extensions=['png'],
remove_text=True)
def test_zorder():
data = [[66386, 174296],
[58230, 381139]]
colLabels = ('Freeze', 'Wind')
rowLabels = ['%d year' % x for x in (100, 50)]
cellText = []
yoff = np.array([0.0] * len(colLabels))
for row in reversed(data):
yoff += row
cellText.append(['%1.1f' % (x/1000.0) for x in yoff])
t = np.linspace(0, 2*np.pi, 100)
plt.plot(t, np.cos(t), lw=4, zorder=2)
plt.table(cellText=cellText,
rowLabels=rowLabels,
colLabels=colLabels,
loc='center',
zorder=-2,
)
plt.table(cellText=cellText,
rowLabels=rowLabels,
colLabels=colLabels,
loc='upper center',
zorder=4,
)
plt.yticks([])
@image_comparison(baseline_images=['table_labels'],
extensions=['png'])
def test_label_colours():
dim = 3
c = np.linspace(0, 1, dim)
colours = plt.cm.RdYlGn(c)
cellText = [['1'] * dim] * dim
fig = plt.figure()
ax1 = fig.add_subplot(4, 1, 1)
ax1.axis('off')
ax1.table(cellText=cellText,
rowColours=colours,
loc='best')
ax2 = fig.add_subplot(4, 1, 2)
ax2.axis('off')
ax2.table(cellText=cellText,
rowColours=colours,
rowLabels=['Header'] * dim,
loc='best')
ax3 = fig.add_subplot(4, 1, 3)
ax3.axis('off')
ax3.table(cellText=cellText,
colColours=colours,
loc='best')
ax4 = fig.add_subplot(4, 1, 4)
ax4.axis('off')
ax4.table(cellText=cellText,
colColours=colours,
colLabels=['Header'] * dim,
loc='best')
@image_comparison(baseline_images=['table_cell_manipulation'],
extensions=['png'], remove_text=True)
def test_diff_cell_table():
cells = ('horizontal', 'vertical', 'open', 'closed', 'T', 'R', 'B', 'L')
cellText = [['1'] * len(cells)] * 2
colWidths = [0.1] * len(cells)
_, axes = plt.subplots(nrows=len(cells), figsize=(4, len(cells)+1))
for ax, cell in zip(axes, cells):
ax.table(
colWidths=colWidths,
cellText=cellText,
loc='center',
edges=cell,
)
ax.axis('off')
plt.tight_layout()
def test_customcell():
types = ('horizontal', 'vertical', 'open', 'closed', 'T', 'R', 'B', 'L')
codes = (
(Path.MOVETO, Path.LINETO, Path.MOVETO, Path.LINETO, Path.MOVETO),
(Path.MOVETO, Path.MOVETO, Path.LINETO, Path.MOVETO, Path.LINETO),
(Path.MOVETO, Path.MOVETO, Path.MOVETO, Path.MOVETO, Path.MOVETO),
(Path.MOVETO, Path.LINETO, Path.LINETO, Path.LINETO, Path.CLOSEPOLY),
(Path.MOVETO, Path.MOVETO, Path.MOVETO, Path.LINETO, Path.MOVETO),
(Path.MOVETO, Path.MOVETO, Path.LINETO, Path.MOVETO, Path.MOVETO),
(Path.MOVETO, Path.LINETO, Path.MOVETO, Path.MOVETO, Path.MOVETO),
(Path.MOVETO, Path.MOVETO, Path.MOVETO, Path.MOVETO, Path.LINETO),
)
for t, c in zip(types, codes):
cell = CustomCell((0, 0), visible_edges=t, width=1, height=1)
code = tuple(s for _, s in cell.get_path().iter_segments())
assert_equal(c, code)
| mit |
rigetticomputing/pyquil | pyquil/wavefunction.py | 1 | 9032 | ##############################################################################
# Copyright 2018 Rigetti Computing
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
##############################################################################
"""
Module containing the Wavefunction object and methods for working with wavefunctions.
"""
import itertools
import warnings
from typing import Dict, Iterator, List, Optional, Sequence, cast
import numpy as np
OCTETS_PER_DOUBLE_FLOAT = 8
OCTETS_PER_COMPLEX_DOUBLE = 2 * OCTETS_PER_DOUBLE_FLOAT
class Wavefunction(object):
"""
Encapsulate a wavefunction representing a quantum state
as returned by :py:class:`~pyquil.api.WavefunctionSimulator`.
.. note::
The elements of the wavefunction are ordered by bitstring. E.g., for two qubits the order
is ``00, 01, 10, 11``, where the bits **are ordered in reverse** by the qubit index,
i.e., for qubits 0 and 1 the bitstring ``01`` indicates that qubit 0 is in the state 1.
See also :ref:`the related documentation section in the WavefunctionSimulator Overview
<basis_ordering>`.
"""
def __init__(self, amplitude_vector: np.ndarray):
"""
Initializes a wavefunction
:param amplitude_vector: A numpy array of complex amplitudes
"""
if len(amplitude_vector) == 0 or len(amplitude_vector) & (len(amplitude_vector) - 1) != 0:
raise TypeError("Amplitude vector must have a length that is a power of two")
self.amplitudes = np.asarray(amplitude_vector)
sumprob = np.sum(self.probabilities())
if not np.isclose(sumprob, 1.0):
raise ValueError(
"The wavefunction is not normalized. "
"The probabilities sum to {} instead of 1".format(sumprob)
)
@staticmethod
def ground(qubit_num: int) -> "Wavefunction":
warnings.warn("ground() has been deprecated in favor of zeros()", stacklevel=2)
return Wavefunction.zeros(qubit_num)
@staticmethod
def zeros(qubit_num: int) -> "Wavefunction":
"""
Constructs the groundstate wavefunction for a given number of qubits.
:param qubit_num:
:return: A Wavefunction in the ground state
"""
amplitude_vector = np.zeros(2 ** qubit_num)
amplitude_vector[0] = 1.0
return Wavefunction(amplitude_vector)
@staticmethod
def from_bit_packed_string(coef_string: bytes) -> "Wavefunction":
"""
From a bit packed string, unpacks to get the wavefunction
:param coef_string:
"""
num_cfloat = len(coef_string) // OCTETS_PER_COMPLEX_DOUBLE
amplitude_vector = np.ndarray(shape=(num_cfloat,), buffer=coef_string, dtype=">c16")
return Wavefunction(amplitude_vector)
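# Sketch of the expected layout: each amplitude occupies
# OCTETS_PER_COMPLEX_DOUBLE (16) octets as a big-endian complex double, so a
# hand-built buffer (illustrative contents only) round-trips like this:
# buf = np.array([1.0 + 0j, 0.0 + 0j], dtype=">c16").tobytes()
# Wavefunction.from_bit_packed_string(buf)  # single-qubit |0> state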
def __len__(self) -> int:
return len(self.amplitudes).bit_length() - 1
def __iter__(self) -> Iterator[complex]:
return cast(Iterator[complex], self.amplitudes.__iter__())
def __getitem__(self, index: int) -> complex:
return cast(complex, self.amplitudes[index])
def __setitem__(self, key: int, value: complex) -> None:
self.amplitudes[key] = value
def __str__(self) -> str:
return self.pretty_print(decimal_digits=10)
def probabilities(self) -> np.ndarray:
"""Returns an array of probabilities in lexicographical order"""
return np.abs(self.amplitudes) ** 2
def get_outcome_probs(self) -> Dict[str, float]:
"""
Parses a wavefunction (array of complex amplitudes) and returns a dictionary of
outcomes and associated probabilities.
:return: A dict with outcomes as keys and probabilities as values.
:rtype: dict
"""
outcome_dict = {}
qubit_num = len(self)
for index, amplitude in enumerate(self.amplitudes):
outcome = get_bitstring_from_index(index, qubit_num)
outcome_dict[outcome] = abs(amplitude) ** 2
return outcome_dict
def pretty_print_probabilities(self, decimal_digits: int = 2) -> Dict[str, float]:
"""
TODO: This doesn't seem like it is named correctly...
Returns outcome probabilities, ignoring all outcomes with approximately zero probabilities
(up to a certain number of decimal digits) and rounding the probabilities to decimal_digits.
:param int decimal_digits: The number of digits to truncate to.
:return: A dict with outcomes as keys and probabilities as values.
"""
outcome_dict = {}
qubit_num = len(self)
for index, amplitude in enumerate(self.amplitudes):
outcome = get_bitstring_from_index(index, qubit_num)
prob = round(abs(amplitude) ** 2, decimal_digits)
if prob != 0.0:
outcome_dict[outcome] = prob
return outcome_dict
def pretty_print(self, decimal_digits: int = 2) -> str:
"""
Returns a string repr of the wavefunction, ignoring all outcomes with approximately zero
amplitude (up to a certain number of decimal digits) and rounding the amplitudes to
decimal_digits.
:param int decimal_digits: The number of digits to truncate to.
:return: A string representation of the wavefunction.
"""
outcome_dict = {}
qubit_num = len(self)
pp_string = ""
for index, amplitude in enumerate(self.amplitudes):
outcome = get_bitstring_from_index(index, qubit_num)
amplitude = (
round(amplitude.real, decimal_digits) + round(amplitude.imag, decimal_digits) * 1.0j
)
if amplitude != 0.0:
outcome_dict[outcome] = amplitude
pp_string += str(amplitude) + "|{}> + ".format(outcome)
if len(pp_string) >= 3:
pp_string = pp_string[:-3] # remove the dangling + if it is there
return pp_string
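# For example (amplitudes rounded as described above), a Bell-like state
# Wavefunction(np.array([1, 0, 0, 1]) / np.sqrt(2)) pretty-prints roughly as
# '(0.71+0j)|00> + (0.71+0j)|11>'.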
def plot(self, qubit_subset: Optional[Sequence[int]] = None) -> None:
"""
TODO: calling this will error because of matplotlib
Plots a bar chart with bitstring on the x axis and probability on the y axis.
:param qubit_subset: Optional parameter used for plotting a subset of the Hilbert space.
"""
import matplotlib.pyplot as plt
prob_dict = self.get_outcome_probs()
if qubit_subset:
sub_dict = {}
qubit_num = len(self)
for i in qubit_subset:
if i > (2 ** qubit_num - 1):
raise IndexError("Index {} too large for {} qubits.".format(i, qubit_num))
else:
sub_dict[get_bitstring_from_index(i, qubit_num)] = prob_dict[
get_bitstring_from_index(i, qubit_num)
]
prob_dict = sub_dict
plt.bar(range(len(prob_dict)), prob_dict.values(), align="center", color="#6CAFB7")
plt.xticks(range(len(prob_dict)), prob_dict.keys())
plt.show()
def sample_bitstrings(self, n_samples: int) -> np.ndarray:
"""
Sample bitstrings from the distribution defined by the wavefunction.
:param n_samples: The number of bitstrings to sample
:return: An array of shape (n_samples, n_qubits)
"""
possible_bitstrings = np.array(list(itertools.product((0, 1), repeat=len(self))))
inds = np.random.choice(2 ** len(self), n_samples, p=self.probabilities())
bitstrings = possible_bitstrings[inds, :]
return bitstrings
def get_bitstring_from_index(index: int, qubit_num: int) -> str:
"""
Returns the bitstring in lexicographical order that corresponds to the given index in the range 0 to 2**qubit_num - 1
:param int index:
:param int qubit_num:
:return: the bitstring
:rtype: str
"""
if index > (2 ** qubit_num - 1):
raise IndexError("Index {} too large for {} qubits.".format(index, qubit_num))
return bin(index)[2:].rjust(qubit_num, "0")
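# For instance, get_bitstring_from_index(6, 4) returns '0110' and
# get_bitstring_from_index(1, 2) returns '01' (qubit 0 set, qubit 1 clear).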
def _octet_bits(o: int) -> List[int]:
"""
Get the bits of an octet.
:param o: The octets.
:return: The bits as a list in LSB-to-MSB order.
"""
if not isinstance(o, int):
raise TypeError("o should be an int")
if not (0 <= o <= 255):
raise ValueError("o should be between 0 and 255 inclusive")
bits = [0] * 8
for i in range(8):
if 1 == o & 1:
bits[i] = 1
o = o >> 1
return bits
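# A quick worked example of the LSB-first ordering:
# _octet_bits(0b1011) returns [1, 1, 0, 1, 0, 0, 0, 0].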
| apache-2.0 |
elkingtonmcb/nupic | external/linux32/lib/python2.6/site-packages/matplotlib/backends/backend_agg.py | 69 | 11729 | """
An agg http://antigrain.com/ backend
Features that are implemented
* capstyles and join styles
* dashes
* linewidth
* lines, rectangles, ellipses
* clipping to a rectangle
* output to RGBA and PNG
* alpha blending
* DPI scaling properly - everything scales properly (dashes, linewidths, etc)
* draw polygon
* freetype2 w/ ft2font
TODO:
* allow save to file handle
* integrate screen dpi w/ ppi and text
"""
from __future__ import division
import numpy as npy
from matplotlib import verbose, rcParams
from matplotlib.backend_bases import RendererBase,\
FigureManagerBase, FigureCanvasBase
from matplotlib.cbook import is_string_like, maxdict
from matplotlib.figure import Figure
from matplotlib.font_manager import findfont
from matplotlib.ft2font import FT2Font, LOAD_FORCE_AUTOHINT
from matplotlib.mathtext import MathTextParser
from matplotlib.path import Path
from matplotlib.transforms import Bbox
from _backend_agg import RendererAgg as _RendererAgg
from matplotlib import _png
backend_version = 'v2.2'
class RendererAgg(RendererBase):
"""
The renderer handles all the drawing primitives using a graphics
context instance that controls the colors/styles
"""
debug=1
texd = maxdict(50) # a cache of tex image rasters
_fontd = maxdict(50)
def __init__(self, width, height, dpi):
if __debug__: verbose.report('RendererAgg.__init__', 'debug-annoying')
RendererBase.__init__(self)
self.dpi = dpi
self.width = width
self.height = height
if __debug__: verbose.report('RendererAgg.__init__ width=%s, height=%s'%(width, height), 'debug-annoying')
self._renderer = _RendererAgg(int(width), int(height), dpi, debug=False)
if __debug__: verbose.report('RendererAgg.__init__ _RendererAgg done',
'debug-annoying')
#self.draw_path = self._renderer.draw_path # see below
self.draw_markers = self._renderer.draw_markers
self.draw_path_collection = self._renderer.draw_path_collection
self.draw_quad_mesh = self._renderer.draw_quad_mesh
self.draw_image = self._renderer.draw_image
self.copy_from_bbox = self._renderer.copy_from_bbox
self.restore_region = self._renderer.restore_region
self.tostring_rgba_minimized = self._renderer.tostring_rgba_minimized
self.mathtext_parser = MathTextParser('Agg')
self.bbox = Bbox.from_bounds(0, 0, self.width, self.height)
if __debug__: verbose.report('RendererAgg.__init__ done',
'debug-annoying')
def draw_path(self, gc, path, transform, rgbFace=None):
nmax = rcParams['agg.path.chunksize'] # here at least for testing
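# Long unfilled paths are split into chunks of at most nmax vertices so the
# Agg renderer never has to process one huge path in a single call; filled
# paths are drawn in one call because splitting them would change the fill.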
npts = path.vertices.shape[0]
if nmax > 100 and npts > nmax and path.should_simplify and rgbFace is None:
nch = npy.ceil(npts/float(nmax))
chsize = int(npy.ceil(npts/nch))
i0 = npy.arange(0, npts, chsize)
i1 = npy.zeros_like(i0)
i1[:-1] = i0[1:] - 1
i1[-1] = npts
for ii0, ii1 in zip(i0, i1):
v = path.vertices[ii0:ii1,:]
c = path.codes
if c is not None:
c = c[ii0:ii1]
c[0] = Path.MOVETO # move to end of last chunk
p = Path(v, c)
self._renderer.draw_path(gc, p, transform, rgbFace)
else:
self._renderer.draw_path(gc, path, transform, rgbFace)
def draw_mathtext(self, gc, x, y, s, prop, angle):
"""
Draw the math text using matplotlib.mathtext
"""
if __debug__: verbose.report('RendererAgg.draw_mathtext',
'debug-annoying')
ox, oy, width, height, descent, font_image, used_characters = \
self.mathtext_parser.parse(s, self.dpi, prop)
x = int(x) + ox
y = int(y) - oy
self._renderer.draw_text_image(font_image, x, y + 1, angle, gc)
def draw_text(self, gc, x, y, s, prop, angle, ismath):
"""
Render the text
"""
if __debug__: verbose.report('RendererAgg.draw_text', 'debug-annoying')
if ismath:
return self.draw_mathtext(gc, x, y, s, prop, angle)
font = self._get_agg_font(prop)
if font is None: return None
if len(s) == 1 and ord(s) > 127:
font.load_char(ord(s), flags=LOAD_FORCE_AUTOHINT)
else:
# We pass '0' for angle here, since it will be rotated (in raster
# space) in the following call to draw_text_image).
font.set_text(s, 0, flags=LOAD_FORCE_AUTOHINT)
font.draw_glyphs_to_bitmap()
#print x, y, int(x), int(y)
self._renderer.draw_text_image(font.get_image(), int(x), int(y) + 1, angle, gc)
def get_text_width_height_descent(self, s, prop, ismath):
"""
get the width and height in display coords of the string s
with FontProperties prop
# passing rgb is a little hack to make cacheing in the
# texmanager more efficient. It is not meant to be used
# outside the backend
"""
if ismath=='TeX':
# todo: handle props
size = prop.get_size_in_points()
texmanager = self.get_texmanager()
Z = texmanager.get_grey(s, size, self.dpi)
m,n = Z.shape
# TODO: descent of TeX text (I am imitating backend_ps here -JKS)
return n, m, 0
if ismath:
ox, oy, width, height, descent, fonts, used_characters = \
self.mathtext_parser.parse(s, self.dpi, prop)
return width, height, descent
font = self._get_agg_font(prop)
font.set_text(s, 0.0, flags=LOAD_FORCE_AUTOHINT) # the width and height of unrotated string
w, h = font.get_width_height()
d = font.get_descent()
w /= 64.0 # convert from subpixels
h /= 64.0
d /= 64.0
return w, h, d
def draw_tex(self, gc, x, y, s, prop, angle):
# todo, handle props, angle, origins
size = prop.get_size_in_points()
texmanager = self.get_texmanager()
key = s, size, self.dpi, angle, texmanager.get_font_config()
im = self.texd.get(key)
if im is None:
Z = texmanager.get_grey(s, size, self.dpi)
Z = npy.array(Z * 255.0, npy.uint8)
self._renderer.draw_text_image(Z, x, y, angle, gc)
def get_canvas_width_height(self):
'return the canvas width and height in display coords'
return self.width, self.height
def _get_agg_font(self, prop):
"""
Get the font for text instance t, cacheing for efficiency
"""
if __debug__: verbose.report('RendererAgg._get_agg_font',
'debug-annoying')
key = hash(prop)
font = self._fontd.get(key)
if font is None:
fname = findfont(prop)
font = self._fontd.get(fname)
if font is None:
font = FT2Font(str(fname))
self._fontd[fname] = font
self._fontd[key] = font
font.clear()
size = prop.get_size_in_points()
font.set_size(size, self.dpi)
return font
def points_to_pixels(self, points):
"""
convert point measures to pixes using dpi and the pixels per
inch of the display
"""
if __debug__: verbose.report('RendererAgg.points_to_pixels',
'debug-annoying')
return points*self.dpi/72.0
def tostring_rgb(self):
if __debug__: verbose.report('RendererAgg.tostring_rgb',
'debug-annoying')
return self._renderer.tostring_rgb()
def tostring_argb(self):
if __debug__: verbose.report('RendererAgg.tostring_argb',
'debug-annoying')
return self._renderer.tostring_argb()
def buffer_rgba(self,x,y):
if __debug__: verbose.report('RendererAgg.buffer_rgba',
'debug-annoying')
return self._renderer.buffer_rgba(x,y)
def clear(self):
self._renderer.clear()
def option_image_nocomposite(self):
# It is generally faster to composite each image directly to
# the Figure, and there's no file size benefit to compositing
# with the Agg backend
return True
def new_figure_manager(num, *args, **kwargs):
"""
Create a new figure manager instance
"""
if __debug__: verbose.report('backend_agg.new_figure_manager',
'debug-annoying')
FigureClass = kwargs.pop('FigureClass', Figure)
thisFig = FigureClass(*args, **kwargs)
canvas = FigureCanvasAgg(thisFig)
manager = FigureManagerBase(canvas, num)
return manager
class FigureCanvasAgg(FigureCanvasBase):
"""
The canvas the figure renders into. Calls the draw and print fig
methods, creates the renderers, etc...
Public attribute
figure - A Figure instance
"""
def copy_from_bbox(self, bbox):
renderer = self.get_renderer()
return renderer.copy_from_bbox(bbox)
def restore_region(self, region):
renderer = self.get_renderer()
return renderer.restore_region(region)
def draw(self):
"""
Draw the figure using the renderer
"""
if __debug__: verbose.report('FigureCanvasAgg.draw', 'debug-annoying')
self.renderer = self.get_renderer()
self.figure.draw(self.renderer)
def get_renderer(self):
l, b, w, h = self.figure.bbox.bounds
key = w, h, self.figure.dpi
try: self._lastKey, self.renderer
except AttributeError: need_new_renderer = True
else: need_new_renderer = (self._lastKey != key)
if need_new_renderer:
self.renderer = RendererAgg(w, h, self.figure.dpi)
self._lastKey = key
return self.renderer
def tostring_rgb(self):
if __debug__: verbose.report('FigureCanvasAgg.tostring_rgb',
'debug-annoying')
return self.renderer.tostring_rgb()
def tostring_argb(self):
if __debug__: verbose.report('FigureCanvasAgg.tostring_argb',
'debug-annoying')
return self.renderer.tostring_argb()
def buffer_rgba(self,x,y):
if __debug__: verbose.report('FigureCanvasAgg.buffer_rgba',
'debug-annoying')
return self.renderer.buffer_rgba(x,y)
def get_default_filetype(self):
return 'png'
def print_raw(self, filename_or_obj, *args, **kwargs):
FigureCanvasAgg.draw(self)
renderer = self.get_renderer()
original_dpi = renderer.dpi
renderer.dpi = self.figure.dpi
if is_string_like(filename_or_obj):
filename_or_obj = file(filename_or_obj, 'wb')
renderer._renderer.write_rgba(filename_or_obj)
renderer.dpi = original_dpi
print_rgba = print_raw
def print_png(self, filename_or_obj, *args, **kwargs):
FigureCanvasAgg.draw(self)
renderer = self.get_renderer()
original_dpi = renderer.dpi
renderer.dpi = self.figure.dpi
if is_string_like(filename_or_obj):
filename_or_obj = file(filename_or_obj, 'wb')
_png.write_png(renderer._renderer.buffer_rgba(0, 0),
renderer.width, renderer.height,
filename_or_obj, self.figure.dpi)
renderer.dpi = original_dpi
| agpl-3.0 |
bennlich/scikit-image | doc/examples/plot_restoration.py | 17 | 1960 | # -*- coding: utf-8 -*-
"""
=====================
Image Deconvolution
=====================
In this example, we deconvolve a noisy version of an image using Wiener
and unsupervised Wiener algorithms. These algorithms are based on
linear models that cannot restore sharp edges as well as non-linear
methods (like TV restoration), but they are much faster.
Wiener filter
-------------
The Wiener filter is an inverse filter based on the PSF (Point Spread
Function), a prior regularisation (penalisation of high frequencies) and a
tradeoff between data and prior adequacy. The regularisation
parameter must be hand-tuned.
Unsupervised Wiener
-------------------
This algorithm has a self-tuned regularisation parameter based on
data learning. This is not common and is based on the publication
below. The algorithm relies on an iterative Gibbs sampler that
alternately draws samples from the posterior conditional law of the image,
the noise power and the image frequency power.
.. [1] François Orieux, Jean-François Giovannelli, and Thomas
Rodet, "Bayesian estimation of regularization and point
spread function parameters for Wiener-Hunt deconvolution",
J. Opt. Soc. Am. A 27, 1593-1607 (2010)
"""
import numpy as np
import matplotlib.pyplot as plt
from skimage import color, data, restoration
astro = color.rgb2gray(data.astronaut())
from scipy.signal import convolve2d as conv2
psf = np.ones((5, 5)) / 25
astro = conv2(astro, psf, 'same')
astro += 0.1 * astro.std() * np.random.standard_normal(astro.shape)
deconvolved, _ = restoration.unsupervised_wiener(astro, psf)
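# For comparison, the classical (supervised) Wiener filter described above can
# be run with a hand-tuned regularisation parameter; the balance value here is
# an illustrative guess, not a recommended setting:
# deconvolved_wiener = restoration.wiener(astro, psf, balance=0.1)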
fig, ax = plt.subplots(nrows=1, ncols=2, figsize=(8, 5))
plt.gray()
ax[0].imshow(astro, vmin=deconvolved.min(), vmax=deconvolved.max())
ax[0].axis('off')
ax[0].set_title('Data')
ax[1].imshow(deconvolved)
ax[1].axis('off')
ax[1].set_title('Self tuned restoration')
fig.subplots_adjust(wspace=0.02, hspace=0.2,
top=0.9, bottom=0.05, left=0, right=1)
plt.show()
| bsd-3-clause |
vibhorag/scikit-learn | sklearn/neural_network/tests/test_rbm.py | 225 | 6278 | import sys
import re
import numpy as np
from scipy.sparse import csc_matrix, csr_matrix, lil_matrix
from sklearn.utils.testing import (assert_almost_equal, assert_array_equal,
assert_true)
from sklearn.datasets import load_digits
from sklearn.externals.six.moves import cStringIO as StringIO
from sklearn.neural_network import BernoulliRBM
from sklearn.utils.validation import assert_all_finite
np.seterr(all='warn')
Xdigits = load_digits().data
Xdigits -= Xdigits.min()
Xdigits /= Xdigits.max()
def test_fit():
X = Xdigits.copy()
rbm = BernoulliRBM(n_components=64, learning_rate=0.1,
batch_size=10, n_iter=7, random_state=9)
rbm.fit(X)
assert_almost_equal(rbm.score_samples(X).mean(), -21., decimal=0)
# in-place tricks shouldn't have modified X
assert_array_equal(X, Xdigits)
def test_partial_fit():
X = Xdigits.copy()
rbm = BernoulliRBM(n_components=64, learning_rate=0.1,
batch_size=20, random_state=9)
n_samples = X.shape[0]
n_batches = int(np.ceil(float(n_samples) / rbm.batch_size))
batch_slices = np.array_split(X, n_batches)
for i in range(7):
for batch in batch_slices:
rbm.partial_fit(batch)
assert_almost_equal(rbm.score_samples(X).mean(), -21., decimal=0)
assert_array_equal(X, Xdigits)
def test_transform():
X = Xdigits[:100]
rbm1 = BernoulliRBM(n_components=16, batch_size=5,
n_iter=5, random_state=42)
rbm1.fit(X)
Xt1 = rbm1.transform(X)
Xt2 = rbm1._mean_hiddens(X)
assert_array_equal(Xt1, Xt2)
def test_small_sparse():
# BernoulliRBM should work on small sparse matrices.
X = csr_matrix(Xdigits[:4])
BernoulliRBM().fit(X) # no exception
def test_small_sparse_partial_fit():
for sparse in [csc_matrix, csr_matrix]:
X_sparse = sparse(Xdigits[:100])
X = Xdigits[:100].copy()
rbm1 = BernoulliRBM(n_components=64, learning_rate=0.1,
batch_size=10, random_state=9)
rbm2 = BernoulliRBM(n_components=64, learning_rate=0.1,
batch_size=10, random_state=9)
rbm1.partial_fit(X_sparse)
rbm2.partial_fit(X)
assert_almost_equal(rbm1.score_samples(X).mean(),
rbm2.score_samples(X).mean(),
decimal=0)
def test_sample_hiddens():
rng = np.random.RandomState(0)
X = Xdigits[:100]
rbm1 = BernoulliRBM(n_components=2, batch_size=5,
n_iter=5, random_state=42)
rbm1.fit(X)
h = rbm1._mean_hiddens(X[0])
hs = np.mean([rbm1._sample_hiddens(X[0], rng) for i in range(100)], 0)
assert_almost_equal(h, hs, decimal=1)
def test_fit_gibbs():
# Gibbs on the RBM hidden layer should be able to recreate [[0], [1]]
# from the same input
rng = np.random.RandomState(42)
X = np.array([[0.], [1.]])
rbm1 = BernoulliRBM(n_components=2, batch_size=2,
n_iter=42, random_state=rng)
# you need that much iters
rbm1.fit(X)
assert_almost_equal(rbm1.components_,
np.array([[0.02649814], [0.02009084]]), decimal=4)
assert_almost_equal(rbm1.gibbs(X), X)
return rbm1
def test_fit_gibbs_sparse():
# Gibbs on the RBM hidden layer should be able to recreate [[0], [1]] from
# the same input even when the input is sparse, and test against non-sparse
rbm1 = test_fit_gibbs()
rng = np.random.RandomState(42)
from scipy.sparse import csc_matrix
X = csc_matrix([[0.], [1.]])
rbm2 = BernoulliRBM(n_components=2, batch_size=2,
n_iter=42, random_state=rng)
rbm2.fit(X)
assert_almost_equal(rbm2.components_,
np.array([[0.02649814], [0.02009084]]), decimal=4)
assert_almost_equal(rbm2.gibbs(X), X.toarray())
assert_almost_equal(rbm1.components_, rbm2.components_)
def test_gibbs_smoke():
# Check if we don't get NaNs sampling the full digits dataset.
# Also check that sampling again will yield different results.
X = Xdigits
rbm1 = BernoulliRBM(n_components=42, batch_size=40,
n_iter=20, random_state=42)
rbm1.fit(X)
X_sampled = rbm1.gibbs(X)
assert_all_finite(X_sampled)
X_sampled2 = rbm1.gibbs(X)
assert_true(np.all((X_sampled != X_sampled2).max(axis=1)))
def test_score_samples():
# Test score_samples (pseudo-likelihood) method.
# Assert that pseudo-likelihood is computed without clipping.
# See Fabian's blog, http://bit.ly/1iYefRk
rng = np.random.RandomState(42)
X = np.vstack([np.zeros(1000), np.ones(1000)])
rbm1 = BernoulliRBM(n_components=10, batch_size=2,
n_iter=10, random_state=rng)
rbm1.fit(X)
assert_true((rbm1.score_samples(X) < -300).all())
# Sparse vs. dense should not affect the output. Also test sparse input
# validation.
rbm1.random_state = 42
d_score = rbm1.score_samples(X)
rbm1.random_state = 42
s_score = rbm1.score_samples(lil_matrix(X))
assert_almost_equal(d_score, s_score)
# Test numerical stability (#2785): would previously generate infinities
# and crash with an exception.
with np.errstate(under='ignore'):
rbm1.score_samples([np.arange(1000) * 100])
def test_rbm_verbose():
rbm = BernoulliRBM(n_iter=2, verbose=10)
old_stdout = sys.stdout
sys.stdout = StringIO()
try:
rbm.fit(Xdigits)
finally:
sys.stdout = old_stdout
def test_sparse_and_verbose():
# Make sure RBM works with sparse input when verbose=True
old_stdout = sys.stdout
sys.stdout = StringIO()
from scipy.sparse import csc_matrix
X = csc_matrix([[0.], [1.]])
rbm = BernoulliRBM(n_components=2, batch_size=2, n_iter=1,
random_state=42, verbose=True)
try:
rbm.fit(X)
s = sys.stdout.getvalue()
# make sure output is sound
assert_true(re.match(r"\[BernoulliRBM\] Iteration 1,"
r" pseudo-likelihood = -?(\d)+(\.\d+)?,"
r" time = (\d|\.)+s",
s))
finally:
sys.stdout = old_stdout
| bsd-3-clause |
micahhausler/pandashells | pandashells/test/p_plot_test.py | 7 | 1077 | #! /usr/bin/env python
from mock import patch, MagicMock
from unittest import TestCase
from pandashells.bin.p_plot import main
class MainTests(TestCase):
@patch('pandashells.bin.p_plot.argparse.ArgumentParser')
@patch('pandashells.bin.p_plot.arg_lib.add_args')
@patch('pandashells.bin.p_plot.io_lib.df_from_input')
@patch('pandashells.bin.p_plot.plot_lib.set_plot_styling')
@patch('pandashells.bin.p_plot.plot_lib.draw_xy_plot')
def test_plotting(
self, draw_xy_mock, set_plot_styling_mock, df_from_input_mock,
add_args_mock, ArgumentParserMock):
args = MagicMock()
parser = MagicMock(parse_args=MagicMock(return_value=args))
ArgumentParserMock.return_value = parser
df_from_input_mock.return_value = 'df'
main()
add_args_mock.assert_called_with(
parser, 'io_in', 'xy_plotting', 'decorating', 'example')
df_from_input_mock.assert_called_with(args)
set_plot_styling_mock.assert_called_with(args)
draw_xy_mock.assert_called_with(args, 'df')
| bsd-2-clause |
peterhogan/python | maze_wiki.py | 1 | 2851 | # Code by Erik Sweet and Bill Basener
import random
import numpy as np
from matplotlib import pyplot as plt
import matplotlib.cm as cm
num_rows = int(input("Rows: ")) # number of rows
num_cols = int(input("Columns: ")) # number of columns
M = np.zeros((num_rows,num_cols,5), dtype=np.uint8)
# The array M is going to hold the array information for each cell.
# The first four coordinates tell if walls exist on those sides
# and the fifth indicates if the cell has been visited in the search.
# M(LEFT, UP, RIGHT, DOWN, CHECK_IF_VISITED)
image = np.zeros((num_rows*10,num_cols*10), dtype=np.uint8)
# The array image is going to be the output image to display
# Set starting row and column
r = 0
c = 0
history = [(r,c)] # The history is the stack of cells visited so far; it is used to backtrack.
# Trace a path through the cells of the maze and open walls along the path.
# We do this with a while loop, repeating the loop until there is no history,
# which would mean we backtracked to the initial start.
while history:
M[r,c,4] = 1 # designate this location as visited
# check if the adjacent cells are valid for moving to
check = []
if c > 0 and M[r,c-1,4] == 0:
check.append('L')
if r > 0 and M[r-1,c,4] == 0:
check.append('U')
if c < num_cols-1 and M[r,c+1,4] == 0:
check.append('R')
if r < num_rows-1 and M[r+1,c,4] == 0:
check.append('D')
if len(check): # If there is a valid cell to move to.
# Mark the walls between cells as open if we move
history.append([r,c])
move_direction = random.choice(check)
if move_direction == 'L':
M[r,c,0] = 1
c = c-1
M[r,c,2] = 1
if move_direction == 'U':
M[r,c,1] = 1
r = r-1
M[r,c,3] = 1
if move_direction == 'R':
M[r,c,2] = 1
c = c+1
M[r,c,0] = 1
if move_direction == 'D':
M[r,c,3] = 1
r = r+1
M[r,c,1] = 1
else: # If there are no valid cells to move to.
# retrace one step back in history if no move is possible
r,c = history.pop()
# Open the walls at the start and finish
M[0,0,0] = 1
M[num_rows-1,num_cols-1,2] = 1
# Generate the image for display
for row in range(0,num_rows):
for col in range(0,num_cols):
cell_data = M[row,col]
for i in range(10*row+1,10*row+9):
image[i,range(10*col+1,10*col+9)] = 255
if cell_data[0] == 1:image[range(10*row+1,10*row+9),10*col] = 255
if cell_data[1] == 1:image[10*row,range(10*col+1,10*col+9)] = 255
if cell_data[2] == 1:image[range(10*row+1,10*row+9),10*col+9] = 255
if cell_data[3] == 1:image[10*row+9,range(10*col+1,10*col+9)] = 255
# Display the image
plt.imshow(image, cmap = cm.Greys_r, interpolation='none')
plt.show()
| mit |
nipy/nireg | scripts/scripting.py | 3 | 6418 | #!/usr/bin/env python
# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*-
# vi: set ft=python sts=4 ts=4 sw=4 et:
"""
A scripting wrapper around 4D registration (SpaceTimeRealign)
"""
from __future__ import absolute_import
import os
import os.path as op
import numpy as np
import numpy.linalg as npl
import nibabel as nib
from nibabel.filename_parser import splitext_addext
import nibabel.eulerangles as euler
from nibabel.optpkg import optional_package
matplotlib, HAVE_MPL, _ = optional_package('matplotlib')
if HAVE_MPL:
import matplotlib.pyplot as plt
from .groupwise_registration import SpaceTimeRealign
import nipy.algorithms.slicetiming as st
from nipy.io.api import save_image
timefuncs = st.timefuncs.SLICETIME_FUNCTIONS
__all__ = ["space_time_realign", "aff2euler"]
def aff2euler(affine):
"""
Compute Euler angles from 4 x 4 `affine`
Parameters
----------
affine : 4 by 4 array
An affine transformation matrix
Returns
-------
The Euler angles associated with the affine
"""
return euler.mat2euler(aff2rot_zooms(affine)[0])
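# Small self-consistency sketch (angles chosen arbitrarily): build an affine
# from a known rotation and isotropic 2 mm zooms, then recover the angles.
# R = euler.euler2mat(0.3, 0.2, 0.1)
# aff = np.eye(4)
# aff[:3, :3] = R.dot(np.diag([2.0, 2.0, 2.0]))
# np.allclose(aff2euler(aff), (0.3, 0.2, 0.1))  # expected to hold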
def aff2rot_zooms(affine):
"""
Compute a rotation matrix and zooms from 4 x 4 `affine`
Parameters
----------
affine : 4 by 4 array
An affine transformation matrix
Returns
-------
R: 3 by 3 array
A rotation matrix in 3D
zooms: length 3 1-d array
Vector with voxel sizes.
"""
RZS = affine[:3, :3]
zooms = np.sqrt(np.sum(RZS * RZS, axis=0))
RS = RZS / zooms
# Adjust zooms to make RS correspond (below) to a true
# rotation matrix.
if npl.det(RS) < 0:
zooms[0] *= -1
RS[:,0] *= -1
# retrieve rotation matrix from RS with polar decomposition.
# Discard shears
P, S, Qs = npl.svd(RS)
R = np.dot(P, Qs)
return R, zooms
def space_time_realign(input, tr, slice_order='descending', slice_dim=2,
slice_dir=1, apply=True, make_figure=False,
out_name=None):
"""
This is a scripting interface to `nipy.algorithms.registration.SpaceTimeRealign`
Parameters
----------
input : str or list
A full path to a file-name (4D nifti time-series), or to a directory
containing 4D nifti time-series, or a list of full-paths to files.
tr : float
The repetition time
slice_order : str (optional)
This is the order of slice-times in the acquisition. This is used as a
key into the ``SLICETIME_FUNCTIONS`` dictionary from
:mod:`nipy.algorithms.slicetiming.timefuncs`. Default: 'descending'.
slice_dim : int (optional)
Denotes the axis in `images` that is the slice axis. In a 4D image,
this will often be axis = 2 (default).
slice_dir : int (optional)
1 if the slices were acquired slice 0 first (default), slice -1 last,
or -1 if acquire slice -1 first, slice 0 last.
apply : bool (optional)
Whether to apply the transformation and produce an output. Default:
True.
make_figure : bool (optional)
Whether to generate a .png figure with the parameters across scans.
out_name : str (optional)
Specify an output location (full path) for the files that are
generated. Default: generate files in the path of the inputs (with an
`_mc` suffix added to the file-names.
Returns
-------
transforms : ndarray
An (n_times_points,) shaped array containing
`nipy.algorithms.registration.affine.Rigid` class instances for each time
point in the time-series. These can be used as affine transforms by
referring to their `.as_affine` attribute.
"""
if make_figure:
if not HAVE_MPL:
e_s ="You need to have matplotlib installed to run this function"
e_s += " with `make_figure` set to `True`"
raise RuntimeError(e_s)
# If we got only a single file, we motion correct that one:
if op.isfile(input):
if not (input.endswith('.nii') or input.endswith('.nii.gz')):
e_s = "Input needs to be a nifti file ('.nii' or '.nii.gz'"
raise ValueError(e_s)
fnames = [input]
input = nib.load(input)
# If this is a full-path to a directory containing files, it's still a
# string:
elif isinstance(input, str):
list_of_files = os.listdir(input)
fnames = [op.join(input, f) for f in np.sort(list_of_files)
if (f.endswith('.nii') or f.endswith('.nii.gz')) ]
input = [nib.load(x) for x in fnames]
# Assume that it's a list of full-paths to files:
else:
input = [nib.load(x) for x in input]
slice_times = timefuncs[slice_order]
slice_info = [slice_dim,
slice_dir]
reggy = SpaceTimeRealign(input,
tr,
slice_times,
slice_info)
reggy.estimate(align_runs=True)
# We now have the transformation parameters in here:
transforms = np.squeeze(np.array(reggy._transforms))
rot = np.array([t.rotation for t in transforms])
trans = np.array([t.translation for t in transforms])
if apply:
new_reggy = reggy.resample(align_runs=True)
for run_idx, new_im in enumerate(new_reggy):
# Fix output TR - it was probably lost in the image realign step
assert new_im.affine.shape == (5, 5)
new_im.affine[:] = new_im.affine.dot(np.diag([1, 1, 1, tr, 1]))
# Save it out to a '.nii.gz' file:
froot, ext, trail_ext = splitext_addext(fnames[run_idx])
path, fname = op.split(froot)
# We retain the file-name adding '_mc' regardless of where it's
# saved
new_path = path if out_name is None else out_name
save_image(new_im, op.join(new_path, fname + '_mc.nii.gz'))
if make_figure:
figure, ax = plt.subplots(2)
figure.set_size_inches([8, 6])
ax[0].plot(rot)
ax[0].set_xlabel('Time (TR)')
ax[0].set_ylabel('Rotation (radians)')
ax[1].plot(trans)
ax[1].set_xlabel('Time (TR)')
ax[1].set_ylabel('Translation (mm)')
figure.savefig(op.join(os.path.split(fnames[0])[0],
'mc_params.png'))
return transforms
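# Hypothetical usage sketch (the file name is a placeholder and the slice
# order must match the actual acquisition):
# transforms = space_time_realign('run1.nii.gz', tr=2.0,
#                                 slice_order='descending', make_figure=False)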
| bsd-3-clause |
juharris/tensorflow | tensorflow/examples/skflow/mnist.py | 8 | 3167 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""This showcases how simple it is to build image classification networks.
It follows description from this TensorFlow tutorial:
https://www.tensorflow.org/versions/master/tutorials/mnist/pros/index.html#deep-mnist-for-experts
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from sklearn import metrics
import tensorflow as tf
from tensorflow.contrib import learn
### Download and load MNIST data.
mnist = learn.datasets.load_dataset('mnist')
### Linear classifier.
feature_columns = learn.infer_real_valued_columns_from_input(mnist.train.images)
classifier = learn.LinearClassifier(
feature_columns=feature_columns, n_classes=10)
classifier.fit(mnist.train.images, mnist.train.labels, batch_size=100,
steps=1000)
score = metrics.accuracy_score(
mnist.test.labels, classifier.predict(mnist.test.images))
print('Accuracy: {0:f}'.format(score))
### Convolutional network
def max_pool_2x2(tensor_in):
return tf.nn.max_pool(
tensor_in, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding='SAME')
def conv_model(X, y):
# pylint: disable=invalid-name,missing-docstring
# reshape X to 4d tensor with 2nd and 3rd dimensions being image width and
# height final dimension being the number of color channels.
X = tf.reshape(X, [-1, 28, 28, 1])
# first conv layer will compute 32 features for each 5x5 patch
with tf.variable_scope('conv_layer1'):
h_conv1 = learn.ops.conv2d(X, n_filters=32, filter_shape=[5, 5],
bias=True, activation=tf.nn.relu)
h_pool1 = max_pool_2x2(h_conv1)
# second conv layer will compute 64 features for each 5x5 patch.
with tf.variable_scope('conv_layer2'):
h_conv2 = learn.ops.conv2d(h_pool1, n_filters=64, filter_shape=[5, 5],
bias=True, activation=tf.nn.relu)
h_pool2 = max_pool_2x2(h_conv2)
# reshape tensor into a batch of vectors
h_pool2_flat = tf.reshape(h_pool2, [-1, 7 * 7 * 64])
# densely connected layer with 1024 neurons.
h_fc1 = learn.ops.dnn(
h_pool2_flat, [1024], activation=tf.nn.relu, dropout=0.5)
return learn.models.logistic_regression(h_fc1, y)
# Training and predicting.
classifier = learn.TensorFlowEstimator(
model_fn=conv_model, n_classes=10, batch_size=100, steps=20000,
learning_rate=0.001)
classifier.fit(mnist.train.images, mnist.train.labels)
score = metrics.accuracy_score(
mnist.test.labels, classifier.predict(mnist.test.images))
print('Accuracy: {0:f}'.format(score))
| apache-2.0 |
matthiasdiener/spack | var/spack/repos/builtin/packages/flit/package.py | 5 | 2269 | ##############################################################################
# Copyright (c) 2013-2018, Lawrence Livermore National Security, LLC.
# Produced at the Lawrence Livermore National Laboratory.
#
# This file is part of Spack.
# Created by Todd Gamblin, [email protected], All rights reserved.
# LLNL-CODE-647188
#
# For details, see https://github.com/spack/spack
# Please also see the NOTICE and LICENSE files for our notice and the LGPL.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License (as
# published by the Free Software Foundation) version 2.1, February 1999.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the IMPLIED WARRANTY OF
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the terms and
# conditions of the GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
##############################################################################
from spack import *
class Flit(MakefilePackage):
"""Floating-point Litmus Tests (FLiT) is a C++ test infrastructure for
detecting variability in floating-point code caused by variations in
compiler code generation, hardware and execution environments."""
homepage = "https://pruners.github.io/flit"
url = "https://github.com/PRUNERS/FLiT"
url = "https://github.com/PRUNERS/FLiT/archive/v2.0-alpha.1.tar.gz"
version('2.0-alpha.1', '62cf7784bcdc15b962c813b11e478159')
# FIXME: fix install and build to handle the old version, which is not
# installable
# version('1.0.0', '27763c89b044c5e3cfe62dd319a36a2b')
conflicts("@:1.999", msg="Only can build version 2.0 and up")
# Add dependencies
depends_on('python@3:', type='run')
depends_on('py-numpy', type='run')
depends_on('py-matplotlib tk=False', type='run')
depends_on('py-toml', type='run')
@property
def install_targets(self):
return ['install', 'PREFIX=%s' % self.prefix]
| lgpl-2.1 |
karoraw1/GLM_Wrapper | OTU_Time_Series/rarefy.py | 1 | 1197 | #!/usr/bin/env python2
# -*- coding: utf-8 -*-
"""
Created on Wed Apr 12 11:38:23 2017
@author: login
"""
import pandas as pd
import numpy as np
def abund_vec_to_list(sp_ser):
sp_list = []
for idx in sp_ser.index:
sp_count = sp_ser.ix[idx]
for cnt in range(int(sp_count)):
sp_list.append(idx)
return np.array(sp_list)
def rarefy_table(otu_table):
"""
Rarefies an OTU table (pandas DataFrame) to the minimum library size
"""
lib_size = int(otu_table.sum(axis=1).min())
print "Minimum library size is {}".format(lib_size)
data_ = np.zeros(otu_table.shape)
rarefiedTable = pd.DataFrame(data = data_, columns=otu_table.columns,
index=otu_table.index)
for row in otu_table.index:
def add_cnt_to_col(c, n):
rarefiedTable.ix[row, c] = n
return None
sp_ser = otu_table.ix[row, :]
sp_pool = abund_vec_to_list(sp_ser)
shrunk_pool = np.random.choice(sp_pool, size=(1, lib_size), replace=False)
cols, cnts = np.unique(shrunk_pool, return_counts=True)
map(add_cnt_to_col, cols, cnts)
return rarefiedTable
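# Illustrative use with a tiny made-up table (two samples, three OTUs); real
# tables come from the rest of the pipeline:
# toy = pd.DataFrame([[5, 3, 2], [10, 0, 10]],
#                    index=['s1', 's2'], columns=['otu1', 'otu2', 'otu3'])
# rarefy_table(toy)  # both rows resampled down to 10 counts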
| mit |
vkscool/nupic | external/linux32/lib/python2.6/site-packages/matplotlib/finance.py | 69 | 20558 | """
A collection of modules for collecting, analyzing and plotting
financial data. User contributions welcome!
"""
#from __future__ import division
import os, time, warnings
from urllib import urlopen
try:
from hashlib import md5
except ImportError:
from md5 import md5 #Deprecated in 2.5
try: import datetime
except ImportError:
raise ImportError('The finance module requires datetime support (python2.3)')
import numpy as np
from matplotlib import verbose, get_configdir
from dates import date2num
from matplotlib.cbook import Bunch
from matplotlib.collections import LineCollection, PolyCollection
from matplotlib.colors import colorConverter
from lines import Line2D, TICKLEFT, TICKRIGHT
from patches import Rectangle
from matplotlib.transforms import Affine2D
configdir = get_configdir()
cachedir = os.path.join(configdir, 'finance.cache')
def parse_yahoo_historical(fh, asobject=False, adjusted=True):
"""
Parse the historical data in file handle fh from yahoo finance and return
results as a list of
d, open, close, high, low, volume
    where d is a floating point representation of date, as returned by date2num
    if adjusted=True, use adjusted prices
"""
results = []
lines = fh.readlines()
datefmt = None
for line in lines[1:]:
vals = line.split(',')
if len(vals)!=7: continue
datestr = vals[0]
if datefmt is None:
try:
datefmt = '%Y-%m-%d'
dt = datetime.date(*time.strptime(datestr, datefmt)[:3])
except ValueError:
datefmt = '%d-%b-%y' # Old Yahoo--cached file?
dt = datetime.date(*time.strptime(datestr, datefmt)[:3])
d = date2num(dt)
open, high, low, close = [float(val) for val in vals[1:5]]
volume = int(vals[5])
if adjusted:
aclose = float(vals[6])
m = aclose/close
open *= m
high *= m
low *= m
close = aclose
results.append((d, open, close, high, low, volume))
results.reverse()
if asobject:
if len(results)==0: return None
else:
date, open, close, high, low, volume = map(np.asarray, zip(*results))
return Bunch(date=date, open=open, close=close, high=high, low=low, volume=volume)
else:
return results
def fetch_historical_yahoo(ticker, date1, date2, cachename=None):
"""
Fetch historical data for ticker between date1 and date2. date1 and
date2 are datetime instances
Ex:
fh = fetch_historical_yahoo('^GSPC', d1, d2)
cachename is the name of the local file cache. If None, will
    default to the md5 hash of the url (which incorporates the ticker
and date range)
a file handle is returned
"""
ticker = ticker.upper()
d1 = (date1.month-1, date1.day, date1.year)
d2 = (date2.month-1, date2.day, date2.year)
urlFmt = 'http://table.finance.yahoo.com/table.csv?a=%d&b=%d&c=%d&d=%d&e=%d&f=%d&s=%s&y=0&g=d&ignore=.csv'
url = urlFmt % (d1[0], d1[1], d1[2],
d2[0], d2[1], d2[2], ticker)
if cachename is None:
cachename = os.path.join(cachedir, md5(url).hexdigest())
if os.path.exists(cachename):
fh = file(cachename)
verbose.report('Using cachefile %s for %s'%(cachename, ticker))
else:
if not os.path.isdir(cachedir): os.mkdir(cachedir)
fh = file(cachename, 'w')
fh.write(urlopen(url).read())
fh.close()
verbose.report('Saved %s data to cache file %s'%(ticker, cachename))
fh = file(cachename, 'r')
return fh
def quotes_historical_yahoo(ticker, date1, date2, asobject=False, adjusted=True, cachename=None):
"""
Get historical data for ticker between date1 and date2. date1 and
date2 are datetime instances
results are a list of tuples
(d, open, close, high, low, volume)
    where d is a floating point representation of date, as returned by date2num
if asobject is True, the return val is an object with attrs date,
open, close, high, low, volume, which are equal length arrays
    if adjusted=True, use adjusted prices
Ex:
sp = f.quotes_historical_yahoo('^GSPC', d1, d2, asobject=True, adjusted=True)
returns = (sp.open[1:] - sp.open[:-1])/sp.open[1:]
[n,bins,patches] = hist(returns, 100)
mu = mean(returns)
sigma = std(returns)
x = normpdf(bins, mu, sigma)
plot(bins, x, color='red', lw=2)
cachename is the name of the local file cache. If None, will
    default to the md5 hash of the url (which incorporates the ticker
and date range)
"""
fh = fetch_historical_yahoo(ticker, date1, date2, cachename)
try: ret = parse_yahoo_historical(fh, asobject, adjusted)
except IOError, exc:
        warnings.warn('urlopen() failure\n' + exc.strerror[1])
return None
return ret
def plot_day_summary(ax, quotes, ticksize=3,
colorup='k', colordown='r',
):
"""
quotes is a list of (time, open, close, high, low, ...) tuples
Represent the time, open, close, high, low as a vertical line
ranging from low to high. The left tick is the open and the right
tick is the close.
time must be in float date format - see date2num
ax : an Axes instance to plot to
ticksize : open/close tick marker in points
colorup : the color of the lines where close >= open
colordown : the color of the lines where close < open
return value is a list of lines added
"""
lines = []
for q in quotes:
t, open, close, high, low = q[:5]
if close>=open : color = colorup
else : color = colordown
vline = Line2D(
xdata=(t, t), ydata=(low, high),
color=color,
antialiased=False, # no need to antialias vert lines
)
oline = Line2D(
xdata=(t, t), ydata=(open, open),
color=color,
antialiased=False,
marker=TICKLEFT,
markersize=ticksize,
)
cline = Line2D(
xdata=(t, t), ydata=(close, close),
color=color,
antialiased=False,
markersize=ticksize,
marker=TICKRIGHT)
lines.extend((vline, oline, cline))
ax.add_line(vline)
ax.add_line(oline)
ax.add_line(cline)
ax.autoscale_view()
return lines
def candlestick(ax, quotes, width=0.2, colorup='k', colordown='r',
alpha=1.0):
"""
quotes is a list of (time, open, close, high, low, ...) tuples.
As long as the first 5 elements of the tuples are these values,
the tuple can be as long as you want (eg it may store volume).
time must be in float days format - see date2num
Plot the time, open, close, high, low as a vertical line ranging
from low to high. Use a rectangular bar to represent the
open-close span. If close >= open, use colorup to color the bar,
otherwise use colordown
ax : an Axes instance to plot to
width : fraction of a day for the rectangle width
colorup : the color of the rectangle where close >= open
colordown : the color of the rectangle where close < open
alpha : the rectangle alpha level
return value is lines, patches where lines is a list of lines
added and patches is a list of the rectangle patches added
"""
OFFSET = width/2.0
lines = []
patches = []
for q in quotes:
t, open, close, high, low = q[:5]
if close>=open :
color = colorup
lower = open
height = close-open
else :
color = colordown
lower = close
height = open-close
vline = Line2D(
xdata=(t, t), ydata=(low, high),
color='k',
linewidth=0.5,
antialiased=True,
)
rect = Rectangle(
xy = (t-OFFSET, lower),
width = width,
height = height,
facecolor = color,
edgecolor = color,
)
rect.set_alpha(alpha)
lines.append(vline)
patches.append(rect)
ax.add_line(vline)
ax.add_patch(rect)
ax.autoscale_view()
return lines, patches
def plot_day_summary2(ax, opens, closes, highs, lows, ticksize=4,
colorup='k', colordown='r',
):
"""
Represent the time, open, close, high, low as a vertical line
ranging from low to high. The left tick is the open and the right
tick is the close.
ax : an Axes instance to plot to
ticksize : size of open and close ticks in points
colorup : the color of the lines where close >= open
colordown : the color of the lines where close < open
return value is a list of lines added
"""
# note this code assumes if any value open, close, low, high is
# missing they all are missing
rangeSegments = [ ((i, low), (i, high)) for i, low, high in zip(xrange(len(lows)), lows, highs) if low != -1 ]
# the ticks will be from ticksize to 0 in points at the origin and
# we'll translate these to the i, close location
openSegments = [ ((-ticksize, 0), (0, 0)) ]
# the ticks will be from 0 to ticksize in points at the origin and
# we'll translate these to the i, close location
closeSegments = [ ((0, 0), (ticksize, 0)) ]
offsetsOpen = [ (i, open) for i, open in zip(xrange(len(opens)), opens) if open != -1 ]
offsetsClose = [ (i, close) for i, close in zip(xrange(len(closes)), closes) if close != -1 ]
scale = ax.figure.dpi * (1.0/72.0)
tickTransform = Affine2D().scale(scale, 0.0)
r,g,b = colorConverter.to_rgb(colorup)
colorup = r,g,b,1
r,g,b = colorConverter.to_rgb(colordown)
colordown = r,g,b,1
colord = { True : colorup,
False : colordown,
}
colors = [colord[open<close] for open, close in zip(opens, closes) if open!=-1 and close !=-1]
assert(len(rangeSegments)==len(offsetsOpen))
assert(len(offsetsOpen)==len(offsetsClose))
assert(len(offsetsClose)==len(colors))
useAA = 0, # use tuple here
lw = 1, # and here
rangeCollection = LineCollection(rangeSegments,
colors = colors,
linewidths = lw,
antialiaseds = useAA,
)
openCollection = LineCollection(openSegments,
colors = colors,
antialiaseds = useAA,
linewidths = lw,
offsets = offsetsOpen,
transOffset = ax.transData,
)
openCollection.set_transform(tickTransform)
closeCollection = LineCollection(closeSegments,
colors = colors,
antialiaseds = useAA,
linewidths = lw,
offsets = offsetsClose,
transOffset = ax.transData,
)
closeCollection.set_transform(tickTransform)
minpy, maxx = (0, len(rangeSegments))
miny = min([low for low in lows if low !=-1])
maxy = max([high for high in highs if high != -1])
corners = (minpy, miny), (maxx, maxy)
ax.update_datalim(corners)
ax.autoscale_view()
# add these last
ax.add_collection(rangeCollection)
ax.add_collection(openCollection)
ax.add_collection(closeCollection)
return rangeCollection, openCollection, closeCollection
def candlestick2(ax, opens, closes, highs, lows, width=4,
colorup='k', colordown='r',
alpha=0.75,
):
"""
Represent the open, close as a bar line and high low range as a
vertical line.
ax : an Axes instance to plot to
width : the bar width in points
colorup : the color of the lines where close >= open
colordown : the color of the lines where close < open
alpha : bar transparency
return value is lineCollection, barCollection
"""
# note this code assumes if any value open, close, low, high is
# missing they all are missing
delta = width/2.
barVerts = [ ( (i-delta, open), (i-delta, close), (i+delta, close), (i+delta, open) ) for i, open, close in zip(xrange(len(opens)), opens, closes) if open != -1 and close!=-1 ]
rangeSegments = [ ((i, low), (i, high)) for i, low, high in zip(xrange(len(lows)), lows, highs) if low != -1 ]
r,g,b = colorConverter.to_rgb(colorup)
colorup = r,g,b,alpha
r,g,b = colorConverter.to_rgb(colordown)
colordown = r,g,b,alpha
colord = { True : colorup,
False : colordown,
}
colors = [colord[open<close] for open, close in zip(opens, closes) if open!=-1 and close !=-1]
assert(len(barVerts)==len(rangeSegments))
useAA = 0, # use tuple here
lw = 0.5, # and here
rangeCollection = LineCollection(rangeSegments,
colors = ( (0,0,0,1), ),
linewidths = lw,
antialiaseds = useAA,
)
barCollection = PolyCollection(barVerts,
facecolors = colors,
edgecolors = ( (0,0,0,1), ),
antialiaseds = useAA,
linewidths = lw,
)
minx, maxx = 0, len(rangeSegments)
miny = min([low for low in lows if low !=-1])
maxy = max([high for high in highs if high != -1])
corners = (minx, miny), (maxx, maxy)
ax.update_datalim(corners)
ax.autoscale_view()
# add these last
ax.add_collection(barCollection)
ax.add_collection(rangeCollection)
return rangeCollection, barCollection
def volume_overlay(ax, opens, closes, volumes,
colorup='k', colordown='r',
width=4, alpha=1.0):
"""
Add a volume overlay to the current axes. The opens and closes
are used to determine the color of the bar. -1 is missing. If a
value is missing on one it must be missing on all
ax : an Axes instance to plot to
width : the bar width in points
colorup : the color of the lines where close >= open
colordown : the color of the lines where close < open
alpha : bar transparency
"""
r,g,b = colorConverter.to_rgb(colorup)
colorup = r,g,b,alpha
r,g,b = colorConverter.to_rgb(colordown)
colordown = r,g,b,alpha
colord = { True : colorup,
False : colordown,
}
colors = [colord[open<close] for open, close in zip(opens, closes) if open!=-1 and close !=-1]
delta = width/2.
bars = [ ( (i-delta, 0), (i-delta, v), (i+delta, v), (i+delta, 0)) for i, v in enumerate(volumes) if v != -1 ]
barCollection = PolyCollection(bars,
facecolors = colors,
edgecolors = ( (0,0,0,1), ),
antialiaseds = (0,),
linewidths = (0.5,),
)
corners = (0, 0), (len(bars), max(volumes))
ax.update_datalim(corners)
ax.autoscale_view()
# add these last
return barCollection
def volume_overlay2(ax, closes, volumes,
colorup='k', colordown='r',
width=4, alpha=1.0):
"""
Add a volume overlay to the current axes. The closes are used to
determine the color of the bar. -1 is missing. If a value is
missing on one it must be missing on all
ax : an Axes instance to plot to
width : the bar width in points
colorup : the color of the lines where close >= open
colordown : the color of the lines where close < open
alpha : bar transparency
nb: first point is not displayed - it is used only for choosing the
right color
"""
return volume_overlay(ax,closes[:-1],closes[1:],volumes[1:],colorup,colordown,width,alpha)
def volume_overlay3(ax, quotes,
colorup='k', colordown='r',
width=4, alpha=1.0):
"""
Add a volume overlay to the current axes. quotes is a list of (d,
open, close, high, low, volume) and close-open is used to
determine the color of the bar
kwarg
width : the bar width in points
colorup : the color of the lines where close1 >= close0
colordown : the color of the lines where close1 < close0
alpha : bar transparency
"""
r,g,b = colorConverter.to_rgb(colorup)
colorup = r,g,b,alpha
r,g,b = colorConverter.to_rgb(colordown)
colordown = r,g,b,alpha
colord = { True : colorup,
False : colordown,
}
dates, opens, closes, highs, lows, volumes = zip(*quotes)
colors = [colord[close1>=close0] for close0, close1 in zip(closes[:-1], closes[1:]) if close0!=-1 and close1 !=-1]
colors.insert(0,colord[closes[0]>=opens[0]])
right = width/2.0
left = -width/2.0
bars = [ ( (left, 0), (left, volume), (right, volume), (right, 0)) for d, open, close, high, low, volume in quotes]
sx = ax.figure.dpi * (1.0/72.0) # scale for points
sy = ax.bbox.height / ax.viewLim.height
barTransform = Affine2D().scale(sx,sy)
dates = [d for d, open, close, high, low, volume in quotes]
offsetsBars = [(d, 0) for d in dates]
useAA = 0, # use tuple here
lw = 0.5, # and here
barCollection = PolyCollection(bars,
facecolors = colors,
edgecolors = ( (0,0,0,1), ),
antialiaseds = useAA,
linewidths = lw,
offsets = offsetsBars,
transOffset = ax.transData,
)
barCollection.set_transform(barTransform)
minpy, maxx = (min(dates), max(dates))
miny = 0
maxy = max([volume for d, open, close, high, low, volume in quotes])
corners = (minpy, miny), (maxx, maxy)
ax.update_datalim(corners)
#print 'datalim', ax.dataLim.get_bounds()
#print 'viewlim', ax.viewLim.get_bounds()
ax.add_collection(barCollection)
ax.autoscale_view()
return barCollection
def index_bar(ax, vals,
              facecolor='b', edgecolor='k',
width=4, alpha=1.0, ):
"""
Add a bar collection graph with height vals (-1 is missing).
ax : an Axes instance to plot to
width : the bar width in points
alpha : bar transparency
"""
facecolors = (colorConverter.to_rgba(facecolor, alpha),)
edgecolors = (colorConverter.to_rgba(edgecolor, alpha),)
right = width/2.0
left = -width/2.0
bars = [ ( (left, 0), (left, v), (right, v), (right, 0)) for v in vals if v != -1 ]
sx = ax.figure.dpi * (1.0/72.0) # scale for points
sy = ax.bbox.height / ax.viewLim.height
barTransform = Affine2D().scale(sx,sy)
offsetsBars = [ (i, 0) for i,v in enumerate(vals) if v != -1 ]
barCollection = PolyCollection(bars,
facecolors = facecolors,
edgecolors = edgecolors,
antialiaseds = (0,),
linewidths = (0.5,),
offsets = offsetsBars,
transOffset = ax.transData,
)
barCollection.set_transform(barTransform)
minpy, maxx = (0, len(offsetsBars))
miny = 0
maxy = max([v for v in vals if v!=-1])
corners = (minpy, miny), (maxx, maxy)
ax.update_datalim(corners)
ax.autoscale_view()
# add these last
ax.add_collection(barCollection)
return barCollection
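# A minimal, self-contained sketch of how candlestick() can be driven with
# hand-built quote tuples; the prices below are made up for illustration and
# are not part of the original module.
if __name__ == '__main__':
    import matplotlib.pyplot as plt
    demo_quotes = [
        (date2num(datetime.date(2009, 1, 5)), 10.0, 10.8, 11.0, 9.8),
        (date2num(datetime.date(2009, 1, 6)), 10.8, 10.5, 11.1, 10.3),
        (date2num(datetime.date(2009, 1, 7)), 10.5, 11.2, 11.4, 10.4),
    ]
    fig = plt.figure()
    ax = fig.add_subplot(111)
    # each tuple is (time, open, close, high, low), time as a date2num float
    candlestick(ax, demo_quotes, width=0.6)
    ax.xaxis_date()
    plt.show()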
| gpl-3.0 |
rishikksh20/scikit-learn | sklearn/feature_selection/__init__.py | 140 | 1302 | """
The :mod:`sklearn.feature_selection` module implements feature selection
algorithms. It currently includes univariate filter selection methods and the
recursive feature elimination algorithm.
"""
from .univariate_selection import chi2
from .univariate_selection import f_classif
from .univariate_selection import f_oneway
from .univariate_selection import f_regression
from .univariate_selection import SelectPercentile
from .univariate_selection import SelectKBest
from .univariate_selection import SelectFpr
from .univariate_selection import SelectFdr
from .univariate_selection import SelectFwe
from .univariate_selection import GenericUnivariateSelect
from .variance_threshold import VarianceThreshold
from .rfe import RFE
from .rfe import RFECV
from .from_model import SelectFromModel
from .mutual_info_ import mutual_info_regression, mutual_info_classif
__all__ = ['GenericUnivariateSelect',
'RFE',
'RFECV',
'SelectFdr',
'SelectFpr',
'SelectFwe',
'SelectKBest',
'SelectFromModel',
'SelectPercentile',
'VarianceThreshold',
'chi2',
'f_classif',
'f_oneway',
'f_regression',
'mutual_info_classif',
'mutual_info_regression']
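# A small illustrative sketch of the univariate selection API exposed here,
# using scikit-learn's bundled iris dataset (not part of the original file):
#
#   from sklearn.datasets import load_iris
#   from sklearn.feature_selection import SelectKBest, chi2
#   X, y = load_iris(return_X_y=True)
#   X_2best = SelectKBest(chi2, k=2).fit_transform(X, y)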
| bsd-3-clause |
florian-f/sklearn | examples/mixture/plot_gmm.py | 18 | 2796 | """
=================================
Gaussian Mixture Model Ellipsoids
=================================
Plot the confidence ellipsoids of a mixture of two Gaussians fitted with EM
and with a variational Dirichlet process.
Both models have access to five components with which to fit the
data. Note that the EM model will necessarily use all five components
while the DP model will effectively only use as many as are needed for
a good fit. This is a property of the Dirichlet process prior. Here we
can see that the EM model splits some components arbitrarily, because it
is trying to fit too many components, while the Dirichlet process model
adapts its number of states automatically.
This example doesn't show it, as we're in a low-dimensional space, but
another advantage of the Dirichlet process model is that it can fit
full covariance matrices effectively even when there are fewer examples
per cluster than there are dimensions in the data, due to the
regularization properties of the inference algorithm.
"""
import itertools
import numpy as np
from scipy import linalg
import pylab as pl
import matplotlib as mpl
from sklearn import mixture
# Number of samples per component
n_samples = 500
# Generate random sample, two components
np.random.seed(0)
C = np.array([[0., -0.1], [1.7, .4]])
X = np.r_[np.dot(np.random.randn(n_samples, 2), C),
.7 * np.random.randn(n_samples, 2) + np.array([-6, 3])]
# Fit a mixture of gaussians with EM using five components
gmm = mixture.GMM(n_components=5, covariance_type='full')
gmm.fit(X)
# Fit a dirichlet process mixture of gaussians using five components
dpgmm = mixture.DPGMM(n_components=5, covariance_type='full')
dpgmm.fit(X)
color_iter = itertools.cycle(['r', 'g', 'b', 'c', 'm'])
for i, (clf, title) in enumerate([(gmm, 'GMM'),
(dpgmm, 'Dirichlet Process GMM')]):
splot = pl.subplot(2, 1, 1 + i)
Y_ = clf.predict(X)
for i, (mean, covar, color) in enumerate(zip(
clf.means_, clf._get_covars(), color_iter)):
v, w = linalg.eigh(covar)
u = w[0] / linalg.norm(w[0])
# as the DP will not use every component it has access to
# unless it needs it, we shouldn't plot the redundant
# components.
if not np.any(Y_ == i):
continue
pl.scatter(X[Y_ == i, 0], X[Y_ == i, 1], .8, color=color)
# Plot an ellipse to show the Gaussian component
angle = np.arctan(u[1] / u[0])
angle = 180 * angle / np.pi # convert to degrees
ell = mpl.patches.Ellipse(mean, v[0], v[1], 180 + angle, color=color)
ell.set_clip_box(splot.bbox)
ell.set_alpha(0.5)
splot.add_artist(ell)
pl.xlim(-10, 10)
pl.ylim(-3, 6)
pl.xticks(())
pl.yticks(())
pl.title(title)
pl.show()
| bsd-3-clause |
asazo/ANN | tarea2/2_script.py | 1 | 10170 | # -*- coding: utf-8 -*-
import os
import numpy as np
import matplotlib.pyplot as plt
from keras.models import Sequential
from keras.layers import Dense, Activation
from keras.optimizers import SGD
from keras.utils import np_utils
def unpickle(file):
import cPickle
fo = open(file, 'rb')
dict = cPickle.load(fo)
fo.close()
return dict
def load_single_NORB_train_val(PATH, i, onlyx=False):
print "Cargando batch training set",i,"..."
f = os.path.join(PATH, 'data_batch_%d' % (i, ))
datadict = unpickle(f)
X = datadict['data'].T
Y = np.array(datadict['labels'])
Z = np.zeros((X.shape[0], X.shape[1] + 1))
Z[:,:-1] = X
Z[:, -1] = Y
np.random.shuffle(Z)
if onlyx:
Xtr = Z[5832:,0:-1]
return Xtr
else:
Xtr = Z[5832:,0:-1]
Ytr = Z[5832:,-1]
Xval = Z[:5832,0:-1]
Yval = Z[:5832,-1]
print "Cargado"
return Xtr, Ytr, Xval, Yval
def load_NORB_test(PATH):
print "Cargando testing set..."
xts = []
yts = []
for b in range(11, 13):
f = os.path.join(PATH, 'data_batch_%d' % (b, ))
datadict = unpickle(f)
X = datadict['data'].T
Y = np.array(datadict['labels'])
Z = np.zeros((X.shape[0], X.shape[1] + 1))
Z[:,:-1] = X
Z[:, -1] = Y
np.random.shuffle(Z)
xts.append(Z[0:,0:-1])
yts.append(Z[:,-1])
Xts = np.concatenate(xts)
Yts = np.concatenate(yts)
del xts,yts
print "Cargado."
return Xts, Yts
def scale_data(X, normalize=True, myrange=None):
from sklearn.preprocessing import MinMaxScaler, StandardScaler
if normalize and not myrange:
print "Normalizando data (mean 0, std 1)"
return StandardScaler().fit_transform(X)
elif isinstance(myrange, tuple):
print "Escalando data al rango", myrange
return X * (myrange[1] - myrange[0]) + myrange[0]
else:
return "Error mientras escalaba."
# Feed-forward MLP model
def get_ff_model(activation, n_classes):
model = Sequential()
model.add(Dense(4000, input_dim=2048, activation=activation))
model.add(Dense(2000, activation=activation))
model.add(Dense(n_classes, activation='softmax'))
sgd = SGD(lr=0.1, decay=0.0)
model.compile(optimizer=sgd,
loss='binary_crossentropy',
metrics=['accuracy'])
return model
# Define the number of classes
n_classes = 6
Xts, Yts = load_NORB_test(".")
Xts_scaled = scale_data(Xts)
Yts_class = np_utils.to_categorical(Yts.astype(int), n_classes)
# Experiment: test error as a function of theta (proportion of unsupervised data)
# Training on known percentages means iteratively advancing batch by batch...
accuracies = []
model = get_ff_model('relu', n_classes)
print "Metricas:",model.metrics_names
for i, theta in enumerate(np.linspace(0.1, 1, 10)):
print "Analizando theta =",theta
print "Utilizando",i+1,"batches de 10"
Xtr, Ytr, Xval, Yval = load_single_NORB_train_val(".", i+1)
n_tr = Xtr.shape[0]
    # Scale data and convert labels to categorical
print "Escalando data..."
Xtr_scaled = scale_data(Xtr)
Xval_scaled = scale_data(Xval)
print "Data escalada."
print "Pasando a data categorica para labels..."
Ytr_class = np_utils.to_categorical(Ytr.astype(int), n_classes)
Yval_class = np_utils.to_categorical(Yval.astype(int), n_classes)
print "Data categorizada."
model.fit(Xtr_scaled, Ytr_class, batch_size=10, validation_data=(Xval_scaled, Yval_class), nb_epoch=1)
print "Batch entrenado."
a = model.evaluate(Xts_scaled, Yts_class, batch_size=10, verbose=1)
print "Resultado:",a
accuracies.append(a)
print accuracies
from sklearn.neural_network import BernoulliRBM
from sklearn.externals import joblib
from keras.layers import Input, Dense
from keras.models import Model
from keras.optimizers import SGD
# Pre-train with RBMs
RBM1 = BernoulliRBM(n_components=512, batch_size=10,
learning_rate=0.01, verbose=1, n_iter=30)
RBM2 = BernoulliRBM(n_components=100, batch_size=10,
learning_rate=0.01, verbose=1, n_iter=30)
for i, rev_theta in enumerate(np.linspace(0.1, 1, 10)):
theta = 1 - rev_theta
print "Preentrenando modelo para theta=",theta
print "Leyendo batch",i+1
Xtr_ns = load_single_NORB_train_val(".", i+1, onlyx=True)
Xtr_ns = scale_data(Xtr_ns)
RBM1.partial_fit(Xtr_ns)
Xtr_ns2 = RBM1.transform(Xtr_ns)
print "..."
Xtr_ns2 = scale_data(Xtr_ns2)
RBM2.partial_fit(Xtr_ns2)
del Xtr_ns, Xtr_ns2
print "..."
joblib.dump(RBM1, "2/RBM1_512_"+str(theta)+".pkl")
joblib.dump(RBM2, "2/RBM2_512_"+str(theta)+".pkl")
# Train using the RBM pre-training
accuracies = []
activation = 'tanh'
for i, theta in enumerate(np.linspace(0.1, 1, 10)):
print "Analizando theta =",theta
if i != 9:
print "Cargando rbms"
RBM1 = joblib.load('2/RBM1_512_'+str(theta)+".pkl")
RBM2 = joblib.load('2/RBM2_512_'+str(theta)+".pkl")
model = Sequential()
model.add(Dense(512, input_dim=2048, activation=activation))
if i != 9:
print "seteando pesos 1"
model.layers[-1].set_weights([RBM1.components_.T, RBM1.intercept_hidden_])
model.add(Dense(100, activation=activation))
if i != 9:
print "seteando pesos 2"
model.layers[-1].set_weights([RBM2.components_.T, RBM2.intercept_hidden_])
model.add(Dense(n_classes, activation='softmax'))
sgd = SGD(lr=0.1, decay=0.0)
model.compile(optimizer=sgd,
loss='binary_crossentropy',
metrics=['accuracy'])
print "Entrenando..."
for n in range(2):
for k in range(0, i+1):
print "Leyendo batch",k
Xtr, Ytr, Xval, Yval = load_single_NORB_train_val(".", k+1)
            # Scale data and convert labels to categorical
print "Escalando data..."
Xtr_scaled = scale_data(Xtr)
Xval_scaled = scale_data(Xval)
print "Data escalada."
print "Pasando a data categorica para labels..."
Ytr_class = np_utils.to_categorical(Ytr.astype(int), n_classes)
Yval_class = np_utils.to_categorical(Yval.astype(int), n_classes)
print "Data categorizada."
model.fit(Xtr_scaled, Ytr_class, batch_size=10,
validation_data=(Xval_scaled, Yval_class), nb_epoch=1)
print "Batch entrenado."
a = model.evaluate(Xts_scaled, Yts_class, batch_size=10, verbose=1)
print "Resultado:",a
accuracies.append(a)
print accuracies
del Xtr, Ytr, Xval, Yval
print accuracies
# Pre-training with autoencoders (AE)
hidden_layer = 512
hidden_layer2 = 100
activation1 = 'relu'
activation2 = 'sigmoid'
input_img1 = Input(shape=(2048,))
encoded1 = Dense(hidden_layer, activation=activation1)(input_img1)
decoded1 = Dense(2048, activation=activation2)(encoded1)
autoencoder1 = Model(input=input_img1, output=decoded1)
encoder1 = Model(input=input_img1, output=encoded1)
autoencoder1.compile(optimizer=SGD(lr=0.001), loss='binary_crossentropy')
input_img2 = Input(shape=(hidden_layer,))
encoded2 = Dense(hidden_layer2, activation=activation2)(input_img2)
decoded2 = Dense(hidden_layer, activation=activation2)(encoded2)
autoencoder2 = Model(input=input_img2, output=decoded2)
encoder2 = Model(input=input_img2, output=encoded2)
autoencoder2.compile(optimizer=SGD(lr=0.001), loss='binary_crossentropy')
for i, rev_theta in enumerate(np.linspace(0.1, 0.9, 9)):
theta = 1 - rev_theta
print "Preentrenando modelo para theta=",theta
print "Leyendo batch",i+1
Xtr_ns, Ytr_ns, Xval_ns, Yval_ns = load_single_NORB_train_val(".", i+1)
Xtr_ns = scale_data(Xtr_ns)
Xval_ns = scale_data(Xval_ns)
autoencoder1.fit(Xtr_ns, Xtr_ns, nb_epoch=10, batch_size=250,shuffle=True, validation_data=(Xval_ns, Xval_ns))
autoencoder1.save('2/AE1_'+str(theta)+'.h5')
encoder1.save('2/E1_'+str(theta)+'.h5')
Xtr_ns_1 = encoder1.predict(Xtr_ns)
Xval_ns_1 = encoder1.predict(Xval_ns)
autoencoder2.fit(Xtr_ns_1,Xtr_ns_1,nb_epoch=10,batch_size=250, shuffle=True, validation_data=(Xval_ns_1, Xval_ns_1))
autoencoder2.save('2/AE2_'+str(theta)+'.h5')
encoder2.save('2/E2_'+str(theta)+'.h5')
accuracies = []
activation = 'tanh'
from keras.models import load_model
for i, theta in enumerate(np.linspace(0.1, 1, 10)):
print "Analizando theta =",theta
if i != 9:
AE1 = load_model('2/AEpretraining512/AE1_'+str(theta)+".h5")
AE2 = load_model('2/AEpretraining512/AE2_'+str(theta)+".h5")
model = Sequential()
model.add(Dense(hidden_layer, input_dim=2048, activation=activation))
if i != 9:
print "setear pesos 1"
model.layers[-1].set_weights(AE1.layers[1].get_weights())
model.add(Dense(hidden_layer2, activation=activation))
if i != 9:
print "setear pesos 2"
model.layers[-1].set_weights(AE2.layers[1].get_weights())
model.add(Dense(n_classes, activation='softmax'))
sgd = SGD(lr=0.1, decay=0.0)
model.compile(optimizer=sgd,
loss='binary_crossentropy',
metrics=['accuracy'])
print "Entrenando..."
for n in range(2):
for k in range(0, i+1):
print "Leyendo batch",k
Xtr, Ytr, Xval, Yval = load_single_NORB_train_val(".", k+1)
            # Scale data and convert labels to categorical
print "Escalando data..."
Xtr_scaled = scale_data(Xtr)
Xval_scaled = scale_data(Xval)
print "Data escalada."
print "Pasando a data categorica para labels..."
Ytr_class = np_utils.to_categorical(Ytr.astype(int), n_classes)
Yval_class = np_utils.to_categorical(Yval.astype(int), n_classes)
print "Data categorizada."
model.fit(Xtr_scaled, Ytr_class, batch_size=10,
validation_data=(Xval_scaled, Yval_class), nb_epoch=1)
print "Batch entrenado."
a = model.evaluate(Xts_scaled, Yts_class, batch_size=10, verbose=1)
print "Resultado:",a
accuracies.append(a)
print accuracies
del Xtr, Ytr, Xval, Yval
print accuracies
| mit |
bbfamily/abu | abupy/SimilarBu/ABuSimilar.py | 1 | 22856 | # -*- encoding:utf-8 -*-
"""
Correlation-coefficient similarity application module
"""
from __future__ import division
from __future__ import print_function
from __future__ import absolute_import
import functools
import logging
import math
import operator
import os
import pandas as pd
from . import ABuCorrcoef
from . import ABuSimilarDrawing
from .ABuCorrcoef import ECoreCorrType
from ..TradeBu import AbuBenchmark
from ..CoreBu import ABuEnv
from ..CoreBu.ABuParallel import delayed, Parallel
from ..CoreBu.ABuEnv import EMarketDataSplitMode, EMarketTargetType
from ..MarketBu import ABuSymbolPd
from ..MarketBu.ABuMarket import split_k_market, all_symbol
from ..MarketBu.ABuSymbol import IndexSymbol, Symbol
from ..UtilBu.ABuDTUtil import consume_time
from ..UtilBu.ABuProgress import do_clear_output
from ..CoreBu.ABuEnvProcess import add_process_env_sig, AbuEnvProcess
# noinspection PyUnresolvedReferences
from ..CoreBu.ABuFixes import xrange
from ..UtilBu import ABuProgress
"""进行相似度数据收集并行进程数,IO操作偏多,所以分配多个,默认=cpu个数*2, windows还是..."""
g_process_panel_cnt = ABuEnv.g_cpu_cnt * 2 if ABuEnv.g_is_mac_os else ABuEnv.g_cpu_cnt
def from_local(func):
"""
    All similarity applications now default to from_local mode, i.e. they are expected to run on data that is
    already available locally.
    Decorator for functions that collect similarity data: it ignores the data fetch mode configured in env and
    collects data using the local cache only; once the whole task is finished the previous fetch mode is restored
    :param func: a similarity-application function that performs data collection
"""
@functools.wraps(func)
def wrapper(*args, **kwargs):
        # Temporarily save g_data_fetch_mode from the env settings
fetch_mode = ABuEnv.g_data_fetch_mode
        # Switch the data fetch mode to force local cache only
ABuEnv.g_data_fetch_mode = ABuEnv.EMarketDataFetchMode.E_DATA_FETCH_FORCE_LOCAL
if fetch_mode != ABuEnv.EMarketDataFetchMode.E_DATA_FETCH_FORCE_LOCAL:
            # If the original setting was not local-cache-only, emit a warning
logging.warning('data from local. run ABu.run_kl_update if you want to get the latest data.')
result = func(*args, **kwargs)
        # Restore the previous g_data_fetch_mode
ABuEnv.g_data_fetch_mode = fetch_mode
return result
return wrapper
def from_net(func):
"""
    Decorator for functions that collect similarity data: it ignores the data fetch mode configured in env and
    collects data from the network only; once the whole task is finished the previous fetch mode is restored
    :param func: a similarity-application function that performs data collection
"""
@functools.wraps(func)
def wrapper(*args, **kwargs):
        # Temporarily save g_data_fetch_mode from the env settings
fetch_mode = ABuEnv.g_data_fetch_mode
        # Switch the data fetch mode to force network fetching
ABuEnv.g_data_fetch_mode = ABuEnv.EMarketDataFetchMode.E_DATA_FETCH_FORCE_NET
if fetch_mode != ABuEnv.EMarketDataFetchMode.E_DATA_FETCH_FORCE_NET:
            # If the original setting was not net-only, emit a warning
logging.warning('data from net!!!')
result = func(*args, **kwargs)
        # Restore the previous g_data_fetch_mode
ABuEnv.g_data_fetch_mode = fetch_mode
return result
return wrapper
@from_local
def _find_similar(symbol, cmp_cnt=None, n_folds=2, start=None, end=None, show_cnt=None, rolling=False,
show=True, corr_type=ECoreCorrType.E_CORE_TYPE_PEARS):
"""
    Decorated with from_local, i.e. local data is forced. Builds the pd.DataFrame of price-change percentages
    for all market symbols, compares the target symbol's changes against the whole market by correlation, and
    visualizes the results and information.
    :param symbol: target symbol specified by the caller, str object
    :param cmp_cnt: number of trading days used in the correlation comparison, int, optional
    :param n_folds: number of years used in the correlation comparison, int, optional
    :param start: requested start date, str object, optional
    :param end: requested end date, str object, optional
    :param show_cnt: number of top similar symbols to report and visualize
    :param rolling: whether to use time-weighted correlation; same effect as corr_type=ECoreCorrType.E_CORE_TYPE_ROLLING, kept as a separate flag
    :param show: whether to visualize the top most correlated stocks
    :param corr_type: ECoreCorrType object; currently supports Pearson, Spearman, +/- sign correlation and rolling time-weighted correlation
"""
if isinstance(symbol, Symbol):
        # If a Symbol object was passed in, use its value
symbol = symbol.value
    # Get the pd.DataFrame of price-change percentages for all market symbols
market_change_df = _all_market_cg(symbol, cmp_cnt=cmp_cnt, n_folds=n_folds, start=start, end=end)
if market_change_df is None:
        logging.info('{} data is missing, please update data first!'.format(symbol))
return
    # Reset cmp_cnt to the actual number of trading days available in the data
cmp_cnt = market_change_df[symbol].shape[0]
    # Price-change data of the target symbol
benchmark_df = market_change_df[symbol]
    # Clear the output, it gets too cluttered
do_clear_output()
    # Compare the symbol's changes with the whole market's changes, visualize the results and information
sorted_corr = _handle_market_change_df(market_change_df, cmp_cnt, benchmark_df, show_cnt,
corr_type, rolling, show)
return sorted_corr
def find_similar_with_se(symbol, start, end, show_cnt=10, rolling=False, show=True,
corr_type=ECoreCorrType.E_CORE_TYPE_PEARS):
"""
    Wraps _find_similar, providing it with a time-range rule fixed by the start and end parameters.
    :param symbol: target symbol specified by the caller, str object
    :param start: requested start date, str object
    :param end: requested end date, str object
    :param show_cnt: number of top similar symbols to report and visualize
    :param rolling: whether to use time-weighted correlation; same effect as corr_type=ECoreCorrType.E_CORE_TYPE_ROLLING, kept as a separate flag
    :param show: whether to visualize the top most correlated stocks
    :param corr_type: ECoreCorrType object; currently supports Pearson, Spearman, +/- sign correlation and rolling time-weighted correlation
:return:
"""
return _find_similar(symbol, start=start, end=end, show_cnt=show_cnt, rolling=rolling, show=show,
corr_type=corr_type)
def find_similar_with_folds(symbol, n_folds=2, show_cnt=10, rolling=False, show=True,
corr_type=ECoreCorrType.E_CORE_TYPE_PEARS):
"""
    Wraps _find_similar, providing it with a time-range rule fixed by the n_folds parameter.
    :param symbol: target symbol specified by the caller, str object
    :param n_folds: number of years used in the correlation comparison, int
    :param show_cnt: number of top similar symbols to report and visualize
    :param rolling: whether to use time-weighted correlation; same effect as corr_type=ECoreCorrType.E_CORE_TYPE_ROLLING, kept as a separate flag
    :param show: whether to visualize the top most correlated stocks
    :param corr_type: ECoreCorrType object; currently supports Pearson, Spearman, +/- sign correlation and rolling time-weighted correlation
:return:
"""
return _find_similar(symbol, n_folds=n_folds, show_cnt=show_cnt, rolling=rolling, show=show,
corr_type=corr_type)
def find_similar_with_cnt(symbol, cmp_cnt=60, show_cnt=10, rolling=False, show=True,
corr_type=ECoreCorrType.E_CORE_TYPE_PEARS):
"""
    Wraps _find_similar, providing it with a time-range rule fixed by the cmp_cnt parameter.
    :param symbol: target symbol specified by the caller, str object
    :param cmp_cnt: number of trading days used in the correlation comparison, int
    :param show_cnt: number of top similar symbols to report and visualize
    :param rolling: whether to use time-weighted correlation; same effect as corr_type=ECoreCorrType.E_CORE_TYPE_ROLLING, kept as a separate flag
    :param show: whether to visualize the top most correlated stocks
    :param corr_type: ECoreCorrType object; currently supports Pearson, Spearman, +/- sign correlation and rolling time-weighted correlation
:return:
"""
return _find_similar(symbol, cmp_cnt=cmp_cnt, show_cnt=show_cnt, rolling=rolling, show=show,
corr_type=corr_type)
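# A minimal usage sketch of the three entry points above (illustrative only;
# 'usTSLA' is just an example symbol and assumes its market data is already
# cached locally, e.g. via ABu.run_kl_update):
#
#   find_similar_with_cnt('usTSLA', cmp_cnt=60, show_cnt=10)
#   find_similar_with_folds('usTSLA', n_folds=2, show_cnt=10, rolling=True)
#   find_similar_with_se('usTSLA', start='2015-07-27', end='2016-07-26',
#                        corr_type=ECoreCorrType.E_CORE_TYPE_PEARS)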
@add_process_env_sig
def _make_symbols_cg_df(symbols, benchmark):
"""
    Correlation data collection delegated to a child process: the child process uses make_kl_df to collect the
    financial data for the symbols assigned by the parent process and finally returns a pd.DataFrame with the
    price-change percentages of all the collected financial time series.
    :param symbols: iterable sequence of symbols, each element a str object
    :param benchmark: benchmark object used for data collection, determines the time range, AbuBenchmark instance
    :return: pd.DataFrame of price-change percentages of all collected financial time series
"""
    # Financial data collection inside the child process; since this already runs in a child process, make_kl_df is not used in parallel mode here, the caller handles task splitting and the number of tasks
panel = ABuSymbolPd.make_kl_df(symbols, data_mode=EMarketDataSplitMode.E_DATA_SPLIT_UNDO, benchmark=benchmark,
show_progress=True)
if panel is None or panel.empty:
logging.info('pid {} panel is None'.format(os.getpid()))
return None
    # Swap the panel axes so that a given column of all the financial time series can be accessed easily
panel = panel.swapaxes('items', 'minor')
net_cg_df = panel['p_change'].fillna(value=0)
"""
    After swapping the axes, p_change can be taken directly, i.e. the pd.DataFrame of price-change percentages of all financial time series, shaped as follows:
usF usFCAU usGM usHMC usTM usTSLA usTTM
2015-06-25 -0.387 -0.517 -1.308 0.522 -0.391 1.365 -0.029
2015-06-26 -0.259 1.300 -0.922 0.366 0.437 -0.632 -0.229
2015-06-29 -2.468 -6.799 -3.345 -2.676 -2.222 -1.898 -2.550
2015-06-30 -0.067 0.000 0.301 1.250 0.982 2.381 1.353
2015-07-01 -0.133 0.688 -0.870 -1.605 -0.112 0.332 0.261
.................................................................
"""
return net_cg_df
def _make_benchmark_cg_df(symbol, benchmark):
"""
    Extract the p_change column from the benchmark's time series object kl_pd and return a new
    pd.DataFrame built from p_change whose column name is the symbol.
    :param symbol: benchmark symbol, str object
    :param benchmark: benchmark object used for data collection, determines the time range, AbuBenchmark instance
    :return: new pd.DataFrame built from the p_change column
"""
kl_pd = benchmark.kl_pd
net_cg_df = pd.DataFrame({symbol: kl_pd['p_change']}, index=kl_pd.index).fillna(value=0)
"""
    New pd.DataFrame built from p_change with the symbol as its column name, shaped as follows:
us.IXIC
2014-07-25 -0.50
2014-07-28 -0.10
2014-07-29 -0.05
2014-07-30 0.45
2014-07-31 -2.09
2014-08-01 -0.39
2014-08-04 0.72
"""
return net_cg_df
def _net_cg_df_create(symbol, benchmark):
"""
    Get all market symbols configured in env, split them among child processes and delegate the work to
    _make_symbols_cg_df; the price-change pd.DataFrame objects returned by the child processes are then
    concatenated into a single pd.DataFrame of price-change percentages covering the whole market.
    :param symbol: benchmark symbol, str object
    :param benchmark: benchmark object used for data collection, determines the time range, AbuBenchmark instance
    :return: pd.DataFrame of price-change percentages for all market symbols
"""
    # Get all market symbols; no market argument is passed, so all symbols of the market configured in env are used
choice_symbols = all_symbol()
    # Use split_k_market to split the market symbols into the number of tasks for the child processes
process_symbols = split_k_market(g_process_panel_cnt, market_symbols=choice_symbols)
    # Splitting can leave a remainder, so switch the originally configured process count to the number of splits, i.e. 32 -> 33, 16 -> 17
n_process_pick_stock = len(process_symbols)
parallel = Parallel(
n_jobs=n_process_pick_stock, verbose=0, pre_dispatch='2*n_jobs')
    # Temporarily turn off the multi-process progress bars (too many); note that this kind of global setting must be done before AbuEnvProcess is initialized
# ABuProgress.g_show_ui_progress = False
    # _make_symbols_cg_df is decorated with add_process_env_sig and needs AbuEnvProcess, the object whose memory is copied into each process; see AbuEnvProcess
p_nev = AbuEnvProcess()
change_df_array = parallel(
delayed(_make_symbols_cg_df)(choice_symbols, benchmark, env=p_nev) for choice_symbols in process_symbols)
# ABuProgress.g_show_ui_progress = True
    # Progress bars are still shown, but check when finished whether any ui progress bar is still alive
ABuProgress.do_check_process_is_dead()
"""
    If the benchmark's price changes are already in choice_symbols there is no need to fetch and assemble them
    separately. A case where they are not:
    eg. the market configured in env is Hong Kong, i.e. g_market_target = EMarketTargetType.E_MARKET_TARGET_HK, but
    the symbol passed in is an A-share stock, i.e. the goal is to analyse the correlation of that A-share stock
    against the whole Hong Kong market; in that situation _make_benchmark_cg_df is used
"""
change_df_concat = None if symbol in choice_symbols else _make_benchmark_cg_df(symbol, benchmark)
for change_df in change_df_array:
if change_df is not None:
            # Concatenate the price-change pd.DataFrame objects returned by all child processes
change_df_concat = change_df if change_df_concat is None else pd.concat([change_df, change_df_concat],
axis=1)
return change_df_concat
@consume_time
def _all_market_cg(symbol, cmp_cnt=None, n_folds=2, start=None, end=None):
"""
    Get the pd.DataFrame of price-change percentages for all market symbols.
    :param symbol: target symbol specified by the caller, str object
    :param cmp_cnt: number of trading days to compare, int, optional
    :param n_folds: number of years to compare, int, optional
    :param start: requested start date, str object, optional
    :param end: requested end date, str object, optional
    :return: pd.DataFrame of price-change percentages for all market symbols, shaped as follows:
e.g.
usA usAA usAAC
2015/7/27 0.76 -1.94 0.59
2015/7/28 2.12 2.6 1.3
2015/7/29 -0.12 2.94 -1.34
2015/7/30 1.41 -1.77 -4.04
2015/7/31 -0.05 -1.1 1.39
"""
if cmp_cnt is not None:
        # If the number of trading days to compare is given, derive n_folds from it, using ceil to round the number of years of data upward
n_folds = int(math.ceil(cmp_cnt / ABuEnv.g_market_trade_year))
    # The benchmark is not the market index symbol; the symbol passed in is used as the benchmark
benchmark = AbuBenchmark(benchmark=symbol, n_folds=n_folds, start=start, end=end, rs=False)
if benchmark.kl_pd is None or benchmark.kl_pd.empty:
logging.info('{} make benchmark get None'.format(symbol))
return None
if cmp_cnt is not None and benchmark.kl_pd.shape[0] > cmp_cnt:
        # Align the time series again according to the requested number of trading days
benchmark.kl_pd = benchmark.kl_pd.iloc[-cmp_cnt:]
    # With the symbol and benchmark ready, build all_market_change_df, the pd.DataFrame of price changes for all market symbols
all_market_change_df = _net_cg_df_create(symbol, benchmark)
return all_market_change_df
def _handle_market_change_df(market_change_df, cmp_cnt, benchmark_df, show_cnt, corr_type, rolling=True, show=True):
"""
    Compute correlation coefficients between benchmark_df and the market-wide market_change_df and visualize the results and information.
    :param market_change_df: pd.DataFrame of price-change percentages for all market symbols
    :param cmp_cnt: number of trading days to compare, int
    :param benchmark_df: pd.Series corresponding to the benchmark symbol
    :param show_cnt: number of top similar symbols to report and visualize
    :param corr_type: ECoreCorrType object; currently supports Pearson, Spearman, +/- sign correlation and rolling time-weighted correlation
    :param rolling: whether to use time-weighted correlation; same effect as corr_type = ECoreCorrType.E_CORE_TYPE_ROLLING, kept as a separate flag
    :param show: whether to visualize the top most correlated stocks
:return:
"""
    # Use [-cmp_cnt:] to fix the time-series window again
benchmark_df = benchmark_df.iloc[-cmp_cnt:]
market_change_df = market_change_df.iloc[-cmp_cnt:]
if corr_type == ECoreCorrType.E_CORE_TYPE_ROLLING:
        # Merge the time-weighted rolling parameter with the corr_type setting
rolling = True
if rolling:
        # Time-weighted correlation is always computed separately with ABuCorrcoef.rolling_corr, i.e. the two-argument form; see ABuCorrcoef.rolling_corr
corr_ret = ABuCorrcoef.rolling_corr(market_change_df, benchmark_df)
corr_ret = pd.Series(corr_ret, index=market_change_df.columns, name=benchmark_df.name)
else:
        # All other correlation types go through the full matrix: compute the correlation matrix and then take the column corresponding to benchmark_df
corr_ret = ABuCorrcoef.corr_matrix(market_change_df, corr_type)[benchmark_df.name]
    # Zip and sort the results from the most positive correlation to the most negative
sorted_ret = sorted(zip(corr_ret.index, corr_ret), key=operator.itemgetter(1), reverse=True)
"""
    The final sorted_ret is an iterable sequence, shaped like:
[('usTSLA', 1.0), ('usSINA', 0.45565379371028253), ('usWB', 0.44811939073120288),
('usAEH', 0.37792534372729375), ('usCRESY', 0.37347584342214574),
('us.IXIC', 0.36856818073255937), ('usCVG', 0.36841463066151853),
('usOCN', 0.36412381487296047), ('usYHOO', 0.36217456000137549), ...............]
"""
if show:
        # Choose the output function depending on whether we are in an ipython environment
log_func = logging.info if ABuEnv.g_is_ipython else print
log_func(sorted_ret[:show_cnt])
        # Draw the closing-price curves of the show_cnt most correlated stocks
ABuSimilarDrawing.draw_show_close(sorted_ret, cmp_cnt, show_cnt)
return sorted_ret
@consume_time
@from_local
def multi_corr_df(corr_jobs, cmp_cnt=252, n_folds=None, start=None, end=None):
"""
    Decorated with from_local, i.e. local data is forced. Matches the benchmark for the configured market, uses
    _all_market_cg with the given parameters to build change_df, the pd.DataFrame of price-change percentages for
    all market symbols, then computes the correlation of change_df with every method in corr_jobs and returns all
    the results in one dict.
    :param corr_jobs: sequence of ECoreCorrType correlation methods to run
    :param cmp_cnt: number of trading days to compare, int
    :param n_folds: number of years to compare, int, optional
    :param start: requested start date, str object, optional
    :param end: requested end date, str object, optional
    :return: dict of correlation matrices, as shown below eg:
{'pears':
usBIDU usFB usGOOG usNOAH usSFUN usTSLA usVIPS usWUBA
usBIDU 1.0000 0.3013 0.3690 0.4015 0.3680 0.3015 0.3706 0.4320
usFB 0.3013 1.0000 0.6609 0.2746 0.1978 0.4080 0.2856 0.2438
usGOOG 0.3690 0.6609 1.0000 0.3682 0.1821 0.3477 0.3040 0.2917
usNOAH 0.4015 0.2746 0.3682 1.0000 0.3628 0.2178 0.4645 0.4488
usSFUN 0.3680 0.1978 0.1821 0.3628 1.0000 0.2513 0.2843 0.4883
usTSLA 0.3015 0.4080 0.3477 0.2178 0.2513 1.0000 0.2327 0.3340
usVIPS 0.3706 0.2856 0.3040 0.4645 0.2843 0.2327 1.0000 0.4189
usWUBA 0.4320 0.2438 0.2917 0.4488 0.4883 0.3340 0.4189 1.0000
'sperm':
usBIDU usFB usGOOG usNOAH usSFUN usTSLA usVIPS usWUBA
usBIDU 1.0000 0.3888 0.4549 0.4184 0.3747 0.3623 0.4333 0.4396
usFB 0.3888 1.0000 0.7013 0.2927 0.2379 0.4200 0.3123 0.2216
usGOOG 0.4549 0.7013 1.0000 0.3797 0.2413 0.3871 0.3922 0.3035
usNOAH 0.4184 0.2927 0.3797 1.0000 0.3581 0.2066 0.4643 0.4382
usSFUN 0.3747 0.2379 0.2413 0.3581 1.0000 0.2645 0.3890 0.4693
usTSLA 0.3623 0.4200 0.3871 0.2066 0.2645 1.0000 0.2540 0.2801
usVIPS 0.4333 0.3123 0.3922 0.4643 0.3890 0.2540 1.0000 0.4080
usWUBA 0.4396 0.2216 0.3035 0.4382 0.4693 0.2801 0.4080 1.0000 }
"""
if isinstance(corr_jobs, ECoreCorrType):
        # If a single ECoreCorrType is passed in, accept it for compatibility and wrap it in a list
corr_jobs = [corr_jobs]
if any([not isinstance(corr_job, ECoreCorrType) for corr_job in corr_jobs]):
        # Every element of the sequence must be an ECoreCorrType
        raise TypeError('corr_job must be an ECoreCorrType')
    # Match the benchmark for the configured market
    if ABuEnv.g_market_target == EMarketTargetType.E_MARKET_TARGET_US:
        # US stocks
        benchmark = IndexSymbol.IXIC
    elif ABuEnv.g_market_target == EMarketTargetType.E_MARKET_TARGET_HK:
        # Hong Kong stocks
        benchmark = IndexSymbol.HSI
    elif ABuEnv.g_market_target == EMarketTargetType.E_MARKET_TARGET_CN:
        # A-shares
        benchmark = IndexSymbol.SH
    elif ABuEnv.g_market_target == EMarketTargetType.E_MARKET_TARGET_FUTURES_CN:
        # Domestic (CN) futures
        benchmark = IndexSymbol.BM_FUTURES_CN
    elif ABuEnv.g_market_target == EMarketTargetType.E_MARKET_TARGET_TC:
        # Coin (crypto) market
        benchmark = IndexSymbol.TC_INX
    elif ABuEnv.g_market_target == EMarketTargetType.E_MARKET_TARGET_OPTIONS_US:
        # US options also use IXIC as the benchmark for now
        benchmark = IndexSymbol.IXIC
    elif ABuEnv.g_market_target == EMarketTargetType.E_MARKET_TARGET_FUTURES_GLOBAL:
        # Global futures also use BM_FUTURES_GB as the benchmark for now
        benchmark = IndexSymbol.BM_FUTURES_GB
    else:
        # If nothing matches, don't raise, just pick one, since the benchmark here only serves as a time ruler
        benchmark = IndexSymbol.IXIC
    # Use _all_market_cg with the given parameters to build change_df, the market-wide price-change pd.DataFrame
    change_df = _all_market_cg(benchmark, cmp_cnt=cmp_cnt, n_folds=n_folds, start=start, end=end)
    # Compute the correlation of change_df with each method in corr_jobs and return all results as one dict
    return {corr_job.value: ABuCorrcoef.corr_matrix(change_df, corr_job) for corr_job in corr_jobs}
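# A minimal usage sketch (illustrative only; assumes the local cache already
# holds market data, e.g. after running ABu.run_kl_update):
#
#   corr_dict = multi_corr_df([ECoreCorrType.E_CORE_TYPE_PEARS,
#                              ECoreCorrType.E_CORE_TYPE_ROLLING], cmp_cnt=252)
#   pears_matrix = corr_dict[ECoreCorrType.E_CORE_TYPE_PEARS.value]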
| gpl-3.0 |
datapythonista/pandas | pandas/tests/resample/test_datetime_index.py | 1 | 60159 | from datetime import datetime
from functools import partial
from io import StringIO
import numpy as np
import pytest
import pytz
from pandas._libs import lib
from pandas.errors import UnsupportedFunctionCall
import pandas as pd
from pandas import (
DataFrame,
Series,
Timedelta,
Timestamp,
isna,
notna,
)
import pandas._testing as tm
from pandas.core.groupby.grouper import Grouper
from pandas.core.indexes.datetimes import date_range
from pandas.core.indexes.period import (
Period,
period_range,
)
from pandas.core.resample import (
DatetimeIndex,
_get_timestamp_range_edges,
)
import pandas.tseries.offsets as offsets
from pandas.tseries.offsets import Minute
@pytest.fixture()
def _index_factory():
return date_range
@pytest.fixture
def _index_freq():
return "Min"
@pytest.fixture
def _static_values(index):
return np.random.rand(len(index))
def test_custom_grouper(index):
dti = index
s = Series(np.array([1] * len(dti)), index=dti, dtype="int64")
b = Grouper(freq=Minute(5))
g = s.groupby(b)
# check all cython functions work
g.ohlc() # doesn't use _cython_agg_general
funcs = ["add", "mean", "prod", "min", "max", "var"]
for f in funcs:
g._cython_agg_general(f, alt=None, numeric_only=True)
b = Grouper(freq=Minute(5), closed="right", label="right")
g = s.groupby(b)
# check all cython functions work
g.ohlc() # doesn't use _cython_agg_general
funcs = ["add", "mean", "prod", "min", "max", "var"]
for f in funcs:
g._cython_agg_general(f, alt=None, numeric_only=True)
assert g.ngroups == 2593
assert notna(g.mean()).all()
# construct expected val
arr = [1] + [5] * 2592
idx = dti[0:-1:5]
idx = idx.append(dti[-1:])
idx = DatetimeIndex(idx, freq="5T")
expect = Series(arr, index=idx)
# GH2763 - return input dtype if we can
result = g.agg(np.sum)
tm.assert_series_equal(result, expect)
df = DataFrame(np.random.rand(len(dti), 10), index=dti, dtype="float64")
r = df.groupby(b).agg(np.sum)
assert len(r.columns) == 10
assert len(r.index) == 2593
@pytest.mark.parametrize(
"_index_start,_index_end,_index_name",
[("1/1/2000 00:00:00", "1/1/2000 00:13:00", "index")],
)
@pytest.mark.parametrize(
"closed, expected",
[
(
"right",
lambda s: Series(
[s[0], s[1:6].mean(), s[6:11].mean(), s[11:].mean()],
index=date_range("1/1/2000", periods=4, freq="5min", name="index"),
),
),
(
"left",
lambda s: Series(
[s[:5].mean(), s[5:10].mean(), s[10:].mean()],
index=date_range(
"1/1/2000 00:05", periods=3, freq="5min", name="index"
),
),
),
],
)
def test_resample_basic(series, closed, expected):
s = series
expected = expected(s)
result = s.resample("5min", closed=closed, label="right").mean()
tm.assert_series_equal(result, expected)
def test_resample_integerarray():
# GH 25580, resample on IntegerArray
ts = Series(
range(9), index=date_range("1/1/2000", periods=9, freq="T"), dtype="Int64"
)
result = ts.resample("3T").sum()
expected = Series(
[3, 12, 21],
index=date_range("1/1/2000", periods=3, freq="3T"),
dtype="Int64",
)
tm.assert_series_equal(result, expected)
result = ts.resample("3T").mean()
expected = Series(
[1, 4, 7],
index=date_range("1/1/2000", periods=3, freq="3T"),
dtype="Float64",
)
tm.assert_series_equal(result, expected)
def test_resample_basic_grouper(series):
s = series
result = s.resample("5Min").last()
grouper = Grouper(freq=Minute(5), closed="left", label="left")
expected = s.groupby(grouper).agg(lambda x: x[-1])
tm.assert_series_equal(result, expected)
@pytest.mark.parametrize(
"_index_start,_index_end,_index_name",
[("1/1/2000 00:00:00", "1/1/2000 00:13:00", "index")],
)
@pytest.mark.parametrize(
"keyword,value",
[("label", "righttt"), ("closed", "righttt"), ("convention", "starttt")],
)
def test_resample_string_kwargs(series, keyword, value):
# see gh-19303
# Check that wrong keyword argument strings raise an error
msg = f"Unsupported value {value} for `{keyword}`"
with pytest.raises(ValueError, match=msg):
series.resample("5min", **({keyword: value}))
@pytest.mark.parametrize(
"_index_start,_index_end,_index_name",
[("1/1/2000 00:00:00", "1/1/2000 00:13:00", "index")],
)
def test_resample_how(series, downsample_method):
if downsample_method == "ohlc":
pytest.skip("covered by test_resample_how_ohlc")
s = series
grouplist = np.ones_like(s)
grouplist[0] = 0
grouplist[1:6] = 1
grouplist[6:11] = 2
grouplist[11:] = 3
expected = s.groupby(grouplist).agg(downsample_method)
expected.index = date_range("1/1/2000", periods=4, freq="5min", name="index")
result = getattr(
s.resample("5min", closed="right", label="right"), downsample_method
)()
tm.assert_series_equal(result, expected)
@pytest.mark.parametrize(
"_index_start,_index_end,_index_name",
[("1/1/2000 00:00:00", "1/1/2000 00:13:00", "index")],
)
def test_resample_how_ohlc(series):
s = series
grouplist = np.ones_like(s)
grouplist[0] = 0
grouplist[1:6] = 1
grouplist[6:11] = 2
grouplist[11:] = 3
def _ohlc(group):
if isna(group).all():
return np.repeat(np.nan, 4)
return [group[0], group.max(), group.min(), group[-1]]
expected = DataFrame(
s.groupby(grouplist).agg(_ohlc).values.tolist(),
index=date_range("1/1/2000", periods=4, freq="5min", name="index"),
columns=["open", "high", "low", "close"],
)
result = s.resample("5min", closed="right", label="right").ohlc()
tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize("func", ["min", "max", "sum", "prod", "mean", "var", "std"])
def test_numpy_compat(func):
# see gh-12811
s = Series([1, 2, 3, 4, 5], index=date_range("20130101", periods=5, freq="s"))
r = s.resample("2s")
msg = "numpy operations are not valid with resample"
with pytest.raises(UnsupportedFunctionCall, match=msg):
getattr(r, func)(func, 1, 2, 3)
with pytest.raises(UnsupportedFunctionCall, match=msg):
getattr(r, func)(axis=1)
def test_resample_how_callables():
# GH#7929
data = np.arange(5, dtype=np.int64)
ind = date_range(start="2014-01-01", periods=len(data), freq="d")
df = DataFrame({"A": data, "B": data}, index=ind)
def fn(x, a=1):
return str(type(x))
class FnClass:
def __call__(self, x):
return str(type(x))
df_standard = df.resample("M").apply(fn)
df_lambda = df.resample("M").apply(lambda x: str(type(x)))
df_partial = df.resample("M").apply(partial(fn))
df_partial2 = df.resample("M").apply(partial(fn, a=2))
df_class = df.resample("M").apply(FnClass())
tm.assert_frame_equal(df_standard, df_lambda)
tm.assert_frame_equal(df_standard, df_partial)
tm.assert_frame_equal(df_standard, df_partial2)
tm.assert_frame_equal(df_standard, df_class)
def test_resample_rounding():
# GH 8371
# odd results when rounding is needed
data = """date,time,value
11-08-2014,00:00:01.093,1
11-08-2014,00:00:02.159,1
11-08-2014,00:00:02.667,1
11-08-2014,00:00:03.175,1
11-08-2014,00:00:07.058,1
11-08-2014,00:00:07.362,1
11-08-2014,00:00:08.324,1
11-08-2014,00:00:08.830,1
11-08-2014,00:00:08.982,1
11-08-2014,00:00:09.815,1
11-08-2014,00:00:10.540,1
11-08-2014,00:00:11.061,1
11-08-2014,00:00:11.617,1
11-08-2014,00:00:13.607,1
11-08-2014,00:00:14.535,1
11-08-2014,00:00:15.525,1
11-08-2014,00:00:17.960,1
11-08-2014,00:00:20.674,1
11-08-2014,00:00:21.191,1"""
df = pd.read_csv(
StringIO(data),
parse_dates={"timestamp": ["date", "time"]},
index_col="timestamp",
)
df.index.name = None
result = df.resample("6s").sum()
expected = DataFrame(
{"value": [4, 9, 4, 2]}, index=date_range("2014-11-08", freq="6s", periods=4)
)
tm.assert_frame_equal(result, expected)
result = df.resample("7s").sum()
expected = DataFrame(
{"value": [4, 10, 4, 1]}, index=date_range("2014-11-08", freq="7s", periods=4)
)
tm.assert_frame_equal(result, expected)
result = df.resample("11s").sum()
expected = DataFrame(
{"value": [11, 8]}, index=date_range("2014-11-08", freq="11s", periods=2)
)
tm.assert_frame_equal(result, expected)
result = df.resample("13s").sum()
expected = DataFrame(
{"value": [13, 6]}, index=date_range("2014-11-08", freq="13s", periods=2)
)
tm.assert_frame_equal(result, expected)
result = df.resample("17s").sum()
expected = DataFrame(
{"value": [16, 3]}, index=date_range("2014-11-08", freq="17s", periods=2)
)
tm.assert_frame_equal(result, expected)
def test_resample_basic_from_daily():
# from daily
dti = date_range(
start=datetime(2005, 1, 1), end=datetime(2005, 1, 10), freq="D", name="index"
)
s = Series(np.random.rand(len(dti)), dti)
# to weekly
result = s.resample("w-sun").last()
assert len(result) == 3
assert (result.index.dayofweek == [6, 6, 6]).all()
assert result.iloc[0] == s["1/2/2005"]
assert result.iloc[1] == s["1/9/2005"]
assert result.iloc[2] == s.iloc[-1]
result = s.resample("W-MON").last()
assert len(result) == 2
assert (result.index.dayofweek == [0, 0]).all()
assert result.iloc[0] == s["1/3/2005"]
assert result.iloc[1] == s["1/10/2005"]
result = s.resample("W-TUE").last()
assert len(result) == 2
assert (result.index.dayofweek == [1, 1]).all()
assert result.iloc[0] == s["1/4/2005"]
assert result.iloc[1] == s["1/10/2005"]
result = s.resample("W-WED").last()
assert len(result) == 2
assert (result.index.dayofweek == [2, 2]).all()
assert result.iloc[0] == s["1/5/2005"]
assert result.iloc[1] == s["1/10/2005"]
result = s.resample("W-THU").last()
assert len(result) == 2
assert (result.index.dayofweek == [3, 3]).all()
assert result.iloc[0] == s["1/6/2005"]
assert result.iloc[1] == s["1/10/2005"]
result = s.resample("W-FRI").last()
assert len(result) == 2
assert (result.index.dayofweek == [4, 4]).all()
assert result.iloc[0] == s["1/7/2005"]
assert result.iloc[1] == s["1/10/2005"]
# to biz day
result = s.resample("B").last()
assert len(result) == 7
assert (result.index.dayofweek == [4, 0, 1, 2, 3, 4, 0]).all()
assert result.iloc[0] == s["1/2/2005"]
assert result.iloc[1] == s["1/3/2005"]
assert result.iloc[5] == s["1/9/2005"]
assert result.index.name == "index"
def test_resample_upsampling_picked_but_not_correct():
# Test for issue #3020
dates = date_range("01-Jan-2014", "05-Jan-2014", freq="D")
series = Series(1, index=dates)
result = series.resample("D").mean()
assert result.index[0] == dates[0]
# GH 5955
# incorrect deciding to upsample when the axis frequency matches the
# resample frequency
s = Series(
np.arange(1.0, 6), index=[datetime(1975, 1, i, 12, 0) for i in range(1, 6)]
)
expected = Series(
np.arange(1.0, 6), index=date_range("19750101", periods=5, freq="D")
)
result = s.resample("D").count()
tm.assert_series_equal(result, Series(1, index=expected.index))
result1 = s.resample("D").sum()
result2 = s.resample("D").mean()
tm.assert_series_equal(result1, expected)
tm.assert_series_equal(result2, expected)
def test_resample_frame_basic():
df = tm.makeTimeDataFrame()
b = Grouper(freq="M")
g = df.groupby(b)
# check all cython functions work
funcs = ["add", "mean", "prod", "min", "max", "var"]
for f in funcs:
g._cython_agg_general(f, alt=None, numeric_only=True)
result = df.resample("A").mean()
tm.assert_series_equal(result["A"], df["A"].resample("A").mean())
result = df.resample("M").mean()
tm.assert_series_equal(result["A"], df["A"].resample("M").mean())
df.resample("M", kind="period").mean()
df.resample("W-WED", kind="period").mean()
def test_resample_upsample():
# from daily
dti = date_range(
start=datetime(2005, 1, 1), end=datetime(2005, 1, 10), freq="D", name="index"
)
s = Series(np.random.rand(len(dti)), dti)
# to minutely, by padding
result = s.resample("Min").pad()
assert len(result) == 12961
assert result[0] == s[0]
assert result[-1] == s[-1]
assert result.index.name == "index"
def test_resample_how_method():
# GH9915
s = Series(
[11, 22],
index=[
Timestamp("2015-03-31 21:48:52.672000"),
Timestamp("2015-03-31 21:49:52.739000"),
],
)
expected = Series(
[11, np.NaN, np.NaN, np.NaN, np.NaN, np.NaN, 22],
index=DatetimeIndex(
[
Timestamp("2015-03-31 21:48:50"),
Timestamp("2015-03-31 21:49:00"),
Timestamp("2015-03-31 21:49:10"),
Timestamp("2015-03-31 21:49:20"),
Timestamp("2015-03-31 21:49:30"),
Timestamp("2015-03-31 21:49:40"),
Timestamp("2015-03-31 21:49:50"),
],
freq="10s",
),
)
tm.assert_series_equal(s.resample("10S").mean(), expected)
def test_resample_extra_index_point():
# GH#9756
index = date_range(start="20150101", end="20150331", freq="BM")
expected = DataFrame({"A": Series([21, 41, 63], index=index)})
index = date_range(start="20150101", end="20150331", freq="B")
df = DataFrame({"A": Series(range(len(index)), index=index)}, dtype="int64")
result = df.resample("BM").last()
tm.assert_frame_equal(result, expected)
def test_upsample_with_limit():
rng = date_range("1/1/2000", periods=3, freq="5t")
ts = Series(np.random.randn(len(rng)), rng)
result = ts.resample("t").ffill(limit=2)
expected = ts.reindex(result.index, method="ffill", limit=2)
tm.assert_series_equal(result, expected)
@pytest.mark.parametrize("freq", ["5D", "10H", "5Min", "10S"])
@pytest.mark.parametrize("rule", ["Y", "3M", "15D", "30H", "15Min", "30S"])
def test_nearest_upsample_with_limit(tz_aware_fixture, freq, rule):
# GH 33939
rng = date_range("1/1/2000", periods=3, freq=freq, tz=tz_aware_fixture)
ts = Series(np.random.randn(len(rng)), rng)
result = ts.resample(rule).nearest(limit=2)
expected = ts.reindex(result.index, method="nearest", limit=2)
tm.assert_series_equal(result, expected)
def test_resample_ohlc(series):
s = series
grouper = Grouper(freq=Minute(5))
expect = s.groupby(grouper).agg(lambda x: x[-1])
result = s.resample("5Min").ohlc()
assert len(result) == len(expect)
assert len(result.columns) == 4
xs = result.iloc[-2]
assert xs["open"] == s[-6]
assert xs["high"] == s[-6:-1].max()
assert xs["low"] == s[-6:-1].min()
assert xs["close"] == s[-2]
xs = result.iloc[0]
assert xs["open"] == s[0]
assert xs["high"] == s[:5].max()
assert xs["low"] == s[:5].min()
assert xs["close"] == s[4]
def test_resample_ohlc_result():
# GH 12332
index = date_range("1-1-2000", "2-15-2000", freq="h")
index = index.union(date_range("4-15-2000", "5-15-2000", freq="h"))
s = Series(range(len(index)), index=index)
a = s.loc[:"4-15-2000"].resample("30T").ohlc()
assert isinstance(a, DataFrame)
b = s.loc[:"4-14-2000"].resample("30T").ohlc()
assert isinstance(b, DataFrame)
# GH12348
# raising on odd period
rng = date_range("2013-12-30", "2014-01-07")
index = rng.drop(
[
Timestamp("2014-01-01"),
Timestamp("2013-12-31"),
Timestamp("2014-01-04"),
Timestamp("2014-01-05"),
]
)
df = DataFrame(data=np.arange(len(index)), index=index)
result = df.resample("B").mean()
expected = df.reindex(index=date_range(rng[0], rng[-1], freq="B"))
tm.assert_frame_equal(result, expected)
def test_resample_ohlc_dataframe():
df = (
DataFrame(
{
"PRICE": {
Timestamp("2011-01-06 10:59:05", tz=None): 24990,
Timestamp("2011-01-06 12:43:33", tz=None): 25499,
Timestamp("2011-01-06 12:54:09", tz=None): 25499,
},
"VOLUME": {
Timestamp("2011-01-06 10:59:05", tz=None): 1500000000,
Timestamp("2011-01-06 12:43:33", tz=None): 5000000000,
Timestamp("2011-01-06 12:54:09", tz=None): 100000000,
},
}
)
).reindex(["VOLUME", "PRICE"], axis=1)
res = df.resample("H").ohlc()
exp = pd.concat(
[df["VOLUME"].resample("H").ohlc(), df["PRICE"].resample("H").ohlc()],
axis=1,
keys=["VOLUME", "PRICE"],
)
tm.assert_frame_equal(exp, res)
df.columns = [["a", "b"], ["c", "d"]]
res = df.resample("H").ohlc()
exp.columns = pd.MultiIndex.from_tuples(
[
("a", "c", "open"),
("a", "c", "high"),
("a", "c", "low"),
("a", "c", "close"),
("b", "d", "open"),
("b", "d", "high"),
("b", "d", "low"),
("b", "d", "close"),
]
)
tm.assert_frame_equal(exp, res)
# dupe columns fail atm
# df.columns = ['PRICE', 'PRICE']
def test_resample_dup_index():
# GH 4812
# dup columns with resample raising
df = DataFrame(
np.random.randn(4, 12),
index=[2000, 2000, 2000, 2000],
columns=[Period(year=2000, month=i + 1, freq="M") for i in range(12)],
)
df.iloc[3, :] = np.nan
result = df.resample("Q", axis=1).mean()
expected = df.groupby(lambda x: int((x.month - 1) / 3), axis=1).mean()
expected.columns = [Period(year=2000, quarter=i + 1, freq="Q") for i in range(4)]
tm.assert_frame_equal(result, expected)
def test_resample_reresample():
dti = date_range(start=datetime(2005, 1, 1), end=datetime(2005, 1, 10), freq="D")
s = Series(np.random.rand(len(dti)), dti)
bs = s.resample("B", closed="right", label="right").mean()
result = bs.resample("8H").mean()
assert len(result) == 22
assert isinstance(result.index.freq, offsets.DateOffset)
assert result.index.freq == offsets.Hour(8)
def test_resample_timestamp_to_period(simple_date_range_series):
ts = simple_date_range_series("1/1/1990", "1/1/2000")
result = ts.resample("A-DEC", kind="period").mean()
expected = ts.resample("A-DEC").mean()
expected.index = period_range("1990", "2000", freq="a-dec")
tm.assert_series_equal(result, expected)
result = ts.resample("A-JUN", kind="period").mean()
expected = ts.resample("A-JUN").mean()
expected.index = period_range("1990", "2000", freq="a-jun")
tm.assert_series_equal(result, expected)
result = ts.resample("M", kind="period").mean()
expected = ts.resample("M").mean()
expected.index = period_range("1990-01", "2000-01", freq="M")
tm.assert_series_equal(result, expected)
result = ts.resample("M", kind="period").mean()
expected = ts.resample("M").mean()
expected.index = period_range("1990-01", "2000-01", freq="M")
tm.assert_series_equal(result, expected)
def test_ohlc_5min():
def _ohlc(group):
if isna(group).all():
return np.repeat(np.nan, 4)
return [group[0], group.max(), group.min(), group[-1]]
rng = date_range("1/1/2000 00:00:00", "1/1/2000 5:59:50", freq="10s")
ts = Series(np.random.randn(len(rng)), index=rng)
resampled = ts.resample("5min", closed="right", label="right").ohlc()
assert (resampled.loc["1/1/2000 00:00"] == ts[0]).all()
exp = _ohlc(ts[1:31])
assert (resampled.loc["1/1/2000 00:05"] == exp).all()
exp = _ohlc(ts["1/1/2000 5:55:01":])
assert (resampled.loc["1/1/2000 6:00:00"] == exp).all()
def test_downsample_non_unique():
rng = date_range("1/1/2000", "2/29/2000")
rng2 = rng.repeat(5).values
ts = Series(np.random.randn(len(rng2)), index=rng2)
result = ts.resample("M").mean()
expected = ts.groupby(lambda x: x.month).mean()
assert len(result) == 2
tm.assert_almost_equal(result[0], expected[1])
tm.assert_almost_equal(result[1], expected[2])
def test_asfreq_non_unique():
# GH #1077
rng = date_range("1/1/2000", "2/29/2000")
rng2 = rng.repeat(2).values
ts = Series(np.random.randn(len(rng2)), index=rng2)
msg = "cannot reindex from a duplicate axis"
with pytest.raises(ValueError, match=msg):
ts.asfreq("B")
def test_resample_axis1():
rng = date_range("1/1/2000", "2/29/2000")
df = DataFrame(np.random.randn(3, len(rng)), columns=rng, index=["a", "b", "c"])
result = df.resample("M", axis=1).mean()
expected = df.T.resample("M").mean().T
tm.assert_frame_equal(result, expected)
def test_resample_anchored_ticks():
# If a fixed delta (5 minute, 4 hour) evenly divides a day, we should
# "anchor" the origin at midnight so we get regular intervals rather
# than starting from the first timestamp which might start in the
# middle of a desired interval
rng = date_range("1/1/2000 04:00:00", periods=86400, freq="s")
ts = Series(np.random.randn(len(rng)), index=rng)
ts[:2] = np.nan # so results are the same
freqs = ["t", "5t", "15t", "30t", "4h", "12h"]
for freq in freqs:
result = ts[2:].resample(freq, closed="left", label="left").mean()
expected = ts.resample(freq, closed="left", label="left").mean()
tm.assert_series_equal(result, expected)
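# --- Illustrative sketch (added; not an original pandas test) ---------------
# With the default origin ("start_day"), a fixed delta that evenly divides a
# day is anchored to midnight, so the first bin label lands on a regular
# boundary (04:15 below) rather than on the first data timestamp (04:17).
def _example_anchored_origin():
    idx = date_range("1/1/2000 04:17:00", periods=10, freq="T")
    s = Series(range(10), index=idx)
    out = s.resample("5T").mean()
    return out.index[0]  # expected: Timestamp("2000-01-01 04:15:00")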
def test_resample_single_group():
mysum = lambda x: x.sum()
rng = date_range("2000-1-1", "2000-2-10", freq="D")
ts = Series(np.random.randn(len(rng)), index=rng)
tm.assert_series_equal(ts.resample("M").sum(), ts.resample("M").apply(mysum))
rng = date_range("2000-1-1", "2000-1-10", freq="D")
ts = Series(np.random.randn(len(rng)), index=rng)
tm.assert_series_equal(ts.resample("M").sum(), ts.resample("M").apply(mysum))
# GH 3849
s = Series(
[30.1, 31.6],
index=[Timestamp("20070915 15:30:00"), Timestamp("20070915 15:40:00")],
)
expected = Series([0.75], index=DatetimeIndex([Timestamp("20070915")], freq="D"))
result = s.resample("D").apply(lambda x: np.std(x))
tm.assert_series_equal(result, expected)
def test_resample_offset():
# GH 31809
rng = date_range("1/1/2000 00:00:00", "1/1/2000 02:00", freq="s")
ts = Series(np.random.randn(len(rng)), index=rng)
resampled = ts.resample("5min", offset="2min").mean()
exp_rng = date_range("12/31/1999 23:57:00", "1/1/2000 01:57", freq="5min")
tm.assert_index_equal(resampled.index, exp_rng)
def test_resample_origin():
# GH 31809
rng = date_range("2000-01-01 00:00:00", "2000-01-01 02:00", freq="s")
ts = Series(np.random.randn(len(rng)), index=rng)
exp_rng = date_range("1999-12-31 23:57:00", "2000-01-01 01:57", freq="5min")
resampled = ts.resample("5min", origin="1999-12-31 23:57:00").mean()
tm.assert_index_equal(resampled.index, exp_rng)
offset_timestamp = Timestamp(0) + Timedelta("2min")
resampled = ts.resample("5min", origin=offset_timestamp).mean()
tm.assert_index_equal(resampled.index, exp_rng)
resampled = ts.resample("5min", origin="epoch", offset="2m").mean()
tm.assert_index_equal(resampled.index, exp_rng)
    # origin of '1999-12-31 12:02:00' should be equivalent for this case
resampled = ts.resample("5min", origin="1999-12-31 12:02:00").mean()
tm.assert_index_equal(resampled.index, exp_rng)
resampled = ts.resample("5min", offset="-3m").mean()
tm.assert_index_equal(resampled.index, exp_rng)
@pytest.mark.parametrize(
"origin", ["invalid_value", "epch", "startday", "startt", "2000-30-30", object()]
)
def test_resample_bad_origin(origin):
rng = date_range("2000-01-01 00:00:00", "2000-01-01 02:00", freq="s")
ts = Series(np.random.randn(len(rng)), index=rng)
msg = (
"'origin' should be equal to 'epoch', 'start', 'start_day', "
"'end', 'end_day' or should be a Timestamp convertible type. Got "
f"'{origin}' instead."
)
with pytest.raises(ValueError, match=msg):
ts.resample("5min", origin=origin)
@pytest.mark.parametrize("offset", ["invalid_value", "12dayys", "2000-30-30", object()])
def test_resample_bad_offset(offset):
rng = date_range("2000-01-01 00:00:00", "2000-01-01 02:00", freq="s")
ts = Series(np.random.randn(len(rng)), index=rng)
msg = f"'offset' should be a Timedelta convertible type. Got '{offset}' instead."
with pytest.raises(ValueError, match=msg):
ts.resample("5min", offset=offset)
def test_resample_origin_prime_freq():
# GH 31809
start, end = "2000-10-01 23:30:00", "2000-10-02 00:30:00"
rng = date_range(start, end, freq="7min")
ts = Series(np.random.randn(len(rng)), index=rng)
exp_rng = date_range("2000-10-01 23:14:00", "2000-10-02 00:22:00", freq="17min")
resampled = ts.resample("17min").mean()
tm.assert_index_equal(resampled.index, exp_rng)
resampled = ts.resample("17min", origin="start_day").mean()
tm.assert_index_equal(resampled.index, exp_rng)
exp_rng = date_range("2000-10-01 23:30:00", "2000-10-02 00:21:00", freq="17min")
resampled = ts.resample("17min", origin="start").mean()
tm.assert_index_equal(resampled.index, exp_rng)
resampled = ts.resample("17min", offset="23h30min").mean()
tm.assert_index_equal(resampled.index, exp_rng)
resampled = ts.resample("17min", origin="start_day", offset="23h30min").mean()
tm.assert_index_equal(resampled.index, exp_rng)
exp_rng = date_range("2000-10-01 23:18:00", "2000-10-02 00:26:00", freq="17min")
resampled = ts.resample("17min", origin="epoch").mean()
tm.assert_index_equal(resampled.index, exp_rng)
exp_rng = date_range("2000-10-01 23:24:00", "2000-10-02 00:15:00", freq="17min")
resampled = ts.resample("17min", origin="2000-01-01").mean()
tm.assert_index_equal(resampled.index, exp_rng)
def test_resample_origin_with_tz():
# GH 31809
msg = "The origin must have the same timezone as the index."
tz = "Europe/Paris"
rng = date_range("2000-01-01 00:00:00", "2000-01-01 02:00", freq="s", tz=tz)
ts = Series(np.random.randn(len(rng)), index=rng)
exp_rng = date_range("1999-12-31 23:57:00", "2000-01-01 01:57", freq="5min", tz=tz)
resampled = ts.resample("5min", origin="1999-12-31 23:57:00+00:00").mean()
tm.assert_index_equal(resampled.index, exp_rng)
    # origin of '1999-12-31 12:02:00+03:00' should be equivalent for this case
resampled = ts.resample("5min", origin="1999-12-31 12:02:00+03:00").mean()
tm.assert_index_equal(resampled.index, exp_rng)
resampled = ts.resample("5min", origin="epoch", offset="2m").mean()
tm.assert_index_equal(resampled.index, exp_rng)
with pytest.raises(ValueError, match=msg):
ts.resample("5min", origin="12/31/1999 23:57:00").mean()
# if the series is not tz aware, origin should not be tz aware
rng = date_range("2000-01-01 00:00:00", "2000-01-01 02:00", freq="s")
ts = Series(np.random.randn(len(rng)), index=rng)
with pytest.raises(ValueError, match=msg):
ts.resample("5min", origin="12/31/1999 23:57:00+03:00").mean()
def test_resample_origin_epoch_with_tz_day_vs_24h():
# GH 34474
start, end = "2000-10-01 23:30:00+0500", "2000-12-02 00:30:00+0500"
rng = date_range(start, end, freq="7min")
random_values = np.random.randn(len(rng))
ts_1 = Series(random_values, index=rng)
result_1 = ts_1.resample("D", origin="epoch").mean()
result_2 = ts_1.resample("24H", origin="epoch").mean()
tm.assert_series_equal(result_1, result_2)
# check that we have the same behavior with epoch even if we are not timezone aware
ts_no_tz = ts_1.tz_localize(None)
result_3 = ts_no_tz.resample("D", origin="epoch").mean()
result_4 = ts_no_tz.resample("24H", origin="epoch").mean()
tm.assert_series_equal(result_1, result_3.tz_localize(rng.tz), check_freq=False)
tm.assert_series_equal(result_1, result_4.tz_localize(rng.tz), check_freq=False)
    # check that we get similar results with two different timezones (+2H and +5H)
start, end = "2000-10-01 23:30:00+0200", "2000-12-02 00:30:00+0200"
rng = date_range(start, end, freq="7min")
ts_2 = Series(random_values, index=rng)
result_5 = ts_2.resample("D", origin="epoch").mean()
result_6 = ts_2.resample("24H", origin="epoch").mean()
tm.assert_series_equal(result_1.tz_localize(None), result_5.tz_localize(None))
tm.assert_series_equal(result_1.tz_localize(None), result_6.tz_localize(None))
def test_resample_origin_with_day_freq_on_dst():
# GH 31809
tz = "America/Chicago"
def _create_series(values, timestamps, freq="D"):
return Series(
values,
index=DatetimeIndex(
[Timestamp(t, tz=tz) for t in timestamps], freq=freq, ambiguous=True
),
)
# test classical behavior of origin in a DST context
start = Timestamp("2013-11-02", tz=tz)
end = Timestamp("2013-11-03 23:59", tz=tz)
rng = date_range(start, end, freq="1h")
ts = Series(np.ones(len(rng)), index=rng)
expected = _create_series([24.0, 25.0], ["2013-11-02", "2013-11-03"])
for origin in ["epoch", "start", "start_day", start, None]:
result = ts.resample("D", origin=origin).sum()
tm.assert_series_equal(result, expected)
# test complex behavior of origin/offset in a DST context
start = Timestamp("2013-11-03", tz=tz)
end = Timestamp("2013-11-03 23:59", tz=tz)
rng = date_range(start, end, freq="1h")
ts = Series(np.ones(len(rng)), index=rng)
expected_ts = ["2013-11-02 22:00-05:00", "2013-11-03 22:00-06:00"]
expected = _create_series([23.0, 2.0], expected_ts)
result = ts.resample("D", origin="start", offset="-2H").sum()
tm.assert_series_equal(result, expected)
expected_ts = ["2013-11-02 22:00-05:00", "2013-11-03 21:00-06:00"]
expected = _create_series([22.0, 3.0], expected_ts, freq="24H")
result = ts.resample("24H", origin="start", offset="-2H").sum()
tm.assert_series_equal(result, expected)
expected_ts = ["2013-11-02 02:00-05:00", "2013-11-03 02:00-06:00"]
expected = _create_series([3.0, 22.0], expected_ts)
result = ts.resample("D", origin="start", offset="2H").sum()
tm.assert_series_equal(result, expected)
expected_ts = ["2013-11-02 23:00-05:00", "2013-11-03 23:00-06:00"]
expected = _create_series([24.0, 1.0], expected_ts)
result = ts.resample("D", origin="start", offset="-1H").sum()
tm.assert_series_equal(result, expected)
expected_ts = ["2013-11-02 01:00-05:00", "2013-11-03 01:00:00-0500"]
expected = _create_series([1.0, 24.0], expected_ts)
result = ts.resample("D", origin="start", offset="1H").sum()
tm.assert_series_equal(result, expected)
def test_resample_daily_anchored():
rng = date_range("1/1/2000 0:00:00", periods=10000, freq="T")
ts = Series(np.random.randn(len(rng)), index=rng)
ts[:2] = np.nan # so results are the same
result = ts[2:].resample("D", closed="left", label="left").mean()
expected = ts.resample("D", closed="left", label="left").mean()
tm.assert_series_equal(result, expected)
def test_resample_to_period_monthly_buglet():
# GH #1259
rng = date_range("1/1/2000", "12/31/2000")
ts = Series(np.random.randn(len(rng)), index=rng)
result = ts.resample("M", kind="period").mean()
exp_index = period_range("Jan-2000", "Dec-2000", freq="M")
tm.assert_index_equal(result.index, exp_index)
def test_period_with_agg():
# aggregate a period resampler with a lambda
s2 = Series(
np.random.randint(0, 5, 50),
index=period_range("2012-01-01", freq="H", periods=50),
dtype="float64",
)
expected = s2.to_timestamp().resample("D").mean().to_period()
result = s2.resample("D").agg(lambda x: x.mean())
tm.assert_series_equal(result, expected)
def test_resample_segfault():
# GH 8573
# segfaulting in older versions
all_wins_and_wagers = [
(1, datetime(2013, 10, 1, 16, 20), 1, 0),
(2, datetime(2013, 10, 1, 16, 10), 1, 0),
(2, datetime(2013, 10, 1, 18, 15), 1, 0),
(2, datetime(2013, 10, 1, 16, 10, 31), 1, 0),
]
df = DataFrame.from_records(
all_wins_and_wagers, columns=("ID", "timestamp", "A", "B")
).set_index("timestamp")
result = df.groupby("ID").resample("5min").sum()
expected = df.groupby("ID").apply(lambda x: x.resample("5min").sum())
tm.assert_frame_equal(result, expected)
def test_resample_dtype_preservation():
# GH 12202
# validation tests for dtype preservation
df = DataFrame(
{
"date": date_range(start="2016-01-01", periods=4, freq="W"),
"group": [1, 1, 2, 2],
"val": Series([5, 6, 7, 8], dtype="int32"),
}
).set_index("date")
result = df.resample("1D").ffill()
assert result.val.dtype == np.int32
result = df.groupby("group").resample("1D").ffill()
assert result.val.dtype == np.int32
def test_resample_dtype_coercion():
pytest.importorskip("scipy.interpolate")
# GH 16361
df = {"a": [1, 3, 1, 4]}
df = DataFrame(df, index=date_range("2017-01-01", "2017-01-04"))
expected = df.astype("float64").resample("H").mean()["a"].interpolate("cubic")
result = df.resample("H")["a"].mean().interpolate("cubic")
tm.assert_series_equal(result, expected)
result = df.resample("H").mean()["a"].interpolate("cubic")
tm.assert_series_equal(result, expected)
def test_weekly_resample_buglet():
# #1327
rng = date_range("1/1/2000", freq="B", periods=20)
ts = Series(np.random.randn(len(rng)), index=rng)
resampled = ts.resample("W").mean()
expected = ts.resample("W-SUN").mean()
tm.assert_series_equal(resampled, expected)
def test_monthly_resample_error():
# #1451
dates = date_range("4/16/2012 20:00", periods=5000, freq="h")
ts = Series(np.random.randn(len(dates)), index=dates)
# it works!
ts.resample("M")
def test_nanosecond_resample_error():
# GH 12307 - Values falls after last bin when
# Resampling using pd.tseries.offsets.Nano as period
start = 1443707890427
exp_start = 1443707890400
indx = date_range(start=pd.to_datetime(start), periods=10, freq="100n")
ts = Series(range(len(indx)), index=indx)
r = ts.resample(pd.tseries.offsets.Nano(100))
result = r.agg("mean")
exp_indx = date_range(start=pd.to_datetime(exp_start), periods=10, freq="100n")
exp = Series(range(len(exp_indx)), index=exp_indx, dtype=float)
tm.assert_series_equal(result, exp)
def test_resample_anchored_intraday(simple_date_range_series):
# #1471, #1458
rng = date_range("1/1/2012", "4/1/2012", freq="100min")
df = DataFrame(rng.month, index=rng)
result = df.resample("M").mean()
expected = df.resample("M", kind="period").mean().to_timestamp(how="end")
expected.index += Timedelta(1, "ns") - Timedelta(1, "D")
expected.index = expected.index._with_freq("infer")
assert expected.index.freq == "M"
tm.assert_frame_equal(result, expected)
result = df.resample("M", closed="left").mean()
exp = df.shift(1, freq="D").resample("M", kind="period").mean()
exp = exp.to_timestamp(how="end")
exp.index = exp.index + Timedelta(1, "ns") - Timedelta(1, "D")
exp.index = exp.index._with_freq("infer")
assert exp.index.freq == "M"
tm.assert_frame_equal(result, exp)
rng = date_range("1/1/2012", "4/1/2012", freq="100min")
df = DataFrame(rng.month, index=rng)
result = df.resample("Q").mean()
expected = df.resample("Q", kind="period").mean().to_timestamp(how="end")
expected.index += Timedelta(1, "ns") - Timedelta(1, "D")
expected.index._data.freq = "Q"
expected.index._freq = lib.no_default
tm.assert_frame_equal(result, expected)
result = df.resample("Q", closed="left").mean()
expected = df.shift(1, freq="D").resample("Q", kind="period", closed="left").mean()
expected = expected.to_timestamp(how="end")
expected.index += Timedelta(1, "ns") - Timedelta(1, "D")
expected.index._data.freq = "Q"
expected.index._freq = lib.no_default
tm.assert_frame_equal(result, expected)
ts = simple_date_range_series("2012-04-29 23:00", "2012-04-30 5:00", freq="h")
resampled = ts.resample("M").mean()
assert len(resampled) == 1
def test_resample_anchored_monthstart(simple_date_range_series):
ts = simple_date_range_series("1/1/2000", "12/31/2002")
freqs = ["MS", "BMS", "QS-MAR", "AS-DEC", "AS-JUN"]
for freq in freqs:
ts.resample(freq).mean()
def test_resample_anchored_multiday():
# When resampling a range spanning multiple days, ensure that the
# start date gets used to determine the offset. Fixes issue where
# a one day period is not a multiple of the frequency.
#
# See: https://github.com/pandas-dev/pandas/issues/8683
index1 = date_range("2014-10-14 23:06:23.206", periods=3, freq="400L")
index2 = date_range("2014-10-15 23:00:00", periods=2, freq="2200L")
index = index1.union(index2)
s = Series(np.random.randn(5), index=index)
# Ensure left closing works
result = s.resample("2200L").mean()
assert result.index[-1] == Timestamp("2014-10-15 23:00:02.000")
# Ensure right closing works
result = s.resample("2200L", label="right").mean()
assert result.index[-1] == Timestamp("2014-10-15 23:00:04.200")
def test_corner_cases(simple_period_range_series, simple_date_range_series):
# miscellaneous test coverage
rng = date_range("1/1/2000", periods=12, freq="t")
ts = Series(np.random.randn(len(rng)), index=rng)
result = ts.resample("5t", closed="right", label="left").mean()
ex_index = date_range("1999-12-31 23:55", periods=4, freq="5t")
tm.assert_index_equal(result.index, ex_index)
len0pts = simple_period_range_series("2007-01", "2010-05", freq="M")[:0]
# it works
result = len0pts.resample("A-DEC").mean()
assert len(result) == 0
# resample to periods
ts = simple_date_range_series("2000-04-28", "2000-04-30 11:00", freq="h")
result = ts.resample("M", kind="period").mean()
assert len(result) == 1
assert result.index[0] == Period("2000-04", freq="M")
def test_anchored_lowercase_buglet():
dates = date_range("4/16/2012 20:00", periods=50000, freq="s")
ts = Series(np.random.randn(len(dates)), index=dates)
# it works!
ts.resample("d").mean()
def test_upsample_apply_functions():
# #1596
rng = date_range("2012-06-12", periods=4, freq="h")
ts = Series(np.random.randn(len(rng)), index=rng)
result = ts.resample("20min").aggregate(["mean", "sum"])
assert isinstance(result, DataFrame)
def test_resample_not_monotonic():
rng = date_range("2012-06-12", periods=200, freq="h")
ts = Series(np.random.randn(len(rng)), index=rng)
ts = ts.take(np.random.permutation(len(ts)))
result = ts.resample("D").sum()
exp = ts.sort_index().resample("D").sum()
tm.assert_series_equal(result, exp)
def test_resample_median_bug_1688():
for dtype in ["int64", "int32", "float64", "float32"]:
df = DataFrame(
[1, 2],
index=[datetime(2012, 1, 1, 0, 0, 0), datetime(2012, 1, 1, 0, 5, 0)],
dtype=dtype,
)
result = df.resample("T").apply(lambda x: x.mean())
exp = df.asfreq("T")
if dtype == "float32":
# TODO: Empty groups cause x.mean() to return float64
exp = exp.astype("float64")
tm.assert_frame_equal(result, exp)
result = df.resample("T").median()
exp = df.asfreq("T")
tm.assert_frame_equal(result, exp)
def test_how_lambda_functions(simple_date_range_series):
ts = simple_date_range_series("1/1/2000", "4/1/2000")
result = ts.resample("M").apply(lambda x: x.mean())
exp = ts.resample("M").mean()
tm.assert_series_equal(result, exp)
foo_exp = ts.resample("M").mean()
foo_exp.name = "foo"
bar_exp = ts.resample("M").std()
bar_exp.name = "bar"
result = ts.resample("M").apply([lambda x: x.mean(), lambda x: x.std(ddof=1)])
result.columns = ["foo", "bar"]
tm.assert_series_equal(result["foo"], foo_exp)
tm.assert_series_equal(result["bar"], bar_exp)
# this is a MI Series, so comparing the names of the results
# doesn't make sense
result = ts.resample("M").aggregate(
{"foo": lambda x: x.mean(), "bar": lambda x: x.std(ddof=1)}
)
tm.assert_series_equal(result["foo"], foo_exp, check_names=False)
tm.assert_series_equal(result["bar"], bar_exp, check_names=False)
def test_resample_unequal_times():
# #1772
start = datetime(1999, 3, 1, 5)
# end hour is less than start
end = datetime(2012, 7, 31, 4)
bad_ind = date_range(start, end, freq="30min")
df = DataFrame({"close": 1}, index=bad_ind)
# it works!
df.resample("AS").sum()
def test_resample_consistency():
# GH 6418
# resample with bfill / limit / reindex consistency
i30 = date_range("2002-02-02", periods=4, freq="30T")
s = Series(np.arange(4.0), index=i30)
s[2] = np.NaN
# Upsample by factor 3 with reindex() and resample() methods:
i10 = date_range(i30[0], i30[-1], freq="10T")
s10 = s.reindex(index=i10, method="bfill")
s10_2 = s.reindex(index=i10, method="bfill", limit=2)
rl = s.reindex_like(s10, method="bfill", limit=2)
r10_2 = s.resample("10Min").bfill(limit=2)
r10 = s.resample("10Min").bfill()
# s10_2, r10, r10_2, rl should all be equal
tm.assert_series_equal(s10_2, r10)
tm.assert_series_equal(s10_2, r10_2)
tm.assert_series_equal(s10_2, rl)
def test_resample_timegrouper():
# GH 7227
dates1 = [
datetime(2014, 10, 1),
datetime(2014, 9, 3),
datetime(2014, 11, 5),
datetime(2014, 9, 5),
datetime(2014, 10, 8),
datetime(2014, 7, 15),
]
dates2 = dates1[:2] + [pd.NaT] + dates1[2:4] + [pd.NaT] + dates1[4:]
dates3 = [pd.NaT] + dates1 + [pd.NaT]
for dates in [dates1, dates2, dates3]:
df = DataFrame({"A": dates, "B": np.arange(len(dates))})
result = df.set_index("A").resample("M").count()
exp_idx = DatetimeIndex(
["2014-07-31", "2014-08-31", "2014-09-30", "2014-10-31", "2014-11-30"],
freq="M",
name="A",
)
expected = DataFrame({"B": [1, 0, 2, 2, 1]}, index=exp_idx)
if df["A"].isna().any():
expected.index = expected.index._with_freq(None)
tm.assert_frame_equal(result, expected)
result = df.groupby(Grouper(freq="M", key="A")).count()
tm.assert_frame_equal(result, expected)
df = DataFrame(
{"A": dates, "B": np.arange(len(dates)), "C": np.arange(len(dates))}
)
result = df.set_index("A").resample("M").count()
expected = DataFrame(
{"B": [1, 0, 2, 2, 1], "C": [1, 0, 2, 2, 1]},
index=exp_idx,
columns=["B", "C"],
)
if df["A"].isna().any():
expected.index = expected.index._with_freq(None)
tm.assert_frame_equal(result, expected)
result = df.groupby(Grouper(freq="M", key="A")).count()
tm.assert_frame_equal(result, expected)
def test_resample_nunique():
# GH 12352
df = DataFrame(
{
"ID": {
Timestamp("2015-06-05 00:00:00"): "0010100903",
Timestamp("2015-06-08 00:00:00"): "0010150847",
},
"DATE": {
Timestamp("2015-06-05 00:00:00"): "2015-06-05",
Timestamp("2015-06-08 00:00:00"): "2015-06-08",
},
}
)
r = df.resample("D")
g = df.groupby(Grouper(freq="D"))
expected = df.groupby(Grouper(freq="D")).ID.apply(lambda x: x.nunique())
assert expected.name == "ID"
for t in [r, g]:
result = t.ID.nunique()
tm.assert_series_equal(result, expected)
result = df.ID.resample("D").nunique()
tm.assert_series_equal(result, expected)
result = df.ID.groupby(Grouper(freq="D")).nunique()
tm.assert_series_equal(result, expected)
def test_resample_nunique_preserves_column_level_names():
# see gh-23222
df = tm.makeTimeDataFrame(freq="1D").abs()
df.columns = pd.MultiIndex.from_arrays(
[df.columns.tolist()] * 2, names=["lev0", "lev1"]
)
result = df.resample("1h").nunique()
tm.assert_index_equal(df.columns, result.columns)
def test_resample_nunique_with_date_gap():
# GH 13453
index = date_range("1-1-2000", "2-15-2000", freq="h")
index2 = date_range("4-15-2000", "5-15-2000", freq="h")
index3 = index.append(index2)
s = Series(range(len(index3)), index=index3, dtype="int64")
r = s.resample("M")
# Since all elements are unique, these should all be the same
results = [r.count(), r.nunique(), r.agg(Series.nunique), r.agg("nunique")]
tm.assert_series_equal(results[0], results[1])
tm.assert_series_equal(results[0], results[2])
tm.assert_series_equal(results[0], results[3])
@pytest.mark.parametrize("n", [10000, 100000])
@pytest.mark.parametrize("k", [10, 100, 1000])
def test_resample_group_info(n, k):
# GH10914
# use a fixed seed to always have the same uniques
prng = np.random.RandomState(1234)
dr = date_range(start="2015-08-27", periods=n // 10, freq="T")
ts = Series(prng.randint(0, n // k, n).astype("int64"), index=prng.choice(dr, n))
left = ts.resample("30T").nunique()
ix = date_range(start=ts.index.min(), end=ts.index.max(), freq="30T")
    # Re-derive nunique by hand: map each timestamp to its 30T bin (1-based via
    # searchsorted with side="right"), sort by bin then by value, and keep only
    # the first occurrence of each (bin, value) pair.
    vals = ts.values
    bins = np.searchsorted(ix.values, ts.index, side="right")
    sorter = np.lexsort((vals, bins))
    vals, bins = vals[sorter], bins[sorter]
    # mask flags positions where either the value or the bin changes, i.e. the
    # first element of every distinct (bin, value) run
    mask = np.r_[True, vals[1:] != vals[:-1]]
    mask |= np.r_[True, bins[1:] != bins[:-1]]
    # count distinct values per bin (shift back to 0-based bin labels)
    arr = np.bincount(bins[mask] - 1, minlength=len(ix)).astype("int64", copy=False)
right = Series(arr, index=ix)
tm.assert_series_equal(left, right)
def test_resample_size():
n = 10000
dr = date_range("2015-09-19", periods=n, freq="T")
ts = Series(np.random.randn(n), index=np.random.choice(dr, n))
left = ts.resample("7T").size()
ix = date_range(start=left.index.min(), end=ts.index.max(), freq="7T")
bins = np.searchsorted(ix.values, ts.index.values, side="right")
val = np.bincount(bins, minlength=len(ix) + 1)[1:].astype("int64", copy=False)
right = Series(val, index=ix)
tm.assert_series_equal(left, right)
def test_resample_across_dst():
# The test resamples a DatetimeIndex with values before and after a
# DST change
# Issue: 14682
# The DatetimeIndex we will start with
# (note that DST happens at 03:00+02:00 -> 02:00+01:00)
# 2016-10-30 02:23:00+02:00, 2016-10-30 02:23:00+01:00
df1 = DataFrame([1477786980, 1477790580], columns=["ts"])
dti1 = DatetimeIndex(
pd.to_datetime(df1.ts, unit="s")
.dt.tz_localize("UTC")
.dt.tz_convert("Europe/Madrid")
)
# The expected DatetimeIndex after resampling.
# 2016-10-30 02:00:00+02:00, 2016-10-30 02:00:00+01:00
df2 = DataFrame([1477785600, 1477789200], columns=["ts"])
dti2 = DatetimeIndex(
pd.to_datetime(df2.ts, unit="s")
.dt.tz_localize("UTC")
.dt.tz_convert("Europe/Madrid"),
freq="H",
)
df = DataFrame([5, 5], index=dti1)
result = df.resample(rule="H").sum()
expected = DataFrame([5, 5], index=dti2)
tm.assert_frame_equal(result, expected)
def test_groupby_with_dst_time_change():
# GH 24972
index = DatetimeIndex(
[1478064900001000000, 1480037118776792000], tz="UTC"
).tz_convert("America/Chicago")
df = DataFrame([1, 2], index=index)
result = df.groupby(Grouper(freq="1d")).last()
expected_index_values = date_range(
"2016-11-02", "2016-11-24", freq="d", tz="America/Chicago"
)
index = DatetimeIndex(expected_index_values)
expected = DataFrame([1.0] + ([np.nan] * 21) + [2.0], index=index)
tm.assert_frame_equal(result, expected)
def test_resample_dst_anchor():
# 5172
dti = DatetimeIndex([datetime(2012, 11, 4, 23)], tz="US/Eastern")
df = DataFrame([5], index=dti)
dti = DatetimeIndex(df.index.normalize(), freq="D")
expected = DataFrame([5], index=dti)
tm.assert_frame_equal(df.resample(rule="D").sum(), expected)
df.resample(rule="MS").sum()
tm.assert_frame_equal(
df.resample(rule="MS").sum(),
DataFrame(
[5],
index=DatetimeIndex([datetime(2012, 11, 1)], tz="US/Eastern", freq="MS"),
),
)
dti = date_range("2013-09-30", "2013-11-02", freq="30Min", tz="Europe/Paris")
values = range(dti.size)
df = DataFrame({"a": values, "b": values, "c": values}, index=dti, dtype="int64")
how = {"a": "min", "b": "max", "c": "count"}
tm.assert_frame_equal(
df.resample("W-MON").agg(how)[["a", "b", "c"]],
DataFrame(
{
"a": [0, 48, 384, 720, 1056, 1394],
"b": [47, 383, 719, 1055, 1393, 1586],
"c": [48, 336, 336, 336, 338, 193],
},
index=date_range("9/30/2013", "11/4/2013", freq="W-MON", tz="Europe/Paris"),
),
"W-MON Frequency",
)
tm.assert_frame_equal(
df.resample("2W-MON").agg(how)[["a", "b", "c"]],
DataFrame(
{
"a": [0, 48, 720, 1394],
"b": [47, 719, 1393, 1586],
"c": [48, 672, 674, 193],
},
index=date_range(
"9/30/2013", "11/11/2013", freq="2W-MON", tz="Europe/Paris"
),
),
"2W-MON Frequency",
)
tm.assert_frame_equal(
df.resample("MS").agg(how)[["a", "b", "c"]],
DataFrame(
{"a": [0, 48, 1538], "b": [47, 1537, 1586], "c": [48, 1490, 49]},
index=date_range("9/1/2013", "11/1/2013", freq="MS", tz="Europe/Paris"),
),
"MS Frequency",
)
tm.assert_frame_equal(
df.resample("2MS").agg(how)[["a", "b", "c"]],
DataFrame(
{"a": [0, 1538], "b": [1537, 1586], "c": [1538, 49]},
index=date_range("9/1/2013", "11/1/2013", freq="2MS", tz="Europe/Paris"),
),
"2MS Frequency",
)
df_daily = df["10/26/2013":"10/29/2013"]
tm.assert_frame_equal(
df_daily.resample("D").agg({"a": "min", "b": "max", "c": "count"})[
["a", "b", "c"]
],
DataFrame(
{
"a": [1248, 1296, 1346, 1394],
"b": [1295, 1345, 1393, 1441],
"c": [48, 50, 48, 48],
},
index=date_range("10/26/2013", "10/29/2013", freq="D", tz="Europe/Paris"),
),
"D Frequency",
)
def test_downsample_across_dst():
# GH 8531
tz = pytz.timezone("Europe/Berlin")
dt = datetime(2014, 10, 26)
dates = date_range(tz.localize(dt), periods=4, freq="2H")
result = Series(5, index=dates).resample("H").mean()
expected = Series(
[5.0, np.nan] * 3 + [5.0],
index=date_range(tz.localize(dt), periods=7, freq="H"),
)
tm.assert_series_equal(result, expected)
def test_downsample_across_dst_weekly():
# GH 9119, GH 21459
df = DataFrame(
index=DatetimeIndex(
["2017-03-25", "2017-03-26", "2017-03-27", "2017-03-28", "2017-03-29"],
tz="Europe/Amsterdam",
),
data=[11, 12, 13, 14, 15],
)
result = df.resample("1W").sum()
expected = DataFrame(
[23, 42],
index=DatetimeIndex(
["2017-03-26", "2017-04-02"], tz="Europe/Amsterdam", freq="W"
),
)
tm.assert_frame_equal(result, expected)
idx = date_range("2013-04-01", "2013-05-01", tz="Europe/London", freq="H")
s = Series(index=idx, dtype=np.float64)
result = s.resample("W").mean()
expected = Series(
index=date_range("2013-04-07", freq="W", periods=5, tz="Europe/London"),
dtype=np.float64,
)
tm.assert_series_equal(result, expected)
def test_downsample_dst_at_midnight():
# GH 25758
start = datetime(2018, 11, 3, 12)
end = datetime(2018, 11, 5, 12)
index = date_range(start, end, freq="1H")
index = index.tz_localize("UTC").tz_convert("America/Havana")
data = list(range(len(index)))
dataframe = DataFrame(data, index=index)
result = dataframe.groupby(Grouper(freq="1D")).mean()
dti = date_range("2018-11-03", periods=3).tz_localize(
"America/Havana", ambiguous=True
)
dti = DatetimeIndex(dti, freq="D")
expected = DataFrame([7.5, 28.0, 44.5], index=dti)
tm.assert_frame_equal(result, expected)
def test_resample_with_nat():
# GH 13020
index = DatetimeIndex(
[
pd.NaT,
"1970-01-01 00:00:00",
pd.NaT,
"1970-01-01 00:00:01",
"1970-01-01 00:00:02",
]
)
frame = DataFrame([2, 3, 5, 7, 11], index=index)
index_1s = DatetimeIndex(
["1970-01-01 00:00:00", "1970-01-01 00:00:01", "1970-01-01 00:00:02"]
)
frame_1s = DataFrame([3.0, 7.0, 11.0], index=index_1s)
tm.assert_frame_equal(frame.resample("1s").mean(), frame_1s)
index_2s = DatetimeIndex(["1970-01-01 00:00:00", "1970-01-01 00:00:02"])
frame_2s = DataFrame([5.0, 11.0], index=index_2s)
tm.assert_frame_equal(frame.resample("2s").mean(), frame_2s)
index_3s = DatetimeIndex(["1970-01-01 00:00:00"])
frame_3s = DataFrame([7.0], index=index_3s)
tm.assert_frame_equal(frame.resample("3s").mean(), frame_3s)
tm.assert_frame_equal(frame.resample("60s").mean(), frame_3s)
def test_resample_datetime_values():
# GH 13119
# check that datetime dtype is preserved when NaT values are
# introduced by the resampling
dates = [datetime(2016, 1, 15), datetime(2016, 1, 19)]
df = DataFrame({"timestamp": dates}, index=dates)
exp = Series(
[datetime(2016, 1, 15), pd.NaT, datetime(2016, 1, 19)],
index=date_range("2016-01-15", periods=3, freq="2D"),
name="timestamp",
)
res = df.resample("2D").first()["timestamp"]
tm.assert_series_equal(res, exp)
res = df["timestamp"].resample("2D").first()
tm.assert_series_equal(res, exp)
def test_resample_apply_with_additional_args(series):
# GH 14615
def f(data, add_arg):
return np.mean(data) * add_arg
multiplier = 10
result = series.resample("D").apply(f, multiplier)
expected = series.resample("D").mean().multiply(multiplier)
tm.assert_series_equal(result, expected)
# Testing as kwarg
result = series.resample("D").apply(f, add_arg=multiplier)
expected = series.resample("D").mean().multiply(multiplier)
tm.assert_series_equal(result, expected)
# Testing dataframe
df = DataFrame({"A": 1, "B": 2}, index=date_range("2017", periods=10))
result = df.groupby("A").resample("D").agg(f, multiplier).astype(float)
expected = df.groupby("A").resample("D").mean().multiply(multiplier)
# TODO: GH 41137
expected = expected.astype("float64")
tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize("k", [1, 2, 3])
@pytest.mark.parametrize(
"n1, freq1, n2, freq2",
[
(30, "S", 0.5, "Min"),
(60, "S", 1, "Min"),
(3600, "S", 1, "H"),
(60, "Min", 1, "H"),
(21600, "S", 0.25, "D"),
(86400, "S", 1, "D"),
(43200, "S", 0.5, "D"),
(1440, "Min", 1, "D"),
(12, "H", 0.5, "D"),
(24, "H", 1, "D"),
],
)
def test_resample_equivalent_offsets(n1, freq1, n2, freq2, k):
# GH 24127
n1_ = n1 * k
n2_ = n2 * k
s = Series(0, index=date_range("19910905 13:00", "19911005 07:00", freq=freq1))
s = s + range(len(s))
result1 = s.resample(str(n1_) + freq1).mean()
result2 = s.resample(str(n2_) + freq2).mean()
tm.assert_series_equal(result1, result2)
@pytest.mark.parametrize(
"first,last,freq,exp_first,exp_last",
[
("19910905", "19920406", "D", "19910905", "19920407"),
("19910905 00:00", "19920406 06:00", "D", "19910905", "19920407"),
("19910905 06:00", "19920406 06:00", "H", "19910905 06:00", "19920406 07:00"),
("19910906", "19920406", "M", "19910831", "19920430"),
("19910831", "19920430", "M", "19910831", "19920531"),
("1991-08", "1992-04", "M", "19910831", "19920531"),
],
)
def test_get_timestamp_range_edges(first, last, freq, exp_first, exp_last):
first = Period(first)
first = first.to_timestamp(first.freq)
last = Period(last)
last = last.to_timestamp(last.freq)
exp_first = Timestamp(exp_first, freq=freq)
exp_last = Timestamp(exp_last, freq=freq)
freq = pd.tseries.frequencies.to_offset(freq)
result = _get_timestamp_range_edges(first, last, freq)
expected = (exp_first, exp_last)
assert result == expected
@pytest.mark.parametrize("duplicates", [True, False])
def test_resample_apply_product(duplicates):
# GH 5586
index = date_range(start="2012-01-31", freq="M", periods=12)
ts = Series(range(12), index=index)
df = DataFrame({"A": ts, "B": ts + 2})
if duplicates:
df.columns = ["A", "A"]
result = df.resample("Q").apply(np.product)
expected = DataFrame(
np.array([[0, 24], [60, 210], [336, 720], [990, 1716]], dtype=np.int64),
index=DatetimeIndex(
["2012-03-31", "2012-06-30", "2012-09-30", "2012-12-31"], freq="Q-DEC"
),
columns=df.columns,
)
tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize(
"first,last,freq_in,freq_out,exp_last",
[
(
"2020-03-28",
"2020-03-31",
"D",
"24H",
"2020-03-30 01:00",
), # includes transition into DST
(
"2020-03-28",
"2020-10-27",
"D",
"24H",
"2020-10-27 00:00",
), # includes transition into and out of DST
(
"2020-10-25",
"2020-10-27",
"D",
"24H",
"2020-10-26 23:00",
), # includes transition out of DST
(
"2020-03-28",
"2020-03-31",
"24H",
"D",
"2020-03-30 00:00",
), # same as above, but from 24H to D
("2020-03-28", "2020-10-27", "24H", "D", "2020-10-27 00:00"),
("2020-10-25", "2020-10-27", "24H", "D", "2020-10-26 00:00"),
],
)
def test_resample_calendar_day_with_dst(
first: str, last: str, freq_in: str, freq_out: str, exp_last: str
):
# GH 35219
ts = Series(1.0, date_range(first, last, freq=freq_in, tz="Europe/Amsterdam"))
result = ts.resample(freq_out).pad()
expected = Series(
1.0, date_range(first, exp_last, freq=freq_out, tz="Europe/Amsterdam")
)
tm.assert_series_equal(result, expected)
@pytest.mark.parametrize("func", ["min", "max", "first", "last"])
def test_resample_aggregate_functions_min_count(func):
# GH#37768
index = date_range(start="2020", freq="M", periods=3)
ser = Series([1, np.nan, np.nan], index)
result = getattr(ser.resample("Q"), func)(min_count=2)
expected = Series(
[np.nan],
index=DatetimeIndex(["2020-03-31"], dtype="datetime64[ns]", freq="Q-DEC"),
)
tm.assert_series_equal(result, expected)
| bsd-3-clause |
scienceopen/radioutils | RainAttenuation.py | 1 | 3601 | #!/usr/bin/env python
"""
Plots reference figures from
https://www.itu.int/dms_pubrec/itu-r/rec/p/R-REC-P.838-3-200503-I!!PDF-E.pdf
and is normally used to give rain attenuation vs. frequency
NOTE: to make this useful over a satellite-ground path,
consider factors like rain vs. altitude!
Example plot: 1-1000 GHz, 40 degree elevation angle, 10 mm/hour, vertical polarization (90 deg):
./RainAttenuation.py 10 -1 90 40
"""
import numpy as np
from matplotlib.pyplot import figure, show
from argparse import ArgumentParser
from radioutils.impairments import _rain_coeff, rain_attenuation
def main():
p = ArgumentParser()
p.add_argument("rainrate", help="rain rate [mm/hour]", type=float)
p.add_argument(
"freqHz",
help="frequency in Hz. Specifying -1 gives full-range frequency sweep plot",
type=float,
)
p.add_argument(
"polarizationDegrees",
help="polarization angle 0==horiz, 90==vert, 45==circ [degrees]",
type=float,
)
p.add_argument("elevationDegrees", help="elevation angle above horizon [degrees]", type=float)
p.add_argument("-v", "--verbose", help="reproduce report plots", action="store_true")
P = p.parse_args()
if P.freqHz <= 0 or P.verbose:
f = np.logspace(9, 12, 200)
dBkm = get_rain_atten(f, P.rainrate, P.polarizationDegrees, P.elevationDegrees, P.verbose)
ax = figure().gca()
ax.loglog(f / 1e9, dBkm)
ax.set_title(
f"ITU-R P.838-3 Rain attenuation\n {P.rainrate} mm/hour, elevation {P.elevationDegrees} degrees"
)
ax.set_xlabel("frequency [GHz]")
ax.set_ylabel("rain attenuation [dB/km]")
ax.grid(True, which="both")
show()
else:
f = P.freqHz
dBkm = get_rain_atten(f, P.rainrate, P.polarizationDegrees, P.elevationDegrees)
print(f"{dBkm:0.2e} dB/km attenuation")
def get_rain_atten(f, rainrate, polarization, elevation, verbose=False):
""" replicate figures from ITU report """
rain_atten_dBkm = rain_attenuation(f, rainrate, polarization, elevation)
if verbose:
ah, kh = _rain_coeff(f, "h", 0.0)
# %% Figure 1
ax = figure(1).gca()
ax.loglog(f / 1e9, kh)
ax.grid(True, which="both")
ax.set_title(r"Figure 1, $k$ coefficient for horizontal polarization")
ax.set_xlabel("frequency [GHz]")
ax.set_ylabel("$k_h$")
ax.set_ylim(1e-5, 10)
ax.set_xlim(1, 1000)
# %% Figure 2
ax = figure(2).gca()
ax.semilogx(f / 1e9, ah)
ax.grid(True, which="both")
ax.set_title(r"Figure 2, $\alpha$ coefficient for horizontal polarization")
ax.set_xlabel("frequency [GHz]")
ax.set_ylabel(r"$\alpha_h$")
ax.set_ylim(0.4, 1.8)
ax.set_xlim(1, 1000)
# %%
av, kv = _rain_coeff(f, "v", 0.0)
# %%
ax = figure(3).gca()
ax.loglog(f / 1e9, kv)
ax.grid(True, which="both")
ax.set_title(r"Figure 3, $k$ coefficient for vertical polarization")
ax.set_xlabel("frequency [GHz]")
ax.set_ylabel("$k_v$")
ax.set_ylim(1e-5, 10)
ax.set_xlim(1, 1000)
# %%
ax = figure(4).gca()
ax.semilogx(f / 1e9, av)
ax.grid(True, which="both")
ax.set_title(r"Figure 4, $\alpha$ coefficient for vertical polarization")
ax.set_xlabel("frequency [GHz]")
ax.set_ylabel(r"$\alpha_v$")
ax.set_ylim(0.4, 1.8)
ax.set_xlim(1, 1000)
return rain_atten_dBkm
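# --- Illustrative sketch (added for clarity; not part of the original script).
# It reuses rain_attenuation() with the same argument order seen in
# get_rain_atten() above: frequency [Hz], rain rate [mm/hour], polarization
# angle [degrees], elevation angle [degrees]. The 12 GHz carrier is an assumed
# value; the other numbers restate the CLI example from the module docstring.
def example_direct_call():
    return rain_attenuation(12e9, 10.0, 90.0, 40.0)  # attenuation in dB/km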
if __name__ == "__main__":
main()
| gpl-3.0 |
mcanthony/Cello | benchmarks/graphs.py | 4 | 2204 | import numpy as np
from matplotlib import pyplot as plt
languages = np.array([
'C', 'C++', 'Cello',
'Java', 'Javascript', 'Python',
'Ruby', 'Lua', 'Lua JIT'])
experiments = [
'Array', 'Map', 'N-Bodies',
'Dictionary', 'Sudoku', 'Matrix',
'Garbage Collection']
results_array = np.array([
0.02, 0.02, 0.10,
0.14, 0.07, 0.11,
0.07, 2.34, 0.24])
results_map = np.array([
0.24, 0.05, 0.54,
0.60, 1.92, 9.73,
1.24, 5.46, 2.01])
results_nbodies = np.array([
0.01, 0.01, 0.07,
0.07, 0.09, 1.52,
1.45, 0.66, 0.02])
results_dict = np.array([
0.09, 0.13, 0.25,
0.24, 0.46, 0.18,
0.44, 0.23, 0.18])
results_sudoku = np.array([
0.14, 0.14, 0.15,
0.29, 0.44, 5.30,
9.64, 6.34, 0.49])
results_matrix = np.array([
0.03, 0.01, 0.02,
0.11, 0.23, 2.33,
7.62, 0.94, 0.03])
results_gc = np.array([
0.01, 0.01, 0.26,
0.06, 0.25, 3.34,
5.37, 8.02, 0.31])
results = [
results_array, results_map, results_nbodies,
results_dict, results_sudoku, results_matrix,
results_gc]
#cols = [
# '#006666', '#FF6600', '#991F00',
# '#339933', '#009999', '#006666',
# '#FF6600', '#991F00', '#339933']
cols = [
'#006666', '#FF6600', '#991F00',
'#006666', '#FF6600', '#991F00',
'#006666', '#FF6600', '#991F00']
ylims = [
3.25, 13.5, 2.2,
0.65, 13.5, 11, 11
]
for exp, result, ylim in zip(experiments, results, ylims):
fig = plt.figure(figsize=(3, 2.5))
ax = fig.add_subplot(1, 1, 1, axisbg='#FCFCFC')
bars = ax.bar(np.arange(len(result)), result)
for bar, col in zip(bars, cols):
bar.set_color(col)
bar.set_edgecolor('#555555')
height = bar.get_height()
ax.text(
bar.get_x()+bar.get_width()/1.5,
height + (ylim/20), '%0.2f' % height,
ha='center', va='bottom', rotation='vertical',
color='#555555')
plt.xticks(np.arange(len(result)) + 0.75 / 2, languages, rotation='vertical')
plt.gca().xaxis.grid(False)
plt.gca().yaxis.grid(False)
plt.ylim((0, ylim))
plt.tight_layout()
plt.show()
| bsd-2-clause |
darcyabjones/bioplotlib | setup.py | 1 | 1165 | """A setuptools based setup module.
See:
https://packaging.python.org/en/latest/distributing.html
https://github.com/pypa/sampleproject
"""
# Always prefer setuptools over distutils
from setuptools import setup, find_packages
# To use a consistent encoding
from codecs import open
from os import path
here = path.abspath(path.dirname(__file__))
# Get the long description from the README file
with open(path.join(here, 'README.rst'), encoding='utf-8') as f:
long_description = f.read()
config = {
'name': 'bioplotlib',
'version': '0.0.1',
'description': long_description,
'author': 'Darcy Jones',
'author_email': '[email protected]',
'url': 'https://github.com/darcyabjones/bioplotlib',
'download_url': 'https://github.com/darcyabjones/bioplotlib',
'packages': find_packages(),
'install_requires': ['numpy', 'matplotlib'],
'scripts': [],
'extras_require': {
'dev': ['check-manifest'],
'test': ['coverage', 'pytest', 'tox'],
},
'package_data': {
'bioplotlib': ['data/*.json', 'data/*.csv', 'data/*.pkl'],
},
'include_package_data': True,
}
setup(**config)
| bsd-3-clause |
pnedunuri/scikit-learn | examples/cluster/plot_mean_shift.py | 351 | 1793 | """
=============================================
A demo of the mean-shift clustering algorithm
=============================================
Reference:
Dorin Comaniciu and Peter Meer, "Mean Shift: A robust approach toward
feature space analysis". IEEE Transactions on Pattern Analysis and
Machine Intelligence. 2002. pp. 603-619.
"""
print(__doc__)
import numpy as np
from sklearn.cluster import MeanShift, estimate_bandwidth
from sklearn.datasets.samples_generator import make_blobs
###############################################################################
# Generate sample data
centers = [[1, 1], [-1, -1], [1, -1]]
X, _ = make_blobs(n_samples=10000, centers=centers, cluster_std=0.6)
###############################################################################
# Compute clustering with MeanShift
# The following bandwidth can be automatically detected using
bandwidth = estimate_bandwidth(X, quantile=0.2, n_samples=500)
ms = MeanShift(bandwidth=bandwidth, bin_seeding=True)
ms.fit(X)
labels = ms.labels_
cluster_centers = ms.cluster_centers_
labels_unique = np.unique(labels)
n_clusters_ = len(labels_unique)
print("number of estimated clusters : %d" % n_clusters_)
###############################################################################
# Plot result
import matplotlib.pyplot as plt
from itertools import cycle
plt.figure(1)
plt.clf()
colors = cycle('bgrcmykbgrcmykbgrcmykbgrcmyk')
for k, col in zip(range(n_clusters_), colors):
my_members = labels == k
cluster_center = cluster_centers[k]
plt.plot(X[my_members, 0], X[my_members, 1], col + '.')
plt.plot(cluster_center[0], cluster_center[1], 'o', markerfacecolor=col,
markeredgecolor='k', markersize=14)
plt.title('Estimated number of clusters: %d' % n_clusters_)
plt.show()
| bsd-3-clause |
taynaud/sparkit-learn | splearn/feature_extraction/text.py | 2 | 25038 | # -*- coding: utf-8 -*-
import numbers
import operator
from itertools import chain
import numpy as np
import scipy.sparse as sp
import six
from pyspark import AccumulatorParam
from sklearn.feature_extraction.text import (CountVectorizer,
HashingVectorizer,
TfidfTransformer,
_document_frequency,
_make_int_array)
from sklearn.utils.fixes import frombuffer_empty
from sklearn.utils.validation import check_is_fitted
from ..base import SparkBroadcasterMixin
from ..rdd import DictRDD
from ..utils.validation import check_rdd
class SparkCountVectorizer(CountVectorizer, SparkBroadcasterMixin):
"""Distributed implementation of CountVectorizer.
Convert a collection of text documents to a matrix of token counts
This implementation produces a sparse representation of the counts using
scipy.sparse.coo_matrix.
If you do not provide an a-priori dictionary and you do not use an analyzer
that does some kind of feature selection then the number of features will
be equal to the vocabulary size found by analyzing the data.
Parameters
----------
input : string {'filename', 'file', 'content'}
If 'filename', the sequence passed as an argument to fit is
expected to be a list of filenames that need reading to fetch
the raw content to analyze.
If 'file', the sequence items must have a 'read' method (file-like
object) that is called to fetch the bytes in memory.
Otherwise the input is expected to be the sequence strings or
bytes items are expected to be analyzed directly.
encoding : string, 'utf-8' by default.
If bytes or files are given to analyze, this encoding is used to
decode.
decode_error : {'strict', 'ignore', 'replace'}
Instruction on what to do if a byte sequence is given to analyze that
contains characters not of the given `encoding`. By default, it is
'strict', meaning that a UnicodeDecodeError will be raised. Other
values are 'ignore' and 'replace'.
strip_accents : {'ascii', 'unicode', None}
Remove accents during the preprocessing step.
'ascii' is a fast method that only works on characters that have
        a direct ASCII mapping.
'unicode' is a slightly slower method that works on any characters.
None (default) does nothing.
analyzer : string, {'word', 'char', 'char_wb'} or callable
Whether the feature should be made of word or character n-grams.
Option 'char_wb' creates character n-grams only from text inside
word boundaries.
If a callable is passed it is used to extract the sequence of features
out of the raw, unprocessed input.
preprocessor : callable or None (default)
Override the preprocessing (string transformation) stage while
preserving the tokenizing and n-grams generation steps.
tokenizer : callable or None (default)
Override the string tokenization step while preserving the
preprocessing and n-grams generation steps.
ngram_range : tuple (min_n, max_n)
The lower and upper boundary of the range of n-values for different
n-grams to be extracted. All values of n such that min_n <= n <= max_n
will be used.
stop_words : string {'english'}, list, or None (default)
If 'english', a built-in stop word list for English is used.
If a list, that list is assumed to contain stop words, all of which
will be removed from the resulting tokens.
If None, no stop words will be used. max_df can be set to a value
in the range [0.7, 1.0) to automatically detect and filter stop
words based on intra corpus document frequency of terms.
lowercase : boolean, True by default
Convert all characters to lowercase before tokenizing.
token_pattern : string
Regular expression denoting what constitutes a "token", only used
        if `analyzer == 'word'`. The default regexp selects tokens of 2
or more alphanumeric characters (punctuation is completely ignored
and always treated as a token separator).
max_df : float in range [0.0, 1.0] or int, optional, 1.0 by default
When building the vocabulary ignore terms that have a document
frequency strictly higher than the given threshold (corpus-specific
stop words).
        If float, the parameter represents a proportion of documents; if
        integer, absolute counts.
This parameter is ignored if vocabulary is not None.
min_df : float in range [0.0, 1.0] or int, optional, 1 by default
When building the vocabulary ignore terms that have a document
frequency strictly lower than the given threshold. This value is also
called cut-off in the literature.
        If float, the parameter represents a proportion of documents; if
        integer, absolute counts.
This parameter is ignored if vocabulary is not None.
max_features : optional, None by default
        If not None, build a vocabulary that only considers the top
max_features ordered by term frequency across the corpus.
This parameter is ignored if vocabulary is not None.
vocabulary : Mapping or iterable, optional
Either a Mapping (e.g., a dict) where keys are terms and values are
indices in the feature matrix, or an iterable over terms. If not
given, a vocabulary is determined from the input documents. Indices
in the mapping should not be repeated and should not have any gap
between 0 and the largest index.
binary : boolean, False by default.
If True, all non zero counts are set to 1. This is useful for discrete
probabilistic models that model binary events rather than integer
counts.
dtype : type, optional
Type of the matrix returned by fit_transform() or transform().
Attributes
----------
vocabulary_ : dict
A mapping of terms to feature indices.
stop_words_ : set
Terms that were ignored because
they occurred in either too many
(`max_df`) or in too few (`min_df`) documents.
This is only available if no vocabulary was given.
See also
--------
HashingVectorizer, TfidfVectorizer
"""
__transient__ = ['vocabulary_']
def to_scikit(self):
obj = CountVectorizer([])
obj.__dict__ = self.__dict__
return obj
def _init_vocab(self, analyzed_docs):
"""Create vocabulary
"""
class SetAccum(AccumulatorParam):
def zero(self, initialValue):
return set(initialValue)
def addInPlace(self, v1, v2):
v1 |= v2
return v1
if not self.fixed_vocabulary_:
accum = analyzed_docs._rdd.context.accumulator(set(), SetAccum())
analyzed_docs.foreach(
lambda x: accum.add(set(chain.from_iterable(x))))
vocabulary = {t: i for i, t in enumerate(accum.value)}
else:
vocabulary = self.vocabulary_
if not vocabulary:
raise ValueError("empty vocabulary; perhaps the documents only"
" contain stop words")
return vocabulary
def _count_vocab(self, analyzed_docs):
"""Create sparse feature matrix, and vocabulary where fixed_vocab=False
"""
vocabulary = self.vocabulary_
j_indices = _make_int_array()
indptr = _make_int_array()
indptr.append(0)
for doc in analyzed_docs:
for feature in doc:
try:
j_indices.append(vocabulary[feature])
except KeyError:
# Ignore out-of-vocabulary items for fixed_vocab=True
continue
indptr.append(len(j_indices))
j_indices = frombuffer_empty(j_indices, dtype=np.intc)
indptr = np.frombuffer(indptr, dtype=np.intc)
values = np.ones(len(j_indices))
X = sp.csr_matrix((values, j_indices, indptr),
shape=(len(indptr) - 1, len(vocabulary)),
dtype=self.dtype)
X.sum_duplicates()
if self.binary:
X.data.fill(1)
return X
def _sort_features(self, vocabulary):
"""Sort features by name
Returns a reordered matrix and modifies the vocabulary in place
"""
sorted_features = sorted(six.iteritems(vocabulary))
map_index = np.empty(len(sorted_features), dtype=np.int32)
for new_val, (term, old_val) in enumerate(sorted_features):
map_index[new_val] = old_val
vocabulary[term] = new_val
return map_index
def _limit_features(self, X, vocabulary, high=None, low=None,
limit=None):
"""Remove too rare or too common features.
        Prune features that are non zero in more samples than high or fewer
documents than low, modifying the vocabulary, and restricting it to
at most the limit most frequent.
This does not prune samples with zero features.
"""
if high is None and low is None and limit is None:
return X, set()
# Calculate a mask based on document frequencies
dfs = X.map(_document_frequency).sum()
tfs = X.map(lambda x: np.asarray(x.sum(axis=0))).sum().ravel()
mask = np.ones(len(dfs), dtype=bool)
if high is not None:
mask &= dfs <= high
if low is not None:
mask &= dfs >= low
if limit is not None and mask.sum() > limit:
mask_inds = (-tfs[mask]).argsort()[:limit]
new_mask = np.zeros(len(dfs), dtype=bool)
new_mask[np.where(mask)[0][mask_inds]] = True
mask = new_mask
new_indices = np.cumsum(mask) - 1 # maps old indices to new
removed_terms = set()
for term, old_index in list(six.iteritems(vocabulary)):
if mask[old_index]:
vocabulary[term] = new_indices[old_index]
else:
del vocabulary[term]
removed_terms.add(term)
kept_indices = np.where(mask)[0]
if len(kept_indices) == 0:
raise ValueError("After pruning, no terms remain. Try a lower"
" min_df or a higher max_df.")
return kept_indices, removed_terms
def fit(self, Z):
"""Learn a vocabulary dictionary of all tokens in the raw documents in
the DictRDD's 'X' column.
Parameters
----------
        Z : iterable or DictRDD with column 'X'
An iterable which yields either str, unicode or file objects; or a
DictRDD with column 'X' containing such iterables.
Returns
-------
self
"""
self.fit_transform(Z)
return self
def fit_transform(self, Z):
"""Learn the vocabulary dictionary and return term-document matrix.
This is equivalent to fit followed by transform, but more efficiently
implemented.
Parameters
----------
Z : iterable or DictRDD with column 'X'
An iterable of raw_documents which yields either str, unicode or
file objects; or a DictRDD with column 'X' containing such
iterables.
Returns
-------
        Z : ArrayRDD or DictRDD containing scipy.sparse matrices
            Document-term matrix blocks of shape [n_samples, n_features].
"""
self._validate_vocabulary()
# map analyzer and cache result
analyze = self.build_analyzer()
A = Z.transform(lambda X: list(map(analyze, X)), column='X').persist()
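        # The analyzed RDD is persisted because it is traversed more than once:
        # first to build the vocabulary in _init_vocab and then to count terms
        # via the broadcast _count_vocab mapper; it is unpersisted at the end.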
# create vocabulary
X = A[:, 'X'] if isinstance(A, DictRDD) else A
self.vocabulary_ = self._init_vocab(X)
# transform according to vocabulary
mapper = self.broadcast(self._count_vocab, A.context)
Z = A.transform(mapper, column='X', dtype=sp.spmatrix)
if not self.fixed_vocabulary_:
X = Z[:, 'X'] if isinstance(Z, DictRDD) else Z
max_df = self.max_df
min_df = self.min_df
max_features = self.max_features
# limit features according to min_df, max_df parameters
n_doc = X.shape[0]
max_doc_count = (max_df
if isinstance(max_df, numbers.Integral)
else max_df * n_doc)
min_doc_count = (min_df
if isinstance(min_df, numbers.Integral)
else min_df * n_doc)
if max_doc_count < min_doc_count:
raise ValueError(
"max_df corresponds to < documents than min_df")
kept_indices, self.stop_words_ = self._limit_features(
X, self.vocabulary_, max_doc_count, min_doc_count, max_features)
# sort features
map_index = self._sort_features(self.vocabulary_)
# combined mask
mask = kept_indices[map_index]
Z = Z.transform(lambda x: x[:, mask], column='X', dtype=sp.spmatrix)
A.unpersist()
return Z
def transform(self, Z):
"""Transform documents to document-term matrix.
Extract token counts out of raw text documents using the vocabulary
fitted with fit or the one provided to the constructor.
Parameters
----------
raw_documents : iterable
An iterable which yields either str, unicode or file objects.
Returns
-------
X : sparse matrix, [n_samples, n_features]
Document-term matrix.
"""
if not hasattr(self, 'vocabulary_'):
self._validate_vocabulary()
self._check_vocabulary()
analyze = self.build_analyzer()
mapper = self.broadcast(self._count_vocab, Z.context)
Z = Z.transform(lambda X: list(map(analyze, X)), column='X') \
.transform(mapper, column='X', dtype=sp.spmatrix)
return Z
class SparkHashingVectorizer(HashingVectorizer):
"""Distributed implementation of Hashingvectorizer.
Convert a collection of text documents to a matrix of token occurrences
It turns a collection of text documents into a scipy.sparse matrix holding
token occurrence counts (or binary occurrence information), possibly
normalized as token frequencies if norm='l1' or projected on the euclidean
unit sphere if norm='l2'.
This text vectorizer implementation uses the hashing trick to find the
token string name to feature integer index mapping.
This strategy has several advantages:
- it is very low memory scalable to large datasets as there is no need to
store a vocabulary dictionary in memory
- it is fast to pickle and un-pickle as it holds no state besides the
constructor parameters
- it can be used in a streaming (partial fit) or parallel pipeline as there
is no state computed during fit.
There are also a couple of cons (vs using a CountVectorizer with an
in-memory vocabulary):
- there is no way to compute the inverse transform (from feature indices to
string feature names) which can be a problem when trying to introspect
which features are most important to a model.
- there can be collisions: distinct tokens can be mapped to the same
feature index. However in practice this is rarely an issue if n_features
is large enough (e.g. 2 ** 18 for text classification problems).
- no IDF weighting as this would render the transformer stateful.
The hash function employed is the signed 32-bit version of Murmurhash3.
Parameters
----------
input: string {'filename', 'file', 'content'}
If 'filename', the sequence passed as an argument to fit is
expected to be a list of filenames that need reading to fetch
the raw content to analyze.
If 'file', the sequence items must have a 'read' method (file-like
object) that is called to fetch the bytes in memory.
        Otherwise the input is expected to be a sequence of items (strings
        or bytes) that are analyzed directly.
encoding : string, 'utf-8' by default.
If bytes or files are given to analyze, this encoding is used to
decode.
decode_error : {'strict', 'ignore', 'replace'}
Instruction on what to do if a byte sequence is given to analyze that
contains characters not of the given `encoding`. By default, it is
'strict', meaning that a UnicodeDecodeError will be raised. Other
values are 'ignore' and 'replace'.
strip_accents: {'ascii', 'unicode', None}
Remove accents during the preprocessing step.
'ascii' is a fast method that only works on characters that have
        a direct ASCII mapping.
'unicode' is a slightly slower method that works on any characters.
None (default) does nothing.
analyzer: string, {'word', 'char', 'char_wb'} or callable
Whether the feature should be made of word or character n-grams.
Option 'char_wb' creates character n-grams only from text inside
word boundaries.
If a callable is passed it is used to extract the sequence of features
out of the raw, unprocessed input.
preprocessor: callable or None (default)
Override the preprocessing (string transformation) stage while
preserving the tokenizing and n-grams generation steps.
tokenizer: callable or None (default)
Override the string tokenization step while preserving the
preprocessing and n-grams generation steps.
ngram_range: tuple (min_n, max_n)
The lower and upper boundary of the range of n-values for different
n-grams to be extracted. All values of n such that min_n <= n <= max_n
will be used.
stop_words: string {'english'}, list, or None (default)
If 'english', a built-in stop word list for English is used.
If a list, that list is assumed to contain stop words, all of which
will be removed from the resulting tokens.
lowercase: boolean, default True
Convert all characters to lowercase before tokenizing.
token_pattern: string
Regular expression denoting what constitutes a "token", only used
if `analyzer == 'word'`. The default regexp selects tokens of 2
or more alphanumeric characters (punctuation is completely ignored
and always treated as a token separator).
n_features : integer, optional, (2 ** 20) by default
The number of features (columns) in the output matrices. Small numbers
of features are likely to cause hash collisions, but large numbers
will cause larger coefficient dimensions in linear learners.
norm : 'l1', 'l2' or None, optional
Norm used to normalize term vectors. None for no normalization.
binary: boolean, False by default.
If True, all non zero counts are set to 1. This is useful for discrete
probabilistic models that model binary events rather than integer
counts.
dtype: type, optional
Type of the matrix returned by fit_transform() or transform().
non_negative : boolean, optional
Whether output matrices should contain non-negative values only;
effectively calls abs on the matrix prior to returning it.
When True, output values can be interpreted as frequencies.
When False, output values will have expected value zero.
See also
--------
CountVectorizer, TfidfVectorizer
"""
def transform(self, Z):
"""Transform an ArrayRDD (or DictRDD with column 'X') containing
sequence of documents to a document-term matrix.
Parameters
----------
Z : ArrayRDD or DictRDD with raw text documents
Samples. Each sample must be a text document (either bytes or
unicode strings) which will be tokenized and hashed.
Returns
-------
        Z : SparseRDD/DictRDD containing scipy.sparse matrix
Document-term matrix.
"""
mapper = super(SparkHashingVectorizer, self).transform
return Z.transform(mapper, column='X', dtype=sp.spmatrix)
fit_transform = transform
def to_scikit(self):
obj = HashingVectorizer()
obj.__dict__ = self.__dict__
return obj
class SparkTfidfTransformer(TfidfTransformer, SparkBroadcasterMixin):
"""Distributed implementation of TfidfTransformer.
Transform a count matrix to a normalized tf or tf-idf representation
Tf means term-frequency while tf-idf means term-frequency times inverse
document-frequency. This is a common term weighting scheme in information
retrieval, that has also found good use in document classification.
The goal of using tf-idf instead of the raw frequencies of occurrence of a
token in a given document is to scale down the impact of tokens that occur
very frequently in a given corpus and that are hence empirically less
informative than features that occur in a small fraction of the training
corpus.
The actual formula used for tf-idf is tf * (idf + 1) = tf + tf * idf,
instead of tf * idf. The effect of this is that terms with zero idf, i.e.
that occur in all documents of a training set, will not be entirely
ignored. The formulas used to compute tf and idf depend on parameter
settings that correspond to the SMART notation used in IR, as follows:
Tf is "n" (natural) by default, "l" (logarithmic) when sublinear_tf=True.
Idf is "t" when use_idf is given, "n" (none) otherwise.
Normalization is "c" (cosine) when norm='l2', "n" (none) when norm=None.
Parameters
----------
norm : 'l1', 'l2' or None, optional
Norm used to normalize term vectors. None for no normalization.
use_idf : boolean, optional
Enable inverse-document-frequency reweighting.
smooth_idf : boolean, optional
Smooth idf weights by adding one to document frequencies, as if an
extra document was seen containing every term in the collection
exactly once. Prevents zero divisions.
sublinear_tf : boolean, optional
Apply sublinear tf scaling, i.e. replace tf with 1 + log(tf).
References
----------
.. [Yates2011] `R. Baeza-Yates and B. Ribeiro-Neto (2011). Modern
Information Retrieval. Addison Wesley, pp. 68-74.`
.. [MRS2008] `C.D. Manning, P. Raghavan and H. Schuetze (2008).
Introduction to Information Retrieval. Cambridge University
Press, pp. 118-120.`
"""
__transient__ = ['_idf_diag']
def fit(self, Z):
"""Learn the idf vector (global term weights)
Parameters
----------
Z : ArrayRDD or DictRDD containing (sparse matrices|ndarray)
a matrix of term/token counts
Returns
-------
        self : SparkTfidfTransformer
"""
X = Z[:, 'X'] if isinstance(Z, DictRDD) else Z
check_rdd(X, (sp.spmatrix, np.ndarray))
def mapper(X, use_idf=self.use_idf):
if not sp.issparse(X):
X = sp.csc_matrix(X)
if use_idf:
return _document_frequency(X)
if self.use_idf:
n_samples, n_features = X.shape
df = X.map(mapper).treeReduce(operator.add)
# perform idf smoothing if required
df += int(self.smooth_idf)
n_samples += int(self.smooth_idf)
            # log + 1 (rather than a plain log) makes sure terms with zero
            # idf don't get suppressed entirely
idf = np.log(float(n_samples) / df) + 1.0
self._idf_diag = sp.spdiags(idf,
diags=0, m=n_features, n=n_features)
return self
def transform(self, Z):
"""Transform an ArrayRDD (or DictRDD's 'X' column) containing count
matrices to a tf or tf-idf representation
Parameters
----------
Z : ArrayRDD/DictRDD with sparse matrices
a matrix of term/token counts
Returns
-------
Z : SparseRDD/DictRDD containing sparse matrices
"""
X = Z[:, 'X'] if isinstance(Z, DictRDD) else Z
check_rdd(X, (sp.spmatrix, np.ndarray))
mapper = super(SparkTfidfTransformer, self).transform
if self.use_idf:
check_is_fitted(self, '_idf_diag', 'idf vector is not fitted')
mapper = self.broadcast(mapper, Z.context)
return Z.transform(mapper, column='X', dtype=sp.spmatrix)
def to_scikit(self):
obj = TfidfTransformer()
obj.__dict__ = self.__dict__
return obj
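# ---------------------------------------------------------------------------
# Usage sketch (illustrative only, not part of the library above): it chains
# the distributed hashing vectorizer and tf-idf transformer defined in this
# module. The ArrayRDD wrapper, its bsize argument and the import path are
# assumptions about the surrounding splearn package and may differ.
# ---------------------------------------------------------------------------
def _tfidf_pipeline_sketch(sc):
    """Hash raw documents into term counts, then reweight them with tf-idf."""
    from splearn.rdd import ArrayRDD  # assumed import path
    docs = ["spark makes text processing scale",
            "tf idf reweights frequent terms",
            "hashing avoids storing a vocabulary"]
    Z = ArrayRDD(sc.parallelize(docs, 2), bsize=2)  # hypothetical block size
    counts = SparkHashingVectorizer(n_features=2 ** 18).transform(Z)
    tfidf = SparkTfidfTransformer(use_idf=True).fit(counts)
    return tfidf.transform(counts)  # SparseRDD of tf-idf weighted blocks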
| apache-2.0 |
shakamunyi/tensorflow | tensorflow/examples/skflow/digits.py | 9 | 2380 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from sklearn import datasets, cross_validation, metrics
import tensorflow as tf
from tensorflow.contrib import learn
from tensorflow.contrib.learn import monitors
# Load dataset
digits = datasets.load_digits()
X = digits.images
y = digits.target
# Split it into train / test subsets
X_train, X_test, y_train, y_test = cross_validation.train_test_split(X, y,
test_size=0.2,
random_state=42)
# Split X_train again to create validation data
X_train, X_val, y_train, y_val = cross_validation.train_test_split(X_train,
y_train,
test_size=0.2,
random_state=42)
# TensorFlow model using Scikit Flow ops
def conv_model(X, y):
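    # X arrives as [batch, 8, 8] grayscale digit images; expand_dims adds a
    # channel axis, conv2d applies 12 filters of size 3x3, and reduce_max over
    # the spatial axes acts as a global max-pool before the logistic head.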
X = tf.expand_dims(X, 3)
features = tf.reduce_max(tf.contrib.layers.conv2d(X, 12, [3, 3]), [1, 2])
features = tf.reshape(features, [-1, 12])
return learn.models.logistic_regression(features, y)
val_monitor = monitors.ValidationMonitor(X_val, y_val, every_n_steps=50)
# Create a classifier, train and predict.
classifier = learn.TensorFlowEstimator(model_fn=conv_model, n_classes=10,
steps=1000, learning_rate=0.05,
batch_size=128)
classifier.fit(X_train, y_train, monitors=[val_monitor])
score = metrics.accuracy_score(y_test, classifier.predict(X_test))
print('Test Accuracy: {0:f}'.format(score))
| apache-2.0 |
Shekharrajak/pydy | examples/double_pendulum/scipy/double_pendulum_integration.py | 8 | 3476 | #!/usr/bin/env python
# This is an example of integrating the equations of motion for a double
# pendulum which were generated with sympy.physics.mechanics. We make use of
# SciPy/NumPy for the integration routines and Matplotlib for plotting.
#
# Steps taken:
# 1. Turned on mechanics_printing() in sympy.physics.mechanics for proper
# output for copying the equations to this file.
# 2. Import zeros, sin, cos, linspace from NumPy, odeint from SciPy and pyplot
# from Matplotlib.
# 3. Write a function definition that returns the right hand side of the
#    first order form of the equations of motion. rhs(y, t, *parameters)
# 4. Called odeint with rhs, y0, t and the parameters.
# 5. Plotted the results.
from numpy import sin, cos, linspace, zeros
import matplotlib.pyplot as plt
from scipy.integrate import odeint
## Integration ##
def rhs(y, t, l, m, g):
"""Returns the derivatives of the states at the given time for the given
set of parameters.
Parameters
----------
y : array_like, shape(n,)
An array of the current states.
t : float
The current time.
l : float
Pendulum length.
m : float
Pendulum mass.
g : float
Acceleration due to gravity.
Returns
-------
dydt : array_like, shape(n,)
An array of the current derivatives of the states.
Notes
-----
The units and order of the states, time and parameters should be
consistent.
"""
# Unpack the states so you can use the variable names in the
# sympy.physics.mechanics equations
q1 = y[0]
q2 = y[1]
u1 = y[2]
u2 = y[3]
# or you can make use of python's tuple unpacking for a one liner
# q1, q2, u1, u2 = y
# Initialize a vector for the derivatives.
dydt = zeros((len(y)))
# Compute the derivatives, these are pasted in from the
# sympy.physics.mechanics results.
dydt[0] = u1
dydt[1] = u2
dydt[2] = (-g*sin(q1)*sin(q2)**2 + 2*g*sin(q1) -
g*sin(q2)*cos(q1)*cos(q2) + 2*l*u1**2*sin(q1)*cos(q1)*cos(q2)**2 -
l*u1**2*sin(q1)*cos(q1) - 2*l*u1**2*sin(q2)*cos(q1)**2*cos(q2) +
l*u1**2*sin(q2)*cos(q2) + l*u2**2*sin(q1)*cos(q2) -
l*u2**2*sin(q2)*cos(q1))/(l*(sin(q1)**2*sin(q2)**2 +
2*sin(q1)*sin(q2)*cos(q1)*cos(q2) + cos(q1)**2*cos(q2)**2 - 2))
dydt[3] = (-sin(q1)*sin(q2)/2 - cos(q1)*cos(q2)/2)*(2*g*l*m*sin(q1) -
l**2*m*(-sin(q1)*cos(q2) +
sin(q2)*cos(q1))*u2**2)/(l**2*m*(sin(q1)*sin(q2)/2 +
cos(q1)*cos(q2)/2)*(sin(q1)*sin(q2) + cos(q1)*cos(q2)) -
l**2*m) + (g*l*m*sin(q2) - l**2*m*(sin(q1)*cos(q2) -
sin(q2)*cos(q1))*u1**2)/(l**2*m*(sin(q1)*sin(q2)/2 +
cos(q1)*cos(q2)/2)*(sin(q1)*sin(q2) + cos(q1)*cos(q2))
- l**2*m)
# Return the derivatives.
return dydt
# Specify the length, mass and acceleration due to gravity.
parameters = (1, 1, 9.8)
# Specify initial conditions for the states.
y0 = [.1, .2, 0, 0]
# Create a time vector.
t = linspace(0, 5)
# Integrate the equations of motion.
y = odeint(rhs, y0, t, parameters)
## Plotting ##
# Create an empty figure.
fig = plt.figure()
# Add a single axes to the figure.
ax = fig.add_subplot(1, 1, 1)
# Plot the states versus time.
ax.plot(t, y)
# Add a title, axes labels and a legend.
ax.set_title('Double Pendulum Example')
ax.set_xlabel('Time (s)')
ax.set_ylabel('Angle, Angular rate (rad, rad/s)')
ax.legend(['q1', 'q2', 'u1', 'u2'])
# Display the figure.
plt.show()
| bsd-3-clause |
CoderHam/Machine_Learning_Projects | final_project/tester.py | 2 | 4224 | #!/usr/bin/pickle
""" a basic script for importing student's POI identifier,
and checking the results that they get from it
requires that the algorithm, dataset, and features list
be written to my_classifier.pkl, my_dataset.pkl, and
my_feature_list.pkl, respectively
that process should happen at the end of poi_id.py
"""
import pickle
import sys
from sklearn.cross_validation import StratifiedShuffleSplit
sys.path.append("../tools/")
from feature_format import featureFormat, targetFeatureSplit
PERF_FORMAT_STRING = "\
\tAccuracy: {:>0.{display_precision}f}\tPrecision: {:>0.{display_precision}f}\t\
Recall: {:>0.{display_precision}f}\tF1: {:>0.{display_precision}f}\tF2: {:>0.{display_precision}f}"
RESULTS_FORMAT_STRING = "\tTotal predictions: {:4d}\tTrue positives: {:4d}\tFalse positives: {:4d}\tFalse negatives: {:4d}\tTrue negatives: {:4d}"
def test_classifier(clf, dataset, feature_list, folds = 1000):
data = featureFormat(dataset, feature_list, sort_keys = True)
labels, features = targetFeatureSplit(data)
cv = StratifiedShuffleSplit(labels, folds, random_state = 42)
true_negatives = 0
false_negatives = 0
true_positives = 0
false_positives = 0
for train_idx, test_idx in cv:
features_train = []
features_test = []
labels_train = []
labels_test = []
for ii in train_idx:
features_train.append( features[ii] )
labels_train.append( labels[ii] )
for jj in test_idx:
features_test.append( features[jj] )
labels_test.append( labels[jj] )
### fit the classifier using training set, and test on test set
clf.fit(features_train, labels_train)
predictions = clf.predict(features_test)
for prediction, truth in zip(predictions, labels_test):
if prediction == 0 and truth == 0:
true_negatives += 1
elif prediction == 0 and truth == 1:
false_negatives += 1
elif prediction == 1 and truth == 0:
false_positives += 1
elif prediction == 1 and truth == 1:
true_positives += 1
else:
print "Warning: Found a predicted label not == 0 or 1."
print "All predictions should take value 0 or 1."
print "Evaluating performance for processed predictions:"
break
try:
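        # Derive the standard confusion-matrix metrics; f1 is the harmonic
        # mean of precision and recall, and f2 is the F-beta score with
        # beta=2, i.e. (1 + beta^2) * P * R / (beta^2 * P + R), which weights
        # recall more heavily than precision.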
total_predictions = true_negatives + false_negatives + false_positives + true_positives
accuracy = 1.0*(true_positives + true_negatives)/total_predictions
precision = 1.0*true_positives/(true_positives+false_positives)
recall = 1.0*true_positives/(true_positives+false_negatives)
f1 = 2.0 * true_positives/(2*true_positives + false_positives+false_negatives)
f2 = (1+2.0*2.0) * precision*recall/(4*precision + recall)
print clf
print PERF_FORMAT_STRING.format(accuracy, precision, recall, f1, f2, display_precision = 5)
print RESULTS_FORMAT_STRING.format(total_predictions, true_positives, false_positives, false_negatives, true_negatives)
print ""
except:
print "Got a divide by zero when trying out:", clf
print "Precision or recall may be undefined due to a lack of true positive predicitons."
CLF_PICKLE_FILENAME = "my_classifier.pkl"
DATASET_PICKLE_FILENAME = "my_dataset.pkl"
FEATURE_LIST_FILENAME = "my_feature_list.pkl"
def dump_classifier_and_data(clf, dataset, feature_list):
pickle.dump(clf, open(CLF_PICKLE_FILENAME, "w") )
pickle.dump(dataset, open(DATASET_PICKLE_FILENAME, "w") )
pickle.dump(feature_list, open(FEATURE_LIST_FILENAME, "w") )
def load_classifier_and_data():
clf = pickle.load(open(CLF_PICKLE_FILENAME, "r") )
dataset = pickle.load(open(DATASET_PICKLE_FILENAME, "r") )
feature_list = pickle.load(open(FEATURE_LIST_FILENAME, "r"))
return clf, dataset, feature_list
def main():
### load up student's classifier, dataset, and feature_list
clf, dataset, feature_list = load_classifier_and_data()
### Run testing script
test_classifier(clf, dataset, feature_list)
if __name__ == '__main__':
main()
| gpl-2.0 |
vandenheuvel/tribler | TriblerGUI/widgets/trustpageplot.py | 1 | 4966 | import matplotlib
from TriblerGUI.defs import PAGE_MARKET
matplotlib.use('Qt5Agg')
import datetime
from PyQt5.QtWidgets import QSizePolicy
from PyQt5.QtWidgets import QWidget
from matplotlib.backends.backend_qt5agg import FigureCanvas
from matplotlib.dates import DateFormatter
from matplotlib.figure import Figure
from TriblerGUI.tribler_request_manager import TriblerRequestManager
class MplCanvas(FigureCanvas):
"""Ultimately, this is a QWidget."""
def __init__(self, parent=None, width=5, height=5, dpi=100):
fig = Figure(figsize=(width, height), dpi=dpi)
fig.set_facecolor("#282828")
fig.set_tight_layout({"pad": 1})
self.axes = fig.add_subplot(111)
self.plot_data = None
FigureCanvas.__init__(self, fig)
self.setParent(parent)
FigureCanvas.setSizePolicy(self, QSizePolicy.Expanding, QSizePolicy.Expanding)
FigureCanvas.updateGeometry(self)
def compute_initial_figure(self):
pass
class TrustPlotMplCanvas(MplCanvas):
def compute_initial_figure(self):
self.axes.cla()
self.axes.set_title("MBytes given/taken over time", color="#e0e0e0")
self.axes.set_xlabel("Date")
self.axes.set_ylabel("Given/taken data (MBytes)")
self.axes.xaxis.set_major_formatter(DateFormatter('%d-%m-%y'))
self.axes.plot(self.plot_data[1], self.plot_data[0][0], label="MBytes given", marker='o')
self.axes.plot(self.plot_data[1], self.plot_data[0][1], label="MBytes taken", marker='o')
self.axes.grid(True)
for line in self.axes.get_xgridlines() + self.axes.get_ygridlines():
line.set_linestyle('--')
# Color the axes
if hasattr(self.axes, 'set_facecolor'): # Not available on Linux
self.axes.set_facecolor('#464646')
self.axes.xaxis.label.set_color('#e0e0e0')
self.axes.yaxis.label.set_color('#e0e0e0')
self.axes.tick_params(axis='x', colors='#e0e0e0')
self.axes.tick_params(axis='y', colors='#e0e0e0')
# Create the legend
handles, labels = self.axes.get_legend_handles_labels()
self.axes.legend(handles, labels)
if len(self.plot_data[0][0]) == 1: # If we only have one data point, don't show negative axis
self.axes.set_ylim(-0.3, 10)
self.axes.set_xlim(datetime.datetime.now() - datetime.timedelta(hours=1),
datetime.datetime.now() + datetime.timedelta(days=4))
self.draw()
class TrustPagePlot(QWidget):
"""
This page shows various trust statistics.
"""
def __init__(self):
QWidget.__init__(self)
self.trust_plot = None
self.public_key = None
self.request_mgr = None
self.statistics = None
self.blocks = None
self.byte_scale = 1024 * 1024
def initialize_trust_page(self):
vlayout = self.window().plot_widget.layout()
self.trust_plot = TrustPlotMplCanvas(self.window().plot_widget, dpi=100)
vlayout.addWidget(self.trust_plot)
def load_trust_statistics(self):
self.request_mgr = TriblerRequestManager()
self.request_mgr.perform_request("trustchain/statistics", self.received_trustchain_statistics)
def received_trustchain_statistics(self, statistics):
statistics = statistics["statistics"]
self.window().trust_contribution_amount_label.setText("%s MBytes" % (statistics["total_up"] / self.byte_scale))
self.window().trust_consumption_amount_label.setText("%s MBytes" % (statistics["total_down"] / self.byte_scale))
self.window().trust_people_helped_label.setText("%d" % statistics["peers_that_pk_helped"])
self.window().trust_people_helped_you_label.setText("%d" % statistics["peers_that_helped_pk"])
# Fetch the latest blocks of this user
self.public_key = statistics["id"]
self.request_mgr = TriblerRequestManager()
self.request_mgr.perform_request("trustchain/blocks/%s" % self.public_key, self.received_trustchain_blocks)
def received_trustchain_blocks(self, blocks):
self.blocks = blocks["blocks"]
self.plot_absolute_values()
def plot_absolute_values(self):
"""
Plot two lines of the absolute amounts of contributed and consumed bytes.
"""
plot_data = [[[], []], []]
# Convert all dates to a datetime object
for block in self.blocks:
plot_data[1].append(datetime.datetime.strptime(block["insert_time"], "%Y-%m-%d %H:%M:%S"))
plot_data[0][0].append(block["total_up"] / self.byte_scale)
plot_data[0][1].append(block["total_down"] / self.byte_scale)
if len(self.blocks) == 0:
# Create on single data point with 0mb up and 0mb down
plot_data = [[[0], [0]], [datetime.datetime.now()]]
self.trust_plot.plot_data = plot_data
self.trust_plot.compute_initial_figure()
| lgpl-3.0 |
murali-munna/scikit-learn | sklearn/neighbors/classification.py | 106 | 13987 | """Nearest Neighbor Classification"""
# Authors: Jake Vanderplas <[email protected]>
# Fabian Pedregosa <[email protected]>
# Alexandre Gramfort <[email protected]>
# Sparseness support by Lars Buitinck <[email protected]>
# Multi-output support by Arnaud Joly <[email protected]>
#
# License: BSD 3 clause (C) INRIA, University of Amsterdam
import numpy as np
from scipy import stats
from ..utils.extmath import weighted_mode
from .base import \
_check_weights, _get_weights, \
NeighborsBase, KNeighborsMixin,\
RadiusNeighborsMixin, SupervisedIntegerMixin
from ..base import ClassifierMixin
from ..utils import check_array
class KNeighborsClassifier(NeighborsBase, KNeighborsMixin,
SupervisedIntegerMixin, ClassifierMixin):
"""Classifier implementing the k-nearest neighbors vote.
Read more in the :ref:`User Guide <classification>`.
Parameters
----------
n_neighbors : int, optional (default = 5)
Number of neighbors to use by default for :meth:`k_neighbors` queries.
weights : str or callable
weight function used in prediction. Possible values:
- 'uniform' : uniform weights. All points in each neighborhood
are weighted equally.
- 'distance' : weight points by the inverse of their distance.
in this case, closer neighbors of a query point will have a
greater influence than neighbors which are further away.
- [callable] : a user-defined function which accepts an
array of distances, and returns an array of the same shape
containing the weights.
Uniform weights are used by default.
algorithm : {'auto', 'ball_tree', 'kd_tree', 'brute'}, optional
Algorithm used to compute the nearest neighbors:
- 'ball_tree' will use :class:`BallTree`
- 'kd_tree' will use :class:`KDTree`
- 'brute' will use a brute-force search.
- 'auto' will attempt to decide the most appropriate algorithm
based on the values passed to :meth:`fit` method.
Note: fitting on sparse input will override the setting of
this parameter, using brute force.
leaf_size : int, optional (default = 30)
Leaf size passed to BallTree or KDTree. This can affect the
speed of the construction and query, as well as the memory
required to store the tree. The optimal value depends on the
nature of the problem.
metric : string or DistanceMetric object (default = 'minkowski')
the distance metric to use for the tree. The default metric is
minkowski, and with p=2 is equivalent to the standard Euclidean
metric. See the documentation of the DistanceMetric class for a
list of available metrics.
p : integer, optional (default = 2)
Power parameter for the Minkowski metric. When p = 1, this is
equivalent to using manhattan_distance (l1), and euclidean_distance
(l2) for p = 2. For arbitrary p, minkowski_distance (l_p) is used.
metric_params: dict, optional (default = None)
additional keyword arguments for the metric function.
Examples
--------
>>> X = [[0], [1], [2], [3]]
>>> y = [0, 0, 1, 1]
>>> from sklearn.neighbors import KNeighborsClassifier
>>> neigh = KNeighborsClassifier(n_neighbors=3)
>>> neigh.fit(X, y) # doctest: +ELLIPSIS
KNeighborsClassifier(...)
>>> print(neigh.predict([[1.1]]))
[0]
>>> print(neigh.predict_proba([[0.9]]))
[[ 0.66666667 0.33333333]]
See also
--------
RadiusNeighborsClassifier
KNeighborsRegressor
RadiusNeighborsRegressor
NearestNeighbors
Notes
-----
See :ref:`Nearest Neighbors <neighbors>` in the online documentation
for a discussion of the choice of ``algorithm`` and ``leaf_size``.
.. warning::
Regarding the Nearest Neighbors algorithms, if it is found that two
neighbors, neighbor `k+1` and `k`, have identical distances but
       different labels, the results will depend on the ordering of the
training data.
http://en.wikipedia.org/wiki/K-nearest_neighbor_algorithm
"""
def __init__(self, n_neighbors=5,
weights='uniform', algorithm='auto', leaf_size=30,
p=2, metric='minkowski', metric_params=None, **kwargs):
self._init_params(n_neighbors=n_neighbors,
algorithm=algorithm,
leaf_size=leaf_size, metric=metric, p=p,
metric_params=metric_params, **kwargs)
self.weights = _check_weights(weights)
def predict(self, X):
"""Predict the class labels for the provided data
Parameters
----------
X : array of shape [n_samples, n_features]
A 2-D array representing the test points.
Returns
-------
y : array of shape [n_samples] or [n_samples, n_outputs]
Class labels for each data sample.
"""
X = check_array(X, accept_sparse='csr')
neigh_dist, neigh_ind = self.kneighbors(X)
classes_ = self.classes_
_y = self._y
if not self.outputs_2d_:
_y = self._y.reshape((-1, 1))
classes_ = [self.classes_]
n_outputs = len(classes_)
n_samples = X.shape[0]
weights = _get_weights(neigh_dist, self.weights)
y_pred = np.empty((n_samples, n_outputs), dtype=classes_[0].dtype)
for k, classes_k in enumerate(classes_):
if weights is None:
mode, _ = stats.mode(_y[neigh_ind, k], axis=1)
else:
mode, _ = weighted_mode(_y[neigh_ind, k], weights, axis=1)
mode = np.asarray(mode.ravel(), dtype=np.intp)
y_pred[:, k] = classes_k.take(mode)
if not self.outputs_2d_:
y_pred = y_pred.ravel()
return y_pred
def predict_proba(self, X):
"""Return probability estimates for the test data X.
Parameters
----------
X : array, shape = (n_samples, n_features)
A 2-D array representing the test points.
Returns
-------
p : array of shape = [n_samples, n_classes], or a list of n_outputs
of such arrays if n_outputs > 1.
The class probabilities of the input samples. Classes are ordered
by lexicographic order.
"""
X = check_array(X, accept_sparse='csr')
neigh_dist, neigh_ind = self.kneighbors(X)
classes_ = self.classes_
_y = self._y
if not self.outputs_2d_:
_y = self._y.reshape((-1, 1))
classes_ = [self.classes_]
n_samples = X.shape[0]
weights = _get_weights(neigh_dist, self.weights)
if weights is None:
weights = np.ones_like(neigh_ind)
all_rows = np.arange(X.shape[0])
probabilities = []
for k, classes_k in enumerate(classes_):
pred_labels = _y[:, k][neigh_ind]
proba_k = np.zeros((n_samples, classes_k.size))
# a simple ':' index doesn't work right
for i, idx in enumerate(pred_labels.T): # loop is O(n_neighbors)
proba_k[all_rows, idx] += weights[:, i]
# normalize 'votes' into real [0,1] probabilities
normalizer = proba_k.sum(axis=1)[:, np.newaxis]
normalizer[normalizer == 0.0] = 1.0
proba_k /= normalizer
probabilities.append(proba_k)
if not self.outputs_2d_:
probabilities = probabilities[0]
return probabilities
class RadiusNeighborsClassifier(NeighborsBase, RadiusNeighborsMixin,
SupervisedIntegerMixin, ClassifierMixin):
"""Classifier implementing a vote among neighbors within a given radius
Read more in the :ref:`User Guide <classification>`.
Parameters
----------
radius : float, optional (default = 1.0)
        Range of parameter space to use by default for :meth:`radius_neighbors`
queries.
weights : str or callable
weight function used in prediction. Possible values:
- 'uniform' : uniform weights. All points in each neighborhood
are weighted equally.
- 'distance' : weight points by the inverse of their distance.
in this case, closer neighbors of a query point will have a
greater influence than neighbors which are further away.
- [callable] : a user-defined function which accepts an
array of distances, and returns an array of the same shape
containing the weights.
Uniform weights are used by default.
algorithm : {'auto', 'ball_tree', 'kd_tree', 'brute'}, optional
Algorithm used to compute the nearest neighbors:
- 'ball_tree' will use :class:`BallTree`
        - 'kd_tree' will use :class:`KDTree`
- 'brute' will use a brute-force search.
- 'auto' will attempt to decide the most appropriate algorithm
based on the values passed to :meth:`fit` method.
Note: fitting on sparse input will override the setting of
this parameter, using brute force.
leaf_size : int, optional (default = 30)
Leaf size passed to BallTree or KDTree. This can affect the
speed of the construction and query, as well as the memory
required to store the tree. The optimal value depends on the
nature of the problem.
metric : string or DistanceMetric object (default='minkowski')
the distance metric to use for the tree. The default metric is
minkowski, and with p=2 is equivalent to the standard Euclidean
metric. See the documentation of the DistanceMetric class for a
list of available metrics.
p : integer, optional (default = 2)
Power parameter for the Minkowski metric. When p = 1, this is
equivalent to using manhattan_distance (l1), and euclidean_distance
(l2) for p = 2. For arbitrary p, minkowski_distance (l_p) is used.
outlier_label : int, optional (default = None)
Label, which is given for outlier samples (samples with no
neighbors on given radius).
If set to None, ValueError is raised, when outlier is detected.
metric_params: dict, optional (default = None)
additional keyword arguments for the metric function.
Examples
--------
>>> X = [[0], [1], [2], [3]]
>>> y = [0, 0, 1, 1]
>>> from sklearn.neighbors import RadiusNeighborsClassifier
>>> neigh = RadiusNeighborsClassifier(radius=1.0)
>>> neigh.fit(X, y) # doctest: +ELLIPSIS
RadiusNeighborsClassifier(...)
>>> print(neigh.predict([[1.5]]))
[0]
See also
--------
KNeighborsClassifier
RadiusNeighborsRegressor
KNeighborsRegressor
NearestNeighbors
Notes
-----
See :ref:`Nearest Neighbors <neighbors>` in the online documentation
for a discussion of the choice of ``algorithm`` and ``leaf_size``.
http://en.wikipedia.org/wiki/K-nearest_neighbor_algorithm
"""
def __init__(self, radius=1.0, weights='uniform',
algorithm='auto', leaf_size=30, p=2, metric='minkowski',
outlier_label=None, metric_params=None, **kwargs):
self._init_params(radius=radius,
algorithm=algorithm,
leaf_size=leaf_size,
metric=metric, p=p, metric_params=metric_params,
**kwargs)
self.weights = _check_weights(weights)
self.outlier_label = outlier_label
def predict(self, X):
"""Predict the class labels for the provided data
Parameters
----------
X : array of shape [n_samples, n_features]
A 2-D array representing the test points.
Returns
-------
y : array of shape [n_samples] or [n_samples, n_outputs]
Class labels for each data sample.
"""
X = check_array(X, accept_sparse='csr')
n_samples = X.shape[0]
neigh_dist, neigh_ind = self.radius_neighbors(X)
inliers = [i for i, nind in enumerate(neigh_ind) if len(nind) != 0]
outliers = [i for i, nind in enumerate(neigh_ind) if len(nind) == 0]
classes_ = self.classes_
_y = self._y
if not self.outputs_2d_:
_y = self._y.reshape((-1, 1))
classes_ = [self.classes_]
n_outputs = len(classes_)
if self.outlier_label is not None:
neigh_dist[outliers] = 1e-6
elif outliers:
raise ValueError('No neighbors found for test samples %r, '
'you can try using larger radius, '
'give a label for outliers, '
'or consider removing them from your dataset.'
% outliers)
weights = _get_weights(neigh_dist, self.weights)
y_pred = np.empty((n_samples, n_outputs), dtype=classes_[0].dtype)
for k, classes_k in enumerate(classes_):
pred_labels = np.array([_y[ind, k] for ind in neigh_ind],
dtype=object)
if weights is None:
mode = np.array([stats.mode(pl)[0]
for pl in pred_labels[inliers]], dtype=np.int)
else:
mode = np.array([weighted_mode(pl, w)[0]
for (pl, w)
in zip(pred_labels[inliers], weights)],
dtype=np.int)
mode = mode.ravel()
y_pred[inliers, k] = classes_k.take(mode)
if outliers:
y_pred[outliers, :] = self.outlier_label
if not self.outputs_2d_:
y_pred = y_pred.ravel()
return y_pred
| bsd-3-clause |
jakobworldpeace/scikit-learn | benchmarks/bench_sample_without_replacement.py | 397 | 8008 | """
Benchmarks for sampling without replacement of integer.
"""
from __future__ import division
from __future__ import print_function
import gc
import sys
import optparse
from datetime import datetime
import operator
import matplotlib.pyplot as plt
import numpy as np
import random
from sklearn.externals.six.moves import xrange
from sklearn.utils.random import sample_without_replacement
def compute_time(t_start, delta):
mu_second = 0.0 + 10 ** 6 # number of microseconds in a second
return delta.seconds + delta.microseconds / mu_second
def bench_sample(sampling, n_population, n_samples):
gc.collect()
# start time
t_start = datetime.now()
sampling(n_population, n_samples)
delta = (datetime.now() - t_start)
# stop time
time = compute_time(t_start, delta)
return time
if __name__ == "__main__":
###########################################################################
# Option parser
###########################################################################
op = optparse.OptionParser()
op.add_option("--n-times",
dest="n_times", default=5, type=int,
help="Benchmark results are average over n_times experiments")
op.add_option("--n-population",
dest="n_population", default=100000, type=int,
help="Size of the population to sample from.")
op.add_option("--n-step",
dest="n_steps", default=5, type=int,
help="Number of step interval between 0 and n_population.")
default_algorithms = "custom-tracking-selection,custom-auto," \
"custom-reservoir-sampling,custom-pool,"\
"python-core-sample,numpy-permutation"
op.add_option("--algorithm",
dest="selected_algorithm",
default=default_algorithms,
type=str,
help="Comma-separated list of transformer to benchmark. "
"Default: %default. \nAvailable: %default")
# op.add_option("--random-seed",
# dest="random_seed", default=13, type=int,
# help="Seed used by the random number generators.")
(opts, args) = op.parse_args()
if len(args) > 0:
op.error("this script takes no arguments.")
sys.exit(1)
selected_algorithm = opts.selected_algorithm.split(',')
for key in selected_algorithm:
if key not in default_algorithms.split(','):
raise ValueError("Unknown sampling algorithm \"%s\" not in (%s)."
% (key, default_algorithms))
###########################################################################
# List sampling algorithm
###########################################################################
# We assume that sampling algorithm has the following signature:
# sample(n_population, n_sample)
#
sampling_algorithm = {}
###########################################################################
# Set Python core input
sampling_algorithm["python-core-sample"] = \
lambda n_population, n_sample: \
random.sample(xrange(n_population), n_sample)
###########################################################################
# Set custom automatic method selection
sampling_algorithm["custom-auto"] = \
lambda n_population, n_samples, random_state=None: \
sample_without_replacement(n_population,
n_samples,
method="auto",
random_state=random_state)
###########################################################################
# Set custom tracking based method
sampling_algorithm["custom-tracking-selection"] = \
lambda n_population, n_samples, random_state=None: \
sample_without_replacement(n_population,
n_samples,
method="tracking_selection",
random_state=random_state)
###########################################################################
# Set custom reservoir based method
sampling_algorithm["custom-reservoir-sampling"] = \
lambda n_population, n_samples, random_state=None: \
sample_without_replacement(n_population,
n_samples,
method="reservoir_sampling",
random_state=random_state)
###########################################################################
# Set custom reservoir based method
sampling_algorithm["custom-pool"] = \
lambda n_population, n_samples, random_state=None: \
sample_without_replacement(n_population,
n_samples,
method="pool",
random_state=random_state)
###########################################################################
# Numpy permutation based
sampling_algorithm["numpy-permutation"] = \
lambda n_population, n_sample: \
np.random.permutation(n_population)[:n_sample]
###########################################################################
# Remove unspecified algorithm
sampling_algorithm = dict((key, value)
for key, value in sampling_algorithm.items()
if key in selected_algorithm)
###########################################################################
# Perform benchmark
###########################################################################
time = {}
n_samples = np.linspace(start=0, stop=opts.n_population,
num=opts.n_steps).astype(np.int)
ratio = n_samples / opts.n_population
print('Benchmarks')
print("===========================")
for name in sorted(sampling_algorithm):
print("Perform benchmarks for %s..." % name, end="")
time[name] = np.zeros(shape=(opts.n_steps, opts.n_times))
for step in xrange(opts.n_steps):
for it in xrange(opts.n_times):
time[name][step, it] = bench_sample(sampling_algorithm[name],
opts.n_population,
n_samples[step])
print("done")
print("Averaging results...", end="")
for name in sampling_algorithm:
time[name] = np.mean(time[name], axis=1)
print("done\n")
# Print results
###########################################################################
print("Script arguments")
print("===========================")
arguments = vars(opts)
print("%s \t | %s " % ("Arguments".ljust(16),
"Value".center(12),))
print(25 * "-" + ("|" + "-" * 14) * 1)
for key, value in arguments.items():
print("%s \t | %s " % (str(key).ljust(16),
str(value).strip().center(12)))
print("")
print("Sampling algorithm performance:")
print("===============================")
print("Results are averaged over %s repetition(s)." % opts.n_times)
print("")
fig = plt.figure('scikit-learn sample w/o replacement benchmark results')
plt.title("n_population = %s, n_times = %s" %
(opts.n_population, opts.n_times))
ax = fig.add_subplot(111)
for name in sampling_algorithm:
ax.plot(ratio, time[name], label=name)
ax.set_xlabel('ratio of n_sample / n_population')
ax.set_ylabel('Time (s)')
ax.legend()
# Sort legend labels
handles, labels = ax.get_legend_handles_labels()
hl = sorted(zip(handles, labels), key=operator.itemgetter(1))
handles2, labels2 = zip(*hl)
ax.legend(handles2, labels2, loc=0)
plt.show()
| bsd-3-clause |
rishikksh20/scikit-learn | sklearn/feature_selection/tests/test_mutual_info.py | 30 | 6881 | from __future__ import division
import numpy as np
from numpy.testing import run_module_suite
from scipy.sparse import csr_matrix
from sklearn.utils.testing import (assert_array_equal, assert_almost_equal,
assert_false, assert_raises, assert_equal,
assert_allclose, assert_greater)
from sklearn.feature_selection.mutual_info_ import (
mutual_info_regression, mutual_info_classif, _compute_mi)
def test_compute_mi_dd():
# In discrete case computations are straightforward and can be done
# by hand on given vectors.
x = np.array([0, 1, 1, 0, 0])
y = np.array([1, 0, 0, 0, 1])
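    # Joint counts over the 5 samples: (x=0, y=1) occurs twice, (x=1, y=0)
    # twice and (x=0, y=0) once, which gives the three terms of H_xy below.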
H_x = H_y = -(3/5) * np.log(3/5) - (2/5) * np.log(2/5)
H_xy = -1/5 * np.log(1/5) - 2/5 * np.log(2/5) - 2/5 * np.log(2/5)
I_xy = H_x + H_y - H_xy
assert_almost_equal(_compute_mi(x, y, True, True), I_xy)
def test_compute_mi_cc():
# For two continuous variables a good approach is to test on bivariate
# normal distribution, where mutual information is known.
# Mean of the distribution, irrelevant for mutual information.
mean = np.zeros(2)
# Setup covariance matrix with correlation coeff. equal 0.5.
sigma_1 = 1
sigma_2 = 10
corr = 0.5
cov = np.array([
[sigma_1**2, corr * sigma_1 * sigma_2],
[corr * sigma_1 * sigma_2, sigma_2**2]
])
# True theoretical mutual information.
I_theory = (np.log(sigma_1) + np.log(sigma_2) -
0.5 * np.log(np.linalg.det(cov)))
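    # Since det(cov) = sigma_1**2 * sigma_2**2 * (1 - corr**2), this reduces
    # to the usual bivariate-normal result I(X; Y) = -0.5 * log(1 - corr**2).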
np.random.seed(0)
Z = np.random.multivariate_normal(mean, cov, size=1000)
x, y = Z[:, 0], Z[:, 1]
# Theory and computed values won't be very close, assert that the
# first figures after decimal point match.
for n_neighbors in [3, 5, 7]:
I_computed = _compute_mi(x, y, False, False, n_neighbors)
assert_almost_equal(I_computed, I_theory, 1)
def test_compute_mi_cd():
# To test define a joint distribution as follows:
# p(x, y) = p(x) p(y | x)
# X ~ Bernoulli(p)
# (Y | x = 0) ~ Uniform(-1, 1)
# (Y | x = 1) ~ Uniform(0, 2)
# Use the following formula for mutual information:
# I(X; Y) = H(Y) - H(Y | X)
# Two entropies can be computed by hand:
# H(Y) = -(1-p)/2 * ln((1-p)/2) - p/2*log(p/2) - 1/2*log(1/2)
# H(Y | X) = ln(2)
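    # (H(Y) follows from the marginal density of Y, which is piecewise
    # constant: (1-p)/2 on (-1, 0), 1/2 on (0, 1) and p/2 on (1, 2); each
    # conditional is uniform on an interval of length 2, hence H(Y|X) = ln 2.)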
    # Now we need to implement sampling from our distribution, which is
# done easily using conditional distribution logic.
n_samples = 1000
np.random.seed(0)
for p in [0.3, 0.5, 0.7]:
x = np.random.uniform(size=n_samples) > p
y = np.empty(n_samples)
mask = x == 0
y[mask] = np.random.uniform(-1, 1, size=np.sum(mask))
y[~mask] = np.random.uniform(0, 2, size=np.sum(~mask))
I_theory = -0.5 * ((1 - p) * np.log(0.5 * (1 - p)) +
p * np.log(0.5 * p) + np.log(0.5)) - np.log(2)
# Assert the same tolerance.
for n_neighbors in [3, 5, 7]:
I_computed = _compute_mi(x, y, True, False, n_neighbors)
assert_almost_equal(I_computed, I_theory, 1)
def test_compute_mi_cd_unique_label():
# Test that adding unique label doesn't change MI.
n_samples = 100
x = np.random.uniform(size=n_samples) > 0.5
y = np.empty(n_samples)
mask = x == 0
y[mask] = np.random.uniform(-1, 1, size=np.sum(mask))
y[~mask] = np.random.uniform(0, 2, size=np.sum(~mask))
mi_1 = _compute_mi(x, y, True, False)
x = np.hstack((x, 2))
y = np.hstack((y, 10))
mi_2 = _compute_mi(x, y, True, False)
assert_equal(mi_1, mi_2)
# We are going test that feature ordering by MI matches our expectations.
def test_mutual_info_classif_discrete():
X = np.array([[0, 0, 0],
[1, 1, 0],
[2, 0, 1],
[2, 0, 1],
[2, 0, 1]])
y = np.array([0, 1, 2, 2, 1])
# Here X[:, 0] is the most informative feature, and X[:, 1] is weakly
# informative.
mi = mutual_info_classif(X, y, discrete_features=True)
assert_array_equal(np.argsort(-mi), np.array([0, 2, 1]))
def test_mutual_info_regression():
# We generate sample from multivariate normal distribution, using
# transformation from initially uncorrelated variables. The zero
# variables after transformation is selected as the target vector,
# it has the strongest correlation with the variable 2, and
# the weakest correlation with the variable 1.
T = np.array([
[1, 0.5, 2, 1],
[0, 1, 0.1, 0.0],
[0, 0.1, 1, 0.1],
[0, 0.1, 0.1, 1]
])
cov = T.dot(T.T)
mean = np.zeros(4)
np.random.seed(0)
Z = np.random.multivariate_normal(mean, cov, size=1000)
X = Z[:, 1:]
y = Z[:, 0]
mi = mutual_info_regression(X, y, random_state=0)
assert_array_equal(np.argsort(-mi), np.array([1, 2, 0]))
def test_mutual_info_classif_mixed():
# Here the target is discrete and there are two continuous and one
# discrete feature. The idea of this test is clear from the code.
np.random.seed(0)
X = np.random.rand(1000, 3)
X[:, 1] += X[:, 0]
y = ((0.5 * X[:, 0] + X[:, 2]) > 0.5).astype(int)
X[:, 2] = X[:, 2] > 0.5
mi = mutual_info_classif(X, y, discrete_features=[2], n_neighbors=3,
random_state=0)
assert_array_equal(np.argsort(-mi), [2, 0, 1])
for n_neighbors in [5, 7, 9]:
mi_nn = mutual_info_classif(X, y, discrete_features=[2],
n_neighbors=n_neighbors, random_state=0)
        # Check that the continuous values have a higher MI with greater
# n_neighbors
assert_greater(mi_nn[0], mi[0])
assert_greater(mi_nn[1], mi[1])
# The n_neighbors should not have any effect on the discrete value
# The MI should be the same
assert_equal(mi_nn[2], mi[2])
def test_mutual_info_options():
X = np.array([[0, 0, 0],
[1, 1, 0],
[2, 0, 1],
[2, 0, 1],
[2, 0, 1]], dtype=float)
y = np.array([0, 1, 2, 2, 1], dtype=float)
X_csr = csr_matrix(X)
for mutual_info in (mutual_info_regression, mutual_info_classif):
assert_raises(ValueError, mutual_info_regression, X_csr, y,
discrete_features=False)
mi_1 = mutual_info(X, y, discrete_features='auto', random_state=0)
mi_2 = mutual_info(X, y, discrete_features=False, random_state=0)
mi_3 = mutual_info(X_csr, y, discrete_features='auto',
random_state=0)
mi_4 = mutual_info(X_csr, y, discrete_features=True,
random_state=0)
assert_array_equal(mi_1, mi_2)
assert_array_equal(mi_3, mi_4)
assert_false(np.allclose(mi_1, mi_3))
if __name__ == '__main__':
run_module_suite()
| bsd-3-clause |
Bulochkin/tensorflow_pack | tensorflow/contrib/learn/python/learn/dataframe/dataframe.py | 27 | 4836 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""A DataFrame is a container for ingesting and preprocessing data."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
from .series import Series
from .transform import Transform
from tensorflow.python.util.deprecation import deprecated
class DataFrame(object):
"""A DataFrame is a container for ingesting and preprocessing data."""
@deprecated("2017-06-15", "contrib/learn/dataframe/** is deprecated.")
def __init__(self):
self._columns = {}
def columns(self):
"""Set of the column names."""
return frozenset(self._columns.keys())
def __len__(self):
"""The number of columns in the DataFrame."""
return len(self._columns)
def assign(self, **kwargs):
"""Adds columns to DataFrame.
Args:
**kwargs: assignments of the form key=value where key is a string
and value is an `inflow.Series`, a `pandas.Series` or a numpy array.
Raises:
TypeError: keys are not strings.
TypeError: values are not `inflow.Series`, `pandas.Series` or
`numpy.ndarray`.
TODO(jamieas): pandas assign method returns a new DataFrame. Consider
switching to this behavior, changing the name or adding in_place as an
argument.
"""
for k, v in kwargs.items():
if not isinstance(k, str):
raise TypeError("The only supported type for keys is string; got %s" %
type(k))
if v is None:
del self._columns[k]
elif isinstance(v, Series):
self._columns[k] = v
elif isinstance(v, Transform) and v.input_valency() == 0:
self._columns[k] = v()
else:
raise TypeError(
"Column in assignment must be an inflow.Series, inflow.Transform,"
" or None; got type '%s'." % type(v).__name__)
def select_columns(self, keys):
"""Returns a new DataFrame with a subset of columns.
Args:
keys: A list of strings. Each should be the name of a column in the
DataFrame.
Returns:
A new DataFrame containing only the specified columns.
"""
result = type(self)()
for key in keys:
result[key] = self._columns[key]
return result
def exclude_columns(self, exclude_keys):
"""Returns a new DataFrame with all columns not excluded via exclude_keys.
Args:
exclude_keys: A list of strings. Each should be the name of a column in
the DataFrame. These columns will be excluded from the result.
Returns:
A new DataFrame containing all columns except those specified.
"""
result = type(self)()
for key, value in self._columns.items():
if key not in exclude_keys:
result[key] = value
return result
def __getitem__(self, key):
"""Indexing functionality for DataFrames.
Args:
key: a string or an iterable of strings.
Returns:
A Series or list of Series corresponding to the given keys.
"""
if isinstance(key, str):
return self._columns[key]
elif isinstance(key, collections.Iterable):
for i in key:
if not isinstance(i, str):
raise TypeError("Expected a String; entry %s has type %s." %
(i, type(i).__name__))
return [self.__getitem__(i) for i in key]
raise TypeError(
"Invalid index: %s of type %s. Only strings or lists of strings are "
"supported." % (key, type(key)))
def __setitem__(self, key, value):
if isinstance(key, str):
key = [key]
if isinstance(value, Series):
value = [value]
self.assign(**dict(zip(key, value)))
def __delitem__(self, key):
if isinstance(key, str):
key = [key]
value = [None for _ in key]
self.assign(**dict(zip(key, value)))
def build(self, **kwargs):
# We do not allow passing a cache here, because that would encourage
# working around the rule that DataFrames cannot be expected to be
# synced with each other (e.g., they shuffle independently).
cache = {}
tensors = {name: c.build(cache, **kwargs)
for name, c in self._columns.items()}
return tensors
| apache-2.0 |
ntnu-tdat2004/machine-learning | 2_linear_regression_3d_visualization.py | 1 | 3709 | import numpy as np
import matplotlib
matplotlib.use('TkAgg')
from mpl_toolkits.mplot3d import axes3d, art3d
import matplotlib.pyplot as plt
from matplotlib import cm
matplotlib.rcParams.update({'font.size': 11})
# regarding the notations, see http://stats.stackexchange.com/questions/193908/in-machine-learning-why-are-superscripts-used-instead-of-subscripts
W_init=np.mat([[-0.2], [0.53]])
b_init=np.mat([[3.1]])
class LinearRegressionModel:
def __init__(self, W=W_init.copy(), b=b_init.copy()):
self.W = W
self.b = b
# predictor
def f(self, x): return x * self.W + self.b
    # uses the sum of squared errors (Mean Squared Error up to a 1/N factor). The error function is also sometimes called cost or loss function
def error(self, x, y): return np.sum(np.power(self.f(x) - y, 2))
model = LinearRegressionModel()
# observed/training input and output
x_train = np.mat([[1, 4.5], [1.5, 2], [2, 1], [3, 3.5], [4, 3], [5, 1], [6, 2]])
y_train = np.mat([[5], [3.5], [3], [4], [3], [1.5], [2]])
fig = plt.figure("Linear regression: 3D")
plot1 = fig.add_subplot(111, projection='3d')
plot1.plot(x_train[:,0].A.squeeze(), x_train[:,1].A.squeeze(), y_train[:,0].A.squeeze(), 'o', label="$(\\hat x_1^{(i)}, \\hat x_2^{(i)},\\hat y^{(i)})$", color="blue")
plot1_f = plot1.plot_wireframe(np.array([[]]), np.array([[]]), np.array([[]]), color="green", label="$y = f(x) = xW+b$")
plot1_info = fig.text(0.01, 0.02, "")
plot1_error=[]
for i in range(0, x_train.shape[0]):
line, = plot1.plot([0, 0], [0, 0], [0, 0], color="red")
plot1_error.append(line)
if i == 0:
line.set_label("$|f(\\hat x^{(i)})-\\hat y^{(i)}|$")
plot1.set_xlabel("$x_1$")
plot1.set_ylabel("$x_2$")
plot1.set_zlabel("$y$")
plot1.legend(loc="upper left")
plot1.set_xticks([])
plot1.set_yticks([])
plot1.set_zticks([])
plot1.w_xaxis.line.set_lw(0)
plot1.w_yaxis.line.set_lw(0)
plot1.w_zaxis.line.set_lw(0)
plot1.quiver([0], [0], [0], [np.max(x_train[:,0]+1)], [0], [0], arrow_length_ratio=0.05, color="black")
plot1.quiver([0], [0], [0], [0], [np.max(x_train[:,1]+1)], [0], arrow_length_ratio=0.05, color="black")
plot1.quiver([0], [0], [0], [0], [0], [np.max(y_train[:,0]+1)], arrow_length_ratio=0.05, color="black")
def update_figure(event=None):
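    # Key bindings: W/w and E/e nudge the two weight components up/down,
    # B/b nudges the bias, and c resets the model to its initial parameters
    # before the surface and error bars are redrawn.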
if(event is not None):
if event.key == "W":
model.W[0,0]+=0.01
elif event.key == "w":
model.W[0,0]-=0.01
elif event.key == "E":
model.W[1,0]+=0.01
elif event.key == "e":
model.W[1,0]-=0.01
elif event.key == "B":
model.b[0,0]+=0.05
elif event.key == "b":
model.b[0,0]-=0.05
elif event.key == "c":
model.W=W_init.copy()
model.b=b_init.copy()
global plot1_f
plot1_f.remove()
x1_grid, x2_grid = np.meshgrid(np.linspace(1, 6, 10), np.linspace(1, 4.5, 10))
y_grid=np.empty([10, 10])
for i in range(0, x1_grid.shape[0]):
for j in range(0, x1_grid.shape[1]):
y_grid[i,j]=model.f([[x1_grid[i,j], x2_grid[i,j]]])
plot1_f = plot1.plot_wireframe(x1_grid, x2_grid, y_grid, color='green')
for i in range(0, x_train.shape[0]):
plot1_error[i].set_data([x_train[i,0], x_train[i,0]], [x_train[i,1], x_train[i,1]])
plot1_error[i].set_3d_properties([y_train[i,0], model.f(x_train[i,:])])
plot1_info.set_text("$W=\\left[\\stackrel{%.2f}{%.2f}\\right]$\n$b=[%.2f]$\n$error = \\sum_i(f(\\hat x^{(i)}) - \\hat y^{(i)})^2 = %.2f$" % (model.W[0,0], model.W[1,0], model.b[0,0], model.error(x_train, y_train)))
fig.canvas.draw()
update_figure()
fig.canvas.mpl_connect('key_press_event', update_figure)
plt.show()
| mit |
matthewfranglen/spark | python/pyspark/sql/dataframe.py | 4 | 91760 | #
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import sys
import random
if sys.version >= '3':
basestring = unicode = str
long = int
from functools import reduce
from html import escape as html_escape
else:
from itertools import imap as map
from cgi import escape as html_escape
import warnings
from pyspark import copy_func, since, _NoValue
from pyspark.rdd import RDD, _load_from_socket, _local_iterator_from_socket, \
ignore_unicode_prefix
from pyspark.serializers import BatchedSerializer, PickleSerializer, \
UTF8Deserializer
from pyspark.storagelevel import StorageLevel
from pyspark.traceback_utils import SCCallSiteSync
from pyspark.sql.types import _parse_datatype_json_string
from pyspark.sql.column import Column, _to_seq, _to_list, _to_java_column
from pyspark.sql.readwriter import DataFrameWriter
from pyspark.sql.streaming import DataStreamWriter
from pyspark.sql.types import *
from pyspark.sql.pandas.conversion import PandasConversionMixin
from pyspark.sql.pandas.map_ops import PandasMapOpsMixin
__all__ = ["DataFrame", "DataFrameNaFunctions", "DataFrameStatFunctions"]
class DataFrame(PandasMapOpsMixin, PandasConversionMixin):
"""A distributed collection of data grouped into named columns.
A :class:`DataFrame` is equivalent to a relational table in Spark SQL,
and can be created using various functions in :class:`SparkSession`::
people = spark.read.parquet("...")
Once created, it can be manipulated using the various domain-specific-language
(DSL) functions defined in: :class:`DataFrame`, :class:`Column`.
To select a column from the :class:`DataFrame`, use the apply method::
ageCol = people.age
A more concrete example::
# To create DataFrame using SparkSession
people = spark.read.parquet("...")
department = spark.read.parquet("...")
people.filter(people.age > 30).join(department, people.deptId == department.id) \\
.groupBy(department.name, "gender").agg({"salary": "avg", "age": "max"})
.. versionadded:: 1.3
"""
def __init__(self, jdf, sql_ctx):
self._jdf = jdf
self.sql_ctx = sql_ctx
self._sc = sql_ctx and sql_ctx._sc
self.is_cached = False
self._schema = None # initialized lazily
self._lazy_rdd = None
        # Check whether _repr_html is supported or not; we use it to avoid calling _jdf twice
        # by __repr__ and _repr_html_ while eager evaluation is enabled.
self._support_repr_html = False
@property
@since(1.3)
def rdd(self):
"""Returns the content as an :class:`pyspark.RDD` of :class:`Row`.
"""
if self._lazy_rdd is None:
jrdd = self._jdf.javaToPython()
self._lazy_rdd = RDD(jrdd, self.sql_ctx._sc, BatchedSerializer(PickleSerializer()))
return self._lazy_rdd
@property
@since("1.3.1")
def na(self):
"""Returns a :class:`DataFrameNaFunctions` for handling missing values.
"""
return DataFrameNaFunctions(self)
@property
@since(1.4)
def stat(self):
"""Returns a :class:`DataFrameStatFunctions` for statistic functions.
"""
return DataFrameStatFunctions(self)
@ignore_unicode_prefix
@since(1.3)
def toJSON(self, use_unicode=True):
"""Converts a :class:`DataFrame` into a :class:`RDD` of string.
Each row is turned into a JSON document as one element in the returned RDD.
>>> df.toJSON().first()
u'{"age":2,"name":"Alice"}'
"""
rdd = self._jdf.toJSON()
return RDD(rdd.toJavaRDD(), self._sc, UTF8Deserializer(use_unicode))
@since(1.3)
def registerTempTable(self, name):
"""Registers this DataFrame as a temporary table using the given name.
The lifetime of this temporary table is tied to the :class:`SparkSession`
that was used to create this :class:`DataFrame`.
>>> df.registerTempTable("people")
>>> df2 = spark.sql("select * from people")
>>> sorted(df.collect()) == sorted(df2.collect())
True
>>> spark.catalog.dropTempView("people")
.. note:: Deprecated in 2.0, use createOrReplaceTempView instead.
"""
warnings.warn(
"Deprecated in 2.0, use createOrReplaceTempView instead.", DeprecationWarning)
self._jdf.createOrReplaceTempView(name)
@since(2.0)
def createTempView(self, name):
"""Creates a local temporary view with this :class:`DataFrame`.
The lifetime of this temporary table is tied to the :class:`SparkSession`
that was used to create this :class:`DataFrame`.
throws :class:`TempTableAlreadyExistsException`, if the view name already exists in the
catalog.
>>> df.createTempView("people")
>>> df2 = spark.sql("select * from people")
>>> sorted(df.collect()) == sorted(df2.collect())
True
>>> df.createTempView("people") # doctest: +IGNORE_EXCEPTION_DETAIL
Traceback (most recent call last):
...
AnalysisException: u"Temporary table 'people' already exists;"
>>> spark.catalog.dropTempView("people")
"""
self._jdf.createTempView(name)
@since(2.0)
def createOrReplaceTempView(self, name):
"""Creates or replaces a local temporary view with this :class:`DataFrame`.
The lifetime of this temporary table is tied to the :class:`SparkSession`
that was used to create this :class:`DataFrame`.
>>> df.createOrReplaceTempView("people")
>>> df2 = df.filter(df.age > 3)
>>> df2.createOrReplaceTempView("people")
>>> df3 = spark.sql("select * from people")
>>> sorted(df3.collect()) == sorted(df2.collect())
True
>>> spark.catalog.dropTempView("people")
"""
self._jdf.createOrReplaceTempView(name)
@since(2.1)
def createGlobalTempView(self, name):
"""Creates a global temporary view with this :class:`DataFrame`.
The lifetime of this temporary view is tied to this Spark application.
throws :class:`TempTableAlreadyExistsException`, if the view name already exists in the
catalog.
>>> df.createGlobalTempView("people")
>>> df2 = spark.sql("select * from global_temp.people")
>>> sorted(df.collect()) == sorted(df2.collect())
True
>>> df.createGlobalTempView("people") # doctest: +IGNORE_EXCEPTION_DETAIL
Traceback (most recent call last):
...
AnalysisException: u"Temporary table 'people' already exists;"
>>> spark.catalog.dropGlobalTempView("people")
"""
self._jdf.createGlobalTempView(name)
@since(2.2)
def createOrReplaceGlobalTempView(self, name):
"""Creates or replaces a global temporary view using the given name.
The lifetime of this temporary view is tied to this Spark application.
>>> df.createOrReplaceGlobalTempView("people")
>>> df2 = df.filter(df.age > 3)
>>> df2.createOrReplaceGlobalTempView("people")
>>> df3 = spark.sql("select * from global_temp.people")
>>> sorted(df3.collect()) == sorted(df2.collect())
True
>>> spark.catalog.dropGlobalTempView("people")
"""
self._jdf.createOrReplaceGlobalTempView(name)
@property
@since(1.4)
def write(self):
"""
Interface for saving the content of the non-streaming :class:`DataFrame` out into external
storage.
:return: :class:`DataFrameWriter`
"""
return DataFrameWriter(self)
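    # A minimal usage sketch for the writer returned above (illustrative only; the
    # DataFrame ``df`` and the output paths are assumptions, not part of this module):
    #
    #     df.write.mode("overwrite").parquet("/tmp/people_parquet")
    #     df.write.format("json").save("/tmp/people_json")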
@property
@since(2.0)
def writeStream(self):
"""
Interface for saving the content of the streaming :class:`DataFrame` out into external
storage.
.. note:: Evolving.
:return: :class:`DataStreamWriter`
"""
return DataStreamWriter(self)
@property
@since(1.3)
def schema(self):
"""Returns the schema of this :class:`DataFrame` as a :class:`pyspark.sql.types.StructType`.
>>> df.schema
StructType(List(StructField(age,IntegerType,true),StructField(name,StringType,true)))
"""
if self._schema is None:
try:
self._schema = _parse_datatype_json_string(self._jdf.schema().json())
except AttributeError as e:
raise Exception(
"Unable to parse datatype from schema. %s" % e)
return self._schema
@since(1.3)
def printSchema(self):
"""Prints out the schema in the tree format.
>>> df.printSchema()
root
|-- age: integer (nullable = true)
|-- name: string (nullable = true)
<BLANKLINE>
"""
print(self._jdf.schema().treeString())
@since(1.3)
def explain(self, extended=None, mode=None):
"""Prints the (logical and physical) plans to the console for debugging purpose.
:param extended: boolean, default ``False``. If ``False``, prints only the physical plan.
:param mode: specifies the expected output format of plans.
* ``simple``: Print only a physical plan.
* ``extended``: Print both logical and physical plans.
* ``codegen``: Print a physical plan and generated codes if they are available.
* ``cost``: Print a logical plan and statistics if they are available.
* ``formatted``: Split explain output into two sections: a physical plan outline \
and node details.
>>> df.explain()
== Physical Plan ==
*(1) Scan ExistingRDD[age#0,name#1]
>>> df.explain(True)
== Parsed Logical Plan ==
...
== Analyzed Logical Plan ==
...
== Optimized Logical Plan ==
...
== Physical Plan ==
...
>>> df.explain(mode="formatted")
== Physical Plan ==
* Scan ExistingRDD (1)
(1) Scan ExistingRDD [codegen id : 1]
Output [2]: [age#0, name#1]
...
.. versionchanged:: 3.0.0
Added optional argument `mode` to specify the expected output format of plans.
"""
if extended is not None and mode is not None:
raise Exception("extended and mode can not be specified simultaneously")
# For the no argument case: df.explain()
is_no_argument = extended is None and mode is None
# For the cases below:
# explain(True)
# explain(extended=False)
is_extended_case = extended is not None and isinstance(extended, bool)
# For the mode specified: df.explain(mode="formatted")
is_mode_case = mode is not None and isinstance(mode, basestring)
if not is_no_argument and not (is_extended_case or is_mode_case):
if extended is not None:
err_msg = "extended (optional) should be provided as bool" \
", got {0}".format(type(extended))
else: # For mode case
err_msg = "mode (optional) should be provided as str, got {0}".format(type(mode))
raise TypeError(err_msg)
# Sets an explain mode depending on a given argument
if is_no_argument:
explain_mode = "simple"
elif is_extended_case:
explain_mode = "extended" if extended else "simple"
elif is_mode_case:
explain_mode = mode
print(self._sc._jvm.PythonSQLUtils.explainString(self._jdf.queryExecution(), explain_mode))
@since(2.4)
def exceptAll(self, other):
"""Return a new :class:`DataFrame` containing rows in this :class:`DataFrame` but
not in another :class:`DataFrame` while preserving duplicates.
This is equivalent to `EXCEPT ALL` in SQL.
>>> df1 = spark.createDataFrame(
... [("a", 1), ("a", 1), ("a", 1), ("a", 2), ("b", 3), ("c", 4)], ["C1", "C2"])
>>> df2 = spark.createDataFrame([("a", 1), ("b", 3)], ["C1", "C2"])
>>> df1.exceptAll(df2).show()
+---+---+
| C1| C2|
+---+---+
| a| 1|
| a| 1|
| a| 2|
| c| 4|
+---+---+
Also as standard in SQL, this function resolves columns by position (not by name).
"""
return DataFrame(self._jdf.exceptAll(other._jdf), self.sql_ctx)
@since(1.3)
def isLocal(self):
"""Returns ``True`` if the :func:`collect` and :func:`take` methods can be run locally
(without any Spark executors).
"""
return self._jdf.isLocal()
@property
@since(2.0)
def isStreaming(self):
"""Returns ``True`` if this :class:`Dataset` contains one or more sources that continuously
return data as it arrives. A :class:`Dataset` that reads data from a streaming source
must be executed as a :class:`StreamingQuery` using the :func:`start` method in
:class:`DataStreamWriter`. Methods that return a single answer, (e.g., :func:`count` or
:func:`collect`) will throw an :class:`AnalysisException` when there is a streaming
source present.
.. note:: Evolving
"""
return self._jdf.isStreaming()
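    # Illustrative sketch (assumes an active SparkSession named ``spark``): the built-in
    # "rate" streaming source yields a streaming DataFrame, while a static range does not.
    #
    #     spark.readStream.format("rate").load().isStreaming   # True
    #     spark.range(1).isStreaming                           # False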
@since(1.3)
def show(self, n=20, truncate=True, vertical=False):
"""Prints the first ``n`` rows to the console.
:param n: Number of rows to show.
:param truncate: If set to ``True``, truncate strings longer than 20 chars by default.
If set to a number greater than one, truncates long strings to length ``truncate``
            and aligns cells right.
:param vertical: If set to ``True``, print output rows vertically (one line
per column value).
>>> df
DataFrame[age: int, name: string]
>>> df.show()
+---+-----+
|age| name|
+---+-----+
| 2|Alice|
| 5| Bob|
+---+-----+
>>> df.show(truncate=3)
+---+----+
|age|name|
+---+----+
| 2| Ali|
| 5| Bob|
+---+----+
>>> df.show(vertical=True)
-RECORD 0-----
age | 2
name | Alice
-RECORD 1-----
age | 5
name | Bob
"""
if isinstance(truncate, bool) and truncate:
print(self._jdf.showString(n, 20, vertical))
else:
print(self._jdf.showString(n, int(truncate), vertical))
def __repr__(self):
if not self._support_repr_html and self.sql_ctx._conf.isReplEagerEvalEnabled():
vertical = False
return self._jdf.showString(
self.sql_ctx._conf.replEagerEvalMaxNumRows(),
self.sql_ctx._conf.replEagerEvalTruncate(), vertical)
else:
return "DataFrame[%s]" % (", ".join("%s: %s" % c for c in self.dtypes))
def _repr_html_(self):
"""Returns a :class:`DataFrame` with html code when you enabled eager evaluation
by 'spark.sql.repl.eagerEval.enabled', this only called by REPL you are
using support eager evaluation with HTML.
"""
if not self._support_repr_html:
self._support_repr_html = True
if self.sql_ctx._conf.isReplEagerEvalEnabled():
max_num_rows = max(self.sql_ctx._conf.replEagerEvalMaxNumRows(), 0)
sock_info = self._jdf.getRowsToPython(
max_num_rows, self.sql_ctx._conf.replEagerEvalTruncate())
rows = list(_load_from_socket(sock_info, BatchedSerializer(PickleSerializer())))
head = rows[0]
row_data = rows[1:]
has_more_data = len(row_data) > max_num_rows
row_data = row_data[:max_num_rows]
html = "<table border='1'>\n"
# generate table head
html += "<tr><th>%s</th></tr>\n" % "</th><th>".join(map(lambda x: html_escape(x), head))
# generate table rows
for row in row_data:
html += "<tr><td>%s</td></tr>\n" % "</td><td>".join(
map(lambda x: html_escape(x), row))
html += "</table>\n"
if has_more_data:
html += "only showing top %d %s\n" % (
max_num_rows, "row" if max_num_rows == 1 else "rows")
return html
else:
return None
@since(2.1)
def checkpoint(self, eager=True):
"""Returns a checkpointed version of this Dataset. Checkpointing can be used to truncate the
logical plan of this :class:`DataFrame`, which is especially useful in iterative algorithms
where the plan may grow exponentially. It will be saved to files inside the checkpoint
directory set with :meth:`SparkContext.setCheckpointDir`.
:param eager: Whether to checkpoint this :class:`DataFrame` immediately
.. note:: Experimental
"""
jdf = self._jdf.checkpoint(eager)
return DataFrame(jdf, self.sql_ctx)
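    # Illustrative sketch (assumes an active SparkSession ``spark`` and a writable
    # checkpoint directory; both are assumptions, not part of this module):
    #
    #     spark.sparkContext.setCheckpointDir("/tmp/spark-checkpoints")
    #     df = spark.range(100)
    #     df2 = df.checkpoint()   # materializes df and truncates its lineage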
@since(2.3)
def localCheckpoint(self, eager=True):
"""Returns a locally checkpointed version of this Dataset. Checkpointing can be used to
truncate the logical plan of this :class:`DataFrame`, which is especially useful in
iterative algorithms where the plan may grow exponentially. Local checkpoints are
stored in the executors using the caching subsystem and therefore they are not reliable.
:param eager: Whether to checkpoint this :class:`DataFrame` immediately
.. note:: Experimental
"""
jdf = self._jdf.localCheckpoint(eager)
return DataFrame(jdf, self.sql_ctx)
@since(2.1)
def withWatermark(self, eventTime, delayThreshold):
"""Defines an event time watermark for this :class:`DataFrame`. A watermark tracks a point
in time before which we assume no more late data is going to arrive.
Spark will use this watermark for several purposes:
- To know when a given time window aggregation can be finalized and thus can be emitted
when using output modes that do not allow updates.
- To minimize the amount of state that we need to keep for on-going aggregations.
The current watermark is computed by looking at the `MAX(eventTime)` seen across
all of the partitions in the query minus a user specified `delayThreshold`. Due to the cost
of coordinating this value across partitions, the actual watermark used is only guaranteed
to be at least `delayThreshold` behind the actual event time. In some cases we may still
process records that arrive more than `delayThreshold` late.
:param eventTime: the name of the column that contains the event time of the row.
        :param delayThreshold: the minimum delay to wait for data to arrive late, relative to the
            latest record that has been processed, in the form of an interval
(e.g. "1 minute" or "5 hours").
.. note:: Evolving
>>> sdf.select('name', sdf.time.cast('timestamp')).withWatermark('time', '10 minutes')
DataFrame[name: string, time: timestamp]
"""
if not eventTime or type(eventTime) is not str:
raise TypeError("eventTime should be provided as a string")
if not delayThreshold or type(delayThreshold) is not str:
raise TypeError("delayThreshold should be provided as a string interval")
jdf = self._jdf.withWatermark(eventTime, delayThreshold)
return DataFrame(jdf, self.sql_ctx)
@since(2.2)
def hint(self, name, *parameters):
"""Specifies some hint on the current :class:`DataFrame`.
:param name: A name of the hint.
:param parameters: Optional parameters.
:return: :class:`DataFrame`
>>> df.join(df2.hint("broadcast"), "name").show()
+----+---+------+
|name|age|height|
+----+---+------+
| Bob| 5| 85|
+----+---+------+
"""
if len(parameters) == 1 and isinstance(parameters[0], list):
parameters = parameters[0]
if not isinstance(name, str):
raise TypeError("name should be provided as str, got {0}".format(type(name)))
allowed_types = (basestring, list, float, int)
for p in parameters:
if not isinstance(p, allowed_types):
raise TypeError(
"all parameters should be in {0}, got {1} of type {2}".format(
allowed_types, p, type(p)))
jdf = self._jdf.hint(name, self._jseq(parameters))
return DataFrame(jdf, self.sql_ctx)
@since(1.3)
def count(self):
"""Returns the number of rows in this :class:`DataFrame`.
>>> df.count()
2
"""
return int(self._jdf.count())
@ignore_unicode_prefix
@since(1.3)
def collect(self):
"""Returns all the records as a list of :class:`Row`.
>>> df.collect()
[Row(age=2, name=u'Alice'), Row(age=5, name=u'Bob')]
"""
with SCCallSiteSync(self._sc) as css:
sock_info = self._jdf.collectToPython()
return list(_load_from_socket(sock_info, BatchedSerializer(PickleSerializer())))
@ignore_unicode_prefix
@since(2.0)
def toLocalIterator(self, prefetchPartitions=False):
"""
Returns an iterator that contains all of the rows in this :class:`DataFrame`.
The iterator will consume as much memory as the largest partition in this
:class:`DataFrame`. With prefetch it may consume up to the memory of the 2 largest
partitions.
:param prefetchPartitions: If Spark should pre-fetch the next partition
before it is needed.
>>> list(df.toLocalIterator())
[Row(age=2, name=u'Alice'), Row(age=5, name=u'Bob')]
"""
with SCCallSiteSync(self._sc) as css:
sock_info = self._jdf.toPythonIterator(prefetchPartitions)
return _local_iterator_from_socket(sock_info, BatchedSerializer(PickleSerializer()))
@ignore_unicode_prefix
@since(1.3)
def limit(self, num):
"""Limits the result count to the number specified.
>>> df.limit(1).collect()
[Row(age=2, name=u'Alice')]
>>> df.limit(0).collect()
[]
"""
jdf = self._jdf.limit(num)
return DataFrame(jdf, self.sql_ctx)
@ignore_unicode_prefix
@since(1.3)
def take(self, num):
"""Returns the first ``num`` rows as a :class:`list` of :class:`Row`.
>>> df.take(2)
[Row(age=2, name=u'Alice'), Row(age=5, name=u'Bob')]
"""
return self.limit(num).collect()
@ignore_unicode_prefix
@since(3.0)
def tail(self, num):
"""
Returns the last ``num`` rows as a :class:`list` of :class:`Row`.
Running tail requires moving data into the application's driver process, and doing so with
a very large ``num`` can crash the driver process with OutOfMemoryError.
>>> df.tail(1)
[Row(age=5, name=u'Bob')]
"""
with SCCallSiteSync(self._sc):
sock_info = self._jdf.tailToPython(num)
return list(_load_from_socket(sock_info, BatchedSerializer(PickleSerializer())))
@since(1.3)
def foreach(self, f):
"""Applies the ``f`` function to all :class:`Row` of this :class:`DataFrame`.
This is a shorthand for ``df.rdd.foreach()``.
>>> def f(person):
... print(person.name)
>>> df.foreach(f)
"""
self.rdd.foreach(f)
@since(1.3)
def foreachPartition(self, f):
"""Applies the ``f`` function to each partition of this :class:`DataFrame`.
        This is a shorthand for ``df.rdd.foreachPartition()``.
>>> def f(people):
... for person in people:
... print(person.name)
>>> df.foreachPartition(f)
"""
self.rdd.foreachPartition(f)
@since(1.3)
def cache(self):
"""Persists the :class:`DataFrame` with the default storage level (`MEMORY_AND_DISK`).
.. note:: The default storage level has changed to `MEMORY_AND_DISK` to match Scala in 2.0.
"""
self.is_cached = True
self._jdf.cache()
return self
@since(1.3)
def persist(self, storageLevel=StorageLevel.MEMORY_AND_DISK):
"""Sets the storage level to persist the contents of the :class:`DataFrame` across
operations after the first time it is computed. This can only be used to assign
a new storage level if the :class:`DataFrame` does not have a storage level set yet.
        If no storage level is specified, it defaults to `MEMORY_AND_DISK`.
.. note:: The default storage level has changed to `MEMORY_AND_DISK` to match Scala in 2.0.
"""
self.is_cached = True
javaStorageLevel = self._sc._getJavaStorageLevel(storageLevel)
self._jdf.persist(javaStorageLevel)
return self
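    # Illustrative caching sketch (assumes an active SparkSession ``spark``; not part of
    # the original module): persist with an explicit level, reuse the result, then release it.
    #
    #     from pyspark import StorageLevel
    #     big = spark.range(10 ** 6)
    #     big.persist(StorageLevel.MEMORY_ONLY)
    #     big.count()      # first action materializes the cached data
    #     big.count()      # served from the cache
    #     big.unpersist()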
@property
@since(2.1)
def storageLevel(self):
"""Get the :class:`DataFrame`'s current storage level.
>>> df.storageLevel
StorageLevel(False, False, False, False, 1)
>>> df.cache().storageLevel
StorageLevel(True, True, False, True, 1)
>>> df2.persist(StorageLevel.DISK_ONLY_2).storageLevel
StorageLevel(True, False, False, False, 2)
"""
java_storage_level = self._jdf.storageLevel()
storage_level = StorageLevel(java_storage_level.useDisk(),
java_storage_level.useMemory(),
java_storage_level.useOffHeap(),
java_storage_level.deserialized(),
java_storage_level.replication())
return storage_level
@since(1.3)
def unpersist(self, blocking=False):
"""Marks the :class:`DataFrame` as non-persistent, and remove all blocks for it from
memory and disk.
.. note:: `blocking` default has changed to ``False`` to match Scala in 2.0.
"""
self.is_cached = False
self._jdf.unpersist(blocking)
return self
@since(1.4)
def coalesce(self, numPartitions):
"""
Returns a new :class:`DataFrame` that has exactly `numPartitions` partitions.
:param numPartitions: int, to specify the target number of partitions
Similar to coalesce defined on an :class:`RDD`, this operation results in a
narrow dependency, e.g. if you go from 1000 partitions to 100 partitions,
there will not be a shuffle, instead each of the 100 new partitions will
claim 10 of the current partitions. If a larger number of partitions is requested,
it will stay at the current number of partitions.
However, if you're doing a drastic coalesce, e.g. to numPartitions = 1,
this may result in your computation taking place on fewer nodes than
you like (e.g. one node in the case of numPartitions = 1). To avoid this,
you can call repartition(). This will add a shuffle step, but means the
current upstream partitions will be executed in parallel (per whatever
the current partitioning is).
>>> df.coalesce(1).rdd.getNumPartitions()
1
"""
return DataFrame(self._jdf.coalesce(numPartitions), self.sql_ctx)
@since(1.3)
def repartition(self, numPartitions, *cols):
"""
Returns a new :class:`DataFrame` partitioned by the given partitioning expressions. The
resulting :class:`DataFrame` is hash partitioned.
:param numPartitions:
can be an int to specify the target number of partitions or a Column.
If it is a Column, it will be used as the first partitioning column. If not specified,
the default number of partitions is used.
.. versionchanged:: 1.6
Added optional arguments to specify the partitioning columns. Also made numPartitions
optional if partitioning columns are specified.
>>> df.repartition(10).rdd.getNumPartitions()
10
>>> data = df.union(df).repartition("age")
>>> data.show()
+---+-----+
|age| name|
+---+-----+
| 5| Bob|
| 5| Bob|
| 2|Alice|
| 2|Alice|
+---+-----+
>>> data = data.repartition(7, "age")
>>> data.show()
+---+-----+
|age| name|
+---+-----+
| 2|Alice|
| 5| Bob|
| 2|Alice|
| 5| Bob|
+---+-----+
>>> data.rdd.getNumPartitions()
7
>>> data = data.repartition("name", "age")
>>> data.show()
+---+-----+
|age| name|
+---+-----+
| 5| Bob|
| 5| Bob|
| 2|Alice|
| 2|Alice|
+---+-----+
"""
if isinstance(numPartitions, int):
if len(cols) == 0:
return DataFrame(self._jdf.repartition(numPartitions), self.sql_ctx)
else:
return DataFrame(
self._jdf.repartition(numPartitions, self._jcols(*cols)), self.sql_ctx)
elif isinstance(numPartitions, (basestring, Column)):
cols = (numPartitions, ) + cols
return DataFrame(self._jdf.repartition(self._jcols(*cols)), self.sql_ctx)
else:
raise TypeError("numPartitions should be an int or Column")
@since("2.4.0")
def repartitionByRange(self, numPartitions, *cols):
"""
Returns a new :class:`DataFrame` partitioned by the given partitioning expressions. The
resulting :class:`DataFrame` is range partitioned.
:param numPartitions:
can be an int to specify the target number of partitions or a Column.
If it is a Column, it will be used as the first partitioning column. If not specified,
the default number of partitions is used.
At least one partition-by expression must be specified.
When no explicit sort order is specified, "ascending nulls first" is assumed.
Note that due to performance reasons this method uses sampling to estimate the ranges.
Hence, the output may not be consistent, since sampling can return different values.
The sample size can be controlled by the config
`spark.sql.execution.rangeExchange.sampleSizePerPartition`.
>>> df.repartitionByRange(2, "age").rdd.getNumPartitions()
2
>>> df.show()
+---+-----+
|age| name|
+---+-----+
| 2|Alice|
| 5| Bob|
+---+-----+
>>> df.repartitionByRange(1, "age").rdd.getNumPartitions()
1
>>> data = df.repartitionByRange("age")
>>> df.show()
+---+-----+
|age| name|
+---+-----+
| 2|Alice|
| 5| Bob|
+---+-----+
"""
if isinstance(numPartitions, int):
if len(cols) == 0:
                raise ValueError("At least one partition-by expression must be specified.")
else:
return DataFrame(
self._jdf.repartitionByRange(numPartitions, self._jcols(*cols)), self.sql_ctx)
elif isinstance(numPartitions, (basestring, Column)):
cols = (numPartitions,) + cols
return DataFrame(self._jdf.repartitionByRange(self._jcols(*cols)), self.sql_ctx)
else:
raise TypeError("numPartitions should be an int, string or Column")
@since(1.3)
def distinct(self):
"""Returns a new :class:`DataFrame` containing the distinct rows in this :class:`DataFrame`.
>>> df.distinct().count()
2
"""
return DataFrame(self._jdf.distinct(), self.sql_ctx)
@since(1.3)
def sample(self, withReplacement=None, fraction=None, seed=None):
"""Returns a sampled subset of this :class:`DataFrame`.
:param withReplacement: Sample with replacement or not (default ``False``).
:param fraction: Fraction of rows to generate, range [0.0, 1.0].
:param seed: Seed for sampling (default a random seed).
.. note:: This is not guaranteed to provide exactly the fraction specified of the total
count of the given :class:`DataFrame`.
        .. note:: `fraction` is required; `withReplacement` and `seed` are optional.
>>> df = spark.range(10)
>>> df.sample(0.5, 3).count()
7
>>> df.sample(fraction=0.5, seed=3).count()
7
>>> df.sample(withReplacement=True, fraction=0.5, seed=3).count()
1
>>> df.sample(1.0).count()
10
>>> df.sample(fraction=1.0).count()
10
>>> df.sample(False, fraction=1.0).count()
10
"""
# For the cases below:
# sample(True, 0.5 [, seed])
# sample(True, fraction=0.5 [, seed])
# sample(withReplacement=False, fraction=0.5 [, seed])
is_withReplacement_set = \
type(withReplacement) == bool and isinstance(fraction, float)
# For the case below:
        # sample(fraction=0.5 [, seed])
is_withReplacement_omitted_kwargs = \
withReplacement is None and isinstance(fraction, float)
# For the case below:
# sample(0.5 [, seed])
is_withReplacement_omitted_args = isinstance(withReplacement, float)
if not (is_withReplacement_set
or is_withReplacement_omitted_kwargs
or is_withReplacement_omitted_args):
argtypes = [
str(type(arg)) for arg in [withReplacement, fraction, seed] if arg is not None]
raise TypeError(
"withReplacement (optional), fraction (required) and seed (optional)"
" should be a bool, float and number; however, "
"got [%s]." % ", ".join(argtypes))
if is_withReplacement_omitted_args:
if fraction is not None:
seed = fraction
fraction = withReplacement
withReplacement = None
seed = long(seed) if seed is not None else None
args = [arg for arg in [withReplacement, fraction, seed] if arg is not None]
jdf = self._jdf.sample(*args)
return DataFrame(jdf, self.sql_ctx)
@since(1.5)
def sampleBy(self, col, fractions, seed=None):
"""
Returns a stratified sample without replacement based on the
fraction given on each stratum.
:param col: column that defines strata
:param fractions:
sampling fraction for each stratum. If a stratum is not
specified, we treat its fraction as zero.
:param seed: random seed
:return: a new :class:`DataFrame` that represents the stratified sample
>>> from pyspark.sql.functions import col
>>> dataset = sqlContext.range(0, 100).select((col("id") % 3).alias("key"))
>>> sampled = dataset.sampleBy("key", fractions={0: 0.1, 1: 0.2}, seed=0)
>>> sampled.groupBy("key").count().orderBy("key").show()
+---+-----+
|key|count|
+---+-----+
| 0| 3|
| 1| 6|
+---+-----+
>>> dataset.sampleBy(col("key"), fractions={2: 1.0}, seed=0).count()
33
.. versionchanged:: 3.0
Added sampling by a column of :class:`Column`
"""
if isinstance(col, basestring):
col = Column(col)
elif not isinstance(col, Column):
raise ValueError("col must be a string or a column, but got %r" % type(col))
if not isinstance(fractions, dict):
raise ValueError("fractions must be a dict but got %r" % type(fractions))
for k, v in fractions.items():
if not isinstance(k, (float, int, long, basestring)):
raise ValueError("key must be float, int, long, or string, but got %r" % type(k))
fractions[k] = float(v)
col = col._jc
seed = seed if seed is not None else random.randint(0, sys.maxsize)
return DataFrame(self._jdf.stat().sampleBy(col, self._jmap(fractions), seed), self.sql_ctx)
@since(1.4)
def randomSplit(self, weights, seed=None):
"""Randomly splits this :class:`DataFrame` with the provided weights.
:param weights: list of doubles as weights with which to split the :class:`DataFrame`.
Weights will be normalized if they don't sum up to 1.0.
:param seed: The seed for sampling.
>>> splits = df4.randomSplit([1.0, 2.0], 24)
>>> splits[0].count()
2
>>> splits[1].count()
2
"""
for w in weights:
if w < 0.0:
raise ValueError("Weights must be positive. Found weight value: %s" % w)
seed = seed if seed is not None else random.randint(0, sys.maxsize)
rdd_array = self._jdf.randomSplit(_to_list(self.sql_ctx._sc, weights), long(seed))
return [DataFrame(rdd, self.sql_ctx) for rdd in rdd_array]
@property
@since(1.3)
def dtypes(self):
"""Returns all column names and their data types as a list.
>>> df.dtypes
[('age', 'int'), ('name', 'string')]
"""
return [(str(f.name), f.dataType.simpleString()) for f in self.schema.fields]
@property
@since(1.3)
def columns(self):
"""Returns all column names as a list.
>>> df.columns
['age', 'name']
"""
return [f.name for f in self.schema.fields]
@since(2.3)
def colRegex(self, colName):
"""
Selects column based on the column name specified as a regex and returns it
as :class:`Column`.
:param colName: string, column name specified as a regex.
>>> df = spark.createDataFrame([("a", 1), ("b", 2), ("c", 3)], ["Col1", "Col2"])
>>> df.select(df.colRegex("`(Col1)?+.+`")).show()
+----+
|Col2|
+----+
| 1|
| 2|
| 3|
+----+
"""
if not isinstance(colName, basestring):
raise ValueError("colName should be provided as string")
jc = self._jdf.colRegex(colName)
return Column(jc)
@ignore_unicode_prefix
@since(1.3)
def alias(self, alias):
"""Returns a new :class:`DataFrame` with an alias set.
:param alias: string, an alias name to be set for the :class:`DataFrame`.
>>> from pyspark.sql.functions import *
>>> df_as1 = df.alias("df_as1")
>>> df_as2 = df.alias("df_as2")
>>> joined_df = df_as1.join(df_as2, col("df_as1.name") == col("df_as2.name"), 'inner')
>>> joined_df.select("df_as1.name", "df_as2.name", "df_as2.age") \
.sort(desc("df_as1.name")).collect()
[Row(name=u'Bob', name=u'Bob', age=5), Row(name=u'Alice', name=u'Alice', age=2)]
"""
assert isinstance(alias, basestring), "alias should be a string"
return DataFrame(getattr(self._jdf, "as")(alias), self.sql_ctx)
@ignore_unicode_prefix
@since(2.1)
def crossJoin(self, other):
"""Returns the cartesian product with another :class:`DataFrame`.
:param other: Right side of the cartesian product.
>>> df.select("age", "name").collect()
[Row(age=2, name=u'Alice'), Row(age=5, name=u'Bob')]
>>> df2.select("name", "height").collect()
[Row(name=u'Tom', height=80), Row(name=u'Bob', height=85)]
>>> df.crossJoin(df2.select("height")).select("age", "name", "height").collect()
[Row(age=2, name=u'Alice', height=80), Row(age=2, name=u'Alice', height=85),
Row(age=5, name=u'Bob', height=80), Row(age=5, name=u'Bob', height=85)]
"""
jdf = self._jdf.crossJoin(other._jdf)
return DataFrame(jdf, self.sql_ctx)
@ignore_unicode_prefix
@since(1.3)
def join(self, other, on=None, how=None):
"""Joins with another :class:`DataFrame`, using the given join expression.
:param other: Right side of the join
:param on: a string for the join column name, a list of column names,
a join expression (Column), or a list of Columns.
If `on` is a string or a list of strings indicating the name of the join column(s),
the column(s) must exist on both sides, and this performs an equi-join.
:param how: str, default ``inner``. Must be one of: ``inner``, ``cross``, ``outer``,
``full``, ``fullouter``, ``full_outer``, ``left``, ``leftouter``, ``left_outer``,
``right``, ``rightouter``, ``right_outer``, ``semi``, ``leftsemi``, ``left_semi``,
``anti``, ``leftanti`` and ``left_anti``.
The following performs a full outer join between ``df1`` and ``df2``.
>>> from pyspark.sql.functions import desc
>>> df.join(df2, df.name == df2.name, 'outer').select(df.name, df2.height) \
.sort(desc("name")).collect()
[Row(name=u'Bob', height=85), Row(name=u'Alice', height=None), Row(name=None, height=80)]
>>> df.join(df2, 'name', 'outer').select('name', 'height').sort(desc("name")).collect()
[Row(name=u'Tom', height=80), Row(name=u'Bob', height=85), Row(name=u'Alice', height=None)]
>>> cond = [df.name == df3.name, df.age == df3.age]
>>> df.join(df3, cond, 'outer').select(df.name, df3.age).collect()
[Row(name=u'Alice', age=2), Row(name=u'Bob', age=5)]
>>> df.join(df2, 'name').select(df.name, df2.height).collect()
[Row(name=u'Bob', height=85)]
>>> df.join(df4, ['name', 'age']).select(df.name, df.age).collect()
[Row(name=u'Bob', age=5)]
"""
if on is not None and not isinstance(on, list):
on = [on]
if on is not None:
if isinstance(on[0], basestring):
on = self._jseq(on)
else:
assert isinstance(on[0], Column), "on should be Column or list of Column"
on = reduce(lambda x, y: x.__and__(y), on)
on = on._jc
if on is None and how is None:
jdf = self._jdf.join(other._jdf)
else:
if how is None:
how = "inner"
if on is None:
on = self._jseq([])
assert isinstance(how, basestring), "how should be basestring"
jdf = self._jdf.join(other._jdf, on, how)
return DataFrame(jdf, self.sql_ctx)
@since(1.6)
def sortWithinPartitions(self, *cols, **kwargs):
"""Returns a new :class:`DataFrame` with each partition sorted by the specified column(s).
:param cols: list of :class:`Column` or column names to sort by.
:param ascending: boolean or list of boolean (default ``True``).
Sort ascending vs. descending. Specify list for multiple sort orders.
If a list is specified, length of the list must equal length of the `cols`.
>>> df.sortWithinPartitions("age", ascending=False).show()
+---+-----+
|age| name|
+---+-----+
| 2|Alice|
| 5| Bob|
+---+-----+
"""
jdf = self._jdf.sortWithinPartitions(self._sort_cols(cols, kwargs))
return DataFrame(jdf, self.sql_ctx)
@ignore_unicode_prefix
@since(1.3)
def sort(self, *cols, **kwargs):
"""Returns a new :class:`DataFrame` sorted by the specified column(s).
:param cols: list of :class:`Column` or column names to sort by.
:param ascending: boolean or list of boolean (default ``True``).
Sort ascending vs. descending. Specify list for multiple sort orders.
If a list is specified, length of the list must equal length of the `cols`.
>>> df.sort(df.age.desc()).collect()
[Row(age=5, name=u'Bob'), Row(age=2, name=u'Alice')]
>>> df.sort("age", ascending=False).collect()
[Row(age=5, name=u'Bob'), Row(age=2, name=u'Alice')]
>>> df.orderBy(df.age.desc()).collect()
[Row(age=5, name=u'Bob'), Row(age=2, name=u'Alice')]
>>> from pyspark.sql.functions import *
>>> df.sort(asc("age")).collect()
[Row(age=2, name=u'Alice'), Row(age=5, name=u'Bob')]
>>> df.orderBy(desc("age"), "name").collect()
[Row(age=5, name=u'Bob'), Row(age=2, name=u'Alice')]
>>> df.orderBy(["age", "name"], ascending=[0, 1]).collect()
[Row(age=5, name=u'Bob'), Row(age=2, name=u'Alice')]
"""
jdf = self._jdf.sort(self._sort_cols(cols, kwargs))
return DataFrame(jdf, self.sql_ctx)
orderBy = sort
def _jseq(self, cols, converter=None):
"""Return a JVM Seq of Columns from a list of Column or names"""
return _to_seq(self.sql_ctx._sc, cols, converter)
def _jmap(self, jm):
"""Return a JVM Scala Map from a dict"""
return _to_scala_map(self.sql_ctx._sc, jm)
def _jcols(self, *cols):
"""Return a JVM Seq of Columns from a list of Column or column names
If `cols` has only one list in it, cols[0] will be used as the list.
"""
if len(cols) == 1 and isinstance(cols[0], list):
cols = cols[0]
return self._jseq(cols, _to_java_column)
def _sort_cols(self, cols, kwargs):
""" Return a JVM Seq of Columns that describes the sort order
"""
if not cols:
raise ValueError("should sort by at least one column")
if len(cols) == 1 and isinstance(cols[0], list):
cols = cols[0]
jcols = [_to_java_column(c) for c in cols]
ascending = kwargs.get('ascending', True)
if isinstance(ascending, (bool, int)):
if not ascending:
jcols = [jc.desc() for jc in jcols]
elif isinstance(ascending, list):
jcols = [jc if asc else jc.desc()
for asc, jc in zip(ascending, jcols)]
else:
raise TypeError("ascending can only be boolean or list, but got %s" % type(ascending))
return self._jseq(jcols)
@since("1.3.1")
def describe(self, *cols):
"""Computes basic statistics for numeric and string columns.
        This includes count, mean, stddev, min, and max. If no columns are
given, this function computes statistics for all numerical or string columns.
.. note:: This function is meant for exploratory data analysis, as we make no
guarantee about the backward compatibility of the schema of the resulting
:class:`DataFrame`.
>>> df.describe(['age']).show()
+-------+------------------+
|summary| age|
+-------+------------------+
| count| 2|
| mean| 3.5|
| stddev|2.1213203435596424|
| min| 2|
| max| 5|
+-------+------------------+
>>> df.describe().show()
+-------+------------------+-----+
|summary| age| name|
+-------+------------------+-----+
| count| 2| 2|
| mean| 3.5| null|
| stddev|2.1213203435596424| null|
| min| 2|Alice|
| max| 5| Bob|
+-------+------------------+-----+
Use summary for expanded statistics and control over which statistics to compute.
"""
if len(cols) == 1 and isinstance(cols[0], list):
cols = cols[0]
jdf = self._jdf.describe(self._jseq(cols))
return DataFrame(jdf, self.sql_ctx)
@since("2.3.0")
def summary(self, *statistics):
"""Computes specified statistics for numeric and string columns. Available statistics are:
- count
- mean
- stddev
- min
- max
- arbitrary approximate percentiles specified as a percentage (eg, 75%)
If no statistics are given, this function computes count, mean, stddev, min,
approximate quartiles (percentiles at 25%, 50%, and 75%), and max.
.. note:: This function is meant for exploratory data analysis, as we make no
guarantee about the backward compatibility of the schema of the resulting
:class:`DataFrame`.
>>> df.summary().show()
+-------+------------------+-----+
|summary| age| name|
+-------+------------------+-----+
| count| 2| 2|
| mean| 3.5| null|
| stddev|2.1213203435596424| null|
| min| 2|Alice|
| 25%| 2| null|
| 50%| 2| null|
| 75%| 5| null|
| max| 5| Bob|
+-------+------------------+-----+
>>> df.summary("count", "min", "25%", "75%", "max").show()
+-------+---+-----+
|summary|age| name|
+-------+---+-----+
| count| 2| 2|
| min| 2|Alice|
| 25%| 2| null|
| 75%| 5| null|
| max| 5| Bob|
+-------+---+-----+
To do a summary for specific columns first select them:
>>> df.select("age", "name").summary("count").show()
+-------+---+----+
|summary|age|name|
+-------+---+----+
| count| 2| 2|
+-------+---+----+
See also describe for basic statistics.
"""
if len(statistics) == 1 and isinstance(statistics[0], list):
statistics = statistics[0]
jdf = self._jdf.summary(self._jseq(statistics))
return DataFrame(jdf, self.sql_ctx)
@ignore_unicode_prefix
@since(1.3)
def head(self, n=None):
"""Returns the first ``n`` rows.
.. note:: This method should only be used if the resulting array is expected
to be small, as all the data is loaded into the driver's memory.
:param n: int, default 1. Number of rows to return.
:return: If n is greater than 1, return a list of :class:`Row`.
If n is 1, return a single Row.
>>> df.head()
Row(age=2, name=u'Alice')
>>> df.head(1)
[Row(age=2, name=u'Alice')]
"""
if n is None:
rs = self.head(1)
return rs[0] if rs else None
return self.take(n)
@ignore_unicode_prefix
@since(1.3)
def first(self):
"""Returns the first row as a :class:`Row`.
>>> df.first()
Row(age=2, name=u'Alice')
"""
return self.head()
@ignore_unicode_prefix
@since(1.3)
def __getitem__(self, item):
"""Returns the column as a :class:`Column`.
>>> df.select(df['age']).collect()
[Row(age=2), Row(age=5)]
>>> df[ ["name", "age"]].collect()
[Row(name=u'Alice', age=2), Row(name=u'Bob', age=5)]
>>> df[ df.age > 3 ].collect()
[Row(age=5, name=u'Bob')]
>>> df[df[0] > 3].collect()
[Row(age=5, name=u'Bob')]
"""
if isinstance(item, basestring):
jc = self._jdf.apply(item)
return Column(jc)
elif isinstance(item, Column):
return self.filter(item)
elif isinstance(item, (list, tuple)):
return self.select(*item)
elif isinstance(item, int):
jc = self._jdf.apply(self.columns[item])
return Column(jc)
else:
raise TypeError("unexpected item type: %s" % type(item))
@since(1.3)
def __getattr__(self, name):
"""Returns the :class:`Column` denoted by ``name``.
>>> df.select(df.age).collect()
[Row(age=2), Row(age=5)]
"""
if name not in self.columns:
raise AttributeError(
"'%s' object has no attribute '%s'" % (self.__class__.__name__, name))
jc = self._jdf.apply(name)
return Column(jc)
@ignore_unicode_prefix
@since(1.3)
def select(self, *cols):
"""Projects a set of expressions and returns a new :class:`DataFrame`.
:param cols: list of column names (string) or expressions (:class:`Column`).
If one of the column names is '*', that column is expanded to include all columns
in the current :class:`DataFrame`.
>>> df.select('*').collect()
[Row(age=2, name=u'Alice'), Row(age=5, name=u'Bob')]
>>> df.select('name', 'age').collect()
[Row(name=u'Alice', age=2), Row(name=u'Bob', age=5)]
>>> df.select(df.name, (df.age + 10).alias('age')).collect()
[Row(name=u'Alice', age=12), Row(name=u'Bob', age=15)]
"""
jdf = self._jdf.select(self._jcols(*cols))
return DataFrame(jdf, self.sql_ctx)
@since(1.3)
def selectExpr(self, *expr):
"""Projects a set of SQL expressions and returns a new :class:`DataFrame`.
This is a variant of :func:`select` that accepts SQL expressions.
>>> df.selectExpr("age * 2", "abs(age)").collect()
[Row((age * 2)=4, abs(age)=2), Row((age * 2)=10, abs(age)=5)]
"""
if len(expr) == 1 and isinstance(expr[0], list):
expr = expr[0]
jdf = self._jdf.selectExpr(self._jseq(expr))
return DataFrame(jdf, self.sql_ctx)
@ignore_unicode_prefix
@since(1.3)
def filter(self, condition):
"""Filters rows using the given condition.
:func:`where` is an alias for :func:`filter`.
:param condition: a :class:`Column` of :class:`types.BooleanType`
or a string of SQL expression.
>>> df.filter(df.age > 3).collect()
[Row(age=5, name=u'Bob')]
>>> df.where(df.age == 2).collect()
[Row(age=2, name=u'Alice')]
>>> df.filter("age > 3").collect()
[Row(age=5, name=u'Bob')]
>>> df.where("age = 2").collect()
[Row(age=2, name=u'Alice')]
"""
if isinstance(condition, basestring):
jdf = self._jdf.filter(condition)
elif isinstance(condition, Column):
jdf = self._jdf.filter(condition._jc)
else:
raise TypeError("condition should be string or Column")
return DataFrame(jdf, self.sql_ctx)
@ignore_unicode_prefix
@since(1.3)
def groupBy(self, *cols):
"""Groups the :class:`DataFrame` using the specified columns,
so we can run aggregation on them. See :class:`GroupedData`
for all the available aggregate functions.
:func:`groupby` is an alias for :func:`groupBy`.
:param cols: list of columns to group by.
Each element should be a column name (string) or an expression (:class:`Column`).
>>> df.groupBy().avg().collect()
[Row(avg(age)=3.5)]
>>> sorted(df.groupBy('name').agg({'age': 'mean'}).collect())
[Row(name=u'Alice', avg(age)=2.0), Row(name=u'Bob', avg(age)=5.0)]
>>> sorted(df.groupBy(df.name).avg().collect())
[Row(name=u'Alice', avg(age)=2.0), Row(name=u'Bob', avg(age)=5.0)]
>>> sorted(df.groupBy(['name', df.age]).count().collect())
[Row(name=u'Alice', age=2, count=1), Row(name=u'Bob', age=5, count=1)]
"""
jgd = self._jdf.groupBy(self._jcols(*cols))
from pyspark.sql.group import GroupedData
return GroupedData(jgd, self)
@since(1.4)
def rollup(self, *cols):
"""
Create a multi-dimensional rollup for the current :class:`DataFrame` using
the specified columns, so we can run aggregation on them.
>>> df.rollup("name", df.age).count().orderBy("name", "age").show()
+-----+----+-----+
| name| age|count|
+-----+----+-----+
| null|null| 2|
|Alice|null| 1|
|Alice| 2| 1|
| Bob|null| 1|
| Bob| 5| 1|
+-----+----+-----+
"""
jgd = self._jdf.rollup(self._jcols(*cols))
from pyspark.sql.group import GroupedData
return GroupedData(jgd, self)
@since(1.4)
def cube(self, *cols):
"""
Create a multi-dimensional cube for the current :class:`DataFrame` using
the specified columns, so we can run aggregations on them.
>>> df.cube("name", df.age).count().orderBy("name", "age").show()
+-----+----+-----+
| name| age|count|
+-----+----+-----+
| null|null| 2|
| null| 2| 1|
| null| 5| 1|
|Alice|null| 1|
|Alice| 2| 1|
| Bob|null| 1|
| Bob| 5| 1|
+-----+----+-----+
"""
jgd = self._jdf.cube(self._jcols(*cols))
from pyspark.sql.group import GroupedData
return GroupedData(jgd, self)
@since(1.3)
def agg(self, *exprs):
""" Aggregate on the entire :class:`DataFrame` without groups
(shorthand for ``df.groupBy.agg()``).
>>> df.agg({"age": "max"}).collect()
[Row(max(age)=5)]
>>> from pyspark.sql import functions as F
>>> df.agg(F.min(df.age)).collect()
[Row(min(age)=2)]
"""
return self.groupBy().agg(*exprs)
@since(2.0)
def union(self, other):
""" Return a new :class:`DataFrame` containing union of rows in this and another
:class:`DataFrame`.
This is equivalent to `UNION ALL` in SQL. To do a SQL-style set union
(that does deduplication of elements), use this function followed by :func:`distinct`.
Also as standard in SQL, this function resolves columns by position (not by name).
"""
return DataFrame(self._jdf.union(other._jdf), self.sql_ctx)
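    # Illustrative sketch (assumes an active SparkSession ``spark``): union is positional,
    # so a SQL-style UNION DISTINCT is obtained by chaining :func:`distinct`.
    #
    #     a = spark.createDataFrame([(1, "x"), (2, "y")], ["id", "v"])
    #     b = spark.createDataFrame([(2, "y"), (3, "z")], ["id", "v"])
    #     a.union(b).count()              # 4 rows, duplicates kept (UNION ALL)
    #     a.union(b).distinct().count()   # 3 rows, duplicates removed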
@since(1.3)
def unionAll(self, other):
""" Return a new :class:`DataFrame` containing union of rows in this and another
:class:`DataFrame`.
This is equivalent to `UNION ALL` in SQL. To do a SQL-style set union
(that does deduplication of elements), use this function followed by :func:`distinct`.
Also as standard in SQL, this function resolves columns by position (not by name).
"""
return self.union(other)
@since(2.3)
def unionByName(self, other):
""" Returns a new :class:`DataFrame` containing union of rows in this and another
:class:`DataFrame`.
This is different from both `UNION ALL` and `UNION DISTINCT` in SQL. To do a SQL-style set
union (that does deduplication of elements), use this function followed by :func:`distinct`.
The difference between this function and :func:`union` is that this function
resolves columns by name (not by position):
>>> df1 = spark.createDataFrame([[1, 2, 3]], ["col0", "col1", "col2"])
>>> df2 = spark.createDataFrame([[4, 5, 6]], ["col1", "col2", "col0"])
>>> df1.unionByName(df2).show()
+----+----+----+
|col0|col1|col2|
+----+----+----+
| 1| 2| 3|
| 6| 4| 5|
+----+----+----+
"""
return DataFrame(self._jdf.unionByName(other._jdf), self.sql_ctx)
@since(1.3)
def intersect(self, other):
""" Return a new :class:`DataFrame` containing rows only in
both this :class:`DataFrame` and another :class:`DataFrame`.
This is equivalent to `INTERSECT` in SQL.
"""
return DataFrame(self._jdf.intersect(other._jdf), self.sql_ctx)
@since(2.4)
def intersectAll(self, other):
""" Return a new :class:`DataFrame` containing rows in both this :class:`DataFrame`
and another :class:`DataFrame` while preserving duplicates.
This is equivalent to `INTERSECT ALL` in SQL.
>>> df1 = spark.createDataFrame([("a", 1), ("a", 1), ("b", 3), ("c", 4)], ["C1", "C2"])
>>> df2 = spark.createDataFrame([("a", 1), ("a", 1), ("b", 3)], ["C1", "C2"])
>>> df1.intersectAll(df2).sort("C1", "C2").show()
+---+---+
| C1| C2|
+---+---+
| a| 1|
| a| 1|
| b| 3|
+---+---+
Also as standard in SQL, this function resolves columns by position (not by name).
"""
return DataFrame(self._jdf.intersectAll(other._jdf), self.sql_ctx)
@since(1.3)
def subtract(self, other):
""" Return a new :class:`DataFrame` containing rows in this :class:`DataFrame`
but not in another :class:`DataFrame`.
This is equivalent to `EXCEPT DISTINCT` in SQL.
"""
return DataFrame(getattr(self._jdf, "except")(other._jdf), self.sql_ctx)
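    # Illustrative sketch of the set operations above (assumes two small DataFrames
    # ``a`` and ``b`` with the same schema; the names are placeholders, not part of this module):
    #
    #     a.intersect(b)   # rows present in both, deduplicated (INTERSECT)
    #     a.subtract(b)    # rows of ``a`` absent from ``b``, deduplicated (EXCEPT DISTINCT)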
@since(1.4)
def dropDuplicates(self, subset=None):
"""Return a new :class:`DataFrame` with duplicate rows removed,
optionally only considering certain columns.
        For a static batch :class:`DataFrame`, it just drops duplicate rows. For a streaming
        :class:`DataFrame`, it will keep all data across triggers as intermediate state to drop
        duplicate rows. You can use :func:`withWatermark` to limit how late the duplicate data can
        be and the system will accordingly limit the state. In addition, data older than the
        watermark will be dropped to avoid any possibility of duplicates.
:func:`drop_duplicates` is an alias for :func:`dropDuplicates`.
>>> from pyspark.sql import Row
>>> df = sc.parallelize([ \\
... Row(name='Alice', age=5, height=80), \\
... Row(name='Alice', age=5, height=80), \\
... Row(name='Alice', age=10, height=80)]).toDF()
>>> df.dropDuplicates().show()
+---+------+-----+
|age|height| name|
+---+------+-----+
| 5| 80|Alice|
| 10| 80|Alice|
+---+------+-----+
>>> df.dropDuplicates(['name', 'height']).show()
+---+------+-----+
|age|height| name|
+---+------+-----+
| 5| 80|Alice|
+---+------+-----+
"""
if subset is None:
jdf = self._jdf.dropDuplicates()
else:
jdf = self._jdf.dropDuplicates(self._jseq(subset))
return DataFrame(jdf, self.sql_ctx)
@since("1.3.1")
def dropna(self, how='any', thresh=None, subset=None):
"""Returns a new :class:`DataFrame` omitting rows with null values.
:func:`DataFrame.dropna` and :func:`DataFrameNaFunctions.drop` are aliases of each other.
:param how: 'any' or 'all'.
If 'any', drop a row if it contains any nulls.
If 'all', drop a row only if all its values are null.
:param thresh: int, default None
            If specified, drop rows that have fewer than `thresh` non-null values.
This overwrites the `how` parameter.
:param subset: optional list of column names to consider.
>>> df4.na.drop().show()
+---+------+-----+
|age|height| name|
+---+------+-----+
| 10| 80|Alice|
+---+------+-----+
"""
if how is not None and how not in ['any', 'all']:
raise ValueError("how ('" + how + "') should be 'any' or 'all'")
if subset is None:
subset = self.columns
elif isinstance(subset, basestring):
subset = [subset]
elif not isinstance(subset, (list, tuple)):
raise ValueError("subset should be a list or tuple of column names")
if thresh is None:
thresh = len(subset) if how == 'any' else 1
return DataFrame(self._jdf.na().drop(thresh, self._jseq(subset)), self.sql_ctx)
@since("1.3.1")
def fillna(self, value, subset=None):
"""Replace null values, alias for ``na.fill()``.
:func:`DataFrame.fillna` and :func:`DataFrameNaFunctions.fill` are aliases of each other.
:param value: int, long, float, string, bool or dict.
Value to replace null values with.
If the value is a dict, then `subset` is ignored and `value` must be a mapping
from column name (string) to replacement value. The replacement value must be
an int, long, float, boolean, or string.
:param subset: optional list of column names to consider.
Columns specified in subset that do not have matching data type are ignored.
For example, if `value` is a string, and subset contains a non-string column,
then the non-string column is simply ignored.
>>> df4.na.fill(50).show()
+---+------+-----+
|age|height| name|
+---+------+-----+
| 10| 80|Alice|
| 5| 50| Bob|
| 50| 50| Tom|
| 50| 50| null|
+---+------+-----+
>>> df5.na.fill(False).show()
+----+-------+-----+
| age| name| spy|
+----+-------+-----+
| 10| Alice|false|
| 5| Bob|false|
|null|Mallory| true|
+----+-------+-----+
>>> df4.na.fill({'age': 50, 'name': 'unknown'}).show()
+---+------+-------+
|age|height| name|
+---+------+-------+
| 10| 80| Alice|
| 5| null| Bob|
| 50| null| Tom|
| 50| null|unknown|
+---+------+-------+
"""
if not isinstance(value, (float, int, long, basestring, bool, dict)):
raise ValueError("value should be a float, int, long, string, bool or dict")
# Note that bool validates isinstance(int), but we don't want to
# convert bools to floats
if not isinstance(value, bool) and isinstance(value, (int, long)):
value = float(value)
if isinstance(value, dict):
return DataFrame(self._jdf.na().fill(value), self.sql_ctx)
elif subset is None:
return DataFrame(self._jdf.na().fill(value), self.sql_ctx)
else:
if isinstance(subset, basestring):
subset = [subset]
elif not isinstance(subset, (list, tuple)):
raise ValueError("subset should be a list or tuple of column names")
return DataFrame(self._jdf.na().fill(value, self._jseq(subset)), self.sql_ctx)
@since(1.4)
def replace(self, to_replace, value=_NoValue, subset=None):
"""Returns a new :class:`DataFrame` replacing a value with another value.
:func:`DataFrame.replace` and :func:`DataFrameNaFunctions.replace` are
aliases of each other.
Values to_replace and value must have the same type and can only be numerics, booleans,
        or strings. Value can also be None. When replacing, the new value will be cast
        to the type of the existing column.
        For numeric replacements all values to be replaced should have a unique
        floating point representation. In case of conflicts (for example with `{42: -1, 42.0: 1}`)
        an arbitrary replacement will be used.
:param to_replace: bool, int, long, float, string, list or dict.
Value to be replaced.
If the value is a dict, then `value` is ignored or can be omitted, and `to_replace`
must be a mapping between a value and a replacement.
:param value: bool, int, long, float, string, list or None.
The replacement value must be a bool, int, long, float, string or None. If `value` is a
list, `value` should be of the same length and type as `to_replace`.
If `value` is a scalar and `to_replace` is a sequence, then `value` is
used as a replacement for each item in `to_replace`.
:param subset: optional list of column names to consider.
Columns specified in subset that do not have matching data type are ignored.
For example, if `value` is a string, and subset contains a non-string column,
then the non-string column is simply ignored.
>>> df4.na.replace(10, 20).show()
+----+------+-----+
| age|height| name|
+----+------+-----+
| 20| 80|Alice|
| 5| null| Bob|
|null| null| Tom|
|null| null| null|
+----+------+-----+
>>> df4.na.replace('Alice', None).show()
+----+------+----+
| age|height|name|
+----+------+----+
| 10| 80|null|
| 5| null| Bob|
|null| null| Tom|
|null| null|null|
+----+------+----+
>>> df4.na.replace({'Alice': None}).show()
+----+------+----+
| age|height|name|
+----+------+----+
| 10| 80|null|
| 5| null| Bob|
|null| null| Tom|
|null| null|null|
+----+------+----+
>>> df4.na.replace(['Alice', 'Bob'], ['A', 'B'], 'name').show()
+----+------+----+
| age|height|name|
+----+------+----+
| 10| 80| A|
| 5| null| B|
|null| null| Tom|
|null| null|null|
+----+------+----+
"""
if value is _NoValue:
if isinstance(to_replace, dict):
value = None
else:
raise TypeError("value argument is required when to_replace is not a dictionary.")
# Helper functions
def all_of(types):
"""Given a type or tuple of types and a sequence of xs
            check if each x is an instance of type(s)
>>> all_of(bool)([True, False])
True
>>> all_of(basestring)(["a", 1])
False
"""
def all_of_(xs):
return all(isinstance(x, types) for x in xs)
return all_of_
all_of_bool = all_of(bool)
all_of_str = all_of(basestring)
all_of_numeric = all_of((float, int, long))
# Validate input types
valid_types = (bool, float, int, long, basestring, list, tuple)
if not isinstance(to_replace, valid_types + (dict, )):
raise ValueError(
"to_replace should be a bool, float, int, long, string, list, tuple, or dict. "
"Got {0}".format(type(to_replace)))
if not isinstance(value, valid_types) and value is not None \
and not isinstance(to_replace, dict):
raise ValueError("If to_replace is not a dict, value should be "
"a bool, float, int, long, string, list, tuple or None. "
"Got {0}".format(type(value)))
if isinstance(to_replace, (list, tuple)) and isinstance(value, (list, tuple)):
if len(to_replace) != len(value):
raise ValueError("to_replace and value lists should be of the same length. "
"Got {0} and {1}".format(len(to_replace), len(value)))
if not (subset is None or isinstance(subset, (list, tuple, basestring))):
raise ValueError("subset should be a list or tuple of column names, "
"column name or None. Got {0}".format(type(subset)))
# Reshape input arguments if necessary
if isinstance(to_replace, (float, int, long, basestring)):
to_replace = [to_replace]
if isinstance(to_replace, dict):
rep_dict = to_replace
if value is not None:
warnings.warn("to_replace is a dict and value is not None. value will be ignored.")
else:
if isinstance(value, (float, int, long, basestring)) or value is None:
value = [value for _ in range(len(to_replace))]
rep_dict = dict(zip(to_replace, value))
if isinstance(subset, basestring):
subset = [subset]
# Verify we were not passed in mixed type generics.
if not any(all_of_type(rep_dict.keys())
and all_of_type(x for x in rep_dict.values() if x is not None)
for all_of_type in [all_of_bool, all_of_str, all_of_numeric]):
raise ValueError("Mixed type replacements are not supported")
if subset is None:
return DataFrame(self._jdf.na().replace('*', rep_dict), self.sql_ctx)
else:
return DataFrame(
self._jdf.na().replace(self._jseq(subset), self._jmap(rep_dict)), self.sql_ctx)
@since(2.0)
def approxQuantile(self, col, probabilities, relativeError):
"""
Calculates the approximate quantiles of numerical columns of a
:class:`DataFrame`.
The result of this algorithm has the following deterministic bound:
If the :class:`DataFrame` has N elements and if we request the quantile at
probability `p` up to error `err`, then the algorithm will return
a sample `x` from the :class:`DataFrame` so that the *exact* rank of `x` is
close to (p * N). More precisely,
floor((p - err) * N) <= rank(x) <= ceil((p + err) * N).
This method implements a variation of the Greenwald-Khanna
algorithm (with some speed optimizations). The algorithm was first
presented in [[https://doi.org/10.1145/375663.375670
Space-efficient Online Computation of Quantile Summaries]]
by Greenwald and Khanna.
Note that null values will be ignored in numerical columns before calculation.
For columns only containing null values, an empty list is returned.
:param col: str, list.
Can be a single column name, or a list of names for multiple columns.
:param probabilities: a list of quantile probabilities
Each number must belong to [0, 1].
For example 0 is the minimum, 0.5 is the median, 1 is the maximum.
:param relativeError: The relative target precision to achieve
(>= 0). If set to zero, the exact quantiles are computed, which
could be very expensive. Note that values greater than 1 are
accepted but give the same result as 1.
:return: the approximate quantiles at the given probabilities. If
the input `col` is a string, the output is a list of floats. If the
input `col` is a list or tuple of strings, the output is also a
list, but each element in it is a list of floats, i.e., the output
is a list of list of floats.
.. versionchanged:: 2.2
Added support for multiple columns.
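An illustrative call on the module-level test data (skipped in doctests
because the returned values depend on the data and the error bound):
>>> df4.approxQuantile("age", [0.5], 0.25)  # doctest: +SKIP
[5.0]
>>> df4.approxQuantile(["age", "height"], [0.25, 0.75], 0.1)  # doctest: +SKIP
[[5.0, 10.0], [80.0, 80.0]]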
"""
if not isinstance(col, (basestring, list, tuple)):
raise ValueError("col should be a string, list or tuple, but got %r" % type(col))
isStr = isinstance(col, basestring)
if isinstance(col, tuple):
col = list(col)
elif isStr:
col = [col]
for c in col:
if not isinstance(c, basestring):
raise ValueError("columns should be strings, but got %r" % type(c))
col = _to_list(self._sc, col)
if not isinstance(probabilities, (list, tuple)):
raise ValueError("probabilities should be a list or tuple")
if isinstance(probabilities, tuple):
probabilities = list(probabilities)
for p in probabilities:
if not isinstance(p, (float, int, long)) or p < 0 or p > 1:
raise ValueError("probabilities should be numerical (float, int, long) in [0,1].")
probabilities = _to_list(self._sc, probabilities)
if not isinstance(relativeError, (float, int, long)) or relativeError < 0:
raise ValueError("relativeError should be numerical (float, int, long) >= 0.")
relativeError = float(relativeError)
jaq = self._jdf.stat().approxQuantile(col, probabilities, relativeError)
jaq_list = [list(j) for j in jaq]
return jaq_list[0] if isStr else jaq_list
@since(1.4)
def corr(self, col1, col2, method=None):
"""
Calculates the correlation of two columns of a :class:`DataFrame` as a double value.
Currently only supports the Pearson Correlation Coefficient.
:func:`DataFrame.corr` and :func:`DataFrameStatFunctions.corr` are aliases of each other.
:param col1: The name of the first column
:param col2: The name of the second column
:param method: The correlation method. Currently only supports "pearson"
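An illustrative example using a small, hypothetical DataFrame built inline
(skipped in doctests):
>>> df_xy = spark.createDataFrame([(1, 2.0), (2, 4.0), (3, 6.0)], ["x", "y"])  # doctest: +SKIP
>>> df_xy.corr("x", "y")  # doctest: +SKIP
1.0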
"""
if not isinstance(col1, basestring):
raise ValueError("col1 should be a string.")
if not isinstance(col2, basestring):
raise ValueError("col2 should be a string.")
if not method:
method = "pearson"
if not method == "pearson":
raise ValueError("Currently only the calculation of the Pearson Correlation " +
"coefficient is supported.")
return self._jdf.stat().corr(col1, col2, method)
@since(1.4)
def cov(self, col1, col2):
"""
Calculate the sample covariance for the given columns, specified by their names, as a
double value. :func:`DataFrame.cov` and :func:`DataFrameStatFunctions.cov` are aliases.
:param col1: The name of the first column
:param col2: The name of the second column
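An illustrative example using a small, hypothetical DataFrame built inline
(skipped in doctests):
>>> df_xy = spark.createDataFrame([(1, 2.0), (2, 4.0), (3, 6.0)], ["x", "y"])  # doctest: +SKIP
>>> df_xy.cov("x", "y")  # doctest: +SKIP
2.0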
"""
if not isinstance(col1, basestring):
raise ValueError("col1 should be a string.")
if not isinstance(col2, basestring):
raise ValueError("col2 should be a string.")
return self._jdf.stat().cov(col1, col2)
@since(1.4)
def crosstab(self, col1, col2):
"""
Computes a pair-wise frequency table of the given columns. Also known as a contingency
table. The number of distinct values for each column should be less than 1e4. At most 1e6
non-zero pair frequencies will be returned.
The first column of each row will be the distinct values of `col1` and the column names
will be the distinct values of `col2`. The name of the first column will be `$col1_$col2`.
Pairs that have no occurrences will have zero as their counts.
:func:`DataFrame.crosstab` and :func:`DataFrameStatFunctions.crosstab` are aliases.
:param col1: The name of the first column. Distinct items will make the first item of
each row.
:param col2: The name of the second column. Distinct items will make the column names
of the :class:`DataFrame`.
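An illustrative call on the module-level test data; the first column of the
result is named ``age_name`` and row order may vary, so the example is
skipped in doctests:
>>> df4.crosstab("age", "name").show()  # doctest: +SKIP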
"""
if not isinstance(col1, basestring):
raise ValueError("col1 should be a string.")
if not isinstance(col2, basestring):
raise ValueError("col2 should be a string.")
return DataFrame(self._jdf.stat().crosstab(col1, col2), self.sql_ctx)
@since(1.4)
def freqItems(self, cols, support=None):
"""
Finds frequent items for columns, possibly with false positives, using the
frequent element count algorithm described in
"https://doi.org/10.1145/762471.762473", proposed by Karp, Schenker, and Papadimitriou.
:func:`DataFrame.freqItems` and :func:`DataFrameStatFunctions.freqItems` are aliases.
.. note:: This function is meant for exploratory data analysis, as we make no
guarantee about the backward compatibility of the schema of the resulting
:class:`DataFrame`.
:param cols: Names of the columns to calculate frequent items for as a list or tuple of
strings.
:param support: The frequency with which to consider an item 'frequent'. Default is 1%.
The support must be greater than 1e-4.
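An illustrative call on the module-level test data (the exact contents of the
returned arrays may vary, so the example is skipped in doctests):
>>> df4.freqItems(["age"], support=0.3).collect()  # doctest: +SKIP
[Row(age_freqItems=[5, 10])]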
"""
if isinstance(cols, tuple):
cols = list(cols)
if not isinstance(cols, list):
raise ValueError("cols must be a list or tuple of column names as strings.")
if not support:
support = 0.01
return DataFrame(self._jdf.stat().freqItems(_to_seq(self._sc, cols), support), self.sql_ctx)
@ignore_unicode_prefix
@since(1.3)
def withColumn(self, colName, col):
"""
Returns a new :class:`DataFrame` by adding a column or replacing the
existing column that has the same name.
The column expression must be an expression over this :class:`DataFrame`; attempting to add
a column from some other :class:`DataFrame` will raise an error.
:param colName: string, name of the new column.
:param col: a :class:`Column` expression for the new column.
.. note:: This method introduces a projection internally. Therefore, calling it multiple
times, for instance, via loops in order to add multiple columns can generate big
plans which can cause performance issues and even `StackOverflowException`.
To avoid this, use :func:`select` with the multiple columns at once.
>>> df.withColumn('age2', df.age + 2).collect()
[Row(age=2, name=u'Alice', age2=4), Row(age=5, name=u'Bob', age2=7)]
"""
assert isinstance(col, Column), "col should be Column"
return DataFrame(self._jdf.withColumn(colName, col._jc), self.sql_ctx)
@ignore_unicode_prefix
@since(1.3)
def withColumnRenamed(self, existing, new):
"""Returns a new :class:`DataFrame` by renaming an existing column.
This is a no-op if schema doesn't contain the given column name.
:param existing: string, name of the existing column to rename.
:param new: string, new name of the column.
>>> df.withColumnRenamed('age', 'age2').collect()
[Row(age2=2, name=u'Alice'), Row(age2=5, name=u'Bob')]
"""
return DataFrame(self._jdf.withColumnRenamed(existing, new), self.sql_ctx)
@since(1.4)
@ignore_unicode_prefix
def drop(self, *cols):
"""Returns a new :class:`DataFrame` that drops the specified column.
This is a no-op if schema doesn't contain the given column name(s).
:param cols: a string name of the column to drop, or a
:class:`Column` to drop, or a list of string name of the columns to drop.
>>> df.drop('age').collect()
[Row(name=u'Alice'), Row(name=u'Bob')]
>>> df.drop(df.age).collect()
[Row(name=u'Alice'), Row(name=u'Bob')]
>>> df.join(df2, df.name == df2.name, 'inner').drop(df.name).collect()
[Row(age=5, height=85, name=u'Bob')]
>>> df.join(df2, df.name == df2.name, 'inner').drop(df2.name).collect()
[Row(age=5, name=u'Bob', height=85)]
>>> df.join(df2, 'name', 'inner').drop('age', 'height').collect()
[Row(name=u'Bob')]
"""
if len(cols) == 1:
col = cols[0]
if isinstance(col, basestring):
jdf = self._jdf.drop(col)
elif isinstance(col, Column):
jdf = self._jdf.drop(col._jc)
else:
raise TypeError("col should be a string or a Column")
else:
for col in cols:
if not isinstance(col, basestring):
raise TypeError("each col in the param list should be a string")
jdf = self._jdf.drop(self._jseq(cols))
return DataFrame(jdf, self.sql_ctx)
@ignore_unicode_prefix
def toDF(self, *cols):
"""Returns a new class:`DataFrame` that with new specified column names
:param cols: list of new column names (string)
>>> df.toDF('f1', 'f2').collect()
[Row(f1=2, f2=u'Alice'), Row(f1=5, f2=u'Bob')]
"""
jdf = self._jdf.toDF(self._jseq(cols))
return DataFrame(jdf, self.sql_ctx)
@since(3.0)
def transform(self, func):
"""Returns a new class:`DataFrame`. Concise syntax for chaining custom transformations.
:param func: a function that takes and returns a class:`DataFrame`.
>>> from pyspark.sql.functions import col
>>> df = spark.createDataFrame([(1, 1.0), (2, 2.0)], ["int", "float"])
>>> def cast_all_to_int(input_df):
... return input_df.select([col(col_name).cast("int") for col_name in input_df.columns])
>>> def sort_columns_asc(input_df):
... return input_df.select(*sorted(input_df.columns))
>>> df.transform(cast_all_to_int).transform(sort_columns_asc).show()
+-----+---+
|float|int|
+-----+---+
| 1| 1|
| 2| 2|
+-----+---+
"""
result = func(self)
assert isinstance(result, DataFrame), "Func returned an instance of type [%s], " \
"should have been DataFrame." % type(result)
return result
@since(3.1)
def sameSemantics(self, other):
"""
Returns `True` when the logical query plans inside both :class:`DataFrame`\\s are equal and
therefore return the same results.
.. note:: The equality comparison here is simplified by tolerating cosmetic differences
such as attribute names.
.. note:: This API can compare both :class:`DataFrame`\\s very fast but can still return
`False` on :class:`DataFrame`\\s that return the same results, for instance, when they
come from different plans. Such false negatives are acceptable for use cases such as caching.
.. note:: DeveloperApi
>>> df1 = spark.range(10)
>>> df2 = spark.range(10)
>>> df1.withColumn("col1", df1.id * 2).sameSemantics(df2.withColumn("col1", df2.id * 2))
True
>>> df1.withColumn("col1", df1.id * 2).sameSemantics(df2.withColumn("col1", df2.id + 2))
False
>>> df1.withColumn("col1", df1.id * 2).sameSemantics(df2.withColumn("col0", df2.id * 2))
True
"""
if not isinstance(other, DataFrame):
raise ValueError("other parameter should be of DataFrame; however, got %s"
% type(other))
return self._jdf.sameSemantics(other._jdf)
@since(3.1)
def semanticHash(self):
"""
Returns a hash code of the logical query plan against this :class:`DataFrame`.
.. note:: Unlike the standard hash code, the hash is calculated against the query plan
simplified by tolerating the cosmetic differences such as attribute names.
.. note:: DeveloperApi
>>> spark.range(10).selectExpr("id as col0").semanticHash() # doctest: +SKIP
1855039936
>>> spark.range(10).selectExpr("id as col1").semanticHash() # doctest: +SKIP
1855039936
"""
return self._jdf.semanticHash()
where = copy_func(
filter,
sinceversion=1.3,
doc=":func:`where` is an alias for :func:`filter`.")
# Two aliases below were added for pandas compatibility many years ago.
# There are too many differences compared to pandas and we cannot just
# make it "compatible" by adding aliases. Therefore, we stop adding such
# aliases as of Spark 3.0. Two methods below remain just
# for legacy users currently.
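# For example, ``df.groupby("name").count()`` is equivalent to
# ``df.groupBy("name").count()``, and ``df.drop_duplicates()`` to
# ``df.dropDuplicates()``.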
groupby = copy_func(
groupBy,
sinceversion=1.4,
doc=":func:`groupby` is an alias for :func:`groupBy`.")
drop_duplicates = copy_func(
dropDuplicates,
sinceversion=1.4,
doc=":func:`drop_duplicates` is an alias for :func:`dropDuplicates`.")
def _to_scala_map(sc, jm):
"""
Convert a dict into a JVM Map.
"""
return sc._jvm.PythonUtils.toScalaMap(jm)
class DataFrameNaFunctions(object):
"""Functionality for working with missing data in :class:`DataFrame`.
.. versionadded:: 1.4
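Typically accessed through :attr:`DataFrame.na`; for example (illustrative,
skipped in doctests):
>>> df4.na.fill(0).first()  # doctest: +SKIP
Row(age=10, height=80, name=u'Alice')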
"""
def __init__(self, df):
self.df = df
def drop(self, how='any', thresh=None, subset=None):
return self.df.dropna(how=how, thresh=thresh, subset=subset)
drop.__doc__ = DataFrame.dropna.__doc__
def fill(self, value, subset=None):
return self.df.fillna(value=value, subset=subset)
fill.__doc__ = DataFrame.fillna.__doc__
def replace(self, to_replace, value=_NoValue, subset=None):
return self.df.replace(to_replace, value, subset)
replace.__doc__ = DataFrame.replace.__doc__
class DataFrameStatFunctions(object):
"""Functionality for statistic functions with :class:`DataFrame`.
.. versionadded:: 1.4
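Typically accessed through :attr:`DataFrame.stat`; for example (illustrative,
skipped in doctests):
>>> df4.stat.cov("age", "height")  # doctest: +SKIP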
"""
def __init__(self, df):
self.df = df
def approxQuantile(self, col, probabilities, relativeError):
return self.df.approxQuantile(col, probabilities, relativeError)
approxQuantile.__doc__ = DataFrame.approxQuantile.__doc__
def corr(self, col1, col2, method=None):
return self.df.corr(col1, col2, method)
corr.__doc__ = DataFrame.corr.__doc__
def cov(self, col1, col2):
return self.df.cov(col1, col2)
cov.__doc__ = DataFrame.cov.__doc__
def crosstab(self, col1, col2):
return self.df.crosstab(col1, col2)
crosstab.__doc__ = DataFrame.crosstab.__doc__
def freqItems(self, cols, support=None):
return self.df.freqItems(cols, support)
freqItems.__doc__ = DataFrame.freqItems.__doc__
def sampleBy(self, col, fractions, seed=None):
return self.df.sampleBy(col, fractions, seed)
sampleBy.__doc__ = DataFrame.sampleBy.__doc__
def _test():
import doctest
from pyspark.context import SparkContext
from pyspark.sql import Row, SQLContext, SparkSession
import pyspark.sql.dataframe
from pyspark.sql.functions import from_unixtime
globs = pyspark.sql.dataframe.__dict__.copy()
sc = SparkContext('local[4]', 'PythonTest')
globs['sc'] = sc
globs['sqlContext'] = SQLContext(sc)
globs['spark'] = SparkSession(sc)
globs['df'] = sc.parallelize([(2, 'Alice'), (5, 'Bob')])\
.toDF(StructType([StructField('age', IntegerType()),
StructField('name', StringType())]))
globs['df2'] = sc.parallelize([Row(name='Tom', height=80), Row(name='Bob', height=85)]).toDF()
globs['df3'] = sc.parallelize([Row(name='Alice', age=2),
Row(name='Bob', age=5)]).toDF()
globs['df4'] = sc.parallelize([Row(name='Alice', age=10, height=80),
Row(name='Bob', age=5, height=None),
Row(name='Tom', age=None, height=None),
Row(name=None, age=None, height=None)]).toDF()
globs['df5'] = sc.parallelize([Row(name='Alice', spy=False, age=10),
Row(name='Bob', spy=None, age=5),
Row(name='Mallory', spy=True, age=None)]).toDF()
globs['sdf'] = sc.parallelize([Row(name='Tom', time=1479441846),
Row(name='Bob', time=1479442946)]).toDF()
(failure_count, test_count) = doctest.testmod(
pyspark.sql.dataframe, globs=globs,
optionflags=doctest.ELLIPSIS | doctest.NORMALIZE_WHITESPACE | doctest.REPORT_NDIFF)
globs['sc'].stop()
if failure_count:
sys.exit(-1)
if __name__ == "__main__":
_test()
| mit |
paninski-lab/yass | examples/stability.py | 1 | 1887 | """
Stability metric example
"""
import os.path as path
import numpy as np
import matplotlib.pyplot as plt
from stability_evaluation import (RecordingBatchIterator, MeanWaveCalculator,
RecordingAugmentation,
SpikeSortingEvaluation)
ROOT = path.join(path.expanduser('~'), 'data/yass')
path_to_spike_train = path.join(ROOT, 'ej49_spikeTrain1_1.csv')
path_to_data = path.join(ROOT, 'ej49_data1_set1.bin')
path_to_geom = path.join(ROOT, 'ej49_geometry1.txt')
path_to_augmented = path.join(ROOT, 'augmented.bin')
spike_train = np.loadtxt(path_to_spike_train, dtype='int32', delimiter=',')
spike_train
br = RecordingBatchIterator(path_to_data, path_to_geom, sample_rate=30000,
batch_time_samples=1000000, n_batches=5,
n_chan=200, radius=100, whiten=False)
mwc = MeanWaveCalculator(br, spike_train)
# plot some of the recovered templates
for i in range(2):
    plt.plot(mwc.templates[:, :, i])
plt.show()
# Here we indicate the length of the augmented data in terms of batches
# (with respect to the batch iterator object).
stab = RecordingAugmentation(mwc, augment_rate=0.25, move_rate=0.2)
# New ground truth spike train
new_gt_spt, status = stab.save_augment_recording(path_to_augmented, 5)
# Creating evaluation object for matching, TP, and FP
spt_ = spike_train[spike_train[:, 0] < 1e6, :]
tmp_ = mwc.templates[:, :, np.unique(spt_[:, 1])]
# Let's create a fake new spike train with only 100
# first units of the ground truth as clusters
spt_2 = spt_[spt_[:, 1] < 100, :]
tmp_2 = tmp_[:, :, :100]
# Here we just demonstrate with the same spike train;
# the second argument should be a different spike train.
ev = SpikeSortingEvaluation(spt_, spt_2, tmp_, tmp_2)
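# Report the evaluation results: per-unit true positive and false positive
# measures, and the mapping inferred between ground-truth units and clusters.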
print(ev.true_positive)
print(ev.false_positive)
print(ev.unit_cluster_map)
| apache-2.0 |