repo_name | path | copies | size | content | license
---|---|---|---|---|---
vortex-exoplanet/VIP | vip_hci/preproc/recentering.py | 2 | 65675 |
#! /usr/bin/env python
"""
Module containing functions for cube frame registration.
"""
__author__ = 'Carlos Alberto Gomez Gonzalez, V. Christiaens, G. Ruane'
__all__ = ['frame_shift',
'cube_shift',
'frame_center_radon',
'frame_center_satspots',
'cube_recenter_satspots',
'cube_recenter_radon',
'cube_recenter_dft_upsampling',
'cube_recenter_2dfit',
'cube_recenter_via_speckles']
import numpy as np
import warnings
try:
import cv2
no_opencv = False
except ImportError:
msg = "Opencv python bindings are missing."
warnings.warn(msg, ImportWarning)
no_opencv = True
from hciplot import plot_frames
from scipy.ndimage import fourier_shift
from scipy.ndimage import shift
from skimage.transform import radon
from skimage.feature import register_translation
from multiprocessing import cpu_count
from matplotlib import pyplot as plt
from . import frame_crop
from ..conf import time_ini, timing, Progressbar
from ..conf.utils_conf import vip_figsize, check_array
from ..conf.utils_conf import pool_map, iterable
from ..stats import frame_basic_stats
from ..var import (get_square, frame_center, get_annulus_segments,
fit_2dmoffat, fit_2dgaussian, fit_2dairydisk,
fit_2d2gaussian, cube_filter_lowpass, cube_filter_highpass)
from ..preproc import cube_crop_frames
def frame_shift(array, shift_y, shift_x, imlib='opencv',
interpolation='lanczos4', border_mode='reflect'):
""" Shifts a 2D array by shift_y, shift_x. Boundaries are filled with zeros.
Parameters
----------
array : numpy ndarray
Input 2d array.
shift_y, shift_x: float
Shifts in y and x directions.
imlib : {'opencv', 'ndimage-fourier', 'ndimage-interp'}, string optional
Library or method used for performing the image shift.
'ndimage-fourier', does a fourier shift operation and preserves better
the pixel values (therefore the flux and photometry). Interpolation
based shift ('opencv' and 'ndimage-interp') is faster than the fourier
shift. 'opencv' is recommended when speed is critical.
interpolation : str, optional
Only used in case of imlib is set to 'opencv' or 'ndimage-interp'
(Scipy.ndimage), where the images are shifted via interpolation.
For Scipy.ndimage the options are: 'nearneig', 'bilinear', 'bicuadratic',
'bicubic', 'biquartic' or 'biquintic'. The 'nearneig' interpolation is
the fastest and the 'biquintic' the slowest. The 'nearneig' is the
poorest option for interpolating noisy astronomical images.
For Opencv the options are: 'nearneig', 'bilinear', 'bicubic' or
'lanczos4'. The 'nearneig' interpolation is the fastest and the
'lanczos4' the slowest and most accurate. 'lanczos4' is the default for
Opencv and 'biquartic' for Scipy.ndimage.
border_mode : {'reflect', 'nearest', 'constant', 'mirror', 'wrap'}
Points outside the boundaries of the input are filled accordingly.
With 'reflect', the input is extended by reflecting about the edge of
the last pixel. With 'nearest', the input is extended by replicating the
last pixel. With 'constant', the input is extended by filling all values
beyond the edge with zeros. With 'mirror', the input is extended by
reflecting about the center of the last pixel. With 'wrap', the input is
extended by wrapping around to the opposite edge. Default is 'reflect'.
Returns
-------
array_shifted : numpy ndarray
Shifted 2d array.
"""
check_array(array, dim=2)
image = array.copy()
if imlib == 'ndimage-fourier':
shift_val = (shift_y, shift_x)
array_shifted = fourier_shift(np.fft.fftn(image), shift_val)
array_shifted = np.fft.ifftn(array_shifted)
array_shifted = array_shifted.real
elif imlib == 'ndimage-interp':
if interpolation == 'nearneig':
order = 0
elif interpolation == 'bilinear':
order = 1
elif interpolation == 'bicuadratic':
order = 2
elif interpolation == 'bicubic':
order = 3
elif interpolation == 'biquartic' or interpolation == 'lanczos4':
order = 4
elif interpolation == 'biquintic':
order = 5
else:
raise ValueError('Scipy.ndimage interpolation method not '
'recognized')
if border_mode not in ['reflect', 'nearest', 'constant', 'mirror',
'wrap']:
raise ValueError('`border_mode` not recognized')
array_shifted = shift(image, (shift_y, shift_x), order=order,
mode=border_mode)
elif imlib == 'opencv':
if no_opencv:
msg = 'Opencv python bindings cannot be imported. Install opencv or'
msg += ' set imlib to ndimage-fourier or ndimage-interp'
raise RuntimeError(msg)
if interpolation == 'bilinear':
intp = cv2.INTER_LINEAR
elif interpolation == 'bicubic':
intp = cv2.INTER_CUBIC
elif interpolation == 'nearneig':
intp = cv2.INTER_NEAREST
elif interpolation == 'lanczos4':
intp = cv2.INTER_LANCZOS4
else:
raise ValueError('Opencv interpolation method not recognized')
if border_mode == 'mirror':
bormo = cv2.BORDER_REFLECT_101 # gfedcb|abcdefgh|gfedcba
elif border_mode == 'reflect':
bormo = cv2.BORDER_REFLECT # fedcba|abcdefgh|hgfedcb
elif border_mode == 'wrap':
bormo = cv2.BORDER_WRAP # cdefgh|abcdefgh|abcdefg
elif border_mode == 'constant':
bormo = cv2.BORDER_CONSTANT # iiiiii|abcdefgh|iiiiiii
elif border_mode == 'nearest':
bormo = cv2.BORDER_REPLICATE # aaaaaa|abcdefgh|hhhhhhh
else:
raise ValueError('`border_mode` not recognized')
image = np.float32(image)
y, x = image.shape
M = np.float32([[1, 0, shift_x], [0, 1, shift_y]])
array_shifted = cv2.warpAffine(image, M, (x, y), flags=intp,
borderMode=bormo)
else:
raise ValueError('Image transformation library not recognized')
return array_shifted
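# Illustrative usage sketch (not part of the original module; the synthetic
# frame below is hypothetical): shifting a 2D frame with two of the
# supported backends.
# >>> import numpy as np
# >>> frame = np.zeros((101, 101)); frame[50, 50] = 1.
# >>> sh_fft = frame_shift(frame, 0.5, -1.2, imlib='ndimage-fourier')
# >>> sh_cv = frame_shift(frame, 0.5, -1.2, imlib='opencv',
# ...                     interpolation='lanczos4')
# The Fourier shift preserves the flux better; 'opencv' is the fastest.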
def cube_shift(cube, shift_y, shift_x, imlib='opencv',
interpolation='lanczos4'):
""" Shifts the X-Y coordinates of a cube or 3D array by x and y values.
Parameters
----------
cube : numpy ndarray, 3d
Input cube.
shift_y, shift_x: float, list of floats or np.ndarray of floats
Shifts in y and x directions for each frame. If a single value is
given then all the frames will be shifted by the same amount.
imlib : str, optional
See the documentation of the ``vip_hci.preproc.frame_shift`` function.
interpolation : str, optional
See the documentation of the ``vip_hci.preproc.frame_shift`` function.
Returns
-------
cube_out : numpy ndarray, 3d
Cube with shifted frames.
"""
check_array(cube, dim=3)
nfr = cube.shape[0]
cube_out = np.zeros_like(cube)
if isinstance(shift_x, (int, float)):
shift_x = np.ones((nfr)) * shift_x
if isinstance(shift_y, (int, float)):
shift_y = np.ones((nfr)) * shift_y
for i in range(cube.shape[0]):
cube_out[i] = frame_shift(cube[i], shift_y[i], shift_x[i], imlib,
interpolation)
return cube_out
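# Illustrative sketch (assumed synthetic cube): shifts can be passed as one
# scalar for all frames or as one value per frame.
# >>> import numpy as np
# >>> cube = np.random.normal(size=(10, 64, 64))
# >>> dy = np.linspace(-0.5, 0.5, 10)
# >>> dx = np.zeros(10)
# >>> cube_aligned = cube_shift(cube, dy, dx, imlib='opencv')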
def frame_center_satspots(array, xy, subi_size=19, sigfactor=6, shift=False,
imlib='opencv', interpolation='lanczos4',
fit_type='moff', debug=False, verbose=True):
""" Finds the center of a frame with waffle/satellite spots (e.g. for
VLT/SPHERE). The method used to determine the center is by centroiding the
4 spots via a 2d Gaussian fit and finding the intersection of the
lines they create (see Notes). This method is very sensitive to the SNR of
the satellite spots, therefore thresholding of the background pixels is
performed. If the results are too extreme, the debug parameter will allow to
see in depth what is going on with the fit (maybe you'll need to adjust the
sigfactor for the background pixels thresholding).
Parameters
----------
array : numpy ndarray, 2d
Image or frame.
xy : tuple of 4 tuples
Tuple with coordinates X,Y of the 4 satellite spots. When the spots are
in an X configuration, the order is the following: top-left, top-right,
bottom-left and bottom-right. When the spots are in an + (cross-like)
configuration, the order is the following: top, right, left, bottom.
subi_size : int, optional
Size of subimage where the fitting is done.
sigfactor : int, optional
The background pixels will be thresholded before fitting a 2d Gaussian
to the data using sigma clipped statistics. All values smaller than
(MEDIAN + sigfactor*STDDEV) will be replaced by small random Gaussian
noise.
shift : bool, optional
If True the image is shifted.
imlib : str, optional
See the documentation of the ``vip_hci.preproc.frame_shift`` function.
interpolation : str, optional
See the documentation of the ``vip_hci.preproc.frame_shift`` function.
fit_type: str, optional {'gaus','moff'}
Type of 2d fit to infer the centroid of the satellite spots.
debug : bool, optional
If True debug information is printed and plotted.
verbose : bool, optional
If True the intersection and shifts information is printed out.
Returns
-------
array_rec
Shifted images. *Only returned if ``shift=True``.*
shifty, shiftx
Shift Y,X to get to the true center.
Notes
-----
Linear system:
.. code-block:: none
A1 * x + B1 * y = C1
A2 * x + B2 * y = C2
Cramer's rule - the solution can be found through determinants:
.. code-block:: none
x = Dx/D
y = Dy/D
where D is the main determinant of the system:
.. code-block:: none
A1 B1
A2 B2
and Dx and Dy can be found from the matrices:
.. code-block:: none
C1 B1
C2 B2
and
.. code-block:: none
A1 C1
A2 C2
i.e. the C column consecutively substitutes the coefficient columns of x and y.
L stores the coefficients A, B, C of the line equations.
.. code-block:: none
For D: L1[0] L1[1] for Dx: L1[2] L1[1] for Dy: L1[0] L1[2]
L2[0] L2[1] L2[2] L2[1] L2[0] L2[2]
"""
def line(p1, p2):
""" produces coefs A, B, C of line equation by 2 points
"""
A = (p1[1] - p2[1])
B = (p2[0] - p1[0])
C = (p1[0] * p2[1] - p2[0] * p1[1])
return A, B, -C
def intersection(L1, L2):
""" finds intersection point (if any) of 2 lines provided by coefs
"""
D = L1[0] * L2[1] - L1[1] * L2[0]
Dx = L1[2] * L2[1] - L1[1] * L2[2]
Dy = L1[0] * L2[2] - L1[2] * L2[0]
if D != 0:
x = Dx / D
y = Dy / D
return x, y
else:
return None
# --------------------------------------------------------------------------
check_array(array, dim=2)
if fit_type not in ['gaus','moff']:
raise TypeError('fit_type is not recognized')
if not isinstance(xy, (tuple, list)) or len(xy) != 4:
raise TypeError('Input waffle spot coordinates in wrong format (must '
'be a tuple of 4 tuples)')
cy, cx = frame_center(array)
centx = []
centy = []
subims = []
for i in range(len(xy)):
sim, y, x = get_square(array, subi_size, xy[i][1], xy[i][0],
position=True, verbose=False)
if fit_type=='gaus':
cent2dgy, cent2dgx = fit_2dgaussian(sim, crop=False, threshold=True,
sigfactor=sigfactor, debug=debug,
full_output=False)
else:
cent2dgy, cent2dgx = fit_2dmoffat(sim, crop=False, threshold=True,
sigfactor=sigfactor, debug=debug,
full_output=False)
centx.append(cent2dgx + x)
centy.append(cent2dgy + y)
subims.append(sim)
cent2dgx_1, cent2dgx_2, cent2dgx_3, cent2dgx_4 = centx
cent2dgy_1, cent2dgy_2, cent2dgy_3, cent2dgy_4 = centy
si1, si2, si3, si4 = subims
if debug:
plot_frames((si1, si2, si3, si4), colorbar=True)
print('Centroids X,Y:')
print(cent2dgx_1, cent2dgy_1)
print(cent2dgx_2, cent2dgy_2)
print(cent2dgx_3, cent2dgy_3)
print(cent2dgx_4, cent2dgy_4)
L1 = line([cent2dgx_1, cent2dgy_1], [cent2dgx_4, cent2dgy_4])
L2 = line([cent2dgx_2, cent2dgy_2], [cent2dgx_3, cent2dgy_3])
R = intersection(L1, L2)
msgerr = "Check that the order of the tuples in `xy` is correct and"
msgerr += " the satellite spots have good S/N"
if R is not None:
shiftx = cx - R[0]
shifty = cy - R[1]
if np.abs(shiftx) < cx * 2 and np.abs(shifty) < cy * 2:
if debug or verbose:
print('Intersection coordinates (X,Y):', R[0], R[1], '\n')
print('Shifts (X,Y): {:.3f}, {:.3f}'.format(shiftx, shifty))
if shift:
array_rec = frame_shift(array, shifty, shiftx, imlib=imlib,
interpolation=interpolation)
return array_rec, shifty, shiftx, centy, centx
else:
return shifty, shiftx
else:
raise RuntimeError("Too large shifts. " + msgerr)
else:
raise RuntimeError("Something went wrong, no intersection found. " +
msgerr)
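# Illustrative sketch (hypothetical spot coordinates for a SPHERE-like
# frame): rough (x, y) guesses near each satellite spot are refined by the
# 2d fit, and the center is taken as the intersection of the two diagonals.
# >>> xy = ((41, 109), (109, 109), (41, 41), (109, 41))  # TL, TR, BL, BR
# >>> shy, shx = frame_center_satspots(frame, xy, subi_size=19,
# ...                                  verbose=False)
# >>> frame_centered = frame_shift(frame, shy, shx)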
def cube_recenter_satspots(array, xy, subi_size=19, sigfactor=6, plot=True,
fit_type='moff', lbda=None, debug=False, verbose=True,
full_output=False):
""" Function analog to frame_center_satspots but for image sequences. It
actually will call frame_center_satspots for each image in the cube. The
function also returns the shifted images (not recommended to use when the
shifts are of a few percents of a pixel) and plots the histogram of the
shifts and calculate its statistics. This is important to assess the
dispersion of the star center by using artificial waffle/satellite spots
(like those in VLT/SPHERE images) and evaluate the uncertainty of the
position of the center. The use of the shifted images is not recommended.
Parameters
----------
array : numpy ndarray, 3d
Input cube.
xy : tuple
Tuple with coordinates X,Y of the 4 satellite spots. When the spots are
in an X configuration, the order is the following: top-left, top-right,
bottom-left and bottom-right. When the spots are in an + (cross-like)
configuration, the order is the following: top, right, left, bottom.
If a wavelength vector is not provided, it is assumed that all sat spots
of the cube are at a similar location. If a wavelength vector is
provided, only the coordinates of the sat spots in the first channel
should be given; the box locations in the other channels will be scaled
accordingly.
subi_size : int, optional
Size of subimage where the fitting is done.
sigfactor : int, optional
The background pixels will be thresholded before fitting a 2d Gaussian
to the data using sigma clipped statistics. All values smaller than
(MEDIAN + sigfactor*STDDEV) will be replaced by small random Gaussian
noise.
plot : bool, optional
Whether to plot the shifts.
fit_type: str, optional {'gaus','moff'}
Type of 2d fit to infer the centroid of the satellite spots.
lbda: 1d array or list, opt
Wavelength vector. If provided, the subimages will be scaled accordingly
to follow the motion of the satellite spots.
debug : bool, optional
If True debug information is printed and plotted (fit and residuals,
intersections and shifts). This has to be used carefully as it can
produce too much output and plots.
verbose : bool, optional
Whether to print to stdout the timing and additional info.
full_output : bool, optional
Whether to return 2 1d arrays of shifts along with the recentered cube
or not.
Returns
-------
array_rec
The shifted cube.
shift_y, shift_x
[full_output==True] Shifts Y,X to get to the true center for each image.
sat_y, sat_x
[full_output==True] Y,X positions of the satellite spots in each image.
Order: top-left, top-right, bottom-left and bottom-right.
"""
check_array(array, dim=3)
if verbose:
start_time = time_ini()
n_frames = array.shape[0]
shift_x = np.zeros((n_frames))
shift_y = np.zeros((n_frames))
sat_y = np.zeros([n_frames,4])
sat_x = np.zeros([n_frames,4])
array_rec = []
if lbda is not None:
cy, cx = frame_center(array[0])
final_xy = []
rescal = lbda/lbda[0]
for i in range(n_frames):
xy_new = []
for s in range(4):
xy_new.append((cx+rescal[i]*(xy[s][0]-cx),cy+rescal[i]*(xy[s][1]-cy)))
xy_new = tuple(xy_new)
final_xy.append(xy_new)
else:
final_xy = [xy for i in range(n_frames)]
if verbose:
print("Final xy positions for sat spots:", final_xy)
print('Looping through the frames, fitting the intersections:')
for i in Progressbar(range(n_frames), verbose=verbose):
res = frame_center_satspots(array[i], final_xy[i], debug=debug, shift=True,
subi_size=subi_size, sigfactor=sigfactor,
fit_type=fit_type, verbose=False)
array_rec.append(res[0])
shift_y[i] = res[1]
shift_x[i] = res[2]
sat_y[i] = res[3]
sat_x[i] = res[4]
if verbose:
timing(start_time)
if plot:
plt.figure(figsize=vip_figsize)
plt.plot(shift_x, 'o-', label='Shifts in x', alpha=0.5)
plt.plot(shift_y, 'o-', label='Shifts in y', alpha=0.5)
plt.legend(loc='best')
plt.grid('on', alpha=0.2)
plt.ylabel('Pixels')
plt.xlabel('Frame number')
plt.figure(figsize=vip_figsize)
b = int(np.sqrt(n_frames))
la = 'Histogram'
_ = plt.hist(shift_x, bins=b, alpha=0.5, label=la + ' shifts X')
_ = plt.hist(shift_y, bins=b, alpha=0.5, label=la + ' shifts Y')
plt.legend(loc='best')
plt.ylabel('Bin counts')
plt.xlabel('Pixels')
if verbose:
msg1 = 'MEAN X,Y: {:.3f}, {:.3f}'
print(msg1.format(np.mean(shift_x), np.mean(shift_y)))
msg2 = 'MEDIAN X,Y: {:.3f}, {:.3f}'
print(msg2.format(np.median(shift_x), np.median(shift_y)))
msg3 = 'STDDEV X,Y: {:.3f}, {:.3f}'
print(msg3.format(np.std(shift_x), np.std(shift_y)))
array_rec = np.array(array_rec)
if full_output:
return array_rec, shift_y, shift_x, sat_y, sat_x
else:
return array_rec
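# Illustrative sketch (assumed IFS-like cube with wavelength vector `lbda`):
# when `lbda` is given, only the spot coordinates of the first channel are
# needed; the fitting boxes are rescaled for the other channels.
# >>> cube_c, sy, sx, saty, satx = cube_recenter_satspots(cube, xy,
# ...                                                     lbda=lbda,
# ...                                                     full_output=True)
# >>> print(np.std(sx), np.std(sy))  # dispersion of the star center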
def frame_center_radon(array, cropsize=101, hsize=0.4, step=0.01,
mask_center=None, nproc=None, satspots=False,
full_output=False, verbose=True, plot=True, debug=False):
""" Finding the center of a broadband (co-added) frame with speckles and
satellite spots elongated towards the star (center). We use the radon
transform implementation from scikit-image.
Parameters
----------
array : numpy ndarray
Input 2d array or image.
cropsize : odd int, optional
Size in pixels of the cropped central area of the input array that will
be used. It should be large enough to contain the satellite spots.
hsize : float, optional
Size of the box for the grid search. The frame is shifted in each
direction from the center, within a range of hsize, with the given step.
step : float, optional
The step of the coordinates change.
mask_center : None or int, optional
If None the central area of the frame is kept. If int a centered zero
mask will be applied to the frame. By default the center isn't masked.
nproc : int, optional
Number of processes for parallel computing. If None the number of
processes will be set to cpu_count()/2.
verbose : bool optional
Whether to print to stdout some messages and info.
plot : bool, optional
Whether to plot the radon cost function.
debug : bool, optional
Whether to print and plot intermediate info.
Returns
-------
optimy, optimx : float
Values of the Y, X coordinates of the center of the frame based on the
radon optimization.
If full_output is True then the radon cost function surface is returned
along with the optimal x and y.
Notes
-----
Based on Pueyo et al. 2014: http://arxiv.org/abs/1409.6388
"""
from .cosmetics import frame_crop
if array.ndim != 2:
raise TypeError('Input array is not a frame or 2d array')
if verbose:
start_time = time_ini()
frame = array.copy()
frame = frame_crop(frame, cropsize, verbose=False)
listyx = np.linspace(start=-hsize, stop=hsize,
num=int(2 * hsize / step) + 1, endpoint=True)  # num must be an integer
if not mask_center:
radint = 0
else:
if not isinstance(mask_center, int):
raise TypeError
radint = mask_center
coords = [(y, x) for y in listyx for x in listyx]
cent, _ = frame_center(frame)
frame = get_annulus_segments(frame, radint, cent-radint, mode="mask")[0]
if debug:
if satspots:
samples = 10
theta = np.hstack((np.linspace(start=40, stop=50, num=samples,
endpoint=False),
np.linspace(start=130, stop=140, num=samples,
endpoint=False),
np.linspace(start=220, stop=230, num=samples,
endpoint=False),
np.linspace(start=310, stop=320, num=samples,
endpoint=False)))
sinogram = radon(frame, theta=theta, circle=True)
plot_frames((frame, sinogram))
print(np.sum(np.abs(sinogram[int(cent), :])))
else:
theta = np.linspace(start=0, stop=360, num=cent*2, endpoint=False)
sinogram = radon(frame, theta=theta, circle=True)
plot_frames((frame, sinogram))
print(np.sum(np.abs(sinogram[int(cent), :])))
if nproc is None:
nproc = cpu_count() // 2 # Hyper-threading doubles the # of cores
if satspots:
costfkt = _radon_costf2
else:
costfkt = _radon_costf
if nproc == 1:
costf = []
for coord in coords:
res = costfkt(frame, cent, radint, coord)
costf.append(res)
costf = np.array(costf)
elif nproc > 1:
res = pool_map(nproc, costfkt, frame, cent, radint, iterable(coords))
costf = np.array(res)
if verbose:
msg = 'Done {} radon transform calls distributed in {} processes'
print(msg.format(len(coords), nproc))
cost_bound = costf.reshape(listyx.shape[0], listyx.shape[0])
if plot:
plt.contour(cost_bound, cmap='CMRmap', origin='lower', linewidths=1)
plt.imshow(cost_bound, cmap='CMRmap', origin='lower',
interpolation='nearest')
plt.colorbar()
plt.grid(False)
plt.show()
# argm = np.argmax(costf) # index of 1st max in 1d cost function 'surface'
# optimy, optimx = coords[argm]
# maxima in the 2d cost function surface
num_max = np.where(cost_bound == cost_bound.max())[0].shape[0]
ind_maximay, ind_maximax = np.where(cost_bound == cost_bound.max())
argmy = ind_maximay[int(np.ceil(num_max/2)) - 1]
argmx = ind_maximax[int(np.ceil(num_max/2)) - 1]
y_grid = np.array(coords)[:, 0].reshape(listyx.shape[0], listyx.shape[0])
x_grid = np.array(coords)[:, 1].reshape(listyx.shape[0], listyx.shape[0])
optimy = y_grid[argmy, 0]
optimx = x_grid[0, argmx]
if verbose:
print('Cost function max: {}'.format(costf.max()))
print('Cost function # maxima: {}'.format(num_max))
msg = 'Finished grid search radon optimization. Y={:.5f}, X={:.5f}'
print(msg.format(optimy, optimx))
timing(start_time)
if full_output:
return cost_bound, optimy, optimx
else:
return optimy, optimx
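# Illustrative sketch (hypothetical parameters): the grid search explores
# offsets within +/- hsize of the frame center, in steps of `step`.
# >>> opt_y, opt_x = frame_center_radon(frame, cropsize=101, hsize=0.4,
# ...                                   step=0.01, nproc=2, plot=False)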
def _radon_costf(frame, cent, radint, coords):
""" Radon cost function used in frame_center_radon().
"""
frame_shifted = frame_shift(frame, coords[0], coords[1])
frame_shifted_ann = get_annulus_segments(frame_shifted, radint,
cent-radint, mode="mask")[0]
theta = np.linspace(start=0, stop=360, num=frame_shifted_ann.shape[0],
endpoint=False)
sinogram = radon(frame_shifted_ann, theta=theta, circle=True)
costf = np.sum(np.abs(sinogram[int(cent), :]))
return costf
def _radon_costf2(frame, cent, radint, coords):
""" Radon cost function used in frame_center_radon().
"""
frame_shifted = frame_shift(frame, coords[0], coords[1])
frame_shifted_ann = get_annulus_segments(frame_shifted, radint, cent-radint,
mode="mask")[0]
samples = 10
theta = np.hstack((np.linspace(start=40, stop=50, num=samples,
endpoint=False),
np.linspace(start=130, stop=140, num=samples,
endpoint=False),
np.linspace(start=220, stop=230, num=samples,
endpoint=False),
np.linspace(start=310, stop=320, num=samples,
endpoint=False)))
sinogram = radon(frame_shifted_ann, theta=theta, circle=True)
costf = np.sum(np.abs(sinogram[int(cent), :]))
return costf
def cube_recenter_radon(array, full_output=False, verbose=True, imlib='opencv',
interpolation='lanczos4', **kwargs):
""" Recenters a cube looping through its frames and calling the
``frame_center_radon`` function.
Parameters
----------
array : numpy ndarray
Input 3d array or cube.
full_output : {False, True}, bool optional
If True the recentered cube is returned along with the y and x shifts.
verbose : {True, False}, bool optional
Whether to print timing and intermediate information to stdout.
imlib : str, optional
See the documentation of the ``vip_hci.preproc.frame_shift`` function.
interpolation : str, optional
See the documentation of the ``vip_hci.preproc.frame_shift`` function.
cropsize : odd int, optional
Size in pixels of the cropped central area of the input array that will
be used. It should be large enough to contain the satellite spots.
hsize : float, optional
Size of the box for the grid search. The frame is shifted in each
direction from the center, within a range of hsize, with the given step.
step : float, optional
The step of the coordinates change.
mask_center : None or int, optional
If None the central area of the frame is kept. If int a centered zero
mask will be applied to the frame. By default the center isn't masked.
nproc : int, optional
Number of processes for parallel computing. If None the number of
processes will be set to cpu_count()/2.
debug : bool, optional
Whether to print and plot intermediate info from ``frame_center_radon``.
Returns
-------
array_rec : 3d ndarray
Recentered cube.
y, x : 1d arrays of floats
[full_output] Shifts in y and x.
"""
check_array(array, dim=3)
if verbose:
start_time = time_ini()
n_frames = array.shape[0]
x = np.zeros((n_frames))
y = np.zeros((n_frames))
array_rec = array.copy()
for i in Progressbar(range(n_frames), desc="frames", verbose=verbose):
y[i], x[i] = frame_center_radon(array[i], verbose=False, plot=False,
**kwargs)
array_rec[i] = frame_shift(array[i], y[i], x[i], imlib=imlib,
interpolation=interpolation)
if verbose:
timing(start_time)
if full_output:
return array_rec, y, x
else:
return array_rec
def cube_recenter_dft_upsampling(array, center_fr1=None, negative=False,
fwhm=4, subi_size=None, upsample_factor=100,
imlib='opencv', interpolation='lanczos4',
full_output=False, verbose=True, nproc=1,
save_shifts=False, debug=False, plot=True):
""" Recenters a cube of frames using the DFT upsampling method as
proposed in Guizar et al. 2008 and implemented in the
``register_translation`` function from scikit-image.
The algorithm (DFT upsampling) obtains an initial estimate of the
cross-correlation peak by an FFT and then refines the shift estimation by
upsampling the DFT only in a small neighborhood of that estimate by means
of a matrix-multiply DFT.
Parameters
----------
array : numpy ndarray
Input cube.
center_fr1 = (cy_1, cx_1) : Tuple, optional
Coordinates of the center of the subimage for fitting a 2d Gaussian and
centroiding the 1st frame.
negative : bool, optional
If True the centroiding of the 1st frames is done with a negative
2d Gaussian fit.
fwhm : float, optional
FWHM size in pixels.
subi_size : int or None, optional
Size of the square subimage sides in pixels, used to centroid the first
frame. If subi_size is None then the first frame is assumed to be
centered already.
nproc : int or None, optional
Number of processes (>1) for parallel computing. If 1 then it runs in
serial. If None the number of processes will be set to (cpu_count()/2).
upsample_factor : int, optional
Upsampling factor (default 100). Images will be registered to within
1/upsample_factor of a pixel.
imlib : str, optional
See the documentation of the ``vip_hci.preproc.frame_shift`` function.
interpolation : str, optional
See the documentation of the ``vip_hci.preproc.frame_shift`` function.
full_output : bool, optional
Whether to return 2 1d arrays of shifts along with the recentered cube
or not.
verbose : bool, optional
Whether to print to stdout the timing or not.
save_shifts : bool, optional
Whether to save the shifts to a file in disk.
debug : bool, optional
Whether to print to stdout the shifts or not.
plot : bool, optional
If True, the shifts are plotted.
Returns
-------
array_recentered : numpy ndarray
The recentered cube.
y : numpy ndarray
[full_output=True] 1d array with the shifts in y.
x : numpy ndarray
[full_output=True] 1d array with the shifts in x.
Notes
-----
Using the implementation from scikit-image of the algorithm described in
Guizar-Sicairos et al. "Efficient subpixel image registration algorithms,"
Opt. Lett. 33, 156-158 (2008). This algorithm registers two images (2-D
rigid translation) within a fraction of a pixel specified by the user.
Instead of computing a zero-padded FFT (fast Fourier transform), this code
uses selective upsampling by a matrix-multiply DFT (discrete FT) to
dramatically reduce computation time and memory without sacrificing
accuracy. With this procedure all the image points are used to compute the
upsampled cross-correlation in a very small neighborhood around its peak.
"""
if verbose:
start_time = time_ini()
check_array(array, dim=3)
n_frames, sizey, sizex = array.shape
if subi_size is not None:
if center_fr1 is None:
print('`cx_1` or `cy_1` not provided')
print('Using the coordinates of the 1st frame center for '
'the Gaussian 2d fit')
cy_1, cx_1 = frame_center(array[0])
else:
cy_1, cx_1 = center_fr1
if not isinstance(subi_size, int):
raise ValueError('subi_size must be an integer or None')
if subi_size < fwhm:
raise ValueError('`subi_size` (value in pixels) is too small')
if sizey % 2 == 0:
if subi_size % 2 != 0:
subi_size += 1
print('`subi_size` is odd (while frame size is even)')
print('Setting `subi_size` to {} pixels'.format(subi_size))
else:
if subi_size % 2 == 0:
subi_size += 1
print('`subi_size` is even (while frame size is odd)')
print('Setting `subi_size` to {} pixels'.format(subi_size))
n_frames = array.shape[0]
x = np.zeros((n_frames))
y = np.zeros((n_frames))
array_rec = array.copy()
cy, cx = frame_center(array[0])
# Centroiding first frame with 2d gaussian and shifting
msg0 = "The rest of the frames will be shifted by cross-correlation wrt the" \
" 1st"
if subi_size is not None:
y1, x1 = _centroid_2dg_frame(array_rec, 0, subi_size, cy_1, cx_1,
negative, debug, fwhm)
x[0] = cx - x1
y[0] = cy - y1
array_rec[0] = frame_shift(array_rec[0], shift_y=y[0], shift_x=x[0],
imlib=imlib, interpolation=interpolation)
if verbose:
msg = "Shift for first frame X,Y=({:.3f}, {:.3f})"
print(msg.format(x[0], y[0]))
print(msg0)
if debug:
titd = "original / shifted 1st frame subimage"
plot_frames((frame_crop(array[0], subi_size, verbose=False),
frame_crop(array_rec[0], subi_size, verbose=False)),
grid=True, title=titd)
else:
if verbose:
print("The first frame is assumed to be well centered wrt the"
"center of the array")
print(msg0)
x[0] = 0
y[0] = 0
# Finding the shifts with DFT upsampling of each frame wrt the first
if nproc == 1:
for i in Progressbar(range(1, n_frames), desc="frames", verbose=verbose):
y[i], x[i], array_rec[i] = _shift_dft(array_rec, array, i,
upsample_factor, interpolation,
imlib)
elif nproc > 1:
res = pool_map(nproc, _shift_dft, array_rec, array,
iterable(range(1, n_frames)),
upsample_factor, interpolation, imlib)
res = np.array(res)
y[1:] = res[:,0]
x[1:] = res[:,1]
array_rec[1:] = [frames for frames in res[:,2]]
if debug:
print("\nShifts in X and Y")
for i in range(n_frames):
print(x[i], y[i])
if verbose:
timing(start_time)
if plot:
plt.figure(figsize=vip_figsize)
plt.plot(y, 'o-', label='shifts in y', alpha=0.5)
plt.plot(x, 'o-', label='shifts in x', alpha=0.5)
plt.legend(loc='best')
plt.grid('on', alpha=0.2)
plt.ylabel('Pixels')
plt.xlabel('Frame number')
plt.figure(figsize=vip_figsize)
b = int(np.sqrt(n_frames))
la = 'Histogram'
_ = plt.hist(x, bins=b, alpha=0.5, label=la + ' shifts X')
_ = plt.hist(y, bins=b, alpha=0.5, label=la + ' shifts Y')
plt.legend(loc='best')
plt.ylabel('Bin counts')
plt.xlabel('Pixels')
if save_shifts:
np.savetxt('recent_dft_shifts.txt', np.transpose([y, x]), fmt='%f')
if full_output:
return array_rec, y, x
else:
return array_rec
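# Illustrative sketch (assumed unsaturated PSF cube; `cy`, `cx` are
# hypothetical approximate star coordinates): the first frame is centroided
# with a 2d Gaussian in a subi_size box, then the remaining frames are
# registered to it by DFT upsampling (here to 1/100 pixel).
# >>> cube_c, y, x = cube_recenter_dft_upsampling(cube, center_fr1=(cy, cx),
# ...                                             fwhm=4.2, subi_size=15,
# ...                                             full_output=True,
# ...                                             plot=False)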
def _shift_dft(array_rec, array, frnum, upsample_factor, interpolation, imlib):
"""
Function used in cube_recenter_dft_upsampling.
"""
shift_yx, _, _ = register_translation(array_rec[0], array[frnum],
upsample_factor=upsample_factor)
y_i, x_i = shift_yx
array_rec_i = frame_shift(array[frnum], shift_y=y_i, shift_x=x_i,
imlib=imlib, interpolation=interpolation)
return y_i, x_i, array_rec_i
def cube_recenter_2dfit(array, xy=None, fwhm=4, subi_size=5, model='gauss',
nproc=1, imlib='opencv', interpolation='lanczos4',
offset=None, negative=False, threshold=False,
sigfactor=2, fix_neg=False, params_2g=None,
save_shifts=False, full_output=False, verbose=True,
debug=False, plot=True):
""" Recenters the frames of a cube. The shifts are found by fitting a 2d
Gaussian or Moffat to a subimage centered at ``xy``. This assumes the frames
don't have too large shifts (>5px). The frames are shifted using the
function frame_shift().
Parameters
----------
array : numpy ndarray
Input cube.
xy : tuple of integers or floats
Integer coordinates of the center of the subimage (wrt the original frame).
For the double gaussian fit with fixed negative gaussian, this should
correspond to the exact location of the center of the negative gaussian
(e.g. the center of the coronagraph mask) - in that case a tuple of
floats is also accepted.
fwhm : float or numpy ndarray
FWHM size in pixels, either one value (float) that will be the same for
the whole cube, or an array of floats with the same dimension as the
0th dim of array, containing the fwhm for each channel (e.g. in the case
of an ifs cube, where the fwhm varies with wavelength)
subi_size : int, optional
Size of the square subimage sides in pixels.
model : str, optional
Sets the type of fit to be used. 'gauss' for a 2d Gaussian fit,
'moff' for a 2d Moffat fit, 'airy' for a 2d Airy disk fit, and
'2gauss' for a 2d double Gaussian (positive+negative) fit.
nproc : int or None, optional
Number of processes (>1) for parallel computing. If 1 then it runs in
serial. If None the number of processes will be set to (cpu_count()/2).
imlib : str, optional
See the documentation of the ``vip_hci.preproc.frame_shift`` function.
interpolation : str, optional
See the documentation of the ``vip_hci.preproc.frame_shift`` function.
offset : tuple of floats, optional
If None the region of the frames used for the 2d Gaussian/Moffat fit is
shifted to the center of the images (2d arrays). If a tuple is given it
serves as the offset of the fitted area wrt the center of the 2d arrays.
negative : bool, optional
If True a negative 2d Gaussian/Moffat fit is performed.
fix_neg: bool, optional
In case of a double gaussian fit, whether to fix the parameters of the
negative gaussian. If True, they should be provided in params_2g.
params_2g: None or dictionary, optional
In case of a double gaussian fit, dictionary with either fixed or first
guess parameters for the double gaussian. E.g.:
params_2g = {'fwhm_neg': 3.5, 'fwhm_pos': (3.5,4.2), 'theta_neg': 48.,
'theta_pos':145., 'neg_amp': 0.5}
fwhm_neg: float or tuple with fwhm of neg gaussian
fwhm_pos: can be a tuple for x and y axes of pos gaussian (replaces fwhm)
theta_neg: trigonometric angle of the x axis of the neg gaussian (deg)
theta_pos: trigonometric angle of the x axis of the pos gaussian (deg)
neg_amp: amplitude of the neg gaussian wrt the amp of the positive one
Note: it is always recommended to provide theta_pos and theta_neg for a
better fit.
threshold : bool, optional
If True the background pixels (estimated using sigma clipped statistics)
will be replaced by small random Gaussian noise (recommended for 2g).
sigfactor: float, optional
If thresholding is performed, set the threshold in terms of
gaussian sigma in the subimage (will depend on your cropping size).
save_shifts : bool, optional
Whether to save the shifts to a file in disk.
full_output : bool, optional
Whether to return 2 1d arrays of shifts along with the recentered cube
or not.
verbose : bool, optional
Whether to print to stdout the timing or not.
debug : bool, optional
If True the details of the fitting are shown. Won't work when the cube
contains >20 frames (as it might produce an extremely long output).
plot : bool, optional
If True, the shifts are plotted.
Returns
-------
array_rec: numpy ndarray
The recentered cube.
y : numpy ndarray
[full_output=True] 1d array with the shifts in y.
x : numpy ndarray
[full_output=True] 1d array with the shifts in x.
"""
if verbose:
start_time = time_ini()
check_array(array, dim=3)
n_frames, sizey, sizex = array.shape
if not isinstance(subi_size, int):
raise ValueError('`subi_size` must be an integer')
if sizey % 2 == 0:
if subi_size % 2 != 0:
subi_size += 1
print('`subi_size` is odd (while frame size is even)')
print('Setting `subi_size` to {} pixels'.format(subi_size))
else:
if subi_size % 2 == 0:
subi_size += 1
print('`subi_size` is even (while frame size is odd)')
print('Setting `subi_size` to {} pixels'.format(subi_size))
if isinstance(fwhm, (float, int, np.float32, np.float64)):
fwhm = np.ones(n_frames) * fwhm
if debug and array.shape[0] > 20:
msg = 'Debug with a big array will produce a very long output. '
msg += 'Try with less than 20 frames in debug mode'
raise RuntimeWarning(msg)
if xy is not None:
pos_x, pos_y = xy
cond = model != '2gauss'
if (not isinstance(pos_x, int) or not isinstance(pos_y, int)) and cond:
raise TypeError('`xy` must be a tuple of integers')
else:
pos_y, pos_x = frame_center(array[0])
cy, cx = frame_center(array[0])
array_rec = np.empty_like(array)
if model == 'gauss':
func = _centroid_2dg_frame
elif model == 'moff':
func = _centroid_2dm_frame
elif model == 'airy':
func = _centroid_2da_frame
elif model == '2gauss':
func = _centroid_2d2g_frame
else:
raise ValueError('model not recognized')
if nproc is None:
nproc = cpu_count() // 2 # Hyper-threading doubles the # of cores
if nproc == 1:
res = []
print('2d {}-fitting'.format(model))
for i in Progressbar(range(n_frames), desc="frames", verbose=verbose):
if model == "2gauss":
args = [array, i, subi_size, pos_y, pos_x, debug, fwhm[i],
fix_neg, params_2g, threshold, sigfactor]
else:
args = [array, i, subi_size, pos_y, pos_x, negative, debug,
fwhm[i], threshold, sigfactor]
res.append(func(*args))
res = np.array(res)
elif nproc > 1:
if model == "2gauss":
args = [array, iterable(range(n_frames)), subi_size, pos_y, pos_x,
debug, iterable(fwhm), fix_neg, params_2g, threshold,
sigfactor]
else:
args = [array, iterable(range(n_frames)), subi_size, pos_y, pos_x,
negative, debug, iterable(fwhm), threshold, sigfactor]
res = pool_map(nproc, func, *args)
res = np.array(res)
y = cy - res[:, 0]
x = cx - res[:, 1]
if model == "2gauss" and not fix_neg:
y_neg = res[:, 2]
x_neg = res[:, 3]
fwhm_x = res[:, 4]
fwhm_y = res[:, 5]
fwhm_neg_x = res[:, 6]
fwhm_neg_y = res[:, 7]
theta = res[:, 8]
theta_neg = res[:, 9]
amp_pos = res[:,10]
amp_neg = res[:, 11]
if offset is not None:
offx, offy = offset
y -= offy
x -= offx
for i in Progressbar(range(n_frames), desc="Shifting", verbose=verbose):
if debug:
print("\nShifts in X and Y")
print(x[i], y[i])
array_rec[i] = frame_shift(array[i], y[i], x[i], imlib=imlib,
interpolation=interpolation)
if verbose:
timing(start_time)
if plot:
plt.figure(figsize=vip_figsize)
plt.plot(y, 'o-', label='shifts in y', alpha=0.5)
plt.plot(x, 'o-', label='shifts in x', alpha=0.5)
plt.legend(loc='best')
plt.grid('on', alpha=0.2)
plt.ylabel('Pixels')
plt.xlabel('Frame number')
plt.figure(figsize=vip_figsize)
b = int(np.sqrt(n_frames))
la = 'Histogram'
_ = plt.hist(x, bins=b, alpha=0.5, label=la + ' shifts X')
_ = plt.hist(y, bins=b, alpha=0.5, label=la + ' shifts Y')
if model == "2gauss" and not fix_neg:
_ = plt.hist(cx-x_neg, bins=b, alpha=0.5,
label=la + ' shifts X (neg gaussian)')
_ = plt.hist(cy-y_neg, bins=b, alpha=0.5,
label=la + ' shifts Y (neg gaussian)')
plt.legend(loc='best')
plt.ylabel('Bin counts')
plt.xlabel('Pixels')
if save_shifts:
np.savetxt('recent_gauss_shifts.txt', np.transpose([y, x]), fmt='%f')
if full_output:
if model == "2gauss" and not fix_neg:
return (array_rec, y, x, y_neg, x_neg, fwhm_x, fwhm_y, fwhm_neg_x,
fwhm_neg_y, theta, theta_neg, amp_pos, amp_neg)
return array_rec, y, x
else:
return array_rec
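# Illustrative sketch (hypothetical coordinates and FWHM): a 2d Moffat is
# fitted in a small box around the approximate star position in each frame.
# >>> cube_c, y, x = cube_recenter_2dfit(cube, xy=(63, 63), fwhm=4.0,
# ...                                    subi_size=7, model='moff', nproc=2,
# ...                                    full_output=True, plot=False)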
def _centroid_2dg_frame(cube, frnum, size, pos_y, pos_x, negative, debug,
fwhm, threshold=False, sigfactor=1):
""" Finds the centroid by using a 2d gaussian fitting in one frame from a
cube.
"""
sub_image, y1, x1 = get_square(cube[frnum], size=size, y=pos_y, x=pos_x,
position=True)
# negative gaussian fit
if negative:
sub_image = -sub_image + np.abs(np.min(-sub_image))
y_i, x_i = fit_2dgaussian(sub_image, crop=False, fwhmx=fwhm, fwhmy=fwhm,
threshold=threshold, sigfactor=sigfactor, debug=debug,
full_output=False)
y_i = y1 + y_i
x_i = x1 + x_i
return y_i, x_i
def _centroid_2dm_frame(cube, frnum, size, pos_y, pos_x, negative, debug,
fwhm, threshold=False, sigfactor=1):
""" Finds the centroid by using a 2d moffat fitting in one frame from a
cube.
"""
sub_image, y1, x1 = get_square(cube[frnum], size=size, y=pos_y, x=pos_x,
position=True)
# negative fit
if negative:
sub_image = -sub_image + np.abs(np.min(-sub_image))
y_i, x_i = fit_2dmoffat(sub_image, crop=False, fwhm=fwhm, debug=debug,
threshold=threshold, sigfactor=sigfactor,
full_output=False)
y_i = y1 + y_i
x_i = x1 + x_i
return y_i, x_i
def _centroid_2da_frame(cube, frnum, size, pos_y, pos_x, negative, debug,
fwhm, threshold=False, sigfactor=1):
""" Finds the centroid by using a 2d Airy disk fitting in one frame from a
cube.
"""
sub_image, y1, x1 = get_square(cube[frnum], size=size, y=pos_y, x=pos_x,
position=True)
# negative fit
if negative:
sub_image = -sub_image + np.abs(np.min(-sub_image))
y_i, x_i = fit_2dairydisk(sub_image, crop=False, fwhm=fwhm,
threshold=threshold, sigfactor=sigfactor,
full_output=False, debug=debug)
y_i = y1 + y_i
x_i = x1 + x_i
return y_i, x_i
def _centroid_2d2g_frame(cube, frnum, size, pos_y, pos_x, debug=False, fwhm=4,
fix_neg=True, params_2g=None, threshold=False,
sigfactor=1):
""" Finds the centroid by using a 2d double gaussian (positive+negative)
fitting in one frame from a cube. To be called from within
cube_recenter_2dfit().
"""
size = min(cube[frnum].shape[0],cube[frnum].shape[1],size)
#sub_image, y1, x1 = get_square_robust(cube[frnum], size=size, y=pos_y,
# x=pos_x, position=True)
# Use defaults when params_2g is not provided (avoids undefined names below)
if not isinstance(params_2g, dict):
params_2g = {}
fwhm_neg = params_2g.get('fwhm_neg', 0.8*fwhm)
fwhm_pos = params_2g.get('fwhm_pos', 2*fwhm)
theta_neg = params_2g.get('theta_neg', 0.)
theta_pos = params_2g.get('theta_pos', 0.)
neg_amp = params_2g.get('neg_amp', 1)
res_DF = fit_2d2gaussian(cube[frnum], crop=True, cent=(pos_x,pos_y),
cropsize=size, fwhm_neg=fwhm_neg, fwhm_pos=fwhm_pos,
neg_amp=neg_amp, fix_neg=fix_neg, theta_neg=theta_neg,
theta_pos=theta_pos, threshold=threshold,
sigfactor=sigfactor, full_output=True, debug=debug)
y_i = res_DF['centroid_y']
x_i = res_DF['centroid_x']
if not fix_neg:
y_neg = res_DF['centroid_y_neg']
x_neg = res_DF['centroid_x_neg']
fwhm_x = res_DF['fwhm_x']
fwhm_y = res_DF['fwhm_y']
fwhm_neg_x = res_DF['fwhm_x_neg']
fwhm_neg_y = res_DF['fwhm_y_neg']
theta = res_DF['theta']
theta_neg = res_DF['theta_neg']
amp_pos = res_DF['amplitude']
amp_neg = res_DF['amplitude_neg']
return (y_i, x_i, y_neg, x_neg, fwhm_x, fwhm_y, fwhm_neg_x, fwhm_neg_y,
theta, theta_neg, amp_pos, amp_neg)
return y_i, x_i
# TODO: make parameter names match the API
def cube_recenter_via_speckles(cube_sci, cube_ref=None, alignment_iter=5,
gammaval=1, min_spat_freq=0.5, max_spat_freq=3,
fwhm=4, debug=False, recenter_median=False,
fit_type='gaus', negative=True, crop=True,
subframesize=21, imlib='opencv',
interpolation='lanczos4', plot=True,
full_output=False):
""" Registers frames based on the median speckle pattern. Optionally centers
based on the position of the vortex null in the median frame. Images are
filtered to isolate speckle spatial frequencies.
Parameters
----------
cube_sci : numpy ndarray
Science cube.
cube_ref : numpy ndarray
Reference cube (e.g. for NIRC2 data in RDI mode).
alignment_iter : int, optional
Number of alignment iterations (recomputes median after each iteration).
gammaval : int, optional
Applies a gamma correction to emphasize speckles (useful for faint
stars).
min_spat_freq : float, optional
Spatial frequency for low pass filter.
max_spat_freq : float, optional
Spatial frequency for high pass filter.
fwhm : float, optional
Full width at half maximum.
debug : bool, optional
Outputs extra info.
recenter_median : bool, optional
Recenter the frames at each iteration based on a 2d fit.
fit_type : str, optional
If recenter_median is True, this is the model to which the image is
fitted to for recentering. 'gaus' works well for NIRC2_AGPM data.
'ann' works better for NACO+AGPM data.
negative : bool, optional
If True, uses a negative gaussian fit to determine the center of the
median frame.
crop: bool, optional
Whether to calculate the recentering on a cropped version of the cube
that is speckle-dominated (recommended).
subframesize : int, optional
Sub-frame window size used. Should cover the region where speckles are
the dominant noise source.
imlib : str, optional
Image processing library to use.
interpolation : str, optional
Interpolation method to use.
plot : bool, optional
If True, the shifts are plotted.
full_output: bool, optional
Whether to return more variables, useful for debugging.
Returns
-------
if full_output is False, returns:
cube_reg_sci: Registered science cube (numpy 3d ndarray)
If cube_ref is not None, also returns:
cube_reg_ref: Ref. cube registered to science frames (np 3d ndarray)
If full_output is True, returns in addition to the above:
cube_sci_lpf: Low+high-pass filtered science cube (np 3d ndarray)
cube_stret: Cube with stretched values used for cross-corr (np 3d ndarray)
cum_x_shifts_sci: Vector of x shifts for science frames (np 1d array)
cum_y_shifts_sci: Vector of y shifts for science frames (np 1d array)
And if cube_ref is not None, also returns:
cum_x_shifts_ref: Vector of x shifts for ref. frames.
cum_y_shifts_ref: Vector of y shifts for ref. frames.
"""
n, y, x = cube_sci.shape
check_array(cube_sci, dim=3)
if recenter_median and fit_type not in {'gaus','ann'}:
raise TypeError("fit type not recognized. Should be 'ann' or 'gaus'")
if crop and not subframesize < y/2.:
raise ValueError('`Subframesize` is too large')
if cube_ref is not None:
ref_star = True
nref = cube_ref.shape[0]
else:
ref_star = False
if crop:
cube_sci_subframe = cube_crop_frames(cube_sci, subframesize, verbose=False)
if ref_star:
cube_ref_subframe = cube_crop_frames(cube_ref, subframesize,
verbose=False)
else:
subframesize = cube_sci.shape[-1]
cube_sci_subframe = cube_sci.copy()
if ref_star:
cube_ref_subframe = cube_ref.copy()
ceny, cenx = frame_center(cube_sci_subframe[0])
print('Sub frame shape: {}'.format(cube_sci_subframe.shape))
print('Center pixel: ({}, {})'.format(ceny, cenx))
# Filtering cubes. Will be used for alignment purposes
cube_sci_lpf = cube_sci_subframe.copy()
if ref_star:
cube_ref_lpf = cube_ref_subframe.copy()
cube_sci_lpf = cube_sci_lpf + np.abs(np.min(cube_sci_lpf))
if ref_star:
cube_ref_lpf = cube_ref_lpf + np.abs(np.min(cube_ref_lpf))
median_size = int(fwhm * max_spat_freq)
# Remove spatial frequencies <0.5 lam/D and >3lam/D to isolate speckles
cube_sci_hpf = cube_filter_highpass(cube_sci_lpf, 'median-subt',
median_size=median_size, verbose=False)
if min_spat_freq>0:
cube_sci_lpf = cube_filter_lowpass(cube_sci_hpf, 'gauss',
fwhm_size=min_spat_freq * fwhm, verbose=False)
else:
cube_sci_lpf = cube_sci_hpf
if ref_star:
cube_ref_hpf = cube_filter_highpass(cube_ref_lpf, 'median-subt',
median_size=median_size,
verbose=False)
if min_spat_freq>0:
cube_ref_lpf = cube_filter_lowpass(cube_ref_hpf, 'gauss',
fwhm_size=min_spat_freq * fwhm,
verbose=False)
else:
cube_ref_lpf = cube_ref_hpf
if ref_star:
alignment_cube = np.zeros((1 + n + nref, subframesize, subframesize))
alignment_cube[1:(n + 1), :, :] = cube_sci_lpf
alignment_cube[(n + 1):(n + 2 + nref), :, :] = cube_ref_lpf
else:
alignment_cube = np.zeros((1 + n, subframesize, subframesize))
alignment_cube[1:(n + 1), :, :] = cube_sci_lpf
n_frames = alignment_cube.shape[0] # 1+n or 1+n+nref
cum_y_shifts = 0
cum_x_shifts = 0
for i in range(alignment_iter):
alignment_cube[0] = np.median(alignment_cube[1:(n + 1)], axis=0)
if recenter_median:
# Recenter the median frame using a 2d fit
if fit_type == 'gaus':
crop_sz = int(fwhm)
else:
crop_sz = int(6*fwhm)
if not crop_sz%2:
crop_sz+=1
sub_image, y1, x1 = get_square(alignment_cube[0], size=crop_sz,
y=ceny, x=cenx, position=True)
if fit_type == 'gaus':
if negative:
sub_image = -sub_image + np.abs(np.min(-sub_image))
y_i, x_i = fit_2dgaussian(sub_image, crop=False, threshold=False,
sigfactor=1, debug=debug,
full_output=False)
elif fit_type == 'ann':
y_i, x_i, rad = _fit_2dannulus(sub_image, fwhm=fwhm, crop=False,
hole_rad=0.5, sampl_cen=0.1,
sampl_rad=0.2, ann_width=0.5,
unc_in=2.)
yshift = ceny - (y1 + y_i)
xshift = cenx - (x1 + x_i)
alignment_cube[0] = frame_shift(alignment_cube[0, :, :], yshift,
xshift, imlib=imlib,
interpolation=interpolation)
# center the cube with stretched values
cube_stret = np.log10((np.abs(alignment_cube) + 1) ** gammaval)
_, y_shift, x_shift = cube_recenter_dft_upsampling(cube_stret,
(ceny, cenx),
fwhm=fwhm,
subi_size=None,
full_output=True,
verbose=False,
plot=False)
sqsum_shifts = np.sum(np.sqrt(y_shift ** 2 + x_shift ** 2))
print('Square sum of shift vecs: ' + str(sqsum_shifts))
for j in range(1, n_frames):
alignment_cube[j] = frame_shift(alignment_cube[j], y_shift[j],
x_shift[j], imlib=imlib,
interpolation=interpolation)
cum_y_shifts += y_shift
cum_x_shifts += x_shift
cube_reg_sci = cube_sci.copy()
cum_y_shifts_sci = cum_y_shifts[1:(n + 1)]
cum_x_shifts_sci = cum_x_shifts[1:(n + 1)]
for i in range(n):
cube_reg_sci[i] = frame_shift(cube_sci[i], cum_y_shifts_sci[i],
cum_x_shifts_sci[i], imlib=imlib,
interpolation=interpolation)
if plot:
plt.figure(figsize=vip_figsize)
plt.plot(cum_x_shifts_sci, 'o-', label='Shifts in x', alpha=0.5)
plt.plot(cum_y_shifts_sci, 'o-', label='Shifts in y', alpha=0.5)
plt.legend(loc='best')
plt.grid('on', alpha=0.2)
plt.ylabel('Pixels')
plt.xlabel('Frame number')
plt.figure(figsize=vip_figsize)
b = int(np.sqrt(n))
la = 'Histogram'
_ = plt.hist(cum_x_shifts_sci, bins=b, alpha=0.5, label=la+' shifts X')
_ = plt.hist(cum_y_shifts_sci, bins=b, alpha=0.5, label=la+' shifts Y')
plt.legend(loc='best')
plt.ylabel('Bin counts')
plt.xlabel('Pixels')
if ref_star:
cube_reg_ref = cube_ref.copy()
cum_y_shifts_ref = cum_y_shifts[(n + 1):]
cum_x_shifts_ref = cum_x_shifts[(n + 1):]
for i in range(nref):
cube_reg_ref[i] = frame_shift(cube_ref[i], cum_y_shifts_ref[i],
cum_x_shifts_ref[i], imlib=imlib,
interpolation=interpolation)
if ref_star:
if full_output:
return (cube_reg_sci, cube_reg_ref, cube_sci_lpf, cube_stret,
cum_x_shifts_sci, cum_y_shifts_sci, cum_x_shifts_ref,
cum_y_shifts_ref)
else:
return (cube_reg_sci, cube_reg_ref)
else:
if full_output:
return (cube_reg_sci, cube_sci_lpf, cube_stret,
cum_x_shifts_sci, cum_y_shifts_sci)
else:
return cube_reg_sci
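# Illustrative sketch (assumed coronagraphic science and reference cubes):
# the cubes are filtered to isolate the speckle spatial frequencies before
# the cross-correlation-based alignment.
# >>> res = cube_recenter_via_speckles(cube_sci, cube_ref=cube_ref,
# ...                                  fwhm=4.2, subframesize=41,
# ...                                  plot=False)
# >>> cube_sci_reg, cube_ref_reg = res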
def _fit_2dannulus(array, fwhm=4, crop=False, cent=None, cropsize=15,
hole_rad=0.5, sampl_cen=0.1, sampl_rad=None, ann_width=0.5,
unc_in=2.):
"""Finds the center the center of a donut-shape signal (e.g. a coronagraphic
PSF) by fitting an annulus, using a grid of positions for the center and
radius of the annulus. The best fit is found by maximizing the mean flux
measured in the annular mask. Requires the image to be already roughly
centered (by an uncertainty provided by unc_in).
Parameters
----------
array : array_like
Image with a single donut-like source, already approximately at the
center of the frame.
fwhm : float
Gaussian PSF full width half maximum from fitting (in pixels).
hole_rad: float, opt
First estimate of the hole radius (in terms of fwhm). The grid search
on the radius of the optimal annulus goes from 0.5 to 2 times hole_rad.
Note: for the AGPM PSF of VLT/NACO, the optimal hole_rad ~ 0.5FWHM.
sampl_cen: float, opt
Precision of the grid sampling to find the center of the annulus (in
pixels)
sampl_rad: float, opt or None.
Precision of the grid sampling to find the optimal radius of the
annulus (in pixels). If set to None, there is no grid search for the
optimal radius of the annulus, the value given by hole_rad is used.
ann_width: float, opt
Width of the annulus in FWHM; default is 0.5 FWHM.
unc_in: float, opt
Initial uncertainty on the center location (with respect to center of
input subframe) in pixels; this will set the grid width.
Returns
-------
mean_y : float
Source centroid y position on the full image from fitting.
mean_x : float
Source centroid x position on the full image from fitting.
if sampl_rad is not None, also returns final_hole_rad:
final_hole_rad : float
Best fit radius of the hole, in terms of fwhm.
"""
if cent is None:
ceny, cenx = frame_center(array)
else:
cenx, ceny = cent
if crop:
x_sub_px = cenx%1
y_sub_px = ceny%1
imside = array.shape[0]
psf_subimage, suby, subx = get_square(array, min(cropsize, imside),
int(ceny), int(cenx),
position=True)
ceny, cenx = frame_center(psf_subimage)
ceny+=y_sub_px
cenx+=x_sub_px
else:
psf_subimage = array.copy()
ann_sz = ann_width*fwhm
grid_sh_x = np.arange(-unc_in,unc_in,sampl_cen)
grid_sh_y = np.arange(-unc_in,unc_in,sampl_cen)
if sampl_rad is None:
rads = [hole_rad*fwhm]
else:
rads = np.arange(0.5*hole_rad*fwhm,2*hole_rad*fwhm,sampl_rad)
flux_ann = np.zeros([grid_sh_x.shape[0],grid_sh_y.shape[0]])
best_rad = np.zeros([grid_sh_x.shape[0],grid_sh_y.shape[0]])
for ii, xx in enumerate(grid_sh_x):
for jj, yy in enumerate(grid_sh_y):
tmp_tmp = frame_shift(array,yy,xx)
for rr, rad in enumerate(rads):
# mean flux in the annulus
tmp = frame_basic_stats(tmp_tmp, 'annulus',inner_radius=rad,
size=ann_sz, plot=False)
if tmp > flux_ann[ii,jj]:
flux_ann[ii,jj] = tmp
best_rad[ii,jj] = rad
i_max,j_max = np.unravel_index(np.argmax(flux_ann),flux_ann.shape)
mean_x = cenx - grid_sh_x[i_max]
mean_y = ceny - grid_sh_y[j_max]
if sampl_rad is None:
return mean_y, mean_x
else:
final_hole_rad = best_rad[i_max,j_max]/fwhm
return mean_y, mean_x, final_hole_rad
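# Illustrative sketch (hypothetical AGPM-like frame, roughly centered): grid
# search on both the annulus center and, since sampl_rad is given, its
# radius.
# >>> yc, xc, rad = _fit_2dannulus(frame, fwhm=4.0, hole_rad=0.5,
# ...                              sampl_cen=0.1, sampl_rad=0.2)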
| mit
stevenliuit/neon | neon/diagnostics/visualize_rnn.py | 4 | 6174 |
# ----------------------------------------------------------------------------
# Copyright 2014 Nervana Systems Inc.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ----------------------------------------------------------------------------
"""
Visualization for recurrent neural networks
"""
import numpy as np
from neon.util.compat import range
class VisualizeRNN(object):
"""
Visualizing weight matrices during training.
"""
def __init__(self):
import matplotlib.pyplot
self.plt = matplotlib.pyplot
self.plt.interactive(1)
def plot_weights(self, weights_in, weights_rec, weights_out):
"""
Visualize the three weight matrices after every epoch. Serves to
check that weights are structured, not exploding, and get updated.
"""
self.plt.figure(2)
self.plt.clf()
self.plt.subplot(1, 3, 1)
self.plt.imshow(weights_in.T, vmin=-1, vmax=1, interpolation='nearest')
self.plt.title('input.T')
self.plt.subplot(1, 3, 2)
self.plt.imshow(weights_rec, vmin=-1, vmax=1, interpolation='nearest')
self.plt.title('recurrent')
self.plt.subplot(1, 3, 3)
self.plt.imshow(weights_out, vmin=-1, vmax=1, interpolation='nearest')
self.plt.title('output')
self.plt.colorbar()
self.plt.draw()
self.plt.show()
def plot_lstm_wts(self, lstm_layer, scale=1, fig=4):
"""
Visualize the LSTM weight matrices after every epoch. Serves to
check that weights are structured, not exploding, and get updated.
"""
self.plt.figure(fig)
self.plt.clf()
pltidx = 1
for lbl, wts in zip(lstm_layer.param_names, lstm_layer.params[:4]):
self.plt.subplot(2, 4, pltidx)
self.plt.imshow(wts.asnumpyarray().T, vmin=-scale, vmax=scale,
interpolation='nearest')
self.plt.title(lbl + ' Wx.T')
pltidx += 1
for lbl, wts, bs in zip(lstm_layer.param_names,
lstm_layer.params[4:8],
lstm_layer.params[8:12]):
self.plt.subplot(2, 4, pltidx)
self.plt.imshow(np.hstack((wts.asnumpyarray(),
bs.asnumpyarray(),
bs.asnumpyarray())).T,
vmin=-scale, vmax=scale, interpolation='nearest')
self.plt.title(lbl + ' Wh.T')
pltidx += 1
self.plt.draw()
self.plt.show()
def plot_lstm_acts(self, lstm_layer, scale=1, fig=4):
acts_lbl = ['i_t', 'f_t', 'o_t', 'g_t', 'net_i', 'c_t', 'c_t', 'c_phi']
acts_stp = [0, 0, 0, 1, 0, 0, 1, 1]
self.plt.figure(fig)
self.plt.clf()
for idx, lbl in enumerate(acts_lbl):
act_tsr = getattr(lstm_layer, lbl)[acts_stp[idx]]
self.plt.subplot(2, 4, idx+1)
self.plt.imshow(act_tsr.asnumpyarray().T,
vmin=-scale, vmax=scale, interpolation='nearest')
self.plt.title(lbl + '[' + str(acts_stp[idx]) + '].T')
self.plt.draw()
self.plt.show()
def plot_error(self, suberror_list, error_list):
self.plt.figure(1)
self.plt.clf()
self.plt.plot(np.arange(len(suberror_list)) /
np.float(len(suberror_list)) *
len(error_list), suberror_list)
self.plt.plot(error_list, linewidth=2)
self.plt.ylim((min(suberror_list), max(error_list)))
self.plt.draw()
self.plt.show()
def plot_activations(self, pre1, out1, pre2, out2, targets):
"""
Loop over tau unrolling steps, at each time step show the pre-acts
and outputs of the recurrent layer and output layer. Note that the
pre-acts are actually the g', so if the activation is linear it will
be one.
"""
self.plt.figure(3)
self.plt.clf()
for i in range(len(pre1)): # loop over unrolling
self.plt.subplot(len(pre1), 5, 5 * i + 1)
self.plt.imshow(pre1[i].asnumpyarray(), vmin=-1, vmax=1,
interpolation='nearest')
if i == 0:
self.plt.title('pre1 or g\'1')
self.plt.subplot(len(pre1), 5, 5 * i + 2)
self.plt.imshow(out1[i].asnumpyarray(), vmin=-1, vmax=1,
interpolation='nearest')
if i == 0:
self.plt.title('out1')
self.plt.subplot(len(pre1), 5, 5 * i + 3)
self.plt.imshow(pre2[i].asnumpyarray(), vmin=-1, vmax=1,
interpolation='nearest')
if i == 0:
self.plt.title('pre2 or g\'2')
self.plt.subplot(len(pre1), 5, 5 * i + 4)
self.plt.imshow(out2[i].asnumpyarray(), vmin=-1, vmax=1,
interpolation='nearest')
if i == 0:
self.plt.title('out2')
self.plt.subplot(len(pre1), 5, 5 * i + 5)
self.plt.imshow(targets[i].asnumpyarray(),
vmin=-1, vmax=1, interpolation='nearest')
if i == 0:
self.plt.title('target')
self.plt.draw()
self.plt.show()
def print_text(self, inputs, outputs):
"""
Moved this here so it's legal to use numpy.
"""
print("Prediction inputs")
print(np.argmax(inputs, 0).asnumpyarray().astype(np.int8).view('c'))
print("Prediction outputs")
print(np.argmax(outputs, 0).asnumpyarray().astype(np.int8).view('c'))
|
apache-2.0
|
lthurlow/Network-Grapher
|
proj/external/matplotlib-1.2.1/build/lib.linux-i686-2.7/matplotlib/delaunay/triangulate.py
|
2
|
9909
|
from __future__ import print_function
import warnings
# 2.3 compatibility
try:
set
except NameError:
import sets
set = sets.Set
from itertools import izip
import numpy as np
from matplotlib._delaunay import delaunay
from interpolate import LinearInterpolator, NNInterpolator
__all__ = ['Triangulation', 'DuplicatePointWarning']
class DuplicatePointWarning(RuntimeWarning):
"""Duplicate points were passed in to the triangulation routine.
"""
class Triangulation(object):
"""A Delaunay triangulation of points in a plane.
Triangulation(x, y)
x, y -- the coordinates of the points as 1-D arrays of floats
Let us make the following definitions:
npoints = number of points input
nedges = number of edges in the triangulation
ntriangles = number of triangles in the triangulation
point_id = an integer identifying a particular point (specifically, an
index into x and y), range(0, npoints)
edge_id = an integer identifying a particular edge, range(0, nedges)
triangle_id = an integer identifying a particular triangle
range(0, ntriangles)
Attributes: (all should be treated as read-only to maintain consistency)
x, y -- the coordinates of the points as 1-D arrays of floats.
circumcenters -- (ntriangles, 2) array of floats giving the (x,y)
coordinates of the circumcenters of each triangle (indexed by a
triangle_id).
edge_db -- (nedges, 2) array of point_id's giving the points forming
each edge in no particular order; indexed by an edge_id.
triangle_nodes -- (ntriangles, 3) array of point_id's giving the points
forming each triangle in counter-clockwise order; indexed by a
triangle_id.
triangle_neighbors -- (ntriangles, 3) array of triangle_id's giving the
neighboring triangle; indexed by a triangle_id.
The value can also be -1 meaning that that edge is on the convex hull of
the points and there is no neighbor on that edge. The values are ordered
such that triangle_neighbors[tri, i] corresponds with the edge
*opposite* triangle_nodes[tri, i]. As such, these neighbors are also in
counter-clockwise order.
hull -- list of point_id's giving the nodes which form the convex hull
of the point set. This list is sorted in counter-clockwise order.
Duplicate points.
If there are no duplicate points, Triangulation stores the specified
x and y arrays and there is no difference between the client's and
Triangulation's understanding of point indices used in edge_db,
triangle_nodes and hull.
If there are duplicate points, they are removed from the stored
self.x and self.y as the underlying delaunay code cannot deal with
duplicates. len(self.x) is therefore equal to len(x) minus the
number of duplicate points. Triangulation's edge_db, triangle_nodes
and hull refer to point indices in self.x and self.y, for internal
consistency within Triangulation and the corresponding Interpolator
classes. Client code must take care to deal with this in one of
two ways:
1. Ignore the x,y it specified in Triangulation's constructor and
use triangulation.x and triangulation.y instead, as these are
consistent with edge_db, triangle_nodes and hull.
2. If using the x,y the client specified then edge_db,
triangle_nodes and hull should be passed through the function
to_client_point_indices() first.
"""
def __init__(self, x, y):
self.x = np.asarray(x, dtype=np.float64)
self.y = np.asarray(y, dtype=np.float64)
if self.x.shape != self.y.shape or len(self.x.shape) != 1:
raise ValueError("x,y must be equal-length 1-D arrays")
self.old_shape = self.x.shape
duplicates = self._get_duplicate_point_indices()
if len(duplicates) > 0:
warnings.warn(
"Input data contains duplicate x,y points; some values are ignored.",
DuplicatePointWarning,
)
# self.j_unique is the array of non-duplicate indices, in
# increasing order.
self.j_unique = np.delete(np.arange(len(self.x)), duplicates)
self.x = self.x[self.j_unique]
self.y = self.y[self.j_unique]
else:
self.j_unique = None
# If there are duplicate points, need a map of point indices used
# by delaunay to those used by client. If there are no duplicate
# points then the map is not needed. Either way, the map is
# conveniently the same as j_unique, so share it.
self._client_point_index_map = self.j_unique
self.circumcenters, self.edge_db, self.triangle_nodes, \
self.triangle_neighbors = delaunay(self.x, self.y)
self.hull = self._compute_convex_hull()
def _get_duplicate_point_indices(self):
"""Return array of indices of x,y points that are duplicates of
previous points. Indices are in no particular order.
"""
# Indices of sorted x,y points.
j_sorted = np.lexsort(keys=(self.x, self.y))
mask_duplicates = np.hstack([
False,
(np.diff(self.x[j_sorted]) == 0) & (np.diff(self.y[j_sorted]) == 0),
])
# Array of duplicate point indices, in no particular order.
return j_sorted[mask_duplicates]
def _compute_convex_hull(self):
"""Extract the convex hull from the triangulation information.
The output will be a list of point_id's in counter-clockwise order
forming the convex hull of the data set.
"""
border = (self.triangle_neighbors == -1)
edges = {}
edges.update(dict(izip(self.triangle_nodes[border[:,0]][:,1],
self.triangle_nodes[border[:,0]][:,2])))
edges.update(dict(izip(self.triangle_nodes[border[:,1]][:,2],
self.triangle_nodes[border[:,1]][:,0])))
edges.update(dict(izip(self.triangle_nodes[border[:,2]][:,0],
self.triangle_nodes[border[:,2]][:,1])))
# Take an arbitrary starting point and its subsequent node
hull = list(edges.popitem())
while edges:
hull.append(edges.pop(hull[-1]))
# hull[-1] == hull[0], so remove hull[-1]
hull.pop()
return hull
def to_client_point_indices(self, array):
"""Converts any array of point indices used within this class to
refer to point indices within the (x,y) arrays specified in the
constructor before duplicates were removed.
"""
if self._client_point_index_map is not None:
return self._client_point_index_map[array]
else:
return array
def linear_interpolator(self, z, default_value=np.nan):
"""Get an object which can interpolate within the convex hull by
assigning a plane to each triangle.
z -- an array of floats giving the known function values at each point
in the triangulation.
"""
z = np.asarray(z, dtype=np.float64)
if z.shape != self.old_shape:
raise ValueError("z must be the same shape as x and y")
if self.j_unique is not None:
z = z[self.j_unique]
return LinearInterpolator(self, z, default_value)
def nn_interpolator(self, z, default_value=np.nan):
"""Get an object which can interpolate within the convex hull by
the natural neighbors method.
z -- an array of floats giving the known function values at each point
in the triangulation.
"""
z = np.asarray(z, dtype=np.float64)
if z.shape != self.old_shape:
raise ValueError("z must be the same shape as x and y")
if self.j_unique is not None:
z = z[self.j_unique]
return NNInterpolator(self, z, default_value)
def prep_extrapolator(self, z, bbox=None):
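"""Return (new_triangulation, extended_z) for building an extrapolator.
Three pseudo-points are placed well outside the data (and outside the
optional bbox = (minx, maxx, miny, maxy)), and their z values are taken
from a least-squares plane fit to the known data, so that the convex
hull of the new triangulation covers the requested region.
"""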
if bbox is None:
bbox = (self.x[0], self.x[0], self.y[0], self.y[0])
minx, maxx, miny, maxy = np.asarray(bbox, np.float64)
minx = min(minx, np.minimum.reduce(self.x))
miny = min(miny, np.minimum.reduce(self.y))
maxx = max(maxx, np.maximum.reduce(self.x))
maxy = max(maxy, np.maximum.reduce(self.y))
M = max((maxx-minx)/2, (maxy-miny)/2)
midx = (minx + maxx)/2.0
midy = (miny + maxy)/2.0
xp, yp= np.array([[midx+3*M, midx, midx-3*M],
[midy, midy+3*M, midy-3*M]])
x1 = np.hstack((self.x, xp))
y1 = np.hstack((self.y, yp))
newtri = self.__class__(x1, y1)
# do a least-squares fit to a plane to make pseudo-data
xy1 = np.ones((len(self.x), 3), np.float64)
xy1[:,0] = self.x
xy1[:,1] = self.y
from numpy.dual import lstsq
c, res, rank, s = lstsq(xy1, z)
zp = np.hstack((z, xp*c[0] + yp*c[1] + c[2]))
return newtri, zp
def nn_extrapolator(self, z, bbox=None, default_value=np.nan):
newtri, zp = self.prep_extrapolator(z, bbox)
return newtri.nn_interpolator(zp, default_value)
def linear_extrapolator(self, z, bbox=None, default_value=np.nan):
newtri, zp = self.prep_extrapolator(z, bbox)
return newtri.linear_interpolator(zp, default_value)
def node_graph(self):
"""Return a graph of node_id's pointing to node_id's.
The arcs of the graph correspond to the edges in the triangulation.
{node_id: set([node_id, ...]), ...}
"""
g = {}
for i, j in self.edge_db:
s = g.setdefault(i, set())
s.add(j)
s = g.setdefault(j, set())
s.add(i)
return g
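# A minimal usage sketch (editorial illustration, not part of the original
# module), assuming a Python 2 / matplotlib 1.2.x environment where the
# compiled matplotlib._delaunay extension is importable. It exercises only
# attributes and methods defined above; _example_usage is a hypothetical
# helper name.
def _example_usage():
    # Four corners of the unit square plus a centre point given twice; the
    # second copy triggers DuplicatePointWarning and is dropped internally.
    x = np.array([0.0, 1.0, 0.0, 1.0, 0.5, 0.5])
    y = np.array([0.0, 0.0, 1.0, 1.0, 0.5, 0.5])
    tri = Triangulation(x, y)
    # hull indices refer to the de-duplicated tri.x/tri.y; map them back to
    # the caller's original point indexing.
    hull_client = tri.to_client_point_indices(np.asarray(tri.hull))
    print(hull_client)
    # Edge adjacency as {point_id: set([point_id, ...])}.
    print(tri.node_graph())
    # Interpolator for z = x + y over the convex hull of the points.
    interp = tri.linear_interpolator(x + y)
    return tri, interp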
|
mit
|
andnovar/ggplot
|
ggplot/tests/test_geom_bar.py
|
12
|
2959
|
from __future__ import (absolute_import, division, print_function,
unicode_literals)
from . import get_assert_same_ggplot, cleanup
assert_same_ggplot = get_assert_same_ggplot(__file__)
from ggplot import *
from ggplot.exampledata import diamonds
import numpy as np
import pandas as pd
import datetime
def _build_testing_df():
df = pd.DataFrame({
"x": np.arange(0, 10),
"y": np.arange(0, 10),
"z": np.arange(0, 10),
"a": [1,1,1,1,1,2,2,2,3,3],
"b": ["a","a","a","a","a","b","b","b","c","c"]
})
df['facets'] = np.where(df.x > 4, 'over', 'under')
df['facets2'] = np.where((df.x % 2) == 0, 'even', 'uneven')
return df
@cleanup
def test_labels_auto():
df = pd.DataFrame({ "y" : [3.362, 1.2, 3.424, 2.574, 0.679],
"x" : ["BF","BF","Flann","FastMatch","FastMatch2"],
"c" : ["a", "b", "a", "a","a"]})
p = ggplot(df, aes(x = 'x', y = 'y', fill="c"))
gg = p + geom_bar(stat="bar")
assert_same_ggplot(gg, "labels_auto")
@cleanup
def test_labels_manual():
df = pd.DataFrame({ "y" : [3.362, 1.2, 3.424, 2.574, 0.679],
"x" : ["BF","BF","Flann","FastMatch","FastMatch2"],
"c" : ["a", "b", "a", "a","a"]})
p = ggplot(df, aes(x = 'x', y = 'y', fill="c"))
gg2 = p + geom_bar(stat="bar", labels=["BF","Flann","FastMatch"])
assert_same_ggplot(gg2, "labels_manual")
@cleanup
def test_facet_grid_discrete():
df = _build_testing_df()
gg = ggplot(aes(x='a', y='y', fill='y'), data=df)
assert_same_ggplot(gg + geom_bar(stat='bar') + facet_grid(x="facets", y="facets2"),
"faceting_grid_discrete")
@cleanup
def test_facet_wrap_discrete():
df = _build_testing_df()
gg = ggplot(aes(x='a', y='y'), data=df)
assert_same_ggplot(gg + geom_bar(stat='bar') + facet_wrap(x="facets"), "faceting_wrap_discrete")
@cleanup
def test_facet_colors():
gg = ggplot(diamonds, aes(x = 'clarity', fill = 'cut', color='cut')) +\
stat_bin(binwidth=1200) + facet_wrap("color")
assert_same_ggplot(gg, "facet_colors")
# @cleanup
# def test_date_hist():
# dates = [datetime.date(2014, 3, i) for i in range(1, 31)]
# gg = ggplot(pd.DataFrame({"x": dates}), aes(x='x')) + geom_histogram()
# assert_same_ggplot(gg, "geom_hist_date")
@cleanup
def test_color_hist():
data = { "a" : np.concatenate([np.repeat("a", int(3.262*100)),
np.repeat("b", int(2.574*100))]),
"c" : np.concatenate([np.repeat("c1", int(3.262*40)+1),
np.repeat("c2", int(3.262*60)),
np.repeat("c1", int(2.574*55)+1),
np.repeat("c2", int(2.574*45))])}
df2 = pd.DataFrame(data)
gg = ggplot(df2, aes(x = 'a', fill="c")) + geom_histogram()
assert_same_ggplot(gg, "color_hist")
|
bsd-2-clause
|
tomchor/pymicra
|
docs/conf.py
|
1
|
9346
|
# -*- coding: utf-8 -*-
#
# pymicra documentation build configuration file, created by
# sphinx-quickstart on Mon Jul 4 11:10:41 2016.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys
import os
from shutil import copyfile
copyfile('README.rst', '../README.rst')
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
sys.path.insert(0, os.path.abspath('../'))
import pymicra as pm
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
'sphinx.ext.todo',
'sphinx.ext.coverage',
'sphinx.ext.mathjax',
'IPython.sphinxext.ipython_directive',
'IPython.sphinxext.ipython_console_highlighting',
'sphinx.ext.autodoc',
'sphinx.ext.napoleon'
# 'numpydoc'
]
ipython_exec_lines = [
'import numpy as np',
'import pandas as pd',
# This ensures correct rendering on systems with console encoding != utf8
# (windows). It forces pandas to encode its output reprs using utf8
# wherever the docs are built. The docs' target is the browser, not
# the console, so this is fine.
'pd.options.display.encoding="utf8"'
]
# Add any paths that contain templates here, relative to this directory.
#templates_path = ['_templates']
todo_include_todos=True
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'Pymicra'
copyright = u'2016, Tomás Chor'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
try:
#version = open('../pymicra/version').read().strip()
version = pm.__version__
except:
# we seem to have a local copy that was not installed via setuptools
version = ''
# The full version, including alpha/beta/rc tags.
release = ''
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
today_fmt = '%Y-%m-%d'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build']
# The reST default role (used for this markup: `text`) to use for all
# documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built documents.
#keep_warnings = False
logo = '_static/starry2.jpg'
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'bizstyle'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
html_logo = logo
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# Add any extra paths that contain custom files (such as robots.txt or
# .htaccess) here, relative to this directory. These files are copied
# directly to the root of the documentation.
html_extra_path = []
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
html_last_updated_fmt = '%Y-%m-%d'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
html_sidebars = {
'**': ['globaltoc.html', 'sourcelink.html', 'searchbox.html', 'localtoc.html', 'relations.html'],
'using/windows': ['windowssidebar.html', 'searchbox.html'],
}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'pymicradoc'
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
'papersize': 'a4paper',
# Gets rid of blank pages
'classoptions': ',oneside',
'babel': r'\usepackage[english]{babel}'
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
('index', 'pymicra-v{}.tex'.format(version), u'Pymicra Documentation',
u'Tomás Chor', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
latex_logo = logo
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
('index', 'pymicra', u'pymicra Documentation',
[u'Tomás Chor'], 1)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
('index', 'pymicra', u'pymicra Documentation',
u'Tomás Chor', 'pymicra', 'A Python tool for Micrometeorological Analyses.',
'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
# If true, do not generate a @detailmenu in the "Top" node's menu.
#texinfo_no_detailmenu = False
|
gpl-3.0
|
johankaito/fufuka
|
microblog/flask/venv/lib/python2.7/site-packages/numpy/linalg/linalg.py
|
35
|
67345
|
"""Lite version of scipy.linalg.
Notes
-----
This module is a lite version of the linalg.py module in SciPy which
contains high-level Python interface to the LAPACK library. The lite
version only accesses the following LAPACK functions: dgesv, zgesv,
dgeev, zgeev, dgesdd, zgesdd, dgelsd, zgelsd, dsyevd, zheevd, dgetrf,
zgetrf, dpotrf, zpotrf, dgeqrf, zgeqrf, zungqr, dorgqr.
"""
from __future__ import division, absolute_import, print_function
__all__ = ['matrix_power', 'solve', 'tensorsolve', 'tensorinv', 'inv',
'cholesky', 'eigvals', 'eigvalsh', 'pinv', 'slogdet', 'det',
'svd', 'eig', 'eigh', 'lstsq', 'norm', 'qr', 'cond', 'matrix_rank',
'LinAlgError']
import warnings
from numpy.core import (
array, asarray, zeros, empty, empty_like, transpose, intc, single, double,
csingle, cdouble, inexact, complexfloating, newaxis, ravel, all, Inf, dot,
add, multiply, sqrt, maximum, fastCopyAndTranspose, sum, isfinite, size,
finfo, errstate, geterrobj, longdouble, rollaxis, amin, amax, product, abs,
broadcast
)
from numpy.lib import triu, asfarray
from numpy.linalg import lapack_lite, _umath_linalg
from numpy.matrixlib.defmatrix import matrix_power
from numpy.compat import asbytes
# For Python2/3 compatibility
_N = asbytes('N')
_V = asbytes('V')
_A = asbytes('A')
_S = asbytes('S')
_L = asbytes('L')
fortran_int = intc
# Error object
class LinAlgError(Exception):
"""
Generic Python-exception-derived object raised by linalg functions.
General purpose exception class, derived from Python's exception.Exception
class, programmatically raised in linalg functions when a Linear
Algebra-related condition would prevent further correct execution of the
function.
Parameters
----------
None
Examples
--------
>>> from numpy import linalg as LA
>>> LA.inv(np.zeros((2,2)))
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
File "...linalg.py", line 350,
in inv return wrap(solve(a, identity(a.shape[0], dtype=a.dtype)))
File "...linalg.py", line 249,
in solve
raise LinAlgError('Singular matrix')
numpy.linalg.LinAlgError: Singular matrix
"""
pass
# Dealing with errors in _umath_linalg
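# (The gufuncs in _umath_linalg report numerical failures through the
# floating point error machinery: _determine_error_states() below records an
# error-state object whose mask routes invalid operations to a Python
# callback, and get_linalg_error_extobj(callback) plugs a LinAlgError-raising
# callback into that object so it can be handed to a gufunc via extobj=.)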
_linalg_error_extobj = None
def _determine_error_states():
global _linalg_error_extobj
errobj = geterrobj()
bufsize = errobj[0]
with errstate(invalid='call', over='ignore',
divide='ignore', under='ignore'):
invalid_call_errmask = geterrobj()[1]
_linalg_error_extobj = [bufsize, invalid_call_errmask, None]
_determine_error_states()
def _raise_linalgerror_singular(err, flag):
raise LinAlgError("Singular matrix")
def _raise_linalgerror_nonposdef(err, flag):
raise LinAlgError("Matrix is not positive definite")
def _raise_linalgerror_eigenvalues_nonconvergence(err, flag):
raise LinAlgError("Eigenvalues did not converge")
def _raise_linalgerror_svd_nonconvergence(err, flag):
raise LinAlgError("SVD did not converge")
def get_linalg_error_extobj(callback):
extobj = list(_linalg_error_extobj)
extobj[2] = callback
return extobj
def _makearray(a):
new = asarray(a)
wrap = getattr(a, "__array_prepare__", new.__array_wrap__)
return new, wrap
def isComplexType(t):
return issubclass(t, complexfloating)
_real_types_map = {single : single,
double : double,
csingle : single,
cdouble : double}
_complex_types_map = {single : csingle,
double : cdouble,
csingle : csingle,
cdouble : cdouble}
def _realType(t, default=double):
return _real_types_map.get(t, default)
def _complexType(t, default=cdouble):
return _complex_types_map.get(t, default)
def _linalgRealType(t):
"""Cast the type t to either double or cdouble."""
return double
_complex_types_map = {single : csingle,
double : cdouble,
csingle : csingle,
cdouble : cdouble}
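# _commonType returns (computation dtype, result dtype): computation is
# always carried out in double (or cdouble when any input is complex), while
# the result dtype stays single precision only when every input is float32
# or complex64, e.g. float32 -> (double, single),
# complex64 -> (cdouble, csingle), int -> (double, double).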
def _commonType(*arrays):
# in lite version, use higher precision (always double or cdouble)
result_type = single
is_complex = False
for a in arrays:
if issubclass(a.dtype.type, inexact):
if isComplexType(a.dtype.type):
is_complex = True
rt = _realType(a.dtype.type, default=None)
if rt is None:
# unsupported inexact scalar
raise TypeError("array type %s is unsupported in linalg" %
(a.dtype.name,))
else:
rt = double
if rt is double:
result_type = double
if is_complex:
t = cdouble
result_type = _complex_types_map[result_type]
else:
t = double
return t, result_type
# _fastCopyAndTranspose assumes the input is 2D (as all the calls in here are).
_fastCT = fastCopyAndTranspose
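# _to_native_byte_order returns copies of its arguments converted to native
# byte order (dtype.newbyteorder('=')) where needed, since the lapack_lite
# routines called below expect native-endian data; arrays already marked
# '=' or '|' are passed through unchanged.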
def _to_native_byte_order(*arrays):
ret = []
for arr in arrays:
if arr.dtype.byteorder not in ('=', '|'):
ret.append(asarray(arr, dtype=arr.dtype.newbyteorder('=')))
else:
ret.append(arr)
if len(ret) == 1:
return ret[0]
else:
return ret
def _fastCopyAndTranspose(type, *arrays):
cast_arrays = ()
for a in arrays:
if a.dtype.type is type:
cast_arrays = cast_arrays + (_fastCT(a),)
else:
cast_arrays = cast_arrays + (_fastCT(a.astype(type)),)
if len(cast_arrays) == 1:
return cast_arrays[0]
else:
return cast_arrays
def _assertRank2(*arrays):
for a in arrays:
if len(a.shape) != 2:
raise LinAlgError('%d-dimensional array given. Array must be '
'two-dimensional' % len(a.shape))
def _assertRankAtLeast2(*arrays):
for a in arrays:
if len(a.shape) < 2:
raise LinAlgError('%d-dimensional array given. Array must be '
'at least two-dimensional' % len(a.shape))
def _assertSquareness(*arrays):
for a in arrays:
if max(a.shape) != min(a.shape):
raise LinAlgError('Array must be square')
def _assertNdSquareness(*arrays):
for a in arrays:
if max(a.shape[-2:]) != min(a.shape[-2:]):
raise LinAlgError('Last 2 dimensions of the array must be square')
def _assertFinite(*arrays):
for a in arrays:
if not (isfinite(a).all()):
raise LinAlgError("Array must not contain infs or NaNs")
def _assertNoEmpty2d(*arrays):
for a in arrays:
if a.size == 0 and product(a.shape[-2:]) == 0:
raise LinAlgError("Arrays cannot be empty")
# Linear equations
def tensorsolve(a, b, axes=None):
"""
Solve the tensor equation ``a x = b`` for x.
It is assumed that all indices of `x` are summed over in the product,
together with the rightmost indices of `a`, as is done in, for example,
``tensordot(a, x, axes=len(b.shape))``.
Parameters
----------
a : array_like
Coefficient tensor, of shape ``b.shape + Q``. `Q`, a tuple, equals
the shape of that sub-tensor of `a` consisting of the appropriate
number of its rightmost indices, and must be such that
``prod(Q) == prod(b.shape)`` (in which sense `a` is said to be
'square').
b : array_like
Right-hand tensor, which can be of any shape.
axes : tuple of ints, optional
Axes in `a` to reorder to the right, before inversion.
If None (default), no reordering is done.
Returns
-------
x : ndarray, shape Q
Raises
------
LinAlgError
If `a` is singular or not 'square' (in the above sense).
See Also
--------
tensordot, tensorinv, einsum
Examples
--------
>>> a = np.eye(2*3*4)
>>> a.shape = (2*3, 4, 2, 3, 4)
>>> b = np.random.randn(2*3, 4)
>>> x = np.linalg.tensorsolve(a, b)
>>> x.shape
(2, 3, 4)
>>> np.allclose(np.tensordot(a, x, axes=3), b)
True
"""
a, wrap = _makearray(a)
b = asarray(b)
an = a.ndim
if axes is not None:
allaxes = list(range(0, an))
for k in axes:
allaxes.remove(k)
allaxes.insert(an, k)
a = a.transpose(allaxes)
oldshape = a.shape[-(an-b.ndim):]
prod = 1
for k in oldshape:
prod *= k
a = a.reshape(-1, prod)
b = b.ravel()
res = wrap(solve(a, b))
res.shape = oldshape
return res
def solve(a, b):
"""
Solve a linear matrix equation, or system of linear scalar equations.
Computes the "exact" solution, `x`, of the well-determined, i.e., full
rank, linear matrix equation `ax = b`.
Parameters
----------
a : (..., M, M) array_like
Coefficient matrix.
b : {(..., M,), (..., M, K)}, array_like
Ordinate or "dependent variable" values.
Returns
-------
x : {(..., M,), (..., M, K)} ndarray
Solution to the system a x = b. Returned shape is identical to `b`.
Raises
------
LinAlgError
If `a` is singular or not square.
Notes
-----
Broadcasting rules apply, see the `numpy.linalg` documentation for
details.
The solutions are computed using LAPACK routine _gesv
`a` must be square and of full-rank, i.e., all rows (or, equivalently,
columns) must be linearly independent; if either is not true, use
`lstsq` for the least-squares best "solution" of the
system/equation.
References
----------
.. [1] G. Strang, *Linear Algebra and Its Applications*, 2nd Ed., Orlando,
FL, Academic Press, Inc., 1980, pg. 22.
Examples
--------
Solve the system of equations ``3 * x0 + x1 = 9`` and ``x0 + 2 * x1 = 8``:
>>> a = np.array([[3,1], [1,2]])
>>> b = np.array([9,8])
>>> x = np.linalg.solve(a, b)
>>> x
array([ 2., 3.])
Check that the solution is correct:
>>> np.allclose(np.dot(a, x), b)
True
"""
a, _ = _makearray(a)
_assertRankAtLeast2(a)
_assertNdSquareness(a)
b, wrap = _makearray(b)
t, result_t = _commonType(a, b)
# We use the b = (..., M,) logic only if the number of extra
# dimensions matches exactly
if b.ndim == a.ndim - 1:
if a.shape[-1] == 0 and b.shape[-1] == 0:
# Legal, but the ufunc cannot handle the 0-sized inner dims
# let the ufunc handle all wrong cases.
a = a.reshape(a.shape[:-1])
bc = broadcast(a, b)
return wrap(empty(bc.shape, dtype=result_t))
gufunc = _umath_linalg.solve1
else:
if b.size == 0:
if (a.shape[-1] == 0 and b.shape[-2] == 0) or b.shape[-1] == 0:
a = a[:,:1].reshape(a.shape[:-1] + (1,))
bc = broadcast(a, b)
return wrap(empty(bc.shape, dtype=result_t))
gufunc = _umath_linalg.solve
signature = 'DD->D' if isComplexType(t) else 'dd->d'
extobj = get_linalg_error_extobj(_raise_linalgerror_singular)
r = gufunc(a, b, signature=signature, extobj=extobj)
return wrap(r.astype(result_t))
def tensorinv(a, ind=2):
"""
Compute the 'inverse' of an N-dimensional array.
The result is an inverse for `a` relative to the tensordot operation
``tensordot(a, b, ind)``, i. e., up to floating-point accuracy,
``tensordot(tensorinv(a), a, ind)`` is the "identity" tensor for the
tensordot operation.
Parameters
----------
a : array_like
Tensor to 'invert'. Its shape must be 'square', i. e.,
``prod(a.shape[:ind]) == prod(a.shape[ind:])``.
ind : int, optional
Number of first indices that are involved in the inverse sum.
Must be a positive integer, default is 2.
Returns
-------
b : ndarray
`a`'s tensordot inverse, shape ``a.shape[ind:] + a.shape[:ind]``.
Raises
------
LinAlgError
If `a` is singular or not 'square' (in the above sense).
See Also
--------
tensordot, tensorsolve
Examples
--------
>>> a = np.eye(4*6)
>>> a.shape = (4, 6, 8, 3)
>>> ainv = np.linalg.tensorinv(a, ind=2)
>>> ainv.shape
(8, 3, 4, 6)
>>> b = np.random.randn(4, 6)
>>> np.allclose(np.tensordot(ainv, b), np.linalg.tensorsolve(a, b))
True
>>> a = np.eye(4*6)
>>> a.shape = (24, 8, 3)
>>> ainv = np.linalg.tensorinv(a, ind=1)
>>> ainv.shape
(8, 3, 24)
>>> b = np.random.randn(24)
>>> np.allclose(np.tensordot(ainv, b, 1), np.linalg.tensorsolve(a, b))
True
"""
a = asarray(a)
oldshape = a.shape
prod = 1
if ind > 0:
invshape = oldshape[ind:] + oldshape[:ind]
for k in oldshape[ind:]:
prod *= k
else:
raise ValueError("Invalid ind argument.")
a = a.reshape(prod, -1)
ia = inv(a)
return ia.reshape(*invshape)
# Matrix inversion
def inv(a):
"""
Compute the (multiplicative) inverse of a matrix.
Given a square matrix `a`, return the matrix `ainv` satisfying
``dot(a, ainv) = dot(ainv, a) = eye(a.shape[0])``.
Parameters
----------
a : (..., M, M) array_like
Matrix to be inverted.
Returns
-------
ainv : (..., M, M) ndarray or matrix
(Multiplicative) inverse of the matrix `a`.
Raises
------
LinAlgError
If `a` is not square or inversion fails.
Notes
-----
Broadcasting rules apply, see the `numpy.linalg` documentation for
details.
Examples
--------
>>> from numpy.linalg import inv
>>> a = np.array([[1., 2.], [3., 4.]])
>>> ainv = inv(a)
>>> np.allclose(np.dot(a, ainv), np.eye(2))
True
>>> np.allclose(np.dot(ainv, a), np.eye(2))
True
If a is a matrix object, then the return value is a matrix as well:
>>> ainv = inv(np.matrix(a))
>>> ainv
matrix([[-2. , 1. ],
[ 1.5, -0.5]])
Inverses of several matrices can be computed at once:
>>> a = np.array([[[1., 2.], [3., 4.]], [[1, 3], [3, 5]]])
>>> inv(a)
array([[[-2. , 1. ],
[ 1.5, -0.5]],
[[-5. , 2. ],
[ 3. , -1. ]]])
"""
a, wrap = _makearray(a)
_assertRankAtLeast2(a)
_assertNdSquareness(a)
t, result_t = _commonType(a)
if a.shape[-1] == 0:
# The inner array is 0x0, the ufunc cannot handle this case
return wrap(empty_like(a, dtype=result_t))
signature = 'D->D' if isComplexType(t) else 'd->d'
extobj = get_linalg_error_extobj(_raise_linalgerror_singular)
ainv = _umath_linalg.inv(a, signature=signature, extobj=extobj)
return wrap(ainv.astype(result_t))
# Cholesky decomposition
def cholesky(a):
"""
Cholesky decomposition.
Return the Cholesky decomposition, `L * L.H`, of the square matrix `a`,
where `L` is lower-triangular and .H is the conjugate transpose operator
(which is the ordinary transpose if `a` is real-valued). `a` must be
Hermitian (symmetric if real-valued) and positive-definite. Only `L` is
actually returned.
Parameters
----------
a : (..., M, M) array_like
Hermitian (symmetric if all elements are real), positive-definite
input matrix.
Returns
-------
L : (..., M, M) array_like
Upper or lower-triangular Cholesky factor of `a`. Returns a
matrix object if `a` is a matrix object.
Raises
------
LinAlgError
If the decomposition fails, for example, if `a` is not
positive-definite.
Notes
-----
Broadcasting rules apply, see the `numpy.linalg` documentation for
details.
The Cholesky decomposition is often used as a fast way of solving
.. math:: A \\mathbf{x} = \\mathbf{b}
(when `A` is both Hermitian/symmetric and positive-definite).
First, we solve for :math:`\\mathbf{y}` in
.. math:: L \\mathbf{y} = \\mathbf{b},
and then for :math:`\\mathbf{x}` in
.. math:: L.H \\mathbf{x} = \\mathbf{y}.
Examples
--------
>>> A = np.array([[1,-2j],[2j,5]])
>>> A
array([[ 1.+0.j, 0.-2.j],
[ 0.+2.j, 5.+0.j]])
>>> L = np.linalg.cholesky(A)
>>> L
array([[ 1.+0.j, 0.+0.j],
[ 0.+2.j, 1.+0.j]])
>>> np.dot(L, L.T.conj()) # verify that L * L.H = A
array([[ 1.+0.j, 0.-2.j],
[ 0.+2.j, 5.+0.j]])
>>> A = [[1,-2j],[2j,5]] # what happens if A is only array_like?
>>> np.linalg.cholesky(A) # an ndarray object is returned
array([[ 1.+0.j, 0.+0.j],
[ 0.+2.j, 1.+0.j]])
>>> # But a matrix object is returned if A is a matrix object
>>> LA.cholesky(np.matrix(A))
matrix([[ 1.+0.j, 0.+0.j],
[ 0.+2.j, 1.+0.j]])
"""
extobj = get_linalg_error_extobj(_raise_linalgerror_nonposdef)
gufunc = _umath_linalg.cholesky_lo
a, wrap = _makearray(a)
_assertRankAtLeast2(a)
_assertNdSquareness(a)
t, result_t = _commonType(a)
signature = 'D->D' if isComplexType(t) else 'd->d'
return wrap(gufunc(a, signature=signature, extobj=extobj).astype(result_t))
# QR decompostion
def qr(a, mode='reduced'):
"""
Compute the qr factorization of a matrix.
Factor the matrix `a` as *qr*, where `q` is orthonormal and `r` is
upper-triangular.
Parameters
----------
a : array_like, shape (M, N)
Matrix to be factored.
mode : {'reduced', 'complete', 'r', 'raw', 'full', 'economic'}, optional
If K = min(M, N), then
'reduced' : returns q, r with dimensions (M, K), (K, N) (default)
'complete' : returns q, r with dimensions (M, M), (M, N)
'r' : returns r only with dimensions (K, N)
'raw' : returns h, tau with dimensions (N, M), (K,)
'full' : alias of 'reduced', deprecated
'economic' : returns h from 'raw', deprecated.
The options 'reduced', 'complete', and 'raw' are new in numpy 1.8,
see the notes for more information. The default is 'reduced' and to
maintain backward compatibility with earlier versions of numpy both
it and the old default 'full' can be omitted. Note that array h
returned in 'raw' mode is transposed for calling Fortran. The
'economic' mode is deprecated. The modes 'full' and 'economic' may
be passed using only the first letter for backwards compatibility,
but all others must be spelled out. See the Notes for more
explanation.
Returns
-------
q : ndarray of float or complex, optional
A matrix with orthonormal columns. When mode = 'complete' the
result is an orthogonal/unitary matrix depending on whether or not
a is real/complex. The determinant may be either +/- 1 in that
case.
r : ndarray of float or complex, optional
The upper-triangular matrix.
(h, tau) : ndarrays of np.double or np.cdouble, optional
The array h contains the Householder reflectors that generate q
along with r. The tau array contains scaling factors for the
reflectors. In the deprecated 'economic' mode only h is returned.
Raises
------
LinAlgError
If factoring fails.
Notes
-----
This is an interface to the LAPACK routines dgeqrf, zgeqrf,
dorgqr, and zungqr.
For more information on the qr factorization, see for example:
http://en.wikipedia.org/wiki/QR_factorization
Subclasses of `ndarray` are preserved except for the 'raw' mode. So if
`a` is of type `matrix`, all the return values will be matrices too.
New 'reduced', 'complete', and 'raw' options for mode were added in
Numpy 1.8 and the old option 'full' was made an alias of 'reduced'. In
addition the options 'full' and 'economic' were deprecated. Because
'full' was the previous default and 'reduced' is the new default,
backward compatibility can be maintained by letting `mode` default.
The 'raw' option was added so that LAPACK routines that can multiply
arrays by q using the Householder reflectors can be used. Note that in
this case the returned arrays are of type np.double or np.cdouble and
the h array is transposed to be FORTRAN compatible. No routines using
the 'raw' return are currently exposed by numpy, but some are available
in lapack_lite and just await the necessary work.
Examples
--------
>>> a = np.random.randn(9, 6)
>>> q, r = np.linalg.qr(a)
>>> np.allclose(a, np.dot(q, r)) # a does equal qr
True
>>> r2 = np.linalg.qr(a, mode='r')
>>> r3 = np.linalg.qr(a, mode='economic')
>>> np.allclose(r, r2) # mode='r' returns the same r as mode='full'
True
>>> # But only triu parts are guaranteed equal when mode='economic'
>>> np.allclose(r, np.triu(r3[:6,:6], k=0))
True
Example illustrating a common use of `qr`: solving of least squares
problems
What are the least-squares-best `m` and `y0` in ``y = y0 + mx`` for
the following data: {(0,1), (1,0), (1,2), (2,1)}. (Graph the points
and you'll see that it should be y0 = 0, m = 1.) The answer is provided
by solving the over-determined matrix equation ``Ax = b``, where::
A = array([[0, 1], [1, 1], [1, 1], [2, 1]])
x = array([[y0], [m]])
b = array([[1], [0], [2], [1]])
If A = qr such that q is orthonormal (which is always possible via
Gram-Schmidt), then ``x = inv(r) * (q.T) * b``. (In numpy practice,
however, we simply use `lstsq`.)
>>> A = np.array([[0, 1], [1, 1], [1, 1], [2, 1]])
>>> A
array([[0, 1],
[1, 1],
[1, 1],
[2, 1]])
>>> b = np.array([1, 0, 2, 1])
>>> q, r = LA.qr(A)
>>> p = np.dot(q.T, b)
>>> np.dot(LA.inv(r), p)
array([ 1.1e-16, 1.0e+00])
"""
if mode not in ('reduced', 'complete', 'r', 'raw'):
if mode in ('f', 'full'):
msg = "".join((
"The 'full' option is deprecated in favor of 'reduced'.\n",
"For backward compatibility let mode default."))
warnings.warn(msg, DeprecationWarning)
mode = 'reduced'
elif mode in ('e', 'economic'):
msg = "The 'economic' option is deprecated.",
warnings.warn(msg, DeprecationWarning)
mode = 'economic'
else:
raise ValueError("Unrecognized mode '%s'" % mode)
a, wrap = _makearray(a)
_assertRank2(a)
_assertNoEmpty2d(a)
m, n = a.shape
t, result_t = _commonType(a)
a = _fastCopyAndTranspose(t, a)
a = _to_native_byte_order(a)
mn = min(m, n)
tau = zeros((mn,), t)
if isComplexType(t):
lapack_routine = lapack_lite.zgeqrf
routine_name = 'zgeqrf'
else:
lapack_routine = lapack_lite.dgeqrf
routine_name = 'dgeqrf'
# calculate optimal size of work data 'work'
lwork = 1
work = zeros((lwork,), t)
results = lapack_routine(m, n, a, m, tau, work, -1, 0)
if results['info'] != 0:
raise LinAlgError('%s returns %d' % (routine_name, results['info']))
# do qr decomposition
lwork = int(abs(work[0]))
work = zeros((lwork,), t)
results = lapack_routine(m, n, a, m, tau, work, lwork, 0)
if results['info'] != 0:
raise LinAlgError('%s returns %d' % (routine_name, results['info']))
# handle modes that don't return q
if mode == 'r':
r = _fastCopyAndTranspose(result_t, a[:, :mn])
return wrap(triu(r))
if mode == 'raw':
return a, tau
if mode == 'economic':
if t != result_t :
a = a.astype(result_t)
return wrap(a.T)
# generate q from a
if mode == 'complete' and m > n:
mc = m
q = empty((m, m), t)
else:
mc = mn
q = empty((n, m), t)
q[:n] = a
if isComplexType(t):
lapack_routine = lapack_lite.zungqr
routine_name = 'zungqr'
else:
lapack_routine = lapack_lite.dorgqr
routine_name = 'dorgqr'
# determine optimal lwork
lwork = 1
work = zeros((lwork,), t)
results = lapack_routine(m, mc, mn, q, m, tau, work, -1, 0)
if results['info'] != 0:
raise LinAlgError('%s returns %d' % (routine_name, results['info']))
# compute q
lwork = int(abs(work[0]))
work = zeros((lwork,), t)
results = lapack_routine(m, mc, mn, q, m, tau, work, lwork, 0)
if results['info'] != 0:
raise LinAlgError('%s returns %d' % (routine_name, results['info']))
q = _fastCopyAndTranspose(result_t, q[:mc])
r = _fastCopyAndTranspose(result_t, a[:, :mc])
return wrap(q), wrap(triu(r))
# Eigenvalues
def eigvals(a):
"""
Compute the eigenvalues of a general matrix.
Main difference between `eigvals` and `eig`: the eigenvectors aren't
returned.
Parameters
----------
a : (..., M, M) array_like
A complex- or real-valued matrix whose eigenvalues will be computed.
Returns
-------
w : (..., M,) ndarray
The eigenvalues, each repeated according to its multiplicity.
They are not necessarily ordered, nor are they necessarily
real for real matrices.
Raises
------
LinAlgError
If the eigenvalue computation does not converge.
See Also
--------
eig : eigenvalues and right eigenvectors of general arrays
eigvalsh : eigenvalues of symmetric or Hermitian arrays.
eigh : eigenvalues and eigenvectors of symmetric/Hermitian arrays.
Notes
-----
Broadcasting rules apply, see the `numpy.linalg` documentation for
details.
This is implemented using the _geev LAPACK routines which compute
the eigenvalues and eigenvectors of general square arrays.
Examples
--------
Illustration, using the fact that the eigenvalues of a diagonal matrix
are its diagonal elements, that multiplying a matrix on the left
by an orthogonal matrix, `Q`, and on the right by `Q.T` (the transpose
of `Q`), preserves the eigenvalues of the "middle" matrix. In other words,
if `Q` is orthogonal, then ``Q * A * Q.T`` has the same eigenvalues as
``A``:
>>> from numpy import linalg as LA
>>> x = np.random.random()
>>> Q = np.array([[np.cos(x), -np.sin(x)], [np.sin(x), np.cos(x)]])
>>> LA.norm(Q[0, :]), LA.norm(Q[1, :]), np.dot(Q[0, :],Q[1, :])
(1.0, 1.0, 0.0)
Now multiply a diagonal matrix by Q on one side and by Q.T on the other:
>>> D = np.diag((-1,1))
>>> LA.eigvals(D)
array([-1., 1.])
>>> A = np.dot(Q, D)
>>> A = np.dot(A, Q.T)
>>> LA.eigvals(A)
array([ 1., -1.])
"""
a, wrap = _makearray(a)
_assertNoEmpty2d(a)
_assertRankAtLeast2(a)
_assertNdSquareness(a)
_assertFinite(a)
t, result_t = _commonType(a)
extobj = get_linalg_error_extobj(
_raise_linalgerror_eigenvalues_nonconvergence)
signature = 'D->D' if isComplexType(t) else 'd->D'
w = _umath_linalg.eigvals(a, signature=signature, extobj=extobj)
if not isComplexType(t):
if all(w.imag == 0):
w = w.real
result_t = _realType(result_t)
else:
result_t = _complexType(result_t)
return w.astype(result_t)
def eigvalsh(a, UPLO='L'):
"""
Compute the eigenvalues of a Hermitian or real symmetric matrix.
Main difference from eigh: the eigenvectors are not computed.
Parameters
----------
a : (..., M, M) array_like
A complex- or real-valued matrix whose eigenvalues are to be
computed.
UPLO : {'L', 'U'}, optional
Specifies whether the calculation is done with the lower triangular
part of `a` ('L', default) or the upper triangular part ('U').
Returns
-------
w : (..., M,) ndarray
The eigenvalues, not necessarily ordered, each repeated according to
its multiplicity.
Raises
------
LinAlgError
If the eigenvalue computation does not converge.
See Also
--------
eigh : eigenvalues and eigenvectors of symmetric/Hermitian arrays.
eigvals : eigenvalues of general real or complex arrays.
eig : eigenvalues and right eigenvectors of general real or complex
arrays.
Notes
-----
Broadcasting rules apply, see the `numpy.linalg` documentation for
details.
The eigenvalues are computed using LAPACK routines _ssyevd, _heevd
Examples
--------
>>> from numpy import linalg as LA
>>> a = np.array([[1, -2j], [2j, 5]])
>>> LA.eigvalsh(a)
array([ 0.17157288+0.j, 5.82842712+0.j])
"""
UPLO = UPLO.upper()
if UPLO not in ('L', 'U'):
raise ValueError("UPLO argument must be 'L' or 'U'")
extobj = get_linalg_error_extobj(
_raise_linalgerror_eigenvalues_nonconvergence)
if UPLO == 'L':
gufunc = _umath_linalg.eigvalsh_lo
else:
gufunc = _umath_linalg.eigvalsh_up
a, wrap = _makearray(a)
_assertNoEmpty2d(a)
_assertRankAtLeast2(a)
_assertNdSquareness(a)
t, result_t = _commonType(a)
signature = 'D->d' if isComplexType(t) else 'd->d'
w = gufunc(a, signature=signature, extobj=extobj)
return w.astype(_realType(result_t))
def _convertarray(a):
t, result_t = _commonType(a)
a = _fastCT(a.astype(t))
return a, t, result_t
# Eigenvectors
def eig(a):
"""
Compute the eigenvalues and right eigenvectors of a square array.
Parameters
----------
a : (..., M, M) array
Matrices for which the eigenvalues and right eigenvectors will
be computed
Returns
-------
w : (..., M) array
The eigenvalues, each repeated according to its multiplicity.
The eigenvalues are not necessarily ordered. The resulting
array will be always be of complex type. When `a` is real
the resulting eigenvalues will be real (0 imaginary part) or
occur in conjugate pairs
v : (..., M, M) array
The normalized (unit "length") eigenvectors, such that the
column ``v[:,i]`` is the eigenvector corresponding to the
eigenvalue ``w[i]``.
Raises
------
LinAlgError
If the eigenvalue computation does not converge.
See Also
--------
eigvalsh : eigenvalues of a symmetric or Hermitian (conjugate symmetric)
array.
eigvals : eigenvalues of a non-symmetric array.
Notes
-----
Broadcasting rules apply, see the `numpy.linalg` documentation for
details.
This is implemented using the _geev LAPACK routines which compute
the eigenvalues and eigenvectors of general square arrays.
The number `w` is an eigenvalue of `a` if there exists a vector
`v` such that ``dot(a,v) = w * v``. Thus, the arrays `a`, `w`, and
`v` satisfy the equations ``dot(a[:,:], v[:,i]) = w[i] * v[:,i]``
for :math:`i \\in \\{0,...,M-1\\}`.
The array `v` of eigenvectors may not be of maximum rank, that is, some
of the columns may be linearly dependent, although round-off error may
obscure that fact. If the eigenvalues are all different, then theoretically
the eigenvectors are linearly independent. Likewise, the (complex-valued)
matrix of eigenvectors `v` is unitary if the matrix `a` is normal, i.e.,
if ``dot(a, a.H) = dot(a.H, a)``, where `a.H` denotes the conjugate
transpose of `a`.
Finally, it is emphasized that `v` consists of the *right* (as in
right-hand side) eigenvectors of `a`. A vector `y` satisfying
``dot(y.T, a) = z * y.T`` for some number `z` is called a *left*
eigenvector of `a`, and, in general, the left and right eigenvectors
of a matrix are not necessarily the (perhaps conjugate) transposes
of each other.
References
----------
G. Strang, *Linear Algebra and Its Applications*, 2nd Ed., Orlando, FL,
Academic Press, Inc., 1980, Various pp.
Examples
--------
>>> from numpy import linalg as LA
(Almost) trivial example with real e-values and e-vectors.
>>> w, v = LA.eig(np.diag((1, 2, 3)))
>>> w; v
array([ 1., 2., 3.])
array([[ 1., 0., 0.],
[ 0., 1., 0.],
[ 0., 0., 1.]])
Real matrix possessing complex e-values and e-vectors; note that the
e-values are complex conjugates of each other.
>>> w, v = LA.eig(np.array([[1, -1], [1, 1]]))
>>> w; v
array([ 1. + 1.j, 1. - 1.j])
array([[ 0.70710678+0.j , 0.70710678+0.j ],
[ 0.00000000-0.70710678j, 0.00000000+0.70710678j]])
Complex-valued matrix with real e-values (but complex-valued e-vectors);
note that a.conj().T = a, i.e., a is Hermitian.
>>> a = np.array([[1, 1j], [-1j, 1]])
>>> w, v = LA.eig(a)
>>> w; v
array([ 2.00000000e+00+0.j, 5.98651912e-36+0.j]) # i.e., {2, 0}
array([[ 0.00000000+0.70710678j, 0.70710678+0.j ],
[ 0.70710678+0.j , 0.00000000+0.70710678j]])
Be careful about round-off error!
>>> a = np.array([[1 + 1e-9, 0], [0, 1 - 1e-9]])
>>> # Theor. e-values are 1 +/- 1e-9
>>> w, v = LA.eig(a)
>>> w; v
array([ 1., 1.])
array([[ 1., 0.],
[ 0., 1.]])
"""
a, wrap = _makearray(a)
_assertRankAtLeast2(a)
_assertNdSquareness(a)
_assertFinite(a)
t, result_t = _commonType(a)
extobj = get_linalg_error_extobj(
_raise_linalgerror_eigenvalues_nonconvergence)
signature = 'D->DD' if isComplexType(t) else 'd->DD'
w, vt = _umath_linalg.eig(a, signature=signature, extobj=extobj)
if not isComplexType(t) and all(w.imag == 0.0):
w = w.real
vt = vt.real
result_t = _realType(result_t)
else:
result_t = _complexType(result_t)
vt = vt.astype(result_t)
return w.astype(result_t), wrap(vt)
def eigh(a, UPLO='L'):
"""
Return the eigenvalues and eigenvectors of a Hermitian or symmetric matrix.
Returns two objects, a 1-D array containing the eigenvalues of `a`, and
a 2-D square array or matrix (depending on the input type) of the
corresponding eigenvectors (in columns).
Parameters
----------
a : (..., M, M) array
Hermitian/Symmetric matrices whose eigenvalues and
eigenvectors are to be computed.
UPLO : {'L', 'U'}, optional
Specifies whether the calculation is done with the lower triangular
part of `a` ('L', default) or the upper triangular part ('U').
Returns
-------
w : (..., M) ndarray
The eigenvalues, not necessarily ordered.
v : {(..., M, M) ndarray, (..., M, M) matrix}
The column ``v[:, i]`` is the normalized eigenvector corresponding
to the eigenvalue ``w[i]``. Will return a matrix object if `a` is
a matrix object.
Raises
------
LinAlgError
If the eigenvalue computation does not converge.
See Also
--------
eigvalsh : eigenvalues of symmetric or Hermitian arrays.
eig : eigenvalues and right eigenvectors for non-symmetric arrays.
eigvals : eigenvalues of non-symmetric arrays.
Notes
-----
Broadcasting rules apply, see the `numpy.linalg` documentation for
details.
The eigenvalues/eigenvectors are computed using LAPACK routines _ssyevd,
_heevd
The eigenvalues of real symmetric or complex Hermitian matrices are
always real. [1]_ The array `v` of (column) eigenvectors is unitary
and `a`, `w`, and `v` satisfy the equations
``dot(a, v[:, i]) = w[i] * v[:, i]``.
References
----------
.. [1] G. Strang, *Linear Algebra and Its Applications*, 2nd Ed., Orlando,
FL, Academic Press, Inc., 1980, pg. 222.
Examples
--------
>>> from numpy import linalg as LA
>>> a = np.array([[1, -2j], [2j, 5]])
>>> a
array([[ 1.+0.j, 0.-2.j],
[ 0.+2.j, 5.+0.j]])
>>> w, v = LA.eigh(a)
>>> w; v
array([ 0.17157288, 5.82842712])
array([[-0.92387953+0.j , -0.38268343+0.j ],
[ 0.00000000+0.38268343j, 0.00000000-0.92387953j]])
>>> np.dot(a, v[:, 0]) - w[0] * v[:, 0] # verify 1st e-val/vec pair
array([2.77555756e-17 + 0.j, 0. + 1.38777878e-16j])
>>> np.dot(a, v[:, 1]) - w[1] * v[:, 1] # verify 2nd e-val/vec pair
array([ 0.+0.j, 0.+0.j])
>>> A = np.matrix(a) # what happens if input is a matrix object
>>> A
matrix([[ 1.+0.j, 0.-2.j],
[ 0.+2.j, 5.+0.j]])
>>> w, v = LA.eigh(A)
>>> w; v
array([ 0.17157288, 5.82842712])
matrix([[-0.92387953+0.j , -0.38268343+0.j ],
[ 0.00000000+0.38268343j, 0.00000000-0.92387953j]])
"""
UPLO = UPLO.upper()
if UPLO not in ('L', 'U'):
raise ValueError("UPLO argument must be 'L' or 'U'")
a, wrap = _makearray(a)
_assertRankAtLeast2(a)
_assertNdSquareness(a)
t, result_t = _commonType(a)
extobj = get_linalg_error_extobj(
_raise_linalgerror_eigenvalues_nonconvergence)
if UPLO == 'L':
gufunc = _umath_linalg.eigh_lo
else:
gufunc = _umath_linalg.eigh_up
signature = 'D->dD' if isComplexType(t) else 'd->dd'
w, vt = gufunc(a, signature=signature, extobj=extobj)
w = w.astype(_realType(result_t))
vt = vt.astype(result_t)
return w, wrap(vt)
# Singular value decomposition
def svd(a, full_matrices=1, compute_uv=1):
"""
Singular Value Decomposition.
Factors the matrix `a` as ``u * np.diag(s) * v``, where `u` and `v`
are unitary and `s` is a 1-d array of `a`'s singular values.
Parameters
----------
a : (..., M, N) array_like
A real or complex matrix of shape (`M`, `N`) .
full_matrices : bool, optional
If True (default), `u` and `v` have the shapes (`M`, `M`) and
(`N`, `N`), respectively. Otherwise, the shapes are (`M`, `K`)
and (`K`, `N`), respectively, where `K` = min(`M`, `N`).
compute_uv : bool, optional
Whether or not to compute `u` and `v` in addition to `s`. True
by default.
Returns
-------
u : { (..., M, M), (..., M, K) } array
Unitary matrices. The actual shape depends on the value of
``full_matrices``. Only returned when ``compute_uv`` is True.
s : (..., K) array
The singular values for every matrix, sorted in descending order.
v : { (..., N, N), (..., K, N) } array
Unitary matrices. The actual shape depends on the value of
``full_matrices``. Only returned when ``compute_uv`` is True.
Raises
------
LinAlgError
If SVD computation does not converge.
Notes
-----
Broadcasting rules apply, see the `numpy.linalg` documentation for
details.
The decomposition is performed using LAPACK routine _gesdd
The SVD is commonly written as ``a = U S V.H``. The `v` returned
by this function is ``V.H`` and ``u = U``.
If ``U`` is a unitary matrix, it means that it
satisfies ``U.H = inv(U)``.
The rows of `v` are the eigenvectors of ``a.H a``. The columns
of `u` are the eigenvectors of ``a a.H``. For row ``i`` in
`v` and column ``i`` in `u`, the corresponding eigenvalue is
``s[i]**2``.
If `a` is a `matrix` object (as opposed to an `ndarray`), then so
are all the return values.
Examples
--------
>>> a = np.random.randn(9, 6) + 1j*np.random.randn(9, 6)
Reconstruction based on full SVD:
>>> U, s, V = np.linalg.svd(a, full_matrices=True)
>>> U.shape, V.shape, s.shape
((9, 9), (6, 6), (6,))
>>> S = np.zeros((9, 6), dtype=complex)
>>> S[:6, :6] = np.diag(s)
>>> np.allclose(a, np.dot(U, np.dot(S, V)))
True
Reconstruction based on reduced SVD:
>>> U, s, V = np.linalg.svd(a, full_matrices=False)
>>> U.shape, V.shape, s.shape
((9, 6), (6, 6), (6,))
>>> S = np.diag(s)
>>> np.allclose(a, np.dot(U, np.dot(S, V)))
True
"""
a, wrap = _makearray(a)
_assertNoEmpty2d(a)
_assertRankAtLeast2(a)
t, result_t = _commonType(a)
extobj = get_linalg_error_extobj(_raise_linalgerror_svd_nonconvergence)
m = a.shape[-2]
n = a.shape[-1]
if compute_uv:
if full_matrices:
if m < n:
gufunc = _umath_linalg.svd_m_f
else:
gufunc = _umath_linalg.svd_n_f
else:
if m < n:
gufunc = _umath_linalg.svd_m_s
else:
gufunc = _umath_linalg.svd_n_s
signature = 'D->DdD' if isComplexType(t) else 'd->ddd'
u, s, vt = gufunc(a, signature=signature, extobj=extobj)
u = u.astype(result_t)
s = s.astype(_realType(result_t))
vt = vt.astype(result_t)
return wrap(u), s, wrap(vt)
else:
if m < n:
gufunc = _umath_linalg.svd_m
else:
gufunc = _umath_linalg.svd_n
signature = 'D->d' if isComplexType(t) else 'd->d'
s = gufunc(a, signature=signature, extobj=extobj)
s = s.astype(_realType(result_t))
return s
def cond(x, p=None):
"""
Compute the condition number of a matrix.
This function is capable of returning the condition number using
one of seven different norms, depending on the value of `p` (see
Parameters below).
Parameters
----------
x : (M, N) array_like
The matrix whose condition number is sought.
p : {None, 1, -1, 2, -2, inf, -inf, 'fro'}, optional
Order of the norm:
===== ============================
p norm for matrices
===== ============================
None 2-norm, computed directly using the ``SVD``
'fro' Frobenius norm
inf max(sum(abs(x), axis=1))
-inf min(sum(abs(x), axis=1))
1 max(sum(abs(x), axis=0))
-1 min(sum(abs(x), axis=0))
2 2-norm (largest sing. value)
-2 smallest singular value
===== ============================
inf means the numpy.inf object, and the Frobenius norm is
the root-of-sum-of-squares norm.
Returns
-------
c : {float, inf}
The condition number of the matrix. May be infinite.
See Also
--------
numpy.linalg.norm
Notes
-----
The condition number of `x` is defined as the norm of `x` times the
norm of the inverse of `x` [1]_; the norm can be the usual L2-norm
(root-of-sum-of-squares) or one of a number of other matrix norms.
References
----------
.. [1] G. Strang, *Linear Algebra and Its Applications*, Orlando, FL,
Academic Press, Inc., 1980, pg. 285.
Examples
--------
>>> from numpy import linalg as LA
>>> a = np.array([[1, 0, -1], [0, 1, 0], [1, 0, 1]])
>>> a
array([[ 1, 0, -1],
[ 0, 1, 0],
[ 1, 0, 1]])
>>> LA.cond(a)
1.4142135623730951
>>> LA.cond(a, 'fro')
3.1622776601683795
>>> LA.cond(a, np.inf)
2.0
>>> LA.cond(a, -np.inf)
1.0
>>> LA.cond(a, 1)
2.0
>>> LA.cond(a, -1)
1.0
>>> LA.cond(a, 2)
1.4142135623730951
>>> LA.cond(a, -2)
0.70710678118654746
>>> min(LA.svd(a, compute_uv=0))*min(LA.svd(LA.inv(a), compute_uv=0))
0.70710678118654746
"""
x = asarray(x) # in case we have a matrix
if p is None:
s = svd(x, compute_uv=False)
return s[0]/s[-1]
else:
return norm(x, p)*norm(inv(x), p)
def matrix_rank(M, tol=None):
"""
Return matrix rank of array using SVD method
Rank of the array is the number of SVD singular values of the array that are
greater than `tol`.
Parameters
----------
M : {(M,), (M, N)} array_like
array of <=2 dimensions
tol : {None, float}, optional
threshold below which SVD values are considered zero. If `tol` is
None, and ``S`` is an array with singular values for `M`, and
``eps`` is the epsilon value for datatype of ``S``, then `tol` is
set to ``S.max() * max(M.shape) * eps``.
Notes
-----
The default threshold to detect rank deficiency is a test on the magnitude
of the singular values of `M`. By default, we identify singular values less
than ``S.max() * max(M.shape) * eps`` as indicating rank deficiency (with
the symbols defined above). This is the algorithm MATLAB uses [1]. It also
appears in *Numerical recipes* in the discussion of SVD solutions for linear
least squares [2].
This default threshold is designed to detect rank deficiency accounting for
the numerical errors of the SVD computation. Imagine that there is a column
in `M` that is an exact (in floating point) linear combination of other
columns in `M`. Computing the SVD on `M` will not produce a singular value
exactly equal to 0 in general: any difference of the smallest SVD value from
0 will be caused by numerical imprecision in the calculation of the SVD.
Our threshold for small SVD values takes this numerical imprecision into
account, and the default threshold will detect such numerical rank
deficiency. The threshold may declare a matrix `M` rank deficient even if
the linear combination of some columns of `M` is not exactly equal to
another column of `M` but only numerically very close to another column of
`M`.
We chose our default threshold because it is in wide use. Other thresholds
are possible. For example, elsewhere in the 2007 edition of *Numerical
recipes* there is an alternative threshold of ``S.max() *
np.finfo(M.dtype).eps / 2. * np.sqrt(m + n + 1.)``. The authors describe
this threshold as being based on "expected roundoff error" (p 71).
The thresholds above deal with floating point roundoff error in the
calculation of the SVD. However, you may have more information about the
sources of error in `M` that would make you consider other tolerance values
to detect *effective* rank deficiency. The most useful measure of the
tolerance depends on the operations you intend to use on your matrix. For
example, if your data come from uncertain measurements with uncertainties
greater than floating point epsilon, choosing a tolerance near that
uncertainty may be preferable. The tolerance may be absolute if the
uncertainties are absolute rather than relative.
References
----------
.. [1] MATLAB reference documentation, "Rank"
http://www.mathworks.com/help/techdoc/ref/rank.html
.. [2] W. H. Press, S. A. Teukolsky, W. T. Vetterling and B. P. Flannery,
"Numerical Recipes (3rd edition)", Cambridge University Press, 2007,
page 795.
Examples
--------
>>> from numpy.linalg import matrix_rank
>>> matrix_rank(np.eye(4)) # Full rank matrix
4
>>> I=np.eye(4); I[-1,-1] = 0. # rank deficient matrix
>>> matrix_rank(I)
3
>>> matrix_rank(np.ones((4,))) # 1 dimension - rank 1 unless all 0
1
>>> matrix_rank(np.zeros((4,)))
0
"""
M = asarray(M)
if M.ndim > 2:
raise TypeError('array should have 2 or fewer dimensions')
if M.ndim < 2:
return int(not all(M==0))
S = svd(M, compute_uv=False)
if tol is None:
tol = S.max() * max(M.shape) * finfo(S.dtype).eps
return sum(S > tol)
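# Illustrative sketch: the Notes above also describe the alternative Numerical
# Recipes threshold ``S.max() * np.finfo(M.dtype).eps / 2. * np.sqrt(m + n + 1.)``.
# A hypothetical helper using that tolerance could look as follows; it reuses
# the module-level names (asarray, svd, finfo, sqrt, sum) and assumes a 2-D
# input.
def _matrix_rank_nr_threshold(M):
    # Same as matrix_rank above, but with the "expected roundoff error"
    # threshold instead of the default S.max() * max(M.shape) * eps.
    M = asarray(M)
    m, n = M.shape
    S = svd(M, compute_uv=False)
    tol = S.max() * finfo(S.dtype).eps / 2. * sqrt(m + n + 1.)
    return sum(S > tol)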
# Generalized inverse
def pinv(a, rcond=1e-15 ):
"""
Compute the (Moore-Penrose) pseudo-inverse of a matrix.
Calculate the generalized inverse of a matrix using its
singular-value decomposition (SVD) and including all
*large* singular values.
Parameters
----------
a : (M, N) array_like
Matrix to be pseudo-inverted.
rcond : float
Cutoff for small singular values.
Singular values smaller (in modulus) than
`rcond` * largest_singular_value (again, in modulus)
are set to zero.
Returns
-------
B : (N, M) ndarray
The pseudo-inverse of `a`. If `a` is a `matrix` instance, then so
is `B`.
Raises
------
LinAlgError
If the SVD computation does not converge.
Notes
-----
The pseudo-inverse of a matrix A, denoted :math:`A^+`, is
defined as: "the matrix that 'solves' [the least-squares problem]
:math:`Ax = b`," i.e., if :math:`\\bar{x}` is said solution, then
:math:`A^+` is that matrix such that :math:`\\bar{x} = A^+b`.
It can be shown that if :math:`Q_1 \\Sigma Q_2^T = A` is the singular
value decomposition of A, then
:math:`A^+ = Q_2 \\Sigma^+ Q_1^T`, where :math:`Q_{1,2}` are
orthogonal matrices, :math:`\\Sigma` is a diagonal matrix consisting
of A's so-called singular values, (followed, typically, by
zeros), and then :math:`\\Sigma^+` is simply the diagonal matrix
consisting of the reciprocals of A's singular values
(again, followed by zeros). [1]_
References
----------
.. [1] G. Strang, *Linear Algebra and Its Applications*, 2nd Ed., Orlando,
FL, Academic Press, Inc., 1980, pp. 139-142.
Examples
--------
The following example checks that ``a * a+ * a == a`` and
``a+ * a * a+ == a+``:
>>> a = np.random.randn(9, 6)
>>> B = np.linalg.pinv(a)
>>> np.allclose(a, np.dot(a, np.dot(B, a)))
True
>>> np.allclose(B, np.dot(B, np.dot(a, B)))
True
"""
a, wrap = _makearray(a)
_assertNoEmpty2d(a)
a = a.conjugate()
u, s, vt = svd(a, 0)
m = u.shape[0]
n = vt.shape[1]
cutoff = rcond*maximum.reduce(s)
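    # Invert every singular value above the relative cutoff and zero out the
    # rest; this realizes the Sigma^+ described in the Notes above before it is
    # recombined with the transposed singular vectors below.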
for i in range(min(n, m)):
if s[i] > cutoff:
s[i] = 1./s[i]
else:
s[i] = 0.;
res = dot(transpose(vt), multiply(s[:, newaxis], transpose(u)))
return wrap(res)
# Determinant
def slogdet(a):
"""
Compute the sign and (natural) logarithm of the determinant of an array.
If an array has a very small or very large determinant, then a call to
`det` may overflow or underflow. This routine is more robust against such
issues, because it computes the logarithm of the determinant rather than
the determinant itself.
Parameters
----------
a : (..., M, M) array_like
Input array, has to be a square 2-D array.
Returns
-------
sign : (...) array_like
A number representing the sign of the determinant. For a real matrix,
this is 1, 0, or -1. For a complex matrix, this is a complex number
with absolute value 1 (i.e., it is on the unit circle), or else 0.
logdet : (...) array_like
The natural log of the absolute value of the determinant.
If the determinant is zero, then `sign` will be 0 and `logdet` will be
-Inf. In all cases, the determinant is equal to ``sign * np.exp(logdet)``.
See Also
--------
det
Notes
-----
Broadcasting rules apply, see the `numpy.linalg` documentation for
details.
The determinant is computed via LU factorization using the LAPACK
routine z/dgetrf.
.. versionadded:: 1.6.0.
Examples
--------
The determinant of a 2-D array ``[[a, b], [c, d]]`` is ``ad - bc``:
>>> a = np.array([[1, 2], [3, 4]])
>>> (sign, logdet) = np.linalg.slogdet(a)
>>> (sign, logdet)
(-1, 0.69314718055994529)
>>> sign * np.exp(logdet)
-2.0
Computing log-determinants for a stack of matrices:
>>> a = np.array([ [[1, 2], [3, 4]], [[1, 2], [2, 1]], [[1, 3], [3, 1]] ])
>>> a.shape
(3, 2, 2)
>>> sign, logdet = np.linalg.slogdet(a)
>>> (sign, logdet)
(array([-1., -1., -1.]), array([ 0.69314718, 1.09861229, 2.07944154]))
>>> sign * np.exp(logdet)
array([-2., -3., -8.])
This routine succeeds where ordinary `det` does not:
>>> np.linalg.det(np.eye(500) * 0.1)
0.0
>>> np.linalg.slogdet(np.eye(500) * 0.1)
(1, -1151.2925464970228)
"""
a = asarray(a)
_assertNoEmpty2d(a)
_assertRankAtLeast2(a)
_assertNdSquareness(a)
t, result_t = _commonType(a)
real_t = _realType(result_t)
signature = 'D->Dd' if isComplexType(t) else 'd->dd'
sign, logdet = _umath_linalg.slogdet(a, signature=signature)
return sign.astype(result_t), logdet.astype(real_t)
def det(a):
"""
Compute the determinant of an array.
Parameters
----------
a : (..., M, M) array_like
Input array to compute determinants for.
Returns
-------
det : (...) array_like
Determinant of `a`.
See Also
--------
slogdet : Another way of representing the determinant, more suitable
for large matrices where underflow/overflow may occur.
Notes
-----
Broadcasting rules apply, see the `numpy.linalg` documentation for
details.
The determinant is computed via LU factorization using the LAPACK
routine z/dgetrf.
Examples
--------
The determinant of a 2-D array [[a, b], [c, d]] is ad - bc:
>>> a = np.array([[1, 2], [3, 4]])
>>> np.linalg.det(a)
-2.0
Computing determinants for a stack of matrices:
>>> a = np.array([ [[1, 2], [3, 4]], [[1, 2], [2, 1]], [[1, 3], [3, 1]] ])
>>> a.shape
(3, 2, 2)
>>> np.linalg.det(a)
array([-2., -3., -8.])
"""
a = asarray(a)
_assertNoEmpty2d(a)
_assertRankAtLeast2(a)
_assertNdSquareness(a)
t, result_t = _commonType(a)
signature = 'D->D' if isComplexType(t) else 'd->d'
return _umath_linalg.det(a, signature=signature).astype(result_t)
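# When the determinant would overflow or underflow in floating point, slogdet
# above is the robust alternative; the two agree via det(a) == sign * exp(logdet).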
# Linear Least Squares
def lstsq(a, b, rcond=-1):
"""
Return the least-squares solution to a linear matrix equation.
Solves the equation `a x = b` by computing a vector `x` that
minimizes the Euclidean 2-norm `|| b - a x ||^2`. The equation may
be under-, well-, or over- determined (i.e., the number of
linearly independent rows of `a` can be less than, equal to, or
greater than its number of linearly independent columns). If `a`
is square and of full rank, then `x` (but for round-off error) is
the "exact" solution of the equation.
Parameters
----------
a : (M, N) array_like
"Coefficient" matrix.
b : {(M,), (M, K)} array_like
Ordinate or "dependent variable" values. If `b` is two-dimensional,
the least-squares solution is calculated for each of the `K` columns
of `b`.
rcond : float, optional
Cut-off ratio for small singular values of `a`.
Singular values are set to zero if they are smaller than `rcond`
times the largest singular value of `a`.
Returns
-------
x : {(N,), (N, K)} ndarray
Least-squares solution. If `b` is two-dimensional,
the solutions are in the `K` columns of `x`.
residuals : {(), (1,), (K,)} ndarray
Sums of residuals; squared Euclidean 2-norm for each column in
``b - a*x``.
If the rank of `a` is < N or M <= N, this is an empty array.
If `b` is 1-dimensional, this is a (1,) shape array.
Otherwise the shape is (K,).
rank : int
Rank of matrix `a`.
s : (min(M, N),) ndarray
Singular values of `a`.
Raises
------
LinAlgError
If computation does not converge.
Notes
-----
If `b` is a matrix, then all array results are returned as matrices.
Examples
--------
Fit a line, ``y = mx + c``, through some noisy data-points:
>>> x = np.array([0, 1, 2, 3])
>>> y = np.array([-1, 0.2, 0.9, 2.1])
By examining the coefficients, we see that the line should have a
gradient of roughly 1 and cut the y-axis at, more or less, -1.
We can rewrite the line equation as ``y = Ap``, where ``A = [[x 1]]``
and ``p = [[m], [c]]``. Now use `lstsq` to solve for `p`:
>>> A = np.vstack([x, np.ones(len(x))]).T
>>> A
array([[ 0., 1.],
[ 1., 1.],
[ 2., 1.],
[ 3., 1.]])
>>> m, c = np.linalg.lstsq(A, y)[0]
>>> print(m, c)
1.0 -0.95
Plot the data along with the fitted line:
>>> import matplotlib.pyplot as plt
>>> plt.plot(x, y, 'o', label='Original data', markersize=10)
>>> plt.plot(x, m*x + c, 'r', label='Fitted line')
>>> plt.legend()
>>> plt.show()
"""
import math
a, _ = _makearray(a)
b, wrap = _makearray(b)
is_1d = len(b.shape) == 1
if is_1d:
b = b[:, newaxis]
_assertRank2(a, b)
m = a.shape[0]
n = a.shape[1]
n_rhs = b.shape[1]
ldb = max(n, m)
if m != b.shape[0]:
raise LinAlgError('Incompatible dimensions')
t, result_t = _commonType(a, b)
result_real_t = _realType(result_t)
real_t = _linalgRealType(t)
bstar = zeros((ldb, n_rhs), t)
bstar[:b.shape[0], :n_rhs] = b.copy()
a, bstar = _fastCopyAndTranspose(t, a, bstar)
a, bstar = _to_native_byte_order(a, bstar)
s = zeros((min(m, n),), real_t)
nlvl = max( 0, int( math.log( float(min(m, n))/2. ) ) + 1 )
iwork = zeros((3*min(m, n)*nlvl+11*min(m, n),), fortran_int)
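    # Workspace query: the *gelsd calls below with a work size of -1 only ask
    # LAPACK for the optimal work (and rwork) array sizes; the actual solve is
    # the final call made with the allocated buffers.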
if isComplexType(t):
lapack_routine = lapack_lite.zgelsd
lwork = 1
rwork = zeros((lwork,), real_t)
work = zeros((lwork,), t)
results = lapack_routine(m, n, n_rhs, a, m, bstar, ldb, s, rcond,
0, work, -1, rwork, iwork, 0)
lwork = int(abs(work[0]))
rwork = zeros((lwork,), real_t)
a_real = zeros((m, n), real_t)
bstar_real = zeros((ldb, n_rhs,), real_t)
results = lapack_lite.dgelsd(m, n, n_rhs, a_real, m,
bstar_real, ldb, s, rcond,
0, rwork, -1, iwork, 0)
lrwork = int(rwork[0])
work = zeros((lwork,), t)
rwork = zeros((lrwork,), real_t)
results = lapack_routine(m, n, n_rhs, a, m, bstar, ldb, s, rcond,
0, work, lwork, rwork, iwork, 0)
else:
lapack_routine = lapack_lite.dgelsd
lwork = 1
work = zeros((lwork,), t)
results = lapack_routine(m, n, n_rhs, a, m, bstar, ldb, s, rcond,
0, work, -1, iwork, 0)
lwork = int(work[0])
work = zeros((lwork,), t)
results = lapack_routine(m, n, n_rhs, a, m, bstar, ldb, s, rcond,
0, work, lwork, iwork, 0)
if results['info'] > 0:
raise LinAlgError('SVD did not converge in Linear Least Squares')
resids = array([], result_real_t)
if is_1d:
x = array(ravel(bstar)[:n], dtype=result_t, copy=True)
if results['rank'] == n and m > n:
if isComplexType(t):
resids = array([sum(abs(ravel(bstar)[n:])**2)],
dtype=result_real_t)
else:
resids = array([sum((ravel(bstar)[n:])**2)],
dtype=result_real_t)
else:
x = array(transpose(bstar)[:n,:], dtype=result_t, copy=True)
if results['rank'] == n and m > n:
if isComplexType(t):
resids = sum(abs(transpose(bstar)[n:,:])**2, axis=0).astype(
result_real_t)
else:
resids = sum((transpose(bstar)[n:,:])**2, axis=0).astype(
result_real_t)
st = s[:min(n, m)].copy().astype(result_real_t)
return wrap(x), wrap(resids), results['rank'], st
def _multi_svd_norm(x, row_axis, col_axis, op):
"""Compute the extreme singular values of the 2-D matrices in `x`.
This is a private utility function used by numpy.linalg.norm().
Parameters
----------
x : ndarray
row_axis, col_axis : int
The axes of `x` that hold the 2-D matrices.
op : callable
This should be either numpy.amin or numpy.amax.
Returns
-------
result : float or ndarray
If `x` is 2-D, the return values is a float.
Otherwise, it is an array with ``x.ndim - 2`` dimensions.
The return values are either the minimum or maximum of the
singular values of the matrices, depending on whether `op`
is `numpy.amin` or `numpy.amax`.
"""
if row_axis > col_axis:
row_axis -= 1
y = rollaxis(rollaxis(x, col_axis, x.ndim), row_axis, -1)
result = op(svd(y, compute_uv=0), axis=-1)
return result
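# norm() below dispatches here for matrix 2-norms: ord=2 passes op=amax
# (largest singular value) and ord=-2 passes op=amin (smallest).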
def norm(x, ord=None, axis=None):
"""
Matrix or vector norm.
This function is able to return one of seven different matrix norms,
or one of an infinite number of vector norms (described below), depending
on the value of the ``ord`` parameter.
Parameters
----------
x : array_like
Input array. If `axis` is None, `x` must be 1-D or 2-D.
ord : {non-zero int, inf, -inf, 'fro'}, optional
Order of the norm (see table under ``Notes``). inf means numpy's
`inf` object.
axis : {int, 2-tuple of ints, None}, optional
If `axis` is an integer, it specifies the axis of `x` along which to
compute the vector norms. If `axis` is a 2-tuple, it specifies the
axes that hold 2-D matrices, and the matrix norms of these matrices
are computed. If `axis` is None then either a vector norm (when `x`
is 1-D) or a matrix norm (when `x` is 2-D) is returned.
Returns
-------
n : float or ndarray
Norm of the matrix or vector(s).
Notes
-----
For values of ``ord <= 0``, the result is, strictly speaking, not a
mathematical 'norm', but it may still be useful for various numerical
purposes.
The following norms can be calculated:
===== ============================ ==========================
ord norm for matrices norm for vectors
===== ============================ ==========================
None Frobenius norm 2-norm
'fro' Frobenius norm --
inf max(sum(abs(x), axis=1)) max(abs(x))
-inf min(sum(abs(x), axis=1)) min(abs(x))
0 -- sum(x != 0)
1 max(sum(abs(x), axis=0)) as below
-1 min(sum(abs(x), axis=0)) as below
2 2-norm (largest sing. value) as below
-2 smallest singular value as below
other -- sum(abs(x)**ord)**(1./ord)
===== ============================ ==========================
The Frobenius norm is given by [1]_:
:math:`||A||_F = [\\sum_{i,j} abs(a_{i,j})^2]^{1/2}`
References
----------
.. [1] G. H. Golub and C. F. Van Loan, *Matrix Computations*,
Baltimore, MD, Johns Hopkins University Press, 1985, pg. 15
Examples
--------
>>> from numpy import linalg as LA
>>> a = np.arange(9) - 4
>>> a
array([-4, -3, -2, -1, 0, 1, 2, 3, 4])
>>> b = a.reshape((3, 3))
>>> b
array([[-4, -3, -2],
[-1, 0, 1],
[ 2, 3, 4]])
>>> LA.norm(a)
7.745966692414834
>>> LA.norm(b)
7.745966692414834
>>> LA.norm(b, 'fro')
7.745966692414834
>>> LA.norm(a, np.inf)
4
>>> LA.norm(b, np.inf)
9
>>> LA.norm(a, -np.inf)
0
>>> LA.norm(b, -np.inf)
2
>>> LA.norm(a, 1)
20
>>> LA.norm(b, 1)
7
>>> LA.norm(a, -1)
-4.6566128774142013e-010
>>> LA.norm(b, -1)
6
>>> LA.norm(a, 2)
7.745966692414834
>>> LA.norm(b, 2)
7.3484692283495345
>>> LA.norm(a, -2)
nan
>>> LA.norm(b, -2)
1.8570331885190563e-016
>>> LA.norm(a, 3)
5.8480354764257312
>>> LA.norm(a, -3)
nan
Using the `axis` argument to compute vector norms:
>>> c = np.array([[ 1, 2, 3],
... [-1, 1, 4]])
>>> LA.norm(c, axis=0)
array([ 1.41421356, 2.23606798, 5. ])
>>> LA.norm(c, axis=1)
array([ 3.74165739, 4.24264069])
>>> LA.norm(c, ord=1, axis=1)
array([6, 6])
Using the `axis` argument to compute matrix norms:
>>> m = np.arange(8).reshape(2,2,2)
>>> LA.norm(m, axis=(1,2))
array([ 3.74165739, 11.22497216])
>>> LA.norm(m[0, :, :]), LA.norm(m[1, :, :])
(3.7416573867739413, 11.224972160321824)
"""
x = asarray(x)
# Check the default case first and handle it immediately.
if ord is None and axis is None:
x = x.ravel(order='K')
if isComplexType(x.dtype.type):
sqnorm = dot(x.real, x.real) + dot(x.imag, x.imag)
else:
sqnorm = dot(x, x)
return sqrt(sqnorm)
# Normalize the `axis` argument to a tuple.
nd = x.ndim
if axis is None:
axis = tuple(range(nd))
elif not isinstance(axis, tuple):
axis = (axis,)
if len(axis) == 1:
if ord == Inf:
return abs(x).max(axis=axis)
elif ord == -Inf:
return abs(x).min(axis=axis)
elif ord == 0:
# Zero norm
return (x != 0).sum(axis=axis)
elif ord == 1:
# special case for speedup
return add.reduce(abs(x), axis=axis)
elif ord is None or ord == 2:
# special case for speedup
s = (x.conj() * x).real
return sqrt(add.reduce(s, axis=axis))
else:
try:
ord + 1
except TypeError:
raise ValueError("Invalid norm order for vectors.")
if x.dtype.type is longdouble:
# Convert to a float type, so integer arrays give
# float results. Don't apply asfarray to longdouble arrays,
# because it will downcast to float64.
absx = abs(x)
else:
absx = x if isComplexType(x.dtype.type) else asfarray(x)
if absx.dtype is x.dtype:
absx = abs(absx)
else:
# if the type changed, we can safely overwrite absx
abs(absx, out=absx)
absx **= ord
return add.reduce(absx, axis=axis) ** (1.0 / ord)
elif len(axis) == 2:
row_axis, col_axis = axis
if not (-nd <= row_axis < nd and -nd <= col_axis < nd):
raise ValueError('Invalid axis %r for an array with shape %r' %
(axis, x.shape))
if row_axis % nd == col_axis % nd:
raise ValueError('Duplicate axes given.')
if ord == 2:
return _multi_svd_norm(x, row_axis, col_axis, amax)
elif ord == -2:
return _multi_svd_norm(x, row_axis, col_axis, amin)
elif ord == 1:
if col_axis > row_axis:
col_axis -= 1
return add.reduce(abs(x), axis=row_axis).max(axis=col_axis)
elif ord == Inf:
if row_axis > col_axis:
row_axis -= 1
return add.reduce(abs(x), axis=col_axis).max(axis=row_axis)
elif ord == -1:
if col_axis > row_axis:
col_axis -= 1
return add.reduce(abs(x), axis=row_axis).min(axis=col_axis)
elif ord == -Inf:
if row_axis > col_axis:
row_axis -= 1
return add.reduce(abs(x), axis=col_axis).min(axis=row_axis)
elif ord in [None, 'fro', 'f']:
return sqrt(add.reduce((x.conj() * x).real, axis=axis))
else:
raise ValueError("Invalid norm order for matrices.")
else:
raise ValueError("Improper number of dimensions to norm.")
| apache-2.0
glouppe/scikit-learn | sklearn/svm/tests/test_bounds.py | 280 | 2541 |
import nose
from nose.tools import assert_equal, assert_true
from sklearn.utils.testing import clean_warning_registry
import warnings
import numpy as np
from scipy import sparse as sp
from sklearn.svm.bounds import l1_min_c
from sklearn.svm import LinearSVC
from sklearn.linear_model.logistic import LogisticRegression
dense_X = [[-1, 0], [0, 1], [1, 1], [1, 1]]
sparse_X = sp.csr_matrix(dense_X)
Y1 = [0, 1, 1, 1]
Y2 = [2, 1, 0, 0]
def test_l1_min_c():
losses = ['squared_hinge', 'log']
Xs = {'sparse': sparse_X, 'dense': dense_X}
Ys = {'two-classes': Y1, 'multi-class': Y2}
intercepts = {'no-intercept': {'fit_intercept': False},
'fit-intercept': {'fit_intercept': True,
'intercept_scaling': 10}}
for loss in losses:
for X_label, X in Xs.items():
for Y_label, Y in Ys.items():
for intercept_label, intercept_params in intercepts.items():
check = lambda: check_l1_min_c(X, Y, loss,
**intercept_params)
check.description = ('Test l1_min_c loss=%r %s %s %s' %
(loss, X_label, Y_label,
intercept_label))
yield check
def test_l2_deprecation():
clean_warning_registry()
with warnings.catch_warnings(record=True) as w:
assert_equal(l1_min_c(dense_X, Y1, "l2"),
l1_min_c(dense_X, Y1, "squared_hinge"))
assert_equal(w[0].category, DeprecationWarning)
def check_l1_min_c(X, y, loss, fit_intercept=True, intercept_scaling=None):
min_c = l1_min_c(X, y, loss, fit_intercept, intercept_scaling)
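    # The assertions below bracket the threshold returned by l1_min_c: at
    # C == min_c the fitted model must be completely null (all coefficients and
    # the intercept equal to zero), while at C just above min_c at least one
    # parameter must become non-zero.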
clf = {
'log': LogisticRegression(penalty='l1'),
'squared_hinge': LinearSVC(loss='squared_hinge',
penalty='l1', dual=False),
}[loss]
clf.fit_intercept = fit_intercept
clf.intercept_scaling = intercept_scaling
clf.C = min_c
clf.fit(X, y)
assert_true((np.asarray(clf.coef_) == 0).all())
assert_true((np.asarray(clf.intercept_) == 0).all())
clf.C = min_c * 1.01
clf.fit(X, y)
assert_true((np.asarray(clf.coef_) != 0).any() or
(np.asarray(clf.intercept_) != 0).any())
@nose.tools.raises(ValueError)
def test_ill_posed_min_c():
X = [[0, 0], [0, 0]]
y = [0, 1]
l1_min_c(X, y)
@nose.tools.raises(ValueError)
def test_unsupported_loss():
l1_min_c(dense_X, Y1, 'l1')
| bsd-3-clause
soylentdeen/BlurryApple | Control/invert.py | 1 | 2158 |
import scipy
import numpy
import pyfits
from scipy.linalg import *
import matplotlib.pyplot as pyplot
import inversion
#"""
def pseudoInverse(filename, numFilteredModes=50):
A = scipy.matrix(pyfits.getdata(filename))
dims = A.shape
U,S,V = svd(A)
D = 1.0/(S[0:-numFilteredModes])
#S[-numFilteredModes+1:-1] = 0.0
S[-numFilteredModes:] = 0.0
newS = numpy.zeros([dims[0], dims[1]])
I = [i for i in range(dims[1])]
for i in range(len(D)):
newS[i][i] = D[i]
#S = newS.copy()
retval = scipy.matrix(V.T.dot(newS.T.dot(U.T)), dtype=numpy.float32)
singular_values = newS.diagonal()
svs = singular_values[singular_values.nonzero()[0]]
#print asdf
return retval, numpy.max(svs)/numpy.min(svs), retval.dot(A)
#"""
datadir = '/home/deen/Data/GRAVITY/InteractionMatrices/'
HODM_IMdf = datadir+'HODM_28nov.fits'
#HODM_IMdf = 'HODM_Calibration_150813.fits'
#HODM_IMdf = 'HO_IM_1021.fits'
HODM_CMdf = 'HODM_CM'
TTM_IMdf = datadir+'TTM_HighSNR_IM_1.fits'
#TTM_IMdf = datadir+'TT_IM.fits'
TTM_CMdf = 'TTM_CM.fits'
A = scipy.matrix(pyfits.getdata(TTM_IMdf)).getI()
cns = []
fmodes = []
for i in range(57):
inv, cn, junk = pseudoInverse(HODM_IMdf, i+1)
fmodes.append(i+1)
cns.append(cn)
#CM = inv
CM = numpy.resize(inv, (62, 136))
CM[-2] = A[0]
CM[-1] = A[1]
pyfits.writeto('Output/'+HODM_CMdf+str(i)+'.fits', CM, clobber=True)
pyfits.writeto('Output/ident_'+str(i)+'.fits', junk.T, clobber=True)
zeros = numpy.zeros(CM.shape, dtype=numpy.float32)
pyfits.writeto("Output/Zeros.fits", zeros, clobber=True)
addone = zeros.copy()
addone[9][37] = 1.0
pyfits.writeto("Output/test.fits", addone, clobber=True)
just_TT = zeros.copy()
#just_TT[-2] = #A[0]
just_TT[-1] = numpy.ones(len(A[0]), dtype = numpy.float32)
pyfits.writeto("Output/justTT.fits", just_TT, clobber=True)
f = pyplot.figure(0)
f.clear()
ax = f.add_axes([0.1, 0.1, 0.8, 0.8])
ax.set_yscale('log')
ax.plot(fmodes, cns)
ax.set_xlabel('Number of Filtered Modes')
ax.set_ylabel('Condition Number')
f.show()
f.savefig('ConditionNumbers.png')
print('done!')
pyfits.writeto(TTM_CMdf, A, clobber=True)
| gpl-2.0
loli/sklearn-ensembletrees | examples/plot_classifier_comparison.py | 181 | 4699 |
#!/usr/bin/python
# -*- coding: utf-8 -*-
"""
=====================
Classifier comparison
=====================
A comparison of several classifiers in scikit-learn on synthetic datasets.
The point of this example is to illustrate the nature of decision boundaries
of different classifiers.
This should be taken with a grain of salt, as the intuition conveyed by
these examples does not necessarily carry over to real datasets.
Particularly in high-dimensional spaces, data can more easily be separated
linearly and the simplicity of classifiers such as naive Bayes and linear SVMs
might lead to better generalization than is achieved by other classifiers.
The plots show training points in solid colors and testing points
semi-transparent. The lower right shows the classification accuracy on the test
set.
"""
print(__doc__)
# Code source: Gaël Varoquaux
# Andreas Müller
# Modified for documentation by Jaques Grobler
# License: BSD 3 clause
import numpy as np
import matplotlib.pyplot as plt
from matplotlib.colors import ListedColormap
from sklearn.cross_validation import train_test_split
from sklearn.preprocessing import StandardScaler
from sklearn.datasets import make_moons, make_circles, make_classification
from sklearn.neighbors import KNeighborsClassifier
from sklearn.svm import SVC
from sklearn.tree import DecisionTreeClassifier
from sklearn.ensemble import RandomForestClassifier, AdaBoostClassifier
from sklearn.naive_bayes import GaussianNB
from sklearn.lda import LDA
from sklearn.qda import QDA
h = .02 # step size in the mesh
names = ["Nearest Neighbors", "Linear SVM", "RBF SVM", "Decision Tree",
"Random Forest", "AdaBoost", "Naive Bayes", "LDA", "QDA"]
classifiers = [
KNeighborsClassifier(3),
SVC(kernel="linear", C=0.025),
SVC(gamma=2, C=1),
DecisionTreeClassifier(max_depth=5),
RandomForestClassifier(max_depth=5, n_estimators=10, max_features=1),
AdaBoostClassifier(),
GaussianNB(),
LDA(),
QDA()]
X, y = make_classification(n_features=2, n_redundant=0, n_informative=2,
random_state=1, n_clusters_per_class=1)
rng = np.random.RandomState(2)
X += 2 * rng.uniform(size=X.shape)
linearly_separable = (X, y)
datasets = [make_moons(noise=0.3, random_state=0),
make_circles(noise=0.2, factor=0.5, random_state=1),
linearly_separable
]
figure = plt.figure(figsize=(27, 9))
i = 1
# iterate over datasets
for ds in datasets:
# preprocess dataset, split into training and test part
X, y = ds
X = StandardScaler().fit_transform(X)
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=.4)
x_min, x_max = X[:, 0].min() - .5, X[:, 0].max() + .5
y_min, y_max = X[:, 1].min() - .5, X[:, 1].max() + .5
xx, yy = np.meshgrid(np.arange(x_min, x_max, h),
np.arange(y_min, y_max, h))
# just plot the dataset first
cm = plt.cm.RdBu
cm_bright = ListedColormap(['#FF0000', '#0000FF'])
ax = plt.subplot(len(datasets), len(classifiers) + 1, i)
# Plot the training points
ax.scatter(X_train[:, 0], X_train[:, 1], c=y_train, cmap=cm_bright)
# and testing points
ax.scatter(X_test[:, 0], X_test[:, 1], c=y_test, cmap=cm_bright, alpha=0.6)
ax.set_xlim(xx.min(), xx.max())
ax.set_ylim(yy.min(), yy.max())
ax.set_xticks(())
ax.set_yticks(())
i += 1
# iterate over classifiers
for name, clf in zip(names, classifiers):
ax = plt.subplot(len(datasets), len(classifiers) + 1, i)
clf.fit(X_train, y_train)
score = clf.score(X_test, y_test)
# Plot the decision boundary. For that, we will assign a color to each
# point in the mesh [x_min, x_max]x[y_min, y_max].
if hasattr(clf, "decision_function"):
Z = clf.decision_function(np.c_[xx.ravel(), yy.ravel()])
else:
Z = clf.predict_proba(np.c_[xx.ravel(), yy.ravel()])[:, 1]
# Put the result into a color plot
Z = Z.reshape(xx.shape)
ax.contourf(xx, yy, Z, cmap=cm, alpha=.8)
# Plot also the training points
ax.scatter(X_train[:, 0], X_train[:, 1], c=y_train, cmap=cm_bright)
# and testing points
ax.scatter(X_test[:, 0], X_test[:, 1], c=y_test, cmap=cm_bright,
alpha=0.6)
ax.set_xlim(xx.min(), xx.max())
ax.set_ylim(yy.min(), yy.max())
ax.set_xticks(())
ax.set_yticks(())
ax.set_title(name)
ax.text(xx.max() - .3, yy.min() + .3, ('%.2f' % score).lstrip('0'),
size=15, horizontalalignment='right')
i += 1
figure.subplots_adjust(left=.02, right=.98)
plt.show()
| bsd-3-clause
vivekmishra1991/scikit-learn | sklearn/metrics/tests/test_score_objects.py | 138 | 14048 |
import pickle
import numpy as np
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_raises_regexp
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import ignore_warnings
from sklearn.utils.testing import assert_not_equal
from sklearn.base import BaseEstimator
from sklearn.metrics import (f1_score, r2_score, roc_auc_score, fbeta_score,
log_loss, precision_score, recall_score)
from sklearn.metrics.cluster import adjusted_rand_score
from sklearn.metrics.scorer import (check_scoring, _PredictScorer,
_passthrough_scorer)
from sklearn.metrics import make_scorer, get_scorer, SCORERS
from sklearn.svm import LinearSVC
from sklearn.pipeline import make_pipeline
from sklearn.cluster import KMeans
from sklearn.dummy import DummyRegressor
from sklearn.linear_model import Ridge, LogisticRegression
from sklearn.tree import DecisionTreeClassifier, DecisionTreeRegressor
from sklearn.datasets import make_blobs
from sklearn.datasets import make_classification
from sklearn.datasets import make_multilabel_classification
from sklearn.datasets import load_diabetes
from sklearn.cross_validation import train_test_split, cross_val_score
from sklearn.grid_search import GridSearchCV
from sklearn.multiclass import OneVsRestClassifier
REGRESSION_SCORERS = ['r2', 'mean_absolute_error', 'mean_squared_error',
'median_absolute_error']
CLF_SCORERS = ['accuracy', 'f1', 'f1_weighted', 'f1_macro', 'f1_micro',
'roc_auc', 'average_precision', 'precision',
'precision_weighted', 'precision_macro', 'precision_micro',
'recall', 'recall_weighted', 'recall_macro', 'recall_micro',
'log_loss',
'adjusted_rand_score' # not really, but works
]
MULTILABEL_ONLY_SCORERS = ['precision_samples', 'recall_samples', 'f1_samples']
class EstimatorWithoutFit(object):
"""Dummy estimator to test check_scoring"""
pass
class EstimatorWithFit(BaseEstimator):
"""Dummy estimator to test check_scoring"""
def fit(self, X, y):
return self
class EstimatorWithFitAndScore(object):
"""Dummy estimator to test check_scoring"""
def fit(self, X, y):
return self
def score(self, X, y):
return 1.0
class EstimatorWithFitAndPredict(object):
"""Dummy estimator to test check_scoring"""
def fit(self, X, y):
self.y = y
return self
def predict(self, X):
return self.y
class DummyScorer(object):
"""Dummy scorer that always returns 1."""
def __call__(self, est, X, y):
return 1
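# The dummy estimators and scorer above exercise the branches of check_scoring
# tested below: no fit method (TypeError), fit + score (falls through to the
# passthrough scorer), fit + predict without score (explicit scoring required),
# and a scorer that ignores the estimator and always returns 1.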
def test_check_scoring():
# Test all branches of check_scoring
estimator = EstimatorWithoutFit()
pattern = (r"estimator should a be an estimator implementing 'fit' method,"
r" .* was passed")
assert_raises_regexp(TypeError, pattern, check_scoring, estimator)
estimator = EstimatorWithFitAndScore()
estimator.fit([[1]], [1])
scorer = check_scoring(estimator)
assert_true(scorer is _passthrough_scorer)
assert_almost_equal(scorer(estimator, [[1]], [1]), 1.0)
estimator = EstimatorWithFitAndPredict()
estimator.fit([[1]], [1])
pattern = (r"If no scoring is specified, the estimator passed should have"
r" a 'score' method\. The estimator .* does not\.")
assert_raises_regexp(TypeError, pattern, check_scoring, estimator)
scorer = check_scoring(estimator, "accuracy")
assert_almost_equal(scorer(estimator, [[1]], [1]), 1.0)
estimator = EstimatorWithFit()
scorer = check_scoring(estimator, "accuracy")
assert_true(isinstance(scorer, _PredictScorer))
estimator = EstimatorWithFit()
scorer = check_scoring(estimator, allow_none=True)
assert_true(scorer is None)
def test_check_scoring_gridsearchcv():
# test that check_scoring works on GridSearchCV and pipeline.
# slightly redundant non-regression test.
grid = GridSearchCV(LinearSVC(), param_grid={'C': [.1, 1]})
scorer = check_scoring(grid, "f1")
assert_true(isinstance(scorer, _PredictScorer))
pipe = make_pipeline(LinearSVC())
scorer = check_scoring(pipe, "f1")
assert_true(isinstance(scorer, _PredictScorer))
# check that cross_val_score definitely calls the scorer
# and doesn't make any assumptions about the estimator apart from having a
# fit.
scores = cross_val_score(EstimatorWithFit(), [[1], [2], [3]], [1, 0, 1],
scoring=DummyScorer())
assert_array_equal(scores, 1)
def test_make_scorer():
# Sanity check on the make_scorer factory function.
f = lambda *args: 0
assert_raises(ValueError, make_scorer, f, needs_threshold=True,
needs_proba=True)
def test_classification_scores():
# Test classification scorers.
X, y = make_blobs(random_state=0, centers=2)
X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=0)
clf = LinearSVC(random_state=0)
clf.fit(X_train, y_train)
for prefix, metric in [('f1', f1_score), ('precision', precision_score),
('recall', recall_score)]:
score1 = get_scorer('%s_weighted' % prefix)(clf, X_test, y_test)
score2 = metric(y_test, clf.predict(X_test), pos_label=None,
average='weighted')
assert_almost_equal(score1, score2)
score1 = get_scorer('%s_macro' % prefix)(clf, X_test, y_test)
score2 = metric(y_test, clf.predict(X_test), pos_label=None,
average='macro')
assert_almost_equal(score1, score2)
score1 = get_scorer('%s_micro' % prefix)(clf, X_test, y_test)
score2 = metric(y_test, clf.predict(X_test), pos_label=None,
average='micro')
assert_almost_equal(score1, score2)
score1 = get_scorer('%s' % prefix)(clf, X_test, y_test)
score2 = metric(y_test, clf.predict(X_test), pos_label=1)
assert_almost_equal(score1, score2)
# test fbeta score that takes an argument
scorer = make_scorer(fbeta_score, beta=2)
score1 = scorer(clf, X_test, y_test)
score2 = fbeta_score(y_test, clf.predict(X_test), beta=2)
assert_almost_equal(score1, score2)
# test that custom scorer can be pickled
unpickled_scorer = pickle.loads(pickle.dumps(scorer))
score3 = unpickled_scorer(clf, X_test, y_test)
assert_almost_equal(score1, score3)
# smoke test the repr:
repr(fbeta_score)
def test_regression_scorers():
# Test regression scorers.
diabetes = load_diabetes()
X, y = diabetes.data, diabetes.target
X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=0)
clf = Ridge()
clf.fit(X_train, y_train)
score1 = get_scorer('r2')(clf, X_test, y_test)
score2 = r2_score(y_test, clf.predict(X_test))
assert_almost_equal(score1, score2)
def test_thresholded_scorers():
# Test scorers that take thresholds.
X, y = make_blobs(random_state=0, centers=2)
X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=0)
clf = LogisticRegression(random_state=0)
clf.fit(X_train, y_train)
score1 = get_scorer('roc_auc')(clf, X_test, y_test)
score2 = roc_auc_score(y_test, clf.decision_function(X_test))
score3 = roc_auc_score(y_test, clf.predict_proba(X_test)[:, 1])
assert_almost_equal(score1, score2)
assert_almost_equal(score1, score3)
logscore = get_scorer('log_loss')(clf, X_test, y_test)
logloss = log_loss(y_test, clf.predict_proba(X_test))
assert_almost_equal(-logscore, logloss)
# same for an estimator without decision_function
clf = DecisionTreeClassifier()
clf.fit(X_train, y_train)
score1 = get_scorer('roc_auc')(clf, X_test, y_test)
score2 = roc_auc_score(y_test, clf.predict_proba(X_test)[:, 1])
assert_almost_equal(score1, score2)
# test with a regressor (no decision_function)
reg = DecisionTreeRegressor()
reg.fit(X_train, y_train)
score1 = get_scorer('roc_auc')(reg, X_test, y_test)
score2 = roc_auc_score(y_test, reg.predict(X_test))
assert_almost_equal(score1, score2)
# Test that an exception is raised on more than two classes
X, y = make_blobs(random_state=0, centers=3)
X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=0)
clf.fit(X_train, y_train)
assert_raises(ValueError, get_scorer('roc_auc'), clf, X_test, y_test)
def test_thresholded_scorers_multilabel_indicator_data():
# Test that the scorers work with multilabel-indicator format
# for multilabel and multi-output multi-class classifier
X, y = make_multilabel_classification(allow_unlabeled=False,
random_state=0)
X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=0)
# Multi-output multi-class predict_proba
clf = DecisionTreeClassifier()
clf.fit(X_train, y_train)
y_proba = clf.predict_proba(X_test)
score1 = get_scorer('roc_auc')(clf, X_test, y_test)
score2 = roc_auc_score(y_test, np.vstack(p[:, -1] for p in y_proba).T)
assert_almost_equal(score1, score2)
# Multi-output multi-class decision_function
# TODO Is there any yet?
clf = DecisionTreeClassifier()
clf.fit(X_train, y_train)
clf._predict_proba = clf.predict_proba
clf.predict_proba = None
clf.decision_function = lambda X: [p[:, 1] for p in clf._predict_proba(X)]
y_proba = clf.decision_function(X_test)
score1 = get_scorer('roc_auc')(clf, X_test, y_test)
score2 = roc_auc_score(y_test, np.vstack(p for p in y_proba).T)
assert_almost_equal(score1, score2)
# Multilabel predict_proba
clf = OneVsRestClassifier(DecisionTreeClassifier())
clf.fit(X_train, y_train)
score1 = get_scorer('roc_auc')(clf, X_test, y_test)
score2 = roc_auc_score(y_test, clf.predict_proba(X_test))
assert_almost_equal(score1, score2)
# Multilabel decision function
clf = OneVsRestClassifier(LinearSVC(random_state=0))
clf.fit(X_train, y_train)
score1 = get_scorer('roc_auc')(clf, X_test, y_test)
score2 = roc_auc_score(y_test, clf.decision_function(X_test))
assert_almost_equal(score1, score2)
def test_unsupervised_scorers():
# Test clustering scorers against gold standard labeling.
# We don't have any real unsupervised Scorers yet.
X, y = make_blobs(random_state=0, centers=2)
X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=0)
km = KMeans(n_clusters=3)
km.fit(X_train)
score1 = get_scorer('adjusted_rand_score')(km, X_test, y_test)
score2 = adjusted_rand_score(y_test, km.predict(X_test))
assert_almost_equal(score1, score2)
@ignore_warnings
def test_raises_on_score_list():
# Test that when a list of scores is returned, we raise proper errors.
X, y = make_blobs(random_state=0)
f1_scorer_no_average = make_scorer(f1_score, average=None)
clf = DecisionTreeClassifier()
assert_raises(ValueError, cross_val_score, clf, X, y,
scoring=f1_scorer_no_average)
grid_search = GridSearchCV(clf, scoring=f1_scorer_no_average,
param_grid={'max_depth': [1, 2]})
assert_raises(ValueError, grid_search.fit, X, y)
@ignore_warnings
def test_scorer_sample_weight():
# Test that scorers support sample_weight or raise sensible errors
# Unlike the metrics invariance test, in the scorer case it's harder
# to ensure that, on the classifier output, weighted and unweighted
# scores really should be unequal.
X, y = make_classification(random_state=0)
_, y_ml = make_multilabel_classification(n_samples=X.shape[0],
random_state=0)
split = train_test_split(X, y, y_ml, random_state=0)
X_train, X_test, y_train, y_test, y_ml_train, y_ml_test = split
sample_weight = np.ones_like(y_test)
sample_weight[:10] = 0
# get sensible estimators for each metric
sensible_regr = DummyRegressor(strategy='median')
sensible_regr.fit(X_train, y_train)
sensible_clf = DecisionTreeClassifier(random_state=0)
sensible_clf.fit(X_train, y_train)
sensible_ml_clf = DecisionTreeClassifier(random_state=0)
sensible_ml_clf.fit(X_train, y_ml_train)
estimator = dict([(name, sensible_regr)
for name in REGRESSION_SCORERS] +
[(name, sensible_clf)
for name in CLF_SCORERS] +
[(name, sensible_ml_clf)
for name in MULTILABEL_ONLY_SCORERS])
for name, scorer in SCORERS.items():
if name in MULTILABEL_ONLY_SCORERS:
target = y_ml_test
else:
target = y_test
try:
weighted = scorer(estimator[name], X_test, target,
sample_weight=sample_weight)
ignored = scorer(estimator[name], X_test[10:], target[10:])
unweighted = scorer(estimator[name], X_test, target)
assert_not_equal(weighted, unweighted,
msg="scorer {0} behaves identically when "
"called with sample weights: {1} vs "
"{2}".format(name, weighted, unweighted))
assert_almost_equal(weighted, ignored,
err_msg="scorer {0} behaves differently when "
"ignoring samples and setting sample_weight to"
" 0: {1} vs {2}".format(name, weighted,
ignored))
except TypeError as e:
assert_true("sample_weight" in str(e),
"scorer {0} raises unhelpful exception when called "
"with sample weights: {1}".format(name, str(e)))
| bsd-3-clause
piskvorky/gensim | docs/src/gallery/core/run_similarity_queries.py | 8 | 9576 |
r"""
Similarity Queries
==================
Demonstrates querying a corpus for similar documents.
"""
import logging
logging.basicConfig(format='%(asctime)s : %(levelname)s : %(message)s', level=logging.INFO)
###############################################################################
#
# Creating the Corpus
# -------------------
#
# First, we need to create a corpus to work with.
# This step is the same as in the previous tutorial;
# if you completed it, feel free to skip to the next section.
from collections import defaultdict
from gensim import corpora
documents = [
"Human machine interface for lab abc computer applications",
"A survey of user opinion of computer system response time",
"The EPS user interface management system",
"System and human system engineering testing of EPS",
"Relation of user perceived response time to error measurement",
"The generation of random binary unordered trees",
"The intersection graph of paths in trees",
"Graph minors IV Widths of trees and well quasi ordering",
"Graph minors A survey",
]
# remove common words and tokenize
stoplist = set('for a of the and to in'.split())
texts = [
[word for word in document.lower().split() if word not in stoplist]
for document in documents
]
# remove words that appear only once
frequency = defaultdict(int)
for text in texts:
for token in text:
frequency[token] += 1
texts = [
[token for token in text if frequency[token] > 1]
for text in texts
]
dictionary = corpora.Dictionary(texts)
corpus = [dictionary.doc2bow(text) for text in texts]
###############################################################################
# Similarity interface
# --------------------
#
# In the previous tutorials on
# :ref:`sphx_glr_auto_examples_core_run_corpora_and_vector_spaces.py`
# and
# :ref:`sphx_glr_auto_examples_core_run_topics_and_transformations.py`,
# we covered what it means to create a corpus in the Vector Space Model and how
# to transform it between different vector spaces. A common reason for such a
# charade is that we want to determine **similarity between pairs of
# documents**, or the **similarity between a specific document and a set of
# other documents** (such as a user query vs. indexed documents).
#
# To show how this can be done in gensim, let us consider the same corpus as in the
# previous examples (which really originally comes from Deerwester et al.'s
# `"Indexing by Latent Semantic Analysis" <http://www.cs.bham.ac.uk/~pxt/IDA/lsa_ind.pdf>`_
# seminal 1990 article).
# To follow Deerwester's example, we first use this tiny corpus to define a 2-dimensional
# LSI space:
from gensim import models
lsi = models.LsiModel(corpus, id2word=dictionary, num_topics=2)
###############################################################################
# For the purposes of this tutorial, there are only two things you need to know about LSI.
# First, it's just another transformation: it transforms vectors from one space to another.
# Second, the benefit of LSI is that it enables identifying patterns and relationships between terms (in our case, words in a document) and topics.
# Our LSI space is two-dimensional (`num_topics = 2`) so there are two topics, but this is arbitrary.
# If you're interested, you can read more about LSI here: `Latent Semantic Indexing <https://en.wikipedia.org/wiki/Latent_semantic_indexing>`_:
#
# Now suppose a user typed in the query `"Human computer interaction"`. We would
# like to sort our nine corpus documents in decreasing order of relevance to this query.
# Unlike modern search engines, here we only concentrate on a single aspect of possible
# similarities---on apparent semantic relatedness of their texts (words). No hyperlinks,
# no random-walk static ranks, just a semantic extension over the boolean keyword match:
doc = "Human computer interaction"
vec_bow = dictionary.doc2bow(doc.lower().split())
vec_lsi = lsi[vec_bow] # convert the query to LSI space
print(vec_lsi)
###############################################################################
# In addition, we will be considering `cosine similarity <http://en.wikipedia.org/wiki/Cosine_similarity>`_
# to determine the similarity of two vectors. Cosine similarity is a standard measure
# in Vector Space Modeling, but wherever the vectors represent probability distributions,
# `different similarity measures <http://en.wikipedia.org/wiki/Kullback%E2%80%93Leibler_divergence#Symmetrised_divergence>`_
# may be more appropriate.
#
# Initializing query structures
# ++++++++++++++++++++++++++++++++
#
# To prepare for similarity queries, we need to enter all documents which we want
# to compare against subsequent queries. In our case, they are the same nine documents
# used for training LSI, converted to 2-D LSA space. But that's only incidental; we
# might also be indexing a different corpus altogether.
from gensim import similarities
index = similarities.MatrixSimilarity(lsi[corpus]) # transform corpus to LSI space and index it
###############################################################################
# .. warning::
# The class :class:`similarities.MatrixSimilarity` is only appropriate when the whole
# set of vectors fits into memory. For example, a corpus of one million documents
# would require 2GB of RAM in a 256-dimensional LSI space, when used with this class.
#
# Without 2GB of free RAM, you would need to use the :class:`similarities.Similarity` class.
# This class operates in fixed memory, by splitting the index across multiple files on disk, called shards.
# It uses :class:`similarities.MatrixSimilarity` and :class:`similarities.SparseMatrixSimilarity` internally,
# so it is still fast, although slightly more complex.
#
# Index persistency is handled via the standard :func:`save` and :func:`load` functions:
index.save('/tmp/deerwester.index')
index = similarities.MatrixSimilarity.load('/tmp/deerwester.index')
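# An optional, minimal sketch of the memory-friendly alternative mentioned in
# the warning above: similarities.Similarity keeps the index in shards on disk
# under a path prefix (the '/tmp/deerwester_shards' prefix here is only an
# illustrative choice), while queries work exactly as with MatrixSimilarity.
index_on_disk = similarities.Similarity('/tmp/deerwester_shards', lsi[corpus],
                                        num_features=2)
print(list(enumerate(index_on_disk[vec_lsi])))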
###############################################################################
# This is true for all similarity indexing classes (:class:`similarities.Similarity`,
# :class:`similarities.MatrixSimilarity` and :class:`similarities.SparseMatrixSimilarity`).
# Also in the following, `index` can be an object of any of these. When in doubt,
# use :class:`similarities.Similarity`, as it is the most scalable version, and it also
# supports adding more documents to the index later.
#
# Performing queries
# ++++++++++++++++++
#
# To obtain similarities of our query document against the nine indexed documents:
sims = index[vec_lsi] # perform a similarity query against the corpus
print(list(enumerate(sims))) # print (document_number, document_similarity) 2-tuples
###############################################################################
# Cosine measure returns similarities in the range `<-1, 1>` (the greater, the more similar),
# so that the first document has a score of 0.99809301 etc.
#
# With some standard Python magic we sort these similarities into descending
# order, and obtain the final answer to the query `"Human computer interaction"`:
sims = sorted(enumerate(sims), key=lambda item: -item[1])
for doc_position, doc_score in sims:
print(doc_score, documents[doc_position])
###############################################################################
# The thing to note here is that documents no. 2 (``"The EPS user interface management system"``)
# and 4 (``"Relation of user perceived response time to error measurement"``) would never be returned by
# a standard boolean fulltext search, because they do not share any common words with ``"Human
# computer interaction"``. However, after applying LSI, we can observe that both of
# them received quite high similarity scores (no. 2 is actually the most similar!),
# which corresponds better to our intuition of
# them sharing a "computer-human" related topic with the query. In fact, this semantic
# generalization is the reason why we apply transformations and do topic modelling
# in the first place.
#
# Where next?
# ------------
#
# Congratulations, you have finished the tutorials -- now you know how gensim works :-)
# To delve into more details, you can browse through the :ref:`apiref`,
# see the :ref:`wiki` or perhaps check out :ref:`distributed` in `gensim`.
#
# Gensim is a fairly mature package that has been used successfully by many individuals and companies, both for rapid prototyping and in production.
# That doesn't mean it's perfect though:
#
# * there are parts that could be implemented more efficiently (in C, for example), or make better use of parallelism (multiple machines, cores)
# * new algorithms are published all the time; help gensim keep up by `discussing them <http://groups.google.com/group/gensim>`_ and `contributing code <https://github.com/piskvorky/gensim/wiki/Developer-page>`_
# * your **feedback is most welcome** and appreciated (and it's not just the code!):
# `bug reports <https://github.com/piskvorky/gensim/issues>`_ or
# `user stories and general questions <http://groups.google.com/group/gensim/topics>`_.
#
# Gensim has no ambition to become an all-encompassing framework, across all NLP (or even Machine Learning) subfields.
# Its mission is to help NLP practitioners try out popular topic modelling algorithms
# on large datasets easily, and to facilitate prototyping of new algorithms for researchers.
import matplotlib.pyplot as plt
import matplotlib.image as mpimg
img = mpimg.imread('run_similarity_queries.png')
imgplot = plt.imshow(img)
_ = plt.axis('off')
| lgpl-2.1
hlin117/scikit-learn | sklearn/cluster/tests/test_bicluster.py | 143 | 9461 |
"""Testing for Spectral Biclustering methods"""
import numpy as np
from scipy.sparse import csr_matrix, issparse
from sklearn.model_selection import ParameterGrid
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import SkipTest
from sklearn.base import BaseEstimator, BiclusterMixin
from sklearn.cluster.bicluster import SpectralCoclustering
from sklearn.cluster.bicluster import SpectralBiclustering
from sklearn.cluster.bicluster import _scale_normalize
from sklearn.cluster.bicluster import _bistochastic_normalize
from sklearn.cluster.bicluster import _log_normalize
from sklearn.metrics import consensus_score
from sklearn.datasets import make_biclusters, make_checkerboard
class MockBiclustering(BaseEstimator, BiclusterMixin):
# Mock object for testing get_submatrix.
def __init__(self):
pass
def get_indices(self, i):
# Overridden to reproduce old get_submatrix test.
return (np.where([True, True, False, False, True])[0],
np.where([False, False, True, True])[0])
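# With the fixed indices above, get_submatrix on np.arange(20).reshape(5, 4)
# should select rows 0, 1 and 4 and columns 2 and 3, which is exactly what
# test_get_submatrix asserts below.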
def test_get_submatrix():
data = np.arange(20).reshape(5, 4)
model = MockBiclustering()
for X in (data, csr_matrix(data), data.tolist()):
submatrix = model.get_submatrix(0, X)
if issparse(submatrix):
submatrix = submatrix.toarray()
assert_array_equal(submatrix, [[2, 3],
[6, 7],
[18, 19]])
submatrix[:] = -1
if issparse(X):
X = X.toarray()
assert_true(np.all(X != -1))
def _test_shape_indices(model):
# Test get_shape and get_indices on fitted model.
for i in range(model.n_clusters):
m, n = model.get_shape(i)
i_ind, j_ind = model.get_indices(i)
assert_equal(len(i_ind), m)
assert_equal(len(j_ind), n)
def test_spectral_coclustering():
# Test Dhillon's Spectral CoClustering on a simple problem.
param_grid = {'svd_method': ['randomized', 'arpack'],
'n_svd_vecs': [None, 20],
'mini_batch': [False, True],
'init': ['k-means++'],
'n_init': [10],
'n_jobs': [1]}
random_state = 0
S, rows, cols = make_biclusters((30, 30), 3, noise=0.5,
random_state=random_state)
S -= S.min() # needs to be nonnegative before making it sparse
S = np.where(S < 1, 0, S) # threshold some values
for mat in (S, csr_matrix(S)):
for kwargs in ParameterGrid(param_grid):
model = SpectralCoclustering(n_clusters=3,
random_state=random_state,
**kwargs)
model.fit(mat)
assert_equal(model.rows_.shape, (3, 30))
assert_array_equal(model.rows_.sum(axis=0), np.ones(30))
assert_array_equal(model.columns_.sum(axis=0), np.ones(30))
assert_equal(consensus_score(model.biclusters_,
(rows, cols)), 1)
_test_shape_indices(model)
def test_spectral_biclustering():
# Test Kluger methods on a checkerboard dataset.
S, rows, cols = make_checkerboard((30, 30), 3, noise=0.5,
random_state=0)
non_default_params = {'method': ['scale', 'log'],
'svd_method': ['arpack'],
'n_svd_vecs': [20],
'mini_batch': [True]}
for mat in (S, csr_matrix(S)):
for param_name, param_values in non_default_params.items():
for param_value in param_values:
model = SpectralBiclustering(
n_clusters=3,
n_init=3,
init='k-means++',
random_state=0,
)
model.set_params(**dict([(param_name, param_value)]))
if issparse(mat) and model.get_params().get('method') == 'log':
# cannot take log of sparse matrix
assert_raises(ValueError, model.fit, mat)
continue
else:
model.fit(mat)
assert_equal(model.rows_.shape, (9, 30))
assert_equal(model.columns_.shape, (9, 30))
assert_array_equal(model.rows_.sum(axis=0),
np.repeat(3, 30))
assert_array_equal(model.columns_.sum(axis=0),
np.repeat(3, 30))
assert_equal(consensus_score(model.biclusters_,
(rows, cols)), 1)
_test_shape_indices(model)
def _do_scale_test(scaled):
"""Check that rows sum to one constant, and columns to another."""
row_sum = scaled.sum(axis=1)
col_sum = scaled.sum(axis=0)
if issparse(scaled):
row_sum = np.asarray(row_sum).squeeze()
col_sum = np.asarray(col_sum).squeeze()
assert_array_almost_equal(row_sum, np.tile(row_sum.mean(), 100),
decimal=1)
assert_array_almost_equal(col_sum, np.tile(col_sum.mean(), 100),
decimal=1)
def _do_bistochastic_test(scaled):
"""Check that rows and columns sum to the same constant."""
_do_scale_test(scaled)
assert_almost_equal(scaled.sum(axis=0).mean(),
scaled.sum(axis=1).mean(),
decimal=1)
def test_scale_normalize():
generator = np.random.RandomState(0)
X = generator.rand(100, 100)
for mat in (X, csr_matrix(X)):
scaled, _, _ = _scale_normalize(mat)
_do_scale_test(scaled)
if issparse(mat):
assert issparse(scaled)
def test_bistochastic_normalize():
generator = np.random.RandomState(0)
X = generator.rand(100, 100)
for mat in (X, csr_matrix(X)):
scaled = _bistochastic_normalize(mat)
_do_bistochastic_test(scaled)
if issparse(mat):
assert issparse(scaled)
def test_log_normalize():
# adding any constant to a log-scaled matrix should make it
# bistochastic
generator = np.random.RandomState(0)
mat = generator.rand(100, 100)
scaled = _log_normalize(mat) + 1
_do_bistochastic_test(scaled)
def test_fit_best_piecewise():
model = SpectralBiclustering(random_state=0)
vectors = np.array([[0, 0, 0, 1, 1, 1],
[2, 2, 2, 3, 3, 3],
[0, 1, 2, 3, 4, 5]])
best = model._fit_best_piecewise(vectors, n_best=2, n_clusters=2)
assert_array_equal(best, vectors[:2])
def test_project_and_cluster():
model = SpectralBiclustering(random_state=0)
data = np.array([[1, 1, 1],
[1, 1, 1],
[3, 6, 3],
[3, 6, 3]])
vectors = np.array([[1, 0],
[0, 1],
[0, 0]])
for mat in (data, csr_matrix(data)):
labels = model._project_and_cluster(data, vectors,
n_clusters=2)
assert_array_equal(labels, [0, 0, 1, 1])
def test_perfect_checkerboard():
raise SkipTest("This test is failing on the buildbot, but cannot"
" reproduce. Temporarily disabling it until it can be"
" reproduced and fixed.")
model = SpectralBiclustering(3, svd_method="arpack", random_state=0)
S, rows, cols = make_checkerboard((30, 30), 3, noise=0,
random_state=0)
model.fit(S)
assert_equal(consensus_score(model.biclusters_,
(rows, cols)), 1)
S, rows, cols = make_checkerboard((40, 30), 3, noise=0,
random_state=0)
model.fit(S)
assert_equal(consensus_score(model.biclusters_,
(rows, cols)), 1)
S, rows, cols = make_checkerboard((30, 40), 3, noise=0,
random_state=0)
model.fit(S)
assert_equal(consensus_score(model.biclusters_,
(rows, cols)), 1)
def test_errors():
data = np.arange(25).reshape((5, 5))
model = SpectralBiclustering(n_clusters=(3, 3, 3))
assert_raises(ValueError, model.fit, data)
model = SpectralBiclustering(n_clusters='abc')
assert_raises(ValueError, model.fit, data)
model = SpectralBiclustering(n_clusters=(3, 'abc'))
assert_raises(ValueError, model.fit, data)
model = SpectralBiclustering(method='unknown')
assert_raises(ValueError, model.fit, data)
model = SpectralBiclustering(svd_method='unknown')
assert_raises(ValueError, model.fit, data)
model = SpectralBiclustering(n_components=0)
assert_raises(ValueError, model.fit, data)
model = SpectralBiclustering(n_best=0)
assert_raises(ValueError, model.fit, data)
model = SpectralBiclustering(n_components=3, n_best=4)
assert_raises(ValueError, model.fit, data)
model = SpectralBiclustering()
data = np.arange(27).reshape((3, 3, 3))
assert_raises(ValueError, model.fit, data)
|
bsd-3-clause
|
shenzebang/scikit-learn
|
sklearn/lda.py
|
72
|
17751
|
"""
Linear Discriminant Analysis (LDA)
"""
# Authors: Clemens Brunner
# Martin Billinger
# Matthieu Perrot
# Mathieu Blondel
# License: BSD 3-Clause
from __future__ import print_function
import warnings
import numpy as np
from scipy import linalg
from .externals.six import string_types
from .base import BaseEstimator, TransformerMixin
from .linear_model.base import LinearClassifierMixin
from .covariance import ledoit_wolf, empirical_covariance, shrunk_covariance
from .utils.multiclass import unique_labels
from .utils import check_array, check_X_y
from .utils.validation import check_is_fitted
from .utils.fixes import bincount
from .preprocessing import StandardScaler
__all__ = ['LDA']
def _cov(X, shrinkage=None):
"""Estimate covariance matrix (using optional shrinkage).
Parameters
----------
X : array-like, shape (n_samples, n_features)
Input data.
shrinkage : string or float, optional
Shrinkage parameter, possible values:
- None or 'empirical': no shrinkage (default).
- 'auto': automatic shrinkage using the Ledoit-Wolf lemma.
- float between 0 and 1: fixed shrinkage parameter.
Returns
-------
s : array, shape (n_features, n_features)
Estimated covariance matrix.
"""
shrinkage = "empirical" if shrinkage is None else shrinkage
if isinstance(shrinkage, string_types):
if shrinkage == 'auto':
sc = StandardScaler() # standardize features
X = sc.fit_transform(X)
s = ledoit_wolf(X)[0]
s = sc.std_[:, np.newaxis] * s * sc.std_[np.newaxis, :] # rescale
elif shrinkage == 'empirical':
s = empirical_covariance(X)
else:
raise ValueError('unknown shrinkage parameter')
elif isinstance(shrinkage, float) or isinstance(shrinkage, int):
if shrinkage < 0 or shrinkage > 1:
raise ValueError('shrinkage parameter must be between 0 and 1')
s = shrunk_covariance(empirical_covariance(X), shrinkage)
else:
        raise TypeError('shrinkage must be of string or float type')
return s
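# Hedged usage sketch (added; not part of the original module): the three
# shrinkage modes accepted by _cov, using the module-level numpy import.
def _cov_shrinkage_sketch():
    rng = np.random.RandomState(0)
    X = rng.randn(50, 4)
    s_empirical = _cov(X)         # shrinkage=None -> empirical covariance
    s_auto = _cov(X, 'auto')      # Ledoit-Wolf shrinkage
    s_fixed = _cov(X, 0.3)        # fixed shrinkage coefficient in [0, 1]
    return s_empirical.shape, s_auto.shape, s_fixed.shape   # all (4, 4)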
def _class_means(X, y):
"""Compute class means.
Parameters
----------
X : array-like, shape (n_samples, n_features)
Input data.
y : array-like, shape (n_samples,) or (n_samples, n_targets)
Target values.
Returns
-------
    means : array-like, shape (n_classes, n_features)
        Class means, one row per class.
"""
means = []
classes = np.unique(y)
for group in classes:
Xg = X[y == group, :]
means.append(Xg.mean(0))
return np.asarray(means)
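# Hedged sketch (added; not part of the original module): _class_means simply
# stacks one mean vector per class, here two classes in two dimensions.
def _class_means_sketch():
    X = np.array([[0., 0.], [2., 2.], [10., 10.], [12., 12.]])
    y = np.array([0, 0, 1, 1])
    return _class_means(X, y)   # array([[ 1.,  1.], [11., 11.]])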
def _class_cov(X, y, priors=None, shrinkage=None):
"""Compute class covariance matrix.
Parameters
----------
X : array-like, shape (n_samples, n_features)
Input data.
y : array-like, shape (n_samples,) or (n_samples, n_targets)
Target values.
priors : array-like, shape (n_classes,)
Class priors.
shrinkage : string or float, optional
Shrinkage parameter, possible values:
- None: no shrinkage (default).
- 'auto': automatic shrinkage using the Ledoit-Wolf lemma.
- float between 0 and 1: fixed shrinkage parameter.
Returns
-------
cov : array-like, shape (n_features, n_features)
Class covariance matrix.
"""
classes = np.unique(y)
covs = []
for group in classes:
Xg = X[y == group, :]
covs.append(np.atleast_2d(_cov(Xg, shrinkage)))
return np.average(covs, axis=0, weights=priors)
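# Hedged sketch (added; not part of the original module): with priors=None the
# class covariance is the unweighted average of the per-class estimates.
def _class_cov_sketch():
    rng = np.random.RandomState(0)
    X = rng.randn(40, 3)
    y = np.repeat([0, 1], 20)
    pooled = _class_cov(X, y)
    per_class = [_cov(X[y == g]) for g in np.unique(y)]
    np.testing.assert_allclose(pooled, np.mean(per_class, axis=0))
    return pooled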
class LDA(BaseEstimator, LinearClassifierMixin, TransformerMixin):
"""Linear Discriminant Analysis (LDA).
A classifier with a linear decision boundary, generated by fitting class
conditional densities to the data and using Bayes' rule.
The model fits a Gaussian density to each class, assuming that all classes
share the same covariance matrix.
The fitted model can also be used to reduce the dimensionality of the input
by projecting it to the most discriminative directions.
Read more in the :ref:`User Guide <lda_qda>`.
Parameters
----------
solver : string, optional
Solver to use, possible values:
- 'svd': Singular value decomposition (default). Does not compute the
covariance matrix, therefore this solver is recommended for
data with a large number of features.
- 'lsqr': Least squares solution, can be combined with shrinkage.
- 'eigen': Eigenvalue decomposition, can be combined with shrinkage.
shrinkage : string or float, optional
Shrinkage parameter, possible values:
- None: no shrinkage (default).
- 'auto': automatic shrinkage using the Ledoit-Wolf lemma.
- float between 0 and 1: fixed shrinkage parameter.
Note that shrinkage works only with 'lsqr' and 'eigen' solvers.
priors : array, optional, shape (n_classes,)
Class priors.
n_components : int, optional
        Number of components (<= n_classes - 1) for dimensionality reduction.
store_covariance : bool, optional
Additionally compute class covariance matrix (default False).
tol : float, optional
Threshold used for rank estimation in SVD solver.
Attributes
----------
coef_ : array, shape (n_features,) or (n_classes, n_features)
Weight vector(s).
    intercept_ : array, shape (n_classes,)
        Intercept term.
covariance_ : array-like, shape (n_features, n_features)
Covariance matrix (shared by all classes).
means_ : array-like, shape (n_classes, n_features)
Class means.
priors_ : array-like, shape (n_classes,)
Class priors (sum to 1).
scalings_ : array-like, shape (rank, n_classes - 1)
Scaling of the features in the space spanned by the class centroids.
xbar_ : array-like, shape (n_features,)
Overall mean.
classes_ : array-like, shape (n_classes,)
Unique class labels.
See also
--------
sklearn.qda.QDA: Quadratic discriminant analysis
Notes
-----
The default solver is 'svd'. It can perform both classification and
transform, and it does not rely on the calculation of the covariance
matrix. This can be an advantage in situations where the number of features
is large. However, the 'svd' solver cannot be used with shrinkage.
The 'lsqr' solver is an efficient algorithm that only works for
classification. It supports shrinkage.
The 'eigen' solver is based on the optimization of the between class
scatter to within class scatter ratio. It can be used for both
classification and transform, and it supports shrinkage. However, the
'eigen' solver needs to compute the covariance matrix, so it might not be
suitable for situations with a high number of features.
Examples
--------
>>> import numpy as np
>>> from sklearn.lda import LDA
>>> X = np.array([[-1, -1], [-2, -1], [-3, -2], [1, 1], [2, 1], [3, 2]])
>>> y = np.array([1, 1, 1, 2, 2, 2])
>>> clf = LDA()
>>> clf.fit(X, y)
LDA(n_components=None, priors=None, shrinkage=None, solver='svd',
store_covariance=False, tol=0.0001)
>>> print(clf.predict([[-0.8, -1]]))
[1]
"""
def __init__(self, solver='svd', shrinkage=None, priors=None,
n_components=None, store_covariance=False, tol=1e-4):
self.solver = solver
self.shrinkage = shrinkage
self.priors = priors
self.n_components = n_components
self.store_covariance = store_covariance # used only in svd solver
self.tol = tol # used only in svd solver
def _solve_lsqr(self, X, y, shrinkage):
"""Least squares solver.
The least squares solver computes a straightforward solution of the
optimal decision rule based directly on the discriminant functions. It
can only be used for classification (with optional shrinkage), because
estimation of eigenvectors is not performed. Therefore, dimensionality
reduction with the transform is not supported.
Parameters
----------
X : array-like, shape (n_samples, n_features)
Training data.
y : array-like, shape (n_samples,) or (n_samples, n_classes)
Target values.
shrinkage : string or float, optional
Shrinkage parameter, possible values:
- None: no shrinkage (default).
- 'auto': automatic shrinkage using the Ledoit-Wolf lemma.
- float between 0 and 1: fixed shrinkage parameter.
Notes
-----
This solver is based on [1]_, section 2.6.2, pp. 39-41.
References
----------
.. [1] R. O. Duda, P. E. Hart, D. G. Stork. Pattern Classification
(Second Edition). John Wiley & Sons, Inc., New York, 2001. ISBN
0-471-05669-3.
"""
self.means_ = _class_means(X, y)
self.covariance_ = _class_cov(X, y, self.priors_, shrinkage)
self.coef_ = linalg.lstsq(self.covariance_, self.means_.T)[0].T
self.intercept_ = (-0.5 * np.diag(np.dot(self.means_, self.coef_.T))
+ np.log(self.priors_))
def _solve_eigen(self, X, y, shrinkage):
"""Eigenvalue solver.
The eigenvalue solver computes the optimal solution of the Rayleigh
coefficient (basically the ratio of between class scatter to within
class scatter). This solver supports both classification and
dimensionality reduction (with optional shrinkage).
Parameters
----------
X : array-like, shape (n_samples, n_features)
Training data.
y : array-like, shape (n_samples,) or (n_samples, n_targets)
Target values.
shrinkage : string or float, optional
Shrinkage parameter, possible values:
- None: no shrinkage (default).
- 'auto': automatic shrinkage using the Ledoit-Wolf lemma.
- float between 0 and 1: fixed shrinkage constant.
Notes
-----
This solver is based on [1]_, section 3.8.3, pp. 121-124.
References
----------
.. [1] R. O. Duda, P. E. Hart, D. G. Stork. Pattern Classification
(Second Edition). John Wiley & Sons, Inc., New York, 2001. ISBN
0-471-05669-3.
"""
self.means_ = _class_means(X, y)
self.covariance_ = _class_cov(X, y, self.priors_, shrinkage)
Sw = self.covariance_ # within scatter
St = _cov(X, shrinkage) # total scatter
Sb = St - Sw # between scatter
evals, evecs = linalg.eigh(Sb, Sw)
evecs = evecs[:, np.argsort(evals)[::-1]] # sort eigenvectors
# evecs /= np.linalg.norm(evecs, axis=0) # doesn't work with numpy 1.6
evecs /= np.apply_along_axis(np.linalg.norm, 0, evecs)
self.scalings_ = evecs
self.coef_ = np.dot(self.means_, evecs).dot(evecs.T)
self.intercept_ = (-0.5 * np.diag(np.dot(self.means_, self.coef_.T))
+ np.log(self.priors_))
def _solve_svd(self, X, y, store_covariance=False, tol=1.0e-4):
"""SVD solver.
Parameters
----------
X : array-like, shape (n_samples, n_features)
Training data.
y : array-like, shape (n_samples,) or (n_samples, n_targets)
Target values.
store_covariance : bool, optional
Additionally compute class covariance matrix (default False).
tol : float, optional
Threshold used for rank estimation.
"""
n_samples, n_features = X.shape
n_classes = len(self.classes_)
self.means_ = _class_means(X, y)
if store_covariance:
self.covariance_ = _class_cov(X, y, self.priors_)
Xc = []
for idx, group in enumerate(self.classes_):
Xg = X[y == group, :]
Xc.append(Xg - self.means_[idx])
self.xbar_ = np.dot(self.priors_, self.means_)
Xc = np.concatenate(Xc, axis=0)
        # 1) within (univariate) scaling by the within-class std-dev
std = Xc.std(axis=0)
# avoid division by zero in normalization
std[std == 0] = 1.
fac = 1. / (n_samples - n_classes)
# 2) Within variance scaling
X = np.sqrt(fac) * (Xc / std)
        # SVD of centered, (within-class) scaled data
U, S, V = linalg.svd(X, full_matrices=False)
rank = np.sum(S > tol)
if rank < n_features:
warnings.warn("Variables are collinear.")
# Scaling of within covariance is: V' 1/S
scalings = (V[:rank] / std).T / S[:rank]
# 3) Between variance scaling
# Scale weighted centers
X = np.dot(((np.sqrt((n_samples * self.priors_) * fac)) *
(self.means_ - self.xbar_).T).T, scalings)
# Centers are living in a space with n_classes-1 dim (maximum)
# Use SVD to find projection in the space spanned by the
# (n_classes) centers
_, S, V = linalg.svd(X, full_matrices=0)
rank = np.sum(S > tol * S[0])
self.scalings_ = np.dot(scalings, V.T[:, :rank])
coef = np.dot(self.means_ - self.xbar_, self.scalings_)
self.intercept_ = (-0.5 * np.sum(coef ** 2, axis=1)
+ np.log(self.priors_))
self.coef_ = np.dot(coef, self.scalings_.T)
self.intercept_ -= np.dot(self.xbar_, self.coef_.T)
def fit(self, X, y, store_covariance=False, tol=1.0e-4):
"""Fit LDA model according to the given training data and parameters.
Parameters
----------
X : array-like, shape (n_samples, n_features)
Training data.
y : array, shape (n_samples,)
Target values.
"""
if store_covariance:
warnings.warn("'store_covariance' was moved to the __init__()"
"method in version 0.16 and will be removed from"
"fit() in version 0.18.", DeprecationWarning)
else:
store_covariance = self.store_covariance
if tol != 1.0e-4:
warnings.warn("'tol' was moved to __init__() method in version"
" 0.16 and will be removed from fit() in 0.18",
DeprecationWarning)
self.tol = tol
X, y = check_X_y(X, y)
self.classes_ = unique_labels(y)
if self.priors is None: # estimate priors from sample
_, y_t = np.unique(y, return_inverse=True) # non-negative ints
self.priors_ = bincount(y_t) / float(len(y))
else:
self.priors_ = self.priors
if self.solver == 'svd':
if self.shrinkage is not None:
raise NotImplementedError('shrinkage not supported')
self._solve_svd(X, y, store_covariance=store_covariance, tol=tol)
elif self.solver == 'lsqr':
self._solve_lsqr(X, y, shrinkage=self.shrinkage)
elif self.solver == 'eigen':
self._solve_eigen(X, y, shrinkage=self.shrinkage)
else:
raise ValueError("unknown solver {} (valid solvers are 'svd', "
"'lsqr', and 'eigen').".format(self.solver))
if self.classes_.size == 2: # treat binary case as a special case
self.coef_ = np.array(self.coef_[1, :] - self.coef_[0, :], ndmin=2)
self.intercept_ = np.array(self.intercept_[1] - self.intercept_[0],
ndmin=1)
return self
def transform(self, X):
"""Project data to maximize class separation.
Parameters
----------
X : array-like, shape (n_samples, n_features)
Input data.
Returns
-------
X_new : array, shape (n_samples, n_components)
Transformed data.
"""
if self.solver == 'lsqr':
raise NotImplementedError("transform not implemented for 'lsqr' "
"solver (use 'svd' or 'eigen').")
check_is_fitted(self, ['xbar_', 'scalings_'], all_or_any=any)
X = check_array(X)
if self.solver == 'svd':
X_new = np.dot(X - self.xbar_, self.scalings_)
elif self.solver == 'eigen':
X_new = np.dot(X, self.scalings_)
n_components = X.shape[1] if self.n_components is None \
else self.n_components
return X_new[:, :n_components]
def predict_proba(self, X):
"""Estimate probability.
Parameters
----------
X : array-like, shape (n_samples, n_features)
Input data.
Returns
-------
C : array, shape (n_samples, n_classes)
Estimated probabilities.
"""
prob = self.decision_function(X)
prob *= -1
np.exp(prob, prob)
prob += 1
np.reciprocal(prob, prob)
if len(self.classes_) == 2: # binary case
return np.column_stack([1 - prob, prob])
else:
# OvR normalization, like LibLinear's predict_probability
prob /= prob.sum(axis=1).reshape((prob.shape[0], -1))
return prob
def predict_log_proba(self, X):
"""Estimate log probability.
Parameters
----------
X : array-like, shape (n_samples, n_features)
Input data.
Returns
-------
C : array, shape (n_samples, n_classes)
Estimated log probabilities.
"""
return np.log(self.predict_proba(X))
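# Hedged usage sketch (added; not part of the original module): how the solver
# and shrinkage options described in the LDA docstring combine in practice.
def _lda_solver_sketch():
    rng = np.random.RandomState(0)
    X = np.vstack([rng.randn(20, 5), rng.randn(20, 5) + 2.0])
    y = np.repeat([0, 1], 20)
    # 'svd' (default): no covariance matrix is computed, shrinkage not allowed
    LDA(solver='svd').fit(X, y)
    # 'lsqr' with Ledoit-Wolf shrinkage: classification only (no transform)
    LDA(solver='lsqr', shrinkage='auto').fit(X, y)
    # 'eigen' with fixed shrinkage: also supports transform for dim. reduction
    X_proj = LDA(solver='eigen', shrinkage=0.1,
                 n_components=1).fit(X, y).transform(X)
    return X_proj.shape   # (40, 1)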
|
bsd-3-clause
|
HolgerPeters/scikit-learn
|
examples/ensemble/plot_adaboost_hastie_10_2.py
|
355
|
3576
|
"""
=============================
Discrete versus Real AdaBoost
=============================
This example is based on Figure 10.2 from Hastie et al 2009 [1] and illustrates
the difference in performance between the discrete SAMME [2] boosting
algorithm and real SAMME.R boosting algorithm. Both algorithms are evaluated
on a binary classification task where the target Y is a non-linear function
of 10 input features.
Discrete SAMME AdaBoost adapts based on errors in predicted class labels
whereas real SAMME.R uses the predicted class probabilities.
.. [1] T. Hastie, R. Tibshirani and J. Friedman, "Elements of Statistical
Learning Ed. 2", Springer, 2009.
.. [2] J. Zhu, H. Zou, S. Rosset, T. Hastie, "Multi-class AdaBoost", 2009.
"""
print(__doc__)
# Author: Peter Prettenhofer <[email protected]>,
# Noel Dawe <[email protected]>
#
# License: BSD 3 clause
import numpy as np
import matplotlib.pyplot as plt
from sklearn import datasets
from sklearn.tree import DecisionTreeClassifier
from sklearn.metrics import zero_one_loss
from sklearn.ensemble import AdaBoostClassifier
n_estimators = 400
# A learning rate of 1. may not be optimal for both SAMME and SAMME.R
learning_rate = 1.
X, y = datasets.make_hastie_10_2(n_samples=12000, random_state=1)
X_test, y_test = X[2000:], y[2000:]
X_train, y_train = X[:2000], y[:2000]
dt_stump = DecisionTreeClassifier(max_depth=1, min_samples_leaf=1)
dt_stump.fit(X_train, y_train)
dt_stump_err = 1.0 - dt_stump.score(X_test, y_test)
dt = DecisionTreeClassifier(max_depth=9, min_samples_leaf=1)
dt.fit(X_train, y_train)
dt_err = 1.0 - dt.score(X_test, y_test)
ada_discrete = AdaBoostClassifier(
base_estimator=dt_stump,
learning_rate=learning_rate,
n_estimators=n_estimators,
algorithm="SAMME")
ada_discrete.fit(X_train, y_train)
ada_real = AdaBoostClassifier(
base_estimator=dt_stump,
learning_rate=learning_rate,
n_estimators=n_estimators,
algorithm="SAMME.R")
ada_real.fit(X_train, y_train)
fig = plt.figure()
ax = fig.add_subplot(111)
ax.plot([1, n_estimators], [dt_stump_err] * 2, 'k-',
label='Decision Stump Error')
ax.plot([1, n_estimators], [dt_err] * 2, 'k--',
label='Decision Tree Error')
ada_discrete_err = np.zeros((n_estimators,))
for i, y_pred in enumerate(ada_discrete.staged_predict(X_test)):
ada_discrete_err[i] = zero_one_loss(y_pred, y_test)
ada_discrete_err_train = np.zeros((n_estimators,))
for i, y_pred in enumerate(ada_discrete.staged_predict(X_train)):
ada_discrete_err_train[i] = zero_one_loss(y_pred, y_train)
ada_real_err = np.zeros((n_estimators,))
for i, y_pred in enumerate(ada_real.staged_predict(X_test)):
ada_real_err[i] = zero_one_loss(y_pred, y_test)
ada_real_err_train = np.zeros((n_estimators,))
for i, y_pred in enumerate(ada_real.staged_predict(X_train)):
ada_real_err_train[i] = zero_one_loss(y_pred, y_train)
ax.plot(np.arange(n_estimators) + 1, ada_discrete_err,
label='Discrete AdaBoost Test Error',
color='red')
ax.plot(np.arange(n_estimators) + 1, ada_discrete_err_train,
label='Discrete AdaBoost Train Error',
color='blue')
ax.plot(np.arange(n_estimators) + 1, ada_real_err,
label='Real AdaBoost Test Error',
color='orange')
ax.plot(np.arange(n_estimators) + 1, ada_real_err_train,
label='Real AdaBoost Train Error',
color='green')
ax.set_ylim((0.0, 0.5))
ax.set_xlabel('n_estimators')
ax.set_ylabel('error rate')
leg = ax.legend(loc='upper right', fancybox=True)
leg.get_frame().set_alpha(0.7)
plt.show()
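# Hedged addition (not part of the original example): a compact numeric summary
# of the curves plotted above, using the error arrays computed earlier.
def _print_final_errors():
    print("Decision stump test error:    %.3f" % dt_stump_err)
    print("Decision tree test error:     %.3f" % dt_err)
    print("Discrete AdaBoost test error: %.3f" % ada_discrete_err[-1])
    print("Real AdaBoost test error:     %.3f" % ada_real_err[-1])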
|
bsd-3-clause
|
chrsrds/scikit-learn
|
doc/conf.py
|
2
|
10857
|
# -*- coding: utf-8 -*-
#
# scikit-learn documentation build configuration file, created by
# sphinx-quickstart on Fri Jan 8 09:13:42 2010.
#
# This file is execfile()d with the current directory set to its containing
# dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys
import os
import warnings
# If extensions (or modules to document with autodoc) are in another
# directory, add these directories to sys.path here. If the directory
# is relative to the documentation root, use os.path.abspath to make it
# absolute, like shown here.
sys.path.insert(0, os.path.abspath('sphinxext'))
from github_link import make_linkcode_resolve
import sphinx_gallery
# -- General configuration ---------------------------------------------------
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = [
'sphinx.ext.autodoc', 'sphinx.ext.autosummary',
'numpydoc',
'sphinx.ext.linkcode', 'sphinx.ext.doctest',
'sphinx.ext.intersphinx',
'sphinx.ext.imgconverter',
'sphinx_gallery.gen_gallery',
'sphinx_issues',
'custom_references_resolver'
]
# this is needed for some reason...
# see https://github.com/numpy/numpydoc/issues/69
numpydoc_class_members_toctree = False
# For maths, use mathjax by default and svg if NO_MATHJAX env variable is set
# (useful for viewing the doc offline)
if os.environ.get('NO_MATHJAX'):
extensions.append('sphinx.ext.imgmath')
imgmath_image_format = 'svg'
else:
extensions.append('sphinx.ext.mathjax')
mathjax_path = ('https://cdnjs.cloudflare.com/ajax/libs/mathjax/2.7.0/'
'MathJax.js?config=TeX-AMS_SVG')
autodoc_default_flags = ['members', 'inherited-members']
# Add any paths that contain templates here, relative to this directory.
templates_path = ['templates']
# generate autosummary even if no references
autosummary_generate = True
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = 'scikit-learn'
copyright = '2007 - 2019, scikit-learn developers (BSD License)'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
import sklearn
version = sklearn.__version__
# The full version, including alpha/beta/rc tags.
release = sklearn.__version__
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build', 'templates', 'includes', 'themes']
# The reST default role (used for this markup: `text`) to use for all
# documents.
# sklearn uses a custom extension: `custom_references_resolver` to modify
# the order of link resolution for the 'any' role. It resolves python class
# links first before resolving 'std' domain links. Unresolved roles are
# considered to be <code> blocks.
default_role = 'any'
# If true, '()' will be appended to :func: etc. cross-reference text.
add_function_parentheses = False
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# -- Options for HTML output -------------------------------------------------
# The theme to use for HTML and HTML Help pages. Major themes that come with
# Sphinx are currently 'default' and 'sphinxdoc'.
html_theme = 'scikit-learn'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
html_theme_options = {'oldversion': False, 'collapsiblesidebar': True,
'google_analytics': True, 'surveybanner': False,
'sprintbanner': True}
# Add any paths that contain custom themes here, relative to this directory.
html_theme_path = ['themes']
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
html_short_title = 'scikit-learn'
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
html_logo = 'logos/scikit-learn-logo-small.png'
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
html_favicon = 'logos/favicon.ico'
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['images']
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
html_domain_indices = False
# If false, no index is generated.
html_use_index = False
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# If nonempty, this is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = ''
# Output file base name for HTML help builder.
htmlhelp_basename = 'scikit-learndoc'
# -- Options for LaTeX output ------------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
# 'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
# 'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
'preamble': r"""
\usepackage{amsmath}\usepackage{amsfonts}\usepackage{bm}
\usepackage{morefloats}\usepackage{enumitem} \setlistdepth{10}
"""
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass
# [howto/manual]).
latex_documents = [('index', 'user_guide.tex', 'scikit-learn user guide',
'scikit-learn developers', 'manual'), ]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
latex_logo = "logos/scikit-learn-logo.png"
# Documents to append as an appendix to all manuals.
# latex_appendices = []
# If false, no module index is generated.
latex_domain_indices = False
trim_doctests_flags = True
# intersphinx configuration
intersphinx_mapping = {
'python': ('https://docs.python.org/{.major}'.format(
sys.version_info), None),
'numpy': ('https://docs.scipy.org/doc/numpy/', None),
'scipy': ('https://docs.scipy.org/doc/scipy/reference', None),
'matplotlib': ('https://matplotlib.org/', None),
'pandas': ('https://pandas.pydata.org/pandas-docs/stable/', None),
'joblib': ('https://joblib.readthedocs.io/en/latest/', None),
}
sphinx_gallery_conf = {
'doc_module': 'sklearn',
'backreferences_dir': os.path.join('modules', 'generated'),
'reference_url': {
'sklearn': None}
}
# The following dictionary contains the information used to create the
# thumbnails for the front page of the scikit-learn home page.
# key: first image in set
# value: maximum width passed to scale_image() in make_carousel_thumbs below
carousel_thumbs = {'sphx_glr_plot_classifier_comparison_001.png': 600,
'sphx_glr_plot_anomaly_comparison_001.png': 372,
'sphx_glr_plot_gpr_co2_001.png': 350,
'sphx_glr_plot_adaboost_twoclass_001.png': 372,
'sphx_glr_plot_compare_methods_001.png': 349}
# enable experimental module so that experimental estimators can be
# discovered properly by sphinx
from sklearn.experimental import enable_hist_gradient_boosting # noqa
from sklearn.experimental import enable_iterative_imputer # noqa
def make_carousel_thumbs(app, exception):
"""produces the final resized carousel images"""
if exception is not None:
return
print('Preparing carousel images')
image_dir = os.path.join(app.builder.outdir, '_images')
for glr_plot, max_width in carousel_thumbs.items():
image = os.path.join(image_dir, glr_plot)
if os.path.exists(image):
c_thumb = os.path.join(image_dir, glr_plot[:-4] + '_carousel.png')
sphinx_gallery.gen_rst.scale_image(image, c_thumb, max_width, 190)
# Config for sphinx_issues
# we use the issues path for PRs since the issues URL will forward
issues_github_path = 'scikit-learn/scikit-learn'
def setup(app):
# to hide/show the prompt in code examples:
app.add_javascript('js/copybutton.js')
app.add_javascript('js/extra.js')
app.connect('build-finished', make_carousel_thumbs)
# The following is used by sphinx.ext.linkcode to provide links to github
linkcode_resolve = make_linkcode_resolve('sklearn',
'https://github.com/scikit-learn/'
'scikit-learn/blob/{revision}/'
'{package}/{path}#L{lineno}')
warnings.filterwarnings("ignore", category=UserWarning,
message='Matplotlib is currently using agg, which is a'
' non-GUI backend, so cannot show the figure.')
# Reduces the output of estimators
sklearn.set_config(print_changed_only=True)
|
bsd-3-clause
|
cmoutard/mne-python
|
mne/viz/tests/test_evoked.py
|
2
|
4616
|
# Authors: Alexandre Gramfort <[email protected]>
# Denis Engemann <[email protected]>
# Martin Luessi <[email protected]>
# Eric Larson <[email protected]>
# Cathy Nangini <[email protected]>
# Mainak Jas <[email protected]>
#
# License: Simplified BSD
import os.path as op
import warnings
import numpy as np
from numpy.testing import assert_raises
from mne import io, read_events, Epochs, pick_types, read_cov
from mne.viz.evoked import _butterfly_onselect
from mne.viz.utils import _fake_click
from mne.utils import slow_test, run_tests_if_main
from mne.channels import read_layout
# Set our plotters to test mode
import matplotlib
matplotlib.use('Agg') # for testing don't use X server
warnings.simplefilter('always') # enable b/c these tests throw warnings
base_dir = op.join(op.dirname(__file__), '..', '..', 'io', 'tests', 'data')
evoked_fname = op.join(base_dir, 'test-ave.fif')
raw_fname = op.join(base_dir, 'test_raw.fif')
cov_fname = op.join(base_dir, 'test-cov.fif')
event_name = op.join(base_dir, 'test-eve.fif')
event_id, tmin, tmax = 1, -0.1, 0.1
n_chan = 6
layout = read_layout('Vectorview-all')
def _get_raw():
return io.Raw(raw_fname, preload=False)
def _get_events():
return read_events(event_name)
def _get_picks(raw):
return pick_types(raw.info, meg=True, eeg=False, stim=False,
ecg=False, eog=False, exclude='bads')
def _get_epochs():
raw = _get_raw()
events = _get_events()
picks = _get_picks(raw)
# Use a subset of channels for plotting speed
picks = picks[np.round(np.linspace(0, len(picks) - 1, n_chan)).astype(int)]
picks[0] = 2 # make sure we have a magnetometer
epochs = Epochs(raw, events[:5], event_id, tmin, tmax, picks=picks,
baseline=(None, 0))
epochs.info['bads'] = [epochs.ch_names[-1]]
return epochs
def _get_epochs_delayed_ssp():
raw = _get_raw()
events = _get_events()
picks = _get_picks(raw)
reject = dict(mag=4e-12)
epochs_delayed_ssp = Epochs(raw, events[:10], event_id, tmin, tmax,
picks=picks, baseline=(None, 0),
proj='delayed', reject=reject)
return epochs_delayed_ssp
@slow_test
def test_plot_evoked():
"""Test plotting of evoked
"""
import matplotlib.pyplot as plt
evoked = _get_epochs().average()
with warnings.catch_warnings(record=True):
fig = evoked.plot(proj=True, hline=[1], exclude=[], window_title='foo')
# Test a click
ax = fig.get_axes()[0]
line = ax.lines[0]
_fake_click(fig, ax,
[line.get_xdata()[0], line.get_ydata()[0]], 'data')
_fake_click(fig, ax,
[ax.get_xlim()[0], ax.get_ylim()[1]], 'data')
# plot with bad channels excluded & spatial_colors
evoked.plot(exclude='bads')
evoked.plot(exclude=evoked.info['bads'], spatial_colors=True, gfp=True)
# test selective updating of dict keys is working.
evoked.plot(hline=[1], units=dict(mag='femto foo'))
evoked_delayed_ssp = _get_epochs_delayed_ssp().average()
evoked_delayed_ssp.plot(proj='interactive')
evoked_delayed_ssp.apply_proj()
assert_raises(RuntimeError, evoked_delayed_ssp.plot,
proj='interactive')
evoked_delayed_ssp.info['projs'] = []
assert_raises(RuntimeError, evoked_delayed_ssp.plot,
proj='interactive')
assert_raises(RuntimeError, evoked_delayed_ssp.plot,
proj='interactive', axes='foo')
plt.close('all')
# test GFP only
evoked.plot(gfp='only')
assert_raises(ValueError, evoked.plot, gfp='foo')
evoked.plot_image(proj=True)
# plot with bad channels excluded
evoked.plot_image(exclude='bads')
evoked.plot_image(exclude=evoked.info['bads']) # does the same thing
plt.close('all')
evoked.plot_topo() # should auto-find layout
_butterfly_onselect(0, 200, ['mag'], evoked) # test averaged topomap
plt.close('all')
cov = read_cov(cov_fname)
cov['method'] = 'empirical'
evoked.plot_white(cov)
evoked.plot_white([cov, cov])
# Hack to test plotting of maxfiltered data
evoked_sss = evoked.copy()
evoked_sss.info['proc_history'] = [dict(max_info=None)]
evoked_sss.plot_white(cov)
evoked_sss.plot_white(cov_fname)
plt.close('all')
run_tests_if_main()
|
bsd-3-clause
|
datapythonista/pandas
|
pandas/tests/indexes/categorical/test_category.py
|
2
|
14225
|
import numpy as np
import pytest
from pandas._libs import index as libindex
from pandas._libs.arrays import NDArrayBacked
import pandas as pd
from pandas import (
Categorical,
CategoricalDtype,
)
import pandas._testing as tm
from pandas.core.indexes.api import (
CategoricalIndex,
Index,
)
from pandas.tests.indexes.common import Base
class TestCategoricalIndex(Base):
_index_cls = CategoricalIndex
@pytest.fixture
def simple_index(self) -> CategoricalIndex:
return self._index_cls(list("aabbca"), categories=list("cab"), ordered=False)
@pytest.fixture
def index(self, request):
return tm.makeCategoricalIndex(100)
def create_index(self, *, categories=None, ordered=False):
if categories is None:
categories = list("cab")
return CategoricalIndex(list("aabbca"), categories=categories, ordered=ordered)
def test_can_hold_identifiers(self):
idx = self.create_index(categories=list("abcd"))
key = idx[0]
assert idx._can_hold_identifiers_and_holds_name(key) is True
def test_pickle_compat_construction(self):
# Once the deprecation is enforced, we can use the parent class's test
with tm.assert_produces_warning(FutureWarning, match="without passing data"):
self._index_cls()
def test_insert(self, simple_index):
ci = simple_index
categories = ci.categories
# test 0th element
result = ci.insert(0, "a")
expected = CategoricalIndex(list("aaabbca"), categories=categories)
tm.assert_index_equal(result, expected, exact=True)
# test Nth element that follows Python list behavior
result = ci.insert(-1, "a")
expected = CategoricalIndex(list("aabbcaa"), categories=categories)
tm.assert_index_equal(result, expected, exact=True)
# test empty
result = CategoricalIndex([], categories=categories).insert(0, "a")
expected = CategoricalIndex(["a"], categories=categories)
tm.assert_index_equal(result, expected, exact=True)
# invalid -> cast to object
expected = ci.astype(object).insert(0, "d")
result = ci.insert(0, "d")
tm.assert_index_equal(result, expected, exact=True)
# GH 18295 (test missing)
expected = CategoricalIndex(["a", np.nan, "a", "b", "c", "b"])
for na in (np.nan, pd.NaT, None):
result = CategoricalIndex(list("aabcb")).insert(1, na)
tm.assert_index_equal(result, expected)
def test_insert_na_mismatched_dtype(self):
ci = CategoricalIndex([0, 1, 1])
result = ci.insert(0, pd.NaT)
expected = Index([pd.NaT, 0, 1, 1], dtype=object)
tm.assert_index_equal(result, expected)
def test_delete(self, simple_index):
ci = simple_index
categories = ci.categories
result = ci.delete(0)
expected = CategoricalIndex(list("abbca"), categories=categories)
tm.assert_index_equal(result, expected, exact=True)
result = ci.delete(-1)
expected = CategoricalIndex(list("aabbc"), categories=categories)
tm.assert_index_equal(result, expected, exact=True)
with tm.external_error_raised((IndexError, ValueError)):
# Either depending on NumPy version
ci.delete(10)
@pytest.mark.parametrize(
"data, non_lexsorted_data",
[[[1, 2, 3], [9, 0, 1, 2, 3]], [list("abc"), list("fabcd")]],
)
def test_is_monotonic(self, data, non_lexsorted_data):
c = CategoricalIndex(data)
assert c.is_monotonic_increasing is True
assert c.is_monotonic_decreasing is False
c = CategoricalIndex(data, ordered=True)
assert c.is_monotonic_increasing is True
assert c.is_monotonic_decreasing is False
c = CategoricalIndex(data, categories=reversed(data))
assert c.is_monotonic_increasing is False
assert c.is_monotonic_decreasing is True
c = CategoricalIndex(data, categories=reversed(data), ordered=True)
assert c.is_monotonic_increasing is False
assert c.is_monotonic_decreasing is True
# test when data is neither monotonic increasing nor decreasing
reordered_data = [data[0], data[2], data[1]]
c = CategoricalIndex(reordered_data, categories=reversed(data))
assert c.is_monotonic_increasing is False
assert c.is_monotonic_decreasing is False
# non lexsorted categories
categories = non_lexsorted_data
c = CategoricalIndex(categories[:2], categories=categories)
assert c.is_monotonic_increasing is True
assert c.is_monotonic_decreasing is False
c = CategoricalIndex(categories[1:3], categories=categories)
assert c.is_monotonic_increasing is True
assert c.is_monotonic_decreasing is False
def test_has_duplicates(self):
idx = CategoricalIndex([0, 0, 0], name="foo")
assert idx.is_unique is False
assert idx.has_duplicates is True
idx = CategoricalIndex([0, 1], categories=[2, 3], name="foo")
assert idx.is_unique is False
assert idx.has_duplicates is True
idx = CategoricalIndex([0, 1, 2, 3], categories=[1, 2, 3], name="foo")
assert idx.is_unique is True
assert idx.has_duplicates is False
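    # Note (added, hedged): in the second case above both 0 and 1 fall outside
    # the declared categories [2, 3], so each entry becomes NaN; the two NaN
    # values are what make the index non-unique, e.g.
    # CategoricalIndex([0, 1], categories=[2, 3]).isna().all() is True.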
@pytest.mark.parametrize(
"data, categories, expected",
[
(
[1, 1, 1],
[1, 2, 3],
{
"first": np.array([False, True, True]),
"last": np.array([True, True, False]),
False: np.array([True, True, True]),
},
),
(
[1, 1, 1],
list("abc"),
{
"first": np.array([False, True, True]),
"last": np.array([True, True, False]),
False: np.array([True, True, True]),
},
),
(
[2, "a", "b"],
list("abc"),
{
"first": np.zeros(shape=(3), dtype=np.bool_),
"last": np.zeros(shape=(3), dtype=np.bool_),
False: np.zeros(shape=(3), dtype=np.bool_),
},
),
(
list("abb"),
list("abc"),
{
"first": np.array([False, False, True]),
"last": np.array([False, True, False]),
False: np.array([False, True, True]),
},
),
],
)
def test_drop_duplicates(self, data, categories, expected):
idx = CategoricalIndex(data, categories=categories, name="foo")
for keep, e in expected.items():
tm.assert_numpy_array_equal(idx.duplicated(keep=keep), e)
e = idx[~e]
result = idx.drop_duplicates(keep=keep)
tm.assert_index_equal(result, e)
@pytest.mark.parametrize(
"data, categories, expected_data",
[
([1, 1, 1], [1, 2, 3], [1]),
([1, 1, 1], list("abc"), [np.nan]),
([1, 2, "a"], [1, 2, 3], [1, 2, np.nan]),
([2, "a", "b"], list("abc"), [np.nan, "a", "b"]),
],
)
def test_unique(self, data, categories, expected_data, ordered):
dtype = CategoricalDtype(categories, ordered=ordered)
idx = CategoricalIndex(data, dtype=dtype)
expected = CategoricalIndex(expected_data, dtype=dtype)
tm.assert_index_equal(idx.unique(), expected)
def test_repr_roundtrip(self):
ci = CategoricalIndex(["a", "b"], categories=["a", "b"], ordered=True)
str(ci)
tm.assert_index_equal(eval(repr(ci)), ci, exact=True)
# formatting
str(ci)
# long format
# this is not reprable
ci = CategoricalIndex(np.random.randint(0, 5, size=100))
str(ci)
def test_isin(self):
ci = CategoricalIndex(list("aabca") + [np.nan], categories=["c", "a", "b"])
tm.assert_numpy_array_equal(
ci.isin(["c"]), np.array([False, False, False, True, False, False])
)
tm.assert_numpy_array_equal(
ci.isin(["c", "a", "b"]), np.array([True] * 5 + [False])
)
tm.assert_numpy_array_equal(
ci.isin(["c", "a", "b", np.nan]), np.array([True] * 6)
)
# mismatched categorical -> coerced to ndarray so doesn't matter
result = ci.isin(ci.set_categories(list("abcdefghi")))
expected = np.array([True] * 6)
tm.assert_numpy_array_equal(result, expected)
result = ci.isin(ci.set_categories(list("defghi")))
expected = np.array([False] * 5 + [True])
tm.assert_numpy_array_equal(result, expected)
def test_identical(self):
ci1 = CategoricalIndex(["a", "b"], categories=["a", "b"], ordered=True)
ci2 = CategoricalIndex(["a", "b"], categories=["a", "b", "c"], ordered=True)
assert ci1.identical(ci1)
assert ci1.identical(ci1.copy())
assert not ci1.identical(ci2)
def test_ensure_copied_data(self, index):
# gh-12309: Check the "copy" argument of each
# Index.__new__ is honored.
#
# Must be tested separately from other indexes because
# self.values is not an ndarray.
# GH#29918 Index.base has been removed
# FIXME: is this test still meaningful?
_base = lambda ar: ar if getattr(ar, "base", None) is None else ar.base
result = CategoricalIndex(index.values, copy=True)
tm.assert_index_equal(index, result)
assert _base(index.values) is not _base(result.values)
result = CategoricalIndex(index.values, copy=False)
assert result._data._codes is index._data._codes
def test_frame_repr(self):
df = pd.DataFrame({"A": [1, 2, 3]}, index=CategoricalIndex(["a", "b", "c"]))
result = repr(df)
expected = " A\na 1\nb 2\nc 3"
assert result == expected
def test_reindex_base(self):
# See test_reindex.py
pass
def test_map_str(self):
# See test_map.py
pass
class TestCategoricalIndex2:
# Tests that are not overriding a test in Base
@pytest.mark.parametrize(
"dtype, engine_type",
[
(np.int8, libindex.Int8Engine),
(np.int16, libindex.Int16Engine),
(np.int32, libindex.Int32Engine),
(np.int64, libindex.Int64Engine),
],
)
def test_engine_type(self, dtype, engine_type):
if dtype != np.int64:
# num. of uniques required to push CategoricalIndex.codes to a
# dtype (128 categories required for .codes dtype to be int16 etc.)
num_uniques = {np.int8: 1, np.int16: 128, np.int32: 32768}[dtype]
ci = CategoricalIndex(range(num_uniques))
else:
# having 2**32 - 2**31 categories would be very memory-intensive,
# so we cheat a bit with the dtype
ci = CategoricalIndex(range(32768)) # == 2**16 - 2**(16 - 1)
arr = ci.values._ndarray.astype("int64")
NDArrayBacked.__init__(ci._data, arr, ci.dtype)
assert np.issubdtype(ci.codes.dtype, dtype)
assert isinstance(ci._engine, engine_type)
@pytest.mark.parametrize(
"func,op_name",
[
(lambda idx: idx - idx, "__sub__"),
(lambda idx: idx + idx, "__add__"),
(lambda idx: idx - ["a", "b"], "__sub__"),
(lambda idx: idx + ["a", "b"], "__add__"),
(lambda idx: ["a", "b"] - idx, "__rsub__"),
(lambda idx: ["a", "b"] + idx, "__radd__"),
],
)
def test_disallow_addsub_ops(self, func, op_name):
# GH 10039
# set ops (+/-) raise TypeError
idx = Index(Categorical(["a", "b"]))
cat_or_list = "'(Categorical|list)' and '(Categorical|list)'"
msg = "|".join(
[
f"cannot perform {op_name} with this index type: CategoricalIndex",
"can only concatenate list",
rf"unsupported operand type\(s\) for [\+-]: {cat_or_list}",
]
)
with pytest.raises(TypeError, match=msg):
func(idx)
def test_method_delegation(self):
ci = CategoricalIndex(list("aabbca"), categories=list("cabdef"))
result = ci.set_categories(list("cab"))
tm.assert_index_equal(
result, CategoricalIndex(list("aabbca"), categories=list("cab"))
)
ci = CategoricalIndex(list("aabbca"), categories=list("cab"))
result = ci.rename_categories(list("efg"))
tm.assert_index_equal(
result, CategoricalIndex(list("ffggef"), categories=list("efg"))
)
# GH18862 (let rename_categories take callables)
result = ci.rename_categories(lambda x: x.upper())
tm.assert_index_equal(
result, CategoricalIndex(list("AABBCA"), categories=list("CAB"))
)
ci = CategoricalIndex(list("aabbca"), categories=list("cab"))
result = ci.add_categories(["d"])
tm.assert_index_equal(
result, CategoricalIndex(list("aabbca"), categories=list("cabd"))
)
ci = CategoricalIndex(list("aabbca"), categories=list("cab"))
result = ci.remove_categories(["c"])
tm.assert_index_equal(
result,
CategoricalIndex(list("aabb") + [np.nan] + ["a"], categories=list("ab")),
)
ci = CategoricalIndex(list("aabbca"), categories=list("cabdef"))
result = ci.as_unordered()
tm.assert_index_equal(result, ci)
ci = CategoricalIndex(list("aabbca"), categories=list("cabdef"))
result = ci.as_ordered()
tm.assert_index_equal(
result,
CategoricalIndex(list("aabbca"), categories=list("cabdef"), ordered=True),
)
# invalid
msg = "cannot use inplace with CategoricalIndex"
with pytest.raises(ValueError, match=msg):
ci.set_categories(list("cab"), inplace=True)
|
bsd-3-clause
|
pythonvietnam/scikit-learn
|
examples/manifold/plot_lle_digits.py
|
181
|
8510
|
"""
=============================================================================
Manifold learning on handwritten digits: Locally Linear Embedding, Isomap...
=============================================================================
An illustration of various embeddings on the digits dataset.
The RandomTreesEmbedding, from the :mod:`sklearn.ensemble` module, is not
technically a manifold embedding method, as it learns a high-dimensional
representation on which we apply a dimensionality reduction method.
However, it is often useful to cast a dataset into a representation in
which the classes are linearly-separable.
t-SNE will be initialized with the embedding that is generated by PCA in
this example, which is not the default setting. It ensures global stability
of the embedding, i.e., the embedding does not depend on random
initialization.
"""
# Authors: Fabian Pedregosa <[email protected]>
# Olivier Grisel <[email protected]>
# Mathieu Blondel <[email protected]>
# Gael Varoquaux
# License: BSD 3 clause (C) INRIA 2011
print(__doc__)
from time import time
import numpy as np
import matplotlib.pyplot as plt
from matplotlib import offsetbox
from sklearn import (manifold, datasets, decomposition, ensemble, lda,
random_projection)
digits = datasets.load_digits(n_class=6)
X = digits.data
y = digits.target
n_samples, n_features = X.shape
n_neighbors = 30
#----------------------------------------------------------------------
# Scale and visualize the embedding vectors
def plot_embedding(X, title=None):
x_min, x_max = np.min(X, 0), np.max(X, 0)
X = (X - x_min) / (x_max - x_min)
plt.figure()
ax = plt.subplot(111)
for i in range(X.shape[0]):
plt.text(X[i, 0], X[i, 1], str(digits.target[i]),
color=plt.cm.Set1(y[i] / 10.),
fontdict={'weight': 'bold', 'size': 9})
if hasattr(offsetbox, 'AnnotationBbox'):
# only print thumbnails with matplotlib > 1.0
shown_images = np.array([[1., 1.]]) # just something big
for i in range(digits.data.shape[0]):
dist = np.sum((X[i] - shown_images) ** 2, 1)
if np.min(dist) < 4e-3:
# don't show points that are too close
continue
shown_images = np.r_[shown_images, [X[i]]]
imagebox = offsetbox.AnnotationBbox(
offsetbox.OffsetImage(digits.images[i], cmap=plt.cm.gray_r),
X[i])
ax.add_artist(imagebox)
plt.xticks([]), plt.yticks([])
if title is not None:
plt.title(title)
#----------------------------------------------------------------------
# Plot images of the digits
n_img_per_row = 20
img = np.zeros((10 * n_img_per_row, 10 * n_img_per_row))
for i in range(n_img_per_row):
ix = 10 * i + 1
for j in range(n_img_per_row):
iy = 10 * j + 1
img[ix:ix + 8, iy:iy + 8] = X[i * n_img_per_row + j].reshape((8, 8))
plt.imshow(img, cmap=plt.cm.binary)
plt.xticks([])
plt.yticks([])
plt.title('A selection from the 64-dimensional digits dataset')
#----------------------------------------------------------------------
# Random 2D projection using a random unitary matrix
print("Computing random projection")
rp = random_projection.SparseRandomProjection(n_components=2, random_state=42)
X_projected = rp.fit_transform(X)
plot_embedding(X_projected, "Random Projection of the digits")
#----------------------------------------------------------------------
# Projection on to the first 2 principal components
print("Computing PCA projection")
t0 = time()
X_pca = decomposition.TruncatedSVD(n_components=2).fit_transform(X)
plot_embedding(X_pca,
"Principal Components projection of the digits (time %.2fs)" %
(time() - t0))
#----------------------------------------------------------------------
# Projection on to the first 2 linear discriminant components
print("Computing LDA projection")
X2 = X.copy()
X2.flat[::X.shape[1] + 1] += 0.01 # Make X invertible
t0 = time()
X_lda = lda.LDA(n_components=2).fit_transform(X2, y)
plot_embedding(X_lda,
"Linear Discriminant projection of the digits (time %.2fs)" %
(time() - t0))
#----------------------------------------------------------------------
# Isomap projection of the digits dataset
print("Computing Isomap embedding")
t0 = time()
X_iso = manifold.Isomap(n_neighbors, n_components=2).fit_transform(X)
print("Done.")
plot_embedding(X_iso,
"Isomap projection of the digits (time %.2fs)" %
(time() - t0))
#----------------------------------------------------------------------
# Locally linear embedding of the digits dataset
print("Computing LLE embedding")
clf = manifold.LocallyLinearEmbedding(n_neighbors, n_components=2,
method='standard')
t0 = time()
X_lle = clf.fit_transform(X)
print("Done. Reconstruction error: %g" % clf.reconstruction_error_)
plot_embedding(X_lle,
"Locally Linear Embedding of the digits (time %.2fs)" %
(time() - t0))
#----------------------------------------------------------------------
# Modified Locally linear embedding of the digits dataset
print("Computing modified LLE embedding")
clf = manifold.LocallyLinearEmbedding(n_neighbors, n_components=2,
method='modified')
t0 = time()
X_mlle = clf.fit_transform(X)
print("Done. Reconstruction error: %g" % clf.reconstruction_error_)
plot_embedding(X_mlle,
"Modified Locally Linear Embedding of the digits (time %.2fs)" %
(time() - t0))
#----------------------------------------------------------------------
# HLLE embedding of the digits dataset
print("Computing Hessian LLE embedding")
clf = manifold.LocallyLinearEmbedding(n_neighbors, n_components=2,
method='hessian')
t0 = time()
X_hlle = clf.fit_transform(X)
print("Done. Reconstruction error: %g" % clf.reconstruction_error_)
plot_embedding(X_hlle,
"Hessian Locally Linear Embedding of the digits (time %.2fs)" %
(time() - t0))
#----------------------------------------------------------------------
# LTSA embedding of the digits dataset
print("Computing LTSA embedding")
clf = manifold.LocallyLinearEmbedding(n_neighbors, n_components=2,
method='ltsa')
t0 = time()
X_ltsa = clf.fit_transform(X)
print("Done. Reconstruction error: %g" % clf.reconstruction_error_)
plot_embedding(X_ltsa,
"Local Tangent Space Alignment of the digits (time %.2fs)" %
(time() - t0))
#----------------------------------------------------------------------
# MDS embedding of the digits dataset
print("Computing MDS embedding")
clf = manifold.MDS(n_components=2, n_init=1, max_iter=100)
t0 = time()
X_mds = clf.fit_transform(X)
print("Done. Stress: %f" % clf.stress_)
plot_embedding(X_mds,
"MDS embedding of the digits (time %.2fs)" %
(time() - t0))
#----------------------------------------------------------------------
# Random Trees embedding of the digits dataset
print("Computing Totally Random Trees embedding")
hasher = ensemble.RandomTreesEmbedding(n_estimators=200, random_state=0,
max_depth=5)
t0 = time()
X_transformed = hasher.fit_transform(X)
pca = decomposition.TruncatedSVD(n_components=2)
X_reduced = pca.fit_transform(X_transformed)
plot_embedding(X_reduced,
"Random forest embedding of the digits (time %.2fs)" %
(time() - t0))
#----------------------------------------------------------------------
# Spectral embedding of the digits dataset
print("Computing Spectral embedding")
embedder = manifold.SpectralEmbedding(n_components=2, random_state=0,
eigen_solver="arpack")
t0 = time()
X_se = embedder.fit_transform(X)
plot_embedding(X_se,
"Spectral embedding of the digits (time %.2fs)" %
(time() - t0))
#----------------------------------------------------------------------
# t-SNE embedding of the digits dataset
print("Computing t-SNE embedding")
tsne = manifold.TSNE(n_components=2, init='pca', random_state=0)
t0 = time()
X_tsne = tsne.fit_transform(X)
plot_embedding(X_tsne,
"t-SNE embedding of the digits (time %.2fs)" %
(time() - t0))
plt.show()
|
bsd-3-clause
|
bhargav/scikit-learn
|
sklearn/datasets/tests/test_rcv1.py
|
322
|
2414
|
"""Test the rcv1 loader.
Skipped if rcv1 is not already downloaded to data_home.
"""
import errno
import scipy.sparse as sp
import numpy as np
from sklearn.datasets import fetch_rcv1
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import SkipTest
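# Hedged note (added; not part of the original test): fetching the dataset once
# beforehand avoids the SkipTest branch below, e.g.:
def _download_rcv1_once():
    # The first call downloads RCV1 into scikit-learn's data_home;
    # subsequent calls reuse the cached copy.
    return fetch_rcv1(download_if_missing=True)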
def test_fetch_rcv1():
try:
data1 = fetch_rcv1(shuffle=False, download_if_missing=False)
except IOError as e:
if e.errno == errno.ENOENT:
raise SkipTest("Download RCV1 dataset to run this test.")
X1, Y1 = data1.data, data1.target
cat_list, s1 = data1.target_names.tolist(), data1.sample_id
# test sparsity
assert_true(sp.issparse(X1))
assert_true(sp.issparse(Y1))
assert_equal(60915113, X1.data.size)
assert_equal(2606875, Y1.data.size)
# test shapes
assert_equal((804414, 47236), X1.shape)
assert_equal((804414, 103), Y1.shape)
assert_equal((804414,), s1.shape)
assert_equal(103, len(cat_list))
# test ordering of categories
first_categories = [u'C11', u'C12', u'C13', u'C14', u'C15', u'C151']
assert_array_equal(first_categories, cat_list[:6])
    # test number of samples for some categories
some_categories = ('GMIL', 'E143', 'CCAT')
number_non_zero_in_cat = (5, 1206, 381327)
for num, cat in zip(number_non_zero_in_cat, some_categories):
j = cat_list.index(cat)
assert_equal(num, Y1[:, j].data.size)
# test shuffling and subset
data2 = fetch_rcv1(shuffle=True, subset='train', random_state=77,
download_if_missing=False)
X2, Y2 = data2.data, data2.target
s2 = data2.sample_id
# The first 23149 samples are the training samples
assert_array_equal(np.sort(s1[:23149]), np.sort(s2))
# test some precise values
some_sample_ids = (2286, 3274, 14042)
for sample_id in some_sample_ids:
idx1 = s1.tolist().index(sample_id)
idx2 = s2.tolist().index(sample_id)
feature_values_1 = X1[idx1, :].toarray()
feature_values_2 = X2[idx2, :].toarray()
assert_almost_equal(feature_values_1, feature_values_2)
target_values_1 = Y1[idx1, :].toarray()
target_values_2 = Y2[idx2, :].toarray()
assert_almost_equal(target_values_1, target_values_2)
|
bsd-3-clause
|
alephu5/Soundbyte
|
environment/lib/python3.3/site-packages/pandas/tseries/tests/test_timeseries.py
|
1
|
115033
|
# pylint: disable-msg=E1101,W0612
from datetime import datetime, time, timedelta, date
import sys
import os
import operator
from distutils.version import LooseVersion
import nose
import numpy as np
randn = np.random.randn
from pandas import (Index, Series, TimeSeries, DataFrame,
isnull, date_range, Timestamp, Period, DatetimeIndex,
Int64Index, to_datetime, bdate_range)
from pandas.core.daterange import DateRange
import pandas.core.datetools as datetools
import pandas.tseries.offsets as offsets
import pandas.tseries.tools as tools
import pandas.tseries.frequencies as fmod
import pandas as pd
from pandas.util.testing import assert_series_equal, assert_almost_equal
import pandas.util.testing as tm
from pandas.tslib import NaT, iNaT
import pandas.lib as lib
import pandas.tslib as tslib
import pandas.index as _index
from pandas.compat import range, long, StringIO, lrange, lmap, zip, product
import pandas.core.datetools as dt
from numpy.random import rand
from numpy.testing import assert_array_equal
from pandas.util.testing import assert_frame_equal
import pandas.compat as compat
import pandas.core.common as com
from pandas import concat
from pandas import _np_version_under1p7
from numpy.testing.decorators import slow
def _skip_if_no_pytz():
try:
import pytz
except ImportError:
raise nose.SkipTest("pytz not installed")
def _skip_if_has_locale():
import locale
lang, _ = locale.getlocale()
if lang is not None:
raise nose.SkipTest("Specific locale is set {0}".format(lang))
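def _duplicate_dates_sketch():
    # Hedged sketch (added; not part of the original tests): with duplicate
    # timestamps, scalar lookup returns a sub-Series for the duplicated dates
    # and a scalar for unique ones, as exercised in
    # TestTimeSeriesDuplicates.test_duplicate_dates_indexing below.
    idx = DatetimeIndex([datetime(2000, 1, 2), datetime(2000, 1, 2),
                         datetime(2000, 1, 3)])
    ts = Series([1., 2., 3.], index=idx)
    return ts[datetime(2000, 1, 2)], ts[datetime(2000, 1, 3)]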
class TestTimeSeriesDuplicates(tm.TestCase):
_multiprocess_can_split_ = True
def setUp(self):
dates = [datetime(2000, 1, 2), datetime(2000, 1, 2),
datetime(2000, 1, 2), datetime(2000, 1, 3),
datetime(2000, 1, 3), datetime(2000, 1, 3),
datetime(2000, 1, 4), datetime(2000, 1, 4),
datetime(2000, 1, 4), datetime(2000, 1, 5)]
self.dups = Series(np.random.randn(len(dates)), index=dates)
def test_constructor(self):
tm.assert_isinstance(self.dups, TimeSeries)
tm.assert_isinstance(self.dups.index, DatetimeIndex)
def test_is_unique_monotonic(self):
self.assert_(not self.dups.index.is_unique)
def test_index_unique(self):
uniques = self.dups.index.unique()
self.assert_(uniques.dtype == 'M8[ns]') # sanity
# #2563
self.assertTrue(isinstance(uniques, DatetimeIndex))
dups_local = self.dups.index.tz_localize('US/Eastern')
dups_local.name = 'foo'
result = dups_local.unique()
self.assertTrue(result.tz is not None)
self.assertEquals(result.name, 'foo')
def test_index_dupes_contains(self):
d = datetime(2011, 12, 5, 20, 30)
ix = DatetimeIndex([d, d])
self.assertTrue(d in ix)
def test_duplicate_dates_indexing(self):
ts = self.dups
uniques = ts.index.unique()
for date in uniques:
result = ts[date]
mask = ts.index == date
total = (ts.index == date).sum()
expected = ts[mask]
if total > 1:
assert_series_equal(result, expected)
else:
assert_almost_equal(result, expected[0])
cp = ts.copy()
cp[date] = 0
expected = Series(np.where(mask, 0, ts), index=ts.index)
assert_series_equal(cp, expected)
self.assertRaises(KeyError, ts.__getitem__, datetime(2000, 1, 6))
# new index
ts[datetime(2000,1,6)] = 0
self.assert_(ts[datetime(2000,1,6)] == 0)
def test_range_slice(self):
idx = DatetimeIndex(['1/1/2000', '1/2/2000', '1/2/2000', '1/3/2000',
'1/4/2000'])
ts = Series(np.random.randn(len(idx)), index=idx)
result = ts['1/2/2000':]
expected = ts[1:]
assert_series_equal(result, expected)
result = ts['1/2/2000':'1/3/2000']
expected = ts[1:4]
assert_series_equal(result, expected)
def test_groupby_average_dup_values(self):
result = self.dups.groupby(level=0).mean()
expected = self.dups.groupby(self.dups.index).mean()
assert_series_equal(result, expected)
def test_indexing_over_size_cutoff(self):
import datetime
# #1821
old_cutoff = _index._SIZE_CUTOFF
try:
_index._SIZE_CUTOFF = 1000
            # create a large list of non-periodic datetimes
dates = []
sec = datetime.timedelta(seconds=1)
half_sec = datetime.timedelta(microseconds=500000)
d = datetime.datetime(2011, 12, 5, 20, 30)
n = 1100
for i in range(n):
dates.append(d)
dates.append(d + sec)
dates.append(d + sec + half_sec)
dates.append(d + sec + sec + half_sec)
d += 3 * sec
# duplicate some values in the list
duplicate_positions = np.random.randint(0, len(dates) - 1, 20)
for p in duplicate_positions:
dates[p + 1] = dates[p]
df = DataFrame(np.random.randn(len(dates), 4),
index=dates,
columns=list('ABCD'))
pos = n * 3
timestamp = df.index[pos]
self.assert_(timestamp in df.index)
# it works!
df.ix[timestamp]
self.assert_(len(df.ix[[timestamp]]) > 0)
finally:
_index._SIZE_CUTOFF = old_cutoff
def test_indexing_unordered(self):
# GH 2437
rng = date_range(start='2011-01-01', end='2011-01-15')
ts = Series(randn(len(rng)), index=rng)
ts2 = concat([ts[0:4],ts[-4:],ts[4:-4]])
for t in ts.index:
s = str(t)
expected = ts[t]
result = ts2[t]
self.assertTrue(expected == result)
# GH 3448 (ranges)
def compare(slobj):
result = ts2[slobj].copy()
result = result.sort_index()
expected = ts[slobj]
assert_series_equal(result,expected)
compare(slice('2011-01-01','2011-01-15'))
compare(slice('2010-12-30','2011-01-15'))
compare(slice('2011-01-01','2011-01-16'))
# partial ranges
compare(slice('2011-01-01','2011-01-6'))
compare(slice('2011-01-06','2011-01-8'))
compare(slice('2011-01-06','2011-01-12'))
# single values
result = ts2['2011'].sort_index()
expected = ts['2011']
assert_series_equal(result,expected)
# diff freq
rng = date_range(datetime(2005, 1, 1), periods=20, freq='M')
ts = Series(np.arange(len(rng)), index=rng)
ts = ts.take(np.random.permutation(20))
result = ts['2005']
for t in result.index:
self.assertTrue(t.year == 2005)
def test_indexing(self):
idx = date_range("2001-1-1", periods=20, freq='M')
ts = Series(np.random.rand(len(idx)),index=idx)
# getting
# GH 3070, make sure semantics work on Series/Frame
expected = ts['2001']
df = DataFrame(dict(A = ts))
result = df['2001']['A']
assert_series_equal(expected,result)
# setting
ts['2001'] = 1
expected = ts['2001']
df.loc['2001','A'] = 1
result = df['2001']['A']
assert_series_equal(expected,result)
# GH3546 (not including times on the last day)
idx = date_range(start='2013-05-31 00:00', end='2013-05-31 23:00', freq='H')
ts = Series(lrange(len(idx)), index=idx)
expected = ts['2013-05']
assert_series_equal(expected,ts)
idx = date_range(start='2013-05-31 00:00', end='2013-05-31 23:59', freq='S')
ts = Series(lrange(len(idx)), index=idx)
expected = ts['2013-05']
assert_series_equal(expected,ts)
idx = [ Timestamp('2013-05-31 00:00'), Timestamp(datetime(2013,5,31,23,59,59,999999))]
ts = Series(lrange(len(idx)), index=idx)
expected = ts['2013']
assert_series_equal(expected,ts)
# GH 3925, indexing with a seconds resolution string / datetime object
        df = DataFrame(randn(5, 5),
                       columns=['open', 'high', 'low', 'close', 'volume'],
                       index=date_range('2012-01-02 18:01:00', periods=5,
                                        tz='US/Central', freq='s'))
expected = df.loc[[df.index[2]]]
result = df['2012-01-02 18:01:02']
assert_frame_equal(result,expected)
# this is a single date, so will raise
self.assertRaises(KeyError, df.__getitem__, df.index[2],)
def assert_range_equal(left, right):
assert(left.equals(right))
assert(left.freq == right.freq)
assert(left.tz == right.tz)
class TestTimeSeries(tm.TestCase):
_multiprocess_can_split_ = True
def test_is_(self):
dti = DatetimeIndex(start='1/1/2005', end='12/1/2005', freq='M')
self.assertTrue(dti.is_(dti))
self.assertTrue(dti.is_(dti.view()))
self.assertFalse(dti.is_(dti.copy()))
def test_dti_slicing(self):
dti = DatetimeIndex(start='1/1/2005', end='12/1/2005', freq='M')
dti2 = dti[[1, 3, 5]]
v1 = dti2[0]
v2 = dti2[1]
v3 = dti2[2]
self.assertEquals(v1, Timestamp('2/28/2005'))
self.assertEquals(v2, Timestamp('4/30/2005'))
self.assertEquals(v3, Timestamp('6/30/2005'))
# don't carry freq through irregular slicing
self.assert_(dti2.freq is None)
def test_pass_datetimeindex_to_index(self):
# Bugs in #1396
rng = date_range('1/1/2000', '3/1/2000')
idx = Index(rng, dtype=object)
expected = Index(rng.to_pydatetime(), dtype=object)
self.assert_(np.array_equal(idx.values, expected.values))
def test_contiguous_boolean_preserve_freq(self):
rng = date_range('1/1/2000', '3/1/2000', freq='B')
mask = np.zeros(len(rng), dtype=bool)
mask[10:20] = True
masked = rng[mask]
expected = rng[10:20]
self.assert_(expected.freq is not None)
assert_range_equal(masked, expected)
mask[22] = True
masked = rng[mask]
self.assert_(masked.freq is None)
def test_getitem_median_slice_bug(self):
index = date_range('20090415', '20090519', freq='2B')
s = Series(np.random.randn(13), index=index)
indexer = [slice(6, 7, None)]
result = s[indexer]
expected = s[indexer[0]]
assert_series_equal(result, expected)
def test_series_box_timestamp(self):
rng = date_range('20090415', '20090519', freq='B')
s = Series(rng)
tm.assert_isinstance(s[5], Timestamp)
rng = date_range('20090415', '20090519', freq='B')
s = Series(rng, index=rng)
tm.assert_isinstance(s[5], Timestamp)
tm.assert_isinstance(s.iget_value(5), Timestamp)
def test_date_range_ambiguous_arguments(self):
# #2538
start = datetime(2011, 1, 1, 5, 3, 40)
end = datetime(2011, 1, 1, 8, 9, 40)
self.assertRaises(ValueError, date_range, start, end,
freq='s', periods=10)
def test_timestamp_to_datetime(self):
_skip_if_no_pytz()
rng = date_range('20090415', '20090519',
tz='US/Eastern')
stamp = rng[0]
dtval = stamp.to_pydatetime()
self.assertEquals(stamp, dtval)
self.assertEquals(stamp.tzinfo, dtval.tzinfo)
def test_index_convert_to_datetime_array(self):
_skip_if_no_pytz()
def _check_rng(rng):
converted = rng.to_pydatetime()
tm.assert_isinstance(converted, np.ndarray)
for x, stamp in zip(converted, rng):
tm.assert_isinstance(x, datetime)
self.assertEquals(x, stamp.to_pydatetime())
self.assertEquals(x.tzinfo, stamp.tzinfo)
rng = date_range('20090415', '20090519')
rng_eastern = date_range('20090415', '20090519', tz='US/Eastern')
rng_utc = date_range('20090415', '20090519', tz='utc')
_check_rng(rng)
_check_rng(rng_eastern)
_check_rng(rng_utc)
def test_ctor_str_intraday(self):
rng = DatetimeIndex(['1-1-2000 00:00:01'])
self.assert_(rng[0].second == 1)
def test_series_ctor_plus_datetimeindex(self):
rng = date_range('20090415', '20090519', freq='B')
data = dict((k, 1) for k in rng)
result = Series(data, index=rng)
self.assert_(result.index is rng)
def test_series_pad_backfill_limit(self):
index = np.arange(10)
s = Series(np.random.randn(10), index=index)
result = s[:2].reindex(index, method='pad', limit=5)
expected = s[:2].reindex(index).fillna(method='pad')
expected[-3:] = np.nan
assert_series_equal(result, expected)
result = s[-2:].reindex(index, method='backfill', limit=5)
expected = s[-2:].reindex(index).fillna(method='backfill')
expected[:3] = np.nan
assert_series_equal(result, expected)
def test_series_fillna_limit(self):
index = np.arange(10)
s = Series(np.random.randn(10), index=index)
result = s[:2].reindex(index)
result = result.fillna(method='pad', limit=5)
expected = s[:2].reindex(index).fillna(method='pad')
expected[-3:] = np.nan
assert_series_equal(result, expected)
result = s[-2:].reindex(index)
result = result.fillna(method='bfill', limit=5)
expected = s[-2:].reindex(index).fillna(method='backfill')
expected[:3] = np.nan
assert_series_equal(result, expected)
def test_frame_pad_backfill_limit(self):
index = np.arange(10)
df = DataFrame(np.random.randn(10, 4), index=index)
result = df[:2].reindex(index, method='pad', limit=5)
expected = df[:2].reindex(index).fillna(method='pad')
expected.values[-3:] = np.nan
tm.assert_frame_equal(result, expected)
result = df[-2:].reindex(index, method='backfill', limit=5)
expected = df[-2:].reindex(index).fillna(method='backfill')
expected.values[:3] = np.nan
tm.assert_frame_equal(result, expected)
def test_frame_fillna_limit(self):
index = np.arange(10)
df = DataFrame(np.random.randn(10, 4), index=index)
result = df[:2].reindex(index)
result = result.fillna(method='pad', limit=5)
expected = df[:2].reindex(index).fillna(method='pad')
expected.values[-3:] = np.nan
tm.assert_frame_equal(result, expected)
result = df[-2:].reindex(index)
result = result.fillna(method='backfill', limit=5)
expected = df[-2:].reindex(index).fillna(method='backfill')
expected.values[:3] = np.nan
tm.assert_frame_equal(result, expected)
def test_frame_setitem_timestamp(self):
# 2155
columns = DatetimeIndex(start='1/1/2012', end='2/1/2012',
freq=datetools.bday)
index = lrange(10)
data = DataFrame(columns=columns, index=index)
t = datetime(2012, 11, 1)
ts = Timestamp(t)
data[ts] = np.nan # works
def test_sparse_series_fillna_limit(self):
index = np.arange(10)
s = Series(np.random.randn(10), index=index)
ss = s[:2].reindex(index).to_sparse()
result = ss.fillna(method='pad', limit=5)
expected = ss.fillna(method='pad', limit=5)
expected = expected.to_dense()
expected[-3:] = np.nan
expected = expected.to_sparse()
assert_series_equal(result, expected)
ss = s[-2:].reindex(index).to_sparse()
result = ss.fillna(method='backfill', limit=5)
expected = ss.fillna(method='backfill')
expected = expected.to_dense()
expected[:3] = np.nan
expected = expected.to_sparse()
assert_series_equal(result, expected)
def test_sparse_series_pad_backfill_limit(self):
index = np.arange(10)
s = Series(np.random.randn(10), index=index)
s = s.to_sparse()
result = s[:2].reindex(index, method='pad', limit=5)
expected = s[:2].reindex(index).fillna(method='pad')
expected = expected.to_dense()
expected[-3:] = np.nan
expected = expected.to_sparse()
assert_series_equal(result, expected)
result = s[-2:].reindex(index, method='backfill', limit=5)
expected = s[-2:].reindex(index).fillna(method='backfill')
expected = expected.to_dense()
expected[:3] = np.nan
expected = expected.to_sparse()
assert_series_equal(result, expected)
def test_sparse_frame_pad_backfill_limit(self):
index = np.arange(10)
df = DataFrame(np.random.randn(10, 4), index=index)
sdf = df.to_sparse()
result = sdf[:2].reindex(index, method='pad', limit=5)
expected = sdf[:2].reindex(index).fillna(method='pad')
expected = expected.to_dense()
expected.values[-3:] = np.nan
expected = expected.to_sparse()
tm.assert_frame_equal(result, expected)
result = sdf[-2:].reindex(index, method='backfill', limit=5)
expected = sdf[-2:].reindex(index).fillna(method='backfill')
expected = expected.to_dense()
expected.values[:3] = np.nan
expected = expected.to_sparse()
tm.assert_frame_equal(result, expected)
def test_sparse_frame_fillna_limit(self):
index = np.arange(10)
df = DataFrame(np.random.randn(10, 4), index=index)
sdf = df.to_sparse()
result = sdf[:2].reindex(index)
result = result.fillna(method='pad', limit=5)
expected = sdf[:2].reindex(index).fillna(method='pad')
expected = expected.to_dense()
expected.values[-3:] = np.nan
expected = expected.to_sparse()
tm.assert_frame_equal(result, expected)
result = sdf[-2:].reindex(index)
result = result.fillna(method='backfill', limit=5)
expected = sdf[-2:].reindex(index).fillna(method='backfill')
expected = expected.to_dense()
expected.values[:3] = np.nan
expected = expected.to_sparse()
tm.assert_frame_equal(result, expected)
def test_pad_require_monotonicity(self):
rng = date_range('1/1/2000', '3/1/2000', freq='B')
rng2 = rng[::2][::-1]
self.assertRaises(ValueError, rng2.get_indexer, rng,
method='pad')
def test_frame_ctor_datetime64_column(self):
rng = date_range('1/1/2000 00:00:00', '1/1/2000 1:59:50',
freq='10s')
dates = np.asarray(rng)
df = DataFrame({'A': np.random.randn(len(rng)), 'B': dates})
self.assert_(np.issubdtype(df['B'].dtype, np.dtype('M8[ns]')))
def test_frame_add_datetime64_column(self):
rng = date_range('1/1/2000 00:00:00', '1/1/2000 1:59:50',
freq='10s')
df = DataFrame(index=np.arange(len(rng)))
df['A'] = rng
self.assert_(np.issubdtype(df['A'].dtype, np.dtype('M8[ns]')))
def test_frame_datetime64_pre1900_repr(self):
df = DataFrame({'year': date_range('1/1/1700', periods=50,
freq='A-DEC')})
# it works!
repr(df)
def test_frame_add_datetime64_col_other_units(self):
n = 100
units = ['h', 'm', 's', 'ms', 'D', 'M', 'Y']
ns_dtype = np.dtype('M8[ns]')
for unit in units:
dtype = np.dtype('M8[%s]' % unit)
vals = np.arange(n, dtype=np.int64).view(dtype)
df = DataFrame({'ints': np.arange(n)}, index=np.arange(n))
df[unit] = vals
ex_vals = to_datetime(vals.astype('O'))
self.assert_(df[unit].dtype == ns_dtype)
self.assert_((df[unit].values == ex_vals).all())
# Test insertion into existing datetime64 column
df = DataFrame({'ints': np.arange(n)}, index=np.arange(n))
df['dates'] = np.arange(n, dtype=np.int64).view(ns_dtype)
for unit in units:
dtype = np.dtype('M8[%s]' % unit)
vals = np.arange(n, dtype=np.int64).view(dtype)
tmp = df.copy()
tmp['dates'] = vals
ex_vals = to_datetime(vals.astype('O'))
self.assert_((tmp['dates'].values == ex_vals).all())
def test_to_datetime_unit(self):
epoch = 1370745748
s = Series([ epoch + t for t in range(20) ])
result = to_datetime(s,unit='s')
expected = Series([ Timestamp('2013-06-09 02:42:28') + timedelta(seconds=t) for t in range(20) ])
assert_series_equal(result,expected)
s = Series([ epoch + t for t in range(20) ]).astype(float)
result = to_datetime(s,unit='s')
expected = Series([ Timestamp('2013-06-09 02:42:28') + timedelta(seconds=t) for t in range(20) ])
assert_series_equal(result,expected)
s = Series([ epoch + t for t in range(20) ] + [iNaT])
result = to_datetime(s,unit='s')
expected = Series([ Timestamp('2013-06-09 02:42:28') + timedelta(seconds=t) for t in range(20) ] + [NaT])
assert_series_equal(result,expected)
s = Series([ epoch + t for t in range(20) ] + [iNaT]).astype(float)
result = to_datetime(s,unit='s')
expected = Series([ Timestamp('2013-06-09 02:42:28') + timedelta(seconds=t) for t in range(20) ] + [NaT])
assert_series_equal(result,expected)
s = concat([Series([ epoch + t for t in range(20) ]).astype(float),Series([np.nan])],ignore_index=True)
result = to_datetime(s,unit='s')
expected = Series([ Timestamp('2013-06-09 02:42:28') + timedelta(seconds=t) for t in range(20) ] + [NaT])
assert_series_equal(result,expected)
def test_series_ctor_datetime64(self):
rng = date_range('1/1/2000 00:00:00', '1/1/2000 1:59:50',
freq='10s')
dates = np.asarray(rng)
series = Series(dates)
self.assert_(np.issubdtype(series.dtype, np.dtype('M8[ns]')))
def test_index_cast_datetime64_other_units(self):
arr = np.arange(0, 100, 10, dtype=np.int64).view('M8[D]')
idx = Index(arr)
self.assert_((idx.values == tslib.cast_to_nanoseconds(arr)).all())
def test_index_astype_datetime64(self):
idx = Index([datetime(2012, 1, 1)], dtype=object)
if not _np_version_under1p7:
raise nose.SkipTest("test only valid in numpy < 1.7")
casted = idx.astype(np.dtype('M8[D]'))
expected = DatetimeIndex(idx.values)
tm.assert_isinstance(casted, DatetimeIndex)
self.assert_(casted.equals(expected))
def test_reindex_series_add_nat(self):
rng = date_range('1/1/2000 00:00:00', periods=10, freq='10s')
series = Series(rng)
result = series.reindex(lrange(15))
self.assert_(np.issubdtype(result.dtype, np.dtype('M8[ns]')))
mask = result.isnull()
self.assert_(mask[-5:].all())
self.assert_(not mask[:-5].any())
def test_reindex_frame_add_nat(self):
rng = date_range('1/1/2000 00:00:00', periods=10, freq='10s')
df = DataFrame({'A': np.random.randn(len(rng)), 'B': rng})
result = df.reindex(lrange(15))
self.assert_(np.issubdtype(result['B'].dtype, np.dtype('M8[ns]')))
mask = com.isnull(result)['B']
self.assert_(mask[-5:].all())
self.assert_(not mask[:-5].any())
def test_series_repr_nat(self):
series = Series([0, 1000, 2000, iNaT], dtype='M8[ns]')
result = repr(series)
expected = ('0 1970-01-01 00:00:00\n'
'1 1970-01-01 00:00:00.000001\n'
'2 1970-01-01 00:00:00.000002\n'
'3 NaT\n'
'dtype: datetime64[ns]')
self.assertEquals(result, expected)
def test_fillna_nat(self):
series = Series([0, 1, 2, iNaT], dtype='M8[ns]')
filled = series.fillna(method='pad')
filled2 = series.fillna(value=series.values[2])
expected = series.copy()
expected.values[3] = expected.values[2]
assert_series_equal(filled, expected)
assert_series_equal(filled2, expected)
df = DataFrame({'A': series})
filled = df.fillna(method='pad')
filled2 = df.fillna(value=series.values[2])
expected = DataFrame({'A': expected})
assert_frame_equal(filled, expected)
assert_frame_equal(filled2, expected)
series = Series([iNaT, 0, 1, 2], dtype='M8[ns]')
filled = series.fillna(method='bfill')
filled2 = series.fillna(value=series[1])
expected = series.copy()
expected[0] = expected[1]
assert_series_equal(filled, expected)
assert_series_equal(filled2, expected)
df = DataFrame({'A': series})
filled = df.fillna(method='bfill')
filled2 = df.fillna(value=series[1])
expected = DataFrame({'A': expected})
assert_frame_equal(filled, expected)
assert_frame_equal(filled2, expected)
def test_string_na_nat_conversion(self):
# GH #999, #858
from pandas.compat import parse_date
strings = np.array(['1/1/2000', '1/2/2000', np.nan,
'1/4/2000, 12:34:56'], dtype=object)
expected = np.empty(4, dtype='M8[ns]')
for i, val in enumerate(strings):
if com.isnull(val):
expected[i] = iNaT
else:
expected[i] = parse_date(val)
result = tslib.array_to_datetime(strings)
assert_almost_equal(result, expected)
result2 = to_datetime(strings)
tm.assert_isinstance(result2, DatetimeIndex)
assert_almost_equal(result, result2)
malformed = np.array(['1/100/2000', np.nan], dtype=object)
result = to_datetime(malformed)
assert_almost_equal(result, malformed)
self.assertRaises(ValueError, to_datetime, malformed,
errors='raise')
idx = ['a', 'b', 'c', 'd', 'e']
series = Series(['1/1/2000', np.nan, '1/3/2000', np.nan,
'1/5/2000'], index=idx, name='foo')
dseries = Series([to_datetime('1/1/2000'), np.nan,
to_datetime('1/3/2000'), np.nan,
to_datetime('1/5/2000')], index=idx, name='foo')
result = to_datetime(series)
dresult = to_datetime(dseries)
expected = Series(np.empty(5, dtype='M8[ns]'), index=idx)
for i in range(5):
x = series[i]
if isnull(x):
expected[i] = iNaT
else:
expected[i] = to_datetime(x)
assert_series_equal(result, expected)
self.assertEquals(result.name, 'foo')
assert_series_equal(dresult, expected)
self.assertEquals(dresult.name, 'foo')
def test_to_datetime_iso8601(self):
result = to_datetime(["2012-01-01 00:00:00"])
exp = Timestamp("2012-01-01 00:00:00")
self.assert_(result[0] == exp)
result = to_datetime(['20121001']) # bad iso 8601
exp = Timestamp('2012-10-01')
self.assert_(result[0] == exp)
def test_to_datetime_default(self):
rs = to_datetime('2001')
xp = datetime(2001, 1, 1)
        self.assertEqual(rs, xp)
#### dayfirst is essentially broken
#### to_datetime('01-13-2012', dayfirst=True)
#### self.assertRaises(ValueError, to_datetime('01-13-2012', dayfirst=True))
def test_to_datetime_on_datetime64_series(self):
# #2699
s = Series(date_range('1/1/2000', periods=10))
result = to_datetime(s)
self.assertEquals(result[0], s[0])
def test_to_datetime_with_apply(self):
        # this test has only been verified with US/None locales
_skip_if_has_locale()
# GH 5195
# with a format and coerce a single item to_datetime fails
td = Series(['May 04', 'Jun 02', 'Dec 11'], index=[1,2,3])
expected = pd.to_datetime(td, format='%b %y')
result = td.apply(pd.to_datetime, format='%b %y')
assert_series_equal(result, expected)
td = pd.Series(['May 04', 'Jun 02', ''], index=[1,2,3])
self.assertRaises(ValueError, lambda : pd.to_datetime(td,format='%b %y'))
self.assertRaises(ValueError, lambda : td.apply(pd.to_datetime, format='%b %y'))
expected = pd.to_datetime(td, format='%b %y', coerce=True)
result = td.apply(lambda x: pd.to_datetime(x, format='%b %y', coerce=True))
assert_series_equal(result, expected)
def test_nat_vector_field_access(self):
idx = DatetimeIndex(['1/1/2000', None, None, '1/4/2000'])
fields = ['year', 'quarter', 'month', 'day', 'hour',
'minute', 'second', 'microsecond', 'nanosecond',
'week', 'dayofyear']
for field in fields:
result = getattr(idx, field)
expected = [getattr(x, field) if x is not NaT else -1
for x in idx]
self.assert_(np.array_equal(result, expected))
def test_nat_scalar_field_access(self):
fields = ['year', 'quarter', 'month', 'day', 'hour',
'minute', 'second', 'microsecond', 'nanosecond',
'week', 'dayofyear']
for field in fields:
result = getattr(NaT, field)
self.assertEquals(result, -1)
self.assertEquals(NaT.weekday(), -1)
def test_to_datetime_types(self):
# empty string
result = to_datetime('')
self.assert_(result is NaT)
result = to_datetime(['', ''])
self.assert_(isnull(result).all())
# ints
result = Timestamp(0)
expected = to_datetime(0)
self.assert_(result == expected)
# GH 3888 (strings)
expected = to_datetime(['2012'])[0]
result = to_datetime('2012')
self.assert_(result == expected)
### array = ['2012','20120101','20120101 12:01:01']
array = ['20120101','20120101 12:01:01']
expected = list(to_datetime(array))
result = lmap(Timestamp,array)
tm.assert_almost_equal(result,expected)
### currently fails ###
### result = Timestamp('2012')
### expected = to_datetime('2012')
### self.assert_(result == expected)
def test_to_datetime_unprocessable_input(self):
# GH 4928
self.assert_(
np.array_equal(
to_datetime([1, '1']),
np.array([1, '1'], dtype='O')
)
)
self.assertRaises(TypeError, to_datetime, [1, '1'], errors='raise')
def test_to_datetime_other_datetime64_units(self):
# 5/25/2012
scalar = np.int64(1337904000000000).view('M8[us]')
as_obj = scalar.astype('O')
index = DatetimeIndex([scalar])
self.assertEquals(index[0], scalar.astype('O'))
value = Timestamp(scalar)
self.assertEquals(value, as_obj)
def test_to_datetime_list_of_integers(self):
rng = date_range('1/1/2000', periods=20)
rng = DatetimeIndex(rng.values)
ints = list(rng.asi8)
result = DatetimeIndex(ints)
self.assert_(rng.equals(result))
def test_to_datetime_dt64s(self):
in_bound_dts = [
np.datetime64('2000-01-01'),
np.datetime64('2000-01-02'),
]
for dt in in_bound_dts:
self.assertEqual(
pd.to_datetime(dt),
Timestamp(dt)
)
oob_dts = [
np.datetime64('1000-01-01'),
np.datetime64('5000-01-02'),
]
for dt in oob_dts:
self.assertRaises(ValueError, pd.to_datetime, dt, errors='raise')
self.assertRaises(ValueError, tslib.Timestamp, dt)
self.assert_(pd.to_datetime(dt, coerce=True) is NaT)
def test_to_datetime_array_of_dt64s(self):
dts = [
np.datetime64('2000-01-01'),
np.datetime64('2000-01-02'),
]
# Assuming all datetimes are in bounds, to_datetime() returns
# an array that is equal to Timestamp() parsing
self.assert_(
np.array_equal(
pd.to_datetime(dts, box=False),
np.array([Timestamp(x).asm8 for x in dts])
)
)
# A list of datetimes where the last one is out of bounds
dts_with_oob = dts + [np.datetime64('9999-01-01')]
self.assertRaises(
ValueError,
pd.to_datetime,
dts_with_oob,
coerce=False,
errors='raise'
)
self.assert_(
np.array_equal(
pd.to_datetime(dts_with_oob, box=False, coerce=True),
np.array(
[
Timestamp(dts_with_oob[0]).asm8,
Timestamp(dts_with_oob[1]).asm8,
iNaT,
],
dtype='M8'
)
)
)
# With coerce=False and errors='ignore', out of bounds datetime64s
# are converted to their .item(), which depending on the version of
# numpy is either a python datetime.datetime or datetime.date
self.assert_(
np.array_equal(
pd.to_datetime(dts_with_oob, box=False, coerce=False),
np.array(
[dt.item() for dt in dts_with_oob],
dtype='O'
)
)
)
def test_index_to_datetime(self):
idx = Index(['1/1/2000', '1/2/2000', '1/3/2000'])
result = idx.to_datetime()
expected = DatetimeIndex(datetools.to_datetime(idx.values))
self.assert_(result.equals(expected))
today = datetime.today()
idx = Index([today], dtype=object)
result = idx.to_datetime()
expected = DatetimeIndex([today])
self.assert_(result.equals(expected))
def test_to_datetime_freq(self):
xp = bdate_range('2000-1-1', periods=10, tz='UTC')
rs = xp.to_datetime()
self.assert_(xp.freq == rs.freq)
self.assert_(xp.tzinfo == rs.tzinfo)
def test_range_misspecified(self):
# GH #1095
self.assertRaises(ValueError, date_range, '1/1/2000')
self.assertRaises(ValueError, date_range, end='1/1/2000')
self.assertRaises(ValueError, date_range, periods=10)
self.assertRaises(ValueError, date_range, '1/1/2000', freq='H')
self.assertRaises(ValueError, date_range, end='1/1/2000', freq='H')
self.assertRaises(ValueError, date_range, periods=10, freq='H')
def test_reasonable_keyerror(self):
# GH #1062
index = DatetimeIndex(['1/3/2000'])
try:
index.get_loc('1/1/2000')
except KeyError as e:
self.assert_('2000' in str(e))
def test_reindex_with_datetimes(self):
rng = date_range('1/1/2000', periods=20)
ts = Series(np.random.randn(20), index=rng)
result = ts.reindex(list(ts.index[5:10]))
expected = ts[5:10]
tm.assert_series_equal(result, expected)
result = ts[list(ts.index[5:10])]
tm.assert_series_equal(result, expected)
def test_promote_datetime_date(self):
rng = date_range('1/1/2000', periods=20)
ts = Series(np.random.randn(20), index=rng)
ts_slice = ts[5:]
ts2 = ts_slice.copy()
ts2.index = [x.date() for x in ts2.index]
result = ts + ts2
result2 = ts2 + ts
expected = ts + ts[5:]
assert_series_equal(result, expected)
assert_series_equal(result2, expected)
# test asfreq
result = ts2.asfreq('4H', method='ffill')
expected = ts[5:].asfreq('4H', method='ffill')
assert_series_equal(result, expected)
result = rng.get_indexer(ts2.index)
expected = rng.get_indexer(ts_slice.index)
self.assert_(np.array_equal(result, expected))
def test_asfreq_normalize(self):
rng = date_range('1/1/2000 09:30', periods=20)
norm = date_range('1/1/2000', periods=20)
vals = np.random.randn(20)
ts = Series(vals, index=rng)
result = ts.asfreq('D', normalize=True)
norm = date_range('1/1/2000', periods=20)
expected = Series(vals, index=norm)
assert_series_equal(result, expected)
vals = np.random.randn(20, 3)
ts = DataFrame(vals, index=rng)
result = ts.asfreq('D', normalize=True)
expected = DataFrame(vals, index=norm)
assert_frame_equal(result, expected)
def test_date_range_gen_error(self):
rng = date_range('1/1/2000 00:00', '1/1/2000 00:18', freq='5min')
self.assertEquals(len(rng), 4)
def test_first_subset(self):
ts = _simple_ts('1/1/2000', '1/1/2010', freq='12h')
result = ts.first('10d')
self.assert_(len(result) == 20)
ts = _simple_ts('1/1/2000', '1/1/2010')
result = ts.first('10d')
self.assert_(len(result) == 10)
result = ts.first('3M')
expected = ts[:'3/31/2000']
assert_series_equal(result, expected)
result = ts.first('21D')
expected = ts[:21]
assert_series_equal(result, expected)
result = ts[:0].first('3M')
assert_series_equal(result, ts[:0])
def test_last_subset(self):
ts = _simple_ts('1/1/2000', '1/1/2010', freq='12h')
result = ts.last('10d')
self.assert_(len(result) == 20)
ts = _simple_ts('1/1/2000', '1/1/2010')
result = ts.last('10d')
self.assert_(len(result) == 10)
result = ts.last('21D')
expected = ts['12/12/2009':]
assert_series_equal(result, expected)
result = ts.last('21D')
expected = ts[-21:]
assert_series_equal(result, expected)
result = ts[:0].last('3M')
assert_series_equal(result, ts[:0])
def test_add_offset(self):
rng = date_range('1/1/2000', '2/1/2000')
result = rng + offsets.Hour(2)
expected = date_range('1/1/2000 02:00', '2/1/2000 02:00')
self.assert_(result.equals(expected))
def test_format_pre_1900_dates(self):
rng = date_range('1/1/1850', '1/1/1950', freq='A-DEC')
rng.format()
ts = Series(1, index=rng)
repr(ts)
def test_repeat(self):
rng = date_range('1/1/2000', '1/1/2001')
result = rng.repeat(5)
self.assert_(result.freq is None)
self.assert_(len(result) == 5 * len(rng))
def test_at_time(self):
rng = date_range('1/1/2000', '1/5/2000', freq='5min')
ts = Series(np.random.randn(len(rng)), index=rng)
rs = ts.at_time(rng[1])
self.assert_((rs.index.hour == rng[1].hour).all())
self.assert_((rs.index.minute == rng[1].minute).all())
self.assert_((rs.index.second == rng[1].second).all())
result = ts.at_time('9:30')
expected = ts.at_time(time(9, 30))
assert_series_equal(result, expected)
df = DataFrame(np.random.randn(len(rng), 3), index=rng)
result = ts[time(9, 30)]
result_df = df.ix[time(9, 30)]
expected = ts[(rng.hour == 9) & (rng.minute == 30)]
exp_df = df[(rng.hour == 9) & (rng.minute == 30)]
# expected.index = date_range('1/1/2000', '1/4/2000')
assert_series_equal(result, expected)
tm.assert_frame_equal(result_df, exp_df)
chunk = df.ix['1/4/2000':]
result = chunk.ix[time(9, 30)]
expected = result_df[-1:]
tm.assert_frame_equal(result, expected)
# midnight, everything
rng = date_range('1/1/2000', '1/31/2000')
ts = Series(np.random.randn(len(rng)), index=rng)
result = ts.at_time(time(0, 0))
assert_series_equal(result, ts)
# time doesn't exist
rng = date_range('1/1/2012', freq='23Min', periods=384)
ts = Series(np.random.randn(len(rng)), rng)
rs = ts.at_time('16:00')
self.assert_(len(rs) == 0)
def test_at_time_frame(self):
rng = date_range('1/1/2000', '1/5/2000', freq='5min')
ts = DataFrame(np.random.randn(len(rng), 2), index=rng)
rs = ts.at_time(rng[1])
self.assert_((rs.index.hour == rng[1].hour).all())
self.assert_((rs.index.minute == rng[1].minute).all())
self.assert_((rs.index.second == rng[1].second).all())
result = ts.at_time('9:30')
expected = ts.at_time(time(9, 30))
assert_frame_equal(result, expected)
result = ts.ix[time(9, 30)]
expected = ts.ix[(rng.hour == 9) & (rng.minute == 30)]
assert_frame_equal(result, expected)
# midnight, everything
rng = date_range('1/1/2000', '1/31/2000')
ts = DataFrame(np.random.randn(len(rng), 3), index=rng)
result = ts.at_time(time(0, 0))
assert_frame_equal(result, ts)
# time doesn't exist
rng = date_range('1/1/2012', freq='23Min', periods=384)
ts = DataFrame(np.random.randn(len(rng), 2), rng)
rs = ts.at_time('16:00')
self.assert_(len(rs) == 0)
def test_between_time(self):
rng = date_range('1/1/2000', '1/5/2000', freq='5min')
ts = Series(np.random.randn(len(rng)), index=rng)
stime = time(0, 0)
etime = time(1, 0)
close_open = product([True, False], [True, False])
for inc_start, inc_end in close_open:
filtered = ts.between_time(stime, etime, inc_start, inc_end)
exp_len = 13 * 4 + 1
if not inc_start:
exp_len -= 5
if not inc_end:
exp_len -= 4
self.assert_(len(filtered) == exp_len)
for rs in filtered.index:
t = rs.time()
if inc_start:
self.assert_(t >= stime)
else:
self.assert_(t > stime)
if inc_end:
self.assert_(t <= etime)
else:
self.assert_(t < etime)
result = ts.between_time('00:00', '01:00')
expected = ts.between_time(stime, etime)
assert_series_equal(result, expected)
# across midnight
rng = date_range('1/1/2000', '1/5/2000', freq='5min')
ts = Series(np.random.randn(len(rng)), index=rng)
stime = time(22, 0)
etime = time(9, 0)
close_open = product([True, False], [True, False])
for inc_start, inc_end in close_open:
filtered = ts.between_time(stime, etime, inc_start, inc_end)
exp_len = (12 * 11 + 1) * 4 + 1
if not inc_start:
exp_len -= 4
if not inc_end:
exp_len -= 4
self.assert_(len(filtered) == exp_len)
for rs in filtered.index:
t = rs.time()
if inc_start:
self.assert_((t >= stime) or (t <= etime))
else:
self.assert_((t > stime) or (t <= etime))
if inc_end:
self.assert_((t <= etime) or (t >= stime))
else:
self.assert_((t < etime) or (t >= stime))
def test_between_time_frame(self):
rng = date_range('1/1/2000', '1/5/2000', freq='5min')
ts = DataFrame(np.random.randn(len(rng), 2), index=rng)
stime = time(0, 0)
etime = time(1, 0)
close_open = product([True, False], [True, False])
for inc_start, inc_end in close_open:
filtered = ts.between_time(stime, etime, inc_start, inc_end)
exp_len = 13 * 4 + 1
if not inc_start:
exp_len -= 5
if not inc_end:
exp_len -= 4
self.assert_(len(filtered) == exp_len)
for rs in filtered.index:
t = rs.time()
if inc_start:
self.assert_(t >= stime)
else:
self.assert_(t > stime)
if inc_end:
self.assert_(t <= etime)
else:
self.assert_(t < etime)
result = ts.between_time('00:00', '01:00')
expected = ts.between_time(stime, etime)
assert_frame_equal(result, expected)
# across midnight
rng = date_range('1/1/2000', '1/5/2000', freq='5min')
ts = DataFrame(np.random.randn(len(rng), 2), index=rng)
stime = time(22, 0)
etime = time(9, 0)
close_open = product([True, False], [True, False])
for inc_start, inc_end in close_open:
filtered = ts.between_time(stime, etime, inc_start, inc_end)
exp_len = (12 * 11 + 1) * 4 + 1
if not inc_start:
exp_len -= 4
if not inc_end:
exp_len -= 4
self.assert_(len(filtered) == exp_len)
for rs in filtered.index:
t = rs.time()
if inc_start:
self.assert_((t >= stime) or (t <= etime))
else:
self.assert_((t > stime) or (t <= etime))
if inc_end:
self.assert_((t <= etime) or (t >= stime))
else:
self.assert_((t < etime) or (t >= stime))
def test_dti_constructor_preserve_dti_freq(self):
rng = date_range('1/1/2000', '1/2/2000', freq='5min')
rng2 = DatetimeIndex(rng)
self.assert_(rng.freq == rng2.freq)
def test_normalize(self):
rng = date_range('1/1/2000 9:30', periods=10, freq='D')
result = rng.normalize()
expected = date_range('1/1/2000', periods=10, freq='D')
self.assert_(result.equals(expected))
        rng_ns = pd.DatetimeIndex(
            np.array([1380585623454345752,
                      1380585612343234312]).astype("datetime64[ns]"))
        rng_ns_normalized = rng_ns.normalize()
        expected = pd.DatetimeIndex(
            np.array([1380585600000000000,
                      1380585600000000000]).astype("datetime64[ns]"))
self.assert_(rng_ns_normalized.equals(expected))
self.assert_(result.is_normalized)
self.assert_(not rng.is_normalized)
def test_to_period(self):
from pandas.tseries.period import period_range
ts = _simple_ts('1/1/2000', '1/1/2001')
pts = ts.to_period()
exp = ts.copy()
exp.index = period_range('1/1/2000', '1/1/2001')
assert_series_equal(pts, exp)
pts = ts.to_period('M')
self.assert_(pts.index.equals(exp.index.asfreq('M')))
def create_dt64_based_index(self):
data = [Timestamp('2007-01-01 10:11:12.123456Z'),
Timestamp('2007-01-01 10:11:13.789123Z')]
index = DatetimeIndex(data)
return index
def test_to_period_millisecond(self):
index = self.create_dt64_based_index()
period = index.to_period(freq='L')
self.assertEqual(2, len(period))
self.assertEqual(period[0], Period('2007-01-01 10:11:12.123Z', 'L'))
self.assertEqual(period[1], Period('2007-01-01 10:11:13.789Z', 'L'))
def test_to_period_microsecond(self):
index = self.create_dt64_based_index()
period = index.to_period(freq='U')
self.assertEqual(2, len(period))
self.assertEqual(period[0], Period('2007-01-01 10:11:12.123456Z', 'U'))
self.assertEqual(period[1], Period('2007-01-01 10:11:13.789123Z', 'U'))
def test_to_period_tz(self):
_skip_if_no_pytz()
from dateutil.tz import tzlocal
from pytz import utc as UTC
xp = date_range('1/1/2000', '4/1/2000').to_period()
ts = date_range('1/1/2000', '4/1/2000', tz='US/Eastern')
result = ts.to_period()[0]
expected = ts[0].to_period()
self.assert_(result == expected)
self.assert_(ts.to_period().equals(xp))
ts = date_range('1/1/2000', '4/1/2000', tz=UTC)
result = ts.to_period()[0]
expected = ts[0].to_period()
self.assert_(result == expected)
self.assert_(ts.to_period().equals(xp))
ts = date_range('1/1/2000', '4/1/2000', tz=tzlocal())
result = ts.to_period()[0]
expected = ts[0].to_period()
self.assert_(result == expected)
self.assert_(ts.to_period().equals(xp))
def test_frame_to_period(self):
K = 5
from pandas.tseries.period import period_range
dr = date_range('1/1/2000', '1/1/2001')
pr = period_range('1/1/2000', '1/1/2001')
df = DataFrame(randn(len(dr), K), index=dr)
df['mix'] = 'a'
pts = df.to_period()
exp = df.copy()
exp.index = pr
assert_frame_equal(pts, exp)
pts = df.to_period('M')
self.assert_(pts.index.equals(exp.index.asfreq('M')))
df = df.T
pts = df.to_period(axis=1)
exp = df.copy()
exp.columns = pr
assert_frame_equal(pts, exp)
pts = df.to_period('M', axis=1)
self.assert_(pts.columns.equals(exp.columns.asfreq('M')))
self.assertRaises(ValueError, df.to_period, axis=2)
def test_timestamp_fields(self):
# extra fields from DatetimeIndex like quarter and week
idx = tm.makeDateIndex(100)
fields = ['dayofweek', 'dayofyear', 'week', 'weekofyear', 'quarter']
for f in fields:
expected = getattr(idx, f)[-1]
result = getattr(Timestamp(idx[-1]), f)
self.assertEqual(result, expected)
self.assertEqual(idx.freq, Timestamp(idx[-1], idx.freq).freq)
self.assertEqual(idx.freqstr, Timestamp(idx[-1], idx.freq).freqstr)
def test_woy_boundary(self):
# make sure weeks at year boundaries are correct
d = datetime(2013,12,31)
result = Timestamp(d).week
expected = 1 # ISO standard
self.assertEqual(result, expected)
d = datetime(2008,12,28)
result = Timestamp(d).week
expected = 52 # ISO standard
self.assertEqual(result, expected)
d = datetime(2009,12,31)
result = Timestamp(d).week
expected = 53 # ISO standard
self.assertEqual(result, expected)
d = datetime(2010,1,1)
result = Timestamp(d).week
expected = 53 # ISO standard
self.assertEqual(result, expected)
d = datetime(2010,1,3)
result = Timestamp(d).week
expected = 53 # ISO standard
self.assertEqual(result, expected)
result = np.array([Timestamp(datetime(*args)).week for args in
[(2000,1,1),(2000,1,2),(2005,1,1),(2005,1,2)]])
self.assertTrue((result == [52, 52, 53, 53]).all())
def test_timestamp_date_out_of_range(self):
self.assertRaises(ValueError, Timestamp, '1676-01-01')
self.assertRaises(ValueError, Timestamp, '2263-01-01')
# 1475
self.assertRaises(ValueError, DatetimeIndex, ['1400-01-01'])
self.assertRaises(ValueError, DatetimeIndex, [datetime(1400, 1, 1)])
def test_timestamp_repr(self):
# pre-1900
stamp = Timestamp('1850-01-01', tz='US/Eastern')
repr(stamp)
iso8601 = '1850-01-01 01:23:45.012345'
stamp = Timestamp(iso8601, tz='US/Eastern')
result = repr(stamp)
self.assert_(iso8601 in result)
def test_timestamp_from_ordinal(self):
# GH 3042
dt = datetime(2011, 4, 16, 0, 0)
ts = Timestamp.fromordinal(dt.toordinal())
self.assert_(ts.to_pydatetime() == dt)
# with a tzinfo
stamp = Timestamp('2011-4-16', tz='US/Eastern')
dt_tz = stamp.to_pydatetime()
ts = Timestamp.fromordinal(dt_tz.toordinal(),tz='US/Eastern')
self.assert_(ts.to_pydatetime() == dt_tz)
def test_datetimeindex_integers_shift(self):
rng = date_range('1/1/2000', periods=20)
result = rng + 5
expected = rng.shift(5)
self.assert_(result.equals(expected))
result = rng - 5
expected = rng.shift(-5)
self.assert_(result.equals(expected))
def test_astype_object(self):
# NumPy 1.6.1 weak ns support
rng = date_range('1/1/2000', periods=20)
casted = rng.astype('O')
exp_values = list(rng)
self.assert_(np.array_equal(casted, exp_values))
def test_catch_infinite_loop(self):
offset = datetools.DateOffset(minute=5)
# blow up, don't loop forever
self.assertRaises(Exception, date_range, datetime(2011, 11, 11),
datetime(2011, 11, 12), freq=offset)
def test_append_concat(self):
rng = date_range('5/8/2012 1:45', periods=10, freq='5T')
ts = Series(np.random.randn(len(rng)), rng)
df = DataFrame(np.random.randn(len(rng), 4), index=rng)
result = ts.append(ts)
result_df = df.append(df)
ex_index = DatetimeIndex(np.tile(rng.values, 2))
self.assert_(result.index.equals(ex_index))
self.assert_(result_df.index.equals(ex_index))
appended = rng.append(rng)
self.assert_(appended.equals(ex_index))
appended = rng.append([rng, rng])
ex_index = DatetimeIndex(np.tile(rng.values, 3))
self.assert_(appended.equals(ex_index))
# different index names
rng1 = rng.copy()
rng2 = rng.copy()
rng1.name = 'foo'
rng2.name = 'bar'
self.assert_(rng1.append(rng1).name == 'foo')
self.assert_(rng1.append(rng2).name is None)
def test_append_concat_tz(self):
#GH 2938
_skip_if_no_pytz()
rng = date_range('5/8/2012 1:45', periods=10, freq='5T',
tz='US/Eastern')
rng2 = date_range('5/8/2012 2:35', periods=10, freq='5T',
tz='US/Eastern')
rng3 = date_range('5/8/2012 1:45', periods=20, freq='5T',
tz='US/Eastern')
ts = Series(np.random.randn(len(rng)), rng)
df = DataFrame(np.random.randn(len(rng), 4), index=rng)
ts2 = Series(np.random.randn(len(rng2)), rng2)
df2 = DataFrame(np.random.randn(len(rng2), 4), index=rng2)
result = ts.append(ts2)
result_df = df.append(df2)
self.assert_(result.index.equals(rng3))
self.assert_(result_df.index.equals(rng3))
appended = rng.append(rng2)
self.assert_(appended.equals(rng3))
def test_set_dataframe_column_ns_dtype(self):
x = DataFrame([datetime.now(), datetime.now()])
self.assert_(x[0].dtype == np.dtype('M8[ns]'))
def test_groupby_count_dateparseerror(self):
dr = date_range(start='1/1/2012', freq='5min', periods=10)
# BAD Example, datetimes first
s = Series(np.arange(10), index=[dr, lrange(10)])
grouped = s.groupby(lambda x: x[1] % 2 == 0)
result = grouped.count()
s = Series(np.arange(10), index=[lrange(10), dr])
grouped = s.groupby(lambda x: x[0] % 2 == 0)
expected = grouped.count()
assert_series_equal(result, expected)
def test_datetimeindex_repr_short(self):
dr = date_range(start='1/1/2012', periods=1)
repr(dr)
dr = date_range(start='1/1/2012', periods=2)
repr(dr)
dr = date_range(start='1/1/2012', periods=3)
repr(dr)
def test_constructor_int64_nocopy(self):
# #1624
arr = np.arange(1000, dtype=np.int64)
index = DatetimeIndex(arr)
arr[50:100] = -1
self.assert_((index.asi8[50:100] == -1).all())
arr = np.arange(1000, dtype=np.int64)
index = DatetimeIndex(arr, copy=True)
arr[50:100] = -1
self.assert_((index.asi8[50:100] != -1).all())
def test_series_interpolate_method_values(self):
# #1646
ts = _simple_ts('1/1/2000', '1/20/2000')
ts[::2] = np.nan
result = ts.interpolate(method='values')
exp = ts.interpolate()
assert_series_equal(result, exp)
def test_frame_datetime64_handling_groupby(self):
# it works!
df = DataFrame([(3, np.datetime64('2012-07-03')),
(3, np.datetime64('2012-07-04'))],
columns=['a', 'date'])
result = df.groupby('a').first()
self.assertEqual(result['date'][3], Timestamp('2012-07-03'))
def test_series_interpolate_intraday(self):
# #1698
index = pd.date_range('1/1/2012', periods=4, freq='12D')
ts = pd.Series([0, 12, 24, 36], index)
new_index = index.append(index + pd.DateOffset(days=1)).order()
exp = ts.reindex(new_index).interpolate(method='time')
index = pd.date_range('1/1/2012', periods=4, freq='12H')
ts = pd.Series([0, 12, 24, 36], index)
new_index = index.append(index + pd.DateOffset(hours=1)).order()
result = ts.reindex(new_index).interpolate(method='time')
self.assert_(np.array_equal(result.values, exp.values))
def test_frame_dict_constructor_datetime64_1680(self):
dr = date_range('1/1/2012', periods=10)
s = Series(dr, index=dr)
# it works!
DataFrame({'a': 'foo', 'b': s}, index=dr)
DataFrame({'a': 'foo', 'b': s.values}, index=dr)
def test_frame_datetime64_mixed_index_ctor_1681(self):
dr = date_range('2011/1/1', '2012/1/1', freq='W-FRI')
ts = Series(dr)
# it works!
d = DataFrame({'A': 'foo', 'B': ts}, index=dr)
self.assert_(d['B'].isnull().all())
def test_frame_timeseries_to_records(self):
index = date_range('1/1/2000', periods=10)
df = DataFrame(np.random.randn(10, 3), index=index,
columns=['a', 'b', 'c'])
result = df.to_records()
        self.assert_(result['index'].dtype == 'M8[ns]')
result = df.to_records(index=False)
def test_frame_datetime64_duplicated(self):
dates = date_range('2010-07-01', end='2010-08-05')
tst = DataFrame({'symbol': 'AAA', 'date': dates})
result = tst.duplicated(['date', 'symbol'])
self.assert_((-result).all())
tst = DataFrame({'date': dates})
result = tst.duplicated()
self.assert_((-result).all())
def test_timestamp_compare_with_early_datetime(self):
# e.g. datetime.min
stamp = Timestamp('2012-01-01')
self.assertFalse(stamp == datetime.min)
self.assertFalse(stamp == datetime(1600, 1, 1))
self.assertFalse(stamp == datetime(2700, 1, 1))
self.assert_(stamp != datetime.min)
self.assert_(stamp != datetime(1600, 1, 1))
self.assert_(stamp != datetime(2700, 1, 1))
self.assert_(stamp > datetime(1600, 1, 1))
self.assert_(stamp >= datetime(1600, 1, 1))
self.assert_(stamp < datetime(2700, 1, 1))
self.assert_(stamp <= datetime(2700, 1, 1))
def test_to_html_timestamp(self):
rng = date_range('2000-01-01', periods=10)
df = DataFrame(np.random.randn(10, 4), index=rng)
result = df.to_html()
self.assert_('2000-01-01' in result)
def test_to_csv_numpy_16_bug(self):
frame = DataFrame({'a': date_range('1/1/2000', periods=10)})
buf = StringIO()
frame.to_csv(buf)
result = buf.getvalue()
self.assert_('2000-01-01' in result)
def test_series_map_box_timestamps(self):
# #2689, #2627
s = Series(date_range('1/1/2000', periods=10))
def f(x):
return (x.hour, x.day, x.month)
# it works!
s.map(f)
s.apply(f)
DataFrame(s).applymap(f)
def test_concat_datetime_datetime64_frame(self):
# #2624
rows = []
rows.append([datetime(2010, 1, 1), 1])
rows.append([datetime(2010, 1, 2), 'hi'])
df2_obj = DataFrame.from_records(rows, columns=['date', 'test'])
ind = date_range(start="2000/1/1", freq="D", periods=10)
df1 = DataFrame({'date': ind, 'test':lrange(10)})
# it works!
pd.concat([df1, df2_obj])
def test_period_resample(self):
# GH3609
        s = Series(range(100),
                   index=date_range('20130101', freq='s', periods=100),
                   dtype='float')
        s[10:30] = np.nan
        expected = Series([34.5, 79.5],
                          index=[Period('2013-01-01 00:00', 'T'),
                                 Period('2013-01-01 00:01', 'T')])
result = s.to_period().resample('T', kind='period')
assert_series_equal(result, expected)
result2 = s.resample('T', kind='period')
assert_series_equal(result2, expected)
def test_period_resample_with_local_timezone(self):
# GH5430
_skip_if_no_pytz()
import pytz
local_timezone = pytz.timezone('America/Los_Angeles')
start = datetime(year=2013, month=11, day=1, hour=0, minute=0, tzinfo=pytz.utc)
# 1 day later
end = datetime(year=2013, month=11, day=2, hour=0, minute=0, tzinfo=pytz.utc)
index = pd.date_range(start, end, freq='H')
series = pd.Series(1, index=index)
series = series.tz_convert(local_timezone)
result = series.resample('D', kind='period')
# Create the expected series
        # Index is moved back a day with the timezone conversion from UTC to Pacific
        expected_index = pd.period_range(start=start, end=end, freq='D') - 1
expected = pd.Series(1, index=expected_index)
assert_series_equal(result, expected)
def _simple_ts(start, end, freq='D'):
rng = date_range(start, end, freq=freq)
return Series(np.random.randn(len(rng)), index=rng)
class TestDatetimeIndex(tm.TestCase):
_multiprocess_can_split_ = True
def test_hash_error(self):
index = date_range('20010101', periods=10)
with tm.assertRaisesRegexp(TypeError,
"unhashable type: %r" %
type(index).__name__):
hash(index)
def test_stringified_slice_with_tz(self):
#GH2658
import datetime
start=datetime.datetime.now()
idx=DatetimeIndex(start=start,freq="1d",periods=10)
df=DataFrame(lrange(10),index=idx)
df["2013-01-14 23:44:34.437768-05:00":] # no exception here
def test_append_join_nondatetimeindex(self):
rng = date_range('1/1/2000', periods=10)
idx = Index(['a', 'b', 'c', 'd'])
result = rng.append(idx)
tm.assert_isinstance(result[0], Timestamp)
# it works
rng.join(idx, how='outer')
def test_astype(self):
rng = date_range('1/1/2000', periods=10)
result = rng.astype('i8')
self.assert_(np.array_equal(result, rng.asi8))
def test_to_period_nofreq(self):
idx = DatetimeIndex(['2000-01-01', '2000-01-02', '2000-01-04'])
self.assertRaises(ValueError, idx.to_period)
idx = DatetimeIndex(['2000-01-01', '2000-01-02', '2000-01-03'],
freq='infer')
idx.to_period()
def test_000constructor_resolution(self):
# 2252
t1 = Timestamp((1352934390 * 1000000000) + 1000000 + 1000 + 1)
idx = DatetimeIndex([t1])
self.assert_(idx.nanosecond[0] == t1.nanosecond)
def test_constructor_coverage(self):
rng = date_range('1/1/2000', periods=10.5)
exp = date_range('1/1/2000', periods=10)
self.assert_(rng.equals(exp))
self.assertRaises(ValueError, DatetimeIndex, start='1/1/2000',
periods='foo', freq='D')
self.assertRaises(ValueError, DatetimeIndex, start='1/1/2000',
end='1/10/2000')
self.assertRaises(ValueError, DatetimeIndex, '1/1/2000')
# generator expression
gen = (datetime(2000, 1, 1) + timedelta(i) for i in range(10))
result = DatetimeIndex(gen)
expected = DatetimeIndex([datetime(2000, 1, 1) + timedelta(i)
for i in range(10)])
self.assert_(result.equals(expected))
# NumPy string array
strings = np.array(['2000-01-01', '2000-01-02', '2000-01-03'])
result = DatetimeIndex(strings)
expected = DatetimeIndex(strings.astype('O'))
self.assert_(result.equals(expected))
from_ints = DatetimeIndex(expected.asi8)
self.assert_(from_ints.equals(expected))
# non-conforming
self.assertRaises(ValueError, DatetimeIndex,
['2000-01-01', '2000-01-02', '2000-01-04'],
freq='D')
self.assertRaises(ValueError, DatetimeIndex,
start='2011-01-01', freq='b')
self.assertRaises(ValueError, DatetimeIndex,
end='2011-01-01', freq='B')
self.assertRaises(ValueError, DatetimeIndex, periods=10, freq='D')
def test_constructor_name(self):
idx = DatetimeIndex(start='2000-01-01', periods=1, freq='A',
name='TEST')
self.assertEquals(idx.name, 'TEST')
def test_comparisons_coverage(self):
rng = date_range('1/1/2000', periods=10)
# raise TypeError for now
self.assertRaises(TypeError, rng.__lt__, rng[3].value)
result = rng == list(rng)
exp = rng == rng
self.assert_(np.array_equal(result, exp))
def test_map(self):
rng = date_range('1/1/2000', periods=10)
f = lambda x: x.strftime('%Y%m%d')
result = rng.map(f)
exp = [f(x) for x in rng]
self.assert_(np.array_equal(result, exp))
def test_add_union(self):
rng = date_range('1/1/2000', periods=5)
rng2 = date_range('1/6/2000', periods=5)
result = rng + rng2
expected = rng.union(rng2)
self.assert_(result.equals(expected))
def test_misc_coverage(self):
rng = date_range('1/1/2000', periods=5)
result = rng.groupby(rng.day)
tm.assert_isinstance(list(result.values())[0][0], Timestamp)
idx = DatetimeIndex(['2000-01-03', '2000-01-01', '2000-01-02'])
self.assert_(idx.equals(list(idx)))
non_datetime = Index(list('abc'))
self.assert_(not idx.equals(list(non_datetime)))
def test_union_coverage(self):
idx = DatetimeIndex(['2000-01-03', '2000-01-01', '2000-01-02'])
ordered = DatetimeIndex(idx.order(), freq='infer')
result = ordered.union(idx)
self.assert_(result.equals(ordered))
result = ordered[:0].union(ordered)
self.assert_(result.equals(ordered))
self.assert_(result.freq == ordered.freq)
def test_union_bug_1730(self):
rng_a = date_range('1/1/2012', periods=4, freq='3H')
rng_b = date_range('1/1/2012', periods=4, freq='4H')
result = rng_a.union(rng_b)
exp = DatetimeIndex(sorted(set(list(rng_a)) | set(list(rng_b))))
self.assert_(result.equals(exp))
def test_union_bug_1745(self):
left = DatetimeIndex(['2012-05-11 15:19:49.695000'])
right = DatetimeIndex(['2012-05-29 13:04:21.322000',
'2012-05-11 15:27:24.873000',
'2012-05-11 15:31:05.350000'])
result = left.union(right)
exp = DatetimeIndex(sorted(set(list(left)) | set(list(right))))
self.assert_(result.equals(exp))
def test_union_bug_4564(self):
from pandas import DateOffset
left = date_range("2013-01-01", "2013-02-01")
right = left + DateOffset(minutes=15)
result = left.union(right)
exp = DatetimeIndex(sorted(set(list(left)) | set(list(right))))
self.assert_(result.equals(exp))
def test_intersection_bug_1708(self):
from pandas import DateOffset
index_1 = date_range('1/1/2012', periods=4, freq='12H')
index_2 = index_1 + DateOffset(hours=1)
result = index_1 & index_2
self.assertEqual(len(result), 0)
# def test_add_timedelta64(self):
# rng = date_range('1/1/2000', periods=5)
# delta = rng.values[3] - rng.values[1]
# result = rng + delta
# expected = rng + timedelta(2)
# self.assert_(result.equals(expected))
def test_get_duplicates(self):
idx = DatetimeIndex(['2000-01-01', '2000-01-02', '2000-01-02',
'2000-01-03', '2000-01-03', '2000-01-04'])
result = idx.get_duplicates()
ex = DatetimeIndex(['2000-01-02', '2000-01-03'])
self.assert_(result.equals(ex))
def test_argmin_argmax(self):
idx = DatetimeIndex(['2000-01-04', '2000-01-01', '2000-01-02'])
self.assertEqual(idx.argmin(), 1)
self.assertEqual(idx.argmax(), 0)
def test_order(self):
idx = DatetimeIndex(['2000-01-04', '2000-01-01', '2000-01-02'])
ordered = idx.order()
self.assert_(ordered.is_monotonic)
ordered = idx.order(ascending=False)
self.assert_(ordered[::-1].is_monotonic)
ordered, dexer = idx.order(return_indexer=True)
self.assert_(ordered.is_monotonic)
self.assert_(np.array_equal(dexer, [1, 2, 0]))
ordered, dexer = idx.order(return_indexer=True, ascending=False)
self.assert_(ordered[::-1].is_monotonic)
self.assert_(np.array_equal(dexer, [0, 2, 1]))
def test_insert(self):
idx = DatetimeIndex(['2000-01-04', '2000-01-01', '2000-01-02'])
result = idx.insert(2, datetime(2000, 1, 5))
exp = DatetimeIndex(['2000-01-04', '2000-01-01', '2000-01-05',
'2000-01-02'])
self.assert_(result.equals(exp))
# insertion of non-datetime should coerce to object index
result = idx.insert(1, 'inserted')
expected = Index([datetime(2000, 1, 4), 'inserted', datetime(2000, 1, 1),
datetime(2000, 1, 2)])
self.assert_(not isinstance(result, DatetimeIndex))
tm.assert_index_equal(result, expected)
idx = date_range('1/1/2000', periods=3, freq='M')
result = idx.insert(3, datetime(2000, 4, 30))
self.assert_(result.freqstr == 'M')
def test_map_bug_1677(self):
index = DatetimeIndex(['2012-04-25 09:30:00.393000'])
f = index.asof
result = index.map(f)
expected = np.array([f(index[0])])
self.assert_(np.array_equal(result, expected))
def test_groupby_function_tuple_1677(self):
df = DataFrame(np.random.rand(100),
index=date_range("1/1/2000", periods=100))
monthly_group = df.groupby(lambda x: (x.year, x.month))
result = monthly_group.mean()
tm.assert_isinstance(result.index[0], tuple)
def test_append_numpy_bug_1681(self):
# another datetime64 bug
dr = date_range('2011/1/1', '2012/1/1', freq='W-FRI')
a = DataFrame()
c = DataFrame({'A': 'foo', 'B': dr}, index=dr)
result = a.append(c)
self.assert_((result['B'] == dr).all())
def test_isin(self):
index = tm.makeDateIndex(4)
result = index.isin(index)
self.assert_(result.all())
result = index.isin(list(index))
self.assert_(result.all())
assert_almost_equal(index.isin([index[2], 5]),
[False, False, True, False])
def test_union(self):
i1 = Int64Index(np.arange(0, 20, 2))
i2 = Int64Index(np.arange(10, 30, 2))
result = i1.union(i2)
expected = Int64Index(np.arange(0, 30, 2))
self.assert_(np.array_equal(result, expected))
def test_union_with_DatetimeIndex(self):
i1 = Int64Index(np.arange(0, 20, 2))
i2 = DatetimeIndex(start='2012-01-03 00:00:00', periods=10, freq='D')
i1.union(i2) # Works
i2.union(i1) # Fails with "AttributeError: can't set attribute"
def test_time(self):
rng = pd.date_range('1/1/2000', freq='12min', periods=10)
result = pd.Index(rng).time
expected = [t.time() for t in rng]
self.assert_((result == expected).all())
def test_date(self):
rng = pd.date_range('1/1/2000', freq='12H', periods=10)
result = pd.Index(rng).date
expected = [t.date() for t in rng]
self.assert_((result == expected).all())
def test_does_not_convert_mixed_integer(self):
df = tm.makeCustomDataframe(10, 10, data_gen_f=lambda *args, **kwargs:
randn(), r_idx_type='i', c_idx_type='dt')
cols = df.columns.join(df.index, how='outer')
joined = cols.join(df.columns)
self.assertEqual(cols.dtype, np.dtype('O'))
self.assertEqual(cols.dtype, joined.dtype)
assert_array_equal(cols.values, joined.values)
def test_slice_keeps_name(self):
# GH4226
st = pd.Timestamp('2013-07-01 00:00:00', tz='America/Los_Angeles')
et = pd.Timestamp('2013-07-02 00:00:00', tz='America/Los_Angeles')
dr = pd.date_range(st, et, freq='H', name='timebucket')
self.assertEqual(dr[1:].name, dr.name)
def test_join_self(self):
index = date_range('1/1/2000', periods=10)
kinds = 'outer', 'inner', 'left', 'right'
for kind in kinds:
joined = index.join(index, how=kind)
self.assert_(index is joined)
def assert_index_parameters(self, index):
assert index.freq == '40960N'
assert index.inferred_freq == '40960N'
def test_ns_index(self):
if _np_version_under1p7:
raise nose.SkipTest
nsamples = 400
ns = int(1e9 / 24414)
dtstart = np.datetime64('2012-09-20T00:00:00')
dt = dtstart + np.arange(nsamples) * np.timedelta64(ns, 'ns')
freq = ns * pd.datetools.Nano()
index = pd.DatetimeIndex(dt, freq=freq, name='time')
self.assert_index_parameters(index)
new_index = pd.DatetimeIndex(start=index[0], end=index[-1], freq=index.freq)
self.assert_index_parameters(new_index)
def test_join_with_period_index(self):
df = tm.makeCustomDataframe(10, 10, data_gen_f=lambda *args:
np.random.randint(2), c_idx_type='p',
r_idx_type='dt')
s = df.iloc[:5, 0]
joins = 'left', 'right', 'inner', 'outer'
for join in joins:
with tm.assertRaisesRegexp(ValueError, 'can only call with other '
'PeriodIndex-ed objects'):
df.columns.join(s.index, how=join)
class TestDatetime64(tm.TestCase):
"""
Also test support for datetime64[ns] in Series / DataFrame
"""
def setUp(self):
dti = DatetimeIndex(start=datetime(2005, 1, 1),
end=datetime(2005, 1, 10), freq='Min')
self.series = Series(rand(len(dti)), dti)
def test_datetimeindex_accessors(self):
dti = DatetimeIndex(
freq='Q-JAN', start=datetime(1997, 12, 31), periods=100)
self.assertEquals(dti.year[0], 1998)
self.assertEquals(dti.month[0], 1)
self.assertEquals(dti.day[0], 31)
self.assertEquals(dti.hour[0], 0)
self.assertEquals(dti.minute[0], 0)
self.assertEquals(dti.second[0], 0)
self.assertEquals(dti.microsecond[0], 0)
self.assertEquals(dti.dayofweek[0], 5)
self.assertEquals(dti.dayofyear[0], 31)
self.assertEquals(dti.dayofyear[1], 120)
self.assertEquals(dti.weekofyear[0], 5)
self.assertEquals(dti.weekofyear[1], 18)
self.assertEquals(dti.quarter[0], 1)
self.assertEquals(dti.quarter[1], 2)
self.assertEquals(len(dti.year), 100)
self.assertEquals(len(dti.month), 100)
self.assertEquals(len(dti.day), 100)
self.assertEquals(len(dti.hour), 100)
self.assertEquals(len(dti.minute), 100)
self.assertEquals(len(dti.second), 100)
self.assertEquals(len(dti.microsecond), 100)
self.assertEquals(len(dti.dayofweek), 100)
self.assertEquals(len(dti.dayofyear), 100)
self.assertEquals(len(dti.weekofyear), 100)
self.assertEquals(len(dti.quarter), 100)
def test_nanosecond_field(self):
dti = DatetimeIndex(np.arange(10))
self.assert_(np.array_equal(dti.nanosecond, np.arange(10)))
def test_datetimeindex_diff(self):
dti1 = DatetimeIndex(freq='Q-JAN', start=datetime(1997, 12, 31),
periods=100)
dti2 = DatetimeIndex(freq='Q-JAN', start=datetime(1997, 12, 31),
periods=98)
self.assert_(len(dti1.diff(dti2)) == 2)
def test_fancy_getitem(self):
dti = DatetimeIndex(freq='WOM-1FRI', start=datetime(2005, 1, 1),
end=datetime(2010, 1, 1))
s = Series(np.arange(len(dti)), index=dti)
self.assertEquals(s[48], 48)
self.assertEquals(s['1/2/2009'], 48)
self.assertEquals(s['2009-1-2'], 48)
self.assertEquals(s[datetime(2009, 1, 2)], 48)
self.assertEquals(s[lib.Timestamp(datetime(2009, 1, 2))], 48)
self.assertRaises(KeyError, s.__getitem__, '2009-1-3')
assert_series_equal(s['3/6/2009':'2009-06-05'],
s[datetime(2009, 3, 6):datetime(2009, 6, 5)])
def test_fancy_setitem(self):
dti = DatetimeIndex(freq='WOM-1FRI', start=datetime(2005, 1, 1),
end=datetime(2010, 1, 1))
s = Series(np.arange(len(dti)), index=dti)
s[48] = -1
self.assertEquals(s[48], -1)
s['1/2/2009'] = -2
self.assertEquals(s[48], -2)
s['1/2/2009':'2009-06-05'] = -3
self.assert_((s[48:54] == -3).all())
def test_datetimeindex_constructor(self):
arr = ['1/1/2005', '1/2/2005', 'Jn 3, 2005', '2005-01-04']
self.assertRaises(Exception, DatetimeIndex, arr)
arr = ['1/1/2005', '1/2/2005', '1/3/2005', '2005-01-04']
idx1 = DatetimeIndex(arr)
arr = [datetime(2005, 1, 1), '1/2/2005', '1/3/2005', '2005-01-04']
idx2 = DatetimeIndex(arr)
arr = [lib.Timestamp(datetime(2005, 1, 1)), '1/2/2005', '1/3/2005',
'2005-01-04']
idx3 = DatetimeIndex(arr)
arr = np.array(['1/1/2005', '1/2/2005', '1/3/2005',
'2005-01-04'], dtype='O')
idx4 = DatetimeIndex(arr)
arr = to_datetime(['1/1/2005', '1/2/2005', '1/3/2005', '2005-01-04'])
idx5 = DatetimeIndex(arr)
arr = to_datetime(
['1/1/2005', '1/2/2005', 'Jan 3, 2005', '2005-01-04'])
idx6 = DatetimeIndex(arr)
idx7 = DatetimeIndex(['12/05/2007', '25/01/2008'], dayfirst=True)
idx8 = DatetimeIndex(['2007/05/12', '2008/01/25'], dayfirst=False,
yearfirst=True)
self.assert_(idx7.equals(idx8))
for other in [idx2, idx3, idx4, idx5, idx6]:
self.assert_((idx1.values == other.values).all())
sdate = datetime(1999, 12, 25)
edate = datetime(2000, 1, 1)
idx = DatetimeIndex(start=sdate, freq='1B', periods=20)
self.assertEquals(len(idx), 20)
self.assertEquals(idx[0], sdate + 0 * dt.bday)
self.assertEquals(idx.freq, 'B')
idx = DatetimeIndex(end=edate, freq=('D', 5), periods=20)
self.assertEquals(len(idx), 20)
self.assertEquals(idx[-1], edate)
self.assertEquals(idx.freq, '5D')
idx1 = DatetimeIndex(start=sdate, end=edate, freq='W-SUN')
idx2 = DatetimeIndex(start=sdate, end=edate,
freq=dt.Week(weekday=6))
self.assertEquals(len(idx1), len(idx2))
self.assertEquals(idx1.offset, idx2.offset)
idx1 = DatetimeIndex(start=sdate, end=edate, freq='QS')
idx2 = DatetimeIndex(start=sdate, end=edate,
freq=dt.QuarterBegin(startingMonth=1))
self.assertEquals(len(idx1), len(idx2))
self.assertEquals(idx1.offset, idx2.offset)
idx1 = DatetimeIndex(start=sdate, end=edate, freq='BQ')
idx2 = DatetimeIndex(start=sdate, end=edate,
freq=dt.BQuarterEnd(startingMonth=12))
self.assertEquals(len(idx1), len(idx2))
self.assertEquals(idx1.offset, idx2.offset)
def test_dti_snap(self):
dti = DatetimeIndex(['1/1/2002', '1/2/2002', '1/3/2002', '1/4/2002',
'1/5/2002', '1/6/2002', '1/7/2002'], freq='D')
res = dti.snap(freq='W-MON')
exp = date_range('12/31/2001', '1/7/2002', freq='w-mon')
exp = exp.repeat([3, 4])
self.assert_((res == exp).all())
res = dti.snap(freq='B')
exp = date_range('1/1/2002', '1/7/2002', freq='b')
exp = exp.repeat([1, 1, 1, 2, 2])
self.assert_((res == exp).all())
def test_dti_reset_index_round_trip(self):
dti = DatetimeIndex(start='1/1/2001', end='6/1/2001', freq='D')
d1 = DataFrame({'v': np.random.rand(len(dti))}, index=dti)
d2 = d1.reset_index()
self.assert_(d2.dtypes[0] == np.dtype('M8[ns]'))
d3 = d2.set_index('index')
assert_frame_equal(d1, d3, check_names=False)
# #2329
stamp = datetime(2012, 11, 22)
df = DataFrame([[stamp, 12.1]], columns=['Date', 'Value'])
df = df.set_index('Date')
self.assertEquals(df.index[0], stamp)
self.assertEquals(df.reset_index()['Date'][0], stamp)
def test_datetimeindex_union_join_empty(self):
dti = DatetimeIndex(start='1/1/2001', end='2/1/2001', freq='D')
empty = Index([])
result = dti.union(empty)
tm.assert_isinstance(result, DatetimeIndex)
self.assert_(result is result)
result = dti.join(empty)
tm.assert_isinstance(result, DatetimeIndex)
def test_series_set_value(self):
# #1561
dates = [datetime(2001, 1, 1), datetime(2001, 1, 2)]
index = DatetimeIndex(dates)
s = Series().set_value(dates[0], 1.)
s2 = s.set_value(dates[1], np.nan)
exp = Series([1., np.nan], index=index)
assert_series_equal(s2, exp)
# s = Series(index[:1], index[:1])
# s2 = s.set_value(dates[1], index[1])
# self.assert_(s2.values.dtype == 'M8[ns]')
@slow
def test_slice_locs_indexerror(self):
times = [datetime(2000, 1, 1) + timedelta(minutes=i * 10)
for i in range(100000)]
s = Series(lrange(100000), times)
s.ix[datetime(1900, 1, 1):datetime(2100, 1, 1)]
class TestSeriesDatetime64(tm.TestCase):
def setUp(self):
self.series = Series(date_range('1/1/2000', periods=10))
def test_auto_conversion(self):
series = Series(list(date_range('1/1/2000', periods=10)))
self.assert_(series.dtype == 'M8[ns]')
def test_constructor_cant_cast_datetime64(self):
self.assertRaises(TypeError, Series,
date_range('1/1/2000', periods=10), dtype=float)
def test_series_comparison_scalars(self):
val = datetime(2000, 1, 4)
result = self.series > val
expected = np.array([x > val for x in self.series])
self.assert_(np.array_equal(result, expected))
val = self.series[5]
result = self.series > val
expected = np.array([x > val for x in self.series])
self.assert_(np.array_equal(result, expected))
def test_between(self):
left, right = self.series[[2, 7]]
result = self.series.between(left, right)
expected = (self.series >= left) & (self.series <= right)
assert_series_equal(result, expected)
#----------------------------------------------------------------------
# NaT support
def test_NaT_scalar(self):
series = Series([0, 1000, 2000, iNaT], dtype='M8[ns]')
val = series[3]
self.assert_(com.isnull(val))
series[2] = val
self.assert_(com.isnull(series[2]))
def test_set_none_nan(self):
self.series[3] = None
self.assert_(self.series[3] is NaT)
self.series[3:5] = None
self.assert_(self.series[4] is NaT)
self.series[5] = np.nan
self.assert_(self.series[5] is NaT)
self.series[5:7] = np.nan
self.assert_(self.series[6] is NaT)
def test_intercept_astype_object(self):
# this test no longer makes sense as series is by default already M8[ns]
expected = self.series.astype('object')
df = DataFrame({'a': self.series,
'b': np.random.randn(len(self.series))})
result = df.values.squeeze()
self.assert_((result[:, 0] == expected.values).all())
df = DataFrame({'a': self.series,
'b': ['foo'] * len(self.series)})
result = df.values.squeeze()
self.assert_((result[:, 0] == expected.values).all())
def test_union(self):
rng1 = date_range('1/1/1999', '1/1/2012', freq='MS')
s1 = Series(np.random.randn(len(rng1)), rng1)
rng2 = date_range('1/1/1980', '12/1/2001', freq='MS')
s2 = Series(np.random.randn(len(rng2)), rng2)
df = DataFrame({'s1': s1, 's2': s2})
self.assert_(df.index.values.dtype == np.dtype('M8[ns]'))
def test_intersection(self):
rng = date_range('6/1/2000', '6/15/2000', freq='D')
rng = rng.delete(5)
rng2 = date_range('5/15/2000', '6/20/2000', freq='D')
rng2 = DatetimeIndex(rng2.values)
result = rng.intersection(rng2)
self.assert_(result.equals(rng))
# empty same freq GH2129
rng = date_range('6/1/2000', '6/15/2000', freq='T')
result = rng[0:0].intersection(rng)
self.assert_(len(result) == 0)
result = rng.intersection(rng[0:0])
self.assert_(len(result) == 0)
def test_date_range_bms_bug(self):
# #1645
rng = date_range('1/1/2000', periods=10, freq='BMS')
ex_first = Timestamp('2000-01-03')
self.assertEquals(rng[0], ex_first)
def test_string_index_series_name_converted(self):
# #1644
df = DataFrame(np.random.randn(10, 4),
index=date_range('1/1/2000', periods=10))
result = df.ix['1/3/2000']
self.assertEquals(result.name, df.index[2])
result = df.T['1/3/2000']
self.assertEquals(result.name, df.index[2])
class TestTimestamp(tm.TestCase):
def test_class_ops(self):
_skip_if_no_pytz()
import pytz
def compare(x,y):
self.assertEqual(int(Timestamp(x).value/1e9), int(Timestamp(y).value/1e9))
compare(Timestamp.now(),datetime.now())
compare(Timestamp.now('UTC'),datetime.now(pytz.timezone('UTC')))
compare(Timestamp.utcnow(),datetime.utcnow())
compare(Timestamp.today(),datetime.today())
def test_basics_nanos(self):
val = np.int64(946684800000000000).view('M8[ns]')
stamp = Timestamp(val.view('i8') + 500)
self.assert_(stamp.year == 2000)
self.assert_(stamp.month == 1)
self.assert_(stamp.microsecond == 0)
self.assert_(stamp.nanosecond == 500)
def test_unit(self):
def check(val,unit=None,h=1,s=1,us=0):
stamp = Timestamp(val, unit=unit)
self.assert_(stamp.year == 2000)
self.assert_(stamp.month == 1)
self.assert_(stamp.day == 1)
self.assert_(stamp.hour == h)
if unit != 'D':
self.assert_(stamp.minute == 1)
self.assert_(stamp.second == s)
self.assert_(stamp.microsecond == us)
else:
self.assert_(stamp.minute == 0)
self.assert_(stamp.second == 0)
self.assert_(stamp.microsecond == 0)
self.assert_(stamp.nanosecond == 0)
ts = Timestamp('20000101 01:01:01')
val = ts.value
days = (ts - Timestamp('1970-01-01')).days
check(val)
check(val/long(1000),unit='us')
check(val/long(1000000),unit='ms')
check(val/long(1000000000),unit='s')
check(days,unit='D',h=0)
# using truediv, so these are like floats
if compat.PY3:
check((val+500000)/long(1000000000),unit='s',us=500)
check((val+500000000)/long(1000000000),unit='s',us=500000)
check((val+500000)/long(1000000),unit='ms',us=500)
# get chopped in py2
else:
check((val+500000)/long(1000000000),unit='s')
check((val+500000000)/long(1000000000),unit='s')
check((val+500000)/long(1000000),unit='ms')
# ok
check((val+500000)/long(1000),unit='us',us=500)
check((val+500000000)/long(1000000),unit='ms',us=500000)
# floats
check(val/1000.0 + 5,unit='us',us=5)
check(val/1000.0 + 5000,unit='us',us=5000)
check(val/1000000.0 + 0.5,unit='ms',us=500)
check(val/1000000.0 + 0.005,unit='ms',us=5)
check(val/1000000000.0 + 0.5,unit='s',us=500000)
check(days + 0.5,unit='D',h=12)
# nan
result = Timestamp(np.nan)
self.assert_(result is NaT)
result = Timestamp(None)
self.assert_(result is NaT)
result = Timestamp(iNaT)
self.assert_(result is NaT)
result = Timestamp(NaT)
self.assert_(result is NaT)
def test_comparison(self):
# 5-18-2012 00:00:00.000
stamp = long(1337299200000000000)
val = Timestamp(stamp)
self.assert_(val == val)
self.assert_(not val != val)
self.assert_(not val < val)
self.assert_(val <= val)
self.assert_(not val > val)
self.assert_(val >= val)
other = datetime(2012, 5, 18)
self.assert_(val == other)
self.assert_(not val != other)
self.assert_(not val < other)
self.assert_(val <= other)
self.assert_(not val > other)
self.assert_(val >= other)
other = Timestamp(stamp + 100)
self.assert_(not val == other)
self.assert_(val != other)
self.assert_(val < other)
self.assert_(val <= other)
self.assert_(other > val)
self.assert_(other >= val)
def test_cant_compare_tz_naive_w_aware(self):
_skip_if_no_pytz()
# #1404
a = Timestamp('3/12/2012')
b = Timestamp('3/12/2012', tz='utc')
self.assertRaises(Exception, a.__eq__, b)
self.assertRaises(Exception, a.__ne__, b)
self.assertRaises(Exception, a.__lt__, b)
self.assertRaises(Exception, a.__gt__, b)
self.assertRaises(Exception, b.__eq__, a)
self.assertRaises(Exception, b.__ne__, a)
self.assertRaises(Exception, b.__lt__, a)
self.assertRaises(Exception, b.__gt__, a)
if sys.version_info < (3, 3):
self.assertRaises(Exception, a.__eq__, b.to_pydatetime())
self.assertRaises(Exception, a.to_pydatetime().__eq__, b)
else:
self.assertFalse(a == b.to_pydatetime())
self.assertFalse(a.to_pydatetime() == b)
def test_delta_preserve_nanos(self):
val = Timestamp(long(1337299200000000123))
result = val + timedelta(1)
self.assert_(result.nanosecond == val.nanosecond)
def test_frequency_misc(self):
self.assertEquals(fmod.get_freq_group('T'),
fmod.FreqGroup.FR_MIN)
code, stride = fmod.get_freq_code(offsets.Hour())
self.assertEquals(code, fmod.FreqGroup.FR_HR)
code, stride = fmod.get_freq_code((5, 'T'))
self.assertEquals(code, fmod.FreqGroup.FR_MIN)
self.assertEquals(stride, 5)
offset = offsets.Hour()
result = fmod.to_offset(offset)
self.assertEquals(result, offset)
result = fmod.to_offset((5, 'T'))
expected = offsets.Minute(5)
self.assertEquals(result, expected)
self.assertRaises(ValueError, fmod.get_freq_code, (5, 'baz'))
self.assertRaises(ValueError, fmod.to_offset, '100foo')
self.assertRaises(ValueError, fmod.to_offset, ('', ''))
result = fmod.get_standard_freq(offsets.Hour())
self.assertEquals(result, 'H')
def test_hash_equivalent(self):
d = {datetime(2011, 1, 1): 5}
stamp = Timestamp(datetime(2011, 1, 1))
self.assertEquals(d[stamp], 5)
def test_timestamp_compare_scalars(self):
# case where ndim == 0
lhs = np.datetime64(datetime(2013, 12, 6))
rhs = Timestamp('now')
nat = Timestamp('nat')
ops = {'gt': 'lt', 'lt': 'gt', 'ge': 'le', 'le': 'ge', 'eq': 'eq',
'ne': 'ne'}
for left, right in ops.items():
left_f = getattr(operator, left)
right_f = getattr(operator, right)
if pd._np_version_under1p7:
# you have to convert to timestamp for this to work with numpy
# scalars
expected = left_f(Timestamp(lhs), rhs)
# otherwise a TypeError is thrown
if left not in ('eq', 'ne'):
with tm.assertRaises(TypeError):
left_f(lhs, rhs)
else:
expected = left_f(lhs, rhs)
result = right_f(rhs, lhs)
self.assertEqual(result, expected)
expected = left_f(rhs, nat)
result = right_f(nat, rhs)
self.assertEqual(result, expected)
def test_timestamp_compare_series(self):
# make sure we can compare Timestamps on the right AND left hand side
# GH4982
s = Series(date_range('20010101', periods=10), name='dates')
s_nat = s.copy(deep=True)
s[0] = pd.Timestamp('nat')
s[3] = pd.Timestamp('nat')
ops = {'lt': 'gt', 'le': 'ge', 'eq': 'eq', 'ne': 'ne'}
for left, right in ops.items():
left_f = getattr(operator, left)
right_f = getattr(operator, right)
# no nats
expected = left_f(s, Timestamp('20010109'))
result = right_f(Timestamp('20010109'), s)
tm.assert_series_equal(result, expected)
# nats
expected = left_f(s, Timestamp('nat'))
result = right_f(Timestamp('nat'), s)
tm.assert_series_equal(result, expected)
# compare to timestamp with series containing nats
expected = left_f(s_nat, Timestamp('20010109'))
result = right_f(Timestamp('20010109'), s_nat)
tm.assert_series_equal(result, expected)
# compare to nat with series containing nats
expected = left_f(s_nat, Timestamp('nat'))
result = right_f(Timestamp('nat'), s_nat)
tm.assert_series_equal(result, expected)
class TestSlicing(tm.TestCase):
def test_slice_year(self):
dti = DatetimeIndex(freq='B', start=datetime(2005, 1, 1), periods=500)
s = Series(np.arange(len(dti)), index=dti)
result = s['2005']
expected = s[s.index.year == 2005]
assert_series_equal(result, expected)
df = DataFrame(np.random.rand(len(dti), 5), index=dti)
result = df.ix['2005']
expected = df[df.index.year == 2005]
assert_frame_equal(result, expected)
rng = date_range('1/1/2000', '1/1/2010')
result = rng.get_loc('2009')
expected = slice(3288, 3653)
self.assert_(result == expected)
def test_slice_quarter(self):
dti = DatetimeIndex(freq='D', start=datetime(2000, 6, 1), periods=500)
s = Series(np.arange(len(dti)), index=dti)
self.assertEquals(len(s['2001Q1']), 90)
df = DataFrame(np.random.rand(len(dti), 5), index=dti)
self.assertEquals(len(df.ix['1Q01']), 90)
def test_slice_month(self):
dti = DatetimeIndex(freq='D', start=datetime(2005, 1, 1), periods=500)
s = Series(np.arange(len(dti)), index=dti)
self.assertEquals(len(s['2005-11']), 30)
df = DataFrame(np.random.rand(len(dti), 5), index=dti)
self.assertEquals(len(df.ix['2005-11']), 30)
assert_series_equal(s['2005-11'], s['11-2005'])
def test_partial_slice(self):
rng = DatetimeIndex(freq='D', start=datetime(2005, 1, 1), periods=500)
s = Series(np.arange(len(rng)), index=rng)
result = s['2005-05':'2006-02']
expected = s['20050501':'20060228']
assert_series_equal(result, expected)
result = s['2005-05':]
expected = s['20050501':]
assert_series_equal(result, expected)
result = s[:'2006-02']
expected = s[:'20060228']
assert_series_equal(result, expected)
result = s['2005-1-1']
self.assert_(result == s.irow(0))
self.assertRaises(Exception, s.__getitem__, '2004-12-31')
def test_partial_slice_daily(self):
rng = DatetimeIndex(freq='H', start=datetime(2005, 1, 31), periods=500)
s = Series(np.arange(len(rng)), index=rng)
result = s['2005-1-31']
assert_series_equal(result, s.ix[:24])
self.assertRaises(Exception, s.__getitem__, '2004-12-31 00')
def test_partial_slice_hourly(self):
rng = DatetimeIndex(freq='T', start=datetime(2005, 1, 1, 20, 0, 0),
periods=500)
s = Series(np.arange(len(rng)), index=rng)
result = s['2005-1-1']
assert_series_equal(result, s.ix[:60 * 4])
result = s['2005-1-1 20']
assert_series_equal(result, s.ix[:60])
self.assert_(s['2005-1-1 20:00'] == s.ix[0])
self.assertRaises(Exception, s.__getitem__, '2004-12-31 00:15')
def test_partial_slice_minutely(self):
rng = DatetimeIndex(freq='S', start=datetime(2005, 1, 1, 23, 59, 0),
periods=500)
s = Series(np.arange(len(rng)), index=rng)
result = s['2005-1-1 23:59']
assert_series_equal(result, s.ix[:60])
result = s['2005-1-1']
assert_series_equal(result, s.ix[:60])
self.assert_(s[Timestamp('2005-1-1 23:59:00')] == s.ix[0])
self.assertRaises(Exception, s.__getitem__, '2004-12-31 00:00:00')
def test_partial_slicing_with_multiindex(self):
# GH 4758
# partial string indexing with a multi-index was buggy
df = DataFrame({'ACCOUNT':["ACCT1", "ACCT1", "ACCT1", "ACCT2"],
'TICKER':["ABC", "MNP", "XYZ", "XYZ"],
'val':[1,2,3,4]},
index=date_range("2013-06-19 09:30:00", periods=4, freq='5T'))
df_multi = df.set_index(['ACCOUNT', 'TICKER'], append=True)
expected = DataFrame([[1]],index=Index(['ABC'],name='TICKER'),columns=['val'])
result = df_multi.loc[('2013-06-19 09:30:00', 'ACCT1')]
assert_frame_equal(result, expected)
expected = df_multi.loc[(pd.Timestamp('2013-06-19 09:30:00', tz=None), 'ACCT1', 'ABC')]
result = df_multi.loc[('2013-06-19 09:30:00', 'ACCT1', 'ABC')]
assert_series_equal(result, expected)
# this is a KeyError as we don't do partial string selection on multi-levels
def f():
df_multi.loc[('2013-06-19', 'ACCT1', 'ABC')]
self.assertRaises(KeyError, f)
# GH 4294
# partial slice on a series mi
s = pd.DataFrame(randn(1000, 1000), index=pd.date_range('2000-1-1', periods=1000)).stack()
s2 = s[:-1].copy()
expected = s2['2000-1-4']
result = s2[pd.Timestamp('2000-1-4')]
assert_series_equal(result, expected)
result = s[pd.Timestamp('2000-1-4')]
expected = s['2000-1-4']
assert_series_equal(result, expected)
df2 = pd.DataFrame(s)
expected = df2.ix['2000-1-4']
result = df2.ix[pd.Timestamp('2000-1-4')]
assert_frame_equal(result, expected)
def test_date_range_normalize(self):
snap = datetime.today()
n = 50
rng = date_range(snap, periods=n, normalize=False, freq='2D')
offset = timedelta(2)
values = np.array([snap + i * offset for i in range(n)],
dtype='M8[ns]')
self.assert_(np.array_equal(rng, values))
rng = date_range(
'1/1/2000 08:15', periods=n, normalize=False, freq='B')
the_time = time(8, 15)
for val in rng:
self.assert_(val.time() == the_time)
def test_timedelta(self):
# this is valid too
index = date_range('1/1/2000', periods=50, freq='B')
shifted = index + timedelta(1)
back = shifted + timedelta(-1)
self.assert_(tm.equalContents(index, back))
self.assertEqual(shifted.freq, index.freq)
self.assertEqual(shifted.freq, back.freq)
result = index - timedelta(1)
expected = index + timedelta(-1)
self.assert_(result.equals(expected))
# GH4134, buggy with timedeltas
rng = date_range('2013', '2014')
s = Series(rng)
result1 = rng - pd.offsets.Hour(1)
result2 = DatetimeIndex(s - np.timedelta64(100000000))
result3 = rng - np.timedelta64(100000000)
result4 = DatetimeIndex(s - pd.offsets.Hour(1))
self.assert_(result1.equals(result4))
self.assert_(result2.equals(result3))
def test_shift(self):
ts = Series(np.random.randn(5),
index=date_range('1/1/2000', periods=5, freq='H'))
result = ts.shift(1, freq='5T')
exp_index = ts.index.shift(1, freq='5T')
self.assert_(result.index.equals(exp_index))
# GH #1063, multiple of same base
result = ts.shift(1, freq='4H')
exp_index = ts.index + datetools.Hour(4)
self.assert_(result.index.equals(exp_index))
idx = DatetimeIndex(['2000-01-01', '2000-01-02', '2000-01-04'])
self.assertRaises(ValueError, idx.shift, 1)
def test_setops_preserve_freq(self):
rng = date_range('1/1/2000', '1/1/2002')
result = rng[:50].union(rng[50:100])
self.assert_(result.freq == rng.freq)
result = rng[:50].union(rng[30:100])
self.assert_(result.freq == rng.freq)
result = rng[:50].union(rng[60:100])
self.assert_(result.freq is None)
result = rng[:50].intersection(rng[25:75])
self.assert_(result.freqstr == 'D')
nofreq = DatetimeIndex(list(rng[25:75]))
result = rng[:50].union(nofreq)
self.assert_(result.freq == rng.freq)
result = rng[:50].intersection(nofreq)
self.assert_(result.freq == rng.freq)
def test_min_max(self):
rng = date_range('1/1/2000', '12/31/2000')
rng2 = rng.take(np.random.permutation(len(rng)))
the_min = rng2.min()
the_max = rng2.max()
tm.assert_isinstance(the_min, Timestamp)
tm.assert_isinstance(the_max, Timestamp)
self.assertEqual(the_min, rng[0])
self.assertEqual(the_max, rng[-1])
self.assertEqual(rng.min(), rng[0])
self.assertEqual(rng.max(), rng[-1])
def test_min_max_series(self):
rng = date_range('1/1/2000', periods=10, freq='4h')
lvls = ['A', 'A', 'A', 'B', 'B', 'B', 'C', 'C', 'C', 'C']
df = DataFrame({'TS': rng, 'V': np.random.randn(len(rng)),
'L': lvls})
result = df.TS.max()
exp = Timestamp(df.TS.iget(-1))
self.assertTrue(isinstance(result, Timestamp))
self.assertEqual(result, exp)
result = df.TS.min()
exp = Timestamp(df.TS.iget(0))
self.assertTrue(isinstance(result, Timestamp))
self.assertEqual(result, exp)
def test_from_M8_structured(self):
dates = [(datetime(2012, 9, 9, 0, 0),
datetime(2012, 9, 8, 15, 10))]
arr = np.array(dates,
dtype=[('Date', 'M8[us]'), ('Forecasting', 'M8[us]')])
df = DataFrame(arr)
self.assertEqual(df['Date'][0], dates[0][0])
self.assertEqual(df['Forecasting'][0], dates[0][1])
s = Series(arr['Date'])
self.assertTrue(s[0], Timestamp)
self.assertEqual(s[0], dates[0][0])
s = Series.from_array(arr['Date'], Index([0]))
self.assertEqual(s[0], dates[0][0])
def test_get_level_values_box(self):
from pandas import MultiIndex
dates = date_range('1/1/2000', periods=4)
levels = [dates, [0, 1]]
labels = [[0, 0, 1, 1, 2, 2, 3, 3],
[0, 1, 0, 1, 0, 1, 0, 1]]
index = MultiIndex(levels=levels, labels=labels)
self.assertTrue(isinstance(index.get_level_values(0)[0], Timestamp))
def test_frame_apply_dont_convert_datetime64(self):
from pandas.tseries.offsets import BDay
df = DataFrame({'x1': [datetime(1996, 1, 1)]})
df = df.applymap(lambda x: x + BDay())
df = df.applymap(lambda x: x + BDay())
self.assertTrue(df.x1.dtype == 'M8[ns]')
def test_date_range_fy5252(self):
dr = date_range(start="2013-01-01",
periods=2,
freq=offsets.FY5253(startingMonth=1,
weekday=3,
variation="nearest"))
self.assertEqual(dr[0], Timestamp('2013-01-31'))
self.assertEqual(dr[1], Timestamp('2014-01-30'))
class TimeConversionFormats(tm.TestCase):
def test_to_datetime_format(self):
values = ['1/1/2000', '1/2/2000', '1/3/2000']
results1 = [ Timestamp('20000101'), Timestamp('20000201'),
Timestamp('20000301') ]
results2 = [ Timestamp('20000101'), Timestamp('20000102'),
Timestamp('20000103') ]
for vals, expecteds in [ (values, (Index(results1), Index(results2))),
(Series(values),(Series(results1), Series(results2))),
(values[0], (results1[0], results2[0])),
(values[1], (results1[1], results2[1])),
(values[2], (results1[2], results2[2])) ]:
for i, fmt in enumerate(['%d/%m/%Y', '%m/%d/%Y']):
result = to_datetime(vals, format=fmt)
expected = expecteds[i]
if isinstance(expected, Series):
assert_series_equal(result, Series(expected))
elif isinstance(expected, Timestamp):
self.assert_(result == expected)
else:
self.assert_(result.equals(expected))
def test_to_datetime_format_YYYYMMDD(self):
s = Series([19801222,19801222] + [19810105]*5)
expected = Series([ Timestamp(x) for x in s.apply(str) ])
result = to_datetime(s,format='%Y%m%d')
assert_series_equal(result, expected)
result = to_datetime(s.apply(str),format='%Y%m%d')
assert_series_equal(result, expected)
# with NaT
expected = Series([Timestamp("19801222"),Timestamp("19801222")] + [Timestamp("19810105")]*5)
expected[2] = np.nan
s[2] = np.nan
result = to_datetime(s,format='%Y%m%d')
assert_series_equal(result, expected)
# string with NaT
s = s.apply(str)
s[2] = 'nat'
result = to_datetime(s,format='%Y%m%d')
assert_series_equal(result, expected)
def test_to_datetime_format_microsecond(self):
val = '01-Apr-2011 00:00:01.978'
format = '%d-%b-%Y %H:%M:%S.%f'
result = to_datetime(val, format=format)
exp = dt.datetime.strptime(val, format)
self.assert_(result == exp)
def test_to_datetime_format_time(self):
data = [
['01/10/2010 15:20', '%m/%d/%Y %H:%M', Timestamp('2010-01-10 15:20')],
['01/10/2010 05:43', '%m/%d/%Y %I:%M', Timestamp('2010-01-10 05:43')],
['01/10/2010 13:56:01', '%m/%d/%Y %H:%M:%S', Timestamp('2010-01-10 13:56:01')]#,
#['01/10/2010 08:14 PM', '%m/%d/%Y %I:%M %p', Timestamp('2010-01-10 20:14')],
#['01/10/2010 07:40 AM', '%m/%d/%Y %I:%M %p', Timestamp('2010-01-10 07:40')],
#['01/10/2010 09:12:56 AM', '%m/%d/%Y %I:%M:%S %p', Timestamp('2010-01-10 09:12:56')]
]
for s, format, dt in data:
self.assertEqual(to_datetime(s, format=format), dt)
def test_to_datetime_format_weeks(self):
data = [
['2009324', '%Y%W%w', Timestamp('2009-08-13')],
['2013020', '%Y%U%w', Timestamp('2013-01-13')]
]
for s, format, dt in data:
self.assertEqual(to_datetime(s, format=format), dt)
class TestToDatetimeInferFormat(tm.TestCase):
def test_to_datetime_infer_datetime_format_consistent_format(self):
time_series = pd.Series(
pd.date_range('20000101', periods=50, freq='H')
)
test_formats = [
'%m-%d-%Y',
'%m/%d/%Y %H:%M:%S.%f',
'%Y-%m-%dT%H:%M:%S.%f',
]
for test_format in test_formats:
s_as_dt_strings = time_series.apply(
lambda x: x.strftime(test_format)
)
with_format = pd.to_datetime(s_as_dt_strings, format=test_format)
no_infer = pd.to_datetime(
s_as_dt_strings, infer_datetime_format=False
)
yes_infer = pd.to_datetime(
s_as_dt_strings, infer_datetime_format=True
)
# Whether the format is explicitly passed, it is inferred, or
# it is not inferred, the results should all be the same
self.assert_(np.array_equal(with_format, no_infer))
self.assert_(np.array_equal(no_infer, yes_infer))
def test_to_datetime_infer_datetime_format_inconsistent_format(self):
test_series = pd.Series(
np.array([
'01/01/2011 00:00:00',
'01-02-2011 00:00:00',
'2011-01-03T00:00:00',
]))
# When the format is inconsistent, infer_datetime_format should just
# fall back to the default parsing
self.assert_(np.array_equal(
pd.to_datetime(test_series, infer_datetime_format=False),
pd.to_datetime(test_series, infer_datetime_format=True)
))
test_series = pd.Series(
np.array([
'Jan/01/2011',
'Feb/01/2011',
'Mar/01/2011',
]))
self.assert_(np.array_equal(
pd.to_datetime(test_series, infer_datetime_format=False),
pd.to_datetime(test_series, infer_datetime_format=True)
))
def test_to_datetime_infer_datetime_format_series_with_nans(self):
test_series = pd.Series(
np.array([
'01/01/2011 00:00:00',
np.nan,
'01/03/2011 00:00:00',
np.nan,
]))
self.assert_(np.array_equal(
pd.to_datetime(test_series, infer_datetime_format=False),
pd.to_datetime(test_series, infer_datetime_format=True)
))
def test_to_datetime_infer_datetime_format_series_starting_with_nans(self):
test_series = pd.Series(
np.array([
np.nan,
np.nan,
'01/01/2011 00:00:00',
'01/02/2011 00:00:00',
'01/03/2011 00:00:00',
]))
self.assert_(np.array_equal(
pd.to_datetime(test_series, infer_datetime_format=False),
pd.to_datetime(test_series, infer_datetime_format=True)
))
class TestGuessDatetimeFormat(tm.TestCase):
def test_guess_datetime_format_with_parseable_formats(self):
dt_string_to_format = (
('20111230', '%Y%m%d'),
('2011-12-30', '%Y-%m-%d'),
('30-12-2011', '%d-%m-%Y'),
('2011-12-30 00:00:00', '%Y-%m-%d %H:%M:%S'),
('2011-12-30T00:00:00', '%Y-%m-%dT%H:%M:%S'),
('2011-12-30 00:00:00.000000', '%Y-%m-%d %H:%M:%S.%f'),
)
for dt_string, dt_format in dt_string_to_format:
self.assertEquals(
tools._guess_datetime_format(dt_string),
dt_format
)
def test_guess_datetime_format_with_dayfirst(self):
ambiguous_string = '01/01/2011'
self.assertEquals(
tools._guess_datetime_format(ambiguous_string, dayfirst=True),
'%d/%m/%Y'
)
self.assertEquals(
tools._guess_datetime_format(ambiguous_string, dayfirst=False),
'%m/%d/%Y'
)
def test_guess_datetime_format_with_locale_specific_formats(self):
# The month names will vary depending on the locale, in which
# case these won't be parsed properly (dateutil can't parse them)
_skip_if_has_locale()
dt_string_to_format = (
('30/Dec/2011', '%d/%b/%Y'),
('30/December/2011', '%d/%B/%Y'),
('30/Dec/2011 00:00:00', '%d/%b/%Y %H:%M:%S'),
)
for dt_string, dt_format in dt_string_to_format:
self.assertEquals(
tools._guess_datetime_format(dt_string),
dt_format
)
def test_guess_datetime_format_invalid_inputs(self):
# A datetime string must include a year, month and a day for it
# to be guessable, in addition to being a string that looks like
# a datetime
invalid_dts = [
'2013',
'01/2013',
'12:00:00',
'1/1/1/1',
'this_is_not_a_datetime',
'51a',
9,
datetime(2011, 1, 1),
]
for invalid_dt in invalid_dts:
self.assertTrue(tools._guess_datetime_format(invalid_dt) is None)
def test_guess_datetime_format_for_array(self):
expected_format = '%Y-%m-%d %H:%M:%S.%f'
dt_string = datetime(2011, 12, 30, 0, 0, 0).strftime(expected_format)
test_arrays = [
np.array([dt_string, dt_string, dt_string], dtype='O'),
np.array([np.nan, np.nan, dt_string], dtype='O'),
np.array([dt_string, 'random_string'], dtype='O'),
]
for test_array in test_arrays:
self.assertEqual(
tools._guess_datetime_format_for_array(test_array),
expected_format
)
format_for_string_of_nans = tools._guess_datetime_format_for_array(
np.array([np.nan, np.nan, np.nan], dtype='O')
)
self.assertTrue(format_for_string_of_nans is None)
if __name__ == '__main__':
nose.runmodule(argv=[__file__, '-vvs', '-x', '--pdb', '--pdb-failure'],
exit=False)
|
gpl-3.0
|
MalcolmMielle/Auto-Complete-Graph
|
tests/t_test/normaltest.py
|
1
|
4065
|
import numpy
import scipy.stats
import matplotlib.pyplot as plt
list1 = [67, 74, 75, 69, 65, 65, 66, 64, 69, 73, 70, 65, 68, 61.5]
list2 = [96, 86, 91, 91, 77, 93, 86, 81, 71, 94, 74, 93, 85, 64]
list3 = [71, 71]
list4 = [78, 78]
def allStat(list_in, list_in2):
print("T test")
res = scipy.stats.ttest_ind(list_in, list_in2, equal_var = False)  # <- equal_var=False means Welch's t-test, so homogeneity of variance is not required
print(res.statistic)
print(res.pvalue)
if res.pvalue <= 0.05:
print("COOL on a un interval de confiance de moins de 5% blabla. Statiquemtne t les deux listes sont differentes dans un interval de 5% (5% derreur accepté :P)")
def mean(list):
sum = 0
for element in list:
sum = sum + element
sum = sum / len(list)
return sum
def variance(list, mean):
sum_el = 0
for element in list:
temp_el = element - mean
temp_el = temp_el * temp_el
sum_el = sum_el + temp_el
sum_el = sum_el / (len(list) - 1)
return sum_el
def sd(variance):
standd = numpy.sqrt(variance)
return standd
def z_score(val, normal):
return (val - numpy.mean(normal)) / numpy.std(normal, ddof = 1)
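# Illustrative note (not in the original script): z_score standardises a value
# against the simulated normal sample, e.g. a value exactly one sample standard
# deviation above the mean gives a z-score of about 1.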
def sevent3(z_score_max, z_score_min, sd_v):
if(z_score_max >= 3*sd_v or z_score_max <= -3*sd_v):
print("NO max")
return False
elif(z_score_min >= 3*sd_v or z_score_min <= -3*sd_v):
print("NO min")
return False
else:
print("YES")
return True
print("Mean")
print(mean(list1))
print("Variance")
print(variance(list1, mean(list1)))
print("SD")
print(sd(variance(list1, mean(list1))))
#Calculating the normal
mean_v = mean(list1)
sd_v = sd(variance(list1, mean_v))
normal = numpy.random.normal(mean_v, sd_v, 1000)
print("variance")
print(numpy.std(normal, ddof = 1))
print("max min")
print(max(list1))
print(min(list1))
mean_v_2 = mean(list2)
sd_v_2 = sd(variance(list2, mean_v_2))
normal_2 = numpy.random.normal(mean_v_2, sd_v_2, 1000)
# ASSESSING THE NORMALITY
print("Is it normal list 1 ? ")
z_score_max = z_score(max(list1), normal)
z_score_min = z_score(min(list1), normal)
print(z_score_max)
print(z_score_min)
is_normal = sevent3(z_score_max, z_score_min, sd_v)
print("Is it normal list 2 ? ")
z_score_max_2 = z_score(max(list2), normal_2)
z_score_min_2 = z_score(min(list2), normal_2)
print(z_score_max_2)
print(z_score_min_2)
is_normal_2 = sevent3(z_score_max_2, z_score_min_2, sd_v_2)
print("normality of list 1: ", is_normal, " and list 2: ", is_normal_2)
# ASSESSING HOMOGENEITY OF VARIANCE
# T test
print("All stat list1 and 2")
allStat(list1, list2)
print("All stat list1 and 70%")
allStat(list1, list3)
print("All stat list2 and 80%")
allStat(list2, list4)
## PRINTING
#font = {'family' : 'normal',
#'weight' : 'bold',
#'size' : 22}
#plt.rc('font', **font)
plt.rcParams.update({'font.size': 50})
plt.figure(1)
count, bins, ignored = plt.hist(normal, 30, normed=True)
#plt.clf()
plt.figure(2)
plt.axvspan(70, 80, alpha=0.5, color='red')
#plt.plot(bins, 1/(sd_v * numpy.sqrt(2 * numpy.pi)) *
#numpy.exp( - (bins - mean_v)**2 / (2 * sd_v**2) ),
#linewidth=4, color='r')
#plt.clf()
#plt.axvline(mean_v - (sd_v*3), color='r')
#plt.axvline(mean_v + (sd_v*3), color='r')
max_c = max(count)
bottom1 = list()
size1 = list()
for el in list1:
bottom1.append(0.4)
size1.append(700)
#bottom1[4] = bottom1[4]*2
#bottom1[5] = bottom1[5]*2
plt.scatter(list1, bottom1, size1, 'r')
#plt.show()
#plt.clf()
plt.figure(1)
count_2, bins_2, ignored_2 = plt.hist(normal_2, 30, normed=True)
#plt.clf()
plt.figure(2)
#plt.plot(bins_2, 1/(sd_v_2 * numpy.sqrt(2 * numpy.pi)) *
#numpy.exp( - (bins_2 - mean_v_2)**2 / (2 * sd_v_2**2) ),
#linewidth=4, color='g')
#plt.axvline(mean_v_2 - (sd_v_2*3), color='g')
#plt.axvline(mean_v_2 + (sd_v_2*3), color='g')
max_c_2 = max(count_2)
bottom = list()
size = list()
for el in list2:
bottom.append(0.2)
size.append(700)
#bottom[2] = bottom[2]*2
#bottom[3] = bottom[3]*2
plt.scatter(list2, bottom, size, 'g')
#Draw the grey zone
plt.xlabel('percentage')
plt.axis([60, 100, 0, 0.5])
plt.show()
|
gpl-3.0
|
NunoEdgarGub1/scikit-learn
|
sklearn/semi_supervised/tests/test_label_propagation.py
|
307
|
1974
|
""" test the label propagation module """
import nose
import numpy as np
from sklearn.semi_supervised import label_propagation
from numpy.testing import assert_array_almost_equal
from numpy.testing import assert_array_equal
ESTIMATORS = [
(label_propagation.LabelPropagation, {'kernel': 'rbf'}),
(label_propagation.LabelPropagation, {'kernel': 'knn', 'n_neighbors': 2}),
(label_propagation.LabelSpreading, {'kernel': 'rbf'}),
(label_propagation.LabelSpreading, {'kernel': 'knn', 'n_neighbors': 2})
]
def test_fit_transduction():
samples = [[1., 0.], [0., 2.], [1., 3.]]
labels = [0, 1, -1]
for estimator, parameters in ESTIMATORS:
clf = estimator(**parameters).fit(samples, labels)
nose.tools.assert_equal(clf.transduction_[2], 1)
def test_distribution():
samples = [[1., 0.], [0., 1.], [1., 1.]]
labels = [0, 1, -1]
for estimator, parameters in ESTIMATORS:
clf = estimator(**parameters).fit(samples, labels)
if parameters['kernel'] == 'knn':
continue # unstable test; changes in k-NN ordering break it
assert_array_almost_equal(clf.predict_proba([[1., 0.0]]),
np.array([[1., 0.]]), 2)
else:
assert_array_almost_equal(np.asarray(clf.label_distributions_[2]),
np.array([.5, .5]), 2)
def test_predict():
samples = [[1., 0.], [0., 2.], [1., 3.]]
labels = [0, 1, -1]
for estimator, parameters in ESTIMATORS:
clf = estimator(**parameters).fit(samples, labels)
assert_array_equal(clf.predict([[0.5, 2.5]]), np.array([1]))
def test_predict_proba():
samples = [[1., 0.], [0., 1.], [1., 2.5]]
labels = [0, 1, -1]
for estimator, parameters in ESTIMATORS:
clf = estimator(**parameters).fit(samples, labels)
assert_array_almost_equal(clf.predict_proba([[1., 1.]]),
np.array([[0.5, 0.5]]))
|
bsd-3-clause
|
harmdevries89/fuel
|
docs/conf.py
|
10
|
9795
|
# -*- coding: utf-8 -*-
#
# Fuel documentation build configuration file, created by
# sphinx-quickstart2 on Wed Oct 8 17:59:44 2014.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import os
import sys
from mock import Mock as MagicMock
from sphinx.ext.autodoc import cut_lines
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
sys.path.insert(0, os.path.abspath('..'))
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
# needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
# on_rtd is whether we are on readthedocs.org
on_rtd = os.environ.get('READTHEDOCS', None) == 'True'
if not on_rtd: # only import and set the theme if we're building docs locally
import sphinx_rtd_theme
html_theme = 'sphinx_rtd_theme'
html_theme_path = [sphinx_rtd_theme.get_html_theme_path()]
extensions = [
'sphinx.ext.autodoc',
'sphinx.ext.doctest',
'sphinx.ext.napoleon',
'sphinx.ext.todo',
'sphinx.ext.mathjax',
'sphinx.ext.graphviz',
'sphinx.ext.intersphinx',
'matplotlib.sphinxext.plot_directive',
]
intersphinx_mapping = {
'theano': ('http://theano.readthedocs.org/en/latest/', None),
'numpy': ('http://docs.scipy.org/doc/numpy/', None),
'scipy': ('http://docs.scipy.org/doc/scipy/reference/', None),
'python': ('http://docs.python.org/3.4', None),
'pandas': ('http://pandas.pydata.org/pandas-docs/stable/', None)
}
class Mock(MagicMock):
@classmethod
def __getattr__(cls, name):
return Mock()
MOCK_MODULES = ['h5py', 'zmq']
sys.modules.update((mod_name, Mock()) for mod_name in MOCK_MODULES)
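# Illustrative note (not part of the original config): once the mocks are
# registered, statements such as `import h5py` inside autodoc'd modules resolve
# to Mock objects, so the docs build on Read the Docs without the real
# h5py/zmq dependencies installed.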
graphviz_dot_args = ['-Gbgcolor=#fcfcfc'] # To match the RTD theme
# Render todo lists
todo_include_todos = True
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
# source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'Fuel'
copyright = u'2014, Université de Montréal'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
import fuel
version = '.'.join(fuel.__version__.split('.')[:2])
# The full version, including alpha/beta/rc tags.
release = fuel.__version__
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
# language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
# today = ''
# Else, today_fmt is used as the format for a strftime call.
# today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build']
# The reST default role (used for this markup: `text`) to use for all
# documents.
# default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
# add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
# add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
# show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
# modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built documents.
# keep_warnings = False
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
# html_theme = 'default'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
# html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
# html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
# html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
# html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
# html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
# html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# Add any extra paths that contain custom files (such as robots.txt or
# .htaccess) here, relative to this directory. These files are copied
# directly to the root of the documentation.
# html_extra_path = []
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
# html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
# html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
# html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
# html_additional_pages = {}
# If false, no module index is generated.
# html_domain_indices = True
# If false, no index is generated.
# html_use_index = True
# If true, the index is split into individual pages for each letter.
# html_split_index = False
# If true, links to the reST sources are added to the pages.
# html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
# html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
# html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
# html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
# html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'Fueldoc'
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
# 'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
# 'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
# 'preamble': '',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
('index', 'Fuel.tex', u'Fuel Documentation',
u'Université de Montréal', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
# latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
# latex_use_parts = False
# If true, show page references after internal links.
# latex_show_pagerefs = False
# If true, show URL addresses after external links.
# latex_show_urls = False
# Documents to append as an appendix to all manuals.
# latex_appendices = []
# If false, no module index is generated.
# latex_domain_indices = True
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
('index', 'fuel', u'Fuel Documentation',
[u'Université de Montréal'], 1)
]
# If true, show URL addresses after external links.
# man_show_urls = False
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
('index', 'Fuel', u'Fuel Documentation',
u'Université de Montréal', 'Fuel', 'One line description of project.',
'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
# texinfo_appendices = []
# If false, no module index is generated.
# texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
# texinfo_show_urls = 'footnote'
# If true, do not generate a @detailmenu in the "Top" node's menu.
# texinfo_no_detailmenu = False
def skip_abc(app, what, name, obj, skip, options):
return skip or name.startswith('_abc')
def setup(app):
app.connect('autodoc-process-docstring', cut_lines(2, what=['module']))
app.connect('autodoc-skip-member', skip_abc)
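# Illustrative note (not part of the original config): cut_lines(2, what=['module'])
# strips the first two lines of every module docstring before autodoc renders it,
# and skip_abc hides any member whose name starts with '_abc'.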
|
mit
|
ryfeus/lambda-packs
|
Pandas_numpy/source/pandas/io/api.py
|
14
|
1146
|
"""
Data IO api
"""
# flake8: noqa
from pandas.io.parsers import read_csv, read_table, read_fwf
from pandas.io.clipboards import read_clipboard
from pandas.io.excel import ExcelFile, ExcelWriter, read_excel
from pandas.io.pytables import HDFStore, get_store, read_hdf
from pandas.io.json import read_json
from pandas.io.html import read_html
from pandas.io.sql import read_sql, read_sql_table, read_sql_query
from pandas.io.sas import read_sas
from pandas.io.feather_format import read_feather
from pandas.io.parquet import read_parquet
from pandas.io.stata import read_stata
from pandas.io.pickle import read_pickle, to_pickle
from pandas.io.packers import read_msgpack, to_msgpack
from pandas.io.gbq import read_gbq
# deprecation, xref #13790
def Term(*args, **kwargs):
import warnings
warnings.warn("pd.Term is deprecated as it is not "
"applicable to user code. Instead use in-line "
"string expressions in the where clause when "
"searching in HDFStore",
FutureWarning, stacklevel=2)
from pandas.io.pytables import Term
return Term(*args, **kwargs)
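# Hedged illustration (not part of this module): the deprecation message above
# points to in-line where strings when querying an HDFStore, e.g. something like
#   store = pd.HDFStore('data.h5')                  # hypothetical file/key names
#   df = store.select('df', where="index > '20150101'")
# rather than constructing Term objects by hand.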
|
mit
|
alexsavio/scikit-learn
|
sklearn/neural_network/tests/test_rbm.py
|
225
|
6278
|
import sys
import re
import numpy as np
from scipy.sparse import csc_matrix, csr_matrix, lil_matrix
from sklearn.utils.testing import (assert_almost_equal, assert_array_equal,
assert_true)
from sklearn.datasets import load_digits
from sklearn.externals.six.moves import cStringIO as StringIO
from sklearn.neural_network import BernoulliRBM
from sklearn.utils.validation import assert_all_finite
np.seterr(all='warn')
Xdigits = load_digits().data
Xdigits -= Xdigits.min()
Xdigits /= Xdigits.max()
def test_fit():
X = Xdigits.copy()
rbm = BernoulliRBM(n_components=64, learning_rate=0.1,
batch_size=10, n_iter=7, random_state=9)
rbm.fit(X)
assert_almost_equal(rbm.score_samples(X).mean(), -21., decimal=0)
# in-place tricks shouldn't have modified X
assert_array_equal(X, Xdigits)
def test_partial_fit():
X = Xdigits.copy()
rbm = BernoulliRBM(n_components=64, learning_rate=0.1,
batch_size=20, random_state=9)
n_samples = X.shape[0]
n_batches = int(np.ceil(float(n_samples) / rbm.batch_size))
batch_slices = np.array_split(X, n_batches)
for i in range(7):
for batch in batch_slices:
rbm.partial_fit(batch)
assert_almost_equal(rbm.score_samples(X).mean(), -21., decimal=0)
assert_array_equal(X, Xdigits)
def test_transform():
X = Xdigits[:100]
rbm1 = BernoulliRBM(n_components=16, batch_size=5,
n_iter=5, random_state=42)
rbm1.fit(X)
Xt1 = rbm1.transform(X)
Xt2 = rbm1._mean_hiddens(X)
assert_array_equal(Xt1, Xt2)
def test_small_sparse():
# BernoulliRBM should work on small sparse matrices.
X = csr_matrix(Xdigits[:4])
BernoulliRBM().fit(X) # no exception
def test_small_sparse_partial_fit():
for sparse in [csc_matrix, csr_matrix]:
X_sparse = sparse(Xdigits[:100])
X = Xdigits[:100].copy()
rbm1 = BernoulliRBM(n_components=64, learning_rate=0.1,
batch_size=10, random_state=9)
rbm2 = BernoulliRBM(n_components=64, learning_rate=0.1,
batch_size=10, random_state=9)
rbm1.partial_fit(X_sparse)
rbm2.partial_fit(X)
assert_almost_equal(rbm1.score_samples(X).mean(),
rbm2.score_samples(X).mean(),
decimal=0)
def test_sample_hiddens():
rng = np.random.RandomState(0)
X = Xdigits[:100]
rbm1 = BernoulliRBM(n_components=2, batch_size=5,
n_iter=5, random_state=42)
rbm1.fit(X)
h = rbm1._mean_hiddens(X[0])
hs = np.mean([rbm1._sample_hiddens(X[0], rng) for i in range(100)], 0)
assert_almost_equal(h, hs, decimal=1)
def test_fit_gibbs():
# Gibbs on the RBM hidden layer should be able to recreate [[0], [1]]
# from the same input
rng = np.random.RandomState(42)
X = np.array([[0.], [1.]])
rbm1 = BernoulliRBM(n_components=2, batch_size=2,
n_iter=42, random_state=rng)
# you need that many iterations
rbm1.fit(X)
assert_almost_equal(rbm1.components_,
np.array([[0.02649814], [0.02009084]]), decimal=4)
assert_almost_equal(rbm1.gibbs(X), X)
return rbm1
def test_fit_gibbs_sparse():
# Gibbs on the RBM hidden layer should be able to recreate [[0], [1]] from
# the same input even when the input is sparse, and test against non-sparse
rbm1 = test_fit_gibbs()
rng = np.random.RandomState(42)
from scipy.sparse import csc_matrix
X = csc_matrix([[0.], [1.]])
rbm2 = BernoulliRBM(n_components=2, batch_size=2,
n_iter=42, random_state=rng)
rbm2.fit(X)
assert_almost_equal(rbm2.components_,
np.array([[0.02649814], [0.02009084]]), decimal=4)
assert_almost_equal(rbm2.gibbs(X), X.toarray())
assert_almost_equal(rbm1.components_, rbm2.components_)
def test_gibbs_smoke():
# Check if we don't get NaNs sampling the full digits dataset.
# Also check that sampling again will yield different results.
X = Xdigits
rbm1 = BernoulliRBM(n_components=42, batch_size=40,
n_iter=20, random_state=42)
rbm1.fit(X)
X_sampled = rbm1.gibbs(X)
assert_all_finite(X_sampled)
X_sampled2 = rbm1.gibbs(X)
assert_true(np.all((X_sampled != X_sampled2).max(axis=1)))
def test_score_samples():
# Test score_samples (pseudo-likelihood) method.
# Assert that pseudo-likelihood is computed without clipping.
# See Fabian's blog, http://bit.ly/1iYefRk
rng = np.random.RandomState(42)
X = np.vstack([np.zeros(1000), np.ones(1000)])
rbm1 = BernoulliRBM(n_components=10, batch_size=2,
n_iter=10, random_state=rng)
rbm1.fit(X)
assert_true((rbm1.score_samples(X) < -300).all())
# Sparse vs. dense should not affect the output. Also test sparse input
# validation.
rbm1.random_state = 42
d_score = rbm1.score_samples(X)
rbm1.random_state = 42
s_score = rbm1.score_samples(lil_matrix(X))
assert_almost_equal(d_score, s_score)
# Test numerical stability (#2785): would previously generate infinities
# and crash with an exception.
with np.errstate(under='ignore'):
rbm1.score_samples([np.arange(1000) * 100])
def test_rbm_verbose():
rbm = BernoulliRBM(n_iter=2, verbose=10)
old_stdout = sys.stdout
sys.stdout = StringIO()
try:
rbm.fit(Xdigits)
finally:
sys.stdout = old_stdout
def test_sparse_and_verbose():
# Make sure RBM works with sparse input when verbose=True
old_stdout = sys.stdout
sys.stdout = StringIO()
from scipy.sparse import csc_matrix
X = csc_matrix([[0.], [1.]])
rbm = BernoulliRBM(n_components=2, batch_size=2, n_iter=1,
random_state=42, verbose=True)
try:
rbm.fit(X)
s = sys.stdout.getvalue()
# make sure output is sound
assert_true(re.match(r"\[BernoulliRBM\] Iteration 1,"
r" pseudo-likelihood = -?(\d)+(\.\d+)?,"
r" time = (\d|\.)+s",
s))
finally:
sys.stdout = old_stdout
|
bsd-3-clause
|
arokem/nipy
|
examples/labs/need_data/group_reproducibility_analysis.py
|
4
|
4090
|
#!/usr/bin/env python
# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*-
# vi: set ft=python sts=4 ts=4 sw=4 et:
from __future__ import print_function # Python 2/3 compatibility
"""
Example of script to analyse the reproducibility in group studies using a
bootstrap procedure.
This reproduces approximately the work described in 'Analysis of a large fMRI
cohort: Statistical and methodological issues for group analyses' Thirion B,
Pinel P, Meriaux S, Roche A, Dehaene S, Poline JB. Neuroimage. 2007
Mar;35(1):105-20.
Needs matplotlib
Author: Bertrand Thirion, 2005-2009
"""
from os import getcwd, mkdir, path
from numpy import array
try:
import matplotlib.pyplot as plt
except ImportError:
raise RuntimeError("This script needs the matplotlib library")
from nipy.labs.utils.reproducibility_measures import (
group_reproducibility_metrics)
# Local import
from get_data_light import DATA_DIR, get_second_level_dataset
print('This analysis takes a long while, please be patient')
##############################################################################
# Set the paths, data, etc.
##############################################################################
nsubj = 12
nbeta = 29
data_dir = path.join(DATA_DIR, 'group_t_images')
mask_images = [path.join(data_dir, 'mask_subj%02d.nii' % n)
for n in range(nsubj)]
stat_images = [path.join(data_dir, 'spmT_%04d_subj_%02d.nii' % (nbeta, n))
for n in range(nsubj)]
contrast_images = [path.join(data_dir, 'con_%04d_subj_%02d.nii' % (nbeta, n))
for n in range(nsubj)]
all_images = mask_images + stat_images + contrast_images
missing_file = array([not path.exists(m) for m in all_images]).any()
if missing_file:
get_second_level_dataset()
# write directory
write_dir = path.join(getcwd(), 'results')
if not path.exists(write_dir):
mkdir(write_dir)
##############################################################################
# main script
##############################################################################
ngroups = [4]
thresholds = [3.0, 4.0, 5.0]
sigma = 6.0
csize = 10
niter = 10
method = 'crfx'
verbose = 0
swap = False
voxel_results, cluster_results, peak_results = group_reproducibility_metrics(
mask_images, contrast_images, [], thresholds, ngroups, method,
cluster_threshold=csize, number_of_samples=niter, sigma=sigma,
do_clusters=True, do_voxels=True, do_peaks=True, swap=swap)
kap = [k for k in voxel_results[ngroups[0]].values()]
clt = [k for k in cluster_results[ngroups[0]].values()]
pk = [k for k in peak_results[ngroups[0]].values()]
##############################################################################
# plot
##############################################################################
plt.figure()
plt.subplot(1, 3, 1)
plt.boxplot(kap)
plt.title('voxel-level reproducibility')
plt.xticks(range(1, 1 + len(thresholds)), thresholds)
plt.xlabel('threshold')
plt.subplot(1, 3, 2)
plt.boxplot(clt)
plt.title('cluster-level reproducibility')
plt.xticks(range(1, 1 + len(thresholds)), thresholds)
plt.xlabel('threshold')
plt.subplot(1, 3, 3)
plt.boxplot(pk)
plt.title('peak-level reproducibility')
plt.xticks(range(1, 1 + len(thresholds)), thresholds)
plt.xlabel('threshold')
##############################################################################
# create an image
##############################################################################
"""
# this is commented until a new version of the code allows it
# with the adequate level of abstraction
th = 4.0
swap = False
kwargs = {'threshold':th,'csize':csize}
rmap = map_reproducibility(Functional, VarFunctional, grp_mask, ngroups,
method, swap, verbose, **kwargs)
wmap = mask.astype(np.int)
wmap[mask] = rmap
wim = Nifti1Image(wmap, affine)
wim.get_header()['descrip']= 'reproducibility map at threshold %f, \
cluster size %d'%(th,csize)
wname = path.join(write_dir,'repro.nii')
save(wim, wname)
print('Wrote a reproducibility image in %s'%wname)
"""
|
bsd-3-clause
|
SCP-028/UGA
|
protein_pka/pI/fetch_pdb.py
|
1
|
7980
|
import glob
import json
import pathlib
import subprocess
from decimal import Decimal
import numpy as np
import pandas as pd
import plotly.graph_objs as go
import plotly.offline as offline
import xmltodict as xd
# offline.init_notebook_mode()
ROOTDIR = "/home/jovyan/data/protein_pka"
pathlib.Path(f'{ROOTDIR}/result/pdb_meta').mkdir(parents=True, exist_ok=True)
def chunks(l, n):
"""Yield successive n-sized chunks from l."""
for i in range(0, len(l), n):
yield l[i:i + n]
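# Illustrative example (not part of the original script):
#   list(chunks(["1ABC", "2XYZ", "3DEF"], 2)) -> [["1ABC", "2XYZ"], ["3DEF"]]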
def reverse_scientic_notation(x):
"""Change string x back to PDB ID.
Example
-------
'5e+28' -> '5E28'
'10000000.0' -> '1E07'
"""
x = f"{Decimal(x):.0E}".split("+")
if len(x[1]) == 1:
x[1] = f"0{x[1]}"
return "".join(x)
def unpack_list_in_df(df, col_target):
# Flatten columns of lists
col_flat = [item for sublist in df[col_target] for item in sublist]
# Row numbers to repeat
lens = df[col_target].apply(len)
vals = range(df.shape[0])
ilocations = np.repeat(vals, lens)
# Replicate rows and add flattened column of lists
cols = [i for i, c in enumerate(df.columns) if c != col_target]
new_df = df.iloc[ilocations, cols].copy()
new_df[col_target] = col_flat
return new_df
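# Illustrative example (not part of the original pipeline): unpack_list_in_df
# explodes a column of lists into one row per element, e.g.
#   _demo = pd.DataFrame({"id": ["1ABC"], "polymer": [[{"@length": 10}, {"@length": 20}]]})
#   unpack_list_in_df(_demo, "polymer")   # -> two rows, one per polymer dict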
# # pK values of proteins with Uniprot IDs
if not pathlib.Path(f"{ROOTDIR}/pK_fixed.csv").is_file():
pK = pd.read_csv(f"{ROOTDIR}/pka_cleaned_merged.csv")
# fix IDs automatically converted to scientific notations
pK_mask = (pK["protein"].str.len() != 4)
pK_wrong = pK[pK_mask]
pK.loc[pK_mask, "protein"] = pK_wrong["protein"].apply(reverse_scientic_notation)
pK.to_csv(f"{ROOTDIR}/pK_fixed.csv", index=False)
pK = pd.read_csv(f"{ROOTDIR}/pK_fixed.csv")
print(f'There are altogether {len(pK["protein"].unique())} unique PDB IDs.')
# # Get general PDB annotation
if not pathlib.Path(f"{ROOTDIR}/result/pdb_meta/pdb_general.json").is_file():
# downloading files using PDB's RESTful API
url = "https://www.rcsb.org/pdb/rest/describePDB?structureId="
pdb_urls = [url + ",".join(pdb) for pdb in chunks(pK.protein.unique(), 1000)]
with open(f"{ROOTDIR}/result/pdb_meta/url_general.list", "w") as f:
f.write("\n".join(pdb_urls))
r = subprocess.run(["aria2c", "--input-file",
f"{ROOTDIR}/result/pdb_meta/url.list"])
# merging downloaded files into one json
filenames = glob.iglob(f"{ROOTDIR}/result/pdb_meta/describePDB*")
ans = {}
for filename in filenames:
with open(filename, "r") as f:
pdb = xd.parse(f.read())
for node in pdb['PDBdescription']['PDB']:
ans[node["@structureId"]] = node
with open(f"{ROOTDIR}/result/pdb_meta/pdb_general.json", "w") as f:
f.write(json.dumps(ans))
PDB_general = pd.read_json(f"{ROOTDIR}/result/pdb_meta/pdb_general.json").T
PDB_general = PDB_general[PDB_general["@status"] == "CURRENT"]
PDB_general = PDB_general[["@structureId", "@deposition_date", "@expMethod", "@resolution"]].reset_index(drop=True)
print(f"The general PDB annotation for {PDB_general.shape[0]} structures were downloaded.")
# PDB_general.sample(1)
# # Get PDB entity information
if not pathlib.Path(f"{ROOTDIR}/result/pdb_meta/pdb_entity.json").is_file():
url = "https://www.rcsb.org/pdb/rest/describeMol?structureId="
pdb_urls = [url + ",".join(pdb) for pdb in chunks(pK.protein.unique(), 1000)]
with open(f"{ROOTDIR}/result/pdb_meta/url_entity.list", "w") as f:
f.write("\n".join(pdb_urls))
r = subprocess.run(["aria2c", "--input-file",
f"{ROOTDIR}/result/pdb_meta/url.list"])
filenames = glob.iglob(f"{ROOTDIR}/result/pdb_meta/describeMol*")
ans = {}
for filename in filenames:
with open(filename, "r") as f:
pdb = xd.parse(f.read())
for node in pdb["molDescription"]["structureId"]:
ans[node["@id"]] = node
with open(f"{ROOTDIR}/result/pdb_meta/pdb_entity.json", "w") as f:
f.write(json.dumps(ans))
PDB_entity = pd.read_json(f"{ROOTDIR}/result/pdb_meta/pdb_entity.json").T
print(f"The PDB entity annotation for {PDB_entity.shape[0]} structures were downloaded.")
PDB_entity_single = PDB_entity[PDB_entity["polymer"].apply(lambda x: isinstance(x, dict))]
PDB_entity_multi = PDB_entity[PDB_entity["polymer"].apply(lambda x: isinstance(x, list))]
# Unpack list in cell
PDB_entity_multi = unpack_list_in_df(PDB_entity_multi, "polymer")
PDB_entity = pd.concat([PDB_entity_single, PDB_entity_multi], axis=0).reset_index(drop=True)
PDB_entity["@length"] = PDB_entity["polymer"].map(lambda x: x["@length"])
PDB_entity["@chain"] = PDB_entity["polymer"].map(lambda x: x["chain"]) # dict
PDB_entity["@weight"] = PDB_entity["polymer"].map(lambda x: x["@weight"])
PDB_entity["@Taxonomy"] = [x["Taxonomy"] if "Taxonomy" in x else np.nan for x in PDB_entity["polymer"]]
PDB_entity["@synonym"] = [x["synonym"] if "synonym" in x else np.nan for x in PDB_entity["polymer"]]
PDB_entity["@uniprot"] = [x["macroMolecule"] if "macroMolecule" in x else np.nan for x in PDB_entity["polymer"]]
PDB_entity = PDB_entity.drop("polymer", axis=1).dropna()
# represent `@chain` in a way that's easier to manipulate
PDB_entity["@chain"] = [[x] if isinstance(x, dict) else x for x in PDB_entity["@chain"]]
PDB_entity["@chain"] = PDB_entity["@chain"].map(lambda x: "".join(sorted([y["@id"] for y in x])))
PDB_entity["@chain"] = PDB_entity["@chain"].str.upper()
# same goes for `@synonym`
PDB_entity["@synonym"] = [[x] if isinstance(x, dict) else x for x in PDB_entity["@synonym"]]
PDB_entity["@synonym"] = PDB_entity["@synonym"].map(lambda x: "".join(sorted([y["@name"] for y in x])))
PDB_entity["@synonym"] = PDB_entity["@synonym"].str.upper()
# and uniprot
PDB_entity["@uniprot"] = [[x] if isinstance(x, dict) else x for x in PDB_entity["@uniprot"]]
PDB_entity["@uniprot"] = PDB_entity["@uniprot"].map(lambda x: [y["accession"]["@id"] for y in x])
PDB_entity = unpack_list_in_df(PDB_entity, "@uniprot")
print(f"After unpacking, there are {PDB_entity.shape[0]} entries in PDB_entity.")
# # Convert to Ensembl Gene ID
# ftp://ftp.uniprot.org/pub/databases/uniprot/current_release/knowledgebase/idmapping/by_organism/HUMAN_9606_idmapping.dat.gz
annotation = pd.read_csv("/home/jovyan/data/annotation/uniprot_id_map/HUMAN_9606_idmapping.csv")
uniprot_ensembl = annotation[annotation["ID_type"] == "Ensembl"]
PDB = pd.merge(PDB_entity, uniprot_ensembl, left_on=PDB_entity["@uniprot"], right_on=uniprot_ensembl["UniProtKB_AC"])
# # Combine previous data frames
PDB = PDB[PDB["@Taxonomy"].astype(str).str.contains("Homo sapiens")]
PDB = PDB[["@uniprot", "@id", "ID", "@length", "@weight", "@chain", "@synonym"]]
PDB = pd.merge(PDB, PDB_general, left_on=PDB["@id"], right_on=PDB_general["@structureId"])
PDB = PDB.drop(["key_0", "@structureId"], axis=1).reset_index(drop=True)
PDB = PDB.rename(columns={"ID": "Ensembl"})
print(f'There are {len(PDB["Ensembl"].unique())} unique Ensembl IDs and {len(PDB["@id"].unique())} unique PDB structures.')
print(f'{len(PDB_entity[~PDB_entity["@id"].isin(PDB["@id"])]["@id"].unique())} PDB entries don\'t have matching Ensembl IDs / aren\'t human proteins.')
# # Remove duplicates
# Same gene, same structure, same chains (technical replicates)
PDB = PDB.sort_values("@length", ascending=False).drop_duplicates(keep="first")
# each unique protein, only keep the ones with max(@length)
PDB = PDB[PDB["@length"] == PDB.groupby(["@uniprot", "@chain", "Ensembl"])["@length"].transform(max)]
# remaining duplicates, keep the one with latest deposition date
PDB = PDB[PDB["@deposition_date"] == PDB.groupby(["@uniprot", "@chain", "Ensembl"])["@deposition_date"].transform(max)]
# ...highest resolution
PDB = PDB[PDB["@resolution"] == PDB.groupby(["@uniprot", "@chain", "Ensembl"])["@resolution"].transform(min)]
print(f'There are {len(PDB["@id"].unique())} unique PDB structures, and {len(PDB["Ensembl"].unique())} unique Ensembl genes')
|
apache-2.0
|
togawa28/mousestyles
|
mousestyles/path_diversity/detect_noise.py
|
3
|
3295
|
from __future__ import (absolute_import, division,
print_function, unicode_literals)
import pandas as pd
from mousestyles.path_diversity import compute_angles
def detect_noise(movement, paths, angle_threshold, delta_t):
r"""
    Return a pandas Series labelling the points where measurement
    noise is detected; the result is intended to be passed to a
    smoothing function
Parameters
----------
movement : pandas.DataFrame
CT, CX, CY coordinates and homebase status
for the unique combination of strain, mouse and day
    paths : list
        list of paths, each given as [start, end] indices into movement
angle_threshold : float
positive number indicating the minimum turning angle to flag as noise
delta_t : float
positive number indicating the delta_time interval
Returns
-------
    noise_path : pandas.Series
        series labelling, for each observation in the paths, the noise
        segment it belongs to (0 where no noise, as defined by the input
        parameters, is detected)
Examples
--------
>>> movement = data.load_movement(1, 2, 1)
>>> paths = path_diversity.path_index(movement, 1, 1)
>>> noise = detect_noise(movement, paths, 135, .1)
"""
# check if all inputs are positive
conditions_value = [angle_threshold <= 0, delta_t <= 0]
if any(conditions_value):
raise ValueError("Input values need to be positive")
if not isinstance(movement, pd.core.frame.DataFrame):
raise TypeError("Movement must be pandas DataFrame")
if set(movement.keys()) != {'isHB', 't', 'x', 'y'}:
raise ValueError(
"The keys of movement must be 't', 'x', 'y', and 'isHB'")
if len(movement) <= 1:
raise ValueError("Movement must contain at least 2 rows")
noise_index = 1
noise_path = []
noise_path = pd.Series(noise_path)
current_noise = False
for path in paths:
path_obj = movement[path[0]:path[1] + 1]
if len(path_obj) > 3:
path_obj['angles'] = compute_angles(path_obj, False)
path_obj['sharp_angle'] = path_obj['angles'] > angle_threshold
path_obj['noise'] = 0
# Note: The above DataFrame manipulations result in a
# SettingWithCopyWarning. The warning persists even after
# attempting the following format:
# .loc[row_indexer,col_indexer] = value. Despite this,
# the output of the function is working as intended.
for i in range(0, len(path_obj) - 1):
if path_obj['sharp_angle'].iloc[i]:
if path_obj['sharp_angle'].iloc[i + 1]:
if path_obj['t'].iloc[
i + 1] - path_obj['t'].iloc[i] < delta_t:
path_obj['noise'].iloc[i] = noise_index
path_obj['noise'].iloc[i + 1] = noise_index
current_noise = True
elif current_noise:
noise_index += 1
current_noise = False
elif current_noise:
noise_index += 1
current_noise = False
else:
path_obj['noise'] = 0
noise_path = noise_path.append(path_obj.noise)
return noise_path
|
bsd-2-clause
|
MechCoder/scikit-learn
|
examples/model_selection/plot_randomized_search.py
|
47
|
3287
|
"""
=========================================================================
Comparing randomized search and grid search for hyperparameter estimation
=========================================================================
Compare randomized search and grid search for optimizing hyperparameters of a
random forest.
All parameters that influence the learning are searched simultaneously
(except for the number of estimators, which poses a time / quality tradeoff).
The randomized search and the grid search explore exactly the same space of
parameters. The result in parameter settings is quite similar, while the run
time for randomized search is drastically lower.
The performance is slightly worse for the randomized search, though this
is most likely a noise effect and would not carry over to a held-out test set.
Note that in practice, one would not search over this many different parameters
simultaneously using grid search, but pick only the ones deemed most important.
"""
print(__doc__)
import numpy as np
from time import time
from scipy.stats import randint as sp_randint
from sklearn.model_selection import GridSearchCV
from sklearn.model_selection import RandomizedSearchCV
from sklearn.datasets import load_digits
from sklearn.ensemble import RandomForestClassifier
# get some data
digits = load_digits()
X, y = digits.data, digits.target
# build a classifier
clf = RandomForestClassifier(n_estimators=20)
# Utility function to report best scores
def report(results, n_top=3):
for i in range(1, n_top + 1):
candidates = np.flatnonzero(results['rank_test_score'] == i)
for candidate in candidates:
print("Model with rank: {0}".format(i))
print("Mean validation score: {0:.3f} (std: {1:.3f})".format(
results['mean_test_score'][candidate],
results['std_test_score'][candidate]))
print("Parameters: {0}".format(results['params'][candidate]))
print("")
# specify parameters and distributions to sample from
param_dist = {"max_depth": [3, None],
"max_features": sp_randint(1, 11),
"min_samples_split": sp_randint(2, 11),
"min_samples_leaf": sp_randint(1, 11),
"bootstrap": [True, False],
"criterion": ["gini", "entropy"]}
# run randomized search
n_iter_search = 20
random_search = RandomizedSearchCV(clf, param_distributions=param_dist,
n_iter=n_iter_search)
start = time()
random_search.fit(X, y)
print("RandomizedSearchCV took %.2f seconds for %d candidates"
" parameter settings." % ((time() - start), n_iter_search))
report(random_search.cv_results_)
# use a full grid over all parameters
param_grid = {"max_depth": [3, None],
"max_features": [1, 3, 10],
"min_samples_split": [2, 3, 10],
"min_samples_leaf": [1, 3, 10],
"bootstrap": [True, False],
"criterion": ["gini", "entropy"]}
# run grid search
grid_search = GridSearchCV(clf, param_grid=param_grid)
start = time()
grid_search.fit(X, y)
print("GridSearchCV took %.2f seconds for %d candidate parameter settings."
% (time() - start, len(grid_search.cv_results_['params'])))
report(grid_search.cv_results_)
|
bsd-3-clause
|
shishaochen/TensorFlow-0.8-Win
|
tensorflow/contrib/learn/python/learn/tests/test_early_stopping.py
|
5
|
2501
|
# Copyright 2015-present The Scikit Flow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow as tf
import random
from tensorflow.contrib.learn.python import learn
from tensorflow.contrib.learn.python.learn import datasets
from tensorflow.contrib.learn.python.learn.estimators._sklearn import accuracy_score
from tensorflow.contrib.learn.python.learn.estimators._sklearn import train_test_split
class EarlyStoppingTest(tf.test.TestCase):
def testIrisES(self):
random.seed(42)
iris = datasets.load_iris()
X_train, X_test, y_train, y_test = train_test_split(iris.data,
iris.target,
test_size=0.2,
random_state=42)
X_train, X_val, y_train, y_val = train_test_split(
X_train, y_train, test_size=0.2)
val_monitor = learn.monitors.ValidationMonitor(X_val, y_val, n_classes=3)
# classifier without early stopping - overfitting
classifier1 = learn.TensorFlowDNNClassifier(hidden_units=[10, 20, 10],
n_classes=3,
steps=1000)
classifier1.fit(X_train, y_train)
score1 = accuracy_score(y_test, classifier1.predict(X_test))
# classifier with early stopping - improved accuracy on testing set
classifier2 = learn.TensorFlowDNNClassifier(hidden_units=[10, 20, 10],
n_classes=3,
steps=1000)
classifier2.fit(X_train, y_train, val_monitor)
score2 = accuracy_score(y_test, classifier2.predict(X_test))
# self.assertGreater(score2, score1, "No improvement using early stopping.")
if __name__ == "__main__":
tf.test.main()
|
apache-2.0
|
biosustain/marsi
|
marsi/cobra/strain_design/design_methods.py
|
1
|
8836
|
# Copyright 2016 Chr. Hansen A/S
# Copyright 2016 The Novo Nordisk Foundation Center for Biosustainability, DTU.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import
from cameo.core.utils import get_reaction_for
from cameo.flux_analysis.analysis import find_essential_metabolites
from cameo.flux_analysis.simulation import fba
from cameo.strain_design import OptKnock, OptGene, DifferentialFVA
from cameo.strain_design.heuristic.evolutionary.objective_functions import biomass_product_coupled_yield
from cobra.core.model import Model
from pandas import DataFrame
from marsi.cobra.strain_design.evolutionary import OptMet
from marsi.cobra.strain_design.post_processing import replace_design
from marsi.cobra.utils import CURRENCY_METABOLITES
class GenericMARSIDesignMethod(object):
"""
    Generic wrapper for the Metabolite Analog design method.
    This one just runs an optimization method.
Example
-------
>>> from marsi.cobra import strain_design
>>> from cameo import models
>>> from cameo.strain_design import OptGene
>>> designer = strain_design.GenericMARSIDesignMethod(model=models.bigg.iJO1366)
    >>> designer.optimize_with_reaction("succ_e", max_interventions=5, substrate="EX_glc__D_e",
    ...                                 biomass="BIOMASS_Ec_iJO1366_core_53p95M", max_results=25,
    ...                                 design_method=OptGene, manipulation_type="reactions")
"""
def __init__(self, model=None, nearest_neighbors_model=None, min_tanimoto=0.75, currency_metabolites=None,
essential_metabolites=None):
assert isinstance(model, Model)
self.model = model
self.nearest_neighbors_model = nearest_neighbors_model
self.min_tanimoto = min_tanimoto
self.currency_metabolites = currency_metabolites or CURRENCY_METABOLITES
if essential_metabolites is None:
essential_metabolites = []
self.essential_metabolites = find_essential_metabolites(model) + essential_metabolites
for i, m in enumerate(self.essential_metabolites):
if isinstance(m, str):
self.essential_metabolites[i] = model.metabolites.get_by_id(m)
def optimize_with_reaction(self, target, max_interventions=1, substrate=None,
biomass=None, design_method=OptKnock, max_results=100,
non_essential_metabolites=False, max_evaluations=20000, **design_kwargs):
raise NotImplementedError
def optimize_with_metabolites(self, target, max_interventions=1, substrate=None, biomass=None,
max_results=100, non_essential_metabolites=False, max_evaluations=20000,
**design_kwargs):
raise NotImplementedError
def essential_metabolites_reactions(self):
essential_metabolites = find_essential_metabolites(self.model)
reactions = set()
for metabolite in essential_metabolites:
reactions.update(metabolite.reactions)
return reactions
def _evaluate_designs(self, strain_designs, objective_function):
evaluated_designs = DataFrame(columns=["design", "fitness"])
for i, design in enumerate(strain_designs):
with self.model:
design.apply(self.model)
solution = fba(self.model)
fitness = objective_function(self.model, solution, design.targets)
evaluated_designs.loc[i] = [design, fitness]
return evaluated_designs
class RandomMutagenesisDesign(GenericMARSIDesignMethod):
"""
    Apply only knockout-like designs, where a total loss of function is expected.
"""
def optimize_with_reaction(self, target, max_interventions=1, substrate=None,
biomass=None, design_method="optgene", max_results=100,
non_essential_metabolites=False, max_evaluations=20000, **designer_kwargs):
target_flux = get_reaction_for(self.model, target)
exclude_reactions = []
if non_essential_metabolites:
exclude_reactions = self.essential_metabolites_reactions()
if 'essential_reactions' in designer_kwargs:
designer_kwargs['essential_reactions'] += exclude_reactions
else:
designer_kwargs['essential_reactions'] = exclude_reactions
bpcy = biomass_product_coupled_yield(biomass, target_flux, substrate)
if design_method == "optgene":
designer = OptGene(self.model, **designer_kwargs)
knockouts = designer.run(max_knockouts=max_interventions, biomass=biomass, substrate=substrate,
target=target_flux, max_results=max_results, max_evaluations=max_evaluations,
use_nullspace_simplification=False)
elif design_method == "optknock":
designer = OptKnock(self.model, **designer_kwargs)
knockouts = designer.run(max_knockouts=max_interventions, biomass=biomass, substrate=substrate,
target=target_flux, max_results=max_results)
else:
raise ValueError("'design_method' can be one of 'optgene' or 'optknock'")
designs = self._evaluate_designs(iter(knockouts), bpcy)
anti_metabolites_design = DataFrame()
for i_, row in designs.iterrows():
_anti_metabolites_design = replace_design(self.model, row.design, row.fitness, bpcy, fba, {},
ignore_metabolites=self.currency_metabolites,
essential_metabolites=self.essential_metabolites)
anti_metabolites_design = anti_metabolites_design.append(_anti_metabolites_design, ignore_index=True)
return anti_metabolites_design
def optimize_with_metabolites(self, target, max_interventions=1, substrate=None, biomass=None,
max_results=100, max_evaluations=20000, **design_kwargs):
target_flux = get_reaction_for(self.model, target)
designer = OptMet(model=self.model, essential_metabolites=CURRENCY_METABOLITES, **design_kwargs)
knockouts = designer.run(max_knockouts=max_interventions, biomass=biomass, substrate=substrate,
target=target_flux, max_results=max_results, max_evaluations=max_evaluations)
return knockouts.data_frame
class ALEDesign(GenericMARSIDesignMethod):
"""
    Apply both knockouts and flux modulation. The strains will be selected via a growth-rate proxy.
"""
def optimize_with_reaction(self, target, max_interventions=1, substrate=None,
biomass=None, design_method="differential_fva", max_results=100,
non_essential_metabolites=False, **designer_kwargs):
target_flux = get_reaction_for(self.model, target)
bpcy = biomass_product_coupled_yield(biomass, target_flux, substrate)
designer = DifferentialFVA(self.model, objective=target, variables=[biomass],
points=max_results, **designer_kwargs)
        evaluated_designs = DataFrame(columns=["design", "fitness"])
for i, design in enumerate(designer.run()):
with self.model:
design.apply(self.model)
solution = self.model.optimize()
fitness = bpcy(self.model, solution, design.targets)
evaluated_designs.loc[i] = [design, fitness]
anti_metabolites_design = DataFrame()
for _, row in evaluated_designs.iterrows():
_anti_metabolites_design = replace_design(self.model, row.design, row.fitness, bpcy, fba, {},
ignore_metabolites=self.currency_metabolites,
essential_metabolites=self.essential_metabolites)
anti_metabolites_design = anti_metabolites_design.append(_anti_metabolites_design, ignore_index=True)
return anti_metabolites_design
def optimize_with_metabolites(self, target, max_interventions=1, substrate=None, biomass=None,
max_results=100, max_evaluations=20000, **design_kwargs):
raise NotImplementedError
|
apache-2.0
|
LouisePaulDelvaux/openfisca-france-data
|
openfisca_france_data/input_data_builders/build_openfisca_indirect_taxation_survey_data/step_0_4_homogeneisation_revenus_menages.py
|
1
|
15961
|
#! /usr/bin/env python
# -*- coding: utf-8 -*-
# OpenFisca -- A versatile microsimulation software
# By: OpenFisca Team <[email protected]>
#
# Copyright (C) 2011, 2012, 2013, 2014, 2015 OpenFisca Team
# https://github.com/openfisca
#
# This file is part of OpenFisca.
#
# OpenFisca is free software; you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# OpenFisca is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import logging
import pandas
from openfisca_survey_manager.survey_collections import SurveyCollection
from openfisca_france_data import default_config_files_directory as config_files_directory
from openfisca_france_data.temporary import TemporaryStore
log = logging.getLogger(__name__)
temporary_store = TemporaryStore.create(file_name = "indirect_taxation_tmp")
def build_homogeneisation_revenus_menages(year = None):
"""Build menage consumption by categorie fiscale dataframe """
assert year is not None
# Load data
bdf_survey_collection = SurveyCollection.load(
collection = 'budget_des_familles', config_files_directory = config_files_directory)
survey = bdf_survey_collection.get_survey('budget_des_familles_{}'.format(year))
# **********************************************************************************************************************
    # *********************************** HOMOGENISATION OF HOUSEHOLD INCOME DATA ***********************************
    # ***************************** COMPUTATION OF A PROXY FOR HOUSEHOLD DISPOSABLE INCOME **************************
# **********************************************************************************************************************
#
    # ******************** HOMOGENISATION OF THE INCOME TABLES ***************************
    # /* The 1995 survey distinguishes the housing tax (taxe d'habitation) from property taxes (impôts fonciers).
    # Their relative shares are computed here so that they can be applied to the 2000 and 2005 surveys. */
if year == 1995:
menrev = survey.get_values(
table = "menrev",
variables = [
'revtot', 'ir', 'irbis', 'imphab', 'impfon', 'revaid', 'revsal', 'revind', 'revsec', 'revret',
'revcho', 'revfam', 'revlog', 'revinv', 'revrmi', 'revpat', 'mena', 'ponderr'
],
)
menage = survey.get_values(
table = "socioscm",
variables = ['exdep', 'exrev', 'mena']
)
menage.set_index('mena')
menrev = menrev.merge(menage, left_index = True, right_index = True)
        # this step keeps only the records whose quality and accuracy can be trusted
        # exdep = 1 if the household expenditure data are properly filled in
        # exrev = 1 if the household income data are properly filled in
menrev = menrev[(menrev.exdep == 1) & (menrev.exrev == 1)]
menrev['foncier_hab'] = menrev.imphab + menrev.impfon
menrev['part_IMPHAB'] = menrev.imphab / menrev.foncier_hab
menrev['part_IMPFON'] = menrev.impfon / menrev.foncier_hab
menrev['revsoc'] = (
menrev.revret + menrev.revcho + menrev.revfam + menrev.revlog + menrev.revinv + menrev.revrmi
)
for variable in ['revcho', 'revfam', 'revinv', 'revlog', 'revret', 'revrmi']:
del menrev[variable]
menrev['revact'] = menrev['revsal'] + menrev['revind'] + menrev['revsec']
menrev.rename(
columns = dict(
revpat = "revpat",
impfon = "impfon",
imphab = "imphab",
revaid = "somme_obl_recue",
),
inplace = True
)
menrev['impot_revenu'] = menrev['ir'] + menrev['irbis']
rev_disp = survey.get_values(
table = "menrev",
variables = ['revtot', 'revret', 'revcho', 'revfam', 'revlog', 'revinv', 'revrmi', 'imphab', 'impfon', 'revaid', 'revsal', 'revind', 'revsec', 'revpat', 'mena', 'ponderr', 'ir','irbis' ],
)
rev_disp.set_index('mena', inplace=True)
menage2 = survey.get_values(
table = "socioscm",
variables = ['exdep', 'exrev', 'mena']
)
menage2.set_index('mena', inplace = True)
rev_disp = menage2.merge(rev_disp, left_index = True, right_index = True)
rev_disp = rev_disp[(rev_disp.exrev == 1) & (rev_disp.exdep == 1)]
rev_disp['revsoc'] = rev_disp['revret'] + rev_disp['revcho'] + rev_disp['revfam'] + rev_disp['revlog'] + rev_disp['revinv'] + rev_disp['revrmi']
rev_disp['impot_revenu'] = rev_disp['ir'] + rev_disp['irbis']
rev_disp.rename(
columns = dict(
revaid = 'somme_obl_recue',
),
inplace = True
)
rev_disp.somme_obl_recue = rev_disp.somme_obl_recue.fillna(0)
rev_disp['revact'] = rev_disp['revsal'] + rev_disp['revind'] + rev_disp['revsec']
rev_disp['revtot'] = rev_disp['revact'] + rev_disp['revpat'] + rev_disp['revsoc'] + rev_disp['somme_obl_recue']
rev_disp['revact'] = rev_disp['revsal'] + rev_disp['revind'] + rev_disp['revsec']
rev_disp.rename(
columns = dict(
ponderr = "pondmen",
mena = "ident_men",
revind = "act_indpt",
revsal = "salaires",
revsec = "autres_rev",
),
inplace = True
)
rev_disp['autoverses'] = '0'
rev_disp['somme_libre_recue'] = '0'
rev_disp['autres_ress'] = '0'
#
        # /* Disposable income is computed from revtot, from which the housing tax and the income tax
        # are subtracted, plus possibly the CSG and CRDS contributions.
        # The variable revtot is the sum of labour, social, capital and aid income. */
#
rev_disp['rev_disponible'] = rev_disp.revtot - rev_disp.impot_revenu - rev_disp.imphab
loyers_imputes = temporary_store['depenses_bdf_{}'.format(year)]
loyers_imputes.rename(
columns = {"0411": "loyer_impute"},
inplace = True,
)
rev_dispbis = loyers_imputes.merge(rev_disp, left_index = True, right_index = True)
rev_disp['rev_disp_loyerimput'] = rev_disp['rev_disponible'] - rev_dispbis['loyer_impute']
for var in ['somme_obl_recue', 'act_indpt', 'revpat', 'salaires', 'autres_rev', 'rev_disponible', 'impfon', 'imphab', 'revsoc', 'revact', 'impot_revenu', 'revtot', 'rev_disp_loyerimput'] :
rev_disp[var] = rev_disp[var] / 6.55957
        # * CONVERSION TO EUROS (the amounts above are divided by 6.55957, the franc-to-euro rate)
temporary_store["revenus_{}".format(year)] = rev_disp
elif year == 2000:
        # TODO: rather retrieve the variables coming from the expenditure table (in temporary_store)
consomen = survey.get_values(
table = "consomen",
variables = ['c13141', 'c13111', 'c13121', 'c13131', 'pondmen', 'ident'],
)
rev_disp = consomen.sort(columns = ['ident'])
del consomen
menage = survey.get_values(
table = "menage",
variables = ['ident', 'revtot', 'revact', 'revsoc', 'revpat', 'rev70', 'rev71', 'revt_d', 'pondmen', 'rev10', 'rev11', 'rev20', 'rev21'],
).sort(columns = ['ident'])
revenus = menage.join(rev_disp, how = "outer", rsuffix = "rev_disp")
revenus.rename(
columns = dict(
c13111 = "impot_res_ppal",
c13141 = "impot_revenu",
c13121 = "impot_autres_res",
rev70 = "somme_obl_recue",
rev71 = "somme_libre_recue",
revt_d= "autres_ress",
ident = "ident_men",
rev10 = "act_indpt",
rev11 = "autoverses",
rev20 = "salaires",
rev21 = "autres_rev",
),
inplace = True
)
var_to_ints = ['pondmen','impot_autres_res','impot_res_ppal','pondmenrev_disp','c13131']
for var_to_int in var_to_ints:
revenus[var_to_int] = revenus[var_to_int].astype(int)
revenus['imphab'] = 0.65 * (revenus.impot_res_ppal + revenus.impot_autres_res)
revenus['impfon'] = 0.35 * (revenus.impot_res_ppal + revenus.impot_autres_res)
loyers_imputes = temporary_store["depenses_bdf_{}".format(year)]
variables = ["0421"]
loyers_imputes = loyers_imputes[variables]
loyers_imputes.rename(
columns = {"0421": "loyer_impute"},
inplace = True,
)
temporary_store["loyers_imputes_{}".format(year)] = loyers_imputes
loyers_imputes.index = loyers_imputes.index.astype('int')
revenus = revenus.set_index('ident_men')
revenus.index = revenus.index.astype('int')
revenus = revenus.merge(loyers_imputes, left_index = True, right_index = True)
revenus['rev_disponible'] = revenus.revtot - revenus.impot_revenu - revenus.imphab
revenus['rev_disponible'] = revenus['rev_disponible'] * (revenus['rev_disponible'] >= 0)
revenus['rev_disp_loyerimput'] = revenus.rev_disponible + revenus.loyer_impute
var_to_ints = ['loyer_impute']
for var_to_int in var_to_ints:
revenus[var_to_int] = revenus[var_to_int].astype(int)
temporary_store["revenus_{}".format(year)] = revenus
elif year == 2005:
c05d = survey.get_values(
table = "c05d",
variables = ['c13111', 'c13121', 'c13141', 'pondmen', 'ident_men'],
)
rev_disp = c05d.sort(columns = ['ident_men'])
del c05d
menage = survey.get_values(
table = "menage",
variables = ['ident_men', 'revtot', 'revact', 'revsoc', 'revpat', 'rev700_d', 'rev701_d',
'rev999_d', 'rev100_d', 'rev101_d', 'rev200_d', 'rev201_d'],
).sort(columns = ['ident_men'])
rev_disp.set_index('ident_men', inplace = True)
menage.set_index('ident_men', inplace = True)
revenus = pandas.concat([menage, rev_disp], axis = 1)
revenus.rename(
columns = dict(
rev100_d = "act_indpt",
rev101_d = "autoverses",
rev200_d = "salaires",
rev201_d = "autres_rev",
rev700_d = "somme_obl_recue",
rev701_d = "somme_libre_recue",
rev999_d = "autres_ress",
c13111 = "impot_res_ppal",
c13141 = "impot_revenu",
c13121 = "impot_autres_res",
),
inplace = True
)
        # * These weights (0.65 and 0.35) come from the BdF 1995 survey, which distinguishes the housing tax
        # * from property taxes. From BdF 1995 it was computed that the housing tax represents on average 65%
        # * of local taxes and property taxes 35%. These rates are applied to the 2000 and 2005 surveys.
# gen imphab= 0.65*(impot_res_ppal + impot_autres_res)
# gen impfon= 0.35*(impot_res_ppal + impot_autres_res)
# drop impot_autres_res impot_res_ppal
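        # Worked example (illustrative figures, not survey values): if impot_res_ppal = 600 and
        # impot_autres_res = 400, then imphab = 0.65 * 1000 = 650 and impfon = 0.35 * 1000 = 350.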
revenus['imphab'] = 0.65 * (revenus.impot_res_ppal + revenus.impot_autres_res)
revenus['impfon'] = 0.35 * (revenus.impot_res_ppal + revenus.impot_autres_res)
del revenus['impot_autres_res']
del revenus['impot_res_ppal']
        # * Compute disposable income with and without imputed rent
loyers_imputes = temporary_store["depenses_bdf_{}".format(year)]
variables = ["0421"]
loyers_imputes = loyers_imputes[variables]
loyers_imputes.rename(
columns = {"0421": "loyer_impute"},
inplace = True,
)
temporary_store["loyers_imputes_{}".format(year)] = loyers_imputes
revenus = revenus.merge(loyers_imputes, left_index = True, right_index = True)
revenus['rev_disponible'] = revenus.revtot - revenus.impot_revenu - revenus.imphab
revenus['rev_disponible'] = revenus['rev_disponible'] * (revenus['rev_disponible'] >= 0)
revenus['rev_disp_loyerimput'] = revenus.rev_disponible + revenus.loyer_impute
temporary_store["revenus_{}".format(year)] = revenus
elif year == 2011:
try:
c05 = survey.get_values(
table = "C05",
variables = ['c13111', 'c13121', 'c13141', 'pondmen', 'ident_me'],
)
except:
c05 = survey.get_values(
table = "c05",
variables = ['c13111', 'c13121', 'c13141', 'pondmen', 'ident_me'],
)
rev_disp = c05.sort(columns = ['ident_me'])
del c05
try:
menage = survey.get_values(
table = "MENAGE",
variables = ['ident_me', 'revtot', 'revact', 'revsoc', 'revpat', 'rev700', 'rev701', 'rev999', 'revindep', 'salaires'],
).sort(columns = ['ident_me'])
except:
menage = survey.get_values(
table = "menage",
variables = ['ident_me', 'revtot', 'revact', 'revsoc', 'revpat', 'rev700', 'rev701', 'rev999', 'revindep', 'salaires'],
).sort(columns = ['ident_me'])
# variables = ['ident_me', 'revtot', 'revact', 'revsoc', 'revpat', 'rev700', 'rev701', 'rev999', 'revindep', 'rev101_d', 'salaires', 'rev201'],
rev_disp.set_index('ident_me', inplace = True)
menage.set_index('ident_me', inplace = True)
revenus = pandas.concat([menage, rev_disp], axis = 1)
revenus.rename(
columns = dict(
revindep = "act_indpt",
                # TODO: find these commented-out income variables in BdF 2011
# rev101_d = "autoverses",
salaires = "salaires",
# rev201_d = "autres_rev",
rev700 = "somme_obl_recue",
rev701 = "somme_libre_recue",
rev999 = "autres_ress",
c13111 = "impot_res_ppal",
c13141 = "impot_revenu",
c13121 = "impot_autres_res",
),
inplace = True
)
revenus['imphab'] = 0.65 * (revenus.impot_res_ppal + revenus.impot_autres_res)
revenus['impfon'] = 0.35 * (revenus.impot_res_ppal + revenus.impot_autres_res)
del revenus['impot_autres_res']
del revenus['impot_res_ppal']
loyers_imputes = temporary_store["depenses_bdf_{}".format(year)]
variables = ["0421"]
loyers_imputes = loyers_imputes[variables]
loyers_imputes.rename(
columns = {"0421": "loyer_impute"},
inplace = True,
)
temporary_store["loyers_imputes_{}".format(year)] = loyers_imputes
revenus = revenus.merge(loyers_imputes, left_index = True, right_index = True)
revenus['rev_disponible'] = revenus.revtot - revenus.impot_revenu - revenus.imphab
revenus['rev_disponible'] = revenus['rev_disponible'] * (revenus['rev_disponible'] >= 0)
revenus['rev_disp_loyerimput'] = revenus.rev_disponible + revenus.loyer_impute
temporary_store["revenus_{}".format(year)] = revenus
if __name__ == '__main__':
import sys
import time
logging.basicConfig(level = logging.INFO, stream = sys.stdout)
deb = time.clock()
year = 2000
build_homogeneisation_revenus_menages(year = year)
log.info("step_0_4_homogeneisation_revenus_menages duration is {}".format(time.clock() - deb))
|
agpl-3.0
|
nl8590687/ASRT_SpeechRecognition
|
general_function/file_wav.py
|
1
|
9910
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
#
# Copyright 2016-2099 Ailemon.net
#
# This file is part of ASRT Speech Recognition Tool.
#
# ASRT is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
# ASRT is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with ASRT. If not, see <https://www.gnu.org/licenses/>.
# ============================================================================
import os
import wave
import numpy as np
import matplotlib.pyplot as plt
import math
import time
from python_speech_features import mfcc
from python_speech_features import delta
from python_speech_features import logfbank
from scipy.fftpack import fft
def read_wav_data(filename):
'''
    Read a wav file and return the time-domain signal matrix and the frame rate (sampling rate)
    '''
    wav = wave.open(filename,"rb") # open a wav-format audio file stream
    num_frame = wav.getnframes() # get the number of frames
    num_channel=wav.getnchannels() # get the number of channels
    framerate=wav.getframerate() # get the frame rate (sampling rate)
    num_sample_width=wav.getsampwidth() # get the sample width, i.e. the number of bytes per frame
    str_data = wav.readframes(num_frame) # read all frames
    wav.close() # close the stream
    wave_data = np.fromstring(str_data, dtype = np.short) # convert the raw audio bytes into a numpy array
    wave_data.shape = -1, num_channel # reshape by channel count: one column for mono, two columns for stereo
    wave_data = wave_data.T # transpose the matrix
#wave_data = wave_data
return wave_data, framerate
def GetMfccFeature(wavsignal, fs):
    # compute the input features
    feat_mfcc=mfcc(wavsignal[0],fs)
    feat_mfcc_d=delta(feat_mfcc,2)
    feat_mfcc_dd=delta(feat_mfcc_d,2)
    # the returned values are the MFCC feature matrix and its first- and second-order delta matrices
wav_feature = np.column_stack((feat_mfcc, feat_mfcc_d, feat_mfcc_dd))
return wav_feature
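# Hedged usage sketch (illustrative, not part of the original module): with the
# default python_speech_features settings (13 cepstral coefficients), the stacked
# feature matrix has 39 columns (13 MFCC + 13 delta + 13 delta-delta):
#   wave_data, fs = read_wav_data("A2_0.wav")
#   feat = GetMfccFeature(wave_data, fs)   # feat.shape == (n_frames, 39)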
def GetFrequencyFeature(wavsignal, fs):
if(16000 != fs):
raise ValueError('[Error] ASRT currently only supports wav audio files with a sampling rate of 16000 Hz, but this audio is ' + str(fs) + ' Hz. ')
    # apply a 25 ms time window to the waveform with a 10 ms hop
    time_window = 25 # unit: ms
    data_input = []
    #print(int(len(wavsignal[0])/fs*1000 - time_window) // 10)
    wav_length = len(wavsignal[0]) # original length of the speech signal
    range0_end = int(len(wavsignal[0])/fs*1000 - time_window) // 10 # loop end position, i.e. the number of windows produced
for i in range(0, range0_end):
p_start = i * 160
p_end = p_start + 400
data_line = []
for j in range(p_start, p_end):
data_line.append(wavsignal[0][j])
#print('wavsignal[0][j]:\n',wavsignal[0][j])
#data_line = abs(fft(data_line)) / len(wavsignal[0])
data_line = fft(data_line) / wav_length
data_line2 = []
for fre_sig in data_line:
            # take the real and imaginary parts of the spectrum separately as the frequency features
            # using complex numbers directly would later have the imaginary part dropped by numpy, losing information
#print('fre_sig:\n',fre_sig)
data_line2.append(fre_sig.real)
data_line2.append(fre_sig.imag)
        data_input.append(data_line2[0:len(data_line2)//2]) # dividing by 2 keeps half of the data, because the spectrum is symmetric
#print('data_input:\n',data_input)
#print('data_line:\n',data_line)
#print(len(data_input),len(data_input[0]))
return data_input
def GetFrequencyFeature2(wavsignal, fs):
if(16000 != fs):
raise ValueError('[Error] ASRT currently only supports wav audio files with a sampling rate of 16000 Hz, but this audio is ' + str(fs) + ' Hz. ')
    # apply a 25 ms time window to the waveform with a 10 ms hop
    time_window = 25 # unit: ms
    window_length = fs / 1000 * time_window # window length in samples, currently always the fixed value 400
    wav_arr = np.array(wavsignal)
    #wav_length = len(wavsignal[0])
    wav_length = wav_arr.shape[1]
    range0_end = int(len(wavsignal[0])/fs*1000 - time_window) // 10 # loop end position, i.e. the number of windows produced
    data_input = np.zeros((range0_end, 200), dtype = np.float) # holds the final frequency feature data
data_line = np.zeros((1, 400), dtype = np.float)
for i in range(0, range0_end):
p_start = i * 160
p_end = p_start + 400
data_line = wav_arr[0, p_start:p_end]
'''
x=np.linspace(0, 400 - 1, 400, dtype = np.int64)
        w = 0.54 - 0.46 * np.cos(2 * np.pi * (x) / (400 - 1) ) # Hamming window
        data_line = data_line * w # apply the window
'''
data_line = np.abs(fft(data_line)) / wav_length
        data_input[i]=data_line[0:200] # keep 200 values (400 divided by 2), i.e. half of the data, because the spectrum is symmetric
#print(data_input.shape)
return data_input
x=np.linspace(0, 400 - 1, 400, dtype = np.int64)
w = 0.54 - 0.46 * np.cos(2 * np.pi * (x) / (400 - 1) ) # Hamming window
def GetFrequencyFeature3(wavsignal, fs):
if(16000 != fs):
raise ValueError('[Error] ASRT currently only supports wav audio files with a sampling rate of 16000 Hz, but this audio is ' + str(fs) + ' Hz. ')
    # apply a 25 ms time window to the waveform with a 10 ms hop
    time_window = 25 # unit: ms
    window_length = fs / 1000 * time_window # window length in samples, currently always the fixed value 400
    wav_arr = np.array(wavsignal)
    #wav_length = len(wavsignal[0])
    wav_length = wav_arr.shape[1]
    range0_end = int(len(wavsignal[0])/fs*1000 - time_window) // 10 # loop end position, i.e. the number of windows produced
    data_input = np.zeros((range0_end, 200), dtype = np.float) # holds the final frequency feature data
data_line = np.zeros((1, 400), dtype = np.float)
for i in range(0, range0_end):
p_start = i * 160
p_end = p_start + 400
data_line = wav_arr[0, p_start:p_end]
        data_line = data_line * w # apply the window
data_line = np.abs(fft(data_line)) / wav_length
        data_input[i]=data_line[0:200] # keep 200 values (400 divided by 2), i.e. half of the data, because the spectrum is symmetric
#print(data_input.shape)
data_input = np.log(data_input + 1)
return data_input
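# Worked example (illustrative): for a 1-second clip at fs = 16000 Hz,
# len(wavsignal[0]) = 16000, so range0_end = (1000 - 25) // 10 = 97 frames;
# each frame covers 400 samples (25 ms), hops by 160 samples (10 ms), and
# keeps 200 of the 400 FFT bins because the spectrum of a real signal is symmetric.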
def GetFrequencyFeature4(wavsignal, fs):
'''
    mainly used to fix the bug in version 3 (GetFrequencyFeature3)
'''
if(16000 != fs):
raise ValueError('[Error] ASRT currently only supports wav audio files with a sampling rate of 16000 Hz, but this audio is ' + str(fs) + ' Hz. ')
    # apply a 25 ms time window to the waveform with a 10 ms hop
    time_window = 25 # unit: ms
    window_length = int(fs / 1000 * time_window) # window length in samples (cast to int so it can be used as an array dimension), currently always 400
    wav_arr = np.array(wavsignal)
    #wav_length = len(wavsignal[0])
    wav_length = wav_arr.shape[1]
    range0_end = int(len(wavsignal[0])/fs*1000 - time_window) // 10 + 1 # loop end position, i.e. the number of windows produced
    data_input = np.zeros((range0_end, window_length // 2), dtype = np.float) # holds the final frequency feature data
data_line = np.zeros((1, window_length), dtype = np.float)
for i in range(0, range0_end):
p_start = i * 160
p_end = p_start + 400
data_line = wav_arr[0, p_start:p_end]
        data_line = data_line * w # apply the window
        data_line = np.abs(fft(data_line)) / wav_length
        data_input[i]=data_line[0: window_length // 2] # keep window_length // 2 values (i.e. 200), half of the data, because the spectrum is symmetric
#print(data_input.shape)
data_input = np.log(data_input + 1)
return data_input
def wav_scale(energy):
'''
    Normalise the energy of a speech signal (zero mean, unit variance)
    '''
    means = energy.mean() # mean
    var=energy.var() # variance
    e=(energy-means)/math.sqrt(var) # normalised energy
return e
def wav_scale2(energy):
'''
    Normalise the energy of a speech signal (divide by the maximum value)
'''
maxnum = max(energy)
e = energy / maxnum
return e
def wav_scale3(energy):
'''
    Normalise the energy of a speech signal (divide each sample by 100)
'''
for i in range(len(energy)):
#if i == 1:
# #print('wavsignal[0]:\n {:.4f}'.format(energy[1]),energy[1] is int)
energy[i] = float(energy[i]) / 100.0
#if i == 1:
# #print('wavsignal[0]:\n {:.4f}'.format(energy[1]),energy[1] is int)
return energy
def wav_show(wave_data, fs): # display the sound waveform
    time = np.arange(0, len(wave_data)) * (1.0/fs) # playback time of the sound, in seconds
    # plot the sound waveform
#plt.subplot(211)
plt.plot(time, wave_data)
#plt.subplot(212)
#plt.plot(time, wave_data[1], c = "g")
plt.show()
def get_wav_list(filename):
'''
    Read a list of wav files and return a dictionary storing that list
    ps: the dataset contains dedicated files storing the wav file lists used for training, validation and testing
    '''
    txt_obj=open(filename,'r') # open the file and read it in
    txt_text=txt_obj.read()
    txt_lines=txt_text.split('\n') # split the text into lines
    dic_filelist={} # initialise the dictionary
    list_wavmark=[] # initialise the wav list
for i in txt_lines:
if(i!=''):
txt_l=i.split(' ')
dic_filelist[txt_l[0]] = txt_l[1]
list_wavmark.append(txt_l[0])
txt_obj.close()
return dic_filelist,list_wavmark
def get_wav_symbol(filename):
'''
    Read the phonetic symbols corresponding to every wav file in the specified dataset
    and return a dictionary storing the symbol set
    '''
    txt_obj=open(filename,'r') # open the file and read it in
    txt_text=txt_obj.read()
    txt_lines=txt_text.split('\n') # split the text into lines
    dic_symbol_list={} # initialise the dictionary
    list_symbolmark=[] # initialise the symbol list
for i in txt_lines:
if(i!=''):
txt_l=i.split(' ')
dic_symbol_list[txt_l[0]]=txt_l[1:]
list_symbolmark.append(txt_l[0])
txt_obj.close()
return dic_symbol_list,list_symbolmark
if(__name__=='__main__'):
wave_data, fs = read_wav_data("A2_0.wav")
wav_show(wave_data[0],fs)
t0=time.time()
freimg = GetFrequencyFeature3(wave_data,fs)
t1=time.time()
print('time cost:',t1-t0)
freimg = freimg.T
plt.subplot(111)
plt.imshow(freimg)
plt.colorbar(cax=None,ax=None,shrink=0.5)
plt.show()
|
gpl-3.0
|
akionakamura/scikit-learn
|
examples/linear_model/plot_ard.py
|
248
|
2622
|
"""
==================================================
Automatic Relevance Determination Regression (ARD)
==================================================
Fit regression model with Bayesian Ridge Regression.
See :ref:`bayesian_ridge_regression` for more information on the regressor.
Compared to the OLS (ordinary least squares) estimator, the coefficient
weights are slightly shifted toward zeros, which stabilises them.
The histogram of the estimated weights is very peaked, as a sparsity-inducing
prior is implied on the weights.
The estimation of the model is done by iteratively maximizing the
marginal log-likelihood of the observations.
"""
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from scipy import stats
from sklearn.linear_model import ARDRegression, LinearRegression
###############################################################################
# Generating simulated data with Gaussian weights
# Parameters of the example
np.random.seed(0)
n_samples, n_features = 100, 100
# Create Gaussian data
X = np.random.randn(n_samples, n_features)
# Create weights with a precision lambda_ of 4.
lambda_ = 4.
w = np.zeros(n_features)
# Only keep 10 weights of interest
relevant_features = np.random.randint(0, n_features, 10)
for i in relevant_features:
w[i] = stats.norm.rvs(loc=0, scale=1. / np.sqrt(lambda_))
# Create noise with a precision alpha_ of 50.
alpha_ = 50.
noise = stats.norm.rvs(loc=0, scale=1. / np.sqrt(alpha_), size=n_samples)
# Create the target
y = np.dot(X, w) + noise
###############################################################################
# Fit the ARD Regression
clf = ARDRegression(compute_score=True)
clf.fit(X, y)
ols = LinearRegression()
ols.fit(X, y)
###############################################################################
# Plot the true weights, the estimated weights and the histogram of the
# weights
plt.figure(figsize=(6, 5))
plt.title("Weights of the model")
plt.plot(clf.coef_, 'b-', label="ARD estimate")
plt.plot(ols.coef_, 'r--', label="OLS estimate")
plt.plot(w, 'g-', label="Ground truth")
plt.xlabel("Features")
plt.ylabel("Values of the weights")
plt.legend(loc=1)
plt.figure(figsize=(6, 5))
plt.title("Histogram of the weights")
plt.hist(clf.coef_, bins=n_features, log=True)
plt.plot(clf.coef_[relevant_features], 5 * np.ones(len(relevant_features)),
'ro', label="Relevant features")
plt.ylabel("Features")
plt.xlabel("Values of the weights")
plt.legend(loc=1)
plt.figure(figsize=(6, 5))
plt.title("Marginal log-likelihood")
plt.plot(clf.scores_)
plt.ylabel("Score")
plt.xlabel("Iterations")
plt.show()
|
bsd-3-clause
|
kenshay/ImageScripter
|
ProgramData/SystemFiles/Python/Lib/site-packages/pandas/io/tests/parser/compression.py
|
8
|
5738
|
# -*- coding: utf-8 -*-
"""
Tests compressed data parsing functionality for all
of the parsers defined in parsers.py
"""
import nose
import pandas.util.testing as tm
from pandas import compat
class CompressionTests(object):
def test_zip(self):
try:
import zipfile
except ImportError:
raise nose.SkipTest('need zipfile to run')
with open(self.csv1, 'rb') as data_file:
data = data_file.read()
expected = self.read_csv(self.csv1)
with tm.ensure_clean('test_file.zip') as path:
tmp = zipfile.ZipFile(path, mode='w')
tmp.writestr('test_file', data)
tmp.close()
result = self.read_csv(path, compression='zip')
tm.assert_frame_equal(result, expected)
result = self.read_csv(path, compression='infer')
tm.assert_frame_equal(result, expected)
            if self.engine != 'python':
with open(path, 'rb') as f:
result = self.read_csv(f, compression='zip')
tm.assert_frame_equal(result, expected)
with tm.ensure_clean('combined_zip.zip') as path:
inner_file_names = ['test_file', 'second_file']
tmp = zipfile.ZipFile(path, mode='w')
for file_name in inner_file_names:
tmp.writestr(file_name, data)
tmp.close()
self.assertRaisesRegexp(ValueError, 'Multiple files',
self.read_csv, path, compression='zip')
self.assertRaisesRegexp(ValueError, 'Multiple files',
self.read_csv, path, compression='infer')
with tm.ensure_clean() as path:
tmp = zipfile.ZipFile(path, mode='w')
tmp.close()
self.assertRaisesRegexp(ValueError, 'Zero files',
self.read_csv, path, compression='zip')
with tm.ensure_clean() as path:
with open(path, 'wb') as f:
self.assertRaises(zipfile.BadZipfile, self.read_csv,
f, compression='zip')
def test_gzip(self):
try:
import gzip
except ImportError:
raise nose.SkipTest('need gzip to run')
with open(self.csv1, 'rb') as data_file:
data = data_file.read()
expected = self.read_csv(self.csv1)
with tm.ensure_clean() as path:
tmp = gzip.GzipFile(path, mode='wb')
tmp.write(data)
tmp.close()
result = self.read_csv(path, compression='gzip')
tm.assert_frame_equal(result, expected)
with open(path, 'rb') as f:
result = self.read_csv(f, compression='gzip')
tm.assert_frame_equal(result, expected)
with tm.ensure_clean('test.gz') as path:
tmp = gzip.GzipFile(path, mode='wb')
tmp.write(data)
tmp.close()
result = self.read_csv(path, compression='infer')
tm.assert_frame_equal(result, expected)
def test_bz2(self):
try:
import bz2
except ImportError:
raise nose.SkipTest('need bz2 to run')
with open(self.csv1, 'rb') as data_file:
data = data_file.read()
expected = self.read_csv(self.csv1)
with tm.ensure_clean() as path:
tmp = bz2.BZ2File(path, mode='wb')
tmp.write(data)
tmp.close()
result = self.read_csv(path, compression='bz2')
tm.assert_frame_equal(result, expected)
self.assertRaises(ValueError, self.read_csv,
path, compression='bz3')
with open(path, 'rb') as fin:
if compat.PY3:
result = self.read_csv(fin, compression='bz2')
tm.assert_frame_equal(result, expected)
                elif self.engine != 'python':
self.assertRaises(ValueError, self.read_csv,
fin, compression='bz2')
with tm.ensure_clean('test.bz2') as path:
tmp = bz2.BZ2File(path, mode='wb')
tmp.write(data)
tmp.close()
result = self.read_csv(path, compression='infer')
tm.assert_frame_equal(result, expected)
def test_xz(self):
lzma = tm._skip_if_no_lzma()
with open(self.csv1, 'rb') as data_file:
data = data_file.read()
expected = self.read_csv(self.csv1)
with tm.ensure_clean() as path:
tmp = lzma.LZMAFile(path, mode='wb')
tmp.write(data)
tmp.close()
result = self.read_csv(path, compression='xz')
tm.assert_frame_equal(result, expected)
with open(path, 'rb') as f:
result = self.read_csv(f, compression='xz')
tm.assert_frame_equal(result, expected)
with tm.ensure_clean('test.xz') as path:
tmp = lzma.LZMAFile(path, mode='wb')
tmp.write(data)
tmp.close()
result = self.read_csv(path, compression='infer')
tm.assert_frame_equal(result, expected)
def test_read_csv_infer_compression(self):
# see gh-9770
expected = self.read_csv(self.csv1, index_col=0, parse_dates=True)
inputs = [self.csv1, self.csv1 + '.gz',
self.csv1 + '.bz2', open(self.csv1)]
for f in inputs:
df = self.read_csv(f, index_col=0, parse_dates=True,
compression='infer')
tm.assert_frame_equal(expected, df)
inputs[3].close()
|
gpl-3.0
|
kastnerkyle/arrayprocessing
|
arrayprocessing/array.py
|
1
|
5010
|
"""Array definitions.
Contains arrays, beamforming, and null-steering.
"""
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.tri as tri
from .geometry import GeometryMixin
from .element import MonopoleElement
class BaseArrayMixin(GeometryMixin):
"""Core array functionality, including plots and shared calculations.
This class should not be used directly! Inherit, and override
_get_beam_weights instead.
"""
def __init__(self, n_ant, geometry_type, wavelength_spacing=.5,
random_seed=None):
self.n_ant = n_ant
self.geometry_type = geometry_type
GeometryMixin.__init__(self, n_ant, geometry_type, wavelength_spacing,
random_seed)
self.wavelength_spacing = wavelength_spacing
self.beam_weights = self._get_beam_weights()
def _get_beam_weights(self):
raise AssertionError("""Arrays should override this method!""")
def gain_response(self, az_arr, el_arr=None):
"""Calculate gain responses for input azimuths.
        Expects a numpy array of any shape for az_arr.
        If el_arr is specified, it must be the same shape as az_arr.
        Returns a numpy array of gains with the same shape as az_arr.
"""
if el_arr is not None:
assert az_arr.shape == el_arr.shape
flat_az = az_arr.ravel()
az_gains = np.zeros(flat_az.shape)
for n, az in enumerate(flat_az):
propagation = self._get_propagation(az)
response = np.matrix(np.exp(-2j * np.pi * np.dot(
self.geometry, propagation) * self.wavelength_spacing))
az_gains[n] = np.abs(np.dot(self.beam_weights.H, response))[0, 0]
return az_gains.reshape(az_arr.shape)
def plot_gain(self, n_pts=50, min_az=-np.pi, max_az=np.pi, log_scale=True):
"""Plot the gain over azimuth for an array."""
all_az = np.linspace(min_az, max_az, n_pts)
all_gain = self.gain_response(all_az)
if log_scale is True:
all_gain = 10 * np.log(all_gain)
plt.plot(all_az, all_gain, color='steelblue')
plt.xlim(min_az, max_az)
def plot_gain2D(self, n_pts=720, log_scale=True):
"""Plot the 2D gain pattern of an array."""
x_plot_min = self.x_min - 1000
x_plot_max = self.x_max + 1000
y_plot_min = self.y_min - 1000
y_plot_max = self.y_max + 1000
# Based on tricontourf example from
# http://matplotlib.org/examples/pylab_examples/tricontour_demo.html
n_angles = n_pts
n_radii = 10
min_radius = 200
radii = np.linspace(min_radius, y_plot_max, n_radii)
angles = np.linspace(-np.pi, np.pi, n_angles, endpoint=False)
angles = np.repeat(angles[..., np.newaxis], n_radii, axis=1)
angles[:, 1::2] += np.pi / n_angles
x = (radii * np.cos(angles)).ravel()
y = (radii * np.sin(angles)).ravel()
z = self.gain_response(angles).ravel()
# Roll so that 0 degrees is north
        z = np.roll(z, z.shape[0] // 4)
if log_scale:
z = 10 * np.log(z)
triang = tri.Triangulation(x, y)
xmid = x[triang.triangles].mean(axis=1)
ymid = y[triang.triangles].mean(axis=1)
mask = np.where(xmid * xmid + ymid * ymid < min_radius * min_radius,
1, 0)
triang.set_mask(mask)
ax = plt.gca()
ax.set_aspect('equal')
alpha = .8
plt.tricontourf(triang, z, cmap=plt.cm.Purples, alpha=alpha)
plt.colorbar(alpha=alpha)
self.plot_geometry()
plt.xlim(x_plot_min, x_plot_max)
plt.ylim(y_plot_min, y_plot_max)
ax.patch.set_facecolor('white')
ax.xaxis.set_major_locator(plt.NullLocator())
ax.yaxis.set_major_locator(plt.NullLocator())
plt.setp(ax.get_xticklabels(), visible=False)
plt.setp(ax.get_yticklabels(), visible=False)
class UniformArrayMixin(BaseArrayMixin):
"""Equally weighted array."""
def _get_beam_weights(self):
return np.matrix([1.0] * self.n_ant).T
class ClassicalBeamformerMixin(BaseArrayMixin):
"""Classical beamforming."""
def __init__(self, n_ant, beam_dir, geometry_type, wavelength_spacing=.5,
random_seed=None):
self.beam_dir = beam_dir
BaseArrayMixin.__init__(self, n_ant, geometry_type, wavelength_spacing,
random_seed)
def _get_beam_weights(self):
propagation = self._get_propagation(self.beam_dir)
response = np.matrix(np.exp(-2j * np.pi * np.dot(
self.geometry, propagation) * self.wavelength_spacing))
return response / np.sqrt(np.dot(response.H, response))
class MonopoleArray(UniformArrayMixin, MonopoleElement):
"""Monopole array with no beamforming."""
pass
class BeamformedMonopoleArray(ClassicalBeamformerMixin,
MonopoleElement):
"""Classically beamformed monopole array."""
pass
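# Hedged usage sketch (illustrative, not part of the original module). The valid
# geometry_type strings are defined in GeometryMixin, which is not shown here, so
# "circular" below is an assumed placeholder value.
#   arr = BeamformedMonopoleArray(n_ant=8, beam_dir=np.pi / 4,
#                                 geometry_type="circular")
#   arr.plot_gain(n_pts=360)  # gain versus azimuth for the steered beam
#   plt.show()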
|
bsd-3-clause
|
dhruv13J/scikit-learn
|
sklearn/tree/tree.py
|
3
|
34561
|
"""
This module gathers tree-based methods, including decision, regression and
randomized trees. Single and multi-output problems are both handled.
"""
# Authors: Gilles Louppe <[email protected]>
# Peter Prettenhofer <[email protected]>
# Brian Holt <[email protected]>
# Noel Dawe <[email protected]>
# Satrajit Gosh <[email protected]>
# Joly Arnaud <[email protected]>
# Fares Hedayati <[email protected]>
#
# Licence: BSD 3 clause
from __future__ import division
import numbers
from abc import ABCMeta, abstractmethod
import numpy as np
from scipy.sparse import issparse
from ..base import BaseEstimator, ClassifierMixin, RegressorMixin
from ..externals import six
from ..feature_selection.from_model import _LearntSelectorMixin
from ..utils import check_array, check_random_state, compute_sample_weight
from ..utils.validation import NotFittedError
from ._tree import Criterion
from ._tree import Splitter
from ._tree import DepthFirstTreeBuilder, BestFirstTreeBuilder
from ._tree import Tree
from . import _tree
__all__ = ["DecisionTreeClassifier",
"DecisionTreeRegressor",
"ExtraTreeClassifier",
"ExtraTreeRegressor"]
# =============================================================================
# Types and constants
# =============================================================================
DTYPE = _tree.DTYPE
DOUBLE = _tree.DOUBLE
CRITERIA_CLF = {"gini": _tree.Gini, "entropy": _tree.Entropy}
CRITERIA_REG = {"mse": _tree.MSE, "friedman_mse": _tree.FriedmanMSE}
DENSE_SPLITTERS = {"best": _tree.BestSplitter,
"presort-best": _tree.PresortBestSplitter,
"random": _tree.RandomSplitter}
SPARSE_SPLITTERS = {"best": _tree.BestSparseSplitter,
"random": _tree.RandomSparseSplitter}
# =============================================================================
# Base decision tree
# =============================================================================
class BaseDecisionTree(six.with_metaclass(ABCMeta, BaseEstimator,
_LearntSelectorMixin)):
"""Base class for decision trees.
Warning: This class should not be used directly.
Use derived classes instead.
"""
@abstractmethod
def __init__(self,
criterion,
splitter,
max_depth,
min_samples_split,
min_samples_leaf,
min_weight_fraction_leaf,
max_features,
max_leaf_nodes,
random_state,
class_weight=None):
self.criterion = criterion
self.splitter = splitter
self.max_depth = max_depth
self.min_samples_split = min_samples_split
self.min_samples_leaf = min_samples_leaf
self.min_weight_fraction_leaf = min_weight_fraction_leaf
self.max_features = max_features
self.random_state = random_state
self.max_leaf_nodes = max_leaf_nodes
self.class_weight = class_weight
self.n_features_ = None
self.n_outputs_ = None
self.classes_ = None
self.n_classes_ = None
self.tree_ = None
self.max_features_ = None
def fit(self, X, y, sample_weight=None, check_input=True):
"""Build a decision tree from the training set (X, y).
Parameters
----------
X : array-like or sparse matrix, shape = [n_samples, n_features]
The training input samples. Internally, it will be converted to
``dtype=np.float32`` and if a sparse matrix is provided
to a sparse ``csc_matrix``.
y : array-like, shape = [n_samples] or [n_samples, n_outputs]
The target values (class labels in classification, real numbers in
regression). In the regression case, use ``dtype=np.float64`` and
``order='C'`` for maximum efficiency.
sample_weight : array-like, shape = [n_samples] or None
Sample weights. If None, then samples are equally weighted. Splits
that would create child nodes with net zero or negative weight are
ignored while searching for a split in each node. In the case of
classification, splits are also ignored if they would result in any
single class carrying a negative weight in either child node.
check_input : boolean, (default=True)
Allow to bypass several input checking.
Don't use this parameter unless you know what you do.
Returns
-------
self : object
Returns self.
"""
random_state = check_random_state(self.random_state)
if check_input:
X = check_array(X, dtype=DTYPE, accept_sparse="csc")
if issparse(X):
X.sort_indices()
if X.indices.dtype != np.intc or X.indptr.dtype != np.intc:
raise ValueError("No support for np.int64 index based "
"sparse matrices")
# Determine output settings
n_samples, self.n_features_ = X.shape
is_classification = isinstance(self, ClassifierMixin)
y = np.atleast_1d(y)
expanded_class_weight = None
if y.ndim == 1:
# reshape is necessary to preserve the data contiguity;
# [:, np.newaxis] would not.
y = np.reshape(y, (-1, 1))
self.n_outputs_ = y.shape[1]
if is_classification:
y = np.copy(y)
self.classes_ = []
self.n_classes_ = []
if self.class_weight is not None:
y_original = np.copy(y)
for k in range(self.n_outputs_):
classes_k, y[:, k] = np.unique(y[:, k], return_inverse=True)
self.classes_.append(classes_k)
self.n_classes_.append(classes_k.shape[0])
if self.class_weight is not None:
expanded_class_weight = compute_sample_weight(
self.class_weight, y_original)
else:
self.classes_ = [None] * self.n_outputs_
self.n_classes_ = [1] * self.n_outputs_
self.n_classes_ = np.array(self.n_classes_, dtype=np.intp)
if getattr(y, "dtype", None) != DOUBLE or not y.flags.contiguous:
y = np.ascontiguousarray(y, dtype=DOUBLE)
# Check parameters
max_depth = ((2 ** 31) - 1 if self.max_depth is None
else self.max_depth)
max_leaf_nodes = (-1 if self.max_leaf_nodes is None
else self.max_leaf_nodes)
if isinstance(self.max_features, six.string_types):
if self.max_features == "auto":
if is_classification:
max_features = max(1, int(np.sqrt(self.n_features_)))
else:
max_features = self.n_features_
elif self.max_features == "sqrt":
max_features = max(1, int(np.sqrt(self.n_features_)))
elif self.max_features == "log2":
max_features = max(1, int(np.log2(self.n_features_)))
else:
raise ValueError(
'Invalid value for max_features. Allowed string '
'values are "auto", "sqrt" or "log2".')
elif self.max_features is None:
max_features = self.n_features_
elif isinstance(self.max_features, (numbers.Integral, np.integer)):
max_features = self.max_features
else: # float
if self.max_features > 0.0:
max_features = max(1, int(self.max_features * self.n_features_))
else:
max_features = 0
self.max_features_ = max_features
if len(y) != n_samples:
raise ValueError("Number of labels=%d does not match "
"number of samples=%d" % (len(y), n_samples))
if self.min_samples_split <= 0:
raise ValueError("min_samples_split must be greater than zero.")
if self.min_samples_leaf <= 0:
raise ValueError("min_samples_leaf must be greater than zero.")
if not 0 <= self.min_weight_fraction_leaf <= 0.5:
raise ValueError("min_weight_fraction_leaf must in [0, 0.5]")
if max_depth <= 0:
raise ValueError("max_depth must be greater than zero. ")
if not (0 < max_features <= self.n_features_):
raise ValueError("max_features must be in (0, n_features]")
if not isinstance(max_leaf_nodes, (numbers.Integral, np.integer)):
raise ValueError("max_leaf_nodes must be integral number but was "
"%r" % max_leaf_nodes)
if -1 < max_leaf_nodes < 2:
raise ValueError(("max_leaf_nodes {0} must be either smaller than "
"0 or larger than 1").format(max_leaf_nodes))
if sample_weight is not None:
if (getattr(sample_weight, "dtype", None) != DOUBLE or
not sample_weight.flags.contiguous):
sample_weight = np.ascontiguousarray(
sample_weight, dtype=DOUBLE)
if len(sample_weight.shape) > 1:
raise ValueError("Sample weights array has more "
"than one dimension: %d" %
len(sample_weight.shape))
if len(sample_weight) != n_samples:
raise ValueError("Number of weights=%d does not match "
"number of samples=%d" %
(len(sample_weight), n_samples))
if expanded_class_weight is not None:
if sample_weight is not None:
sample_weight = sample_weight * expanded_class_weight
else:
sample_weight = expanded_class_weight
# Set min_weight_leaf from min_weight_fraction_leaf
if self.min_weight_fraction_leaf != 0. and sample_weight is not None:
min_weight_leaf = (self.min_weight_fraction_leaf *
np.sum(sample_weight))
else:
min_weight_leaf = 0.
# Set min_samples_split sensibly
min_samples_split = max(self.min_samples_split,
2 * self.min_samples_leaf)
# Build tree
criterion = self.criterion
if not isinstance(criterion, Criterion):
if is_classification:
criterion = CRITERIA_CLF[self.criterion](self.n_outputs_,
self.n_classes_)
else:
criterion = CRITERIA_REG[self.criterion](self.n_outputs_)
SPLITTERS = SPARSE_SPLITTERS if issparse(X) else DENSE_SPLITTERS
splitter = self.splitter
if not isinstance(self.splitter, Splitter):
splitter = SPLITTERS[self.splitter](criterion,
self.max_features_,
self.min_samples_leaf,
min_weight_leaf,
random_state)
self.tree_ = Tree(self.n_features_, self.n_classes_, self.n_outputs_)
# Use BestFirst if max_leaf_nodes given; use DepthFirst otherwise
if max_leaf_nodes < 0:
builder = DepthFirstTreeBuilder(splitter, min_samples_split,
self.min_samples_leaf,
min_weight_leaf,
max_depth)
else:
builder = BestFirstTreeBuilder(splitter, min_samples_split,
self.min_samples_leaf,
min_weight_leaf,
max_depth,
max_leaf_nodes)
builder.build(self.tree_, X, y, sample_weight)
if self.n_outputs_ == 1:
self.n_classes_ = self.n_classes_[0]
self.classes_ = self.classes_[0]
return self
def _validate_X_predict(self, X, check_input):
"""Validate X whenever one tries to predict, apply, predict_proba"""
if self.tree_ is None:
raise NotFittedError("Estimator not fitted, "
"call `fit` before exploiting the model.")
if check_input:
X = check_array(X, dtype=DTYPE, accept_sparse="csr")
if issparse(X) and (X.indices.dtype != np.intc or
X.indptr.dtype != np.intc):
raise ValueError("No support for np.int64 index based "
"sparse matrices")
n_features = X.shape[1]
if self.n_features_ != n_features:
raise ValueError("Number of features of the model must "
" match the input. Model n_features is %s and "
" input n_features is %s "
% (self.n_features_, n_features))
return X
def predict(self, X, check_input=True):
"""Predict class or regression value for X.
For a classification model, the predicted class for each sample in X is
returned. For a regression model, the predicted value based on X is
returned.
Parameters
----------
X : array-like or sparse matrix of shape = [n_samples, n_features]
The input samples. Internally, it will be converted to
``dtype=np.float32`` and if a sparse matrix is provided
to a sparse ``csr_matrix``.
check_input : boolean, (default=True)
Allow to bypass several input checking.
Don't use this parameter unless you know what you do.
Returns
-------
y : array of shape = [n_samples] or [n_samples, n_outputs]
The predicted classes, or the predict values.
"""
X = self._validate_X_predict(X, check_input)
proba = self.tree_.predict(X)
n_samples = X.shape[0]
# Classification
if isinstance(self, ClassifierMixin):
if self.n_outputs_ == 1:
return self.classes_.take(np.argmax(proba, axis=1), axis=0)
else:
predictions = np.zeros((n_samples, self.n_outputs_))
for k in range(self.n_outputs_):
predictions[:, k] = self.classes_[k].take(
np.argmax(proba[:, k], axis=1),
axis=0)
return predictions
# Regression
else:
if self.n_outputs_ == 1:
return proba[:, 0]
else:
return proba[:, :, 0]
def apply(self, X, check_input=True):
"""
Returns the index of the leaf that each sample ends up in.
Parameters
----------
X : array_like or sparse matrix, shape = [n_samples, n_features]
The input samples. Internally, it will be converted to
``dtype=np.float32`` and if a sparse matrix is provided
to a sparse ``csr_matrix``.
check_input : boolean, (default=True)
Allow to bypass several input checking.
Don't use this parameter unless you know what you do.
Returns
-------
X_leaves : array_like, shape = [n_samples,]
For each datapoint x in X, return the index of the leaf x
ends up in. Leaves are numbered within
``[0; self.tree_.node_count)``, possibly with gaps in the
numbering.
"""
X = self._validate_X_predict(X, check_input)
return self.tree_.apply(X)
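# Illustrative note (not from the original source): for a fitted tree
# ``clf``, ``clf.apply(X)`` returns one leaf index per row of X, e.g.
#     leaves = clf.apply(X_train)
#     np.bincount(leaves)   # training samples routed to each leaf
# The indices refer to node positions in ``clf.tree_``.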
@property
def feature_importances_(self):
"""Return the feature importances.
The importance of a feature is computed as the (normalized) total
reduction of the criterion brought by that feature.
It is also known as the Gini importance.
Returns
-------
feature_importances_ : array, shape = [n_features]
"""
if self.tree_ is None:
raise NotFittedError("Estimator not fitted, call `fit` before"
" `feature_importances_`.")
return self.tree_.compute_feature_importances()
# =============================================================================
# Public estimators
# =============================================================================
class DecisionTreeClassifier(BaseDecisionTree, ClassifierMixin):
"""A decision tree classifier.
Read more in the :ref:`User Guide <tree>`.
Parameters
----------
criterion : string, optional (default="gini")
The function to measure the quality of a split. Supported criteria are
"gini" for the Gini impurity and "entropy" for the information gain.
splitter : string, optional (default="best")
The strategy used to choose the split at each node. Supported
strategies are "best" to choose the best split and "random" to choose
the best random split.
max_features : int, float, string or None, optional (default=None)
The number of features to consider when looking for the best split:
- If int, then consider `max_features` features at each split.
- If float, then `max_features` is a percentage and
`int(max_features * n_features)` features are considered at each
split.
- If "auto", then `max_features=sqrt(n_features)`.
- If "sqrt", then `max_features=sqrt(n_features)`.
- If "log2", then `max_features=log2(n_features)`.
- If None, then `max_features=n_features`.
Note: the search for a split does not stop until at least one
valid partition of the node samples is found, even if it requires to
effectively inspect more than ``max_features`` features.
max_depth : int or None, optional (default=None)
The maximum depth of the tree. If None, then nodes are expanded until
all leaves are pure or until all leaves contain less than
min_samples_split samples.
Ignored if ``max_leaf_nodes`` is not None.
min_samples_split : int, optional (default=2)
The minimum number of samples required to split an internal node.
min_samples_leaf : int, optional (default=1)
The minimum number of samples required to be at a leaf node.
min_weight_fraction_leaf : float, optional (default=0.)
The minimum weighted fraction of the input samples required to be at a
leaf node.
max_leaf_nodes : int or None, optional (default=None)
Grow a tree with ``max_leaf_nodes`` in best-first fashion.
Best nodes are defined as relative reduction in impurity.
If None then unlimited number of leaf nodes.
If not None then ``max_depth`` will be ignored.
class_weight : dict, list of dicts, "balanced" or None, optional
(default=None)
Weights associated with classes in the form ``{class_label: weight}``.
If not given, all classes are supposed to have weight one. For
multi-output problems, a list of dicts can be provided in the same
order as the columns of y.
The "balanced" mode uses the values of y to automatically adjust
weights inversely proportional to class frequencies in the input data
as ``n_samples / (n_classes * np.bincount(y))``
For multi-output, the weights of each column of y will be multiplied.
Note that these weights will be multiplied with sample_weight (passed
through the fit method) if sample_weight is specified.
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
Attributes
----------
classes_ : array of shape = [n_classes] or a list of such arrays
The classes labels (single output problem),
or a list of arrays of class labels (multi-output problem).
feature_importances_ : array of shape = [n_features]
The feature importances. The higher, the more important the
feature. The importance of a feature is computed as the (normalized)
total reduction of the criterion brought by that feature. It is also
known as the Gini importance [4]_.
max_features_ : int,
The inferred value of max_features.
n_classes_ : int or list
The number of classes (for single output problems),
or a list containing the number of classes for each
output (for multi-output problems).
n_features_ : int
The number of features when ``fit`` is performed.
n_outputs_ : int
The number of outputs when ``fit`` is performed.
tree_ : Tree object
The underlying Tree object.
See also
--------
DecisionTreeRegressor
References
----------
.. [1] http://en.wikipedia.org/wiki/Decision_tree_learning
.. [2] L. Breiman, J. Friedman, R. Olshen, and C. Stone, "Classification
and Regression Trees", Wadsworth, Belmont, CA, 1984.
.. [3] T. Hastie, R. Tibshirani and J. Friedman. "Elements of Statistical
Learning", Springer, 2009.
.. [4] L. Breiman, and A. Cutler, "Random Forests",
http://www.stat.berkeley.edu/~breiman/RandomForests/cc_home.htm
Examples
--------
>>> from sklearn.datasets import load_iris
>>> from sklearn.cross_validation import cross_val_score
>>> from sklearn.tree import DecisionTreeClassifier
>>> clf = DecisionTreeClassifier(random_state=0)
>>> iris = load_iris()
>>> cross_val_score(clf, iris.data, iris.target, cv=10)
... # doctest: +SKIP
...
array([ 1. , 0.93..., 0.86..., 0.93..., 0.93...,
0.93..., 0.93..., 1. , 0.93..., 1. ])
"""
def __init__(self,
criterion="gini",
splitter="best",
max_depth=None,
min_samples_split=2,
min_samples_leaf=1,
min_weight_fraction_leaf=0.,
max_features=None,
random_state=None,
max_leaf_nodes=None,
class_weight=None):
super(DecisionTreeClassifier, self).__init__(
criterion=criterion,
splitter=splitter,
max_depth=max_depth,
min_samples_split=min_samples_split,
min_samples_leaf=min_samples_leaf,
min_weight_fraction_leaf=min_weight_fraction_leaf,
max_features=max_features,
max_leaf_nodes=max_leaf_nodes,
class_weight=class_weight,
random_state=random_state)
def predict_proba(self, X, check_input=True):
"""Predict class probabilities of the input samples X.
The predicted class probability is the fraction of samples of the same
class in a leaf.
Parameters
----------
X : array-like or sparse matrix of shape = [n_samples, n_features]
    The input samples. Internally, it will be converted to
    ``dtype=np.float32`` and if a sparse matrix is provided
    to a sparse ``csr_matrix``.
check_input : boolean, (default=True)
    Allow to bypass several input checking.
    Don't use this parameter unless you know what you do.
Returns
-------
p : array of shape = [n_samples, n_classes], or a list of n_outputs
such arrays if n_outputs > 1.
The class probabilities of the input samples. The order of the
classes corresponds to that in the attribute `classes_`.
"""
X = self._validate_X_predict(X, check_input)
proba = self.tree_.predict(X)
if self.n_outputs_ == 1:
proba = proba[:, :self.n_classes_]
normalizer = proba.sum(axis=1)[:, np.newaxis]
normalizer[normalizer == 0.0] = 1.0
proba /= normalizer
return proba
else:
all_proba = []
for k in range(self.n_outputs_):
proba_k = proba[:, k, :self.n_classes_[k]]
normalizer = proba_k.sum(axis=1)[:, np.newaxis]
normalizer[normalizer == 0.0] = 1.0
proba_k /= normalizer
all_proba.append(proba_k)
return all_proba
def predict_log_proba(self, X):
"""Predict class log-probabilities of the input samples X.
Parameters
----------
X : array-like or sparse matrix of shape = [n_samples, n_features]
The input samples. Internally, it will be converted to
``dtype=np.float32`` and if a sparse matrix is provided
to a sparse ``csr_matrix``.
Returns
-------
p : array of shape = [n_samples, n_classes], or a list of n_outputs
such arrays if n_outputs > 1.
The class log-probabilities of the input samples. The order of the
classes corresponds to that in the attribute `classes_`.
"""
proba = self.predict_proba(X)
if self.n_outputs_ == 1:
return np.log(proba)
else:
for k in range(self.n_outputs_):
proba[k] = np.log(proba[k])
return proba
class DecisionTreeRegressor(BaseDecisionTree, RegressorMixin):
"""A decision tree regressor.
Read more in the :ref:`User Guide <tree>`.
Parameters
----------
criterion : string, optional (default="mse")
The function to measure the quality of a split. The only supported
criterion is "mse" for the mean squared error.
splitter : string, optional (default="best")
The strategy used to choose the split at each node. Supported
strategies are "best" to choose the best split and "random" to choose
the best random split.
max_features : int, float, string or None, optional (default=None)
The number of features to consider when looking for the best split:
- If int, then consider `max_features` features at each split.
- If float, then `max_features` is a percentage and
`int(max_features * n_features)` features are considered at each
split.
- If "auto", then `max_features=n_features`.
- If "sqrt", then `max_features=sqrt(n_features)`.
- If "log2", then `max_features=log2(n_features)`.
- If None, then `max_features=n_features`.
Note: the search for a split does not stop until at least one
valid partition of the node samples is found, even if it requires to
effectively inspect more than ``max_features`` features.
max_depth : int or None, optional (default=None)
The maximum depth of the tree. If None, then nodes are expanded until
all leaves are pure or until all leaves contain less than
min_samples_split samples.
Ignored if ``max_leaf_nodes`` is not None.
min_samples_split : int, optional (default=2)
The minimum number of samples required to split an internal node.
min_samples_leaf : int, optional (default=1)
The minimum number of samples required to be at a leaf node.
min_weight_fraction_leaf : float, optional (default=0.)
The minimum weighted fraction of the input samples required to be at a
leaf node.
max_leaf_nodes : int or None, optional (default=None)
Grow a tree with ``max_leaf_nodes`` in best-first fashion.
Best nodes are defined as relative reduction in impurity.
If None then unlimited number of leaf nodes.
If not None then ``max_depth`` will be ignored.
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
Attributes
----------
feature_importances_ : array of shape = [n_features]
The feature importances.
The higher, the more important the feature.
The importance of a feature is computed as the
(normalized) total reduction of the criterion brought
by that feature. It is also known as the Gini importance [4]_.
max_features_ : int,
The inferred value of max_features.
n_features_ : int
The number of features when ``fit`` is performed.
n_outputs_ : int
The number of outputs when ``fit`` is performed.
tree_ : Tree object
The underlying Tree object.
See also
--------
DecisionTreeClassifier
References
----------
.. [1] http://en.wikipedia.org/wiki/Decision_tree_learning
.. [2] L. Breiman, J. Friedman, R. Olshen, and C. Stone, "Classification
and Regression Trees", Wadsworth, Belmont, CA, 1984.
.. [3] T. Hastie, R. Tibshirani and J. Friedman. "Elements of Statistical
Learning", Springer, 2009.
.. [4] L. Breiman, and A. Cutler, "Random Forests",
http://www.stat.berkeley.edu/~breiman/RandomForests/cc_home.htm
Examples
--------
>>> from sklearn.datasets import load_boston
>>> from sklearn.cross_validation import cross_val_score
>>> from sklearn.tree import DecisionTreeRegressor
>>> boston = load_boston()
>>> regressor = DecisionTreeRegressor(random_state=0)
>>> cross_val_score(regressor, boston.data, boston.target, cv=10)
... # doctest: +SKIP
...
array([ 0.61..., 0.57..., -0.34..., 0.41..., 0.75...,
0.07..., 0.29..., 0.33..., -1.42..., -1.77...])
"""
def __init__(self,
criterion="mse",
splitter="best",
max_depth=None,
min_samples_split=2,
min_samples_leaf=1,
min_weight_fraction_leaf=0.,
max_features=None,
random_state=None,
max_leaf_nodes=None):
super(DecisionTreeRegressor, self).__init__(
criterion=criterion,
splitter=splitter,
max_depth=max_depth,
min_samples_split=min_samples_split,
min_samples_leaf=min_samples_leaf,
min_weight_fraction_leaf=min_weight_fraction_leaf,
max_features=max_features,
max_leaf_nodes=max_leaf_nodes,
random_state=random_state)
class ExtraTreeClassifier(DecisionTreeClassifier):
"""An extremely randomized tree classifier.
Extra-trees differ from classic decision trees in the way they are built.
When looking for the best split to separate the samples of a node into two
groups, random splits are drawn for each of the `max_features` randomly
selected features and the best split among those is chosen. When
`max_features` is set to 1, this amounts to building a totally random
decision tree.
Warning: Extra-trees should only be used within ensemble methods.
Read more in the :ref:`User Guide <tree>`.
See also
--------
ExtraTreeRegressor, ExtraTreesClassifier, ExtraTreesRegressor
References
----------
.. [1] P. Geurts, D. Ernst., and L. Wehenkel, "Extremely randomized trees",
Machine Learning, 63(1), 3-42, 2006.
"""
def __init__(self,
criterion="gini",
splitter="random",
max_depth=None,
min_samples_split=2,
min_samples_leaf=1,
min_weight_fraction_leaf=0.,
max_features="auto",
random_state=None,
max_leaf_nodes=None,
class_weight=None):
super(ExtraTreeClassifier, self).__init__(
criterion=criterion,
splitter=splitter,
max_depth=max_depth,
min_samples_split=min_samples_split,
min_samples_leaf=min_samples_leaf,
min_weight_fraction_leaf=min_weight_fraction_leaf,
max_features=max_features,
max_leaf_nodes=max_leaf_nodes,
class_weight=class_weight,
random_state=random_state)
class ExtraTreeRegressor(DecisionTreeRegressor):
"""An extremely randomized tree regressor.
Extra-trees differ from classic decision trees in the way they are built.
When looking for the best split to separate the samples of a node into two
groups, random splits are drawn for each of the `max_features` randomly
selected features and the best split among those is chosen. When
`max_features` is set to 1, this amounts to building a totally random
decision tree.
Warning: Extra-trees should only be used within ensemble methods.
Read more in the :ref:`User Guide <tree>`.
See also
--------
ExtraTreeClassifier, ExtraTreesClassifier, ExtraTreesRegressor
References
----------
.. [1] P. Geurts, D. Ernst., and L. Wehenkel, "Extremely randomized trees",
Machine Learning, 63(1), 3-42, 2006.
"""
def __init__(self,
criterion="mse",
splitter="random",
max_depth=None,
min_samples_split=2,
min_samples_leaf=1,
min_weight_fraction_leaf=0.,
max_features="auto",
random_state=None,
max_leaf_nodes=None):
super(ExtraTreeRegressor, self).__init__(
criterion=criterion,
splitter=splitter,
max_depth=max_depth,
min_samples_split=min_samples_split,
min_samples_leaf=min_samples_leaf,
min_weight_fraction_leaf=min_weight_fraction_leaf,
max_features=max_features,
max_leaf_nodes=max_leaf_nodes,
random_state=random_state)
|
bsd-3-clause
|
zuku1985/scikit-learn
|
examples/plot_digits_pipe.py
|
65
|
1652
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
"""
=========================================================
Pipelining: chaining a PCA and a logistic regression
=========================================================
The PCA does an unsupervised dimensionality reduction, while the logistic
regression does the prediction.
We use a GridSearchCV to set the dimensionality of the PCA.
"""
print(__doc__)
# Code source: Gaël Varoquaux
# Modified for documentation by Jaques Grobler
# License: BSD 3 clause
import numpy as np
import matplotlib.pyplot as plt
from sklearn import linear_model, decomposition, datasets
from sklearn.pipeline import Pipeline
from sklearn.model_selection import GridSearchCV
logistic = linear_model.LogisticRegression()
pca = decomposition.PCA()
pipe = Pipeline(steps=[('pca', pca), ('logistic', logistic)])
digits = datasets.load_digits()
X_digits = digits.data
y_digits = digits.target
# Plot the PCA spectrum
pca.fit(X_digits)
plt.figure(1, figsize=(4, 3))
plt.clf()
plt.axes([.2, .2, .7, .7])
plt.plot(pca.explained_variance_, linewidth=2)
plt.axis('tight')
plt.xlabel('n_components')
plt.ylabel('explained_variance_')
# Prediction
n_components = [20, 40, 64]
Cs = np.logspace(-4, 4, 3)
# Parameters of pipelines can be set using ‘__’ separated parameter names:
estimator = GridSearchCV(pipe,
dict(pca__n_components=n_components,
logistic__C=Cs))
estimator.fit(X_digits, y_digits)
plt.axvline(estimator.best_estimator_.named_steps['pca'].n_components,
linestyle=':', label='n_components chosen')
plt.legend(prop=dict(size=12))
plt.show()
|
bsd-3-clause
|
lbishal/scikit-learn
|
sklearn/gaussian_process/kernels.py
|
18
|
66251
|
"""Kernels for Gaussian process regression and classification.
The kernels in this module allow kernel-engineering, i.e., they can be
combined via the "+" and "*" operators or be exponentiated with a scalar
via "**". These sum and product expressions can also contain scalar values,
which are automatically converted to a constant kernel.
All kernels allow (analytic) gradient-based hyperparameter optimization.
The space of hyperparameters can be specified by giving lower and upper
boundaries for the value of each hyperparameter (the search space is thus
rectangular). Instead of specifying bounds, hyperparameters can also be
declared to be "fixed", which causes these hyperparameters to be excluded from
optimization.
"""
# Author: Jan Hendrik Metzen <[email protected]>
# Licence: BSD 3 clause
# Note: this module is strongly inspired by the kernel module of the george
# package.
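# Illustrative sketch (not part of the original module) of the kernel algebra
# described in the docstring above, using kernels defined further down in
# this file; shown as a comment so the module imports unchanged:
#     from sklearn.gaussian_process.kernels import RBF, WhiteKernel
#     k = 1.0 * RBF(length_scale=2.0) + WhiteKernel(noise_level=0.1)
#     # the scalar 1.0 is wrapped in a ConstantKernel by Kernel.__rmul__
#     k.theta    # log-transformed free hyperparameters
#     k.bounds   # log-transformed search bounds
#     k ** 2     # an Exponentiation kernel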
from abc import ABCMeta, abstractmethod
from collections import namedtuple
import math
import numpy as np
from scipy.special import kv, gamma
from scipy.spatial.distance import pdist, cdist, squareform
from ..metrics.pairwise import pairwise_kernels
from ..externals import six
from ..base import clone
from sklearn.externals.funcsigs import signature
class Hyperparameter(namedtuple('Hyperparameter',
('name', 'value_type', 'bounds',
'n_elements', 'fixed'))):
"""A kernel hyperparameter's specification in form of a namedtuple.
Attributes
----------
name : string
The name of the hyperparameter. Note that a kernel using a
hyperparameter with name "x" must have the attributes self.x and
self.x_bounds
value_type : string
The type of the hyperparameter. Currently, only "numeric"
hyperparameters are supported.
bounds : pair of floats >= 0 or "fixed"
The lower and upper bound on the parameter. If n_elements>1, a pair
of 1d array with n_elements each may be given alternatively. If
the string "fixed" is passed as bounds, the hyperparameter's value
cannot be changed.
n_elements : int, default=1
The number of elements of the hyperparameter value. Defaults to 1,
which corresponds to a scalar hyperparameter. n_elements > 1
corresponds to a hyperparameter which is vector-valued,
such as, e.g., anisotropic length-scales.
fixed : bool, default: None
Whether the value of this hyperparameter is fixed, i.e., cannot be
changed during hyperparameter tuning. If None is passed, the "fixed" is
derived based on the given bounds.
"""
# A raw namedtuple is very memory efficient as it packs the attributes
# in a struct and gets rid of the per-instance __dict__; in particular, it
# does not copy the string for the keys on each instance.
# Deriving a namedtuple subclass just to introduce an __init__ method would
# reintroduce the __dict__ on the instance, so we instead tell the Python
# interpreter that this subclass uses static __slots__ rather than dynamic
# attributes. Since we do not need any additional slot in the subclass, we
# set __slots__ to the empty tuple.
__slots__ = ()
def __new__(cls, name, value_type, bounds, n_elements=1, fixed=None):
if bounds != "fixed":
bounds = np.atleast_2d(bounds)
if n_elements > 1: # vector-valued parameter
if bounds.shape[0] == 1:
bounds = np.repeat(bounds, n_elements, 0)
elif bounds.shape[0] != n_elements:
raise ValueError("Bounds on %s should have either 1 or "
"%d dimensions. Given are %d"
% (name, n_elements, bounds.shape[0]))
if fixed is None:
fixed = (bounds == "fixed")
return super(Hyperparameter, cls).__new__(
cls, name, value_type, bounds, n_elements, fixed)
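# Illustrative sketch (not from the original source): a scalar and a
# vector-valued hyperparameter specification built with this namedtuple.
#     Hyperparameter("constant_value", "numeric", (1e-5, 1e5))
#     Hyperparameter("length_scale", "numeric", (1e-5, 1e5), n_elements=3)
# With n_elements=3 the single bounds pair is repeated to shape (3, 2) in
# __new__ above; passing bounds="fixed" marks the hyperparameter as fixed.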
class Kernel(six.with_metaclass(ABCMeta)):
"""Base class for all kernels."""
def get_params(self, deep=True):
"""Get parameters of this kernel.
Parameters
----------
deep: boolean, optional
If True, will return the parameters for this estimator and
contained subobjects that are estimators.
Returns
-------
params : mapping of string to any
Parameter names mapped to their values.
"""
params = dict()
# introspect the constructor arguments to find the model parameters
# to represent
cls = self.__class__
init = getattr(cls.__init__, 'deprecated_original', cls.__init__)
init_sign = signature(init)
args, varargs = [], []
for parameter in init_sign.parameters.values():
if (parameter.kind != parameter.VAR_KEYWORD and
parameter.name != 'self'):
args.append(parameter.name)
if parameter.kind == parameter.VAR_POSITIONAL:
varargs.append(parameter.name)
if len(varargs) != 0:
raise RuntimeError("scikit-learn kernels should always "
"specify their parameters in the signature"
" of their __init__ (no varargs)."
" %s doesn't follow this convention."
% (cls, ))
for arg in args:
params[arg] = getattr(self, arg, None)
return params
def set_params(self, **params):
"""Set the parameters of this kernel.
The method works on simple kernels as well as on nested kernels.
The latter have parameters of the form ``<component>__<parameter>``
so that it's possible to update each component of a nested object.
Returns
-------
self
"""
if not params:
# Simple optimisation to gain speed (inspect is slow)
return self
valid_params = self.get_params(deep=True)
for key, value in six.iteritems(params):
split = key.split('__', 1)
if len(split) > 1:
# nested objects case
name, sub_name = split
if name not in valid_params:
raise ValueError('Invalid parameter %s for kernel %s. '
'Check the list of available parameters '
'with `kernel.get_params().keys()`.' %
(name, self))
sub_object = valid_params[name]
sub_object.set_params(**{sub_name: value})
else:
# simple objects case
if key not in valid_params:
raise ValueError('Invalid parameter %s for kernel %s. '
'Check the list of available parameters '
'with `kernel.get_params().keys()`.' %
(key, self.__class__.__name__))
setattr(self, key, value)
return self
def clone_with_theta(self, theta):
"""Returns a clone of self with given hyperparameters theta. """
cloned = clone(self)
cloned.theta = theta
return cloned
@property
def n_dims(self):
"""Returns the number of non-fixed hyperparameters of the kernel."""
return self.theta.shape[0]
@property
def hyperparameters(self):
"""Returns a list of all hyperparameter specifications."""
r = []
for attr, value in sorted(self.__dict__.items()):
if attr.startswith("hyperparameter_"):
r.append(value)
return r
@property
def theta(self):
"""Returns the (flattened, log-transformed) non-fixed hyperparameters.
Note that theta are typically the log-transformed values of the
kernel's hyperparameters as this representation of the search space
is more amenable for hyperparameter search, as hyperparameters like
length-scales naturally live on a log-scale.
Returns
-------
theta : array, shape (n_dims,)
The non-fixed, log-transformed hyperparameters of the kernel
"""
theta = []
for hyperparameter in self.hyperparameters:
if not hyperparameter.fixed:
theta.append(getattr(self, hyperparameter.name))
if len(theta) > 0:
return np.log(np.hstack(theta))
else:
return np.array([])
@theta.setter
def theta(self, theta):
"""Sets the (flattened, log-transformed) non-fixed hyperparameters.
Parameters
----------
theta : array, shape (n_dims,)
The non-fixed, log-transformed hyperparameters of the kernel
"""
i = 0
for hyperparameter in self.hyperparameters:
if hyperparameter.fixed:
continue
if hyperparameter.n_elements > 1:
# vector-valued parameter
setattr(self, hyperparameter.name,
np.exp(theta[i:i + hyperparameter.n_elements]))
i += hyperparameter.n_elements
else:
setattr(self, hyperparameter.name, np.exp(theta[i]))
i += 1
if i != len(theta):
raise ValueError("theta has not the correct number of entries."
" Should be %d; given are %d"
% (i, len(theta)))
@property
def bounds(self):
"""Returns the log-transformed bounds on the theta.
Returns
-------
bounds : array, shape (n_dims, 2)
The log-transformed bounds on the kernel's hyperparameters theta
"""
bounds = []
for hyperparameter in self.hyperparameters:
if not hyperparameter.fixed:
bounds.append(hyperparameter.bounds)
if len(bounds) > 0:
return np.log(np.vstack(bounds))
else:
return np.array([])
def __add__(self, b):
if not isinstance(b, Kernel):
return Sum(self, ConstantKernel(b))
return Sum(self, b)
def __radd__(self, b):
if not isinstance(b, Kernel):
return Sum(ConstantKernel(b), self)
return Sum(b, self)
def __mul__(self, b):
if not isinstance(b, Kernel):
return Product(self, ConstantKernel(b))
return Product(self, b)
def __rmul__(self, b):
if not isinstance(b, Kernel):
return Product(ConstantKernel(b), self)
return Product(b, self)
def __pow__(self, b):
return Exponentiation(self, b)
def __eq__(self, b):
if type(self) != type(b):
return False
params_a = self.get_params()
params_b = b.get_params()
for key in set(list(params_a.keys()) + list(params_b.keys())):
if np.any(params_a.get(key, None) != params_b.get(key, None)):
return False
return True
def __repr__(self):
return "{0}({1})".format(self.__class__.__name__,
", ".join(map("{0:.3g}".format, self.theta)))
@abstractmethod
def __call__(self, X, Y=None, eval_gradient=False):
"""Evaluate the kernel."""
@abstractmethod
def diag(self, X):
"""Returns the diagonal of the kernel k(X, X).
The result of this method is identical to np.diag(self(X)); however,
it can be evaluated more efficiently since only the diagonal is
evaluated.
Parameters
----------
X : array, shape (n_samples_X, n_features)
Left argument of the returned kernel k(X, Y)
Returns
-------
K_diag : array, shape (n_samples_X,)
Diagonal of kernel k(X, X)
"""
@abstractmethod
def is_stationary(self):
"""Returns whether the kernel is stationary. """
class NormalizedKernelMixin(object):
"""Mixin for kernels which are normalized: k(X, X)=1."""
def diag(self, X):
"""Returns the diagonal of the kernel k(X, X).
The result of this method is identical to np.diag(self(X)); however,
it can be evaluated more efficiently since only the diagonal is
evaluated.
Parameters
----------
X : array, shape (n_samples_X, n_features)
Left argument of the returned kernel k(X, Y)
Returns
-------
K_diag : array, shape (n_samples_X,)
Diagonal of kernel k(X, X)
"""
return np.ones(X.shape[0])
class StationaryKernelMixin(object):
"""Mixin for kernels which are stationary: k(X, Y)= f(X-Y)."""
def is_stationary(self):
"""Returns whether the kernel is stationary. """
return True
class CompoundKernel(Kernel):
"""Kernel which is composed of a set of other kernels."""
def __init__(self, kernels):
self.kernels = kernels
def get_params(self, deep=True):
"""Get parameters of this kernel.
Parameters
----------
deep: boolean, optional
If True, will return the parameters for this estimator and
contained subobjects that are estimators.
Returns
-------
params : mapping of string to any
Parameter names mapped to their values.
"""
return dict(kernels=self.kernels)
@property
def theta(self):
"""Returns the (flattened, log-transformed) non-fixed hyperparameters.
Note that theta are typically the log-transformed values of the
kernel's hyperparameters as this representation of the search space
is more amenable for hyperparameter search, as hyperparameters like
length-scales naturally live on a log-scale.
Returns
-------
theta : array, shape (n_dims,)
The non-fixed, log-transformed hyperparameters of the kernel
"""
return np.hstack([kernel.theta for kernel in self.kernels])
@theta.setter
def theta(self, theta):
"""Sets the (flattened, log-transformed) non-fixed hyperparameters.
Parameters
----------
theta : array, shape (n_dims,)
The non-fixed, log-transformed hyperparameters of the kernel
"""
k_dims = self.kernels[0].n_dims  # CompoundKernel has no k1 attribute
for i, kernel in enumerate(self.kernels):
kernel.theta = theta[i * k_dims:(i + 1) * k_dims]
@property
def bounds(self):
"""Returns the log-transformed bounds on the theta.
Returns
-------
bounds : array, shape (n_dims, 2)
The log-transformed bounds on the kernel's hyperparameters theta
"""
return np.vstack([kernel.bounds for kernel in self.kernels])
def __call__(self, X, Y=None, eval_gradient=False):
"""Return the kernel k(X, Y) and optionally its gradient.
Note that this compound kernel returns the results of all simple kernel
stacked along an additional axis.
Parameters
----------
X : array, shape (n_samples_X, n_features)
Left argument of the returned kernel k(X, Y)
Y : array, shape (n_samples_Y, n_features), (optional, default=None)
Right argument of the returned kernel k(X, Y). If None, k(X, X)
is evaluated instead.
eval_gradient : bool (optional, default=False)
Determines whether the gradient with respect to the kernel
hyperparameter is determined.
Returns
-------
K : array, shape (n_samples_X, n_samples_Y, n_kernels)
Kernel k(X, Y)
K_gradient : array, shape (n_samples_X, n_samples_X, n_dims, n_kernels)
The gradient of the kernel k(X, X) with respect to the
hyperparameter of the kernel. Only returned when eval_gradient
is True.
"""
if eval_gradient:
K = []
K_grad = []
for kernel in self.kernels:
K_single, K_grad_single = kernel(X, Y, eval_gradient)
K.append(K_single)
K_grad.append(K_grad_single[..., np.newaxis])
return np.dstack(K), np.concatenate(K_grad, 3)
else:
return np.dstack([kernel(X, Y, eval_gradient)
for kernel in self.kernels])
def __eq__(self, b):
if type(self) != type(b) or len(self.kernels) != len(b.kernels):
return False
return np.all([self.kernels[i] == b.kernels[i]
for i in range(len(self.kernels))])
def is_stationary(self):
"""Returns whether the kernel is stationary. """
return np.all([kernel.is_stationary() for kernel in self.kernels])
def diag(self, X):
"""Returns the diagonal of the kernel k(X, X).
The result of this method is identical to np.diag(self(X)); however,
it can be evaluated more efficiently since only the diagonal is
evaluated.
Parameters
----------
X : array, shape (n_samples_X, n_features)
Left argument of the returned kernel k(X, Y)
Returns
-------
K_diag : array, shape (n_samples_X, n_kernels)
Diagonal of kernel k(X, X)
"""
return np.vstack([kernel.diag(X) for kernel in self.kernels]).T
class KernelOperator(Kernel):
"""Base class for all kernel operators. """
def __init__(self, k1, k2):
self.k1 = k1
self.k2 = k2
def get_params(self, deep=True):
"""Get parameters of this kernel.
Parameters
----------
deep: boolean, optional
If True, will return the parameters for this estimator and
contained subobjects that are estimators.
Returns
-------
params : mapping of string to any
Parameter names mapped to their values.
"""
params = dict(k1=self.k1, k2=self.k2)
if deep:
deep_items = self.k1.get_params().items()
params.update(('k1__' + k, val) for k, val in deep_items)
deep_items = self.k2.get_params().items()
params.update(('k2__' + k, val) for k, val in deep_items)
return params
@property
def hyperparameters(self):
"""Returns a list of all hyperparameter."""
r = []
for hyperparameter in self.k1.hyperparameters:
r.append(Hyperparameter("k1__" + hyperparameter.name,
hyperparameter.value_type,
hyperparameter.bounds,
hyperparameter.n_elements))
for hyperparameter in self.k2.hyperparameters:
r.append(Hyperparameter("k2__" + hyperparameter.name,
hyperparameter.value_type,
hyperparameter.bounds,
hyperparameter.n_elements))
return r
@property
def theta(self):
"""Returns the (flattened, log-transformed) non-fixed hyperparameters.
Note that theta are typically the log-transformed values of the
kernel's hyperparameters as this representation of the search space
is more amenable for hyperparameter search, as hyperparameters like
length-scales naturally live on a log-scale.
Returns
-------
theta : array, shape (n_dims,)
The non-fixed, log-transformed hyperparameters of the kernel
"""
return np.append(self.k1.theta, self.k2.theta)
@theta.setter
def theta(self, theta):
"""Sets the (flattened, log-transformed) non-fixed hyperparameters.
Parameters
----------
theta : array, shape (n_dims,)
The non-fixed, log-transformed hyperparameters of the kernel
"""
k1_dims = self.k1.n_dims
self.k1.theta = theta[:k1_dims]
self.k2.theta = theta[k1_dims:]
@property
def bounds(self):
"""Returns the log-transformed bounds on the theta.
Returns
-------
bounds : array, shape (n_dims, 2)
The log-transformed bounds on the kernel's hyperparameters theta
"""
if self.k1.bounds.size == 0:
return self.k2.bounds
if self.k2.bounds.size == 0:
return self.k1.bounds
return np.vstack((self.k1.bounds, self.k2.bounds))
def __eq__(self, b):
if type(self) != type(b):
return False
return (self.k1 == b.k1 and self.k2 == b.k2) \
or (self.k1 == b.k2 and self.k2 == b.k1)
def is_stationary(self):
"""Returns whether the kernel is stationary. """
return self.k1.is_stationary() and self.k2.is_stationary()
class Sum(KernelOperator):
"""Sum-kernel k1 + k2 of two kernels k1 and k2.
The resulting kernel is defined as
k_sum(X, Y) = k1(X, Y) + k2(X, Y)
Parameters
----------
k1 : Kernel object
The first base-kernel of the sum-kernel
k2 : Kernel object
The second base-kernel of the sum-kernel
"""
def __call__(self, X, Y=None, eval_gradient=False):
"""Return the kernel k(X, Y) and optionally its gradient.
Parameters
----------
X : array, shape (n_samples_X, n_features)
Left argument of the returned kernel k(X, Y)
Y : array, shape (n_samples_Y, n_features), (optional, default=None)
Right argument of the returned kernel k(X, Y). If None, k(X, X)
is evaluated instead.
eval_gradient : bool (optional, default=False)
Determines whether the gradient with respect to the kernel
hyperparameter is determined.
Returns
-------
K : array, shape (n_samples_X, n_samples_Y)
Kernel k(X, Y)
K_gradient : array (opt.), shape (n_samples_X, n_samples_X, n_dims)
The gradient of the kernel k(X, X) with respect to the
hyperparameter of the kernel. Only returned when eval_gradient
is True.
"""
if eval_gradient:
K1, K1_gradient = self.k1(X, Y, eval_gradient=True)
K2, K2_gradient = self.k2(X, Y, eval_gradient=True)
return K1 + K2, np.dstack((K1_gradient, K2_gradient))
else:
return self.k1(X, Y) + self.k2(X, Y)
def diag(self, X):
"""Returns the diagonal of the kernel k(X, X).
The result of this method is identical to np.diag(self(X)); however,
it can be evaluated more efficiently since only the diagonal is
evaluated.
Parameters
----------
X : array, shape (n_samples_X, n_features)
Left argument of the returned kernel k(X, Y)
Returns
-------
K_diag : array, shape (n_samples_X,)
Diagonal of kernel k(X, X)
"""
return self.k1.diag(X) + self.k2.diag(X)
def __repr__(self):
return "{0} + {1}".format(self.k1, self.k2)
class Product(KernelOperator):
"""Product-kernel k1 * k2 of two kernels k1 and k2.
The resulting kernel is defined as
k_prod(X, Y) = k1(X, Y) * k2(X, Y)
Parameters
----------
k1 : Kernel object
The first base-kernel of the product-kernel
k2 : Kernel object
The second base-kernel of the product-kernel
"""
def __call__(self, X, Y=None, eval_gradient=False):
"""Return the kernel k(X, Y) and optionally its gradient.
Parameters
----------
X : array, shape (n_samples_X, n_features)
Left argument of the returned kernel k(X, Y)
Y : array, shape (n_samples_Y, n_features), (optional, default=None)
Right argument of the returned kernel k(X, Y). If None, k(X, X)
is evaluated instead.
eval_gradient : bool (optional, default=False)
Determines whether the gradient with respect to the kernel
hyperparameter is determined.
Returns
-------
K : array, shape (n_samples_X, n_samples_Y)
Kernel k(X, Y)
K_gradient : array (opt.), shape (n_samples_X, n_samples_X, n_dims)
The gradient of the kernel k(X, X) with respect to the
hyperparameter of the kernel. Only returned when eval_gradient
is True.
"""
if eval_gradient:
K1, K1_gradient = self.k1(X, Y, eval_gradient=True)
K2, K2_gradient = self.k2(X, Y, eval_gradient=True)
return K1 * K2, np.dstack((K1_gradient * K2[:, :, np.newaxis],
K2_gradient * K1[:, :, np.newaxis]))
else:
return self.k1(X, Y) * self.k2(X, Y)
def diag(self, X):
"""Returns the diagonal of the kernel k(X, X).
The result of this method is identical to np.diag(self(X)); however,
it can be evaluated more efficiently since only the diagonal is
evaluated.
Parameters
----------
X : array, shape (n_samples_X, n_features)
Left argument of the returned kernel k(X, Y)
Returns
-------
K_diag : array, shape (n_samples_X,)
Diagonal of kernel k(X, X)
"""
return self.k1.diag(X) * self.k2.diag(X)
def __repr__(self):
return "{0} * {1}".format(self.k1, self.k2)
class Exponentiation(Kernel):
"""Exponentiate kernel by given exponent.
The resulting kernel is defined as
k_exp(X, Y) = k(X, Y) ** exponent
Parameters
----------
kernel : Kernel object
The base kernel
exponent : float
The exponent for the base kernel
"""
def __init__(self, kernel, exponent):
self.kernel = kernel
self.exponent = exponent
def get_params(self, deep=True):
"""Get parameters of this kernel.
Parameters
----------
deep: boolean, optional
If True, will return the parameters for this estimator and
contained subobjects that are estimators.
Returns
-------
params : mapping of string to any
Parameter names mapped to their values.
"""
params = dict(kernel=self.kernel, exponent=self.exponent)
if deep:
deep_items = self.kernel.get_params().items()
params.update(('kernel__' + k, val) for k, val in deep_items)
return params
@property
def hyperparameters(self):
"""Returns a list of all hyperparameter."""
r = []
for hyperparameter in self.kernel.hyperparameters:
r.append(Hyperparameter("kernel__" + hyperparameter.name,
hyperparameter.value_type,
hyperparameter.bounds,
hyperparameter.n_elements))
return r
@property
def theta(self):
"""Returns the (flattened, log-transformed) non-fixed hyperparameters.
Note that theta are typically the log-transformed values of the
kernel's hyperparameters as this representation of the search space
is more amenable for hyperparameter search, as hyperparameters like
length-scales naturally live on a log-scale.
Returns
-------
theta : array, shape (n_dims,)
The non-fixed, log-transformed hyperparameters of the kernel
"""
return self.kernel.theta
@theta.setter
def theta(self, theta):
"""Sets the (flattened, log-transformed) non-fixed hyperparameters.
Parameters
----------
theta : array, shape (n_dims,)
The non-fixed, log-transformed hyperparameters of the kernel
"""
self.kernel.theta = theta
@property
def bounds(self):
"""Returns the log-transformed bounds on the theta.
Returns
-------
bounds : array, shape (n_dims, 2)
The log-transformed bounds on the kernel's hyperparameters theta
"""
return self.kernel.bounds
def __eq__(self, b):
if type(self) != type(b):
return False
return (self.kernel == b.kernel and self.exponent == b.exponent)
def __call__(self, X, Y=None, eval_gradient=False):
"""Return the kernel k(X, Y) and optionally its gradient.
Parameters
----------
X : array, shape (n_samples_X, n_features)
Left argument of the returned kernel k(X, Y)
Y : array, shape (n_samples_Y, n_features), (optional, default=None)
Right argument of the returned kernel k(X, Y). If None, k(X, X)
is evaluated instead.
eval_gradient : bool (optional, default=False)
Determines whether the gradient with respect to the kernel
hyperparameter is determined.
Returns
-------
K : array, shape (n_samples_X, n_samples_Y)
Kernel k(X, Y)
K_gradient : array (opt.), shape (n_samples_X, n_samples_X, n_dims)
The gradient of the kernel k(X, X) with respect to the
hyperparameter of the kernel. Only returned when eval_gradient
is True.
"""
if eval_gradient:
K, K_gradient = self.kernel(X, Y, eval_gradient=True)
K_gradient *= \
self.exponent * K[:, :, np.newaxis] ** (self.exponent - 1)
return K ** self.exponent, K_gradient
else:
K = self.kernel(X, Y, eval_gradient=False)
return K ** self.exponent
def diag(self, X):
"""Returns the diagonal of the kernel k(X, X).
The result of this method is identical to np.diag(self(X)); however,
it can be evaluated more efficiently since only the diagonal is
evaluated.
Parameters
----------
X : array, shape (n_samples_X, n_features)
Left argument of the returned kernel k(X, Y)
Returns
-------
K_diag : array, shape (n_samples_X,)
Diagonal of kernel k(X, X)
"""
return self.kernel.diag(X) ** self.exponent
def __repr__(self):
return "{0} ** {1}".format(self.kernel, self.exponent)
def is_stationary(self):
"""Returns whether the kernel is stationary. """
return self.kernel.is_stationary()
class ConstantKernel(StationaryKernelMixin, Kernel):
"""Constant kernel.
Can be used as part of a product-kernel where it scales the magnitude of
the other factor (kernel) or as part of a sum-kernel, where it modifies
the mean of the Gaussian process.
k(x_1, x_2) = constant_value for all x_1, x_2
Parameters
----------
constant_value : float, default: 1.0
The constant value which defines the covariance:
k(x_1, x_2) = constant_value
constant_value_bounds : pair of floats >= 0, default: (1e-5, 1e5)
The lower and upper bound on constant_value
"""
def __init__(self, constant_value=1.0, constant_value_bounds=(1e-5, 1e5)):
self.constant_value = constant_value
self.constant_value_bounds = constant_value_bounds
self.hyperparameter_constant_value = \
Hyperparameter("constant_value", "numeric", constant_value_bounds)
def __call__(self, X, Y=None, eval_gradient=False):
"""Return the kernel k(X, Y) and optionally its gradient.
Parameters
----------
X : array, shape (n_samples_X, n_features)
Left argument of the returned kernel k(X, Y)
Y : array, shape (n_samples_Y, n_features), (optional, default=None)
Right argument of the returned kernel k(X, Y). If None, k(X, X)
is evaluated instead.
eval_gradient : bool (optional, default=False)
Determines whether the gradient with respect to the kernel
hyperparameter is determined. Only supported when Y is None.
Returns
-------
K : array, shape (n_samples_X, n_samples_Y)
Kernel k(X, Y)
K_gradient : array (opt.), shape (n_samples_X, n_samples_X, n_dims)
The gradient of the kernel k(X, X) with respect to the
hyperparameter of the kernel. Only returned when eval_gradient
is True.
"""
X = np.atleast_2d(X)
if Y is None:
Y = X
elif eval_gradient:
raise ValueError("Gradient can only be evaluated when Y is None.")
K = self.constant_value * np.ones((X.shape[0], Y.shape[0]))
if eval_gradient:
if not self.hyperparameter_constant_value.fixed:
return (K, self.constant_value
* np.ones((X.shape[0], X.shape[0], 1)))
else:
return K, np.empty((X.shape[0], X.shape[0], 0))
else:
return K
def diag(self, X):
"""Returns the diagonal of the kernel k(X, X).
The result of this method is identical to np.diag(self(X)); however,
it can be evaluated more efficiently since only the diagonal is
evaluated.
Parameters
----------
X : array, shape (n_samples_X, n_features)
Left argument of the returned kernel k(X, Y)
Returns
-------
K_diag : array, shape (n_samples_X,)
Diagonal of kernel k(X, X)
"""
return self.constant_value * np.ones(X.shape[0])
def __repr__(self):
return "{0:.3g}**2".format(np.sqrt(self.constant_value))
class WhiteKernel(StationaryKernelMixin, Kernel):
"""White kernel.
The main use-case of this kernel is as part of a sum-kernel where it
explains the noise-component of the signal. Tuning its parameter
corresponds to estimating the noise-level.
k(x_1, x_2) = noise_level if x_1 == x_2 else 0
Parameters
----------
noise_level : float, default: 1.0
Parameter controlling the noise level
noise_level_bounds : pair of floats >= 0, default: (1e-5, 1e5)
The lower and upper bound on noise_level
"""
def __init__(self, noise_level=1.0, noise_level_bounds=(1e-5, 1e5)):
self.noise_level = noise_level
self.noise_level_bounds = noise_level_bounds
self.hyperparameter_noise_level = \
Hyperparameter("noise_level", "numeric", noise_level_bounds)
def __call__(self, X, Y=None, eval_gradient=False):
"""Return the kernel k(X, Y) and optionally its gradient.
Parameters
----------
X : array, shape (n_samples_X, n_features)
Left argument of the returned kernel k(X, Y)
Y : array, shape (n_samples_Y, n_features), (optional, default=None)
Right argument of the returned kernel k(X, Y). If None, k(X, X)
is evaluated instead.
eval_gradient : bool (optional, default=False)
Determines whether the gradient with respect to the kernel
hyperparameter is determined. Only supported when Y is None.
Returns
-------
K : array, shape (n_samples_X, n_samples_Y)
Kernel k(X, Y)
K_gradient : array (opt.), shape (n_samples_X, n_samples_X, n_dims)
The gradient of the kernel k(X, X) with respect to the
hyperparameter of the kernel. Only returned when eval_gradient
is True.
"""
X = np.atleast_2d(X)
if Y is not None and eval_gradient:
raise ValueError("Gradient can only be evaluated when Y is None.")
if Y is None:
K = self.noise_level * np.eye(X.shape[0])
if eval_gradient:
if not self.hyperparameter_noise_level.fixed:
return (K, self.noise_level
* np.eye(X.shape[0])[:, :, np.newaxis])
else:
return K, np.empty((X.shape[0], X.shape[0], 0))
else:
return K
else:
return np.zeros((X.shape[0], Y.shape[0]))
def diag(self, X):
"""Returns the diagonal of the kernel k(X, X).
The result of this method is identical to np.diag(self(X)); however,
it can be evaluated more efficiently since only the diagonal is
evaluated.
Parameters
----------
X : array, shape (n_samples_X, n_features)
Left argument of the returned kernel k(X, Y)
Returns
-------
K_diag : array, shape (n_samples_X,)
Diagonal of kernel k(X, X)
"""
return self.noise_level * np.ones(X.shape[0])
def __repr__(self):
return "{0}(noise_level={1:.3g})".format(self.__class__.__name__,
self.noise_level)
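# --- Illustrative sketch, not part of the original module --------------------
# A hedged example of the behaviour described in the WhiteKernel docstring: on
# identical inputs it returns noise_level * I, while on distinct inputs it
# returns zeros, which is why it is normally *added* to another kernel to model
# observation noise. The helper name _demo_white_kernel is ours.
def _demo_white_kernel():
    import numpy as np
    X = np.arange(5.0).reshape(-1, 1)
    k = WhiteKernel(noise_level=0.5)
    K_self = k(X)             # 0.5 * identity matrix, shape (5, 5)
    K_cross = k(X, X + 1.0)   # all zeros, since x_1 != x_2
    return K_self, K_cross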
class RBF(StationaryKernelMixin, NormalizedKernelMixin, Kernel):
"""Radial-basis function kernel (aka squared-exponential kernel).
The RBF kernel is a stationary kernel. It is also known as the
"squared exponential" kernel. It is parameterized by a length-scale
parameter length_scale>0, which can either be a scalar (isotropic variant
of the kernel) or a vector with the same number of dimensions as the inputs
X (anisotropic variant of the kernel). The kernel is given by:
k(x_i, x_j) = exp(-1 / 2 d(x_i / length_scale, x_j / length_scale)^2)
This kernel is infinitely differentiable, which implies that GPs with this
kernel as covariance function have mean square derivatives of all orders,
and are thus very smooth.
Parameters
-----------
length_scale : float or array with shape (n_features,), default: 1.0
The length scale of the kernel. If a float, an isotropic kernel is
used. If an array, an anisotropic kernel is used where each dimension
of l defines the length-scale of the respective feature dimension.
length_scale_bounds : pair of floats >= 0, default: (1e-5, 1e5)
The lower and upper bound on length_scale
"""
def __init__(self, length_scale=1.0, length_scale_bounds=(1e-5, 1e5)):
if np.iterable(length_scale):
if len(length_scale) > 1:
self.anisotropic = True
                self.length_scale = np.asarray(length_scale, dtype=float)
else:
self.anisotropic = False
self.length_scale = float(length_scale[0])
else:
self.anisotropic = False
self.length_scale = float(length_scale)
self.length_scale_bounds = length_scale_bounds
if self.anisotropic: # anisotropic length_scale
self.hyperparameter_length_scale = \
Hyperparameter("length_scale", "numeric", length_scale_bounds,
len(length_scale))
else:
self.hyperparameter_length_scale = \
Hyperparameter("length_scale", "numeric", length_scale_bounds)
def __call__(self, X, Y=None, eval_gradient=False):
"""Return the kernel k(X, Y) and optionally its gradient.
Parameters
----------
X : array, shape (n_samples_X, n_features)
Left argument of the returned kernel k(X, Y)
Y : array, shape (n_samples_Y, n_features), (optional, default=None)
            Right argument of the returned kernel k(X, Y). If None, k(X, X)
            is evaluated instead.
        eval_gradient : bool (optional, default=False)
            Determines whether the gradient with respect to the kernel
            hyperparameter is computed. Only supported when Y is None.
Returns
-------
K : array, shape (n_samples_X, n_samples_Y)
Kernel k(X, Y)
K_gradient : array (opt.), shape (n_samples_X, n_samples_X, n_dims)
The gradient of the kernel k(X, X) with respect to the
hyperparameter of the kernel. Only returned when eval_gradient
is True.
"""
X = np.atleast_2d(X)
if self.anisotropic and X.shape[1] != self.length_scale.shape[0]:
raise Exception("Anisotropic kernel must have the same number of "
"dimensions as data (%d!=%d)"
% (self.length_scale.shape[0], X.shape[1]))
if Y is None:
dists = pdist(X / self.length_scale, metric='sqeuclidean')
K = np.exp(-.5 * dists)
# convert from upper-triangular matrix to square matrix
K = squareform(K)
np.fill_diagonal(K, 1)
else:
if eval_gradient:
raise ValueError(
"Gradient can only be evaluated when Y is None.")
dists = cdist(X / self.length_scale, Y / self.length_scale,
metric='sqeuclidean')
K = np.exp(-.5 * dists)
if eval_gradient:
if self.hyperparameter_length_scale.fixed:
# Hyperparameter l kept fixed
return K, np.empty((X.shape[0], X.shape[0], 0))
elif not self.anisotropic or self.length_scale.shape[0] == 1:
K_gradient = \
(K * squareform(dists))[:, :, np.newaxis]
return K, K_gradient
elif self.anisotropic:
# We need to recompute the pairwise dimension-wise distances
K_gradient = (X[:, np.newaxis, :] - X[np.newaxis, :, :]) ** 2 \
/ (self.length_scale ** 2)
K_gradient *= K[..., np.newaxis]
return K, K_gradient
else:
raise Exception("Anisotropic kernels require that the number "
"of length scales and features match.")
else:
return K
def __repr__(self):
if self.anisotropic:
return "{0}(length_scale=[{1}])".format(
self.__class__.__name__, ", ".join(map("{0:.3g}".format,
self.length_scale)))
else: # isotropic
return "{0}(length_scale={1:.3g})".format(
self.__class__.__name__, self.length_scale)
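# --- Illustrative sketch, not part of the original module --------------------
# A hedged example contrasting the isotropic and anisotropic RBF variants: a
# scalar length_scale treats every feature identically, while a vector rescales
# each feature dimension separately. The analytic gradient returned with
# eval_gradient=True is taken with respect to the log of the hyperparameters.
# The helper name _demo_rbf_kernel is ours.
def _demo_rbf_kernel():
    import numpy as np
    X = np.random.RandomState(0).rand(4, 2)
    iso = RBF(length_scale=1.0)           # isotropic
    aniso = RBF(length_scale=[1.0, 0.1])  # anisotropic, one scale per feature
    K_iso, K_iso_grad = iso(X, eval_gradient=True)
    K_aniso = aniso(X)
    return K_iso, K_iso_grad.shape, K_aniso  # gradient shape: (4, 4, 1)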
class Matern(RBF):
""" Matern kernel.
The class of Matern kernels is a generalization of the RBF and the
absolute exponential kernel parameterized by an additional parameter
nu. The smaller nu, the less smooth the approximated function is.
For nu=inf, the kernel becomes equivalent to the RBF kernel and for nu=0.5
to the absolute exponential kernel. Important intermediate values are
nu=1.5 (once differentiable functions) and nu=2.5 (twice differentiable
functions).
See Rasmussen and Williams 2006, pp84 for details regarding the
different variants of the Matern kernel.
Parameters
-----------
length_scale : float or array with shape (n_features,), default: 1.0
The length scale of the kernel. If a float, an isotropic kernel is
used. If an array, an anisotropic kernel is used where each dimension
of l defines the length-scale of the respective feature dimension.
length_scale_bounds : pair of floats >= 0, default: (1e-5, 1e5)
The lower and upper bound on length_scale
nu: float, default: 1.5
The parameter nu controlling the smoothness of the learned function.
The smaller nu, the less smooth the approximated function is.
For nu=inf, the kernel becomes equivalent to the RBF kernel and for
nu=0.5 to the absolute exponential kernel. Important intermediate
values are nu=1.5 (once differentiable functions) and nu=2.5
(twice differentiable functions). Note that values of nu not in
[0.5, 1.5, 2.5, inf] incur a considerably higher computational cost
(appr. 10 times higher) since they require to evaluate the modified
Bessel function. Furthermore, in contrast to l, nu is kept fixed to
its initial value and not optimized.
"""
def __init__(self, length_scale=1.0, length_scale_bounds=(1e-5, 1e5),
nu=1.5):
super(Matern, self).__init__(length_scale, length_scale_bounds)
self.nu = nu
def __call__(self, X, Y=None, eval_gradient=False):
"""Return the kernel k(X, Y) and optionally its gradient.
Parameters
----------
X : array, shape (n_samples_X, n_features)
Left argument of the returned kernel k(X, Y)
Y : array, shape (n_samples_Y, n_features), (optional, default=None)
            Right argument of the returned kernel k(X, Y). If None, k(X, X)
            is evaluated instead.
        eval_gradient : bool (optional, default=False)
            Determines whether the gradient with respect to the kernel
            hyperparameter is computed. Only supported when Y is None.
Returns
-------
K : array, shape (n_samples_X, n_samples_Y)
Kernel k(X, Y)
K_gradient : array (opt.), shape (n_samples_X, n_samples_X, n_dims)
The gradient of the kernel k(X, X) with respect to the
hyperparameter of the kernel. Only returned when eval_gradient
is True.
"""
X = np.atleast_2d(X)
if self.anisotropic and X.shape[1] != self.length_scale.shape[0]:
raise Exception("Anisotropic kernel must have the same number of "
"dimensions as data (%d!=%d)"
% (self.length_scale.shape[0], X.shape[1]))
if Y is None:
dists = pdist(X / self.length_scale, metric='euclidean')
else:
if eval_gradient:
raise ValueError(
"Gradient can only be evaluated when Y is None.")
dists = cdist(X / self.length_scale, Y / self.length_scale,
metric='euclidean')
if self.nu == 0.5:
K = np.exp(-dists)
elif self.nu == 1.5:
K = dists * math.sqrt(3)
K = (1. + K) * np.exp(-K)
elif self.nu == 2.5:
K = dists * math.sqrt(5)
K = (1. + K + K ** 2 / 3.0) * np.exp(-K)
else: # general case; expensive to evaluate
K = dists
K[K == 0.0] += np.finfo(float).eps # strict zeros result in nan
tmp = (math.sqrt(2 * self.nu) * K)
K.fill((2 ** (1. - self.nu)) / gamma(self.nu))
K *= tmp ** self.nu
K *= kv(self.nu, tmp)
if Y is None:
# convert from upper-triangular matrix to square matrix
K = squareform(K)
np.fill_diagonal(K, 1)
if eval_gradient:
if self.hyperparameter_length_scale.fixed:
# Hyperparameter l kept fixed
K_gradient = np.empty((X.shape[0], X.shape[0], 0))
return K, K_gradient
# We need to recompute the pairwise dimension-wise distances
if self.anisotropic:
D = (X[:, np.newaxis, :] - X[np.newaxis, :, :])**2 \
/ (self.length_scale ** 2)
else:
D = squareform(dists**2)[:, :, np.newaxis]
if self.nu == 0.5:
K_gradient = K[..., np.newaxis] * D \
/ np.sqrt(D.sum(2))[:, :, np.newaxis]
K_gradient[~np.isfinite(K_gradient)] = 0
elif self.nu == 1.5:
K_gradient = \
3 * D * np.exp(-np.sqrt(3 * D.sum(-1)))[..., np.newaxis]
elif self.nu == 2.5:
tmp = np.sqrt(5 * D.sum(-1))[..., np.newaxis]
K_gradient = 5.0 / 3.0 * D * (tmp + 1) * np.exp(-tmp)
else:
# approximate gradient numerically
def f(theta): # helper function
return self.clone_with_theta(theta)(X, Y)
return K, _approx_fprime(self.theta, f, 1e-10)
if not self.anisotropic:
return K, K_gradient[:, :].sum(-1)[:, :, np.newaxis]
else:
return K, K_gradient
else:
return K
def __repr__(self):
if self.anisotropic:
return "{0}(length_scale=[{1}], nu={2:.3g})".format(
self.__class__.__name__,
", ".join(map("{0:.3g}".format, self.length_scale)),
self.nu)
else: # isotropic
return "{0}(length_scale={1:.3g}, nu={2:.3g})".format(
self.__class__.__name__, self.length_scale, self.nu)
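# --- Illustrative sketch, not part of the original module --------------------
# A hedged comparison of Matern smoothness settings: nu=0.5 reproduces the
# absolute exponential kernel, nu=1.5 and nu=2.5 give once/twice differentiable
# sample paths, and any other nu falls back to the much slower Bessel-function
# branch above. The helper name _demo_matern_kernel is ours.
def _demo_matern_kernel():
    import numpy as np
    X = np.linspace(0.0, 1.0, 5).reshape(-1, 1)
    K_rough = Matern(length_scale=1.0, nu=0.5)(X)   # absolute exponential
    K_mid = Matern(length_scale=1.0, nu=1.5)(X)     # once differentiable
    K_smooth = Matern(length_scale=1.0, nu=2.5)(X)  # twice differentiable
    return K_rough, K_mid, K_smooth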
class RationalQuadratic(StationaryKernelMixin, NormalizedKernelMixin, Kernel):
"""Rational Quadratic kernel.
The RationalQuadratic kernel can be seen as a scale mixture (an infinite
sum) of RBF kernels with different characteristic length-scales. It is
parameterized by a length-scale parameter length_scale>0 and a scale
mixture parameter alpha>0. Only the isotropic variant where length_scale is
    a scalar is supported at the moment. The kernel is given by:
k(x_i, x_j) = (1 + d(x_i, x_j)^2 / (2*alpha * length_scale^2))^-alpha
Parameters
----------
length_scale : float > 0, default: 1.0
The length scale of the kernel.
alpha : float > 0, default: 1.0
Scale mixture parameter
length_scale_bounds : pair of floats >= 0, default: (1e-5, 1e5)
The lower and upper bound on length_scale
alpha_bounds : pair of floats >= 0, default: (1e-5, 1e5)
The lower and upper bound on alpha
"""
def __init__(self, length_scale=1.0, alpha=1.0,
length_scale_bounds=(1e-5, 1e5), alpha_bounds=(1e-5, 1e5)):
self.length_scale = length_scale
self.alpha = alpha
self.length_scale_bounds = length_scale_bounds
self.alpha_bounds = alpha_bounds
self.hyperparameter_length_scale = \
Hyperparameter("length_scale", "numeric", length_scale_bounds)
self.hyperparameter_alpha = \
Hyperparameter("alpha", "numeric", alpha_bounds)
def __call__(self, X, Y=None, eval_gradient=False):
"""Return the kernel k(X, Y) and optionally its gradient.
Parameters
----------
X : array, shape (n_samples_X, n_features)
Left argument of the returned kernel k(X, Y)
Y : array, shape (n_samples_Y, n_features), (optional, default=None)
            Right argument of the returned kernel k(X, Y). If None, k(X, X)
            is evaluated instead.
        eval_gradient : bool (optional, default=False)
            Determines whether the gradient with respect to the kernel
            hyperparameter is computed. Only supported when Y is None.
Returns
-------
K : array, shape (n_samples_X, n_samples_Y)
Kernel k(X, Y)
K_gradient : array (opt.), shape (n_samples_X, n_samples_X, n_dims)
The gradient of the kernel k(X, X) with respect to the
hyperparameter of the kernel. Only returned when eval_gradient
is True.
"""
X = np.atleast_2d(X)
if Y is None:
dists = squareform(pdist(X, metric='sqeuclidean'))
tmp = dists / (2 * self.alpha * self.length_scale ** 2)
base = (1 + tmp)
K = base ** -self.alpha
np.fill_diagonal(K, 1)
else:
if eval_gradient:
raise ValueError(
"Gradient can only be evaluated when Y is None.")
dists = cdist(X, Y, metric='sqeuclidean')
K = (1 + dists / (2 * self.alpha * self.length_scale ** 2)) \
** -self.alpha
if eval_gradient:
# gradient with respect to length_scale
if not self.hyperparameter_length_scale.fixed:
length_scale_gradient = \
dists * K / (self.length_scale ** 2 * base)
length_scale_gradient = length_scale_gradient[:, :, np.newaxis]
else: # l is kept fixed
length_scale_gradient = np.empty((K.shape[0], K.shape[1], 0))
# gradient with respect to alpha
if not self.hyperparameter_alpha.fixed:
alpha_gradient = \
K * (-self.alpha * np.log(base)
+ dists / (2 * self.length_scale ** 2 * base))
alpha_gradient = alpha_gradient[:, :, np.newaxis]
else: # alpha is kept fixed
alpha_gradient = np.empty((K.shape[0], K.shape[1], 0))
return K, np.dstack((alpha_gradient, length_scale_gradient))
else:
return K
def __repr__(self):
return "{0}(alpha={1:.3g}, length_scale={2:.3g})".format(
self.__class__.__name__, self.alpha, self.length_scale)
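# --- Illustrative sketch, not part of the original module --------------------
# A hedged example of RationalQuadratic as a scale mixture of RBF kernels:
# small alpha gives heavier tails, while for very large alpha the kernel
# approaches RBF with the same length_scale. The helper name
# _demo_rational_quadratic_kernel is ours.
def _demo_rational_quadratic_kernel():
    import numpy as np
    X = np.linspace(0.0, 3.0, 4).reshape(-1, 1)
    K_heavy_tails = RationalQuadratic(length_scale=1.0, alpha=0.1)(X)
    K_nearly_rbf = RationalQuadratic(length_scale=1.0, alpha=1e6)(X)
    K_rbf = RBF(length_scale=1.0)(X)
    # K_nearly_rbf should be numerically close to K_rbf.
    return K_heavy_tails, K_nearly_rbf, K_rbf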
class ExpSineSquared(StationaryKernelMixin, NormalizedKernelMixin, Kernel):
"""Exp-Sine-Squared kernel.
The ExpSineSquared kernel allows modeling periodic functions. It is
parameterized by a length-scale parameter length_scale>0 and a periodicity
    parameter periodicity>0. Only the isotropic variant where length_scale is
    a scalar is supported at the moment. The kernel is given by:
    k(x_i, x_j) = exp(-2 (sin(\pi / periodicity * d(x_i, x_j)) / length_scale)^2)
Parameters
----------
length_scale : float > 0, default: 1.0
The length scale of the kernel.
periodicity : float > 0, default: 1.0
The periodicity of the kernel.
length_scale_bounds : pair of floats >= 0, default: (1e-5, 1e5)
The lower and upper bound on length_scale
periodicity_bounds : pair of floats >= 0, default: (1e-5, 1e5)
The lower and upper bound on periodicity
"""
def __init__(self, length_scale=1.0, periodicity=1.0,
length_scale_bounds=(1e-5, 1e5),
periodicity_bounds=(1e-5, 1e5)):
self.length_scale = length_scale
self.periodicity = periodicity
self.length_scale_bounds = length_scale_bounds
self.periodicity_bounds = periodicity_bounds
self.hyperparameter_length_scale = \
Hyperparameter("length_scale", "numeric", length_scale_bounds)
self.hyperparameter_periodicity = \
Hyperparameter("periodicity", "numeric", periodicity_bounds)
def __call__(self, X, Y=None, eval_gradient=False):
"""Return the kernel k(X, Y) and optionally its gradient.
Parameters
----------
X : array, shape (n_samples_X, n_features)
Left argument of the returned kernel k(X, Y)
Y : array, shape (n_samples_Y, n_features), (optional, default=None)
            Right argument of the returned kernel k(X, Y). If None, k(X, X)
            is evaluated instead.
        eval_gradient : bool (optional, default=False)
            Determines whether the gradient with respect to the kernel
            hyperparameter is computed. Only supported when Y is None.
Returns
-------
K : array, shape (n_samples_X, n_samples_Y)
Kernel k(X, Y)
K_gradient : array (opt.), shape (n_samples_X, n_samples_X, n_dims)
The gradient of the kernel k(X, X) with respect to the
hyperparameter of the kernel. Only returned when eval_gradient
is True.
"""
X = np.atleast_2d(X)
if Y is None:
dists = squareform(pdist(X, metric='euclidean'))
arg = np.pi * dists / self.periodicity
sin_of_arg = np.sin(arg)
K = np.exp(- 2 * (sin_of_arg / self.length_scale) ** 2)
else:
if eval_gradient:
raise ValueError(
"Gradient can only be evaluated when Y is None.")
dists = cdist(X, Y, metric='euclidean')
K = np.exp(- 2 * (np.sin(np.pi / self.periodicity * dists)
/ self.length_scale) ** 2)
if eval_gradient:
cos_of_arg = np.cos(arg)
# gradient with respect to length_scale
if not self.hyperparameter_length_scale.fixed:
length_scale_gradient = \
4 / self.length_scale**2 * sin_of_arg**2 * K
length_scale_gradient = length_scale_gradient[:, :, np.newaxis]
else: # length_scale is kept fixed
length_scale_gradient = np.empty((K.shape[0], K.shape[1], 0))
# gradient with respect to p
if not self.hyperparameter_periodicity.fixed:
periodicity_gradient = \
4 * arg / self.length_scale**2 * cos_of_arg \
* sin_of_arg * K
periodicity_gradient = periodicity_gradient[:, :, np.newaxis]
else: # p is kept fixed
periodicity_gradient = np.empty((K.shape[0], K.shape[1], 0))
return K, np.dstack((length_scale_gradient, periodicity_gradient))
else:
return K
def __repr__(self):
return "{0}(length_scale={1:.3g}, periodicity={2:.3g})".format(
self.__class__.__name__, self.length_scale, self.periodicity)
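# --- Illustrative sketch, not part of the original module --------------------
# A hedged example of the periodic ExpSineSquared kernel: points separated by
# a whole number of periods are (essentially) perfectly correlated, which is
# what makes this kernel useful for seasonal signals. The helper name
# _demo_exp_sine_squared_kernel is ours.
def _demo_exp_sine_squared_kernel():
    import numpy as np
    k = ExpSineSquared(length_scale=1.0, periodicity=1.0)
    X = np.array([[0.0], [0.5], [1.0], [2.0]])
    K = k(X)
    # K[0, 2] and K[0, 3] are essentially 1 (distances of 1 and 2 periods),
    # while K[0, 1] = exp(-2) is the minimum correlation within one period.
    return K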
class DotProduct(Kernel):
"""Dot-Product kernel.
The DotProduct kernel is non-stationary and can be obtained from linear
regression by putting N(0, 1) priors on the coefficients of x_d (d = 1, . .
. , D) and a prior of N(0, \sigma_0^2) on the bias. The DotProduct kernel
is invariant to a rotation of the coordinates about the origin, but not
translations. It is parameterized by a parameter sigma_0^2. For
    sigma_0^2 = 0, the kernel is called the homogeneous linear kernel, otherwise
it is inhomogeneous. The kernel is given by
k(x_i, x_j) = sigma_0 ^ 2 + x_i \cdot x_j
The DotProduct kernel is commonly combined with exponentiation.
Parameters
----------
sigma_0 : float >= 0, default: 1.0
Parameter controlling the inhomogenity of the kernel. If sigma_0=0,
the kernel is homogenous.
sigma_0_bounds : pair of floats >= 0, default: (1e-5, 1e5)
        The lower and upper bound on sigma_0
"""
def __init__(self, sigma_0=1.0, sigma_0_bounds=(1e-5, 1e5)):
self.sigma_0 = sigma_0
self.sigma_0_bounds = sigma_0_bounds
self.hyperparameter_sigma_0 = \
Hyperparameter("sigma_0", "numeric", sigma_0_bounds)
def __call__(self, X, Y=None, eval_gradient=False):
"""Return the kernel k(X, Y) and optionally its gradient.
Parameters
----------
X : array, shape (n_samples_X, n_features)
Left argument of the returned kernel k(X, Y)
Y : array, shape (n_samples_Y, n_features), (optional, default=None)
            Right argument of the returned kernel k(X, Y). If None, k(X, X)
            is evaluated instead.
        eval_gradient : bool (optional, default=False)
            Determines whether the gradient with respect to the kernel
            hyperparameter is computed. Only supported when Y is None.
Returns
-------
K : array, shape (n_samples_X, n_samples_Y)
Kernel k(X, Y)
K_gradient : array (opt.), shape (n_samples_X, n_samples_X, n_dims)
The gradient of the kernel k(X, X) with respect to the
hyperparameter of the kernel. Only returned when eval_gradient
is True.
"""
X = np.atleast_2d(X)
if Y is None:
K = np.inner(X, X) + self.sigma_0 ** 2
else:
if eval_gradient:
raise ValueError(
"Gradient can only be evaluated when Y is None.")
K = np.inner(X, Y) + self.sigma_0 ** 2
if eval_gradient:
if not self.hyperparameter_sigma_0.fixed:
K_gradient = np.empty((K.shape[0], K.shape[1], 1))
K_gradient[..., 0] = 2 * self.sigma_0 ** 2
return K, K_gradient
else:
return K, np.empty((X.shape[0], X.shape[0], 0))
else:
return K
def diag(self, X):
"""Returns the diagonal of the kernel k(X, X).
The result of this method is identical to np.diag(self(X)); however,
it can be evaluated more efficiently since only the diagonal is
evaluated.
Parameters
----------
X : array, shape (n_samples_X, n_features)
Left argument of the returned kernel k(X, Y)
Returns
-------
K_diag : array, shape (n_samples_X,)
Diagonal of kernel k(X, X)
"""
return np.einsum('ij,ij->i', X, X) + self.sigma_0 ** 2
def is_stationary(self):
"""Returns whether the kernel is stationary. """
return False
def __repr__(self):
return "{0}(sigma_0={1:.3g})".format(
self.__class__.__name__, self.sigma_0)
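# --- Illustrative sketch, not part of the original module --------------------
# A hedged example of the non-stationary DotProduct kernel: with sigma_0 = 0 it
# reduces to the homogeneous linear kernel x_i . x_j, and diag(X) is computed
# directly without forming the full matrix. The helper name
# _demo_dot_product_kernel is ours.
def _demo_dot_product_kernel():
    import numpy as np
    X = np.array([[1.0, 2.0], [3.0, 4.0]])
    k = DotProduct(sigma_0=0.0)   # homogeneous linear kernel
    K = k(X)                      # equals X.dot(X.T)
    K_diag = k.diag(X)            # equals np.diag(K)
    return K, K_diag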
# adapted from scipy/optimize/optimize.py for functions with 2d output
def _approx_fprime(xk, f, epsilon, args=()):
f0 = f(*((xk,) + args))
grad = np.zeros((f0.shape[0], f0.shape[1], len(xk)), float)
ei = np.zeros((len(xk), ), float)
for k in range(len(xk)):
ei[k] = 1.0
d = epsilon * ei
grad[:, :, k] = (f(*((xk + d,) + args)) - f0) / d[k]
ei[k] = 0.0
return grad
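# --- Illustrative sketch, not part of the original module --------------------
# A hedged sanity check of the finite-difference helper above: compare the
# analytic gradient returned by RBF(..., eval_gradient=True) with the numeric
# gradient of the kernel matrix in log-hyperparameter space. The helper name
# _demo_approx_fprime is ours.
def _demo_approx_fprime():
    import numpy as np
    X = np.random.RandomState(1).rand(5, 2)
    kernel = RBF(length_scale=0.7)
    K, K_grad = kernel(X, eval_gradient=True)
    def f(theta):  # kernel matrix as a function of the log-hyperparameters
        return kernel.clone_with_theta(theta)(X)
    K_grad_numeric = _approx_fprime(kernel.theta, f, 1e-8)
    return np.max(np.abs(K_grad - K_grad_numeric))  # expected to be tiny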
class PairwiseKernel(Kernel):
"""Wrapper for kernels in sklearn.metrics.pairwise.
A thin wrapper around the functionality of the kernels in
sklearn.metrics.pairwise.
Note: Evaluation of eval_gradient is not analytic but numeric and all
kernels support only isotropic distances. The parameter gamma is
considered to be a hyperparameter and may be optimized. The other
kernel parameters are set directly at initialization and are kept
fixed.
Parameters
----------
gamma: float >= 0, default: 1.0
Parameter gamma of the pairwise kernel specified by metric
gamma_bounds : pair of floats >= 0, default: (1e-5, 1e5)
The lower and upper bound on gamma
metric : string, or callable, default: "linear"
The metric to use when calculating kernel between instances in a
feature array. If metric is a string, it must be one of the metrics
in pairwise.PAIRWISE_KERNEL_FUNCTIONS.
If metric is "precomputed", X is assumed to be a kernel matrix.
Alternatively, if metric is a callable function, it is called on each
pair of instances (rows) and the resulting value recorded. The callable
should take two arrays from X as input and return a value indicating
the distance between them.
pairwise_kernels_kwargs : dict, default: None
All entries of this dict (if any) are passed as keyword arguments to
the pairwise kernel function.
"""
def __init__(self, gamma=1.0, gamma_bounds=(1e-5, 1e5), metric="linear",
pairwise_kernels_kwargs=None):
self.gamma = gamma
self.gamma_bounds = gamma_bounds
self.hyperparameter_gamma = \
Hyperparameter("gamma", "numeric", gamma_bounds)
self.metric = metric
if pairwise_kernels_kwargs is not None:
self.pairwise_kernels_kwargs = pairwise_kernels_kwargs
else:
self.pairwise_kernels_kwargs = {}
def __call__(self, X, Y=None, eval_gradient=False):
"""Return the kernel k(X, Y) and optionally its gradient.
Parameters
----------
X : array, shape (n_samples_X, n_features)
Left argument of the returned kernel k(X, Y)
Y : array, shape (n_samples_Y, n_features), (optional, default=None)
            Right argument of the returned kernel k(X, Y). If None, k(X, X)
            is evaluated instead.
        eval_gradient : bool (optional, default=False)
            Determines whether the gradient with respect to the kernel
            hyperparameter is computed. Only supported when Y is None.
Returns
-------
K : array, shape (n_samples_X, n_samples_Y)
Kernel k(X, Y)
K_gradient : array (opt.), shape (n_samples_X, n_samples_X, n_dims)
The gradient of the kernel k(X, X) with respect to the
hyperparameter of the kernel. Only returned when eval_gradient
is True.
"""
X = np.atleast_2d(X)
K = pairwise_kernels(X, Y, metric=self.metric, gamma=self.gamma,
filter_params=True,
**self.pairwise_kernels_kwargs)
if eval_gradient:
if self.hyperparameter_gamma.fixed:
return K, np.empty((X.shape[0], X.shape[0], 0))
else:
# approximate gradient numerically
def f(gamma): # helper function
return pairwise_kernels(
X, Y, metric=self.metric, gamma=np.exp(gamma),
filter_params=True, **self.pairwise_kernels_kwargs)
return K, _approx_fprime(self.theta, f, 1e-10)
else:
return K
def diag(self, X):
"""Returns the diagonal of the kernel k(X, X).
The result of this method is identical to np.diag(self(X)); however,
it can be evaluated more efficiently since only the diagonal is
evaluated.
Parameters
----------
X : array, shape (n_samples_X, n_features)
Left argument of the returned kernel k(X, Y)
Returns
-------
K_diag : array, shape (n_samples_X,)
Diagonal of kernel k(X, X)
"""
# We have to fall back to slow way of computing diagonal
return np.apply_along_axis(self, 1, X)[:, 0]
def is_stationary(self):
"""Returns whether the kernel is stationary. """
return self.metric in ["rbf"]
def __repr__(self):
return "{0}(gamma={1}, metric={2})".format(
self.__class__.__name__, self.gamma, self.metric)
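# --- Illustrative sketch, not part of the original module --------------------
# A hedged example of PairwiseKernel as a thin wrapper: with metric="rbf",
# sklearn.metrics.pairwise computes exp(-gamma * d^2), so gamma = 0.5 should
# reproduce RBF(length_scale=1.0) on the same data. The helper name
# _demo_pairwise_kernel is ours.
def _demo_pairwise_kernel():
    import numpy as np
    X = np.random.RandomState(2).rand(3, 2)
    K_wrapped = PairwiseKernel(gamma=0.5, metric="rbf")(X)
    K_direct = RBF(length_scale=1.0)(X)
    return np.max(np.abs(K_wrapped - K_direct))  # expected to be ~0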
| bsd-3-clause | catmiao/trading-with-python | historicDataDownloader/historicDataDownloader.py | 77 | 4526 |
'''
Created on 4 aug. 2012
Copyright: Jev Kuznetsov
License: BSD
a module for downloading historic data from IB
'''
import ib
import pandas
from ib.ext.Contract import Contract
from ib.opt import ibConnection, message
from time import sleep
import tradingWithPython.lib.logger as logger
from pandas import DataFrame, Index
import datetime as dt
from timeKeeper import TimeKeeper
import time
timeFormat = "%Y%m%d %H:%M:%S"
class DataHandler(object):
''' handles incoming messages '''
def __init__(self,tws):
self._log = logger.getLogger('DH')
tws.register(self.msgHandler,message.HistoricalData)
self.reset()
def reset(self):
self._log.debug('Resetting data')
self.dataReady = False
self._timestamp = []
self._data = {'open':[],'high':[],'low':[],'close':[],'volume':[],'count':[],'WAP':[]}
def msgHandler(self,msg):
#print '[msg]', msg
if msg.date[:8] == 'finished':
            self._log.debug('Data received')
self.dataReady = True
return
self._timestamp.append(dt.datetime.strptime(msg.date,timeFormat))
for k in self._data.keys():
self._data[k].append(getattr(msg, k))
@property
def data(self):
''' return downloaded data as a DataFrame '''
df = DataFrame(data=self._data,index=Index(self._timestamp))
return df
class Downloader(object):
def __init__(self,debug=False):
self._log = logger.getLogger('DLD')
        self._log.debug('Initializing data downloader. Pandas version={0}, ibpy version:{1}'.format(pandas.__version__,ib.version))
self.tws = ibConnection()
self._dataHandler = DataHandler(self.tws)
if debug:
self.tws.registerAll(self._debugHandler)
self.tws.unregister(self._debugHandler,message.HistoricalData)
self._log.debug('Connecting to tws')
self.tws.connect()
self._timeKeeper = TimeKeeper() # keep track of past requests
self._reqId = 1 # current request id
def _debugHandler(self,msg):
print '[debug]', msg
def requestData(self,contract,endDateTime,durationStr='1800 S',barSizeSetting='1 secs',whatToShow='TRADES',useRTH=1,formatDate=1):
self._log.debug('Requesting data for %s end time %s.' % (contract.m_symbol,endDateTime))
while self._timeKeeper.nrRequests(timeSpan=600) > 59:
print 'Too many requests done. Waiting... '
time.sleep(1)
self._timeKeeper.addRequest()
self._dataHandler.reset()
self.tws.reqHistoricalData(self._reqId,contract,endDateTime,durationStr,barSizeSetting,whatToShow,useRTH,formatDate)
self._reqId+=1
#wait for data
startTime = time.time()
timeout = 3
while not self._dataHandler.dataReady and (time.time()-startTime < timeout):
sleep(2)
if not self._dataHandler.dataReady:
self._log.error('Data timeout')
print self._dataHandler.data
return self._dataHandler.data
def getIntradayData(self,contract, dateTuple ):
''' get full day data on 1-s interval
date: a tuple of (yyyy,mm,dd)
'''
openTime = dt.datetime(*dateTuple)+dt.timedelta(hours=16)
closeTime = dt.datetime(*dateTuple)+dt.timedelta(hours=22)
timeRange = pandas.date_range(openTime,closeTime,freq='30min')
datasets = []
for t in timeRange:
datasets.append(self.requestData(contract,t.strftime(timeFormat)))
return pandas.concat(datasets)
def disconnect(self):
self.tws.disconnect()
if __name__=='__main__':
dl = Downloader(debug=True)
c = Contract()
c.m_symbol = 'SPY'
c.m_secType = 'STK'
c.m_exchange = 'SMART'
c.m_currency = 'USD'
df = dl.getIntradayData(c, (2012,8,6))
df.to_csv('test.csv')
# df = dl.requestData(c, '20120803 22:00:00')
# df.to_csv('test1.csv')
# df = dl.requestData(c, '20120803 21:30:00')
# df.to_csv('test2.csv')
dl.disconnect()
print 'Done.'
| bsd-3-clause | cjayb/mne-python | doc/conf.py | 1 | 27783 |
# -*- coding: utf-8 -*-
#
# MNE documentation build configuration file, created by
# sphinx-quickstart on Fri Jun 11 10:45:48 2010.
#
# This file is execfile()d with the current directory set to its containing
# dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
from datetime import date
from distutils.version import LooseVersion
import gc
import os
import os.path as op
import sys
import warnings
import sphinx_gallery
from sphinx_gallery.sorting import FileNameSortKey, ExplicitOrder
from numpydoc import docscrape
import matplotlib
import mne
from mne.utils import linkcode_resolve # noqa, analysis:ignore
if LooseVersion(sphinx_gallery.__version__) < LooseVersion('0.2'):
raise ImportError('Must have at least version 0.2 of sphinx-gallery, got '
'%s' % (sphinx_gallery.__version__,))
matplotlib.use('agg')
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
curdir = os.path.dirname(__file__)
sys.path.append(os.path.abspath(os.path.join(curdir, '..', 'mne')))
sys.path.append(os.path.abspath(os.path.join(curdir, 'sphinxext')))
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
needs_sphinx = '2.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = [
'sphinx.ext.autodoc',
'sphinx.ext.autosummary',
'sphinx.ext.coverage',
'sphinx.ext.doctest',
'sphinx.ext.intersphinx',
'sphinx.ext.linkcode',
'sphinx.ext.mathjax',
'sphinx.ext.todo',
'sphinx.ext.graphviz',
'numpydoc',
'sphinx_gallery.gen_gallery',
'sphinx_fontawesome',
'gen_commands',
'gh_substitutions',
'mne_substitutions',
'sphinx_bootstrap_theme',
'sphinx_bootstrap_divs',
'sphinxcontrib.bibtex',
'sphinxcontrib.bibtex2',
]
linkcheck_ignore = [
'https://doi.org/10.1088/0031-9155/57/7/1937', # noqa 403 Client Error: Forbidden for url: http://iopscience.iop.org/article/10.1088/0031-9155/57/7/1937/meta
'https://doi.org/10.1088/0031-9155/51/7/008', # noqa 403 Client Error: Forbidden for url: https://iopscience.iop.org/article/10.1088/0031-9155/51/7/008
'https://sccn.ucsd.edu/wiki/.*', # noqa HTTPSConnectionPool(host='sccn.ucsd.edu', port=443): Max retries exceeded with url: /wiki/Firfilt_FAQ (Caused by SSLError(SSLError(1, '[SSL: CERTIFICATE_VERIFY_FAILED] certificate verify failed (_ssl.c:847)'),))
'https://docs.python.org/dev/howto/logging.html', # noqa ('Connection aborted.', ConnectionResetError(104, 'Connection reset by peer'))
'https://docs.python.org/3/library/.*', # noqa ('Connection aborted.', ConnectionResetError(104, 'Connection reset by peer'))
'https://hal.archives-ouvertes.fr/hal-01848442/', # noqa Sometimes: 503 Server Error: Service Unavailable for url: https://hal.archives-ouvertes.fr/hal-01848442/
]
linkcheck_anchors = False # saves a bit of time
autosummary_generate = True
autodoc_default_options = {'inherited-members': None}
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This pattern also affects html_static_path and html_extra_path.
exclude_patterns = ['_includes']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
# source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'MNE'
td = date.today()
copyright = u'2012-%s, MNE Developers. Last updated on %s' % (td.year,
td.isoformat())
nitpicky = True
nitpick_ignore = [
("py:class", "None. Remove all items from D."),
("py:class", "a set-like object providing a view on D's items"),
("py:class", "a set-like object providing a view on D's keys"),
("py:class", "v, remove specified key and return the corresponding value."), # noqa: E501
("py:class", "None. Update D from dict/iterable E and F."),
("py:class", "an object providing a view on D's values"),
("py:class", "a shallow copy of D"),
]
for key in ('AcqParserFIF', 'BiHemiLabel', 'Dipole', 'DipoleFixed', 'Label',
'MixedSourceEstimate', 'MixedVectorSourceEstimate', 'Report',
'SourceEstimate', 'SourceMorph', 'VectorSourceEstimate',
'VolSourceEstimate', 'VolVectorSourceEstimate',
'channels.DigMontage', 'channels.Layout',
'decoding.CSP', 'decoding.EMS', 'decoding.FilterEstimator',
'decoding.GeneralizingEstimator', 'decoding.LinearModel',
'decoding.PSDEstimator', 'decoding.ReceptiveField',
'decoding.SPoC', 'decoding.Scaler', 'decoding.SlidingEstimator',
'decoding.TemporalFilter', 'decoding.TimeDelayingRidge',
'decoding.TimeFrequency', 'decoding.UnsupervisedSpatialFilter',
'decoding.Vectorizer',
'preprocessing.ICA', 'preprocessing.Xdawn',
'simulation.SourceSimulator',
'time_frequency.CrossSpectralDensity',
'utils.deprecated',
'viz.ClickableImage'):
nitpick_ignore.append(('py:obj', f'mne.{key}.__hash__'))
suppress_warnings = ['image.nonlocal_uri'] # we intentionally link outside
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = mne.__version__
# The full version, including alpha/beta/rc tags.
release = version
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
# language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
# today = ''
# Else, today_fmt is used as the format for a strftime call.
# today_fmt = '%B %d, %Y'
# List of documents that shouldn't be included in the build.
unused_docs = []
# List of directories, relative to source directory, that shouldn't be searched
# for source files.
exclude_trees = ['_build']
# The reST default role (used for this markup: `text`) to use for all
# documents.
default_role = "py:obj"
# If true, '()' will be appended to :func: etc. cross-reference text.
# add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
# add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
# show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'default'
# A list of ignored prefixes for module index sorting.
modindex_common_prefix = ['mne.']
# If true, keep warnings as "system message" paragraphs in the built documents.
# keep_warnings = False
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'bootstrap'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
html_theme_options = {
'navbar_title': ' ', # we replace this with an image
'source_link_position': "nav", # default
'bootswatch_theme': "flatly", # yeti paper lumen
'navbar_sidebarrel': False, # Render the next/prev links in navbar?
'navbar_pagenav': False,
'navbar_class': "navbar",
'bootstrap_version': "3", # default
'navbar_links': [
("Install", "install/index"),
("Overview", "overview/index"),
("Tutorials", "auto_tutorials/index"),
("Examples", "auto_examples/index"),
("Glossary", "glossary"),
("API", "python_reference"),
],
}
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
# html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
# html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
html_logo = "_static/mne_logo_small.svg"
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
html_favicon = "_static/favicon.ico"
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# Add any extra paths that contain custom files (such as robots.txt or
# .htaccess) here, relative to this directory. These files are copied
# directly to the root of the documentation.
html_extra_path = [
'contributing.html',
'documentation.html',
'getting_started.html',
'install_mne_python.html',
]
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
# html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
# html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
# html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
# html_additional_pages = {}
# If false, no module index is generated.
# html_domain_indices = True
# If false, no index is generated.
# html_use_index = True
# If true, the index is split into individual pages for each letter.
# html_split_index = False
# If true, links to the reST sources are added to the pages.
html_show_sourcelink = False
html_copy_source = False
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
html_show_sphinx = False
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
# html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
# html_use_opensearch = ''
# variables to pass to HTML templating engine
build_dev_html = bool(int(os.environ.get('BUILD_DEV_HTML', False)))
html_context = {'use_google_analytics': True, 'use_twitter': True,
'use_media_buttons': True, 'build_dev_html': build_dev_html}
# This is the file name suffix for HTML files (e.g. ".xhtml").
# html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'mne-doc'
# -- Options for LaTeX output ---------------------------------------------
# The paper size ('letter' or 'a4').
# latex_paper_size = 'letter'
# The font size ('10pt', '11pt' or '12pt').
# latex_font_size = '10pt'
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass
# [howto/manual]).
latex_documents = [
# ('index', 'MNE.tex', u'MNE Manual',
# u'MNE Contributors', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
latex_logo = "_static/logo.png"
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
latex_toplevel_sectioning = 'part'
# Additional stuff for the LaTeX preamble.
# latex_preamble = ''
# Documents to append as an appendix to all manuals.
# latex_appendices = []
# If false, no module index is generated.
# latex_domain_indices = True
trim_doctests_flags = True
# Example configuration for intersphinx: refer to the Python standard library.
intersphinx_mapping = {
'python': ('https://docs.python.org/3', None),
'numpy': ('https://numpy.org/devdocs', None),
'scipy': ('https://scipy.github.io/devdocs', None),
'matplotlib': ('https://matplotlib.org', None),
'sklearn': ('https://scikit-learn.org/stable', None),
'numba': ('https://numba.pydata.org/numba-doc/latest', None),
'joblib': ('https://joblib.readthedocs.io/en/latest', None),
'mayavi': ('http://docs.enthought.com/mayavi/mayavi', None),
'nibabel': ('https://nipy.org/nibabel', None),
'nilearn': ('http://nilearn.github.io', None),
'surfer': ('https://pysurfer.github.io/', None),
'pandas': ('https://pandas.pydata.org/pandas-docs/stable', None),
'seaborn': ('https://seaborn.pydata.org/', None),
'statsmodels': ('https://www.statsmodels.org/dev', None),
'patsy': ('https://patsy.readthedocs.io/en/latest', None),
# There are some problems with dipy's redirect:
# https://github.com/nipy/dipy/issues/1955
'dipy': ('https://dipy.org/documentation/latest',
'https://dipy.org/documentation/1.1.1./objects.inv/'),
'mne_realtime': ('https://mne.tools/mne-realtime', None),
'picard': ('https://pierreablin.github.io/picard/', None),
}
##############################################################################
# sphinxcontrib-bibtex
bibtex_bibfiles = ['./references.bib']
bibtex_style = 'unsrt'
bibtex_footbibliography_header = ''
##############################################################################
# sphinx-gallery
examples_dirs = ['../examples', '../tutorials']
gallery_dirs = ['auto_examples', 'auto_tutorials']
os.environ['_MNE_BUILDING_DOC'] = 'true'
scrapers = ('matplotlib',)
try:
mlab = mne.utils._import_mlab()
# Do not pop up any mayavi windows while running the
# examples. These are very annoying since they steal the focus.
mlab.options.offscreen = True
# hack to initialize the Mayavi Engine
mlab.test_plot3d()
mlab.close()
except Exception:
pass
else:
scrapers += ('mayavi',)
try:
with warnings.catch_warnings():
warnings.filterwarnings("ignore", category=DeprecationWarning)
import pyvista
pyvista.OFF_SCREEN = False
except Exception:
pass
else:
scrapers += ('pyvista',)
if any(x in scrapers for x in ('pyvista', 'mayavi')):
from traits.api import push_exception_handler
push_exception_handler(reraise_exceptions=True)
report_scraper = mne.report._ReportScraper()
scrapers += (report_scraper,)
else:
report_scraper = None
if 'pyvista' in scrapers:
brain_scraper = mne.viz._brain._BrainScraper()
scrapers = list(scrapers)
scrapers.insert(scrapers.index('pyvista'), brain_scraper)
scrapers = tuple(scrapers)
def append_attr_meth_examples(app, what, name, obj, options, lines):
"""Append SG examples backreferences to method and attr docstrings."""
# NumpyDoc nicely embeds method and attribute docstrings for us, but it
# does not respect the autodoc templates that would otherwise insert
# the .. include:: lines, so we need to do it.
# Eventually this could perhaps live in SG.
if what in ('attribute', 'method'):
size = os.path.getsize(op.join(
op.dirname(__file__), 'generated', '%s.examples' % (name,)))
if size > 0:
lines += """
.. _sphx_glr_backreferences_{1}:
.. rubric:: Examples using ``{0}``:
.. minigallery:: {1}
""".format(name.split('.')[-1], name).split('\n')
def setup(app):
"""Set up the Sphinx app."""
app.connect('autodoc-process-docstring', append_attr_meth_examples)
if report_scraper is not None:
report_scraper.app = app
app.connect('build-finished', report_scraper.copyfiles)
class Resetter(object):
"""Simple class to make the str(obj) static for Sphinx build env hash."""
def __repr__(self):
return '<%s>' % (self.__class__.__name__,)
def __call__(self, gallery_conf, fname):
import matplotlib.pyplot as plt
reset_warnings(gallery_conf, fname)
# in case users have interactive mode turned on in matplotlibrc,
# turn it off here (otherwise the build can be very slow)
plt.ioff()
gc.collect()
def reset_warnings(gallery_conf, fname):
"""Ensure we are future compatible and ignore silly warnings."""
# In principle, our examples should produce no warnings.
# Here we cause warnings to become errors, with a few exceptions.
# This list should be considered alongside
# setup.cfg -> [tool:pytest] -> filterwarnings
# remove tweaks from other module imports or example runs
warnings.resetwarnings()
# restrict
warnings.filterwarnings('error')
# allow these, but show them
warnings.filterwarnings('always', '.*non-standard config type: "foo".*')
warnings.filterwarnings('always', '.*config type: "MNEE_USE_CUUDAA".*')
warnings.filterwarnings('always', '.*cannot make axes width small.*')
warnings.filterwarnings('always', '.*Axes that are not compatible.*')
warnings.filterwarnings('always', '.*FastICA did not converge.*')
warnings.filterwarnings( # xhemi morph (should probably update sample)
'always', '.*does not exist, creating it and saving it.*')
warnings.filterwarnings('default', module='sphinx') # internal warnings
warnings.filterwarnings(
'always', '.*converting a masked element to nan.*') # matplotlib?
# allow these warnings, but don't show them
warnings.filterwarnings(
'ignore', '.*OpenSSL\\.rand is deprecated.*')
warnings.filterwarnings('ignore', '.*is currently using agg.*')
warnings.filterwarnings( # SciPy-related warning (maybe 1.2.0 will fix it)
'ignore', '.*the matrix subclass is not the recommended.*')
warnings.filterwarnings( # some joblib warning
'ignore', '.*semaphore_tracker: process died unexpectedly.*')
warnings.filterwarnings( # needed until SciPy 1.2.0 is released
'ignore', '.*will be interpreted as an array index.*', module='scipy')
for key in ('HasTraits', r'numpy\.testing', 'importlib', r'np\.loads',
'Using or importing the ABCs from', # internal modules on 3.7
r"it will be an error for 'np\.bool_'", # ndimage
"DocumenterBridge requires a state object", # sphinx dev
"'U' mode is deprecated", # sphinx io
r"joblib is deprecated in 0\.21", # nilearn
'The usage of `cmp` is deprecated and will', # sklearn/pytest
'scipy.* is deprecated and will be removed in', # dipy
r'Converting `np\.character` to a dtype is deprecated', # vtk
r'sphinx\.util\.smartypants is deprecated',
'is a deprecated alias for the builtin', # NumPy
):
warnings.filterwarnings( # deal with other modules having bad imports
'ignore', message=".*%s.*" % key, category=DeprecationWarning)
warnings.filterwarnings( # deal with bootstrap-theme bug
'ignore', message=".*modify script_files in the theme.*",
category=Warning)
warnings.filterwarnings( # nilearn
'ignore', message=r'sklearn\.externals\.joblib is deprecated.*',
category=FutureWarning)
warnings.filterwarnings( # nilearn
'ignore', message=r'The sklearn.* module is.*', category=FutureWarning)
warnings.filterwarnings( # deal with other modules having bad imports
'ignore', message=".*ufunc size changed.*", category=RuntimeWarning)
warnings.filterwarnings( # realtime
'ignore', message=".*unclosed file.*", category=ResourceWarning)
warnings.filterwarnings('ignore', message='Exception ignored in.*')
# allow this ImportWarning, but don't show it
warnings.filterwarnings(
'ignore', message="can't resolve package from", category=ImportWarning)
warnings.filterwarnings(
'ignore', message='.*mne-realtime.*', category=DeprecationWarning)
reset_warnings(None, None)
sphinx_gallery_conf = {
'doc_module': ('mne',),
'reference_url': dict(mne=None),
'examples_dirs': examples_dirs,
'subsection_order': ExplicitOrder(['../examples/io/',
'../examples/simulation/',
'../examples/preprocessing/',
'../examples/visualization/',
'../examples/time_frequency/',
'../examples/stats/',
'../examples/decoding/',
'../examples/connectivity/',
'../examples/forward/',
'../examples/inverse/',
'../examples/realtime/',
'../examples/datasets/',
'../tutorials/intro/',
'../tutorials/io/',
'../tutorials/raw/',
'../tutorials/preprocessing/',
'../tutorials/epochs/',
'../tutorials/evoked/',
'../tutorials/time-freq/',
'../tutorials/source-modeling/',
'../tutorials/stats-sensor-space/',
'../tutorials/stats-source-space/',
'../tutorials/machine-learning/',
'../tutorials/simulation/',
'../tutorials/sample-datasets/',
'../tutorials/discussions/',
'../tutorials/misc/']),
'gallery_dirs': gallery_dirs,
'default_thumb_file': os.path.join('_static', 'mne_helmet.png'),
'backreferences_dir': 'generated',
'plot_gallery': 'True', # Avoid annoying Unicode/bool default warning
'download_section_examples': False,
'thumbnail_size': (160, 112),
'remove_config_comments': True,
'min_reported_time': 1.,
'abort_on_example_error': False,
'reset_modules': ('matplotlib', Resetter()), # called w/each script
'image_scrapers': scrapers,
'show_memory': not sys.platform.startswith('win'),
'line_numbers': False, # XXX currently (0.3.dev0) messes with style
'within_subsection_order': FileNameSortKey,
'capture_repr': ('_repr_html_',),
'junit': op.join('..', 'test-results', 'sphinx-gallery', 'junit.xml'),
'matplotlib_animations': True,
'compress_images': ('images', 'thumbnails'),
}
##############################################################################
# numpydoc
# XXX This hack defines what extra methods numpydoc will document
docscrape.ClassDoc.extra_public_methods = mne.utils._doc_special_members
numpydoc_class_members_toctree = False
numpydoc_attributes_as_param_list = True
numpydoc_xref_param_type = True
numpydoc_xref_aliases = {
# Python
'file-like': ':term:`file-like <python:file object>`',
# Matplotlib
'colormap': ':doc:`colormap <matplotlib:tutorials/colors/colormaps>`',
'color': ':doc:`color <matplotlib:api/colors_api>`',
'collection': ':doc:`collections <matplotlib:api/collections_api>`',
'Axes': 'matplotlib.axes.Axes',
'Figure': 'matplotlib.figure.Figure',
'Axes3D': 'mpl_toolkits.mplot3d.axes3d.Axes3D',
'ColorbarBase': 'matplotlib.colorbar.ColorbarBase',
# Mayavi
'mayavi.mlab.Figure': 'mayavi.core.api.Scene',
'mlab.Figure': 'mayavi.core.api.Scene',
# sklearn
'LeaveOneOut': 'sklearn.model_selection.LeaveOneOut',
# joblib
'joblib.Parallel': 'joblib.Parallel',
# nibabel
'Nifti1Image': 'nibabel.nifti1.Nifti1Image',
'Nifti2Image': 'nibabel.nifti2.Nifti2Image',
'SpatialImage': 'nibabel.spatialimages.SpatialImage',
# MNE
'Label': 'mne.Label', 'Forward': 'mne.Forward', 'Evoked': 'mne.Evoked',
'Info': 'mne.Info', 'SourceSpaces': 'mne.SourceSpaces',
'SourceMorph': 'mne.SourceMorph',
'Epochs': 'mne.Epochs', 'Layout': 'mne.channels.Layout',
'EvokedArray': 'mne.EvokedArray', 'BiHemiLabel': 'mne.BiHemiLabel',
'AverageTFR': 'mne.time_frequency.AverageTFR',
'EpochsTFR': 'mne.time_frequency.EpochsTFR',
'Raw': 'mne.io.Raw', 'ICA': 'mne.preprocessing.ICA',
'Covariance': 'mne.Covariance', 'Annotations': 'mne.Annotations',
'DigMontage': 'mne.channels.DigMontage',
'VectorSourceEstimate': 'mne.VectorSourceEstimate',
'VolSourceEstimate': 'mne.VolSourceEstimate',
'VolVectorSourceEstimate': 'mne.VolVectorSourceEstimate',
'MixedSourceEstimate': 'mne.MixedSourceEstimate',
'MixedVectorSourceEstimate': 'mne.MixedVectorSourceEstimate',
'SourceEstimate': 'mne.SourceEstimate', 'Projection': 'mne.Projection',
'ConductorModel': 'mne.bem.ConductorModel',
'Dipole': 'mne.Dipole', 'DipoleFixed': 'mne.DipoleFixed',
'InverseOperator': 'mne.minimum_norm.InverseOperator',
'CrossSpectralDensity': 'mne.time_frequency.CrossSpectralDensity',
'SourceMorph': 'mne.SourceMorph',
'Xdawn': 'mne.preprocessing.Xdawn',
'Report': 'mne.Report', 'Forward': 'mne.Forward',
'TimeDelayingRidge': 'mne.decoding.TimeDelayingRidge',
'Vectorizer': 'mne.decoding.Vectorizer',
'UnsupervisedSpatialFilter': 'mne.decoding.UnsupervisedSpatialFilter',
'TemporalFilter': 'mne.decoding.TemporalFilter',
'Scaler': 'mne.decoding.Scaler', 'SPoC': 'mne.decoding.SPoC',
'PSDEstimator': 'mne.decoding.PSDEstimator',
'LinearModel': 'mne.decoding.LinearModel',
'FilterEstimator': 'mne.decoding.FilterEstimator',
'EMS': 'mne.decoding.EMS', 'CSP': 'mne.decoding.CSP',
'Beamformer': 'mne.beamformer.Beamformer',
'Transform': 'mne.transforms.Transform',
}
numpydoc_xref_ignore = {
# words
'instance', 'instances', 'of', 'default', 'shape', 'or',
'with', 'length', 'pair', 'matplotlib', 'optional', 'kwargs', 'in',
'dtype', 'object', 'self.verbose',
# shapes
'n_vertices', 'n_faces', 'n_channels', 'm', 'n', 'n_events', 'n_colors',
'n_times', 'obj', 'n_chan', 'n_epochs', 'n_picks', 'n_ch_groups',
'n_dipoles', 'n_ica_components', 'n_pos', 'n_node_names', 'n_tapers',
'n_signals', 'n_step', 'n_freqs', 'wsize', 'Tx', 'M', 'N', 'p', 'q',
'n_observations', 'n_regressors', 'n_cols', 'n_frequencies', 'n_tests',
'n_samples', 'n_permutations', 'nchan', 'n_points', 'n_features',
'n_parts', 'n_features_new', 'n_components', 'n_labels', 'n_events_in',
'n_splits', 'n_scores', 'n_outputs', 'n_trials', 'n_estimators', 'n_tasks',
'nd_features', 'n_classes', 'n_targets', 'n_slices', 'n_hpi', 'n_fids',
'n_elp', 'n_pts', 'n_tris', 'n_nodes', 'n_nonzero', 'n_events_out',
'n_segments', 'n_orient_inv', 'n_orient_fwd', 'n_orient', 'n_dipoles_lcmv',
'n_dipoles_fwd', 'n_picks_ref',
# Undocumented (on purpose)
'RawKIT', 'RawEximia', 'RawEGI', 'RawEEGLAB', 'RawEDF', 'RawCTF', 'RawBTi',
'RawBrainVision', 'RawCurry', 'RawNIRX', 'RawGDF', 'RawSNIRF',
# sklearn subclasses
'mapping', 'to', 'any',
# unlinkable
'mayavi.mlab.pipeline.surface',
'CoregFrame', 'Kit2FiffFrame', 'FiducialsFrame',
}
| bsd-3-clause | scenarios/tensorflow | tensorflow/contrib/learn/python/learn/estimators/__init__.py | 6 | 11427 |
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""An estimator is a rule for calculating an estimate of a given quantity.
# Estimators
* **Estimators** are used to train and evaluate TensorFlow models.
They support regression and classification problems.
* **Classifiers** are functions that have discrete outcomes.
* **Regressors** are functions that predict continuous values.
## Choosing the correct estimator
* For **Regression** problems use one of the following:
* `LinearRegressor`: Uses linear model.
* `DNNRegressor`: Uses DNN.
* `DNNLinearCombinedRegressor`: Uses Wide & Deep.
* `TensorForestEstimator`: Uses RandomForest. Use `.predict()` for
regression problems.
* `Estimator`: Use when you need a custom model.
* For **Classification** problems use one of the following:
* `LinearClassifier`: Multiclass classifier using Linear model.
* `DNNClassifier`: Multiclass classifier using DNN.
* `DNNLinearCombinedClassifier`: Multiclass classifier using Wide & Deep.
* `TensorForestEstimator`: Uses RandomForest. Use `.predict_proba()` when
using for binary classification problems.
* `SVM`: Binary classifier using linear SVMs.
* `LogisticRegressor`: Use when you need custom model for binary
classification.
* `Estimator`: Use when you need custom model for N class classification.
## Pre-canned Estimators
Pre-canned estimators are machine learning estimators premade for general
purpose problems. If you need more customization, you can always write your
own custom estimator as described in the section below.
Pre-canned estimators are tested and optimized for speed and quality.
### Define the feature columns
Here are some possible types of feature columns used as inputs to a pre-canned
estimator.
Feature columns may vary based on the estimator used, so the sections below
show which feature columns are fed to each estimator.
```python
sparse_feature_a = sparse_column_with_keys(
column_name="sparse_feature_a", keys=["AB", "CD", ...])
embedding_feature_a = embedding_column(
sparse_id_column=sparse_feature_a, dimension=3, combiner="sum")
sparse_feature_b = sparse_column_with_hash_bucket(
column_name="sparse_feature_b", hash_bucket_size=1000)
embedding_feature_b = embedding_column(
sparse_id_column=sparse_feature_b, dimension=16, combiner="sum")
crossed_feature_a_x_b = crossed_column(
columns=[sparse_feature_a, sparse_feature_b], hash_bucket_size=10000)
real_feature = real_valued_column("real_feature")
real_feature_buckets = bucketized_column(
source_column=real_feature,
boundaries=[18, 25, 30, 35, 40, 45, 50, 55, 60, 65])
```
### Create the pre-canned estimator
DNNClassifier, DNNRegressor, and DNNLinearCombinedClassifier are all pretty
similar to each other in how you use them. You can easily plug in an
optimizer and/or regularization to those estimators.
#### DNNClassifier
A classifier for TensorFlow DNN models.
```python
my_features = [embedding_feature_a, embedding_feature_b]
estimator = DNNClassifier(
feature_columns=my_features,
hidden_units=[1024, 512, 256],
optimizer=tf.train.ProximalAdagradOptimizer(
learning_rate=0.1,
l1_regularization_strength=0.001
))
```
#### DNNRegressor
A regressor for TensorFlow DNN models.
```python
my_features = [embedding_feature_a, embedding_feature_b]
estimator = DNNRegressor(
feature_columns=my_features,
hidden_units=[1024, 512, 256])
# Or estimator using the ProximalAdagradOptimizer optimizer with
# regularization.
estimator = DNNRegressor(
feature_columns=my_features,
hidden_units=[1024, 512, 256],
optimizer=tf.train.ProximalAdagradOptimizer(
learning_rate=0.1,
l1_regularization_strength=0.001
))
```
#### DNNLinearCombinedClassifier
A classifier for TensorFlow Linear and DNN joined training models.
* Wide and deep model
* Multi class (2 by default)
```python
my_linear_features = [crossed_feature_a_x_b]
my_deep_features = [embedding_feature_a, embedding_feature_b]
estimator = DNNLinearCombinedClassifier(
# Common settings
n_classes=n_classes,
weight_column_name=weight_column_name,
# Wide settings
linear_feature_columns=my_linear_features,
linear_optimizer=tf.train.FtrlOptimizer(...),
# Deep settings
dnn_feature_columns=my_deep_features,
dnn_hidden_units=[1000, 500, 100],
dnn_optimizer=tf.train.AdagradOptimizer(...))
```
#### LinearClassifier
Train a linear model to classify instances into one of multiple possible
classes. When the number of possible classes is 2, this is binary classification.
```python
my_features = [sparse_feature_b, crossed_feature_a_x_b]
estimator = LinearClassifier(
feature_columns=my_features,
optimizer=tf.train.FtrlOptimizer(
learning_rate=0.1,
l1_regularization_strength=0.001
))
```
#### LinearRegressor
Train a linear regression model to predict a label value given observation of
feature values.
```python
my_features = [sparse_feature_b, crossed_feature_a_x_b]
estimator = LinearRegressor(
feature_columns=my_features)
```
#### LogisticRegressor
Logistic regression estimator for binary classification.
```python
# See tf.contrib.learn.Estimator(...) for details on model_fn structure
def my_model_fn(features, targets, mode, params):
    pass
estimator = LogisticRegressor(model_fn=my_model_fn)
# Input builders
def input_fn_train():
    pass
estimator.fit(input_fn=input_fn_train)
estimator.predict(x=x)
```
#### SVM - Support Vector Machine
Support Vector Machine (SVM) model for binary classification.
Currently only linear SVMs are supported.
```python
my_features = [real_feature, sparse_feature_a]
estimator = SVM(
example_id_column='example_id',
feature_columns=my_features,
l2_regularization=10.0)
```
#### TensorForestEstimator
Supports regression and binary classification.
```python
params = tf.contrib.tensor_forest.python.tensor_forest.ForestHParams(
num_classes=2, num_features=40, num_trees=10, max_nodes=1000)
# Estimator using the default graph builder.
estimator = TensorForestEstimator(params, model_dir=model_dir)
# Or estimator using TrainingLossForest as the graph builder.
estimator = TensorForestEstimator(
params, graph_builder_class=tensor_forest.TrainingLossForest,
model_dir=model_dir)
# Input builders
def input_fn_train():  # returns x, y
    ...
def input_fn_eval():  # returns x, y
    ...
estimator.fit(input_fn=input_fn_train)
estimator.evaluate(input_fn=input_fn_eval)
estimator.predict(x=x)
```
### Use the estimator
There are two main functions for using estimators: `fit` for training and
`evaluate` for evaluation.
You can pass each one its own input function in order to use different
datasets for training and evaluation.
```python
# Input builders
def input_fn_train():  # returns x, y
    ...
estimator.fit(input_fn=input_fn_train)
def input_fn_eval():  # returns x, y
    ...
estimator.evaluate(input_fn=input_fn_eval)
estimator.predict(x=x)
```
## Creating Custom Estimator
To create a custom `Estimator`, provide a function to `Estimator`'s
constructor that builds your model (`model_fn`, below):
```python
estimator = tf.contrib.learn.Estimator(
model_fn=model_fn,
model_dir=model_dir) # Where the model's data (e.g., checkpoints)
# are saved.
```
Here is a skeleton of this function, with descriptions of its arguments and
return values in the accompanying tables:
```python
def model_fn(features, targets, mode, params):
# Logic to do the following:
# 1. Configure the model via TensorFlow operations
# 2. Define the loss function for training/evaluation
# 3. Define the training operation/optimizer
# 4. Generate predictions
return predictions, loss, train_op
```
You may use `mode` and check against
`tf.contrib.learn.ModeKeys.{TRAIN, EVAL, INFER}` to parameterize `model_fn`.
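For instance, here is a minimal sketch of a `model_fn` that branches on `mode`.
The single feature key `"x"`, the linear model, and the optimizer settings are
illustrative assumptions rather than part of any API; `import tensorflow as tf`
is assumed as in the other snippets, and helper names such as
`tf.contrib.framework.get_global_step` may differ between TensorFlow versions:
```python
def my_model_fn(features, targets, mode, params):
    # Illustrative linear model; "x" is an assumed feature key.
    w = tf.get_variable("w", shape=[1])
    b = tf.get_variable("b", shape=[1])
    predictions = features["x"] * w + b

    loss = None
    train_op = None
    if mode != tf.contrib.learn.ModeKeys.INFER:
        # The loss is needed in both TRAIN and EVAL modes.
        loss = tf.losses.mean_squared_error(targets, predictions)
    if mode == tf.contrib.learn.ModeKeys.TRAIN:
        # The training op is only built when training.
        train_op = tf.train.GradientDescentOptimizer(0.01).minimize(
            loss, global_step=tf.contrib.framework.get_global_step())
    return predictions, loss, train_op
```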
In the Further Reading section below, there is an end-to-end TensorFlow
tutorial for building a custom estimator.
## Additional Estimators
There is an additional estimator under
`tensorflow.contrib.factorization.python.ops`:
* Gaussian mixture model (GMM) clustering
## Further reading
For further reading, there are several tutorials with relevant topics,
including:
* [Overview of linear models](../../../tutorials/linear/overview.md)
* [Linear model tutorial](../../../tutorials/wide/index.md)
* [Wide and deep learning tutorial](../../../tutorials/wide_and_deep/index.md)
* [Custom estimator tutorial](../../../tutorials/estimators/index.md)
* [Building input functions](../../../tutorials/input_fn/index.md)
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.contrib.learn.python.learn.estimators._sklearn import NotFittedError
from tensorflow.contrib.learn.python.learn.estimators.dnn import DNNClassifier
from tensorflow.contrib.learn.python.learn.estimators.dnn import DNNRegressor
from tensorflow.contrib.learn.python.learn.estimators.dnn_linear_combined import DNNLinearCombinedClassifier
from tensorflow.contrib.learn.python.learn.estimators.dnn_linear_combined import DNNLinearCombinedRegressor
from tensorflow.contrib.learn.python.learn.estimators.estimator import BaseEstimator
from tensorflow.contrib.learn.python.learn.estimators.estimator import Estimator
from tensorflow.contrib.learn.python.learn.estimators.estimator import infer_real_valued_columns_from_input
from tensorflow.contrib.learn.python.learn.estimators.estimator import infer_real_valued_columns_from_input_fn
from tensorflow.contrib.learn.python.learn.estimators.estimator import SKCompat
from tensorflow.contrib.learn.python.learn.estimators.kmeans import KMeansClustering
from tensorflow.contrib.learn.python.learn.estimators.linear import LinearClassifier
from tensorflow.contrib.learn.python.learn.estimators.linear import LinearRegressor
from tensorflow.contrib.learn.python.learn.estimators.logistic_regressor import LogisticRegressor
from tensorflow.contrib.learn.python.learn.estimators.metric_key import MetricKey
from tensorflow.contrib.learn.python.learn.estimators.model_fn import ModeKeys
from tensorflow.contrib.learn.python.learn.estimators.model_fn import ModelFnOps
from tensorflow.contrib.learn.python.learn.estimators.prediction_key import PredictionKey
from tensorflow.contrib.learn.python.learn.estimators.run_config import ClusterConfig
from tensorflow.contrib.learn.python.learn.estimators.run_config import Environment
from tensorflow.contrib.learn.python.learn.estimators.run_config import RunConfig
from tensorflow.contrib.learn.python.learn.estimators.run_config import TaskType
from tensorflow.contrib.learn.python.learn.estimators.svm import SVM
|
apache-2.0
|
vshtanko/scikit-learn
|
benchmarks/bench_plot_neighbors.py
|
287
|
6433
|
"""
Plot the scaling of the nearest neighbors algorithms with k, D, and N
"""
from time import time
import numpy as np
import pylab as pl
from matplotlib import ticker
from sklearn import neighbors, datasets
def get_data(N, D, dataset='dense'):
if dataset == 'dense':
np.random.seed(0)
return np.random.random((N, D))
elif dataset == 'digits':
X = datasets.load_digits().data
i = np.argsort(X[0])[::-1]
X = X[:, i]
return X[:N, :D]
else:
raise ValueError("invalid dataset: %s" % dataset)
def barplot_neighbors(Nrange=2 ** np.arange(1, 11),
Drange=2 ** np.arange(7),
krange=2 ** np.arange(10),
N=1000,
D=64,
k=5,
leaf_size=30,
dataset='digits'):
algorithms = ('kd_tree', 'brute', 'ball_tree')
fiducial_values = {'N': N,
'D': D,
'k': k}
#------------------------------------------------------------
# varying N
N_results_build = dict([(alg, np.zeros(len(Nrange)))
for alg in algorithms])
N_results_query = dict([(alg, np.zeros(len(Nrange)))
for alg in algorithms])
for i, NN in enumerate(Nrange):
print("N = %i (%i out of %i)" % (NN, i + 1, len(Nrange)))
X = get_data(NN, D, dataset)
for algorithm in algorithms:
nbrs = neighbors.NearestNeighbors(n_neighbors=min(NN, k),
algorithm=algorithm,
leaf_size=leaf_size)
t0 = time()
nbrs.fit(X)
t1 = time()
nbrs.kneighbors(X)
t2 = time()
N_results_build[algorithm][i] = (t1 - t0)
N_results_query[algorithm][i] = (t2 - t1)
#------------------------------------------------------------
# varying D
D_results_build = dict([(alg, np.zeros(len(Drange)))
for alg in algorithms])
D_results_query = dict([(alg, np.zeros(len(Drange)))
for alg in algorithms])
for i, DD in enumerate(Drange):
print("D = %i (%i out of %i)" % (DD, i + 1, len(Drange)))
X = get_data(N, DD, dataset)
for algorithm in algorithms:
nbrs = neighbors.NearestNeighbors(n_neighbors=k,
algorithm=algorithm,
leaf_size=leaf_size)
t0 = time()
nbrs.fit(X)
t1 = time()
nbrs.kneighbors(X)
t2 = time()
D_results_build[algorithm][i] = (t1 - t0)
D_results_query[algorithm][i] = (t2 - t1)
#------------------------------------------------------------
# varying k
k_results_build = dict([(alg, np.zeros(len(krange)))
for alg in algorithms])
k_results_query = dict([(alg, np.zeros(len(krange)))
for alg in algorithms])
    X = get_data(N, D, dataset)
for i, kk in enumerate(krange):
print("k = %i (%i out of %i)" % (kk, i + 1, len(krange)))
for algorithm in algorithms:
nbrs = neighbors.NearestNeighbors(n_neighbors=kk,
algorithm=algorithm,
leaf_size=leaf_size)
t0 = time()
nbrs.fit(X)
t1 = time()
nbrs.kneighbors(X)
t2 = time()
k_results_build[algorithm][i] = (t1 - t0)
k_results_query[algorithm][i] = (t2 - t1)
pl.figure(figsize=(8, 11))
for (sbplt, vals, quantity,
build_time, query_time) in [(311, Nrange, 'N',
N_results_build,
N_results_query),
(312, Drange, 'D',
D_results_build,
D_results_query),
(313, krange, 'k',
k_results_build,
k_results_query)]:
ax = pl.subplot(sbplt, yscale='log')
pl.grid(True)
tick_vals = []
tick_labels = []
bottom = 10 ** np.min([min(np.floor(np.log10(build_time[alg])))
for alg in algorithms])
for i, alg in enumerate(algorithms):
xvals = 0.1 + i * (1 + len(vals)) + np.arange(len(vals))
width = 0.8
c_bar = pl.bar(xvals, build_time[alg] - bottom,
width, bottom, color='r')
q_bar = pl.bar(xvals, query_time[alg],
width, build_time[alg], color='b')
tick_vals += list(xvals + 0.5 * width)
tick_labels += ['%i' % val for val in vals]
pl.text((i + 0.02) / len(algorithms), 0.98, alg,
transform=ax.transAxes,
ha='left',
va='top',
bbox=dict(facecolor='w', edgecolor='w', alpha=0.5))
pl.ylabel('Time (s)')
ax.xaxis.set_major_locator(ticker.FixedLocator(tick_vals))
ax.xaxis.set_major_formatter(ticker.FixedFormatter(tick_labels))
for label in ax.get_xticklabels():
label.set_rotation(-90)
label.set_fontsize(10)
title_string = 'Varying %s' % quantity
descr_string = ''
for s in 'NDk':
if s == quantity:
pass
else:
descr_string += '%s = %i, ' % (s, fiducial_values[s])
descr_string = descr_string[:-2]
pl.text(1.01, 0.5, title_string,
transform=ax.transAxes, rotation=-90,
ha='left', va='center', fontsize=20)
pl.text(0.99, 0.5, descr_string,
transform=ax.transAxes, rotation=-90,
ha='right', va='center')
pl.gcf().suptitle("%s data set" % dataset.capitalize(), fontsize=16)
pl.figlegend((c_bar, q_bar), ('construction', 'N-point query'),
'upper right')
if __name__ == '__main__':
barplot_neighbors(dataset='digits')
barplot_neighbors(dataset='dense')
pl.show()
|
bsd-3-clause
|
rlouf/patterns-of-segregation
|
bin/analysis/neighbourhoods_polycentrism.py
|
1
|
3066
|
"""neighbourhoods_polycentrism
Compute the ratio of the size of the two largest neighbourhoods for all 2000
MSA.
"""
from __future__ import division
import csv
import networkx as nx
from matplotlib import pylab as plt
#
# Preparation
#
## List of MSA
msa = {}
with open('data/names/msa.csv', 'r') as source:
reader = csv.reader(source, delimiter='\t')
reader.next()
for rows in reader:
msa[rows[0]] = rows[1]
## Classes
classes = {}
with open('extr/classes/msa_average/classes.csv', 'r') as source:
reader = csv.reader(source, delimiter='\t')
reader.next()
for rows in reader:
classes[rows[0]] =[int(r) for r in rows[1:]]
#
# Compute the clustering value
#
ratio_size = {}
for i, city in enumerate(msa):
print "Compute clustering for %s (%s/%s)"%(msa[city],
i+1,
len(msa))
## Import distribution
households = {}
with open('data/income/msa/%s/income.csv'%city, 'r') as source:
reader = csv.reader(source, delimiter='\t')
reader.next()
for rows in reader:
households[rows[0]] = {c:int(h) for c,h in enumerate(rows[1:])}
income = {bg:{cl:sum([households[bg][c] for c in classes[cl]]) for cl in classes}
for bg in households}
## Import adjacency matrix
adjacency = {}
with open('extr/adjacency_bg/msa/%s.csv'%city, 'r') as source:
reader = csv.reader(source, delimiter='\t')
reader.next()
for rows in reader:
adjacency[rows[0]] = rows[1:]
## Transform into graph
G = nx.from_dict_of_lists(adjacency)
## Import list of bg where each class is overrepresented
over_bg = {cl:[] for cl in classes}
with open('extr/neighbourhoods/classes/msa/%s.csv'%city, 'r') as source:
reader = csv.reader(source, delimiter='\t')
for rows in reader:
over_bg[rows[0]].append(rows[1])
## Extract neighbourhoods (the connected components of the subgraph
## constituted of the areal units where the class is overrepresented)
neighbourhoods = {cl: [[n for n in subgraph]
for subgraph in nx.connected_component_subgraphs(G.subgraph(over_bg[cl]))]
for cl in classes}
neigh_pops = {cl: [sum([income[bg][cl] for bg in neigh]) for neigh in neighbourhoods[cl]]
for cl in classes}
ratio_size[city] = {cl: (sorted(neigh_pops[cl], reverse=1)[1] /
max(neigh_pops[cl]))
if len(neigh_pops[cl]) > 1 else 0
for cl in classes}
#
# Save the data
#
with open('extr/neighbourhoods/polycentrism/polycentrism.csv', 'w') as output:
output.write('MSA FIP')
for cl in sorted(classes):
output.write('\t%s'%cl)
output.write('\n')
for city in ratio_size:
output.write(str(city))
for cl in sorted(classes):
output.write('\t%s'%ratio_size[city][cl])
output.write('\n')
|
bsd-3-clause
|
Lawrence-Liu/scikit-learn
|
sklearn/neighbors/approximate.py
|
128
|
22351
|
"""Approximate nearest neighbor search"""
# Author: Maheshakya Wijewardena <[email protected]>
# Joel Nothman <[email protected]>
import numpy as np
import warnings
from scipy import sparse
from .base import KNeighborsMixin, RadiusNeighborsMixin
from ..base import BaseEstimator
from ..utils.validation import check_array
from ..utils import check_random_state
from ..metrics.pairwise import pairwise_distances
from ..random_projection import GaussianRandomProjection
__all__ = ["LSHForest"]
HASH_DTYPE = '>u4'
MAX_HASH_SIZE = np.dtype(HASH_DTYPE).itemsize * 8
def _find_matching_indices(tree, bin_X, left_mask, right_mask):
"""Finds indices in sorted array of integers.
Most significant h bits in the binary representations of the
integers are matched with the items' most significant h bits.
"""
left_index = np.searchsorted(tree, bin_X & left_mask)
right_index = np.searchsorted(tree, bin_X | right_mask,
side='right')
return left_index, right_index
def _find_longest_prefix_match(tree, bin_X, hash_size,
left_masks, right_masks):
"""Find the longest prefix match in tree for each query in bin_X
Most significant bits are considered as the prefix.
"""
hi = np.empty_like(bin_X, dtype=np.intp)
hi.fill(hash_size)
lo = np.zeros_like(bin_X, dtype=np.intp)
res = np.empty_like(bin_X, dtype=np.intp)
left_idx, right_idx = _find_matching_indices(tree, bin_X,
left_masks[hi],
right_masks[hi])
found = right_idx > left_idx
res[found] = lo[found] = hash_size
r = np.arange(bin_X.shape[0])
kept = r[lo < hi] # indices remaining in bin_X mask
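    # Binary search on the prefix length for queries that are still unresolved
    # (lo < hi): a match at `mid` raises lo, a miss lowers hi.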
while kept.shape[0]:
mid = (lo.take(kept) + hi.take(kept)) // 2
left_idx, right_idx = _find_matching_indices(tree,
bin_X.take(kept),
left_masks[mid],
right_masks[mid])
found = right_idx > left_idx
mid_found = mid[found]
lo[kept[found]] = mid_found + 1
res[kept[found]] = mid_found
hi[kept[~found]] = mid[~found]
kept = r[lo < hi]
return res
class ProjectionToHashMixin(object):
"""Turn a transformed real-valued array into a hash"""
@staticmethod
def _to_hash(projected):
if projected.shape[1] % 8 != 0:
raise ValueError('Require reduced dimensionality to be a multiple '
'of 8 for hashing')
# XXX: perhaps non-copying operation better
out = np.packbits((projected > 0).astype(int)).view(dtype=HASH_DTYPE)
return out.reshape(projected.shape[0], -1)
def fit_transform(self, X, y=None):
self.fit(X)
return self.transform(X)
def transform(self, X, y=None):
return self._to_hash(super(ProjectionToHashMixin, self).transform(X))
class GaussianRandomProjectionHash(ProjectionToHashMixin,
GaussianRandomProjection):
"""Use GaussianRandomProjection to produce a cosine LSH fingerprint"""
def __init__(self,
n_components=8,
random_state=None):
super(GaussianRandomProjectionHash, self).__init__(
n_components=n_components,
random_state=random_state)
def _array_of_arrays(list_of_arrays):
"""Creates an array of array from list of arrays."""
out = np.empty(len(list_of_arrays), dtype=object)
out[:] = list_of_arrays
return out
class LSHForest(BaseEstimator, KNeighborsMixin, RadiusNeighborsMixin):
"""Performs approximate nearest neighbor search using LSH forest.
LSH Forest: Locality Sensitive Hashing forest [1] is an alternative
    to vanilla approximate nearest neighbor search methods.
LSH forest data structure has been implemented using sorted
arrays and binary search and 32 bit fixed-length hashes.
Random projection is used as the hash family which approximates
cosine distance.
The cosine distance is defined as ``1 - cosine_similarity``: the lowest
value is 0 (identical point) but it is bounded above by 2 for the farthest
points. Its value does not depend on the norm of the vector points but
only on their relative angles.
Read more in the :ref:`User Guide <approximate_nearest_neighbors>`.
Parameters
----------
n_estimators : int (default = 10)
Number of trees in the LSH Forest.
min_hash_match : int (default = 4)
lowest hash length to be searched when candidate selection is
performed for nearest neighbors.
n_candidates : int (default = 10)
Minimum number of candidates evaluated per estimator, assuming enough
items meet the `min_hash_match` constraint.
n_neighbors : int (default = 5)
Number of neighbors to be returned from query function when
it is not provided to the :meth:`kneighbors` method.
    radius : float, optional (default = 1.0)
        Radius from the data point to its neighbors. This is the parameter
        space to use by default for the :meth:`radius_neighbors` queries.
radius_cutoff_ratio : float, optional (default = 0.9)
        A value ranging from 0 to 1. Radius neighbors will be searched until
the ratio between total neighbors within the radius and the total
candidates becomes less than this value unless it is terminated by
hash length reaching `min_hash_match`.
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
Attributes
----------
hash_functions_ : list of GaussianRandomProjectionHash objects
Hash function g(p,x) for a tree is an array of 32 randomly generated
        float arrays with the same dimension as the data set. This array is
stored in GaussianRandomProjectionHash object and can be obtained
from ``components_`` attribute.
trees_ : array, shape (n_estimators, n_samples)
Each tree (corresponding to a hash function) contains an array of
sorted hashed values. The array representation may change in future
versions.
original_indices_ : array, shape (n_estimators, n_samples)
Original indices of sorted hashed values in the fitted index.
References
----------
.. [1] M. Bawa, T. Condie and P. Ganesan, "LSH Forest: Self-Tuning
Indexes for Similarity Search", WWW '05 Proceedings of the
14th international conference on World Wide Web, 651-660,
2005.
Examples
--------
>>> from sklearn.neighbors import LSHForest
>>> X_train = [[5, 5, 2], [21, 5, 5], [1, 1, 1], [8, 9, 1], [6, 10, 2]]
>>> X_test = [[9, 1, 6], [3, 1, 10], [7, 10, 3]]
>>> lshf = LSHForest()
>>> lshf.fit(X_train) # doctest: +NORMALIZE_WHITESPACE
LSHForest(min_hash_match=4, n_candidates=50, n_estimators=10,
n_neighbors=5, radius=1.0, radius_cutoff_ratio=0.9,
random_state=None)
>>> distances, indices = lshf.kneighbors(X_test, n_neighbors=2)
>>> distances # doctest: +ELLIPSIS
array([[ 0.069..., 0.149...],
[ 0.229..., 0.481...],
[ 0.004..., 0.014...]])
>>> indices
array([[1, 2],
[2, 0],
[4, 0]])
"""
def __init__(self, n_estimators=10, radius=1.0, n_candidates=50,
n_neighbors=5, min_hash_match=4, radius_cutoff_ratio=.9,
random_state=None):
self.n_estimators = n_estimators
self.radius = radius
self.random_state = random_state
self.n_candidates = n_candidates
self.n_neighbors = n_neighbors
self.min_hash_match = min_hash_match
self.radius_cutoff_ratio = radius_cutoff_ratio
def _compute_distances(self, query, candidates):
"""Computes the cosine distance.
Distance is from the query to points in the candidates array.
Returns argsort of distances in the candidates
array and sorted distances.
"""
if candidates.shape == (0,):
# needed since _fit_X[np.array([])] doesn't work if _fit_X sparse
return np.empty(0, dtype=np.int), np.empty(0, dtype=float)
if sparse.issparse(self._fit_X):
candidate_X = self._fit_X[candidates]
else:
candidate_X = self._fit_X.take(candidates, axis=0, mode='clip')
distances = pairwise_distances(query, candidate_X,
metric='cosine')[0]
distance_positions = np.argsort(distances)
distances = distances.take(distance_positions, mode='clip', axis=0)
return distance_positions, distances
def _generate_masks(self):
"""Creates left and right masks for all hash lengths."""
tri_size = MAX_HASH_SIZE + 1
# Called once on fitting, output is independent of hashes
left_mask = np.tril(np.ones((tri_size, tri_size), dtype=int))[:, 1:]
right_mask = left_mask[::-1, ::-1]
self._left_mask = np.packbits(left_mask).view(dtype=HASH_DTYPE)
self._right_mask = np.packbits(right_mask).view(dtype=HASH_DTYPE)
def _get_candidates(self, query, max_depth, bin_queries, n_neighbors):
"""Performs the Synchronous ascending phase.
Returns an array of candidates, their distance ranks and
distances.
"""
index_size = self._fit_X.shape[0]
# Number of candidates considered including duplicates
# XXX: not sure whether this is being calculated correctly wrt
# duplicates from different iterations through a single tree
n_candidates = 0
candidate_set = set()
min_candidates = self.n_candidates * self.n_estimators
while (max_depth > self.min_hash_match and
(n_candidates < min_candidates or
len(candidate_set) < n_neighbors)):
left_mask = self._left_mask[max_depth]
right_mask = self._right_mask[max_depth]
for i in range(self.n_estimators):
start, stop = _find_matching_indices(self.trees_[i],
bin_queries[i],
left_mask, right_mask)
n_candidates += stop - start
candidate_set.update(
self.original_indices_[i][start:stop].tolist())
max_depth -= 1
candidates = np.fromiter(candidate_set, count=len(candidate_set),
dtype=np.intp)
# For insufficient candidates, candidates are filled.
# Candidates are filled from unselected indices uniformly.
if candidates.shape[0] < n_neighbors:
warnings.warn(
"Number of candidates is not sufficient to retrieve"
" %i neighbors with"
" min_hash_match = %i. Candidates are filled up"
" uniformly from unselected"
" indices." % (n_neighbors, self.min_hash_match))
remaining = np.setdiff1d(np.arange(0, index_size), candidates)
to_fill = n_neighbors - candidates.shape[0]
candidates = np.concatenate((candidates, remaining[:to_fill]))
ranks, distances = self._compute_distances(query,
candidates.astype(int))
return (candidates[ranks[:n_neighbors]],
distances[:n_neighbors])
def _get_radius_neighbors(self, query, max_depth, bin_queries, radius):
"""Finds radius neighbors from the candidates obtained.
Their distances from query are smaller than radius.
Returns radius neighbors and distances.
"""
ratio_within_radius = 1
threshold = 1 - self.radius_cutoff_ratio
total_candidates = np.array([], dtype=int)
total_neighbors = np.array([], dtype=int)
total_distances = np.array([], dtype=float)
while (max_depth > self.min_hash_match and
ratio_within_radius > threshold):
left_mask = self._left_mask[max_depth]
right_mask = self._right_mask[max_depth]
candidates = []
for i in range(self.n_estimators):
start, stop = _find_matching_indices(self.trees_[i],
bin_queries[i],
left_mask, right_mask)
candidates.extend(
self.original_indices_[i][start:stop].tolist())
candidates = np.setdiff1d(candidates, total_candidates)
total_candidates = np.append(total_candidates, candidates)
ranks, distances = self._compute_distances(query, candidates)
m = np.searchsorted(distances, radius, side='right')
positions = np.searchsorted(total_distances, distances[:m])
total_neighbors = np.insert(total_neighbors, positions,
candidates[ranks[:m]])
total_distances = np.insert(total_distances, positions,
distances[:m])
ratio_within_radius = (total_neighbors.shape[0] /
float(total_candidates.shape[0]))
max_depth = max_depth - 1
return total_neighbors, total_distances
def fit(self, X, y=None):
"""Fit the LSH forest on the data.
This creates binary hashes of input data points by getting the
dot product of input points and hash_function then
transforming the projection into a binary string array based
on the sign (positive/negative) of the projection.
A sorted array of binary hashes is created.
Parameters
----------
X : array_like or sparse (CSR) matrix, shape (n_samples, n_features)
List of n_features-dimensional data points. Each row
corresponds to a single data point.
Returns
-------
self : object
Returns self.
"""
self._fit_X = check_array(X, accept_sparse='csr')
# Creates a g(p,x) for each tree
self.hash_functions_ = []
self.trees_ = []
self.original_indices_ = []
rng = check_random_state(self.random_state)
int_max = np.iinfo(np.int32).max
for i in range(self.n_estimators):
# This is g(p,x) for a particular tree.
# Builds a single tree. Hashing is done on an array of data points.
# `GaussianRandomProjection` is used for hashing.
            # n_components = hash size and n_features = n_dim.
hasher = GaussianRandomProjectionHash(MAX_HASH_SIZE,
rng.randint(0, int_max))
hashes = hasher.fit_transform(self._fit_X)[:, 0]
original_index = np.argsort(hashes)
bin_hashes = hashes[original_index]
self.original_indices_.append(original_index)
self.trees_.append(bin_hashes)
self.hash_functions_.append(hasher)
self._generate_masks()
return self
def _query(self, X):
"""Performs descending phase to find maximum depth."""
# Calculate hashes of shape (n_samples, n_estimators, [hash_size])
bin_queries = np.asarray([hasher.transform(X)[:, 0]
for hasher in self.hash_functions_])
bin_queries = np.rollaxis(bin_queries, 1)
# descend phase
depths = [_find_longest_prefix_match(tree, tree_queries, MAX_HASH_SIZE,
self._left_mask, self._right_mask)
for tree, tree_queries in zip(self.trees_,
np.rollaxis(bin_queries, 1))]
return bin_queries, np.max(depths, axis=0)
def kneighbors(self, X, n_neighbors=None, return_distance=True):
"""Returns n_neighbors of approximate nearest neighbors.
Parameters
----------
X : array_like or sparse (CSR) matrix, shape (n_samples, n_features)
List of n_features-dimensional data points. Each row
corresponds to a single query.
        n_neighbors : int, optional (default = None)
Number of neighbors required. If not provided, this will
return the number specified at the initialization.
return_distance : boolean, optional (default = False)
Returns the distances of neighbors if set to True.
Returns
-------
dist : array, shape (n_samples, n_neighbors)
Array representing the cosine distances to each point,
only present if return_distance=True.
ind : array, shape (n_samples, n_neighbors)
Indices of the approximate nearest points in the population
matrix.
"""
if not hasattr(self, 'hash_functions_'):
raise ValueError("estimator should be fitted.")
if n_neighbors is None:
n_neighbors = self.n_neighbors
X = check_array(X, accept_sparse='csr')
neighbors, distances = [], []
bin_queries, max_depth = self._query(X)
for i in range(X.shape[0]):
neighs, dists = self._get_candidates(X[i], max_depth[i],
bin_queries[i],
n_neighbors)
neighbors.append(neighs)
distances.append(dists)
if return_distance:
return np.array(distances), np.array(neighbors)
else:
return np.array(neighbors)
def radius_neighbors(self, X, radius=None, return_distance=True):
"""Finds the neighbors within a given radius of a point or points.
Return the indices and distances of some points from the dataset
lying in a ball with size ``radius`` around the points of the query
array. Points lying on the boundary are included in the results.
The result points are *not* necessarily sorted by distance to their
query point.
LSH Forest being an approximate method, some true neighbors from the
indexed dataset might be missing from the results.
Parameters
----------
X : array_like or sparse (CSR) matrix, shape (n_samples, n_features)
List of n_features-dimensional data points. Each row
corresponds to a single query.
radius : float
Limiting distance of neighbors to return.
(default is the value passed to the constructor).
return_distance : boolean, optional (default = False)
Returns the distances of neighbors if set to True.
Returns
-------
dist : array, shape (n_samples,) of arrays
Each element is an array representing the cosine distances
to some points found within ``radius`` of the respective query.
Only present if ``return_distance=True``.
ind : array, shape (n_samples,) of arrays
Each element is an array of indices for neighbors within ``radius``
of the respective query.
"""
if not hasattr(self, 'hash_functions_'):
raise ValueError("estimator should be fitted.")
if radius is None:
radius = self.radius
X = check_array(X, accept_sparse='csr')
neighbors, distances = [], []
bin_queries, max_depth = self._query(X)
for i in range(X.shape[0]):
neighs, dists = self._get_radius_neighbors(X[i], max_depth[i],
bin_queries[i], radius)
neighbors.append(neighs)
distances.append(dists)
if return_distance:
return _array_of_arrays(distances), _array_of_arrays(neighbors)
else:
return _array_of_arrays(neighbors)
def partial_fit(self, X, y=None):
"""
Inserts new data into the already fitted LSH Forest.
Cost is proportional to new total size, so additions
should be batched.
Parameters
----------
X : array_like or sparse (CSR) matrix, shape (n_samples, n_features)
New data point to be inserted into the LSH Forest.
"""
X = check_array(X, accept_sparse='csr')
if not hasattr(self, 'hash_functions_'):
return self.fit(X)
if X.shape[1] != self._fit_X.shape[1]:
raise ValueError("Number of features in X and"
" fitted array does not match.")
n_samples = X.shape[0]
n_indexed = self._fit_X.shape[0]
for i in range(self.n_estimators):
bin_X = self.hash_functions_[i].transform(X)[:, 0]
# gets the position to be added in the tree.
positions = self.trees_[i].searchsorted(bin_X)
# adds the hashed value into the tree.
self.trees_[i] = np.insert(self.trees_[i],
positions, bin_X)
# add the entry into the original_indices_.
self.original_indices_[i] = np.insert(self.original_indices_[i],
positions,
np.arange(n_indexed,
n_indexed +
n_samples))
# adds the entry into the input_array.
if sparse.issparse(X) or sparse.issparse(self._fit_X):
self._fit_X = sparse.vstack((self._fit_X, X))
else:
self._fit_X = np.row_stack((self._fit_X, X))
return self
|
bsd-3-clause
|
tylerjereddy/scipy
|
doc/source/tutorial/stats/plots/kde_plot3.py
|
12
|
1249
|
import numpy as np
import matplotlib.pyplot as plt
from scipy import stats
rng = np.random.default_rng()
x1 = rng.normal(size=200) # random data, normal distribution
xs = np.linspace(x1.min()-1, x1.max()+1, 200)
kde1 = stats.gaussian_kde(x1)
kde2 = stats.gaussian_kde(x1, bw_method='silverman')
fig = plt.figure(figsize=(8, 6))
ax1 = fig.add_subplot(211)
ax1.plot(x1, np.zeros(x1.shape), 'b+', ms=12) # rug plot
ax1.plot(xs, kde1(xs), 'k-', label="Scott's Rule")
ax1.plot(xs, kde2(xs), 'b-', label="Silverman's Rule")
ax1.plot(xs, stats.norm.pdf(xs), 'r--', label="True PDF")
ax1.set_xlabel('x')
ax1.set_ylabel('Density')
ax1.set_title("Normal (top) and Student's T$_{df=5}$ (bottom) distributions")
ax1.legend(loc=1)
x2 = stats.t.rvs(5, size=200, random_state=rng) # random data, T distribution
xs = np.linspace(x2.min() - 1, x2.max() + 1, 200)
kde3 = stats.gaussian_kde(x2)
kde4 = stats.gaussian_kde(x2, bw_method='silverman')
ax2 = fig.add_subplot(212)
ax2.plot(x2, np.zeros(x2.shape), 'b+', ms=12) # rug plot
ax2.plot(xs, kde3(xs), 'k-', label="Scott's Rule")
ax2.plot(xs, kde4(xs), 'b-', label="Silverman's Rule")
ax2.plot(xs, stats.t.pdf(xs, 5), 'r--', label="True PDF")
ax2.set_xlabel('x')
ax2.set_ylabel('Density')
plt.show()
|
bsd-3-clause
|
DavidNorman/tensorflow
|
tensorflow/python/estimator/inputs/pandas_io.py
|
41
|
1293
|
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""pandas_io python module.
Importing from tensorflow.python.estimator is unsupported
and will soon break!
"""
# pylint: disable=unused-import,g-bad-import-order,g-import-not-at-top,wildcard-import
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow_estimator.python.estimator.inputs import pandas_io
# Include attrs that start with single underscore.
_HAS_DYNAMIC_ATTRIBUTES = True
pandas_io.__all__ = [s for s in dir(pandas_io) if not s.startswith('__')]
from tensorflow_estimator.python.estimator.inputs.pandas_io import *
|
apache-2.0
|
bzero/statsmodels
|
statsmodels/tools/tests/test_grouputils.py
|
31
|
11494
|
import numpy as np
import pandas as pd
from statsmodels.tools.grouputils import Grouping
from statsmodels.tools.tools import categorical
from statsmodels.datasets import grunfeld, anes96
from pandas.util import testing as ptesting
class CheckGrouping(object):
def test_reindex(self):
# smoke test
self.grouping.reindex(self.grouping.index)
def test_count_categories(self):
self.grouping.count_categories(level=0)
np.testing.assert_equal(self.grouping.counts, self.expected_counts)
def test_sort(self):
# data frame
sorted_data, index = self.grouping.sort(self.data)
expected_sorted_data = self.data.sort_index()
ptesting.assert_frame_equal(sorted_data, expected_sorted_data)
np.testing.assert_(isinstance(sorted_data, pd.DataFrame))
np.testing.assert_(not index.equals(self.grouping.index))
# make sure it copied
if hasattr(sorted_data, 'equals'): # newer pandas
np.testing.assert_(not sorted_data.equals(self.data))
# 2d arrays
sorted_data, index = self.grouping.sort(self.data.values)
np.testing.assert_array_equal(sorted_data,
expected_sorted_data.values)
np.testing.assert_(isinstance(sorted_data, np.ndarray))
# 1d series
series = self.data[self.data.columns[0]]
sorted_data, index = self.grouping.sort(series)
expected_sorted_data = series.sort_index()
ptesting.assert_series_equal(sorted_data, expected_sorted_data)
np.testing.assert_(isinstance(sorted_data, pd.Series))
if hasattr(sorted_data, 'equals'):
np.testing.assert_(not sorted_data.equals(series))
# 1d array
array = series.values
sorted_data, index = self.grouping.sort(array)
expected_sorted_data = series.sort_index().values
np.testing.assert_array_equal(sorted_data, expected_sorted_data)
np.testing.assert_(isinstance(sorted_data, np.ndarray))
def test_transform_dataframe(self):
names = self.data.index.names
transformed_dataframe = self.grouping.transform_dataframe(
self.data,
lambda x : x.mean(),
level=0)
expected = self.data.reset_index().groupby(names[0]
).apply(lambda x : x.mean())[
self.data.columns]
np.testing.assert_array_equal(transformed_dataframe,
expected.values)
if len(names) > 1:
transformed_dataframe = self.grouping.transform_dataframe(
self.data, lambda x : x.mean(),
level=1)
expected = self.data.reset_index().groupby(names[1]
).apply(lambda x :
x.mean())[
self.data.columns]
np.testing.assert_array_equal(transformed_dataframe,
expected.values)
def test_transform_array(self):
names = self.data.index.names
transformed_array = self.grouping.transform_array(
self.data.values,
lambda x : x.mean(),
level=0)
expected = self.data.reset_index().groupby(names[0]
).apply(lambda x : x.mean())[
self.data.columns]
np.testing.assert_array_equal(transformed_array,
expected.values)
if len(names) > 1:
transformed_array = self.grouping.transform_array(
self.data.values,
lambda x : x.mean(), level=1)
expected = self.data.reset_index().groupby(names[1]
).apply(lambda x :
x.mean())[
self.data.columns]
np.testing.assert_array_equal(transformed_array,
expected.values)
def test_transform_slices(self):
names = self.data.index.names
transformed_slices = self.grouping.transform_slices(
self.data.values,
lambda x, idx : x.mean(0),
level=0)
expected = self.data.reset_index().groupby(names[0]).mean()[
self.data.columns]
np.testing.assert_allclose(transformed_slices, expected.values,
rtol=1e-12, atol=1e-25)
if len(names) > 1:
transformed_slices = self.grouping.transform_slices(
self.data.values,
lambda x, idx : x.mean(0),
level=1)
expected = self.data.reset_index().groupby(names[1]
).mean()[
self.data.columns]
np.testing.assert_allclose(transformed_slices, expected.values,
rtol=1e-12, atol=1e-25)
def test_dummies_groups(self):
# smoke test, calls dummy_sparse under the hood
self.grouping.dummies_groups()
if len(self.grouping.group_names) > 1:
self.grouping.dummies_groups(level=1)
def test_dummy_sparse(self):
data = self.data
self.grouping.dummy_sparse()
expected = categorical(data.index.get_level_values(0).values,
drop=True)
np.testing.assert_equal(self.grouping._dummies.toarray(), expected)
if len(self.grouping.group_names) > 1:
self.grouping.dummy_sparse(level=1)
expected = categorical(data.index.get_level_values(1).values,
drop=True)
np.testing.assert_equal(self.grouping._dummies.toarray(),
expected)
class TestMultiIndexGrouping(CheckGrouping):
@classmethod
def setupClass(cls):
grun_data = grunfeld.load_pandas().data
multi_index_data = grun_data.set_index(['firm', 'year'])
multi_index_panel = multi_index_data.index
cls.grouping = Grouping(multi_index_panel)
cls.data = multi_index_data
cls.expected_counts = [20] * 11
class TestIndexGrouping(CheckGrouping):
@classmethod
def setupClass(cls):
grun_data = grunfeld.load_pandas().data
index_data = grun_data.set_index(['firm'])
index_group = index_data.index
cls.grouping = Grouping(index_group)
cls.data = index_data
cls.expected_counts = [20] * 11
def test_init_api():
# make a multi-index panel
grun_data = grunfeld.load_pandas().data
multi_index_panel = grun_data.set_index(['firm', 'year']).index
grouping = Grouping(multi_index_panel)
# check group_names
np.testing.assert_array_equal(grouping.group_names, ['firm', 'year'])
# check shape
np.testing.assert_array_equal(grouping.index_shape, (11, 20))
# check index_int
np.testing.assert_array_equal(grouping.labels,
[[ 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5,
5, 5, 5, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
8, 8, 8, 8, 8, 8, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
4, 4, 4, 4, 4, 4, 4, 4, 4, 2, 2, 2, 2, 2, 2, 2, 2,
2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 1, 1, 1, 1, 1,
1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 7, 7,
7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7,
7, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9,
9, 9, 9, 9, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10,
10, 10, 10, 10, 10, 10, 10, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6,
6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 3, 3, 3, 3, 3, 3, 3,
3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[ 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16,
17, 18, 19, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13,
14, 15, 16, 17, 18, 19, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10,
11, 12, 13, 14, 15, 16, 17, 18, 19, 0, 1, 2, 3, 4, 5, 6, 7,
8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 0, 1, 2, 3, 4,
5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 0, 1,
2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18,
19, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15,
16, 17, 18, 19, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12,
13, 14, 15, 16, 17, 18, 19, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9,
10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 0, 1, 2, 3, 4, 5, 6,
7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 0, 1, 2, 3,
4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19]])
grouping = Grouping(multi_index_panel, names=['firms', 'year'])
np.testing.assert_array_equal(grouping.group_names, ['firms', 'year'])
# make a multi-index grouping
anes_data = anes96.load_pandas().data
multi_index_groups = anes_data.set_index(['educ', 'income',
'TVnews']).index
grouping = Grouping(multi_index_groups)
np.testing.assert_array_equal(grouping.group_names,
['educ', 'income', 'TVnews'])
np.testing.assert_array_equal(grouping.index_shape, (7, 24, 8))
# make a list multi-index panel
list_panel = multi_index_panel.tolist()
grouping = Grouping(list_panel, names=['firms', 'year'])
np.testing.assert_array_equal(grouping.group_names, ['firms', 'year'])
np.testing.assert_array_equal(grouping.index_shape, (11, 20))
# make a list multi-index grouping
list_groups = multi_index_groups.tolist()
grouping = Grouping(list_groups, names=['educ', 'income', 'TVnews'])
np.testing.assert_array_equal(grouping.group_names,
['educ', 'income', 'TVnews'])
np.testing.assert_array_equal(grouping.index_shape, (7, 24, 8))
# single-variable index grouping
index_group = multi_index_panel.get_level_values(0)
grouping = Grouping(index_group)
    # the original multi_index_panel had its name changed in place above
np.testing.assert_array_equal(grouping.group_names, ['firms'])
np.testing.assert_array_equal(grouping.index_shape, (220,))
# single variable list grouping
list_group = multi_index_panel.get_level_values(0).tolist()
grouping = Grouping(list_group)
np.testing.assert_array_equal(grouping.group_names, ["group0"])
np.testing.assert_array_equal(grouping.index_shape, 11*20)
# test generic group names
grouping = Grouping(list_groups)
np.testing.assert_array_equal(grouping.group_names,
['group0', 'group1', 'group2'])
|
bsd-3-clause
|
jaidevd/scikit-learn
|
sklearn/tests/test_multiclass.py
|
26
|
26681
|
import numpy as np
import scipy.sparse as sp
from sklearn.utils.testing import assert_array_equal, assert_raises_regex
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_false
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_warns
from sklearn.utils.testing import assert_greater
from sklearn.utils.testing import assert_raise_message
from sklearn.multiclass import OneVsRestClassifier
from sklearn.multiclass import OneVsOneClassifier
from sklearn.multiclass import OutputCodeClassifier
from sklearn.utils.multiclass import check_classification_targets, type_of_target
from sklearn.utils import shuffle
from sklearn.metrics import precision_score
from sklearn.metrics import recall_score
from sklearn.svm import LinearSVC, SVC
from sklearn.naive_bayes import MultinomialNB
from sklearn.linear_model import (LinearRegression, Lasso, ElasticNet, Ridge,
Perceptron, LogisticRegression,
SGDClassifier)
from sklearn.tree import DecisionTreeClassifier, DecisionTreeRegressor
from sklearn.model_selection import GridSearchCV, cross_val_score
from sklearn.pipeline import Pipeline
from sklearn import svm
from sklearn import datasets
from sklearn.externals.six.moves import zip
iris = datasets.load_iris()
rng = np.random.RandomState(0)
perm = rng.permutation(iris.target.size)
iris.data = iris.data[perm]
iris.target = iris.target[perm]
n_classes = 3
def test_ovr_exceptions():
ovr = OneVsRestClassifier(LinearSVC(random_state=0))
assert_raises(ValueError, ovr.predict, [])
# Fail on multioutput data
assert_raises(ValueError, OneVsRestClassifier(MultinomialNB()).fit,
np.array([[1, 0], [0, 1]]),
np.array([[1, 2], [3, 1]]))
assert_raises(ValueError, OneVsRestClassifier(MultinomialNB()).fit,
np.array([[1, 0], [0, 1]]),
np.array([[1.5, 2.4], [3.1, 0.8]]))
def test_check_classification_targets():
# Test that check_classification_target return correct type. #5782
y = np.array([0.0, 1.1, 2.0, 3.0])
msg = type_of_target(y)
assert_raise_message(ValueError, msg, check_classification_targets, y)
def test_ovr_fit_predict():
# A classifier which implements decision_function.
ovr = OneVsRestClassifier(LinearSVC(random_state=0))
pred = ovr.fit(iris.data, iris.target).predict(iris.data)
assert_equal(len(ovr.estimators_), n_classes)
clf = LinearSVC(random_state=0)
pred2 = clf.fit(iris.data, iris.target).predict(iris.data)
assert_equal(np.mean(iris.target == pred), np.mean(iris.target == pred2))
# A classifier which implements predict_proba.
ovr = OneVsRestClassifier(MultinomialNB())
pred = ovr.fit(iris.data, iris.target).predict(iris.data)
assert_greater(np.mean(iris.target == pred), 0.65)
def test_ovr_partial_fit():
    # Test if partial_fit is working as intended
X, y = shuffle(iris.data, iris.target, random_state=0)
ovr = OneVsRestClassifier(MultinomialNB())
ovr.partial_fit(X[:100], y[:100], np.unique(y))
ovr.partial_fit(X[100:], y[100:])
pred = ovr.predict(X)
ovr2 = OneVsRestClassifier(MultinomialNB())
pred2 = ovr2.fit(X, y).predict(X)
assert_almost_equal(pred, pred2)
assert_equal(len(ovr.estimators_), len(np.unique(y)))
assert_greater(np.mean(y == pred), 0.65)
    # Test when mini-batches don't have all classes
# with SGDClassifier
X = np.abs(np.random.randn(14, 2))
y = [1, 1, 1, 1, 2, 3, 3, 0, 0, 2, 3, 1, 2, 3]
ovr = OneVsRestClassifier(SGDClassifier(n_iter=1, shuffle=False,
random_state=0))
ovr.partial_fit(X[:7], y[:7], np.unique(y))
ovr.partial_fit(X[7:], y[7:])
pred = ovr.predict(X)
ovr1 = OneVsRestClassifier(SGDClassifier(n_iter=1, shuffle=False,
random_state=0))
pred1 = ovr1.fit(X, y).predict(X)
assert_equal(np.mean(pred == y), np.mean(pred1 == y))
def test_ovr_partial_fit_exceptions():
ovr = OneVsRestClassifier(MultinomialNB())
X = np.abs(np.random.randn(14, 2))
y = [1, 1, 1, 1, 2, 3, 3, 0, 0, 2, 3, 1, 2, 3]
ovr.partial_fit(X[:7], y[:7], np.unique(y))
# A new class value which was not in the first call of partial_fit
# It should raise ValueError
y1 = [5] + y[7:-1]
assert_raises_regex(ValueError, "Mini-batch contains \[.+\] while classes"
" must be subset of \[.+\]",
ovr.partial_fit, X=X[7:], y=y1)
def test_ovr_ovo_regressor():
    # test that ovr and ovo work on regressors which don't have
    # a decision_function
ovr = OneVsRestClassifier(DecisionTreeRegressor())
pred = ovr.fit(iris.data, iris.target).predict(iris.data)
assert_equal(len(ovr.estimators_), n_classes)
assert_array_equal(np.unique(pred), [0, 1, 2])
# we are doing something sensible
assert_greater(np.mean(pred == iris.target), .9)
ovr = OneVsOneClassifier(DecisionTreeRegressor())
pred = ovr.fit(iris.data, iris.target).predict(iris.data)
assert_equal(len(ovr.estimators_), n_classes * (n_classes - 1) / 2)
assert_array_equal(np.unique(pred), [0, 1, 2])
# we are doing something sensible
assert_greater(np.mean(pred == iris.target), .9)
def test_ovr_fit_predict_sparse():
for sparse in [sp.csr_matrix, sp.csc_matrix, sp.coo_matrix, sp.dok_matrix,
sp.lil_matrix]:
base_clf = MultinomialNB(alpha=1)
X, Y = datasets.make_multilabel_classification(n_samples=100,
n_features=20,
n_classes=5,
n_labels=3,
length=50,
allow_unlabeled=True,
random_state=0)
X_train, Y_train = X[:80], Y[:80]
X_test = X[80:]
clf = OneVsRestClassifier(base_clf).fit(X_train, Y_train)
Y_pred = clf.predict(X_test)
clf_sprs = OneVsRestClassifier(base_clf).fit(X_train, sparse(Y_train))
Y_pred_sprs = clf_sprs.predict(X_test)
assert_true(clf.multilabel_)
assert_true(sp.issparse(Y_pred_sprs))
assert_array_equal(Y_pred_sprs.toarray(), Y_pred)
# Test predict_proba
Y_proba = clf_sprs.predict_proba(X_test)
# predict assigns a label if the probability that the
# sample has the label is greater than 0.5.
pred = Y_proba > .5
assert_array_equal(pred, Y_pred_sprs.toarray())
# Test decision_function
clf_sprs = OneVsRestClassifier(svm.SVC()).fit(X_train, sparse(Y_train))
dec_pred = (clf_sprs.decision_function(X_test) > 0).astype(int)
assert_array_equal(dec_pred, clf_sprs.predict(X_test).toarray())
def test_ovr_always_present():
# Test that ovr works with classes that are always present or absent.
    # Note: this tests the case where _ConstantPredictor is utilised
X = np.ones((10, 2))
X[:5, :] = 0
# Build an indicator matrix where two features are always on.
# As list of lists, it would be: [[int(i >= 5), 2, 3] for i in range(10)]
y = np.zeros((10, 3))
y[5:, 0] = 1
y[:, 1] = 1
y[:, 2] = 1
ovr = OneVsRestClassifier(LogisticRegression())
assert_warns(UserWarning, ovr.fit, X, y)
y_pred = ovr.predict(X)
assert_array_equal(np.array(y_pred), np.array(y))
y_pred = ovr.decision_function(X)
assert_equal(np.unique(y_pred[:, -2:]), 1)
y_pred = ovr.predict_proba(X)
assert_array_equal(y_pred[:, -1], np.ones(X.shape[0]))
# y has a constantly absent label
y = np.zeros((10, 2))
y[5:, 0] = 1 # variable label
ovr = OneVsRestClassifier(LogisticRegression())
assert_warns(UserWarning, ovr.fit, X, y)
y_pred = ovr.predict_proba(X)
assert_array_equal(y_pred[:, -1], np.zeros(X.shape[0]))
def test_ovr_multiclass():
# Toy dataset where features correspond directly to labels.
X = np.array([[0, 0, 5], [0, 5, 0], [3, 0, 0], [0, 0, 6], [6, 0, 0]])
y = ["eggs", "spam", "ham", "eggs", "ham"]
Y = np.array([[0, 0, 1],
[0, 1, 0],
[1, 0, 0],
[0, 0, 1],
[1, 0, 0]])
classes = set("ham eggs spam".split())
for base_clf in (MultinomialNB(), LinearSVC(random_state=0),
LinearRegression(), Ridge(),
ElasticNet()):
clf = OneVsRestClassifier(base_clf).fit(X, y)
assert_equal(set(clf.classes_), classes)
y_pred = clf.predict(np.array([[0, 0, 4]]))[0]
assert_equal(set(y_pred), set("eggs"))
# test input as label indicator matrix
clf = OneVsRestClassifier(base_clf).fit(X, Y)
y_pred = clf.predict([[0, 0, 4]])[0]
assert_array_equal(y_pred, [0, 0, 1])
def test_ovr_binary():
# Toy dataset where features correspond directly to labels.
X = np.array([[0, 0, 5], [0, 5, 0], [3, 0, 0], [0, 0, 6], [6, 0, 0]])
y = ["eggs", "spam", "spam", "eggs", "spam"]
Y = np.array([[0, 1, 1, 0, 1]]).T
classes = set("eggs spam".split())
def conduct_test(base_clf, test_predict_proba=False):
clf = OneVsRestClassifier(base_clf).fit(X, y)
assert_equal(set(clf.classes_), classes)
y_pred = clf.predict(np.array([[0, 0, 4]]))[0]
assert_equal(set(y_pred), set("eggs"))
if test_predict_proba:
X_test = np.array([[0, 0, 4]])
probabilities = clf.predict_proba(X_test)
assert_equal(2, len(probabilities[0]))
assert_equal(clf.classes_[np.argmax(probabilities, axis=1)],
clf.predict(X_test))
# test input as label indicator matrix
clf = OneVsRestClassifier(base_clf).fit(X, Y)
y_pred = clf.predict([[3, 0, 0]])[0]
assert_equal(y_pred, 1)
for base_clf in (LinearSVC(random_state=0), LinearRegression(),
Ridge(), ElasticNet()):
conduct_test(base_clf)
for base_clf in (MultinomialNB(), SVC(probability=True),
LogisticRegression()):
conduct_test(base_clf, test_predict_proba=True)
def test_ovr_multilabel():
# Toy dataset where features correspond directly to labels.
X = np.array([[0, 4, 5], [0, 5, 0], [3, 3, 3], [4, 0, 6], [6, 0, 0]])
y = np.array([[0, 1, 1],
[0, 1, 0],
[1, 1, 1],
[1, 0, 1],
[1, 0, 0]])
for base_clf in (MultinomialNB(), LinearSVC(random_state=0),
LinearRegression(), Ridge(),
ElasticNet(), Lasso(alpha=0.5)):
clf = OneVsRestClassifier(base_clf).fit(X, y)
y_pred = clf.predict([[0, 4, 4]])[0]
assert_array_equal(y_pred, [0, 1, 1])
assert_true(clf.multilabel_)
def test_ovr_fit_predict_svc():
ovr = OneVsRestClassifier(svm.SVC())
ovr.fit(iris.data, iris.target)
assert_equal(len(ovr.estimators_), 3)
assert_greater(ovr.score(iris.data, iris.target), .9)
def test_ovr_multilabel_dataset():
base_clf = MultinomialNB(alpha=1)
for au, prec, recall in zip((True, False), (0.51, 0.66), (0.51, 0.80)):
X, Y = datasets.make_multilabel_classification(n_samples=100,
n_features=20,
n_classes=5,
n_labels=2,
length=50,
allow_unlabeled=au,
random_state=0)
X_train, Y_train = X[:80], Y[:80]
X_test, Y_test = X[80:], Y[80:]
clf = OneVsRestClassifier(base_clf).fit(X_train, Y_train)
Y_pred = clf.predict(X_test)
assert_true(clf.multilabel_)
assert_almost_equal(precision_score(Y_test, Y_pred, average="micro"),
prec,
decimal=2)
assert_almost_equal(recall_score(Y_test, Y_pred, average="micro"),
recall,
decimal=2)
def test_ovr_multilabel_predict_proba():
base_clf = MultinomialNB(alpha=1)
for au in (False, True):
X, Y = datasets.make_multilabel_classification(n_samples=100,
n_features=20,
n_classes=5,
n_labels=3,
length=50,
allow_unlabeled=au,
random_state=0)
X_train, Y_train = X[:80], Y[:80]
X_test = X[80:]
clf = OneVsRestClassifier(base_clf).fit(X_train, Y_train)
# Decision function only estimator.
decision_only = OneVsRestClassifier(svm.SVR()).fit(X_train, Y_train)
assert_false(hasattr(decision_only, 'predict_proba'))
# Estimator with predict_proba disabled, depending on parameters.
decision_only = OneVsRestClassifier(svm.SVC(probability=False))
assert_false(hasattr(decision_only, 'predict_proba'))
decision_only.fit(X_train, Y_train)
assert_false(hasattr(decision_only, 'predict_proba'))
assert_true(hasattr(decision_only, 'decision_function'))
# Estimator which can get predict_proba enabled after fitting
gs = GridSearchCV(svm.SVC(probability=False),
param_grid={'probability': [True]})
proba_after_fit = OneVsRestClassifier(gs)
assert_false(hasattr(proba_after_fit, 'predict_proba'))
proba_after_fit.fit(X_train, Y_train)
assert_true(hasattr(proba_after_fit, 'predict_proba'))
Y_pred = clf.predict(X_test)
Y_proba = clf.predict_proba(X_test)
# predict assigns a label if the probability that the
# sample has the label is greater than 0.5.
pred = Y_proba > .5
assert_array_equal(pred, Y_pred)
def test_ovr_single_label_predict_proba():
base_clf = MultinomialNB(alpha=1)
X, Y = iris.data, iris.target
X_train, Y_train = X[:80], Y[:80]
X_test = X[80:]
clf = OneVsRestClassifier(base_clf).fit(X_train, Y_train)
# Decision function only estimator.
decision_only = OneVsRestClassifier(svm.SVR()).fit(X_train, Y_train)
assert_false(hasattr(decision_only, 'predict_proba'))
Y_pred = clf.predict(X_test)
Y_proba = clf.predict_proba(X_test)
assert_almost_equal(Y_proba.sum(axis=1), 1.0)
# predict assigns a label if the probability that the
# sample has the label is greater than 0.5.
pred = np.array([l.argmax() for l in Y_proba])
assert_false((pred - Y_pred).any())
def test_ovr_multilabel_decision_function():
X, Y = datasets.make_multilabel_classification(n_samples=100,
n_features=20,
n_classes=5,
n_labels=3,
length=50,
allow_unlabeled=True,
random_state=0)
X_train, Y_train = X[:80], Y[:80]
X_test = X[80:]
clf = OneVsRestClassifier(svm.SVC()).fit(X_train, Y_train)
assert_array_equal((clf.decision_function(X_test) > 0).astype(int),
clf.predict(X_test))
def test_ovr_single_label_decision_function():
X, Y = datasets.make_classification(n_samples=100,
n_features=20,
random_state=0)
X_train, Y_train = X[:80], Y[:80]
X_test = X[80:]
clf = OneVsRestClassifier(svm.SVC()).fit(X_train, Y_train)
assert_array_equal(clf.decision_function(X_test).ravel() > 0,
clf.predict(X_test))
def test_ovr_gridsearch():
ovr = OneVsRestClassifier(LinearSVC(random_state=0))
Cs = [0.1, 0.5, 0.8]
cv = GridSearchCV(ovr, {'estimator__C': Cs})
cv.fit(iris.data, iris.target)
best_C = cv.best_estimator_.estimators_[0].C
assert_true(best_C in Cs)
def test_ovr_pipeline():
# Test with pipeline of length one
# This test is needed because the multiclass estimators may fail to detect
# the presence of predict_proba or decision_function.
clf = Pipeline([("tree", DecisionTreeClassifier())])
ovr_pipe = OneVsRestClassifier(clf)
ovr_pipe.fit(iris.data, iris.target)
ovr = OneVsRestClassifier(DecisionTreeClassifier())
ovr.fit(iris.data, iris.target)
assert_array_equal(ovr.predict(iris.data), ovr_pipe.predict(iris.data))
def test_ovr_coef_():
for base_classifier in [SVC(kernel='linear', random_state=0), LinearSVC(random_state=0)]:
# SVC has sparse coef with sparse input data
ovr = OneVsRestClassifier(base_classifier)
for X in [iris.data, sp.csr_matrix(iris.data)]:
# test with dense and sparse coef
ovr.fit(X, iris.target)
shape = ovr.coef_.shape
assert_equal(shape[0], n_classes)
assert_equal(shape[1], iris.data.shape[1])
# don't densify sparse coefficients
assert_equal(sp.issparse(ovr.estimators_[0].coef_), sp.issparse(ovr.coef_))
def test_ovr_coef_exceptions():
# Not fitted exception!
ovr = OneVsRestClassifier(LinearSVC(random_state=0))
# lambda is needed because we don't want coef_ to be evaluated right away
assert_raises(ValueError, lambda x: ovr.coef_, None)
# Doesn't have coef_ exception!
ovr = OneVsRestClassifier(DecisionTreeClassifier())
ovr.fit(iris.data, iris.target)
assert_raises(AttributeError, lambda x: ovr.coef_, None)
def test_ovo_exceptions():
ovo = OneVsOneClassifier(LinearSVC(random_state=0))
assert_raises(ValueError, ovo.predict, [])
def test_ovo_fit_on_list():
# Test that OneVsOne fitting works with a list of targets and yields the
# same output as predict from an array
ovo = OneVsOneClassifier(LinearSVC(random_state=0))
prediction_from_array = ovo.fit(iris.data, iris.target).predict(iris.data)
iris_data_list = [list(a) for a in iris.data]
prediction_from_list = ovo.fit(iris_data_list,
list(iris.target)).predict(iris_data_list)
assert_array_equal(prediction_from_array, prediction_from_list)
def test_ovo_fit_predict():
# A classifier which implements decision_function.
ovo = OneVsOneClassifier(LinearSVC(random_state=0))
ovo.fit(iris.data, iris.target).predict(iris.data)
assert_equal(len(ovo.estimators_), n_classes * (n_classes - 1) / 2)
# A classifier which implements predict_proba.
ovo = OneVsOneClassifier(MultinomialNB())
ovo.fit(iris.data, iris.target).predict(iris.data)
assert_equal(len(ovo.estimators_), n_classes * (n_classes - 1) / 2)
def test_ovo_partial_fit_predict():
X, y = shuffle(iris.data, iris.target)
ovo1 = OneVsOneClassifier(MultinomialNB())
ovo1.partial_fit(X[:100], y[:100], np.unique(y))
ovo1.partial_fit(X[100:], y[100:])
pred1 = ovo1.predict(X)
ovo2 = OneVsOneClassifier(MultinomialNB())
ovo2.fit(X, y)
pred2 = ovo2.predict(X)
assert_equal(len(ovo1.estimators_), n_classes * (n_classes - 1) / 2)
assert_greater(np.mean(y == pred1), 0.65)
assert_almost_equal(pred1, pred2)
# Test when mini-batches don't have all target classes
ovo1 = OneVsOneClassifier(MultinomialNB())
ovo1.partial_fit(iris.data[:60], iris.target[:60], np.unique(iris.target))
ovo1.partial_fit(iris.data[60:], iris.target[60:])
pred1 = ovo1.predict(iris.data)
ovo2 = OneVsOneClassifier(MultinomialNB())
pred2 = ovo2.fit(iris.data, iris.target).predict(iris.data)
assert_almost_equal(pred1, pred2)
assert_equal(len(ovo1.estimators_), len(np.unique(iris.target)))
assert_greater(np.mean(iris.target == pred1), 0.65)
def test_ovo_decision_function():
n_samples = iris.data.shape[0]
ovo_clf = OneVsOneClassifier(LinearSVC(random_state=0))
ovo_clf.fit(iris.data, iris.target)
decisions = ovo_clf.decision_function(iris.data)
assert_equal(decisions.shape, (n_samples, n_classes))
assert_array_equal(decisions.argmax(axis=1), ovo_clf.predict(iris.data))
# Compute the votes
votes = np.zeros((n_samples, n_classes))
k = 0
for i in range(n_classes):
for j in range(i + 1, n_classes):
pred = ovo_clf.estimators_[k].predict(iris.data)
votes[pred == 0, i] += 1
votes[pred == 1, j] += 1
k += 1
# Extract votes and verify
assert_array_equal(votes, np.round(decisions))
for class_idx in range(n_classes):
        # For each sample and each class, there are only 3 possible vote
        # levels because there are only 3 distinct class pairs and thus
        # 3 distinct binary classifiers.
# Therefore, sorting predictions based on votes would yield
# mostly tied predictions:
assert_true(set(votes[:, class_idx]).issubset(set([0., 1., 2.])))
# The OVO decision function on the other hand is able to resolve
# most of the ties on this data as it combines both the vote counts
# and the aggregated confidence levels of the binary classifiers
# to compute the aggregate decision function. The iris dataset
# has 150 samples with a couple of duplicates. The OvO decisions
# can resolve most of the ties:
assert_greater(len(np.unique(decisions[:, class_idx])), 146)
def test_ovo_gridsearch():
ovo = OneVsOneClassifier(LinearSVC(random_state=0))
Cs = [0.1, 0.5, 0.8]
cv = GridSearchCV(ovo, {'estimator__C': Cs})
cv.fit(iris.data, iris.target)
best_C = cv.best_estimator_.estimators_[0].C
assert_true(best_C in Cs)
def test_ovo_ties():
# Test that ties are broken using the decision function,
# not defaulting to the smallest label
X = np.array([[1, 2], [2, 1], [-2, 1], [-2, -1]])
y = np.array([2, 0, 1, 2])
multi_clf = OneVsOneClassifier(Perceptron(shuffle=False))
ovo_prediction = multi_clf.fit(X, y).predict(X)
ovo_decision = multi_clf.decision_function(X)
# Classifiers are in order 0-1, 0-2, 1-2
# Use decision_function to compute the votes and the normalized
# sum_of_confidences, which is used to disambiguate when there is a tie in
# votes.
votes = np.round(ovo_decision)
normalized_confidences = ovo_decision - votes
# For the first point, there is one vote per class
assert_array_equal(votes[0, :], 1)
# For the rest, there is no tie and the prediction is the argmax
assert_array_equal(np.argmax(votes[1:], axis=1), ovo_prediction[1:])
# For the tie, the prediction is the class with the highest score
assert_equal(ovo_prediction[0], normalized_confidences[0].argmax())
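def test_ovo_tie_breaking_sketch():
    # Illustrative sketch added to spell out the tie-breaking logic checked
    # above (not part of the original suite): for a hypothetical decision row
    # in which every class receives one vote, the rounded votes are tied and
    # the fractional parts (the normalized confidences) pick the winner.
    decision_row = np.array([1.2, 0.9, 0.9])
    votes = np.round(decision_row)
    normalized_confidences = decision_row - votes
    assert_array_equal(votes, [1., 1., 1.])
    assert_equal(normalized_confidences.argmax(), 0)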
def test_ovo_ties2():
# test that ties can not only be won by the first two labels
X = np.array([[1, 2], [2, 1], [-2, 1], [-2, -1]])
y_ref = np.array([2, 0, 1, 2])
# cycle through labels so that each label wins once
for i in range(3):
y = (y_ref + i) % 3
multi_clf = OneVsOneClassifier(Perceptron(shuffle=False))
ovo_prediction = multi_clf.fit(X, y).predict(X)
assert_equal(ovo_prediction[0], i % 3)
def test_ovo_string_y():
# Test that the OvO doesn't mess up the encoding of string labels
X = np.eye(4)
y = np.array(['a', 'b', 'c', 'd'])
ovo = OneVsOneClassifier(LinearSVC())
ovo.fit(X, y)
assert_array_equal(y, ovo.predict(X))
def test_ecoc_exceptions():
ecoc = OutputCodeClassifier(LinearSVC(random_state=0))
assert_raises(ValueError, ecoc.predict, [])
def test_ecoc_fit_predict():
# A classifier which implements decision_function.
ecoc = OutputCodeClassifier(LinearSVC(random_state=0),
code_size=2, random_state=0)
ecoc.fit(iris.data, iris.target).predict(iris.data)
assert_equal(len(ecoc.estimators_), n_classes * 2)
# A classifier which implements predict_proba.
ecoc = OutputCodeClassifier(MultinomialNB(), code_size=2, random_state=0)
ecoc.fit(iris.data, iris.target).predict(iris.data)
assert_equal(len(ecoc.estimators_), n_classes * 2)
def test_ecoc_gridsearch():
ecoc = OutputCodeClassifier(LinearSVC(random_state=0),
random_state=0)
Cs = [0.1, 0.5, 0.8]
cv = GridSearchCV(ecoc, {'estimator__C': Cs})
cv.fit(iris.data, iris.target)
best_C = cv.best_estimator_.estimators_[0].C
assert_true(best_C in Cs)
def test_pairwise_indices():
clf_precomputed = svm.SVC(kernel='precomputed')
X, y = iris.data, iris.target
ovr_false = OneVsOneClassifier(clf_precomputed)
linear_kernel = np.dot(X, X.T)
ovr_false.fit(linear_kernel, y)
n_estimators = len(ovr_false.estimators_)
precomputed_indices = ovr_false.pairwise_indices_
for idx in precomputed_indices:
assert_equal(idx.shape[0] * n_estimators / (n_estimators - 1),
linear_kernel.shape[0])
def test_pairwise_attribute():
clf_precomputed = svm.SVC(kernel='precomputed')
clf_notprecomputed = svm.SVC()
for MultiClassClassifier in [OneVsRestClassifier, OneVsOneClassifier]:
ovr_false = MultiClassClassifier(clf_notprecomputed)
assert_false(ovr_false._pairwise)
ovr_true = MultiClassClassifier(clf_precomputed)
assert_true(ovr_true._pairwise)
def test_pairwise_cross_val_score():
clf_precomputed = svm.SVC(kernel='precomputed')
clf_notprecomputed = svm.SVC(kernel='linear')
X, y = iris.data, iris.target
for MultiClassClassifier in [OneVsRestClassifier, OneVsOneClassifier]:
ovr_false = MultiClassClassifier(clf_notprecomputed)
ovr_true = MultiClassClassifier(clf_precomputed)
linear_kernel = np.dot(X, X.T)
score_precomputed = cross_val_score(ovr_true, linear_kernel, y)
score_linear = cross_val_score(ovr_false, X, y)
assert_array_equal(score_precomputed, score_linear)
|
bsd-3-clause
|
rvraghav93/scikit-learn
|
doc/datasets/mldata_fixture.py
|
367
|
1183
|
"""Fixture module to skip the datasets loading when offline
Mock urllib2 access to mldata.org and create a temporary data folder.
"""
from os import makedirs
from os.path import join
import numpy as np
import tempfile
import shutil
from sklearn import datasets
from sklearn.utils.testing import install_mldata_mock
from sklearn.utils.testing import uninstall_mldata_mock
def globs(globs):
# Create a temporary folder for the data fetcher
global custom_data_home
custom_data_home = tempfile.mkdtemp()
makedirs(join(custom_data_home, 'mldata'))
globs['custom_data_home'] = custom_data_home
return globs
def setup_module():
# setup mock urllib2 module to avoid downloading from mldata.org
install_mldata_mock({
'mnist-original': {
'data': np.empty((70000, 784)),
'label': np.repeat(np.arange(10, dtype='d'), 7000),
},
'iris': {
'data': np.empty((150, 4)),
},
'datasets-uci-iris': {
'double0': np.empty((150, 4)),
'class': np.empty((150,)),
},
})
def teardown_module():
uninstall_mldata_mock()
shutil.rmtree(custom_data_home)
|
bsd-3-clause
|
nicproulx/mne-python
|
mne/dipole.py
|
2
|
47344
|
"""Single-dipole functions and classes."""
# Authors: Alexandre Gramfort <[email protected]>
# Eric Larson <[email protected]>
#
# License: Simplified BSD
from copy import deepcopy
from functools import partial
import re
import numpy as np
from scipy import linalg
from .cov import read_cov, _get_whitener_data
from .io.constants import FIFF
from .io.pick import pick_types, channel_type
from .io.proj import make_projector, _needs_eeg_average_ref_proj
from .bem import _fit_sphere
from .evoked import _read_evoked, _aspect_rev, _write_evokeds
from .transforms import (_print_coord_trans, _coord_frame_name,
apply_trans, invert_transform, Transform)
from .viz.evoked import _plot_evoked
from .forward._make_forward import (_get_trans, _setup_bem,
_prep_meg_channels, _prep_eeg_channels)
from .forward._compute_forward import (_compute_forwards_meeg,
_prep_field_computation)
from .externals.six import string_types
from .surface import (transform_surface_to, _normalize_vectors,
_get_ico_surface, _compute_nearest)
from .bem import _bem_find_surface, _bem_explain_surface
from .source_space import (_make_volume_source_space, SourceSpaces,
_points_outside_surface)
from .parallel import parallel_func
from .utils import (logger, verbose, _time_mask, warn, _check_fname,
check_fname, _pl)
class Dipole(object):
"""Dipole class for sequential dipole fits.
.. note:: This class should usually not be instantiated directly,
instead :func:`mne.read_dipole` should be used.
Used to store positions, orientations, amplitudes, times, goodness of fit
of dipoles, typically obtained with Neuromag/xfit, mne_dipole_fit
or certain inverse solvers. Note that dipole position vectors are given in
the head coordinate frame.
Parameters
----------
times : array, shape (n_dipoles,)
The time instants at which each dipole was fitted (sec).
pos : array, shape (n_dipoles, 3)
The dipoles positions (m) in head coordinates.
amplitude : array, shape (n_dipoles,)
The amplitude of the dipoles (Am).
ori : array, shape (n_dipoles, 3)
The dipole orientations (normalized to unit length).
gof : array, shape (n_dipoles,)
The goodness of fit.
name : str | None
Name of the dipole.
See Also
--------
read_dipole
DipoleFixed
Notes
-----
This class is for sequential dipole fits, where the position
changes as a function of time. For fixed dipole fits, where the
position is fixed as a function of time, use :class:`mne.DipoleFixed`.
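    Examples
    --------
    A minimal, illustrative sketch with made-up values (not from a real fit):
    >>> dip = Dipole(times=[0.1, 0.2],
    ...              pos=[[0., 0., 0.07], [0., 0., 0.072]],
    ...              amplitude=[10e-9, 20e-9],
    ...              ori=[[0., 0., 1.], [0., 0., 1.]],
    ...              gof=[80., 90.])  # doctest: +SKIP
    >>> len(dip)  # doctest: +SKIP
    2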
"""
def __init__(self, times, pos, amplitude, ori, gof,
name=None): # noqa: D102
self.times = np.array(times)
self.pos = np.array(pos)
self.amplitude = np.array(amplitude)
self.ori = np.array(ori)
self.gof = np.array(gof)
self.name = name
def __repr__(self): # noqa: D105
s = "n_times : %s" % len(self.times)
s += ", tmin : %s" % np.min(self.times)
s += ", tmax : %s" % np.max(self.times)
return "<Dipole | %s>" % s
def save(self, fname):
"""Save dipole in a .dip file.
Parameters
----------
fname : str
The name of the .dip file.
"""
fmt = " %7.1f %7.1f %8.2f %8.2f %8.2f %8.3f %8.3f %8.3f %8.3f %6.1f"
# NB CoordinateSystem is hard-coded as Head here
with open(fname, 'wb') as fid:
fid.write('# CoordinateSystem "Head"\n'.encode('utf-8'))
fid.write('# begin end X (mm) Y (mm) Z (mm)'
' Q(nAm) Qx(nAm) Qy(nAm) Qz(nAm) g/%\n'
.encode('utf-8'))
t = self.times[:, np.newaxis] * 1000.
gof = self.gof[:, np.newaxis]
amp = 1e9 * self.amplitude[:, np.newaxis]
out = np.concatenate((t, t, self.pos / 1e-3, amp,
self.ori * amp, gof), axis=-1)
np.savetxt(fid, out, fmt=fmt)
if self.name is not None:
fid.write(('## Name "%s dipoles" Style "Dipoles"'
% self.name).encode('utf-8'))
def crop(self, tmin=None, tmax=None):
"""Crop data to a given time interval.
Parameters
----------
tmin : float | None
Start time of selection in seconds.
tmax : float | None
End time of selection in seconds.
"""
sfreq = None
if len(self.times) > 1:
sfreq = 1. / np.median(np.diff(self.times))
mask = _time_mask(self.times, tmin, tmax, sfreq=sfreq)
for attr in ('times', 'pos', 'gof', 'amplitude', 'ori'):
setattr(self, attr, getattr(self, attr)[mask])
def copy(self):
"""Copy the Dipoles object.
Returns
-------
dip : instance of Dipole
The copied dipole instance.
"""
return deepcopy(self)
@verbose
def plot_locations(self, trans, subject, subjects_dir=None,
bgcolor=(1, 1, 1), opacity=0.3,
brain_color=(1, 1, 0), fig_name=None,
fig_size=(600, 600), mode=None,
scale_factor=0.1e-1, colors=None,
coord_frame='mri', idx='gof',
show_all=True, ax=None, block=False,
show=True, verbose=None):
"""Plot dipole locations in 3d.
.. warning:: Using mode with option 'cone' or 'sphere' will be
deprecated in version 0.15.
Parameters
----------
trans : dict
The mri to head trans.
subject : str
The subject name corresponding to FreeSurfer environment
variable SUBJECT.
subjects_dir : None | str
The path to the freesurfer subjects reconstructions.
It corresponds to Freesurfer environment variable SUBJECTS_DIR.
The default is None.
bgcolor : tuple of length 3
Background color in 3D.
opacity : float in [0, 1]
Opacity of brain mesh.
brain_color : tuple of length 3
Brain color.
fig_name : str
Mayavi figure name.
fig_size : tuple of length 2
Mayavi figure size.
mode : str
Should be ``'cone'`` or ``'sphere'`` or ``'orthoview'`` to specify
            how the dipoles should be shown. If 'orthoview', matplotlib is
            used; otherwise, mayavi is used.
.. versionadded:: 0.14.0
scale_factor : float
The scaling applied to amplitudes for the plot. Only applies for
modes ``cone`` and ``sphere``.
        colors : list of colors | None
Color to plot with each dipole. If None default colors are used.
coord_frame : str
Coordinate frame to use, 'head' or 'mri'. Defaults to 'mri'.
.. versionadded:: 0.14.0
idx : int | 'gof' | 'amplitude'
Index of the initially plotted dipole. Can also be 'gof' to plot
the dipole with highest goodness of fit value or 'amplitude' to
plot the dipole with the highest amplitude. The dipoles can also be
browsed through using up/down arrow keys or mouse scroll. Defaults
to 'gof'. Only used if mode equals 'orthoview'.
.. versionadded:: 0.14.0
show_all : bool
Whether to always plot all the dipoles. If True (default), the
            active dipole is plotted as a red dot and its location determines
            the shown MRI slices. The non-active dipoles are plotted as
small blue dots. If False, only the active dipole is plotted.
Only used if mode equals 'orthoview'.
.. versionadded:: 0.14.0
ax : instance of matplotlib Axes3D | None
Axes to plot into. If None (default), axes will be created.
Only used if mode equals 'orthoview'.
.. versionadded:: 0.14.0
block : bool
Whether to halt program execution until the figure is closed.
Defaults to False. Only used if mode equals 'orthoview'.
.. versionadded:: 0.14.0
show : bool
Show figure if True. Defaults to True.
Only used if mode equals 'orthoview'.
.. versionadded:: 0.14.0
verbose : bool, str, int, or None
If not None, override default verbose level (see
:func:`mne.verbose` and :ref:`Logging documentation <tut_logging>`
for more).
Returns
-------
fig : instance of mlab.Figure or matplotlib Figure
The mayavi figure or matplotlib Figure.
Notes
-----
.. versionadded:: 0.9.0
"""
from .viz import plot_dipole_locations
dipoles = self
if mode in [None, 'cone', 'sphere']: # support old behavior
dipoles = []
for t in self.times:
dipoles.append(self.copy())
dipoles[-1].crop(t, t)
elif mode != 'orthoview':
raise ValueError("mode must be 'cone', 'sphere' or 'orthoview'. "
"Got %s." % mode)
return plot_dipole_locations(
dipoles, trans, subject, subjects_dir, bgcolor, opacity,
brain_color, fig_name, fig_size, mode, scale_factor,
colors, coord_frame, idx, show_all, ax, block, show)
def plot_amplitudes(self, color='k', show=True):
"""Plot the dipole amplitudes as a function of time.
Parameters
----------
        color : matplotlib color
Color to use for the trace.
show : bool
Show figure if True.
Returns
-------
fig : matplotlib.figure.Figure
The figure object containing the plot.
"""
from .viz import plot_dipole_amplitudes
return plot_dipole_amplitudes([self], [color], show)
def __getitem__(self, item):
"""Get a time slice.
Parameters
----------
item : array-like or slice
The slice of time points to use.
Returns
-------
dip : instance of Dipole
The sliced dipole.
"""
if isinstance(item, int): # make sure attributes stay 2d
item = [item]
selected_times = self.times[item].copy()
selected_pos = self.pos[item, :].copy()
selected_amplitude = self.amplitude[item].copy()
selected_ori = self.ori[item, :].copy()
selected_gof = self.gof[item].copy()
selected_name = self.name
return Dipole(
selected_times, selected_pos, selected_amplitude, selected_ori,
selected_gof, selected_name)
def __len__(self):
"""Return the number of dipoles.
Returns
-------
len : int
The number of dipoles.
Examples
--------
This can be used as::
>>> len(dipoles) # doctest: +SKIP
10
"""
return self.pos.shape[0]
def _read_dipole_fixed(fname):
"""Read a fixed dipole FIF file."""
logger.info('Reading %s ...' % fname)
info, nave, aspect_kind, first, last, comment, times, data = \
_read_evoked(fname)
return DipoleFixed(info, data, times, nave, aspect_kind, first, last,
comment)
class DipoleFixed(object):
"""Dipole class for fixed-position dipole fits.
.. note:: This class should usually not be instantiated directly,
instead :func:`mne.read_dipole` should be used.
Parameters
----------
info : instance of Info
The measurement info.
data : array, shape (n_channels, n_times)
The dipole data.
times : array, shape (n_times,)
The time points.
nave : int
Number of averages.
aspect_kind : int
The kind of data.
first : int
First sample.
last : int
Last sample.
comment : str
The dipole comment.
verbose : bool, str, int, or None
If not None, override default verbose level (see :func:`mne.verbose`
and :ref:`Logging documentation <tut_logging>` for more).
See Also
--------
read_dipole
Dipole
Notes
-----
This class is for fixed-position dipole fits, where the position
(and maybe orientation) is static over time. For sequential dipole fits,
    where the position can change as a function of time, use :class:`mne.Dipole`.
.. versionadded:: 0.12
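    Examples
    --------
    A hedged sketch (the file name is a placeholder); fixed-position fits
    stored in FIF format are read with :func:`mne.read_dipole`:
    >>> dip_fixed = read_dipole('fixed-dip.fif')  # doctest: +SKIP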
"""
@verbose
def __init__(self, info, data, times, nave, aspect_kind, first, last,
comment, verbose=None): # noqa: D102
self.info = info
self.nave = nave
self._aspect_kind = aspect_kind
self.kind = _aspect_rev.get(str(aspect_kind), 'Unknown')
self.first = first
self.last = last
self.comment = comment
self.times = times
self.data = data
self.verbose = verbose
@property
def ch_names(self):
"""Channel names."""
return self.info['ch_names']
@verbose
def save(self, fname, verbose=None):
"""Save dipole in a .fif file.
Parameters
----------
fname : str
The name of the .fif file. Must end with ``'.fif'`` or
``'.fif.gz'`` to make it explicit that the file contains
dipole information in FIF format.
verbose : bool, str, int, or None
If not None, override default verbose level (see
:func:`mne.verbose` and :ref:`Logging documentation <tut_logging>`
for more).
"""
check_fname(fname, 'DipoleFixed', ('-dip.fif', '-dip.fif.gz'),
('.fif', '.fif.gz'))
_write_evokeds(fname, self, check=False)
def plot(self, show=True):
"""Plot dipole data.
Parameters
----------
show : bool
Call pyplot.show() at the end or not.
Returns
-------
fig : instance of matplotlib.figure.Figure
The figure containing the time courses.
"""
return _plot_evoked(self, picks=None, exclude=(), unit=True, show=show,
ylim=None, xlim='tight', proj=False, hline=None,
units=None, scalings=None, titles=None, axes=None,
gfp=False, window_title=None, spatial_colors=False,
plot_type="butterfly", selectable=False)
# #############################################################################
# IO
@verbose
def read_dipole(fname, verbose=None):
"""Read .dip file from Neuromag/xfit or MNE.
Parameters
----------
fname : str
The name of the .dip or .fif file.
verbose : bool, str, int, or None
If not None, override default verbose level (see :func:`mne.verbose`
and :ref:`Logging documentation <tut_logging>` for more).
Returns
-------
dipole : instance of Dipole or DipoleFixed
The dipole.
See Also
--------
mne.Dipole
mne.DipoleFixed
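    Examples
    --------
    A hedged sketch (the file name is a placeholder):
    >>> dip = read_dipole('fitted-dipoles.dip')  # doctest: +SKIP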
"""
_check_fname(fname, overwrite='read', must_exist=True)
if fname.endswith('.fif') or fname.endswith('.fif.gz'):
return _read_dipole_fixed(fname)
else:
return _read_dipole_text(fname)
def _read_dipole_text(fname):
"""Read a dipole text file."""
# Figure out the special fields
need_header = True
def_line = name = None
# There is a bug in older np.loadtxt regarding skipping fields,
# so just read the data ourselves (need to get name and header anyway)
data = list()
with open(fname, 'r') as fid:
for line in fid:
if not (line.startswith('%') or line.startswith('#')):
need_header = False
data.append(line.strip().split())
else:
if need_header:
def_line = line
if line.startswith('##') or line.startswith('%%'):
m = re.search('Name "(.*) dipoles"', line)
if m:
name = m.group(1)
del line
data = np.atleast_2d(np.array(data, float))
if def_line is None:
raise IOError('Dipole text file is missing field definition '
'comment, cannot parse %s' % (fname,))
# actually parse the fields
def_line = def_line.lstrip('%').lstrip('#').strip()
# MNE writes it out differently than Elekta, let's standardize them...
    fields = re.sub(r'([X|Y|Z] )\(mm\)',  # "X (mm)", etc.
                    lambda match: match.group(1).strip() + '/mm', def_line)
    fields = re.sub(r'\((.*?)\)',  # "Q(nAm)", etc.
                    lambda match: '/' + match.group(1), fields)
    fields = re.sub('(begin|end) ',  # "begin" and "end" with no units
                    lambda match: match.group(1) + '/ms', fields)
fields = fields.lower().split()
used_fields = ('begin/ms',
'x/mm', 'y/mm', 'z/mm',
'q/nam',
'qx/nam', 'qy/nam', 'qz/nam',
'g/%')
missing_fields = sorted(set(used_fields) - set(fields))
if len(missing_fields) > 0:
raise RuntimeError('Could not find necessary fields in header: %s'
% (missing_fields,))
ignored_fields = sorted(set(fields) - set(used_fields) - set(['end/ms']))
if len(ignored_fields) > 0:
warn('Ignoring extra fields in dipole file: %s' % (ignored_fields,))
if len(fields) != data.shape[1]:
raise IOError('More data fields (%s) found than data columns (%s): %s'
% (len(fields), data.shape[1], fields))
logger.info("%d dipole(s) found" % len(data))
if 'end/ms' in fields:
if np.diff(data[:, [fields.index('begin/ms'),
fields.index('end/ms')]], 1, -1).any():
warn('begin and end fields differed, but only begin will be used '
'to store time values')
# Find the correct column in our data array, then scale to proper units
idx = [fields.index(field) for field in used_fields]
assert len(idx) == 9
times = data[:, idx[0]] / 1000.
pos = 1e-3 * data[:, idx[1:4]] # put data in meters
amplitude = data[:, idx[4]]
norm = amplitude.copy()
amplitude /= 1e9
norm[norm == 0] = 1
ori = data[:, idx[5:8]] / norm[:, np.newaxis]
gof = data[:, idx[8]]
return Dipole(times, pos, amplitude, ori, gof, name)
# #############################################################################
# Fitting
def _dipole_forwards(fwd_data, whitener, rr, n_jobs=1):
"""Compute the forward solution and do other nice stuff."""
B = _compute_forwards_meeg(rr, fwd_data, n_jobs, verbose=False)
B = np.concatenate(B, axis=1)
B_orig = B.copy()
# Apply projection and whiten (cov has projections already)
B = np.dot(B, whitener.T)
# column normalization doesn't affect our fitting, so skip for now
# S = np.sum(B * B, axis=1) # across channels
# scales = np.repeat(3. / np.sqrt(np.sum(np.reshape(S, (len(rr), 3)),
# axis=1)), 3)
# B *= scales[:, np.newaxis]
scales = np.ones(3)
return B, B_orig, scales
def _make_guesses(surf_or_rad, r0, grid, exclude, mindist, n_jobs):
"""Make a guess space inside a sphere or BEM surface."""
if isinstance(surf_or_rad, dict):
surf = surf_or_rad
logger.info('Guess surface (%s) is in %s coordinates'
% (_bem_explain_surface(surf['id']),
_coord_frame_name(surf['coord_frame'])))
else:
radius = surf_or_rad[0]
logger.info('Making a spherical guess space with radius %7.1f mm...'
% (1000 * radius))
surf = _get_ico_surface(3)
_normalize_vectors(surf['rr'])
surf['rr'] *= radius
surf['rr'] += r0
logger.info('Filtering (grid = %6.f mm)...' % (1000 * grid))
src = _make_volume_source_space(surf, grid, exclude, 1000 * mindist,
do_neighbors=False, n_jobs=n_jobs)
# simplify the result to make things easier later
src = dict(rr=src['rr'][src['vertno']], nn=src['nn'][src['vertno']],
nuse=src['nuse'], coord_frame=src['coord_frame'],
vertno=np.arange(src['nuse']))
return SourceSpaces([src])
def _fit_eval(rd, B, B2, fwd_svd=None, fwd_data=None, whitener=None):
"""Calculate the residual sum of squares."""
if fwd_svd is None:
fwd = _dipole_forwards(fwd_data, whitener, rd[np.newaxis, :])[0]
uu, sing, vv = linalg.svd(fwd, overwrite_a=True, full_matrices=False)
else:
uu, sing, vv = fwd_svd
gof = _dipole_gof(uu, sing, vv, B, B2)[0]
# mne-c uses fitness=B2-Bm2, but ours (1-gof) is just a normalized version
return 1. - gof
def _dipole_gof(uu, sing, vv, B, B2):
"""Calculate the goodness of fit from the forward SVD."""
ncomp = 3 if sing[2] / sing[0] > 0.2 else 2
one = np.dot(vv[:ncomp], B)
Bm2 = np.sum(one * one)
gof = Bm2 / B2
return gof, one
def _fit_Q(fwd_data, whitener, proj_op, B, B2, B_orig, rd, ori=None):
"""Fit the dipole moment once the location is known."""
if 'fwd' in fwd_data:
# should be a single precomputed "guess" (i.e., fixed position)
assert rd is None
fwd = fwd_data['fwd']
assert fwd.shape[0] == 3
fwd_orig = fwd_data['fwd_orig']
assert fwd_orig.shape[0] == 3
scales = fwd_data['scales']
assert scales.shape == (3,)
fwd_svd = fwd_data['fwd_svd'][0]
else:
fwd, fwd_orig, scales = _dipole_forwards(fwd_data, whitener,
rd[np.newaxis, :])
fwd_svd = None
if ori is None:
if fwd_svd is None:
fwd_svd = linalg.svd(fwd, full_matrices=False)
uu, sing, vv = fwd_svd
gof, one = _dipole_gof(uu, sing, vv, B, B2)
ncomp = len(one)
# Counteract the effect of column normalization
Q = scales[0] * np.sum(uu.T[:ncomp] *
(one / sing[:ncomp])[:, np.newaxis], axis=0)
else:
fwd = np.dot(ori[np.newaxis], fwd)
sing = np.linalg.norm(fwd)
one = np.dot(fwd / sing, B)
gof = (one * one)[0] / B2
Q = ori * (scales[0] * np.sum(one / sing))
B_residual = _compute_residual(proj_op, B_orig, fwd_orig, Q)
return Q, gof, B_residual
def _compute_residual(proj_op, B_orig, fwd_orig, Q):
"""Compute the residual."""
# apply the projector to both elements
return np.dot(proj_op, B_orig) - np.dot(np.dot(Q, fwd_orig), proj_op.T)
def _fit_dipoles(fun, min_dist_to_inner_skull, data, times, guess_rrs,
guess_data, fwd_data, whitener, proj_op, ori, n_jobs):
"""Fit a single dipole to the given whitened, projected data."""
from scipy.optimize import fmin_cobyla
parallel, p_fun, _ = parallel_func(fun, n_jobs)
# parallel over time points
res = parallel(p_fun(min_dist_to_inner_skull, B, t, guess_rrs,
guess_data, fwd_data, whitener, proj_op,
fmin_cobyla, ori)
for B, t in zip(data.T, times))
pos = np.array([r[0] for r in res])
amp = np.array([r[1] for r in res])
ori = np.array([r[2] for r in res])
gof = np.array([r[3] for r in res]) * 100 # convert to percentage
residual = np.array([r[4] for r in res]).T
return pos, amp, ori, gof, residual
'''Simplex code in case we ever want/need it for testing
def _make_tetra_simplex():
"""Make the initial tetrahedron"""
#
# For this definition of a regular tetrahedron, see
#
# http://mathworld.wolfram.com/Tetrahedron.html
#
x = np.sqrt(3.0) / 3.0
r = np.sqrt(6.0) / 12.0
R = 3 * r
d = x / 2.0
simplex = 1e-2 * np.array([[x, 0.0, -r],
[-d, 0.5, -r],
[-d, -0.5, -r],
[0., 0., R]])
return simplex
def try_(p, y, psum, ndim, fun, ihi, neval, fac):
"""Helper to try a value"""
ptry = np.empty(ndim)
fac1 = (1.0 - fac) / ndim
fac2 = fac1 - fac
ptry = psum * fac1 - p[ihi] * fac2
ytry = fun(ptry)
neval += 1
if ytry < y[ihi]:
y[ihi] = ytry
psum[:] += ptry - p[ihi]
p[ihi] = ptry
return ytry, neval
def _simplex_minimize(p, ftol, stol, fun, max_eval=1000):
"""Minimization with the simplex algorithm
Modified from Numerical recipes"""
y = np.array([fun(s) for s in p])
ndim = p.shape[1]
assert p.shape[0] == ndim + 1
mpts = ndim + 1
neval = 0
psum = p.sum(axis=0)
loop = 1
while(True):
ilo = 1
if y[1] > y[2]:
ihi = 1
inhi = 2
else:
ihi = 2
inhi = 1
for i in range(mpts):
if y[i] < y[ilo]:
ilo = i
if y[i] > y[ihi]:
inhi = ihi
ihi = i
elif y[i] > y[inhi]:
if i != ihi:
inhi = i
rtol = 2 * np.abs(y[ihi] - y[ilo]) / (np.abs(y[ihi]) + np.abs(y[ilo]))
if rtol < ftol:
break
if neval >= max_eval:
raise RuntimeError('Maximum number of evaluations exceeded.')
if stol > 0: # Has the simplex collapsed?
dsum = np.sqrt(np.sum((p[ilo] - p[ihi]) ** 2))
if loop > 5 and dsum < stol:
break
ytry, neval = try_(p, y, psum, ndim, fun, ihi, neval, -1.)
if ytry <= y[ilo]:
ytry, neval = try_(p, y, psum, ndim, fun, ihi, neval, 2.)
elif ytry >= y[inhi]:
ysave = y[ihi]
ytry, neval = try_(p, y, psum, ndim, fun, ihi, neval, 0.5)
if ytry >= ysave:
for i in range(mpts):
if i != ilo:
psum[:] = 0.5 * (p[i] + p[ilo])
p[i] = psum
y[i] = fun(psum)
neval += ndim
psum = p.sum(axis=0)
loop += 1
'''
def _surface_constraint(rd, surf, min_dist_to_inner_skull):
"""Surface fitting constraint."""
dist = _compute_nearest(surf['rr'], rd[np.newaxis, :],
return_dists=True)[1][0]
if _points_outside_surface(rd[np.newaxis, :], surf, 1)[0]:
dist *= -1.
# Once we know the dipole is below the inner skull,
# let's check if its distance to the inner skull is at least
# min_dist_to_inner_skull. This can be enforced by adding a
    # constraint proportional to its distance.
dist -= min_dist_to_inner_skull
return dist
def _sphere_constraint(rd, r0, R_adj):
"""Sphere fitting constraint."""
return R_adj - np.sqrt(np.sum((rd - r0) ** 2))
def _fit_dipole(min_dist_to_inner_skull, B_orig, t, guess_rrs,
guess_data, fwd_data, whitener, proj_op,
fmin_cobyla, ori):
"""Fit a single bit of data."""
B = np.dot(whitener, B_orig)
# make constraint function to keep the solver within the inner skull
if isinstance(fwd_data['inner_skull'], dict): # bem
surf = fwd_data['inner_skull']
constraint = partial(_surface_constraint, surf=surf,
min_dist_to_inner_skull=min_dist_to_inner_skull)
else: # sphere
surf = None
R, r0 = fwd_data['inner_skull']
constraint = partial(_sphere_constraint, r0=r0,
R_adj=R - min_dist_to_inner_skull)
del R, r0
# Find a good starting point (find_best_guess in C)
B2 = np.dot(B, B)
if B2 == 0:
warn('Zero field found for time %s' % t)
return np.zeros(3), 0, np.zeros(3), 0, B
idx = np.argmin([_fit_eval(guess_rrs[[fi], :], B, B2, fwd_svd)
for fi, fwd_svd in enumerate(guess_data['fwd_svd'])])
x0 = guess_rrs[idx]
fun = partial(_fit_eval, B=B, B2=B2, fwd_data=fwd_data, whitener=whitener)
# Tested minimizers:
# Simplex, BFGS, CG, COBYLA, L-BFGS-B, Powell, SLSQP, TNC
# Several were similar, but COBYLA won for having a handy constraint
# function we can use to ensure we stay inside the inner skull /
# smallest sphere
rd_final = fmin_cobyla(fun, x0, (constraint,), consargs=(),
rhobeg=5e-2, rhoend=5e-5, disp=False)
# simplex = _make_tetra_simplex() + x0
# _simplex_minimize(simplex, 1e-4, 2e-4, fun)
# rd_final = simplex[0]
# Compute the dipole moment at the final point
Q, gof, residual = _fit_Q(fwd_data, whitener, proj_op, B, B2, B_orig,
rd_final, ori=ori)
amp = np.sqrt(np.dot(Q, Q))
norm = 1. if amp == 0. else amp
ori = Q / norm
msg = '---- Fitted : %7.1f ms' % (1000. * t)
if surf is not None:
dist_to_inner_skull = _compute_nearest(
surf['rr'], rd_final[np.newaxis, :], return_dists=True)[1][0]
msg += (", distance to inner skull : %2.4f mm"
% (dist_to_inner_skull * 1000.))
logger.info(msg)
return rd_final, amp, ori, gof, residual
def _fit_dipole_fixed(min_dist_to_inner_skull, B_orig, t, guess_rrs,
guess_data, fwd_data, whitener, proj_op,
fmin_cobyla, ori):
"""Fit a data using a fixed position."""
B = np.dot(whitener, B_orig)
B2 = np.dot(B, B)
if B2 == 0:
warn('Zero field found for time %s' % t)
return np.zeros(3), 0, np.zeros(3), 0
# Compute the dipole moment
Q, gof, residual = _fit_Q(guess_data, whitener, proj_op, B, B2, B_orig,
rd=None, ori=ori)
if ori is None:
amp = np.sqrt(np.dot(Q, Q))
norm = 1. if amp == 0. else amp
ori = Q / norm
else:
amp = np.dot(Q, ori)
# No corresponding 'logger' message here because it should go *very* fast
return guess_rrs[0], amp, ori, gof, residual
@verbose
def fit_dipole(evoked, cov, bem, trans=None, min_dist=5., n_jobs=1,
pos=None, ori=None, verbose=None):
"""Fit a dipole.
Parameters
----------
evoked : instance of Evoked
The dataset to fit.
cov : str | instance of Covariance
The noise covariance.
bem : str | instance of ConductorModel
The BEM filename (str) or conductor model.
trans : str | None
The head<->MRI transform filename. Must be provided unless BEM
is a sphere model.
min_dist : float
        Minimum distance (in millimeters) from the dipole to the inner skull.
Must be positive. Note that because this is a constraint passed to
a solver it is not strict but close, i.e. for a ``min_dist=5.`` the
fits could be 4.9 mm from the inner skull.
n_jobs : int
Number of jobs to run in parallel (used in field computation
and fitting).
pos : ndarray, shape (3,) | None
Position of the dipole to use. If None (default), sequential
fitting (different position and orientation for each time instance)
is performed. If a position (in head coords) is given as an array,
the position is fixed during fitting.
.. versionadded:: 0.12
ori : ndarray, shape (3,) | None
Orientation of the dipole to use. If None (default), the
orientation is free to change as a function of time. If an
orientation (in head coordinates) is given as an array, ``pos``
must also be provided, and the routine computes the amplitude and
goodness of fit of the dipole at the given position and orientation
for each time instant.
.. versionadded:: 0.12
verbose : bool, str, int, or None
If not None, override default verbose level (see :func:`mne.verbose`
and :ref:`Logging documentation <tut_logging>` for more).
Returns
-------
dip : instance of Dipole or DipoleFixed
The dipole fits. A :class:`mne.DipoleFixed` is returned if
``pos`` and ``ori`` are both not None.
residual : ndarray, shape (n_meeg_channels, n_times)
The good M-EEG data channels with the fitted dipolar activity
removed.
See Also
--------
mne.beamformer.rap_music
Notes
-----
.. versionadded:: 0.9.0
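    Examples
    --------
    A hedged sketch; ``evoked`` is an already-loaded Evoked instance and the
    file names below are placeholders for user data:
    >>> dip, residual = fit_dipole(evoked, 'noise-cov.fif', 'bem-sol.fif',
    ...                            'trans.fif', min_dist=5.)  # doctest: +SKIP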
"""
# This could eventually be adapted to work with other inputs, these
# are what is needed:
evoked = evoked.copy()
# Determine if a list of projectors has an average EEG ref
if _needs_eeg_average_ref_proj(evoked.info):
raise ValueError('EEG average reference is mandatory for dipole '
'fitting.')
if min_dist < 0:
raise ValueError('min_dist should be positive. Got %s' % min_dist)
if ori is not None and pos is None:
raise ValueError('pos must be provided if ori is not None')
data = evoked.data
if not np.isfinite(data).all():
raise ValueError('Evoked data must be finite')
info = evoked.info
times = evoked.times.copy()
comment = evoked.comment
# Convert the min_dist to meters
min_dist_to_inner_skull = min_dist / 1000.
del min_dist
# Figure out our inputs
neeg = len(pick_types(info, meg=False, eeg=True, ref_meg=False,
exclude=[]))
if isinstance(bem, string_types):
bem_extra = bem
else:
bem_extra = repr(bem)
logger.info('BEM : %s' % bem_extra)
if trans is not None:
logger.info('MRI transform : %s' % trans)
mri_head_t, trans = _get_trans(trans)
else:
mri_head_t = Transform('head', 'mri')
bem = _setup_bem(bem, bem_extra, neeg, mri_head_t, verbose=False)
if not bem['is_sphere']:
if trans is None:
            raise ValueError('trans must not be None if BEM is not a '
                             'sphere model')
# Find the best-fitting sphere
inner_skull = _bem_find_surface(bem, 'inner_skull')
inner_skull = inner_skull.copy()
R, r0 = _fit_sphere(inner_skull['rr'], disp=False)
# r0 back to head frame for logging
r0 = apply_trans(mri_head_t['trans'], r0[np.newaxis, :])[0]
logger.info('Head origin : '
'%6.1f %6.1f %6.1f mm rad = %6.1f mm.'
% (1000 * r0[0], 1000 * r0[1], 1000 * r0[2], 1000 * R))
else:
r0 = bem['r0']
if len(bem.get('layers', [])) > 0:
R = bem['layers'][0]['rad']
kind = 'rad'
else: # MEG-only
# Use the minimum distance to the MEG sensors as the radius then
R = np.dot(linalg.inv(info['dev_head_t']['trans']),
np.hstack([r0, [1.]]))[:3] # r0 -> device
R = R - [info['chs'][pick]['loc'][:3]
for pick in pick_types(info, meg=True, exclude=[])]
if len(R) == 0:
raise RuntimeError('No MEG channels found, but MEG-only '
'sphere model used')
R = np.min(np.sqrt(np.sum(R * R, axis=1))) # use dist to sensors
kind = 'max_rad'
logger.info('Sphere model : origin at (% 7.2f % 7.2f % 7.2f) mm, '
'%s = %6.1f mm'
% (1000 * r0[0], 1000 * r0[1], 1000 * r0[2], kind, R))
inner_skull = [R, r0] # NB sphere model defined in head frame
r0_mri = apply_trans(invert_transform(mri_head_t)['trans'],
r0[np.newaxis, :])[0]
accurate = False # can be an option later (shouldn't make big diff)
# Deal with DipoleFixed cases here
if pos is not None:
fixed_position = True
pos = np.array(pos, float)
if pos.shape != (3,):
raise ValueError('pos must be None or a 3-element array-like,'
' got %s' % (pos,))
logger.info('Fixed position : %6.1f %6.1f %6.1f mm'
% tuple(1000 * pos))
if ori is not None:
ori = np.array(ori, float)
if ori.shape != (3,):
                raise ValueError('ori must be None or a 3-element array-like,'
' got %s' % (ori,))
norm = np.sqrt(np.sum(ori * ori))
if not np.isclose(norm, 1):
raise ValueError('ori must be a unit vector, got length %s'
% (norm,))
            logger.info('Fixed orientation : %6.4f %6.4f %6.4f'
% tuple(ori))
else:
logger.info('Free orientation : <time-varying>')
fit_n_jobs = 1 # only use 1 job to do the guess fitting
else:
fixed_position = False
# Eventually these could be parameters, but they are just used for
# the initial grid anyway
guess_grid = 0.02 # MNE-C uses 0.01, but this is faster w/similar perf
guess_mindist = max(0.005, min_dist_to_inner_skull)
guess_exclude = 0.02
logger.info('Guess grid : %6.1f mm' % (1000 * guess_grid,))
if guess_mindist > 0.0:
logger.info('Guess mindist : %6.1f mm'
% (1000 * guess_mindist,))
if guess_exclude > 0:
logger.info('Guess exclude : %6.1f mm'
% (1000 * guess_exclude,))
logger.info('Using %s MEG coil definitions.'
% ("accurate" if accurate else "standard"))
fit_n_jobs = n_jobs
if isinstance(cov, string_types):
logger.info('Noise covariance : %s' % (cov,))
cov = read_cov(cov, verbose=False)
logger.info('')
_print_coord_trans(mri_head_t)
_print_coord_trans(info['dev_head_t'])
logger.info('%d bad channels total' % len(info['bads']))
# Forward model setup (setup_forward_model from setup.c)
ch_types = [channel_type(info, idx) for idx in range(info['nchan'])]
megcoils, compcoils, megnames, meg_info = [], [], [], None
eegels, eegnames = [], []
if 'grad' in ch_types or 'mag' in ch_types:
megcoils, compcoils, megnames, meg_info = \
_prep_meg_channels(info, exclude='bads',
accurate=accurate, verbose=verbose)
if 'eeg' in ch_types:
eegels, eegnames = _prep_eeg_channels(info, exclude='bads',
verbose=verbose)
# Ensure that MEG and/or EEG channels are present
if len(megcoils + eegels) == 0:
raise RuntimeError('No MEG or EEG channels found.')
# Whitener for the data
logger.info('Decomposing the sensor noise covariance matrix...')
picks = pick_types(info, meg=True, eeg=True, ref_meg=False)
# In case we want to more closely match MNE-C for debugging:
# from .io.pick import pick_info
# from .cov import prepare_noise_cov
# info_nb = pick_info(info, picks)
# cov = prepare_noise_cov(cov, info_nb, info_nb['ch_names'], verbose=False)
# nzero = (cov['eig'] > 0)
# n_chan = len(info_nb['ch_names'])
# whitener = np.zeros((n_chan, n_chan), dtype=np.float)
# whitener[nzero, nzero] = 1.0 / np.sqrt(cov['eig'][nzero])
# whitener = np.dot(whitener, cov['eigvec'])
whitener = _get_whitener_data(info, cov, picks, verbose=False)
# Proceed to computing the fits (make_guess_data)
if fixed_position:
guess_src = dict(nuse=1, rr=pos[np.newaxis], inuse=np.array([True]))
logger.info('Compute forward for dipole location...')
else:
logger.info('\n---- Computing the forward solution for the guesses...')
guess_src = _make_guesses(inner_skull, r0_mri,
guess_grid, guess_exclude, guess_mindist,
n_jobs=n_jobs)[0]
# grid coordinates go from mri to head frame
transform_surface_to(guess_src, 'head', mri_head_t)
logger.info('Go through all guess source locations...')
# inner_skull goes from mri to head frame
if isinstance(inner_skull, dict):
transform_surface_to(inner_skull, 'head', mri_head_t)
if fixed_position:
if isinstance(inner_skull, dict):
check = _surface_constraint(pos, inner_skull,
min_dist_to_inner_skull)
else:
check = _sphere_constraint(pos, r0,
R_adj=R - min_dist_to_inner_skull)
if check <= 0:
raise ValueError('fixed position is %0.1fmm outside the inner '
'skull boundary' % (-1000 * check,))
# C code computes guesses w/sphere model for speed, don't bother here
fwd_data = dict(coils_list=[megcoils, eegels], infos=[meg_info, None],
ccoils_list=[compcoils, None], coil_types=['meg', 'eeg'],
inner_skull=inner_skull)
# fwd_data['inner_skull'] in head frame, bem in mri, confusing...
_prep_field_computation(guess_src['rr'], bem, fwd_data, n_jobs,
verbose=False)
guess_fwd, guess_fwd_orig, guess_fwd_scales = _dipole_forwards(
fwd_data, whitener, guess_src['rr'], n_jobs=fit_n_jobs)
# decompose ahead of time
guess_fwd_svd = [linalg.svd(fwd, overwrite_a=False, full_matrices=False)
for fwd in np.array_split(guess_fwd,
len(guess_src['rr']))]
guess_data = dict(fwd=guess_fwd, fwd_svd=guess_fwd_svd,
fwd_orig=guess_fwd_orig, scales=guess_fwd_scales)
del guess_fwd, guess_fwd_svd, guess_fwd_orig, guess_fwd_scales # destroyed
logger.info('[done %d source%s]' % (guess_src['nuse'],
_pl(guess_src['nuse'])))
# Do actual fits
data = data[picks]
ch_names = [info['ch_names'][p] for p in picks]
proj_op = make_projector(info['projs'], ch_names, info['bads'])[0]
fun = _fit_dipole_fixed if fixed_position else _fit_dipole
out = _fit_dipoles(
fun, min_dist_to_inner_skull, data, times, guess_src['rr'],
guess_data, fwd_data, whitener, proj_op, ori, n_jobs)
if fixed_position and ori is not None:
# DipoleFixed
data = np.array([out[1], out[3]])
out_info = deepcopy(info)
loc = np.concatenate([pos, ori, np.zeros(6)])
out_info['chs'] = [
dict(ch_name='dip 01', loc=loc, kind=FIFF.FIFFV_DIPOLE_WAVE,
coord_frame=FIFF.FIFFV_COORD_UNKNOWN, unit=FIFF.FIFF_UNIT_AM,
coil_type=FIFF.FIFFV_COIL_DIPOLE,
unit_mul=0, range=1, cal=1., scanno=1, logno=1),
dict(ch_name='goodness', loc=np.zeros(12),
kind=FIFF.FIFFV_GOODNESS_FIT, unit=FIFF.FIFF_UNIT_AM,
coord_frame=FIFF.FIFFV_COORD_UNKNOWN,
coil_type=FIFF.FIFFV_COIL_NONE,
unit_mul=0, range=1., cal=1., scanno=2, logno=100)]
for key in ['hpi_meas', 'hpi_results', 'projs']:
out_info[key] = list()
for key in ['acq_pars', 'acq_stim', 'description', 'dig',
'experimenter', 'hpi_subsystem', 'proj_id', 'proj_name',
'subject_info']:
out_info[key] = None
out_info._update_redundant()
out_info._check_consistency()
dipoles = DipoleFixed(out_info, data, times, evoked.nave,
evoked._aspect_kind, evoked.first, evoked.last,
comment)
else:
dipoles = Dipole(times, out[0], out[1], out[2], out[3], comment)
residual = out[4]
logger.info('%d time points fitted' % len(dipoles.times))
return dipoles, residual
def get_phantom_dipoles(kind='vectorview'):
"""Get standard phantom dipole locations and orientations.
Parameters
----------
kind : str
Get the information for the given system:
``vectorview`` (default)
The Neuromag VectorView phantom.
``otaniemi``
The older Neuromag phantom used at Otaniemi.
Returns
-------
pos : ndarray, shape (n_dipoles, 3)
The dipole positions.
ori : ndarray, shape (n_dipoles, 3)
The dipole orientations.
Notes
-----
The Elekta phantoms have a radius of 79.5mm, and HPI coil locations
in the XY-plane at the axis extrema (e.g., (79.5, 0), (0, -79.5), ...).
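    Examples
    --------
    A short sketch; both returned arrays have one row per phantom dipole:
    >>> pos, ori = get_phantom_dipoles('vectorview')  # doctest: +SKIP
    >>> pos.shape == ori.shape  # doctest: +SKIP
    True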
"""
_valid_types = ('vectorview', 'otaniemi')
if not isinstance(kind, string_types) or kind not in _valid_types:
raise ValueError('kind must be one of %s, got %s'
% (_valid_types, kind,))
if kind == 'vectorview':
# these values were pulled from a scanned image provided by
# Elekta folks
a = np.array([59.7, 48.6, 35.8, 24.8, 37.2, 27.5, 15.8, 7.9])
b = np.array([46.1, 41.9, 38.3, 31.5, 13.9, 16.2, 20.0, 19.3])
x = np.concatenate((a, [0] * 8, -b, [0] * 8))
y = np.concatenate(([0] * 8, -a, [0] * 8, b))
c = [22.9, 23.5, 25.5, 23.1, 52.0, 46.4, 41.0, 33.0]
d = [44.4, 34.0, 21.6, 12.7, 62.4, 51.5, 39.1, 27.9]
z = np.concatenate((c, c, d, d))
elif kind == 'otaniemi':
        # these values were pulled from a Neuromag manual
# (NM20456A, 13.7.1999, p.65)
a = np.array([56.3, 47.6, 39.0, 30.3])
b = np.array([32.5, 27.5, 22.5, 17.5])
c = np.zeros(4)
x = np.concatenate((a, b, c, c, -a, -b, c, c))
y = np.concatenate((c, c, -a, -b, c, c, b, a))
z = np.concatenate((b, a, b, a, b, a, a, b))
pos = np.vstack((x, y, z)).T / 1000.
# Locs are always in XZ or YZ, and so are the oris. The oris are
# also in the same plane and tangential, so it's easy to determine
# the orientation.
ori = list()
for this_pos in pos:
this_ori = np.zeros(3)
idx = np.where(this_pos == 0)[0]
# assert len(idx) == 1
idx = np.setdiff1d(np.arange(3), idx[0])
this_ori[idx] = (this_pos[idx][::-1] /
np.linalg.norm(this_pos[idx])) * [1, -1]
# Now we have this quality, which we could uncomment to
# double-check:
# np.testing.assert_allclose(np.dot(this_ori, this_pos) /
# np.linalg.norm(this_pos), 0,
# atol=1e-15)
ori.append(this_ori)
ori = np.array(ori)
return pos, ori
def _concatenate_dipoles(dipoles):
"""Concatenate a list of dipoles."""
times, pos, amplitude, ori, gof = [], [], [], [], []
for dipole in dipoles:
times.append(dipole.times)
pos.append(dipole.pos)
amplitude.append(dipole.amplitude)
ori.append(dipole.ori)
gof.append(dipole.gof)
return Dipole(np.concatenate(times), np.concatenate(pos),
np.concatenate(amplitude), np.concatenate(ori),
np.concatenate(gof), name=None)
|
bsd-3-clause
|
LLNL/spack
|
var/spack/repos/builtin/packages/py-pymatgen/package.py
|
5
|
1650
|
# Copyright 2013-2020 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack import *
class PyPymatgen(PythonPackage):
"""Python Materials Genomics is a robust materials analysis code that
defines core object representations for structures and molecules with
support for many electronic structure codes. It is currently the core
analysis code powering the Materials Project."""
homepage = "http://www.pymatgen.org/"
url = "https://pypi.io/packages/source/p/pymatgen/pymatgen-4.7.2.tar.gz"
version('4.7.2', sha256='e439b78cc3833a03963c3c3efe349d8a0e52a1550c8a05c56a89aa1b86657436')
version('4.6.2', sha256='f34349090c6f604f7d402cb09cd486830b38523639d7160d7fd282d504036a0e')
extends('python', ignore='bin/tabulate')
depends_on('[email protected]:', type='build')
depends_on('[email protected]:', type=('build', 'run'))
depends_on('py-six', type=('build', 'run'))
depends_on('py-requests', type=('build', 'run'))
depends_on('[email protected]:', type=('build', 'run'))
depends_on('[email protected]:', type=('build', 'run'))
depends_on('[email protected]:', type=('build', 'run'))
depends_on('[email protected]:', type=('build', 'run'))
depends_on('py-tabulate', type=('build', 'run'))
depends_on('[email protected]:', type=('build', 'run'))
depends_on('[email protected]:', type=('build', 'run'))
depends_on('[email protected]:', type=('build', 'run'))
|
lgpl-2.1
|
zhuangjun1981/retinotopic_mapping
|
retinotopic_mapping/test/test_StimulusRoutines.py
|
1
|
39611
|
import os
import unittest
import retinotopic_mapping.StimulusRoutines as sr
class TestSimulation(unittest.TestCase):
def setUp(self):
import retinotopic_mapping.MonitorSetup as ms
# Setup monitor/indicator objects
self.monitor = ms.Monitor(resolution=(1200,1600), dis=15.,
mon_width_cm=40., mon_height_cm=30.,
C2T_cm=15.,C2A_cm=20., center_coordinates=(0., 60.),
downsample_rate=10)
# import matplotlib.pyplot as plt
# self.monitor.plot_map()
# plt.show()
self.indicator = ms.Indicator(self.monitor, width_cm = 3., height_cm = 3., position = 'northeast',
is_sync = True, freq = 1.)
self.curr_folder = os.path.dirname(os.path.realpath(__file__))
def test_blur_cos(self):
import numpy as np
dis = np.arange(10, 30, 0.1) - 20.
sigma = 10.
blurred = sr.blur_cos(dis=dis, sigma=sigma)
# import matplotlib.pyplot as plt
# plt.plot(dis, blurred)
# plt.show()
# print blurred[50]
# print blurred[100]
assert (np.array_equal(blurred[0:50], np.ones((50,))))
        assert (abs(blurred[100] - 0.5) < 1E-10)
assert (np.array_equal(blurred[150:200], np.zeros((50,))))
def test_get_circle_mask(self):
mask = sr.get_circle_mask(map_alt=self.monitor.deg_coord_y, map_azi=self.monitor.deg_coord_x,
center=(10., 60.), radius=20., is_smooth_edge=True,
blur_ratio=0.5, blur_func=sr.blur_cos, is_plot=False)
# print mask[39, 100]
        assert (abs(mask[39, 100] - 0.404847) < 1E-6)  # tolerance matches the 6-decimal reference
def test_get_circle_mask2(self):
import numpy as np
alt = np.arange(-30., 30., 1.)[::-1]
azi = np.arange(-30., 30., 1.)
azi_map, alt_map = np.meshgrid(azi, alt)
cm = sr.get_circle_mask(map_alt=alt_map, map_azi=azi_map, center=(0., 10.), radius=10.,
is_smooth_edge=False)
# import matplotlib.pyplot as plt
# plt.imshow(cm)
# plt.show()
assert (cm[28, 49] == 1)
cm = sr.get_circle_mask(map_alt=alt_map, map_azi=azi_map, center=(10., 0.), radius=10.,
is_smooth_edge=False)
# import matplotlib.pyplot as plt
# plt.imshow(cm)
# plt.show()
assert (cm[10, 30] == 1)
def test_get_warped_probes(self):
import numpy as np
azis = np.arange(0, 10, 0.1)
alts = np.arange(30, 40, 0.1)[::-1]
coord_azi, coord_alt = np.meshgrid(azis, alts)
probes = ([32., 5., 1.],)
frame = sr.get_warped_probes(deg_coord_alt=coord_alt, deg_coord_azi=coord_azi,
probes=probes, width=0.5,
height=1., ori=0., background_color=0.)
# import matplotlib.pyplot as plt
# plt.imshow(frame)
# plt.show()
assert (frame[75, 51] == 1)
frame = sr.get_warped_probes(deg_coord_alt=coord_alt, deg_coord_azi=coord_azi,
probes=probes, width=0.5,
height=1., ori=30., background_color=0.)
assert (frame[76, 47] == 1)
assert (frame[81, 53] == 1)
def test_get_grating(self):
import numpy as np
alt = np.arange(-30., 30., 1.)[::-1]
azi = np.arange(-30., 30., 1.)
azi_map, alt_map = np.meshgrid(azi, alt)
grating = sr.get_grating(alt_map=alt_map, azi_map=azi_map, dire=315.,
spatial_freq=0.04, center=(0., 0.), phase=0.,
contrast=1.)
assert (grating[34, 29] < 0.827)
assert (grating[34, 29] > 0.825)
# import matplotlib.pyplot as plt
# f, (ax) = plt.subplots(1)
# ax.imshow(grating, cmap='gray')
# plt.show()
def test_get_grid_locations(self):
monitor_azi = self.monitor.deg_coord_x
monitor_alt = self.monitor.deg_coord_y
grid_locs = sr.get_grid_locations(subregion=[-20., -10., 30., 90.], grid_space=[10., 10.],
monitor_azi=monitor_azi, monitor_alt=monitor_alt,
is_include_edge=True, is_plot=False)
assert (len(grid_locs) == 14)
def test_UC_generate_movie_by_index(self):
# Setup Uniform Contrast Objects
uc = sr.UniformContrast(monitor=self.monitor, indicator=self.indicator, duration=0.1,
color=1., pregap_dur=1., postgap_dur=1.5, background=0.,
coordinate='degree')
uc_full_seq, uc_full_dict = uc.generate_movie_by_index()
assert (uc_full_seq.shape == (2, 120, 160))
assert (len(uc_full_dict['stimulation']['index_to_display']) == 156)
frames_unique = uc_full_dict['stimulation']['frames_unique']
all_frames = []
for ind in uc_full_dict['stimulation']['index_to_display']:
all_frames.append(frames_unique[ind])
# Parameters defining where the frame blocks should start and end
ref_rate = self.monitor.refresh_rate
pregap_end = uc.pregap_frame_num
on_end = pregap_end + int(uc.duration*ref_rate)
postgap_end = on_end + uc.postgap_frame_num
for i in range(pregap_end):
assert (all_frames[i] == (0., -1.))
for i in range(pregap_end, on_end):
assert (all_frames[i] == (1., 1.))
for i in range(on_end, postgap_end):
assert (all_frames[i] == (0., -1.))
def test_FC_generate_movie_by_index(self):
fc = sr.FlashingCircle(monitor=self.monitor,
indicator=self.indicator,
center=(10., 90.), flash_frame_num=30,
color=-1., pregap_dur=0.5, postgap_dur=1.2,
background=1., coordinate='degree',
midgap_dur=1., iteration=3)
fc_full_seq, fc_full_dict = fc.generate_movie_by_index()
assert (fc_full_seq.shape == (2, 120, 160))
# print len(fc_full_dict['stimulation']['index_to_display'])
assert (len(fc_full_dict['stimulation']['index_to_display']) == 312)
frames_unique = fc_full_dict['stimulation']['frames_unique']
frames = []
for ind in fc_full_dict['stimulation']['index_to_display']:
frames.append(frames_unique[ind])
# Parameters defining where the frame blocks should start and end
flashing_end = fc.pregap_frame_num + fc.flash_frame_num
midgap_end = flashing_end + fc.midgap_frame_num
next_flash_end = midgap_end + fc.flash_frame_num
for i in range(fc.pregap_frame_num):
assert (frames[i] == (0, -1.))
for i in range(fc.pregap_frame_num, flashing_end):
assert (frames[i] == (1, 1.))
for i in range(flashing_end, midgap_end):
assert (frames[i] == (0., -1.))
for i in range(midgap_end, next_flash_end):
assert (frames[i] == (1, 1.))
assert (fc_full_seq[1, 39, 124] == -1)
# import matplotlib.pyplot as plt
# f, (ax) = plt.subplots(1)
# ax.imshow(fc_full_seq[1])
# plt.show()
def test_FC_generate_movie(self):
fc = sr.FlashingCircle(monitor=self.monitor,
indicator=self.indicator,
center=(10., 90.), flash_frame_num=30,
color=-1., pregap_dur=0.1, postgap_dur=1.0,
background=1., coordinate='degree',
midgap_dur=0.5, iteration=10)
fc_full_seq, fc_full_dict = fc.generate_movie()
assert (fc_full_seq.shape == (636, 120, 160))
assert (len(fc_full_dict['stimulation']['frames']) == 636)
frames = fc_full_dict['stimulation']['frames']
# print frames
# Parameters defining where the frame blocks should start and end
flashing_end = fc.pregap_frame_num + fc.flash_frame_num
midgap_end = flashing_end + fc.midgap_frame_num
next_flash_end = midgap_end + fc.flash_frame_num
for i in range(fc.pregap_frame_num):
assert (frames[i] == (0, -1.))
for i in range(fc.pregap_frame_num, flashing_end):
assert (frames[i] == (1, 1.))
for i in range(flashing_end, midgap_end):
assert (frames[i] == (0., -1.))
for i in range(midgap_end, next_flash_end):
assert (frames[i] == (1, 1.))
assert (fc_full_seq[6, 39, 124] == -1.)
# import matplotlib.pyplot as plt
# f, (ax) = plt.subplots(1)
# ax.imshow(fc_full_seq[6])
# plt.show()
def test_SN_generate_display_index(self):
sn = sr.SparseNoise(monitor=self.monitor, indicator=self.indicator,
background=0., coordinate='degree', grid_space=(10.,10.),
probe_size=(10.,10.), probe_orientation=0., probe_frame_num=6,
subregion=[10, 20, 0., 60.], sign='ON', iteration=1, pregap_dur=0.1,
postgap_dur=0.2, is_include_edge=True)
frames_unique, index_to_display = sn._generate_display_index()
for frame in frames_unique:
assert (len(frame) == 4)
# print '\n'.join([str(f) for f in frames_unique])
# print index_to_display
assert (index_to_display[:6] == [0, 0, 0, 0, 0, 0])
assert (index_to_display[-12:] == [0] * 12)
# print max(index_to_display)
# print len(frames_unique)
assert (max(index_to_display) == len(frames_unique) -1)
        probe_num = (len(index_to_display) - 18) // 6
for probe_ind in range(probe_num):
assert (len(set(index_to_display[6 + probe_ind * 6: 9 + probe_ind * 6])) == 1)
assert (len(set(index_to_display[9 + probe_ind * 6: 12 + probe_ind * 6])) == 1)
assert (index_to_display[9 + probe_ind * 6] - index_to_display[8 + probe_ind * 6] == 1)
def test_SN_get_probe_index_for_one_iter_on_off(self):
import numpy as np
sn = sr.SparseNoise(monitor=self.monitor, indicator=self.indicator,
background=0., coordinate='degree', grid_space=(5., 5.),
probe_size=(5., 5.), probe_orientation=0., probe_frame_num=6,
subregion=[-30, 30, -10., 90.], sign='ON-OFF', iteration=2)
frames_unique = sn._generate_frames_for_index_display()
probe_ind = sn._get_probe_index_for_one_iter_on_off(frames_unique)
for j in range(len(probe_ind) - 1):
probe_loc_0 = frames_unique[probe_ind[j]]
probe_loc_1 = frames_unique[probe_ind[j + 1]]
assert(not np.array_equal(probe_loc_0, probe_loc_1))
def test_SN_generate_display_index2(self):
import numpy as np
sn = sr.SparseNoise(monitor=self.monitor, indicator=self.indicator,
background=0., coordinate='degree', grid_space=(10., 10.),
probe_size=(10., 10.), probe_orientation=0., probe_frame_num=8,
subregion=[-10, 10, 45., 55.], sign='ON-OFF', iteration=2,
pregap_dur=0.5, postgap_dur=0.3, is_include_edge=True)
frames_unique, index_to_display = sn._generate_display_index()
for frame in frames_unique:
assert (len(frame) == 4)
assert (index_to_display[:30] == [0] * 30)
assert (index_to_display[-18:] == [0] * 18)
assert (max(index_to_display) == len(frames_unique) - 1)
# frame_num_iter = (len(index_to_display) - 18 - 30) / 2
assert ((len(index_to_display) - 48) % (8 * 2) == 0)
        probe_num = (len(index_to_display) - 48) // (8 * 2)
for probe_ind in range(probe_num):
assert (len(set(index_to_display[30 + probe_ind * 8: 34 + probe_ind * 8])) == 1)
assert (len(set(index_to_display[34 + probe_ind * 8: 38 + probe_ind * 8])) == 1)
assert (np.array_equal(frames_unique[index_to_display[33 + probe_ind * 8]][1],
frames_unique[index_to_display[34 + probe_ind * 8]][1]))
def test_SN_generate_movie_by_index(self):
sn = sr.SparseNoise(monitor=self.monitor, indicator=self.indicator,
background=0., coordinate='degree', grid_space=(10., 10.),
probe_size=(10., 10.), probe_orientation=0., probe_frame_num=6,
subregion=[-20., -10., 30., 90.], sign='ON', iteration=1, pregap_dur=0.1,
postgap_dur=0.2, is_include_edge=True)
mov_unique, _ = sn.generate_movie_by_index()
import numpy as np
# import matplotlib.pyplot as plt
# plt.imshow(np.max(mov_unique, axis=0))
# plt.show()
assert (np.max(mov_unique, axis=0)[66, 121] == 1)
def test_SN_generate_movie(self):
sn = sr.SparseNoise(monitor=self.monitor, indicator=self.indicator,
background=0., coordinate='degree', grid_space=(10., 10.),
probe_size=(10., 10.), probe_orientation=0., probe_frame_num=6,
subregion=[-20., -10., 30., 90.], sign='OFF', iteration=1, pregap_dur=0.1,
postgap_dur=0.2, is_include_edge=True)
mov, _ = sn.generate_movie()
import numpy as np
import matplotlib.pyplot as plt
# plt.imshow(np.min(mov, axis=0))
# plt.show()
assert (np.min(mov, axis=0)[92, 38] == -1)
def test_DGC_generate_frames(self):
dgc = sr.DriftingGratingCircle(monitor=self.monitor, indicator=self.indicator, background=0.,
coordinate='degree', center=(10., 90.), sf_list=(0.02, 0.04),
tf_list=(1.0,), dire_list=(45.,), con_list=(0.8,), radius_list=(20.,),
block_dur=2., midgap_dur=1., iteration=2, pregap_dur=1.5,
postgap_dur=3., is_blank_block=False)
frames = dgc.generate_frames()
assert (len(frames) == 930)
assert ([f[0] for f in frames[0:90]] == [0] * 90)
assert ([f[0] for f in frames[210:270]] == [0] * 60)
assert ([f[0] for f in frames[390:450]] == [0] * 60)
assert ([f[0] for f in frames[570:630]] == [0] * 60)
assert ([f[0] for f in frames[750:930]] == [0] * 180)
assert ([f[8] for f in frames[0:90]] == [-1.] * 90)
assert ([f[8] for f in frames[210:270]] == [-1.] * 60)
assert ([f[8] for f in frames[390:450]] == [-1.] * 60)
assert ([f[8] for f in frames[570:630]] == [-1.] * 60)
assert ([f[8] for f in frames[750:930]] == [-1.] * 180)
assert ([f[0] for f in frames[90:210]] == [1] * 120)
assert ([f[0] for f in frames[270:390]] == [1] * 120)
assert ([f[0] for f in frames[450:570]] == [1] * 120)
assert ([f[0] for f in frames[630:750]] == [1] * 120)
assert (frames[90][8] == 1.)
assert ([f[8] for f in frames[91:150]] == [0.] * 59)
assert (frames[150][8] == 1.)
assert ([f[8] for f in frames[151:210]] == [0.] * 59)
assert (frames[270][8] == 1.)
assert ([f[8] for f in frames[271:330]] == [0.] * 59)
assert (frames[330][8] == 1.)
assert ([f[8] for f in frames[331:390]] == [0.] * 59)
assert (frames[450][8] == 1.)
assert ([f[8] for f in frames[451:510]] == [0.] * 59)
assert (frames[510][8] == 1.)
assert ([f[8] for f in frames[511:570]] == [0.] * 59)
assert (frames[630][8] == 1.)
assert ([f[8] for f in frames[631:690]] == [0.] * 59)
assert (frames[690][8] == 1.)
assert ([f[8] for f in frames[691:750]] == [0.] * 59)
def test_DGC_blank_block(self):
dgc = sr.DriftingGratingCircle(monitor=self.monitor, indicator=self.indicator, background=0.,
coordinate='degree', center=(10., 90.), sf_list=(0.02,),
tf_list=(4.0,), dire_list=(45.,), con_list=(0.8,), radius_list=(20.,),
block_dur=0.5, midgap_dur=0.1, iteration=2, pregap_dur=0.2,
postgap_dur=0.3, is_blank_block=True)
frames = dgc.generate_frames()
# print('\n'.join([str(f) for f in frames]))
assert (len(frames) == 168)
for frame in frames:
assert (len(frame) == 9)
_ = dgc._generate_frames_for_index_display_condition((0., 0., 0., 0., 0.))
frames_unique_blank, index_to_display_blank = _
# print('\nDGC frames_unique_blank:')
# print('\n'.join([str(f) for f in frames_unique_blank]))
# print('\nDGC index_to_display_blank:')
# print(index_to_display_blank)
assert (frames_unique_blank == ((1, 1, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 1.0),
(1, 1, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0)))
assert (index_to_display_blank == [0] + [1] * 29)
frames_unique, condi_ind_in_frames_unique = dgc._generate_frames_unique_and_condi_ind_dict()
# print('\nDGC frames_unique:')
# print('\n'.join([str(f) for f in frames_unique]))
# print('\nDGC condi_ind_in_frames_unique:')
# print(condi_ind_in_frames_unique)
assert (frames_unique[-1] == (1, 1, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0))
assert (frames_unique[-2] == (1, 1, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 1.0))
assert (condi_ind_in_frames_unique['condi_0001'] == [16] + [17] * 29)
def test_DGC_generate_frames_for_index_display_condition(self):
dgc = sr.DriftingGratingCircle(monitor=self.monitor, indicator=self.indicator,
block_dur=2., sf_list=(0.04,), tf_list=(2.0,),
dire_list=(45.,), con_list=(0.8,), radius_list=(10.,),
midgap_dur=0.1, pregap_dur=0.5, postgap_dur=0.2,
iteration=2, is_blank_block=False)
conditions = dgc._generate_all_conditions()
# print len(conditions)
assert (len(conditions) == 1)
frames_unique_condi, index_to_display_condi = dgc._generate_frames_for_index_display_condition(conditions[0])
assert (index_to_display_condi == list(range(30)) * 4)
assert (max(index_to_display_condi) == len(frames_unique_condi) - 1)
# print '\n'.join([str(f) for f in frames_unique_condi])
assert ([f[0] for f in frames_unique_condi] == [1] * 30)
assert (frames_unique_condi[0][1] == 1)
assert (frames_unique_condi[0][8] == 1.)
assert ([f[1] for f in frames_unique_condi[1:]] == [0] * 29)
assert ([f[8] for f in frames_unique_condi[1:]] == [0.] * 29)
def test_DGC_generate_frames_unique_and_condi_ind_dict(self):
dgc = sr.DriftingGratingCircle(monitor=self.monitor, indicator=self.indicator,
block_dur=2., sf_list=(0.04,), tf_list=(1., 3.0,),
dire_list=(45., 90.), con_list=(0.8,), radius_list=(10.,),
midgap_dur=0.1, pregap_dur=0.5, postgap_dur=0.2,
iteration=2, is_blank_block=False)
frames_unique, condi_ind_in_frames_unique = dgc._generate_frames_unique_and_condi_ind_dict()
assert (len(condi_ind_in_frames_unique) == 4)
assert (set(condi_ind_in_frames_unique.keys()) == {'condi_0000', 'condi_0001', 'condi_0002', 'condi_0003'})
assert (len(frames_unique) == 161)
for frame in frames_unique:
assert (len(frame) == 9)
import numpy as np
for cond, ind in condi_ind_in_frames_unique.items():
assert (len(ind) == 120)
assert (ind[0] % 20 == 1)
assert (len(np.unique(ind)) == 60 or len(np.unique(ind)) == 20)
# print '\ncond'
# print ind
def test_DGC_generate_display_index(self):
dgc = sr.DriftingGratingCircle(monitor=self.monitor, indicator=self.indicator,
block_dur=2., sf_list=(0.04,), tf_list=(1., 3.0,),
dire_list=(45., 90.), con_list=(0.8,), radius_list=(10.,),
midgap_dur=0.1, pregap_dur=0.5, postgap_dur=0.2,
iteration=2, is_blank_block=False)
frames_unique, index_to_display = dgc._generate_display_index()
# print '\n'.join([str(f) for f in frames_unique])
assert (len(frames_unique) == 161)
assert (max(index_to_display) == len(frames_unique) - 1)
# print len(index_to_display)
assert (len(index_to_display) == 1044)
def test_LSN_generate_all_probes(self):
lsn = sr.LocallySparseNoise(monitor=self.monitor, indicator=self.indicator,
min_distance=20., background=0., coordinate='degree',
grid_space=(10., 10.), probe_size=(10., 10.),
probe_orientation=0., probe_frame_num=6, subregion=[-10., 10., 0., 30.],
sign='ON', iteration=1, pregap_dur=2., postgap_dur=3.,
is_include_edge=True, repeat=1)
all_probes = lsn._generate_all_probes()
all_probes = [tuple(p) for p in all_probes]
assert (set(all_probes) == {
(-10., 0., 1.), (0., 0., 1.), (10., 0., 1.),
(-10., 10., 1.), (0., 10., 1.), (10., 10., 1.),
(-10., 20., 1.), (0., 20., 1.), (10., 20., 1.),
(-10., 30., 1.), (0., 30., 1.), (10., 30., 1.),
})
def test_LSN_generate_probe_locs_one_frame(self):
lsn = sr.LocallySparseNoise(monitor=self.monitor, indicator=self.indicator,
min_distance=20., background=0., coordinate='degree',
grid_space=(10.,10.), probe_size=(10.,10.),
probe_orientation=0., probe_frame_num=6, subregion=[-10., 20., 0., 60.],
sign='ON', iteration=1, pregap_dur=2., postgap_dur=3.,
is_include_edge=True, repeat=1)
all_probes = lsn._generate_all_probes()
probes_one_frame, all_probes_left = lsn._generate_probe_locs_one_frame(all_probes)
import itertools
import numpy as np
for (p0, p1) in itertools.combinations(probes_one_frame, r=2):
curr_dis = np.sqrt((p0[0] - p1[0]) ** 2 + (p0[1] - p1[1]) **2)
# print (p0, p1), curr_dis
assert (curr_dis > 20.)
def test_LSN_generate_probe_sequence_one_iteration(self):
lsn = sr.LocallySparseNoise(monitor=self.monitor, indicator=self.indicator,
min_distance=20., background=0., coordinate='degree',
grid_space=(10., 10.), probe_size=(10., 10.),
probe_orientation=0., probe_frame_num=6, subregion=[-10., 20., 0., 60.],
sign='ON-OFF', iteration=1, pregap_dur=2., postgap_dur=3.,
is_include_edge=True, repeat=1)
all_probes = lsn._generate_all_probes()
frames = lsn._generate_probe_sequence_one_iteration(all_probes=all_probes, is_redistribute=False)
# print '\n'.join([str(f) for f in frames])
# print [len(f) for f in frames]
assert (sum([len(f) for f in frames]) == len(all_probes))
import itertools
import numpy as np
alt_lst = np.arange(-10., 25., 10)
azi_lst = np.arange(0., 65., 10)
all_probes = list(itertools.product(alt_lst, azi_lst, [-1., 1.]))
all_probes_frame = []
for frame in frames:
all_probes_frame += [tuple(probe) for probe in frame]
# asserting all pairs in the particular frame meet sparsity criterion
for (p0, p1) in itertools.combinations(frame, r=2):
curr_dis = np.sqrt((p0[0] - p1[0]) ** 2 + (p0[1] - p1[1]) ** 2)
# print (p0, p1), curr_dis
assert (curr_dis > 20.)
# assert all frames combined cover whole subregion
assert (set(all_probes) == set(all_probes_frame))
def test_LSN_is_fit(self):
# todo: finish this
pass
def test_LSN_redistribute_one_probe(self):
# todo: finish this
pass
def test_LSN_redistribute_probes(self):
lsn = sr.LocallySparseNoise(monitor=self.monitor, indicator=self.indicator,
min_distance=20., background=0., coordinate='degree',
grid_space=(10., 10.), probe_size=(10., 10.),
probe_orientation=0., probe_frame_num=6, subregion=[-10., 20., 0., 60.],
sign='ON-OFF', iteration=1, pregap_dur=2., postgap_dur=3.,
is_include_edge=True, repeat=1)
all_probes = lsn._generate_all_probes()
frames = lsn._generate_probe_sequence_one_iteration(all_probes=all_probes, is_redistribute=True)
# print '\n'.join([str(f) for f in frames])
# print [len(f) for f in frames]
assert (sum([len(f) for f in frames]) == len(all_probes))
import itertools
import numpy as np
alt_lst = np.arange(-10., 25., 10)
azi_lst = np.arange(0., 65., 10)
all_probes = list(itertools.product(alt_lst, azi_lst, [-1., 1.]))
all_probes_frame = []
for frame in frames:
all_probes_frame += [tuple(probe) for probe in frame]
# asserting all pairs in the particular frame meet sparsity criterion
for (p0, p1) in itertools.combinations(frame, r=2):
curr_dis = np.sqrt((p0[0] - p1[0]) ** 2 + (p0[1] - p1[1]) ** 2)
# print (p0, p1), curr_dis
assert (curr_dis > 20.)
# assert all frames combined cover whole subregion
assert (set(all_probes) == set(all_probes_frame))
def test_LSN_generate_frames_for_index_display(self):
lsn = sr.LocallySparseNoise(monitor=self.monitor, indicator=self.indicator,
min_distance=20., background=0., coordinate='degree',
grid_space=(10., 10.), probe_size=(10., 10.),
probe_orientation=0., probe_frame_num=6, subregion=[-10., 20., 0., 60.],
sign='ON-OFF', iteration=2, pregap_dur=2., postgap_dur=3.,
is_include_edge=True, repeat=1)
frames_unique = lsn._generate_frames_for_index_display()
# print len(frames_unique)
# print '\n'.join([str(f) for f in frames_unique])
assert (len(frames_unique) % 2 == 1)
for frame in frames_unique:
assert (len(frame) == 4)
def test_LSN_generate_display_index(self):
lsn = sr.LocallySparseNoise(monitor=self.monitor, indicator=self.indicator,
min_distance=20., background=0., coordinate='degree',
grid_space=(10., 10.), probe_size=(10., 10.),
probe_orientation=30., probe_frame_num=6, subregion=[-10., 20., 0., 60.],
sign='ON-OFF', iteration=2, pregap_dur=2., postgap_dur=3.,
is_include_edge=True, repeat=1)
frames_unique, index_to_display = lsn._generate_display_index()
# print index_to_display
assert (index_to_display[:lsn.pregap_frame_num] == [0] * lsn.pregap_frame_num)
assert (index_to_display[-lsn.postgap_frame_num:] == [0] * lsn.postgap_frame_num)
assert (len(index_to_display) == (len(frames_unique) - 1) * lsn.probe_frame_num / 2 +
lsn.pregap_frame_num + lsn.postgap_frame_num)
def test_LSN_repeat(self):
lsn = sr.LocallySparseNoise(monitor=self.monitor, indicator=self.indicator,
min_distance=20., background=0., coordinate='degree',
grid_space=(10., 10.), probe_size=(10., 10.),
probe_orientation=0., probe_frame_num=4, subregion=[-10., 20., 0., 60.],
sign='ON-OFF', iteration=1, pregap_dur=2., postgap_dur=3.,
is_include_edge=True, repeat=3)
import itertools
import numpy as np
alt_lst = np.arange(-10., 25., 10)
azi_lst = np.arange(0., 65., 10)
all_probes = list(itertools.product(alt_lst, azi_lst, [-1., 1.]))
frames_unique, display_index = lsn._generate_display_index()
for probe in all_probes:
present_frames = 0
for di in display_index:
if frames_unique[di][1] is not None and list(probe) in frames_unique[di][1]:
present_frames += 1
# print('probe:{}, number of frames: {}'.format(str(probe), present_frames))
assert (present_frames == 4 * 3)
def test_SGC_generate_frames_for_index_display(self):
sgc = sr.StaticGratingCircle(monitor=self.monitor, indicator=self.indicator, background=0.,
coordinate='degree', center=(0., 30.), sf_list=(0.02, 0.04, 0.08),
ori_list=(0., 45., 90., 135.), con_list=(0.2, 0.5, 0.8),
radius_list=(50.,), phase_list=(0., 90., 180., 270.),
display_dur=0.25, midgap_dur=0., iteration=2, pregap_dur=2.,
postgap_dur=3., is_blank_block=False)
frames_unique = sgc._generate_frames_for_index_display()
# print len(frames_unique)
assert (len(frames_unique) == (3 * 4 * 3 * 4 * 2 + 1))
for frame in frames_unique:
assert(len(frame) == 7)
sgc = sr.StaticGratingCircle(monitor=self.monitor, indicator=self.indicator, background=0.,
coordinate='degree', center=(0., 30.), sf_list=(0.02, 0.04, 0.08),
ori_list=(0., 90., 180., 270.), con_list=(0.2, 0.5, 0.8),
radius_list=(50.,), phase_list=(0., 90., 180., 270.),
display_dur=0.25, midgap_dur=0., iteration=2, pregap_dur=2.,
postgap_dur=3., is_blank_block=False)
frames_unique = sgc._generate_frames_for_index_display()
# print len(frames_unique)
assert (len(frames_unique) == (3 * 2 * 3 * 4 * 2 + 1))
def test_SGC_generate_display_index(self):
sgc = sr.StaticGratingCircle(monitor=self.monitor, indicator=self.indicator, background=0.,
coordinate='degree', center=(0., 30.), sf_list=(0.02, 0.04, 0.08),
ori_list=(0., 45., 90., 135.), con_list=(0.2, 0.5, 0.8),
radius_list=(50.,), phase_list=(0., 90., 180., 270.),
display_dur=0.25, midgap_dur=0.1, iteration=2, pregap_dur=2.,
postgap_dur=3., is_blank_block=False)
frames_unique, index_to_display = sgc._generate_display_index()
for frame in frames_unique:
assert (len(frame) == 7)
assert (max(index_to_display) == len(frames_unique) - 1)
# print len(index_to_display)
# print index_to_display
assert (len(index_to_display) == 6342)
def test_SGC_blank_block(self):
sgc = sr.StaticGratingCircle(monitor=self.monitor, indicator=self.indicator, background=0.,
coordinate='degree', center=(0., 30.), sf_list=(0.04,),
ori_list=(90., ), con_list=(0.8, ), radius_list=(50.,),
phase_list=(0., 180.,), display_dur=0.1, midgap_dur=0.1,
iteration=2, pregap_dur=0., postgap_dur=0., is_blank_block=True)
all_conditions = sgc._generate_all_conditions()
# print('\nSGC all_conditions:')
# print('\n'.join([str(c) for c in all_conditions]))
assert (all_conditions[-1] == (0., 0., 0., 0., 0.))
frames_unique = sgc._generate_frames_for_index_display()
for frame in frames_unique:
assert (len(frame) == 7)
# print('\nSGC frames_unique:')
# print('\n'.join([str(f) for f in frames_unique]))
assert (frames_unique[-1] == (1, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0))
assert (frames_unique[-2] == (1, 0.0, 0.0, 0.0, 0.0, 0.0, 1.0))
_, index_to_display = sgc._generate_display_index()
assert (len(index_to_display) == 66)
def test_SS_generate_display_index(self):
ss = sr.StimulusSeparator(monitor=self.monitor, indicator=self.indicator,
coordinate='degree', background=0.,
indicator_on_frame_num=4, indicator_off_frame_num=4,
cycle_num=10, pregap_dur=0., postgap_dur=0.)
frames_unique, index_to_display = ss._generate_display_index()
assert (frames_unique == ((0, -1), (1, 1.), (1, -1.)))
assert (len(index_to_display) == 80)
for frame in frames_unique:
assert (len(frame) == 2)
def test_SI_wrap_images(self):
si = sr.StaticImages(monitor=self.monitor, indicator=self.indicator, background=0.,
coordinate='degree', img_center=(0., 60.), deg_per_pixel=(0.1, 0.1),
display_dur=0.25, midgap_dur=0., iteration=1, pregap_dur=2.,
postgap_dur=3., is_blank_block=False)
img_w_path = os.path.join(self.curr_folder, 'test_data', 'wrapped_images_for_display.hdf5')
if os.path.isfile(img_w_path):
os.remove(img_w_path)
si.wrap_images(work_dir=os.path.join(self.curr_folder, 'test_data'))
import h5py
img_w_f = h5py.File(img_w_path, 'r')
assert (img_w_f['images_wrapped/images'].shape == (2, 120, 160))
assert (img_w_f['images_wrapped/altitude'].shape == (120, 160))
assert (img_w_f['images_wrapped/azimuth'].shape == (120, 160))
import numpy as np
assert (np.array_equal(img_w_f['images_wrapped/altitude'].value, self.monitor.deg_coord_y))
assert (np.array_equal(img_w_f['images_wrapped/azimuth'].value, self.monitor.deg_coord_x))
assert (img_w_f['images_dewrapped/images'].shape == (2, 270, 473))
assert (img_w_f['images_dewrapped/altitude'].shape == (270, 473))
assert (img_w_f['images_dewrapped/azimuth'].shape == (270, 473))
img_w_f.close()
os.remove(img_w_path)
def test_SI_generate_frames_for_index_display(self):
si = sr.StaticImages(monitor=self.monitor, indicator=self.indicator, background=0.,
coordinate='degree', img_center=(0., 60.), deg_per_pixel=(0.1, 0.1),
display_dur=0.25, midgap_dur=0., iteration=1, pregap_dur=2.,
postgap_dur=3., is_blank_block=False)
import numpy as np
si.images_wrapped = np.random.rand(27, 120, 160)
frames_unique = si._generate_frames_for_index_display()
assert (len(frames_unique) == 55)
for frame in frames_unique:
assert (len(frame) == 3)
def test_SI_generate_display_index(self):
si = sr.StaticImages(monitor=self.monitor, indicator=self.indicator, background=0.,
coordinate='degree', img_center=(0., 60.), deg_per_pixel=(0.1, 0.1),
display_dur=0.25, midgap_dur=0.1, iteration=2, pregap_dur=2.,
postgap_dur=3., is_blank_block=False)
import numpy as np
si.images_wrapped = np.random.rand(15, 120, 160)
frames_unique, index_to_display = si._generate_display_index()
assert (len(index_to_display) == 924)
for frame in frames_unique:
assert (len(frame) == 3)
def test_SI_blank_block(self):
si = sr.StaticImages(monitor=self.monitor, indicator=self.indicator, background=0.,
coordinate='degree', img_center=(0., 60.), deg_per_pixel=(0.1, 0.1),
display_dur=0.1, midgap_dur=0.1, iteration=1, pregap_dur=0.,
postgap_dur=0., is_blank_block=True)
import numpy as np
si.images_wrapped = np.random.rand(2, 120, 160)
frames_unique, index_to_display = si._generate_display_index()
assert (len(frames_unique) == 7)
for frame in frames_unique:
assert (len(frame) == 3)
assert (frames_unique[-1] == (1, -1, 0.))
assert (frames_unique[-2] == (1, -1, 1.))
# print('frames_unique:')
# print('\n'.join([str(f) for f in frames_unique]))
# print('\nindex_to_display: {}.'.format(index_to_display))
# print('\nframes to be displayed:')
# frames = [frames_unique[i] for i in index_to_display]
# print('\n'.join([str(f) for f in frames]))
assert (len(index_to_display) == 30)
def test_SL_generate_frames_for_index_display(self):
sl = sr.SinusoidalLuminance(monitor=self.monitor, indicator=self.indicator, background=0.,
coordinate='degree', pregap_dur=2., postgap_dur=3., midgap_dur=0.,
max_level=0.5, min_level=-0.2, start_phase=0., frequency=4., cycle_num=10)
frames = sl._generate_frames_for_index_display()
# print(frames)
assert (len(frames) == 16)
assert (frames[0] == (0, None, -1.))
for i in range(1, 8):
assert (frames[i][2] == 1.)
for i in range(8, 16):
assert (frames[i][2] == 0.)
colors = [f[1] for f in frames]
# print(colors)
assert(colors[1] == 0.15)
assert(max(colors[1:]) <= 0.5)
assert(min(colors[1:]) >= -0.2)
def test_SL_generate_display_index(self):
sl = sr.SinusoidalLuminance(monitor=self.monitor, indicator=self.indicator, background=0.,
coordinate='degree', pregap_dur=2., postgap_dur=3., midgap_dur=0.5,
max_level=1., min_level=-1., start_phase=0., frequency=4., cycle_num=3)
ind = sl._generate_display_index()
# print(ind)
assert(ind == [0] * 120 +
list(range(1, 16)) + [0] * 30 +
list(range(1, 16)) + [0] * 30 +
list(range(1, 16)) +
[0] * 180)
if __name__ == '__main__':
unittest.main(verbosity=2)
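# A minimal illustrative sketch of the frames_unique / index_to_display
# pattern these tests exercise: the full display sequence is reconstructed by
# fancy-indexing the unique frames. All names below are hypothetical and not
# part of the retinotopic_mapping API.
import numpy as np
frames_unique_demo = np.array([0.0, 1.0, -1.0])   # hypothetical unique frame values
index_to_display_demo = [0, 0, 1, 2, 1, 2, 0, 0]  # hypothetical per-frame indices
full_sequence = frames_unique_demo[np.array(index_to_display_demo)]
assert full_sequence.shape[0] == len(index_to_display_demo)
assert max(index_to_display_demo) == len(frames_unique_demo) - 1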
|
gpl-3.0
|
justincassidy/scikit-learn
|
examples/cross_decomposition/plot_compare_cross_decomposition.py
|
142
|
4761
|
"""
===================================
Compare cross decomposition methods
===================================
Simple usage of various cross decomposition algorithms:
- PLSCanonical
- PLSRegression, with multivariate response, a.k.a. PLS2
- PLSRegression, with univariate response, a.k.a. PLS1
- CCA
Given 2 multivariate covarying two-dimensional datasets, X and Y,
PLS extracts the 'directions of covariance', i.e. the components of each
dataset that explain the most shared variance between both datasets.
This is apparent on the **scatterplot matrix** display: components 1 in
dataset X and dataset Y are maximally correlated (points lie around the
first diagonal). This is also true for components 2 in both datasets;
however, the correlation across datasets for different components is
weak: the point cloud is very spherical.
"""
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from sklearn.cross_decomposition import PLSCanonical, PLSRegression, CCA
###############################################################################
# Dataset based latent variables model
n = 500
# 2 latent variables:
l1 = np.random.normal(size=n)
l2 = np.random.normal(size=n)
latents = np.array([l1, l1, l2, l2]).T
X = latents + np.random.normal(size=4 * n).reshape((n, 4))
Y = latents + np.random.normal(size=4 * n).reshape((n, 4))
X_train = X[:n // 2]
Y_train = Y[:n // 2]
X_test = X[n // 2:]
Y_test = Y[n // 2:]
print("Corr(X)")
print(np.round(np.corrcoef(X.T), 2))
print("Corr(Y)")
print(np.round(np.corrcoef(Y.T), 2))
###############################################################################
# Canonical (symmetric) PLS
# Transform data
# ~~~~~~~~~~~~~~
plsca = PLSCanonical(n_components=2)
plsca.fit(X_train, Y_train)
X_train_r, Y_train_r = plsca.transform(X_train, Y_train)
X_test_r, Y_test_r = plsca.transform(X_test, Y_test)
# Scatter plot of scores
# ~~~~~~~~~~~~~~~~~~~~~~
# 1) On diagonal plot X vs Y scores on each component
plt.figure(figsize=(12, 8))
plt.subplot(221)
plt.plot(X_train_r[:, 0], Y_train_r[:, 0], "ob", label="train")
plt.plot(X_test_r[:, 0], Y_test_r[:, 0], "or", label="test")
plt.xlabel("x scores")
plt.ylabel("y scores")
plt.title('Comp. 1: X vs Y (test corr = %.2f)' %
np.corrcoef(X_test_r[:, 0], Y_test_r[:, 0])[0, 1])
plt.xticks(())
plt.yticks(())
plt.legend(loc="best")
plt.subplot(224)
plt.plot(X_train_r[:, 1], Y_train_r[:, 1], "ob", label="train")
plt.plot(X_test_r[:, 1], Y_test_r[:, 1], "or", label="test")
plt.xlabel("x scores")
plt.ylabel("y scores")
plt.title('Comp. 2: X vs Y (test corr = %.2f)' %
np.corrcoef(X_test_r[:, 1], Y_test_r[:, 1])[0, 1])
plt.xticks(())
plt.yticks(())
plt.legend(loc="best")
# 2) Off diagonal plot components 1 vs 2 for X and Y
plt.subplot(222)
plt.plot(X_train_r[:, 0], X_train_r[:, 1], "*b", label="train")
plt.plot(X_test_r[:, 0], X_test_r[:, 1], "*r", label="test")
plt.xlabel("X comp. 1")
plt.ylabel("X comp. 2")
plt.title('X comp. 1 vs X comp. 2 (test corr = %.2f)'
% np.corrcoef(X_test_r[:, 0], X_test_r[:, 1])[0, 1])
plt.legend(loc="best")
plt.xticks(())
plt.yticks(())
plt.subplot(223)
plt.plot(Y_train_r[:, 0], Y_train_r[:, 1], "*b", label="train")
plt.plot(Y_test_r[:, 0], Y_test_r[:, 1], "*r", label="test")
plt.xlabel("Y comp. 1")
plt.ylabel("Y comp. 2")
plt.title('Y comp. 1 vs Y comp. 2 (test corr = %.2f)'
% np.corrcoef(Y_test_r[:, 0], Y_test_r[:, 1])[0, 1])
plt.legend(loc="best")
plt.xticks(())
plt.yticks(())
plt.show()
###############################################################################
# PLS regression, with multivariate response, a.k.a. PLS2
n = 1000
q = 3
p = 10
X = np.random.normal(size=n * p).reshape((n, p))
B = np.array([[1, 2] + [0] * (p - 2)] * q).T
# each Yj = 1*X1 + 2*X2 + noise
Y = np.dot(X, B) + np.random.normal(size=n * q).reshape((n, q)) + 5
pls2 = PLSRegression(n_components=3)
pls2.fit(X, Y)
print("True B (such that: Y = XB + Err)")
print(B)
# compare pls2.coefs with B
print("Estimated B")
print(np.round(pls2.coefs, 1))
pls2.predict(X)
###############################################################################
# PLS regression, with univariate response, a.k.a. PLS1
n = 1000
p = 10
X = np.random.normal(size=n * p).reshape((n, p))
y = X[:, 0] + 2 * X[:, 1] + np.random.normal(size=n * 1) + 5
pls1 = PLSRegression(n_components=3)
pls1.fit(X, y)
# note that the number of components exceeds 1 (the dimension of y)
print("Estimated betas")
print(np.round(pls1.coefs, 1))
###############################################################################
# CCA (PLS mode B with symmetric deflation)
cca = CCA(n_components=2)
cca.fit(X_train, Y_train)
X_train_r, Y_train_r = cca.transform(X_train, Y_train)
X_test_r, Y_test_r = cca.transform(X_test, Y_test)
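# Illustrative check (assuming the CCA fit above): the per-component train
# correlations of the canonical scores can be inspected directly.
print("CCA comp. 1 train corr = %.2f" % np.corrcoef(X_train_r[:, 0], Y_train_r[:, 0])[0, 1])
print("CCA comp. 2 train corr = %.2f" % np.corrcoef(X_train_r[:, 1], Y_train_r[:, 1])[0, 1])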
|
bsd-3-clause
|
sheqi/TVpgGLM
|
test/practice5_Latent_Distance_Weight_Scott.py
|
1
|
3415
|
# Latent distance model for neural data
import numpy as np
import numpy.random as npr
from autograd import grad
from hips.inference.hmc import hmc
from pybasicbayes.distributions import Gaussian
from pyglm.utils.utils import expand_scalar, compute_optimal_rotation
from matplotlib import pyplot as plt
# Simulated data
dim = 2
N = 20
r = 1
th = np.linspace(0, 2 * np.pi, N, endpoint=False)
x = r * np.cos(th)
y = r * np.sin(th)
L = np.hstack((x[:,None], y[:,None]))
#w = 4
#s = 0.8
#x = s * (np.arange(N) % w)
#y = s * (np.arange(N) // w)
#L = np.hstack((x[:,None], y[:,None]))
W = np.zeros((N,N))
# Distance matrix
D = ((L[:, None, :] - L[None, :, :]) ** 2).sum(2)
Mu = -D
Mu = np.tile(Mu[:,:,None], (1,1,1))
sig = 0.01*np.eye(N)
Sig = np.tile(sig[:,:,None,None], (1,1,1,1))
L_estimate = np.random.randn(N, dim)
for n in range(N):
for m in range(N):
W[n, m] = npr.multivariate_normal(Mu[n, m], Sig[n, m])
# Inference
def _hmc_log_probability(N, dim, L, W):
"""
Compute the log probability as a function of L.
This allows us to take the gradients wrt L using autograd.
:param L:
:param W:
:return:
"""
import autograd.numpy as anp
# Compute pairwise distance
L1 = anp.reshape(L, (N, 1, dim))
L2 = anp.reshape(L, (1, N, dim))
# Mu = a * anp.sqrt(anp.sum((L1-L2)**2, axis=2)) + b
Mu = -anp.sum((L1 - L2) ** 2, axis=2)
X = (W - Mu[:, :, None])
# Get the covariance and precision
Sig = 0.01
Lmb = 1. / Sig
lp = anp.sum(-0.5 * X ** 2 * Lmb)
# Log prior of L under spherical Gaussian prior
lp += -0.5 * anp.sum(L * L)
return lp
def plot_LatentDistanceModel(W, L, N, L_true=None, ax=None):
"""
If D==2, plot the embedded nodes and the connections between them
:param L_true: If given, rotate the inferred features to match F_true
:return:
"""
# Color the weights by their value
import matplotlib.cm as cm
cmap = cm.get_cmap("RdBu")
W_lim = abs(W[:, :]).max()
W_rel = (W[:, :] - (-W_lim)) / (2 * W_lim)
if ax is None:
fig = plt.figure()
ax = fig.add_subplot(111, aspect="equal")
# If true locations are given, rotate L to match L_true
if L_true is not None:
R = compute_optimal_rotation(L, L_true)
L = L.dot(R)
# Scatter plot the node embeddings
# Plot the edges between nodes
for n1 in range(N):
for n2 in range(N):
ax.plot([L[n1, 0], L[n2, 0]], [L[n1, 1], L[n2, 1]], '-', color=cmap(W_rel[n1, n2]), lw=1.0)
ax.plot(L[:, 0], L[:, 1], 's', color='k', markerfacecolor='k', markeredgecolor='k')
# Get extreme feature values
b = np.amax(abs(L)) + L[:].std() / 2.0
# Plot grids for origin
ax.plot([0, 0], [-b, b], ':k', lw=0.5)
ax.plot([-b, b], [0, 0], ':k', lw=0.5)
# Set the limits
ax.set_xlim([-b, b])
ax.set_ylim([-b, b])
# Labels
ax.set_xlabel('Latent Dimension 1')
ax.set_ylabel('Latent Dimension 2')
plt.show()
return ax
for i in range(1000):
L1 = L_estimate
lp = lambda L1: _hmc_log_probability(N, dim, L1, W)
dlp = grad(lp)
stepsz = 0.001
nsteps = 10
L_estimate = hmc(lp, dlp, stepsz, nsteps, L1.copy(), negative_log_prob=False)
D1 = ((L_estimate[:, None, :] - L_estimate[None, :, :]) ** 2).sum(2)
W_estimate = -D1
plot_LatentDistanceModel(W_estimate, L_estimate, N)
plot_LatentDistanceModel(W, L, N)
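# Illustrative follow-up: the latent locations are identifiable only up to a
# rotation/reflection (hence compute_optimal_rotation in the plotting helper),
# so compare pairwise distances, which are invariant, rather than raw
# coordinates. D and D1 are the true and estimated squared distance matrices
# computed above.
dist_err = np.abs(np.sqrt(D1) - np.sqrt(D)).mean()
print("mean absolute error of pairwise distances:", dist_err)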
|
mit
|
kushalbhola/MyStuff
|
Practice/PythonApplication/env/Lib/site-packages/pandas/tests/frame/test_api.py
|
1
|
19375
|
from copy import deepcopy
import datetime
import pydoc
import numpy as np
import pytest
import pandas as pd
from pandas import (
Categorical,
DataFrame,
Series,
SparseDataFrame,
SparseDtype,
compat,
date_range,
timedelta_range,
)
import pandas.util.testing as tm
from pandas.util.testing import (
assert_almost_equal,
assert_frame_equal,
assert_series_equal,
)
class SharedWithSparse:
"""
A collection of tests DataFrame and SparseDataFrame can share.
In generic tests on this class, use ``self._assert_frame_equal()`` and
``self._assert_series_equal()`` which are implemented in sub-classes
and dispatch correctly.
"""
def _assert_frame_equal(self, left, right):
"""Dispatch to frame class dependent assertion"""
raise NotImplementedError
def _assert_series_equal(self, left, right):
"""Dispatch to series class dependent assertion"""
raise NotImplementedError
def test_copy_index_name_checking(self, float_frame):
# don't want to be able to modify the index stored elsewhere after
# making a copy
for attr in ("index", "columns"):
ind = getattr(float_frame, attr)
ind.name = None
cp = float_frame.copy()
getattr(cp, attr).name = "foo"
assert getattr(float_frame, attr).name is None
def test_getitem_pop_assign_name(self, float_frame):
s = float_frame["A"]
assert s.name == "A"
s = float_frame.pop("A")
assert s.name == "A"
s = float_frame.loc[:, "B"]
assert s.name == "B"
s2 = s.loc[:]
assert s2.name == "B"
def test_get_value(self, float_frame):
for idx in float_frame.index:
for col in float_frame.columns:
with tm.assert_produces_warning(FutureWarning, check_stacklevel=False):
result = float_frame.get_value(idx, col)
expected = float_frame[col][idx]
tm.assert_almost_equal(result, expected)
def test_add_prefix_suffix(self, float_frame):
with_prefix = float_frame.add_prefix("foo#")
expected = pd.Index(["foo#{c}".format(c=c) for c in float_frame.columns])
tm.assert_index_equal(with_prefix.columns, expected)
with_suffix = float_frame.add_suffix("#foo")
expected = pd.Index(["{c}#foo".format(c=c) for c in float_frame.columns])
tm.assert_index_equal(with_suffix.columns, expected)
with_pct_prefix = float_frame.add_prefix("%")
expected = pd.Index(["%{c}".format(c=c) for c in float_frame.columns])
tm.assert_index_equal(with_pct_prefix.columns, expected)
with_pct_suffix = float_frame.add_suffix("%")
expected = pd.Index(["{c}%".format(c=c) for c in float_frame.columns])
tm.assert_index_equal(with_pct_suffix.columns, expected)
def test_get_axis(self, float_frame):
f = float_frame
assert f._get_axis_number(0) == 0
assert f._get_axis_number(1) == 1
assert f._get_axis_number("index") == 0
assert f._get_axis_number("rows") == 0
assert f._get_axis_number("columns") == 1
assert f._get_axis_name(0) == "index"
assert f._get_axis_name(1) == "columns"
assert f._get_axis_name("index") == "index"
assert f._get_axis_name("rows") == "index"
assert f._get_axis_name("columns") == "columns"
assert f._get_axis(0) is f.index
assert f._get_axis(1) is f.columns
with pytest.raises(ValueError, match="No axis named"):
f._get_axis_number(2)
with pytest.raises(ValueError, match="No axis.*foo"):
f._get_axis_name("foo")
with pytest.raises(ValueError, match="No axis.*None"):
f._get_axis_name(None)
with pytest.raises(ValueError, match="No axis named"):
f._get_axis_number(None)
def test_keys(self, float_frame):
getkeys = float_frame.keys
assert getkeys() is float_frame.columns
def test_column_contains_raises(self, float_frame):
with pytest.raises(TypeError, match="unhashable type: 'Index'"):
float_frame.columns in float_frame
def test_tab_completion(self):
# DataFrame whose columns are identifiers shall have them in __dir__.
df = pd.DataFrame([list("abcd"), list("efgh")], columns=list("ABCD"))
for key in list("ABCD"):
assert key in dir(df)
assert isinstance(df.__getitem__("A"), pd.Series)
# DataFrame whose first-level columns are identifiers shall have
# them in __dir__.
df = pd.DataFrame(
[list("abcd"), list("efgh")],
columns=pd.MultiIndex.from_tuples(list(zip("ABCD", "EFGH"))),
)
for key in list("ABCD"):
assert key in dir(df)
for key in list("EFGH"):
assert key not in dir(df)
assert isinstance(df.__getitem__("A"), pd.DataFrame)
def test_not_hashable(self):
empty_frame = DataFrame()
df = self.klass([1])
msg = "'(Sparse)?DataFrame' objects are mutable, thus they cannot be hashed"
with pytest.raises(TypeError, match=msg):
hash(df)
with pytest.raises(TypeError, match=msg):
hash(empty_frame)
def test_new_empty_index(self):
df1 = self.klass(np.random.randn(0, 3))
df2 = self.klass(np.random.randn(0, 3))
df1.index.name = "foo"
assert df2.index.name is None
def test_array_interface(self, float_frame):
with np.errstate(all="ignore"):
result = np.sqrt(float_frame)
assert isinstance(result, type(float_frame))
assert result.index is float_frame.index
assert result.columns is float_frame.columns
self._assert_frame_equal(result, float_frame.apply(np.sqrt))
def test_get_agg_axis(self, float_frame):
cols = float_frame._get_agg_axis(0)
assert cols is float_frame.columns
idx = float_frame._get_agg_axis(1)
assert idx is float_frame.index
msg = r"Axis must be 0 or 1 \(got 2\)"
with pytest.raises(ValueError, match=msg):
float_frame._get_agg_axis(2)
def test_nonzero(self, float_frame, float_string_frame):
empty_frame = DataFrame()
assert empty_frame.empty
assert not float_frame.empty
assert not float_string_frame.empty
# corner case
df = DataFrame({"A": [1.0, 2.0, 3.0], "B": ["a", "b", "c"]}, index=np.arange(3))
del df["A"]
assert not df.empty
def test_iteritems(self):
df = self.klass([[1, 2, 3], [4, 5, 6]], columns=["a", "a", "b"])
for k, v in df.items():
assert isinstance(v, self.klass._constructor_sliced)
def test_items(self):
# GH 17213, GH 13918
cols = ["a", "b", "c"]
df = DataFrame([[1, 2, 3], [4, 5, 6]], columns=cols)
for c, (k, v) in zip(cols, df.items()):
assert c == k
assert isinstance(v, Series)
assert (df[k] == v).all()
def test_iter(self, float_frame):
assert tm.equalContents(list(float_frame), float_frame.columns)
def test_iterrows(self, float_frame, float_string_frame):
for k, v in float_frame.iterrows():
exp = float_frame.loc[k]
self._assert_series_equal(v, exp)
for k, v in float_string_frame.iterrows():
exp = float_string_frame.loc[k]
self._assert_series_equal(v, exp)
def test_iterrows_iso8601(self):
# GH 19671
if self.klass == SparseDataFrame:
pytest.xfail(reason="SparseBlock datetime type not implemented.")
s = self.klass(
{
"non_iso8601": ["M1701", "M1802", "M1903", "M2004"],
"iso8601": date_range("2000-01-01", periods=4, freq="M"),
}
)
for k, v in s.iterrows():
exp = s.loc[k]
self._assert_series_equal(v, exp)
def test_iterrows_corner(self):
# gh-12222
df = DataFrame(
{
"a": [datetime.datetime(2015, 1, 1)],
"b": [None],
"c": [None],
"d": [""],
"e": [[]],
"f": [set()],
"g": [{}],
}
)
expected = Series(
[datetime.datetime(2015, 1, 1), None, None, "", [], set(), {}],
index=list("abcdefg"),
name=0,
dtype="object",
)
_, result = next(df.iterrows())
tm.assert_series_equal(result, expected)
def test_itertuples(self, float_frame):
for i, tup in enumerate(float_frame.itertuples()):
s = self.klass._constructor_sliced(tup[1:])
s.name = tup[0]
expected = float_frame.iloc[i, :].reset_index(drop=True)
self._assert_series_equal(s, expected)
df = self.klass(
{"floats": np.random.randn(5), "ints": range(5)}, columns=["floats", "ints"]
)
for tup in df.itertuples(index=False):
assert isinstance(tup[1], int)
df = self.klass(data={"a": [1, 2, 3], "b": [4, 5, 6]})
dfaa = df[["a", "a"]]
assert list(dfaa.itertuples()) == [(0, 1, 1), (1, 2, 2), (2, 3, 3)]
# repr with int on 32-bit/windows
if not (compat.is_platform_windows() or compat.is_platform_32bit()):
assert (
repr(list(df.itertuples(name=None)))
== "[(0, 1, 4), (1, 2, 5), (2, 3, 6)]"
)
tup = next(df.itertuples(name="TestName"))
assert tup._fields == ("Index", "a", "b")
assert (tup.Index, tup.a, tup.b) == tup
assert type(tup).__name__ == "TestName"
df.columns = ["def", "return"]
tup2 = next(df.itertuples(name="TestName"))
assert tup2 == (0, 1, 4)
assert tup2._fields == ("Index", "_1", "_2")
df3 = DataFrame({"f" + str(i): [i] for i in range(1024)})
# will raise SyntaxError if trying to create namedtuple
tup3 = next(df3.itertuples())
assert not hasattr(tup3, "_fields")
assert isinstance(tup3, tuple)
def test_sequence_like_with_categorical(self):
# GH 7839
# make sure can iterate
df = DataFrame(
{"id": [1, 2, 3, 4, 5, 6], "raw_grade": ["a", "b", "b", "a", "a", "e"]}
)
df["grade"] = Categorical(df["raw_grade"])
# basic sequencing testing
result = list(df.grade.values)
expected = np.array(df.grade.values).tolist()
tm.assert_almost_equal(result, expected)
# iteration
for t in df.itertuples(index=False):
str(t)
for row, s in df.iterrows():
str(s)
for c, col in df.items():
str(col)
def test_len(self, float_frame):
assert len(float_frame) == len(float_frame.index)
def test_values(self, float_frame, float_string_frame):
frame = float_frame
arr = frame.values
frame_cols = frame.columns
for i, row in enumerate(arr):
for j, value in enumerate(row):
col = frame_cols[j]
if np.isnan(value):
assert np.isnan(frame[col][i])
else:
assert value == frame[col][i]
# mixed type
arr = float_string_frame[["foo", "A"]].values
assert arr[0, 0] == "bar"
df = self.klass({"complex": [1j, 2j, 3j], "real": [1, 2, 3]})
arr = df.values
assert arr[0, 0] == 1j
# single block corner case
arr = float_frame[["A", "B"]].values
expected = float_frame.reindex(columns=["A", "B"]).values
assert_almost_equal(arr, expected)
def test_to_numpy(self):
df = pd.DataFrame({"A": [1, 2], "B": [3, 4.5]})
expected = np.array([[1, 3], [2, 4.5]])
result = df.to_numpy()
tm.assert_numpy_array_equal(result, expected)
def test_to_numpy_dtype(self):
df = pd.DataFrame({"A": [1, 2], "B": [3, 4.5]})
expected = np.array([[1, 3], [2, 4]], dtype="int64")
result = df.to_numpy(dtype="int64")
tm.assert_numpy_array_equal(result, expected)
def test_to_numpy_copy(self):
arr = np.random.randn(4, 3)
df = pd.DataFrame(arr)
assert df.values.base is arr
assert df.to_numpy(copy=False).base is arr
assert df.to_numpy(copy=True).base is None
def test_transpose(self, float_frame):
frame = float_frame
dft = frame.T
for idx, series in dft.items():
for col, value in series.items():
if np.isnan(value):
assert np.isnan(frame[col][idx])
else:
assert value == frame[col][idx]
# mixed type
index, data = tm.getMixedTypeDict()
mixed = self.klass(data, index=index)
mixed_T = mixed.T
for col, s in mixed_T.items():
assert s.dtype == np.object_
def test_swapaxes(self):
df = self.klass(np.random.randn(10, 5))
self._assert_frame_equal(df.T, df.swapaxes(0, 1))
self._assert_frame_equal(df.T, df.swapaxes(1, 0))
self._assert_frame_equal(df, df.swapaxes(0, 0))
msg = (
"No axis named 2 for object type"
r" <class 'pandas.core(.sparse)?.frame.(Sparse)?DataFrame'>"
)
with pytest.raises(ValueError, match=msg):
df.swapaxes(2, 5)
def test_axis_aliases(self, float_frame):
f = float_frame
# reg name
expected = f.sum(axis=0)
result = f.sum(axis="index")
assert_series_equal(result, expected)
expected = f.sum(axis=1)
result = f.sum(axis="columns")
assert_series_equal(result, expected)
def test_class_axis(self):
# GH 18147
# no exception and no empty docstring
assert pydoc.getdoc(DataFrame.index)
assert pydoc.getdoc(DataFrame.columns)
def test_more_values(self, float_string_frame):
values = float_string_frame.values
assert values.shape[1] == len(float_string_frame.columns)
def test_repr_with_mi_nat(self, float_string_frame):
df = self.klass(
{"X": [1, 2]}, index=[[pd.NaT, pd.Timestamp("20130101")], ["a", "b"]]
)
result = repr(df)
expected = " X\nNaT a 1\n2013-01-01 b 2"
assert result == expected
def test_items_names(self, float_string_frame):
for k, v in float_string_frame.items():
assert v.name == k
def test_series_put_names(self, float_string_frame):
series = float_string_frame._series
for k, v in series.items():
assert v.name == k
def test_empty_nonzero(self):
df = self.klass([1, 2, 3])
assert not df.empty
df = self.klass(index=[1], columns=[1])
assert not df.empty
df = self.klass(index=["a", "b"], columns=["c", "d"]).dropna()
assert df.empty
assert df.T.empty
empty_frames = [
self.klass(),
self.klass(index=[1]),
self.klass(columns=[1]),
self.klass({1: []}),
]
for df in empty_frames:
assert df.empty
assert df.T.empty
def test_with_datetimelikes(self):
df = self.klass(
{
"A": date_range("20130101", periods=10),
"B": timedelta_range("1 day", periods=10),
}
)
t = df.T
result = t.dtypes.value_counts()
if self.klass is DataFrame:
expected = Series({np.dtype("object"): 10})
else:
expected = Series({SparseDtype(dtype=object): 10})
tm.assert_series_equal(result, expected)
class TestDataFrameMisc(SharedWithSparse):
klass = DataFrame
# SharedWithSparse tests use generic, klass-agnostic assertion
_assert_frame_equal = staticmethod(assert_frame_equal)
_assert_series_equal = staticmethod(assert_series_equal)
def test_values(self, float_frame):
float_frame.values[:, 0] = 5.0
assert (float_frame.values[:, 0] == 5).all()
def test_as_matrix_deprecated(self, float_frame):
# GH 18458
with tm.assert_produces_warning(FutureWarning):
cols = float_frame.columns.tolist()
result = float_frame.as_matrix(columns=cols)
expected = float_frame.values
tm.assert_numpy_array_equal(result, expected)
def test_deepcopy(self, float_frame):
cp = deepcopy(float_frame)
series = cp["A"]
series[:] = 10
for idx, value in series.items():
assert float_frame["A"][idx] != value
def test_transpose_get_view(self, float_frame):
dft = float_frame.T
dft.values[:, 5:10] = 5
assert (float_frame.values[5:10] == 5).all()
def test_inplace_return_self(self):
# GH 1893
data = DataFrame(
{"a": ["foo", "bar", "baz", "qux"], "b": [0, 0, 1, 1], "c": [1, 2, 3, 4]}
)
def _check_f(base, f):
result = f(base)
assert result is None
# -----DataFrame-----
# set_index
f = lambda x: x.set_index("a", inplace=True)
_check_f(data.copy(), f)
# reset_index
f = lambda x: x.reset_index(inplace=True)
_check_f(data.set_index("a"), f)
# drop_duplicates
f = lambda x: x.drop_duplicates(inplace=True)
_check_f(data.copy(), f)
# sort
f = lambda x: x.sort_values("b", inplace=True)
_check_f(data.copy(), f)
# sort_index
f = lambda x: x.sort_index(inplace=True)
_check_f(data.copy(), f)
# fillna
f = lambda x: x.fillna(0, inplace=True)
_check_f(data.copy(), f)
# replace
f = lambda x: x.replace(1, 0, inplace=True)
_check_f(data.copy(), f)
# rename
f = lambda x: x.rename({1: "foo"}, inplace=True)
_check_f(data.copy(), f)
# -----Series-----
d = data.copy()["c"]
# reset_index
f = lambda x: x.reset_index(inplace=True, drop=True)
_check_f(data.set_index("a")["c"], f)
# fillna
f = lambda x: x.fillna(0, inplace=True)
_check_f(d.copy(), f)
# replace
f = lambda x: x.replace(1, 0, inplace=True)
_check_f(d.copy(), f)
# rename
f = lambda x: x.rename({1: "foo"}, inplace=True)
_check_f(d.copy(), f)
def test_tab_complete_warning(self, ip):
# GH 16409
pytest.importorskip("IPython", minversion="6.0.0")
from IPython.core.completer import provisionalcompleter
code = "import pandas as pd; df = pd.DataFrame()"
ip.run_code(code)
with tm.assert_produces_warning(None):
with provisionalcompleter("ignore"):
list(ip.Completer.completions("df.", 1))
def test_get_values_deprecated(self):
df = DataFrame({"a": [1, 2], "b": [0.1, 0.2]})
with tm.assert_produces_warning(FutureWarning):
res = df.get_values()
tm.assert_numpy_array_equal(res, df.values)
|
apache-2.0
|
ricket1978/ggplot
|
ggplot/tests/test_stat_calculate_methods.py
|
12
|
2240
|
from __future__ import (absolute_import, division, print_function,
unicode_literals)
from nose.tools import (assert_equal, assert_is, assert_is_not,
assert_raises)
import pandas as pd
from ggplot import *
from ggplot.utils.exceptions import GgplotError
from . import cleanup
@cleanup
def test_stat_bin():
# stat_bin needs the 'x' aesthetic to be numeric or a categorical
# and should complain if given anything else
class unknown(object):
pass
x = [unknown()] * 3
y = [1, 2, 3]
df = pd.DataFrame({'x': x, 'y': y})
gg = ggplot(aes(x='x', y='y'), df)
with assert_raises(GgplotError):
print(gg + stat_bin())
@cleanup
def test_stat_abline():
# slope and intercept function should return values
# of the same length
def fn_xy(x, y):
return [1, 2]
def fn_xy2(x, y):
return [1, 2, 3]
gg = ggplot(aes(x='wt', y='mpg'), mtcars)
# same length, no problem
print(gg + stat_abline(slope=fn_xy, intercept=fn_xy))
with assert_raises(GgplotError):
print(gg + stat_abline(slope=fn_xy, intercept=fn_xy2))
@cleanup
def test_stat_vhabline_functions():
def fn_x(x):
return 1
def fn_y(y):
return 1
def fn_xy(x, y):
return 1
gg = ggplot(aes(x='wt'), mtcars)
# needs y aesthetic
with assert_raises(GgplotError):
print(gg + stat_abline(slope=fn_xy))
# needs y aesthetic
with assert_raises(GgplotError):
print(gg + stat_abline(intercept=fn_xy))
gg = ggplot(aes(x='wt', y='mpg'), mtcars)
# Functions with 2 args, no problem
print(gg + stat_abline(slope=fn_xy, intercept=fn_xy))
# slope function should take 2 args
with assert_raises(GgplotError):
print(gg + stat_abline(slope=fn_x, intercept=fn_xy))
# intercept function should take 2 args
with assert_raises(GgplotError):
print(gg + stat_abline(slope=fn_xy, intercept=fn_y))
# xintercept function should take 1 arg
with assert_raises(GgplotError):
print(gg + stat_vline(xintercept=fn_xy))
# yintercept function should take 1 arg
with assert_raises(GgplotError):
print(gg + stat_hline(yintercept=fn_xy))
|
bsd-2-clause
|
carrillo/scikit-learn
|
sklearn/metrics/metrics.py
|
233
|
1262
|
import warnings
warnings.warn("sklearn.metrics.metrics is deprecated and will be removed in "
"0.18. Please import from sklearn.metrics",
DeprecationWarning)
from .ranking import auc
from .ranking import average_precision_score
from .ranking import label_ranking_average_precision_score
from .ranking import precision_recall_curve
from .ranking import roc_auc_score
from .ranking import roc_curve
from .classification import accuracy_score
from .classification import classification_report
from .classification import confusion_matrix
from .classification import f1_score
from .classification import fbeta_score
from .classification import hamming_loss
from .classification import hinge_loss
from .classification import jaccard_similarity_score
from .classification import log_loss
from .classification import matthews_corrcoef
from .classification import precision_recall_fscore_support
from .classification import precision_score
from .classification import recall_score
from .classification import zero_one_loss
from .regression import explained_variance_score
from .regression import mean_absolute_error
from .regression import mean_squared_error
from .regression import median_absolute_error
from .regression import r2_score
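# Usage sketch: import the same names from sklearn.metrics directly to avoid
# the DeprecationWarning emitted above, e.g.
#     from sklearn.metrics import accuracy_score
#     accuracy_score([0, 1, 1], [0, 1, 0])   # -> 0.666...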
|
bsd-3-clause
|
sauloal/cnidaria
|
scripts/venv/lib/python2.7/site-packages/matplotlib/lines.py
|
10
|
47376
|
"""
This module contains all the 2D line class which can draw with a
variety of line styles, markers and colors.
"""
# TODO: expose cap and join style attrs
from __future__ import (absolute_import, division, print_function,
unicode_literals)
import six
import warnings
import numpy as np
from numpy import ma
from matplotlib import verbose
from . import artist
from .artist import Artist
from .cbook import iterable, is_string_like, is_numlike, ls_mapper
from .colors import colorConverter
from .path import Path
from .transforms import Bbox, TransformedPath, IdentityTransform
from matplotlib import rcParams
from .artist import allow_rasterization
from matplotlib import docstring
from matplotlib.markers import MarkerStyle
# Imported here for backward compatibility, even though they don't
# really belong.
from matplotlib.markers import TICKLEFT, TICKRIGHT, TICKUP, TICKDOWN
from matplotlib.markers import CARETLEFT, CARETRIGHT, CARETUP, CARETDOWN
def segment_hits(cx, cy, x, y, radius):
"""
Determine if any line segments are within radius of a
point. Returns the list of line segments that are within that
radius.
"""
# Process single points specially
if len(x) < 2:
res, = np.nonzero((cx - x) ** 2 + (cy - y) ** 2 <= radius ** 2)
return res
# We need to lop the last element off a lot.
xr, yr = x[:-1], y[:-1]
# Only look at line segments whose nearest point to C on the line
# lies within the segment.
dx, dy = x[1:] - xr, y[1:] - yr
Lnorm_sq = dx ** 2 + dy ** 2 # Possibly want to eliminate Lnorm==0
u = ((cx - xr) * dx + (cy - yr) * dy) / Lnorm_sq
candidates = (u >= 0) & (u <= 1)
#if any(candidates): print "candidates",xr[candidates]
# Note that there is a little area near one side of each point
# which will be near neither segment, and another which will
# be near both, depending on the angle of the lines. The
# following radius test eliminates these ambiguities.
point_hits = (cx - x) ** 2 + (cy - y) ** 2 <= radius ** 2
#if any(point_hits): print "points",xr[candidates]
candidates = candidates & ~(point_hits[:-1] | point_hits[1:])
# For those candidates which remain, determine how far they lie away
# from the line.
px, py = xr + u * dx, yr + u * dy
line_hits = (cx - px) ** 2 + (cy - py) ** 2 <= radius ** 2
#if any(line_hits): print "lines",xr[candidates]
line_hits = line_hits & candidates
points, = point_hits.ravel().nonzero()
lines, = line_hits.ravel().nonzero()
#print points,lines
return np.concatenate((points, lines))
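# A short worked example of segment_hits (illustrative values):
#     x = np.array([0., 10., 20., 30.]); y = np.zeros(4)
#     segment_hits(15., 1., x, y, 3.)   # -> array([1]): the cursor at (15, 1)
#                                       # lies within radius of the segment
#                                       # starting at index 1 (x = 10 to 20)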
def _mark_every_path(markevery, tpath, affine, ax_transform):
"""
Helper function that sorts out how to deal the input
`markevery` and returns the points where markers should be drawn.
Takes in the `markevery` value and the line path and returns the
sub-sampled path.
"""
# pull out the two bits of data we want from the path
codes, verts = tpath.codes, tpath.vertices
def _slice_or_none(in_v, slc):
'''
Helper function to cope with `codes` being an
ndarray or `None`
'''
if in_v is None:
return None
return in_v[slc]
# if just a float, assume starting at 0.0 and make a tuple
if isinstance(markevery, float):
markevery = (0.0, markevery)
# if just an int, assume starting at 0 and make a tuple
elif isinstance(markevery, int):
markevery = (0, markevery)
if isinstance(markevery, tuple):
if len(markevery) != 2:
raise ValueError('`markevery` is a tuple but its '
'len is not 2; '
'markevery=%s' % (markevery,))
start, step = markevery
# if step is an int, old behavior
if isinstance(step, int):
#tuple of 2 int is for backwards compatibility,
if not(isinstance(start, int)):
raise ValueError('`markevery` is a tuple with '
'len 2 and second element is an int, but '
'the first element is not an int; '
'markevery=%s' % (markevery,))
# just return, we are done here
return Path(verts[slice(start, None, step)],
_slice_or_none(codes, slice(start, None, step)))
elif isinstance(step, float):
if not (isinstance(start, int) or
isinstance(start, float)):
raise ValueError('`markevery` is a tuple with '
'len 2 and second element is a float, but '
'the first element is not a float or an '
'int; '
'markevery=%s' % (markevery,))
#calc cumulative distance along path (in display
# coords):
disp_coords = affine.transform(tpath.vertices)
delta = np.empty((len(disp_coords), 2),
dtype=float)
delta[0, :] = 0.0
delta[1:, :] = (disp_coords[1:, :] -
disp_coords[:-1, :])
delta = np.sum(delta**2, axis=1)
delta = np.sqrt(delta)
delta = np.cumsum(delta)
#calc distance between markers along path based on
# the axes bounding box diagonal being a distance
# of unity:
scale = ax_transform.transform(
np.array([[0, 0], [1, 1]]))
scale = np.diff(scale, axis=0)
scale = np.sum(scale**2)
scale = np.sqrt(scale)
marker_delta = np.arange(start * scale,
delta[-1],
step * scale)
#find closest actual data point that is closest to
# the theoretical distance along the path:
inds = np.abs(delta[np.newaxis, :] -
marker_delta[:, np.newaxis])
inds = inds.argmin(axis=1)
inds = np.unique(inds)
# return, we are done here
return Path(verts[inds],
_slice_or_none(codes, inds))
else:
raise ValueError('`markevery` is a tuple with '
'len 2, but its second element is not an int '
'or a float; '
'markevery=%s' % (markevery,))
elif isinstance(markevery, slice):
# mazol tov, it's already a slice, just return
return Path(verts[markevery],
_slice_or_none(codes, markevery))
elif iterable(markevery):
#fancy indexing
try:
return Path(verts[markevery],
_slice_or_none(codes, markevery))
except (ValueError, IndexError):
raise ValueError('`markevery` is iterable but '
'not a valid form of numpy fancy indexing; '
'markevery=%s' % (markevery,))
else:
raise ValueError('Value of `markevery` is not '
'recognized; '
'markevery=%s' % (markevery,))
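# Summary of the `markevery` forms handled above (typically set through
# Line2D.set_markevery or plot(..., markevery=...)):
#   markevery=5           -> every 5th vertex, starting at index 0
#   markevery=(2, 5)      -> every 5th vertex, starting at index 2
#   markevery=0.1         -> markers spaced roughly 0.1 of the axes-bbox
#                            diagonal apart along the line
#   markevery=slice(...) or a list/array of indices -> direct/fancy indexing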
class Line2D(Artist):
"""
A line - the line can have both a solid linestyle connecting all
the vertices, and a marker at each vertex. Additionally, the
drawing of the solid line is influenced by the drawstyle, e.g., one
can create "stepped" lines in various styles.
"""
lineStyles = _lineStyles = { # hidden names deprecated
'-': '_draw_solid',
'--': '_draw_dashed',
'-.': '_draw_dash_dot',
':': '_draw_dotted',
'None': '_draw_nothing',
' ': '_draw_nothing',
'': '_draw_nothing',
}
_drawStyles_l = {
'default': '_draw_lines',
'steps-mid': '_draw_steps_mid',
'steps-pre': '_draw_steps_pre',
'steps-post': '_draw_steps_post',
}
_drawStyles_s = {
'steps': '_draw_steps_pre',
}
drawStyles = {}
drawStyles.update(_drawStyles_l)
drawStyles.update(_drawStyles_s)
# Need a list ordered with long names first:
drawStyleKeys = (list(six.iterkeys(_drawStyles_l)) +
list(six.iterkeys(_drawStyles_s)))
# Referenced here to maintain API. These are defined in
# MarkerStyle
markers = MarkerStyle.markers
filled_markers = MarkerStyle.filled_markers
fillStyles = MarkerStyle.fillstyles
zorder = 2
validCap = ('butt', 'round', 'projecting')
validJoin = ('miter', 'round', 'bevel')
def __str__(self):
if self._label != "":
return "Line2D(%s)" % (self._label)
elif hasattr(self, '_x') and len(self._x) > 3:
return "Line2D((%g,%g),(%g,%g),...,(%g,%g))"\
% (self._x[0], self._y[0], self._x[0],
self._y[0], self._x[-1], self._y[-1])
elif hasattr(self, '_x'):
return "Line2D(%s)"\
% (",".join(["(%g,%g)" % (x, y) for x, y
in zip(self._x, self._y)]))
else:
return "Line2D()"
def __init__(self, xdata, ydata,
linewidth=None, # all Nones default to rc
linestyle=None,
color=None,
marker=None,
markersize=None,
markeredgewidth=None,
markeredgecolor=None,
markerfacecolor=None,
markerfacecoloralt='none',
fillstyle='full',
antialiased=None,
dash_capstyle=None,
solid_capstyle=None,
dash_joinstyle=None,
solid_joinstyle=None,
pickradius=5,
drawstyle=None,
markevery=None,
**kwargs
):
"""
Create a :class:`~matplotlib.lines.Line2D` instance with *x*
and *y* data in sequences *xdata*, *ydata*.
The kwargs are :class:`~matplotlib.lines.Line2D` properties:
%(Line2D)s
See :meth:`set_linestyle` for a description of the line styles,
:meth:`set_marker` for a description of the markers, and
:meth:`set_drawstyle` for a description of the draw styles.
"""
Artist.__init__(self)
#convert sequences to numpy arrays
if not iterable(xdata):
raise RuntimeError('xdata must be a sequence')
if not iterable(ydata):
raise RuntimeError('ydata must be a sequence')
if linewidth is None:
linewidth = rcParams['lines.linewidth']
if linestyle is None:
linestyle = rcParams['lines.linestyle']
if marker is None:
marker = rcParams['lines.marker']
if color is None:
color = rcParams['lines.color']
if markersize is None:
markersize = rcParams['lines.markersize']
if antialiased is None:
antialiased = rcParams['lines.antialiased']
if dash_capstyle is None:
dash_capstyle = rcParams['lines.dash_capstyle']
if dash_joinstyle is None:
dash_joinstyle = rcParams['lines.dash_joinstyle']
if solid_capstyle is None:
solid_capstyle = rcParams['lines.solid_capstyle']
if solid_joinstyle is None:
solid_joinstyle = rcParams['lines.solid_joinstyle']
if drawstyle is None:
drawstyle = 'default'
self.set_dash_capstyle(dash_capstyle)
self.set_dash_joinstyle(dash_joinstyle)
self.set_solid_capstyle(solid_capstyle)
self.set_solid_joinstyle(solid_joinstyle)
self.set_linestyle(linestyle)
self.set_drawstyle(drawstyle)
self.set_linewidth(linewidth)
self.set_color(color)
self._marker = MarkerStyle()
self.set_marker(marker)
self.set_markevery(markevery)
self.set_antialiased(antialiased)
self.set_markersize(markersize)
self._dashSeq = None
self.set_markerfacecolor(markerfacecolor)
self.set_markerfacecoloralt(markerfacecoloralt)
self.set_markeredgecolor(markeredgecolor)
self.set_markeredgewidth(markeredgewidth)
self.set_fillstyle(fillstyle)
self.verticalOffset = None
# update kwargs before updating data to give the caller a
# chance to init axes (and hence unit support)
self.update(kwargs)
self.pickradius = pickradius
self.ind_offset = 0
if is_numlike(self._picker):
self.pickradius = self._picker
self._xorig = np.asarray([])
self._yorig = np.asarray([])
self._invalidx = True
self._invalidy = True
self.set_data(xdata, ydata)
def __getstate__(self):
state = super(Line2D, self).__getstate__()
# _linefunc will be restored on draw time.
state.pop('_lineFunc', None)
return state
def contains(self, mouseevent):
"""
Test whether the mouse event occurred on the line. The pick
radius determines the precision of the location test (usually
within five points of the value). Use
:meth:`~matplotlib.lines.Line2D.get_pickradius` or
:meth:`~matplotlib.lines.Line2D.set_pickradius` to view or
modify it.
Returns *True* if any values are within the radius along with
``{'ind': pointlist}``, where *pointlist* is the set of points
within the radius.
TODO: sort returned indices by distance
"""
if six.callable(self._contains):
return self._contains(self, mouseevent)
if not is_numlike(self.pickradius):
raise ValueError("pick radius should be a distance")
# Make sure we have data to plot
if self._invalidy or self._invalidx:
self.recache()
if len(self._xy) == 0:
return False, {}
# Convert points to pixels
transformed_path = self._get_transformed_path()
path, affine = transformed_path.get_transformed_path_and_affine()
path = affine.transform_path(path)
xy = path.vertices
xt = xy[:, 0]
yt = xy[:, 1]
# Convert pick radius from points to pixels
if self.figure is None:
warnings.warn('no figure set when check if mouse is on line')
pixels = self.pickradius
else:
pixels = self.figure.dpi / 72. * self.pickradius
# the math involved in checking for containment (here and inside of
# segment_hits) assumes that it is OK to overflow. In case the
# application has set the error flags such that an exception is raised
# on overflow, we temporarily set the appropriate error flags here and
# set them back when we are finished.
olderrflags = np.seterr(all='ignore')
try:
# Check for collision
if self._linestyle in ['None', None]:
# If no line, return the nearby point(s)
d = (xt - mouseevent.x) ** 2 + (yt - mouseevent.y) ** 2
ind, = np.nonzero(np.less_equal(d, pixels ** 2))
else:
# If line, return the nearby segment(s)
ind = segment_hits(mouseevent.x, mouseevent.y, xt, yt, pixels)
finally:
np.seterr(**olderrflags)
ind += self.ind_offset
# Debugging message
if False and self._label != '':
print("Checking line", self._label,
"at", mouseevent.x, mouseevent.y)
print('xt', xt)
print('yt', yt)
#print 'dx,dy', (xt-mouseevent.x)**2., (yt-mouseevent.y)**2.
print('ind', ind)
# Return the point(s) within radius
return len(ind) > 0, dict(ind=ind)
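    # Illustrative sketch (not part of the original source): wiring the
    # containment test above into a pick callback; the 5-point tolerance and
    # the plotted data are arbitrary assumptions.
    #
    #     import matplotlib.pyplot as plt
    #     fig, ax = plt.subplots()
    #     line, = ax.plot(range(10), range(10), picker=5)  # 5-point pick radius
    #     def on_pick(event):
    #         print(event.ind)                             # indices near the click
    #     fig.canvas.mpl_connect('pick_event', on_pick)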
def get_pickradius(self):
"""return the pick radius used for containment tests"""
return self.pickradius
def set_pickradius(self, d):
"""Sets the pick radius used for containment tests
ACCEPTS: float distance in points
"""
self.pickradius = d
def get_fillstyle(self):
"""
return the marker fillstyle
"""
return self._marker.get_fillstyle()
def set_fillstyle(self, fs):
"""
Set the marker fill style; 'full' means fill the whole marker.
'none' means no filling; other options are for half-filled markers.
ACCEPTS: ['full' | 'left' | 'right' | 'bottom' | 'top' | 'none']
"""
self._marker.set_fillstyle(fs)
def set_markevery(self, every):
"""Set the markevery property to subsample the plot when using markers.
e.g., if `every=5`, every 5-th marker will be plotted.
ACCEPTS: [None | int | length-2 tuple of int | slice |
list/array of int | float | length-2 tuple of float]
Parameters
----------
every: None | int | length-2 tuple of int | slice | list/array of int |
float | length-2 tuple of float
Which markers to plot.
- every=None, every point will be plotted.
- every=N, every N-th marker will be plotted starting with
marker 0.
- every=(start, N), every N-th marker, starting at point
start, will be plotted.
- every=slice(start, end, N), every N-th marker, starting at
            point start, up to but not including point end, will be plotted.
- every=[i, j, m, n], only markers at points i, j, m, and n
will be plotted.
- every=0.1, (i.e. a float) then markers will be spaced at
approximately equal distances along the line; the distance
along the line between markers is determined by multiplying the
display-coordinate distance of the axes bounding-box diagonal
by the value of every.
- every=(0.5, 0.1) (i.e. a length-2 tuple of float), the
same functionality as every=0.1 is exhibited but the first
marker will be 0.5 multiplied by the
            display-coordinate-diagonal-distance along the line.
Notes
-----
Setting the markevery property will only show markers at actual data
points. When using float arguments to set the markevery property
on irregularly spaced data, the markers will likely not appear evenly
spaced because the actual data points do not coincide with the
theoretical spacing between markers.
When using a start offset to specify the first marker, the offset will
        be from the first data point, which may be different from the first
        visible data point if the plot is zoomed in.
If zooming in on a plot when using float arguments then the actual
data points that have markers will change because the distance between
markers is always determined from the display-coordinates
axes-bounding-box-diagonal regardless of the actual axes data limits.
"""
self._markevery = every
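    # Illustrative sketch (not part of the original source): exercising the
    # markevery modes documented above on an existing Line2D; the data and the
    # particular values are arbitrary assumptions.
    #
    #     import matplotlib.pyplot as plt
    #     line, = plt.plot(range(20), range(20), marker='o')
    #     line.set_markevery(5)        # every 5th point, starting at point 0
    #     line.set_markevery((2, 5))   # every 5th point, starting at point 2
    #     line.set_markevery(0.1)      # markers ~10% of the axes diagonal apart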
def get_markevery(self):
"""return the markevery setting"""
return self._markevery
def set_picker(self, p):
"""Sets the event picker details for the line.
ACCEPTS: float distance in points or callable pick function
``fn(artist, event)``
"""
if six.callable(p):
self._contains = p
else:
self.pickradius = p
self._picker = p
def get_window_extent(self, renderer):
bbox = Bbox([[0, 0], [0, 0]])
trans_data_to_xy = self.get_transform().transform
bbox.update_from_data_xy(trans_data_to_xy(self.get_xydata()),
ignore=True)
# correct for marker size, if any
if self._marker:
ms = (self._markersize / 72.0 * self.figure.dpi) * 0.5
bbox = bbox.padded(ms)
return bbox
def set_axes(self, ax):
Artist.set_axes(self, ax)
if ax.xaxis is not None:
self._xcid = ax.xaxis.callbacks.connect('units',
self.recache_always)
if ax.yaxis is not None:
self._ycid = ax.yaxis.callbacks.connect('units',
self.recache_always)
set_axes.__doc__ = Artist.set_axes.__doc__
def set_data(self, *args):
"""
Set the x and y data
ACCEPTS: 2D array (rows are x, y) or two 1D arrays
"""
if len(args) == 1:
x, y = args[0]
else:
x, y = args
self.set_xdata(x)
self.set_ydata(y)
def recache_always(self):
self.recache(always=True)
def recache(self, always=False):
if always or self._invalidx:
xconv = self.convert_xunits(self._xorig)
if ma.isMaskedArray(self._xorig):
x = ma.asarray(xconv, np.float_)
else:
x = np.asarray(xconv, np.float_)
x = x.ravel()
else:
x = self._x
if always or self._invalidy:
yconv = self.convert_yunits(self._yorig)
if ma.isMaskedArray(self._yorig):
y = ma.asarray(yconv, np.float_)
else:
y = np.asarray(yconv, np.float_)
y = y.ravel()
else:
y = self._y
if len(x) == 1 and len(y) > 1:
x = x * np.ones(y.shape, np.float_)
if len(y) == 1 and len(x) > 1:
y = y * np.ones(x.shape, np.float_)
if len(x) != len(y):
raise RuntimeError('xdata and ydata must be the same length')
x = x.reshape((len(x), 1))
y = y.reshape((len(y), 1))
if ma.isMaskedArray(x) or ma.isMaskedArray(y):
self._xy = ma.concatenate((x, y), 1)
else:
self._xy = np.concatenate((x, y), 1)
self._x = self._xy[:, 0] # just a view
self._y = self._xy[:, 1] # just a view
self._subslice = False
if (self.axes and len(x) > 100 and self._is_sorted(x) and
self.axes.name == 'rectilinear' and
self.axes.get_xscale() == 'linear' and
self._markevery is None and
self.get_clip_on() is True):
self._subslice = True
if hasattr(self, '_path'):
interpolation_steps = self._path._interpolation_steps
else:
interpolation_steps = 1
self._path = Path(self._xy, None, interpolation_steps)
self._transformed_path = None
self._invalidx = False
self._invalidy = False
def _transform_path(self, subslice=None):
"""
Puts a TransformedPath instance at self._transformed_path,
all invalidation of the transform is then handled by the
TransformedPath instance.
"""
# Masked arrays are now handled by the Path class itself
if subslice is not None:
_path = Path(self._xy[subslice, :])
else:
_path = self._path
self._transformed_path = TransformedPath(_path, self.get_transform())
def _get_transformed_path(self):
"""
Return the :class:`~matplotlib.transforms.TransformedPath` instance
of this line.
"""
if self._transformed_path is None:
self._transform_path()
return self._transformed_path
def set_transform(self, t):
"""
set the Transformation instance used by this artist
ACCEPTS: a :class:`matplotlib.transforms.Transform` instance
"""
Artist.set_transform(self, t)
self._invalidx = True
self._invalidy = True
def _is_sorted(self, x):
"""return true if x is sorted"""
if len(x) < 2:
return 1
return np.amin(x[1:] - x[0:-1]) >= 0
@allow_rasterization
def draw(self, renderer):
"""draw the Line with `renderer` unless visibility is False"""
if not self.get_visible():
return
if self._invalidy or self._invalidx:
self.recache()
self.ind_offset = 0 # Needed for contains() method.
if self._subslice and self.axes:
# Need to handle monotonically decreasing case also...
x0, x1 = self.axes.get_xbound()
i0, = self._x.searchsorted([x0], 'left')
i1, = self._x.searchsorted([x1], 'right')
subslice = slice(max(i0 - 1, 0), i1 + 1)
self.ind_offset = subslice.start
self._transform_path(subslice)
transf_path = self._get_transformed_path()
if self.get_path_effects():
from matplotlib.patheffects import PathEffectRenderer
renderer = PathEffectRenderer(self.get_path_effects(), renderer)
renderer.open_group('line2d', self.get_gid())
gc = renderer.new_gc()
self._set_gc_clip(gc)
ln_color_rgba = self._get_rgba_ln_color()
gc.set_foreground(ln_color_rgba, isRGBA=True)
gc.set_alpha(ln_color_rgba[3])
gc.set_antialiased(self._antialiased)
gc.set_linewidth(self._linewidth)
if self.is_dashed():
cap = self._dashcapstyle
join = self._dashjoinstyle
else:
cap = self._solidcapstyle
join = self._solidjoinstyle
gc.set_joinstyle(join)
gc.set_capstyle(cap)
gc.set_snap(self.get_snap())
if self.get_sketch_params() is not None:
gc.set_sketch_params(*self.get_sketch_params())
funcname = self._lineStyles.get(self._linestyle, '_draw_nothing')
if funcname != '_draw_nothing':
tpath, affine = transf_path.get_transformed_path_and_affine()
if len(tpath.vertices):
self._lineFunc = getattr(self, funcname)
funcname = self.drawStyles.get(self._drawstyle, '_draw_lines')
drawFunc = getattr(self, funcname)
drawFunc(renderer, gc, tpath, affine.frozen())
if self._marker:
gc = renderer.new_gc()
self._set_gc_clip(gc)
rgbaFace = self._get_rgba_face()
rgbaFaceAlt = self._get_rgba_face(alt=True)
edgecolor = self.get_markeredgecolor()
if is_string_like(edgecolor) and edgecolor.lower() == 'none':
gc.set_linewidth(0)
gc.set_foreground(rgbaFace, isRGBA=True)
else:
gc.set_foreground(edgecolor)
gc.set_linewidth(self._markeredgewidth)
marker = self._marker
tpath, affine = transf_path.get_transformed_points_and_affine()
if len(tpath.vertices):
# subsample the markers if markevery is not None
markevery = self.get_markevery()
if markevery is not None:
subsampled = _mark_every_path(markevery, tpath,
affine, self.axes.transAxes)
else:
subsampled = tpath
snap = marker.get_snap_threshold()
if type(snap) == float:
snap = renderer.points_to_pixels(self._markersize) >= snap
gc.set_snap(snap)
gc.set_joinstyle(marker.get_joinstyle())
gc.set_capstyle(marker.get_capstyle())
marker_path = marker.get_path()
marker_trans = marker.get_transform()
w = renderer.points_to_pixels(self._markersize)
if marker.get_marker() != ',':
# Don't scale for pixels, and don't stroke them
marker_trans = marker_trans.scale(w)
else:
gc.set_linewidth(0)
if rgbaFace is not None:
gc.set_alpha(rgbaFace[3])
renderer.draw_markers(gc, marker_path, marker_trans,
subsampled, affine.frozen(),
rgbaFace)
alt_marker_path = marker.get_alt_path()
if alt_marker_path:
if rgbaFaceAlt is not None:
gc.set_alpha(rgbaFaceAlt[3])
alt_marker_trans = marker.get_alt_transform()
alt_marker_trans = alt_marker_trans.scale(w)
renderer.draw_markers(
gc, alt_marker_path, alt_marker_trans, subsampled,
affine.frozen(), rgbaFaceAlt)
gc.restore()
gc.restore()
renderer.close_group('line2d')
def get_antialiased(self):
return self._antialiased
def get_color(self):
return self._color
def get_drawstyle(self):
return self._drawstyle
def get_linestyle(self):
return self._linestyle
def get_linewidth(self):
return self._linewidth
def get_marker(self):
return self._marker.get_marker()
def get_markeredgecolor(self):
mec = self._markeredgecolor
if (is_string_like(mec) and mec == 'auto'):
if self._marker.get_marker() in ('.', ','):
return self._color
if self._marker.is_filled() and self.get_fillstyle() != 'none':
return 'k' # Bad hard-wired default...
else:
return self._color
else:
return mec
def get_markeredgewidth(self):
return self._markeredgewidth
def _get_markerfacecolor(self, alt=False):
if alt:
fc = self._markerfacecoloralt
else:
fc = self._markerfacecolor
if (is_string_like(fc) and fc.lower() == 'auto'):
if self.get_fillstyle() == 'none':
return 'none'
else:
return self._color
else:
return fc
def get_markerfacecolor(self):
return self._get_markerfacecolor(alt=False)
def get_markerfacecoloralt(self):
return self._get_markerfacecolor(alt=True)
def get_markersize(self):
return self._markersize
def get_data(self, orig=True):
"""
Return the xdata, ydata.
If *orig* is *True*, return the original data.
"""
return self.get_xdata(orig=orig), self.get_ydata(orig=orig)
def get_xdata(self, orig=True):
"""
Return the xdata.
If *orig* is *True*, return the original data, else the
processed data.
"""
if orig:
return self._xorig
if self._invalidx:
self.recache()
return self._x
def get_ydata(self, orig=True):
"""
Return the ydata.
If *orig* is *True*, return the original data, else the
processed data.
"""
if orig:
return self._yorig
if self._invalidy:
self.recache()
return self._y
def get_path(self):
"""
Return the :class:`~matplotlib.path.Path` object associated
with this line.
"""
if self._invalidy or self._invalidx:
self.recache()
return self._path
def get_xydata(self):
"""
Return the *xy* data as a Nx2 numpy array.
"""
if self._invalidy or self._invalidx:
self.recache()
return self._xy
def set_antialiased(self, b):
"""
        True if the line should be drawn with antialiased rendering
ACCEPTS: [True | False]
"""
self._antialiased = b
def set_color(self, color):
"""
Set the color of the line
ACCEPTS: any matplotlib color
"""
self._color = color
def set_drawstyle(self, drawstyle):
"""
Set the drawstyle of the plot
'default' connects the points with lines. The steps variants
produce step-plots. 'steps' is equivalent to 'steps-pre' and
is maintained for backward-compatibility.
ACCEPTS: ['default' | 'steps' | 'steps-pre' | 'steps-mid' |
'steps-post']
"""
self._drawstyle = drawstyle
def set_linewidth(self, w):
"""
Set the line width in points
ACCEPTS: float value in points
"""
self._linewidth = w
def set_linestyle(self, linestyle):
"""
Set the linestyle of the line (also accepts drawstyles)
================ =================
linestyle description
================ =================
``'-'`` solid
``'--'`` dashed
``'-.'`` dash_dot
``':'`` dotted
``'None'`` draw nothing
``' '`` draw nothing
``''`` draw nothing
================ =================
'steps' is equivalent to 'steps-pre' and is maintained for
backward-compatibility.
.. seealso::
:meth:`set_drawstyle`
To set the drawing style (stepping) of the plot.
ACCEPTS: [``'-'`` | ``'--'`` | ``'-.'`` | ``':'`` | ``'None'`` |
``' '`` | ``''``]
and any drawstyle in combination with a linestyle, e.g., ``'steps--'``.
"""
for ds in self.drawStyleKeys: # long names are first in the list
if linestyle.startswith(ds):
self.set_drawstyle(ds)
if len(linestyle) > len(ds):
linestyle = linestyle[len(ds):]
else:
linestyle = '-'
break
if linestyle not in self._lineStyles:
if linestyle in ls_mapper:
linestyle = ls_mapper[linestyle]
else:
verbose.report('Unrecognized line style %s, %s' %
(linestyle, type(linestyle)))
if linestyle in [' ', '']:
linestyle = 'None'
self._linestyle = linestyle
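    # Illustrative sketch (not part of the original source): a drawstyle prefix
    # combined with a linestyle, as parsed above; the values are arbitrary
    # assumptions.
    #
    #     line.set_linestyle('steps-mid--')   # stepped drawstyle + dashed line
    #     line.set_linestyle(':')             # dotted line (drawstyle unchanged)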
@docstring.dedent_interpd
def set_marker(self, marker):
"""
Set the line marker
ACCEPTS: :mod:`A valid marker style <matplotlib.markers>`
Parameters
        ----------
        marker: marker style
            See `~matplotlib.markers` for a full description of the possible
            arguments.
"""
self._marker.set_marker(marker)
def set_markeredgecolor(self, ec):
"""
Set the marker edge color
ACCEPTS: any matplotlib color
"""
if ec is None:
ec = 'auto'
self._markeredgecolor = ec
def set_markeredgewidth(self, ew):
"""
Set the marker edge width in points
ACCEPTS: float value in points
"""
if ew is None:
ew = rcParams['lines.markeredgewidth']
self._markeredgewidth = ew
def set_markerfacecolor(self, fc):
"""
Set the marker face color.
ACCEPTS: any matplotlib color
"""
if fc is None:
fc = 'auto'
self._markerfacecolor = fc
def set_markerfacecoloralt(self, fc):
"""
Set the alternate marker face color.
ACCEPTS: any matplotlib color
"""
if fc is None:
fc = 'auto'
self._markerfacecoloralt = fc
def set_markersize(self, sz):
"""
Set the marker size in points
ACCEPTS: float
"""
self._markersize = sz
def set_xdata(self, x):
"""
Set the data np.array for x
ACCEPTS: 1D array
"""
self._xorig = x
self._invalidx = True
def set_ydata(self, y):
"""
Set the data np.array for y
ACCEPTS: 1D array
"""
self._yorig = y
self._invalidy = True
def set_dashes(self, seq):
"""
Set the dash sequence, sequence of dashes with on off ink in
points. If seq is empty or if seq = (None, None), the
linestyle will be set to solid.
ACCEPTS: sequence of on/off ink in points
"""
if seq == (None, None) or len(seq) == 0:
self.set_linestyle('-')
else:
self.set_linestyle('--')
self._dashSeq = seq # TODO: offset ignored for now
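    # Illustrative sketch (not part of the original source): an on/off ink
    # pattern in points; the particular sequence is an arbitrary assumption.
    #
    #     line.set_dashes([6, 2, 1, 2])   # 6pt on, 2pt off, 1pt on, 2pt off
    #     line.set_dashes([])             # empty sequence -> back to a solid line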
def _draw_lines(self, renderer, gc, path, trans):
self._lineFunc(renderer, gc, path, trans)
def _draw_steps_pre(self, renderer, gc, path, trans):
vertices = self._xy
steps = ma.zeros((2 * len(vertices) - 1, 2), np.float_)
steps[0::2, 0], steps[1::2, 0] = vertices[:, 0], vertices[:-1, 0]
steps[0::2, 1], steps[1:-1:2, 1] = vertices[:, 1], vertices[1:, 1]
path = Path(steps)
path = path.transformed(self.get_transform())
self._lineFunc(renderer, gc, path, IdentityTransform())
def _draw_steps_post(self, renderer, gc, path, trans):
vertices = self._xy
steps = ma.zeros((2 * len(vertices) - 1, 2), np.float_)
steps[::2, 0], steps[1:-1:2, 0] = vertices[:, 0], vertices[1:, 0]
steps[0::2, 1], steps[1::2, 1] = vertices[:, 1], vertices[:-1, 1]
path = Path(steps)
path = path.transformed(self.get_transform())
self._lineFunc(renderer, gc, path, IdentityTransform())
def _draw_steps_mid(self, renderer, gc, path, trans):
vertices = self._xy
steps = ma.zeros((2 * len(vertices), 2), np.float_)
steps[1:-1:2, 0] = 0.5 * (vertices[:-1, 0] + vertices[1:, 0])
steps[2::2, 0] = 0.5 * (vertices[:-1, 0] + vertices[1:, 0])
steps[0, 0] = vertices[0, 0]
steps[-1, 0] = vertices[-1, 0]
steps[0::2, 1], steps[1::2, 1] = vertices[:, 1], vertices[:, 1]
path = Path(steps)
path = path.transformed(self.get_transform())
self._lineFunc(renderer, gc, path, IdentityTransform())
def _draw_solid(self, renderer, gc, path, trans):
gc.set_linestyle('solid')
renderer.draw_path(gc, path, trans)
def _draw_dashed(self, renderer, gc, path, trans):
gc.set_linestyle('dashed')
if self._dashSeq is not None:
gc.set_dashes(0, self._dashSeq)
renderer.draw_path(gc, path, trans)
def _draw_dash_dot(self, renderer, gc, path, trans):
gc.set_linestyle('dashdot')
renderer.draw_path(gc, path, trans)
def _draw_dotted(self, renderer, gc, path, trans):
gc.set_linestyle('dotted')
renderer.draw_path(gc, path, trans)
def update_from(self, other):
"""copy properties from other to self"""
Artist.update_from(self, other)
self._linestyle = other._linestyle
self._linewidth = other._linewidth
self._color = other._color
self._markersize = other._markersize
self._markerfacecolor = other._markerfacecolor
self._markerfacecoloralt = other._markerfacecoloralt
self._markeredgecolor = other._markeredgecolor
self._markeredgewidth = other._markeredgewidth
self._dashSeq = other._dashSeq
self._dashcapstyle = other._dashcapstyle
self._dashjoinstyle = other._dashjoinstyle
self._solidcapstyle = other._solidcapstyle
self._solidjoinstyle = other._solidjoinstyle
self._linestyle = other._linestyle
self._marker = MarkerStyle(other._marker.get_marker(),
other._marker.get_fillstyle())
self._drawstyle = other._drawstyle
def _get_rgb_face(self, alt=False):
facecolor = self._get_markerfacecolor(alt=alt)
if is_string_like(facecolor) and facecolor.lower() == 'none':
rgbFace = None
else:
rgbFace = colorConverter.to_rgb(facecolor)
return rgbFace
def _get_rgba_face(self, alt=False):
facecolor = self._get_markerfacecolor(alt=alt)
if is_string_like(facecolor) and facecolor.lower() == 'none':
rgbaFace = None
else:
rgbaFace = colorConverter.to_rgba(facecolor, self._alpha)
return rgbaFace
def _get_rgba_ln_color(self, alt=False):
return colorConverter.to_rgba(self._color, self._alpha)
# some aliases....
def set_aa(self, val):
'alias for set_antialiased'
self.set_antialiased(val)
def set_c(self, val):
'alias for set_color'
self.set_color(val)
def set_ls(self, val):
"""alias for set_linestyle"""
self.set_linestyle(val)
def set_lw(self, val):
"""alias for set_linewidth"""
self.set_linewidth(val)
def set_mec(self, val):
"""alias for set_markeredgecolor"""
self.set_markeredgecolor(val)
def set_mew(self, val):
"""alias for set_markeredgewidth"""
self.set_markeredgewidth(val)
def set_mfc(self, val):
"""alias for set_markerfacecolor"""
self.set_markerfacecolor(val)
def set_mfcalt(self, val):
"""alias for set_markerfacecoloralt"""
self.set_markerfacecoloralt(val)
def set_ms(self, val):
"""alias for set_markersize"""
self.set_markersize(val)
def get_aa(self):
"""alias for get_antialiased"""
return self.get_antialiased()
def get_c(self):
"""alias for get_color"""
return self.get_color()
def get_ls(self):
"""alias for get_linestyle"""
return self.get_linestyle()
def get_lw(self):
"""alias for get_linewidth"""
return self.get_linewidth()
def get_mec(self):
"""alias for get_markeredgecolor"""
return self.get_markeredgecolor()
def get_mew(self):
"""alias for get_markeredgewidth"""
return self.get_markeredgewidth()
def get_mfc(self):
"""alias for get_markerfacecolor"""
return self.get_markerfacecolor()
def get_mfcalt(self, alt=False):
"""alias for get_markerfacecoloralt"""
return self.get_markerfacecoloralt()
def get_ms(self):
"""alias for get_markersize"""
return self.get_markersize()
def set_dash_joinstyle(self, s):
"""
Set the join style for dashed linestyles
ACCEPTS: ['miter' | 'round' | 'bevel']
"""
s = s.lower()
if s not in self.validJoin:
raise ValueError('set_dash_joinstyle passed "%s";\n' % (s,)
+ 'valid joinstyles are %s' % (self.validJoin,))
self._dashjoinstyle = s
def set_solid_joinstyle(self, s):
"""
Set the join style for solid linestyles
ACCEPTS: ['miter' | 'round' | 'bevel']
"""
s = s.lower()
if s not in self.validJoin:
raise ValueError('set_solid_joinstyle passed "%s";\n' % (s,)
+ 'valid joinstyles are %s' % (self.validJoin,))
self._solidjoinstyle = s
def get_dash_joinstyle(self):
"""
Get the join style for dashed linestyles
"""
return self._dashjoinstyle
def get_solid_joinstyle(self):
"""
Get the join style for solid linestyles
"""
return self._solidjoinstyle
def set_dash_capstyle(self, s):
"""
Set the cap style for dashed linestyles
ACCEPTS: ['butt' | 'round' | 'projecting']
"""
s = s.lower()
if s not in self.validCap:
raise ValueError('set_dash_capstyle passed "%s";\n' % (s,)
+ 'valid capstyles are %s' % (self.validCap,))
self._dashcapstyle = s
def set_solid_capstyle(self, s):
"""
Set the cap style for solid linestyles
ACCEPTS: ['butt' | 'round' | 'projecting']
"""
s = s.lower()
if s not in self.validCap:
raise ValueError('set_solid_capstyle passed "%s";\n' % (s,)
+ 'valid capstyles are %s' % (self.validCap,))
self._solidcapstyle = s
def get_dash_capstyle(self):
"""
Get the cap style for dashed linestyles
"""
return self._dashcapstyle
def get_solid_capstyle(self):
"""
Get the cap style for solid linestyles
"""
return self._solidcapstyle
def is_dashed(self):
        'return True if the line has a dashed linestyle'
return self._linestyle in ('--', '-.', ':')
class VertexSelector:
"""
Manage the callbacks to maintain a list of selected vertices for
:class:`matplotlib.lines.Line2D`. Derived classes should override
:meth:`~matplotlib.lines.VertexSelector.process_selected` to do
something with the picks.
Here is an example which highlights the selected verts with red
circles::
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.lines as lines
class HighlightSelected(lines.VertexSelector):
def __init__(self, line, fmt='ro', **kwargs):
lines.VertexSelector.__init__(self, line)
self.markers, = self.axes.plot([], [], fmt, **kwargs)
def process_selected(self, ind, xs, ys):
self.markers.set_data(xs, ys)
self.canvas.draw()
fig = plt.figure()
ax = fig.add_subplot(111)
x, y = np.random.rand(2, 30)
line, = ax.plot(x, y, 'bs-', picker=5)
selector = HighlightSelected(line)
plt.show()
"""
def __init__(self, line):
"""
Initialize the class with a :class:`matplotlib.lines.Line2D`
instance. The line should already be added to some
:class:`matplotlib.axes.Axes` instance and should have the
picker property set.
"""
if not hasattr(line, 'axes'):
raise RuntimeError('You must first add the line to the Axes')
if line.get_picker() is None:
raise RuntimeError('You must first set the picker property '
'of the line')
self.axes = line.axes
self.line = line
self.canvas = self.axes.figure.canvas
self.cid = self.canvas.mpl_connect('pick_event', self.onpick)
self.ind = set()
def process_selected(self, ind, xs, ys):
"""
Default "do nothing" implementation of the
:meth:`process_selected` method.
*ind* are the indices of the selected vertices. *xs* and *ys*
are the coordinates of the selected vertices.
"""
pass
def onpick(self, event):
"""When the line is picked, update the set of selected indicies."""
if event.artist is not self.line:
return
for i in event.ind:
if i in self.ind:
self.ind.remove(i)
else:
self.ind.add(i)
ind = list(self.ind)
ind.sort()
xdata, ydata = self.line.get_data()
self.process_selected(ind, xdata[ind], ydata[ind])
lineStyles = Line2D._lineStyles
lineMarkers = MarkerStyle.markers
drawStyles = Line2D.drawStyles
fillStyles = MarkerStyle.fillstyles
docstring.interpd.update(Line2D=artist.kwdoc(Line2D))
# You can not set the docstring of an instancemethod,
# but you can on the underlying function. Go figure.
docstring.dedent_interpd(Line2D.__init__)
|
mit
|
larsmans/scikit-learn
|
sklearn/utils/extmath.py
|
14
|
20521
|
"""
Extended math utilities.
"""
# Authors: Gael Varoquaux
# Alexandre Gramfort
# Alexandre T. Passos
# Olivier Grisel
# Lars Buitinck
# Stefan van der Walt
# Kyle Kastner
# License: BSD 3 clause
from __future__ import division
from functools import partial
import warnings
import numpy as np
from scipy import linalg
from scipy.sparse import issparse
from . import check_random_state, deprecated
from .fixes import np_version
from ._logistic_sigmoid import _log_logistic_sigmoid
from ..externals.six.moves import xrange
from .sparsefuncs_fast import csr_row_norms
from .validation import check_array, NonBLASDotWarning
def norm(x):
"""Compute the Euclidean or Frobenius norm of x.
Returns the Euclidean norm when x is a vector, the Frobenius norm when x
is a matrix (2-d array). More precise than sqrt(squared_norm(x)).
"""
x = np.asarray(x)
nrm2, = linalg.get_blas_funcs(['nrm2'], [x])
return nrm2(x)
# Newer NumPy has a ravel that needs less copying.
if np_version < (1, 7, 1):
_ravel = np.ravel
else:
_ravel = partial(np.ravel, order='K')
def squared_norm(x):
"""Squared Euclidean or Frobenius norm of x.
Returns the Euclidean norm when x is a vector, the Frobenius norm when x
is a matrix (2-d array). Faster than norm(x) ** 2.
"""
x = _ravel(x)
return np.dot(x, x)
def row_norms(X, squared=False):
"""Row-wise (squared) Euclidean norm of X.
Equivalent to np.sqrt((X * X).sum(axis=1)), but also supports CSR sparse
matrices and does not create an X.shape-sized temporary.
Performs no input validation.
"""
if issparse(X):
norms = csr_row_norms(X)
else:
norms = np.einsum('ij,ij->i', X, X)
if not squared:
np.sqrt(norms, norms)
return norms
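# Illustrative sketch (not part of the original source): row_norms compared with
# the naive NumPy expression on a small dense array; the data is an arbitrary
# assumption.
#
#     import numpy as np
#     X = np.arange(6, dtype=np.float64).reshape(2, 3)
#     row_norms(X)                    # array([ 2.236...,  7.071...])
#     np.sqrt((X * X).sum(axis=1))    # same values, but builds a temporary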
def fast_logdet(A):
"""Compute log(det(A)) for A symmetric
    Equivalent to: np.log(np.linalg.det(A)) but more robust.
    It returns -Inf if det(A) is non-positive or is not defined.
"""
sign, ld = np.linalg.slogdet(A)
if not sign > 0:
return -np.inf
return ld
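# Illustrative sketch (not part of the original source): fast_logdet on small
# symmetric matrices; the matrices are arbitrary assumptions.
#
#     import numpy as np
#     fast_logdet(np.diag([2., 3.]))    # log(det) = log(6) ~= 1.79
#     fast_logdet(np.diag([2., -3.]))   # det < 0 -> -inf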
def _impose_f_order(X):
"""Helper Function"""
# important to access flags instead of calling np.isfortran,
# this catches corner cases.
if X.flags.c_contiguous:
return check_array(X.T, copy=False, order='F'), True
else:
return check_array(X, copy=False, order='F'), False
def _fast_dot(A, B):
if B.shape[0] != A.shape[A.ndim - 1]: # check adopted from '_dotblas.c'
raise ValueError
if A.dtype != B.dtype or any(x.dtype not in (np.float32, np.float64)
for x in [A, B]):
warnings.warn('Data must be of same type. Supported types '
'are 32 and 64 bit float. '
'Falling back to np.dot.', NonBLASDotWarning)
raise ValueError
if min(A.shape) == 1 or min(B.shape) == 1 or A.ndim != 2 or B.ndim != 2:
raise ValueError
# scipy 0.9 compliant API
dot = linalg.get_blas_funcs(['gemm'], (A, B))[0]
A, trans_a = _impose_f_order(A)
B, trans_b = _impose_f_order(B)
return dot(alpha=1.0, a=A, b=B, trans_a=trans_a, trans_b=trans_b)
def _have_blas_gemm():
try:
linalg.get_blas_funcs(['gemm'])
return True
except (AttributeError, ValueError):
warnings.warn('Could not import BLAS, falling back to np.dot')
return False
# Only use fast_dot for older NumPy; newer ones have tackled the speed issue.
if np_version < (1, 7, 2) and _have_blas_gemm():
def fast_dot(A, B):
"""Compute fast dot products directly calling BLAS.
        This function calls BLAS directly while ensuring Fortran contiguity.
        This helps avoid the extra copies `np.dot` would have created.
For details see section `Linear Algebra on large Arrays`:
http://wiki.scipy.org/PerformanceTips
Parameters
----------
A, B: instance of np.ndarray
Input arrays. Arrays are supposed to be of the same dtype and to
have exactly 2 dimensions. Currently only floats are supported.
In case these requirements aren't met np.dot(A, B) is returned
instead. To activate the related warning issued in this case
execute the following lines of code:
>> import warnings
>> from sklearn.utils.validation import NonBLASDotWarning
>> warnings.simplefilter('always', NonBLASDotWarning)
"""
try:
return _fast_dot(A, B)
except ValueError:
# Maltyped or malformed data.
return np.dot(A, B)
else:
fast_dot = np.dot
def density(w, **kwargs):
"""Compute density of a sparse vector
Return a value between 0 and 1
"""
if hasattr(w, "toarray"):
d = float(w.nnz) / (w.shape[0] * w.shape[1])
else:
d = 0 if w is None else float((w != 0).sum()) / w.size
return d
def safe_sparse_dot(a, b, dense_output=False):
"""Dot product that handle the sparse matrix case correctly
Uses BLAS GEMM as replacement for numpy.dot where possible
to avoid unnecessary copies.
"""
if issparse(a) or issparse(b):
ret = a * b
if dense_output and hasattr(ret, "toarray"):
ret = ret.toarray()
return ret
else:
return fast_dot(a, b)
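# Illustrative sketch (not part of the original source): the same call covers
# dense and sparse operands; shapes and values are arbitrary assumptions.
#
#     import numpy as np
#     from scipy import sparse
#     a = sparse.csr_matrix(np.eye(3))
#     b = np.arange(9, dtype=np.float64).reshape(3, 3)
#     safe_sparse_dot(a, b, dense_output=True)   # dense (3, 3) result
#     safe_sparse_dot(b, b)                      # plain dense product via fast_dot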
def randomized_range_finder(A, size, n_iter, random_state=None):
"""Computes an orthonormal matrix whose range approximates the range of A.
Parameters
----------
A: 2D array
The input data matrix
size: integer
Size of the return array
n_iter: integer
Number of power iterations used to stabilize the result
random_state: RandomState or an int seed (0 by default)
A random number generator instance
Returns
-------
Q: 2D array
A (size x size) projection matrix, the range of which
approximates well the range of the input matrix A.
Notes
-----
Follows Algorithm 4.3 of
Finding structure with randomness: Stochastic algorithms for constructing
approximate matrix decompositions
    Halko, et al., 2009 (arXiv:0909.4061) http://arxiv.org/pdf/0909.4061
"""
random_state = check_random_state(random_state)
    # generating random gaussian vectors R with shape: (A.shape[1], size)
    R = random_state.normal(size=(A.shape[1], size))
    # sampling the range of A by linear projection of R
Y = safe_sparse_dot(A, R)
del R
# perform power iterations with Y to further 'imprint' the top
# singular vectors of A in Y
for i in xrange(n_iter):
Y = safe_sparse_dot(A, safe_sparse_dot(A.T, Y))
# extracting an orthonormal basis of the A range samples
Q, R = linalg.qr(Y, mode='economic')
return Q
def randomized_svd(M, n_components, n_oversamples=10, n_iter=0,
transpose='auto', flip_sign=True, random_state=0):
"""Computes a truncated randomized SVD
Parameters
----------
M: ndarray or sparse matrix
Matrix to decompose
n_components: int
Number of singular values and vectors to extract.
n_oversamples: int (default is 10)
Additional number of random vectors to sample the range of M so as
to ensure proper conditioning. The total number of random vectors
used to find the range of M is n_components + n_oversamples.
n_iter: int (default is 0)
Number of power iterations (can be used to deal with very noisy
problems).
transpose: True, False or 'auto' (default)
Whether the algorithm should be applied to M.T instead of M. The
result should approximately be the same. The 'auto' mode will
trigger the transposition if M.shape[1] > M.shape[0] since this
        implementation of randomized SVD tends to be a little faster in that
        case.
flip_sign: boolean, (True by default)
The output of a singular value decomposition is only unique up to a
permutation of the signs of the singular vectors. If `flip_sign` is
set to `True`, the sign ambiguity is resolved by making the largest
loadings for each component in the left singular vectors positive.
random_state: RandomState or an int seed (0 by default)
        A random number generator instance to make behavior deterministic.
Notes
-----
This algorithm finds a (usually very good) approximate truncated
singular value decomposition using randomization to speed up the
computations. It is particularly fast on large matrices on which
you wish to extract only a small number of components.
References
----------
* Finding structure with randomness: Stochastic algorithms for constructing
approximate matrix decompositions
Halko, et al., 2009 http://arxiv.org/abs/arXiv:0909.4061
* A randomized algorithm for the decomposition of matrices
Per-Gunnar Martinsson, Vladimir Rokhlin and Mark Tygert
"""
random_state = check_random_state(random_state)
n_random = n_components + n_oversamples
n_samples, n_features = M.shape
if transpose == 'auto' and n_samples > n_features:
transpose = True
if transpose:
# this implementation is a bit faster with smaller shape[1]
M = M.T
Q = randomized_range_finder(M, n_random, n_iter, random_state)
# project M to the (k + p) dimensional space using the basis vectors
B = safe_sparse_dot(Q.T, M)
# compute the SVD on the thin matrix: (k + p) wide
Uhat, s, V = linalg.svd(B, full_matrices=False)
del B
U = np.dot(Q, Uhat)
if flip_sign:
U, V = svd_flip(U, V)
if transpose:
# transpose back the results according to the input convention
return V[:n_components, :].T, s[:n_components], U[:, :n_components].T
else:
return U[:, :n_components], s[:n_components], V[:n_components, :]
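# Illustrative sketch (not part of the original source): a minimal call to
# randomized_svd followed by a reconstruction check; the sizes, rank and seed
# are arbitrary assumptions.
#
#     import numpy as np
#     rng = np.random.RandomState(0)
#     M = np.dot(rng.randn(50, 5), rng.randn(5, 30))    # rank-5 matrix
#     U, s, V = randomized_svd(M, n_components=5, n_iter=3, random_state=0)
#     np.allclose(M, np.dot(U * s, V))   # expected True: top 5 components recover M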
def logsumexp(arr, axis=0):
"""Computes the sum of arr assuming arr is in the log domain.
Returns log(sum(exp(arr))) while minimizing the possibility of
over/underflow.
Examples
--------
>>> import numpy as np
>>> from sklearn.utils.extmath import logsumexp
>>> a = np.arange(10)
>>> np.log(np.sum(np.exp(a)))
9.4586297444267107
>>> logsumexp(a)
9.4586297444267107
"""
arr = np.rollaxis(arr, axis)
    # Use the max to normalize, as with the log this is what accumulates
    # the least error
vmax = arr.max(axis=0)
out = np.log(np.sum(np.exp(arr - vmax), axis=0))
out += vmax
return out
def weighted_mode(a, w, axis=0):
"""Returns an array of the weighted modal (most common) value in a
If there is more than one such value, only the first is returned.
The bin-count for the modal bins is also returned.
This is an extension of the algorithm in scipy.stats.mode.
Parameters
----------
a : array_like
n-dimensional array of which to find mode(s).
w : array_like
n-dimensional array of weights for each value
axis : int, optional
Axis along which to operate. Default is 0, i.e. the first axis.
Returns
-------
vals : ndarray
Array of modal values.
score : ndarray
Array of weighted counts for each mode.
Examples
--------
>>> from sklearn.utils.extmath import weighted_mode
>>> x = [4, 1, 4, 2, 4, 2]
>>> weights = [1, 1, 1, 1, 1, 1]
>>> weighted_mode(x, weights)
(array([ 4.]), array([ 3.]))
The value 4 appears three times: with uniform weights, the result is
simply the mode of the distribution.
>>> weights = [1, 3, 0.5, 1.5, 1, 2] # deweight the 4's
>>> weighted_mode(x, weights)
(array([ 2.]), array([ 3.5]))
The value 2 has the highest score: it appears twice with weights of
1.5 and 2: the sum of these is 3.
See Also
--------
scipy.stats.mode
"""
if axis is None:
a = np.ravel(a)
w = np.ravel(w)
axis = 0
else:
a = np.asarray(a)
w = np.asarray(w)
axis = axis
if a.shape != w.shape:
w = np.zeros(a.shape, dtype=w.dtype) + w
scores = np.unique(np.ravel(a)) # get ALL unique values
testshape = list(a.shape)
testshape[axis] = 1
oldmostfreq = np.zeros(testshape)
oldcounts = np.zeros(testshape)
for score in scores:
template = np.zeros(a.shape)
ind = (a == score)
template[ind] = w[ind]
counts = np.expand_dims(np.sum(template, axis), axis)
mostfrequent = np.where(counts > oldcounts, score, oldmostfreq)
oldcounts = np.maximum(counts, oldcounts)
oldmostfreq = mostfrequent
return mostfrequent, oldcounts
def pinvh(a, cond=None, rcond=None, lower=True):
"""Compute the (Moore-Penrose) pseudo-inverse of a hermetian matrix.
Calculate a generalized inverse of a symmetric matrix using its
eigenvalue decomposition and including all 'large' eigenvalues.
Parameters
----------
a : array, shape (N, N)
        Real symmetric or complex hermitian matrix to be pseudo-inverted
cond, rcond : float or None
Cutoff for 'small' eigenvalues.
Singular values smaller than rcond * largest_eigenvalue are considered
zero.
If None or -1, suitable machine precision is used.
lower : boolean
Whether the pertinent array data is taken from the lower or upper
triangle of a. (Default: lower)
Returns
-------
B : array, shape (N, N)
Raises
------
LinAlgError
If eigenvalue does not converge
Examples
--------
>>> import numpy as np
>>> a = np.random.randn(9, 6)
>>> a = np.dot(a, a.T)
>>> B = pinvh(a)
>>> np.allclose(a, np.dot(a, np.dot(B, a)))
True
>>> np.allclose(B, np.dot(B, np.dot(a, B)))
True
"""
a = np.asarray_chkfinite(a)
s, u = linalg.eigh(a, lower=lower)
if rcond is not None:
cond = rcond
if cond in [None, -1]:
t = u.dtype.char.lower()
factor = {'f': 1E3, 'd': 1E6}
cond = factor[t] * np.finfo(t).eps
# unlike svd case, eigh can lead to negative eigenvalues
above_cutoff = (abs(s) > cond * np.max(abs(s)))
psigma_diag = np.zeros_like(s)
psigma_diag[above_cutoff] = 1.0 / s[above_cutoff]
return np.dot(u * psigma_diag, np.conjugate(u).T)
def cartesian(arrays, out=None):
"""Generate a cartesian product of input arrays.
Parameters
----------
arrays : list of array-like
1-D arrays to form the cartesian product of.
out : ndarray
Array to place the cartesian product in.
Returns
-------
out : ndarray
2-D array of shape (M, len(arrays)) containing cartesian products
formed of input arrays.
Examples
--------
>>> cartesian(([1, 2, 3], [4, 5], [6, 7]))
array([[1, 4, 6],
[1, 4, 7],
[1, 5, 6],
[1, 5, 7],
[2, 4, 6],
[2, 4, 7],
[2, 5, 6],
[2, 5, 7],
[3, 4, 6],
[3, 4, 7],
[3, 5, 6],
[3, 5, 7]])
"""
arrays = [np.asarray(x) for x in arrays]
shape = (len(x) for x in arrays)
dtype = arrays[0].dtype
ix = np.indices(shape)
ix = ix.reshape(len(arrays), -1).T
if out is None:
out = np.empty_like(ix, dtype=dtype)
for n, arr in enumerate(arrays):
out[:, n] = arrays[n][ix[:, n]]
return out
def svd_flip(u, v, u_based_decision=True):
"""Sign correction to ensure deterministic output from SVD.
Adjusts the columns of u and the rows of v such that the loadings in the
columns in u that are largest in absolute value are always positive.
Parameters
----------
u, v : arrays
The output of `linalg.svd` or `sklearn.utils.extmath.randomized_svd`,
with matching inner dimensions so one can compute `np.dot(u * s, v)`.
u_based_decision : boolean, (default=True)
If True, use the columns of u as the basis for sign flipping. Otherwise,
use the rows of v. The choice of which variable to base the decision on
is generally algorithm dependent.
Returns
-------
u_adjusted, v_adjusted : arrays with the same dimensions as the input.
"""
if u_based_decision:
# columns of u, rows of v
max_abs_cols = np.argmax(np.abs(u), axis=0)
signs = np.sign(u[max_abs_cols, xrange(u.shape[1])])
u *= signs
v *= signs[:, np.newaxis]
else:
# rows of v, columns of u
max_abs_rows = np.argmax(np.abs(v), axis=1)
signs = np.sign(v[xrange(v.shape[0]), max_abs_rows])
u *= signs
v *= signs[:, np.newaxis]
return u, v
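# Illustrative sketch (not part of the original source): making SVD signs
# deterministic without changing the product; the matrix is an arbitrary
# assumption.
#
#     import numpy as np
#     from scipy import linalg
#     u, s, v = linalg.svd(np.random.RandomState(0).randn(6, 4),
#                          full_matrices=False)
#     u, v = svd_flip(u, v)   # largest-|loading| entry of each u column now > 0
#     # np.dot(u * s, v) is unchanged because each sign flips both u and v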
@deprecated('to be removed in 0.17; use scipy.special.expit or log_logistic')
def logistic_sigmoid(X, log=False, out=None):
"""Logistic function, ``1 / (1 + e ** (-x))``, or its log."""
from .fixes import expit
fn = log_logistic if log else expit
return fn(X, out)
def log_logistic(X, out=None):
"""Compute the log of the logistic function, ``log(1 / (1 + e ** -x))``.
This implementation is numerically stable because it splits positive and
negative values::
-log(1 + exp(-x_i)) if x_i > 0
x_i - log(1 + exp(x_i)) if x_i <= 0
For the ordinary logistic function, use ``sklearn.utils.fixes.expit``.
Parameters
----------
X: array-like, shape (M, N)
Argument to the logistic function
out: array-like, shape: (M, N), optional:
Preallocated output array.
Returns
-------
out: array, shape (M, N)
Log of the logistic function evaluated at every point in x
Notes
-----
See the blog post describing this implementation:
http://fa.bianp.net/blog/2013/numerical-optimizers-for-logistic-regression/
"""
is_1d = X.ndim == 1
X = check_array(X, dtype=np.float)
n_samples, n_features = X.shape
if out is None:
out = np.empty_like(X)
_log_logistic_sigmoid(n_samples, n_features, X, out)
if is_1d:
return np.squeeze(out)
return out
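# Illustrative sketch (not part of the original source): log_logistic versus the
# naive formula, which loses precision/overflows for large negative inputs; the
# values are arbitrary assumptions.
#
#     import numpy as np
#     x = np.array([[-1000., 0., 1000.]])
#     log_logistic(x)                  # array([[-1000., -0.693...,  0.]])
#     np.log(1. / (1. + np.exp(-x)))   # overflows/underflows at x = -1000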
def safe_min(X):
"""Returns the minimum value of a dense or a CSR/CSC matrix.
    Adapted from http://stackoverflow.com/q/13426580
"""
if issparse(X):
if len(X.data) == 0:
return 0
m = X.data.min()
return m if X.getnnz() == X.size else min(m, 0)
else:
return X.min()
def make_nonnegative(X, min_value=0):
"""Ensure `X.min()` >= `min_value`."""
min_ = safe_min(X)
if min_ < min_value:
if issparse(X):
raise ValueError("Cannot make the data matrix"
" nonnegative because it is sparse."
" Adding a value to every entry would"
" make it no longer sparse.")
X = X + (min_value - min_)
return X
def _batch_mean_variance_update(X, old_mean, old_variance, old_sample_count):
"""Calculate an average mean update and a Youngs and Cramer variance update.
From the paper "Algorithms for computing the sample variance: analysis and
recommendations", by Chan, Golub, and LeVeque.
Parameters
----------
X : array-like, shape (n_samples, n_features)
Data to use for variance update
old_mean : array-like, shape: (n_features,)
old_variance : array-like, shape: (n_features,)
old_sample_count : int
Returns
-------
updated_mean : array, shape (n_features,)
updated_variance : array, shape (n_features,)
updated_sample_count : int
References
----------
    T. Chan, G. Golub, R. LeVeque. Algorithms for computing the sample variance:
        analysis and recommendations, The American Statistician, Vol. 37, No. 3,
        pp. 242-247
"""
new_sum = X.sum(axis=0)
new_variance = X.var(axis=0) * X.shape[0]
old_sum = old_mean * old_sample_count
n_samples = X.shape[0]
updated_sample_count = old_sample_count + n_samples
partial_variance = old_sample_count / (n_samples * updated_sample_count) * (
n_samples / old_sample_count * old_sum - new_sum) ** 2
unnormalized_variance = old_variance * old_sample_count + new_variance + \
partial_variance
return ((old_sum + new_sum) / updated_sample_count,
unnormalized_variance / updated_sample_count,
updated_sample_count)
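# Illustrative sketch (not part of the original source): folding a second batch
# into running statistics and checking against the full-data result; the data
# and the 60/40 split are arbitrary assumptions.
#
#     import numpy as np
#     X = np.random.RandomState(0).randn(100, 3)
#     mean1, var1, n1 = X[:60].mean(axis=0), X[:60].var(axis=0), 60
#     mean, var, n = _batch_mean_variance_update(X[60:], mean1, var1, n1)
#     np.allclose(mean, X.mean(axis=0)) and np.allclose(var, X.var(axis=0))  # True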
|
bsd-3-clause
|
cython-testbed/pandas
|
pandas/tests/series/test_missing.py
|
2
|
51981
|
# coding=utf-8
# pylint: disable-msg=E1101,W0612
import pytz
import pytest
from datetime import timedelta, datetime
from distutils.version import LooseVersion
from numpy import nan
import numpy as np
import pandas as pd
from pandas import (Series, DataFrame, isna, date_range,
MultiIndex, Index, Timestamp, NaT, IntervalIndex,
Categorical)
from pandas.compat import range
from pandas._libs.tslib import iNaT
from pandas.core.series import remove_na
from pandas.util.testing import assert_series_equal, assert_frame_equal
import pandas.util.testing as tm
import pandas.util._test_decorators as td
from pandas.errors import PerformanceWarning
try:
import scipy
_is_scipy_ge_0190 = (LooseVersion(scipy.__version__) >=
LooseVersion('0.19.0'))
except ImportError:
_is_scipy_ge_0190 = False
def _skip_if_no_pchip():
try:
from scipy.interpolate import pchip_interpolate # noqa
except ImportError:
import pytest
pytest.skip('scipy.interpolate.pchip missing')
def _skip_if_no_akima():
try:
from scipy.interpolate import Akima1DInterpolator # noqa
except ImportError:
import pytest
pytest.skip('scipy.interpolate.Akima1DInterpolator missing')
def _simple_ts(start, end, freq='D'):
rng = date_range(start, end, freq=freq)
return Series(np.random.randn(len(rng)), index=rng)
class TestSeriesMissingData():
def test_remove_na_deprecation(self):
# see gh-16971
with tm.assert_produces_warning(FutureWarning):
remove_na(Series([]))
def test_timedelta_fillna(self):
# GH 3371
s = Series([Timestamp('20130101'), Timestamp('20130101'),
Timestamp('20130102'), Timestamp('20130103 9:01:01')])
td = s.diff()
# reg fillna
result = td.fillna(0)
expected = Series([timedelta(0), timedelta(0), timedelta(1),
timedelta(days=1, seconds=9 * 3600 + 60 + 1)])
assert_series_equal(result, expected)
        # interpreted as seconds
result = td.fillna(1)
expected = Series([timedelta(seconds=1), timedelta(0), timedelta(1),
timedelta(days=1, seconds=9 * 3600 + 60 + 1)])
assert_series_equal(result, expected)
result = td.fillna(timedelta(days=1, seconds=1))
expected = Series([timedelta(days=1, seconds=1), timedelta(0),
timedelta(1),
timedelta(days=1, seconds=9 * 3600 + 60 + 1)])
assert_series_equal(result, expected)
result = td.fillna(np.timedelta64(int(1e9)))
expected = Series([timedelta(seconds=1), timedelta(0), timedelta(1),
timedelta(days=1, seconds=9 * 3600 + 60 + 1)])
assert_series_equal(result, expected)
result = td.fillna(NaT)
expected = Series([NaT, timedelta(0), timedelta(1),
timedelta(days=1, seconds=9 * 3600 + 60 + 1)],
dtype='m8[ns]')
assert_series_equal(result, expected)
# ffill
td[2] = np.nan
result = td.ffill()
expected = td.fillna(0)
expected[0] = np.nan
assert_series_equal(result, expected)
# bfill
td[2] = np.nan
result = td.bfill()
expected = td.fillna(0)
expected[2] = timedelta(days=1, seconds=9 * 3600 + 60 + 1)
assert_series_equal(result, expected)
def test_datetime64_fillna(self):
s = Series([Timestamp('20130101'), Timestamp('20130101'), Timestamp(
'20130102'), Timestamp('20130103 9:01:01')])
s[2] = np.nan
# reg fillna
result = s.fillna(Timestamp('20130104'))
expected = Series([Timestamp('20130101'), Timestamp(
'20130101'), Timestamp('20130104'), Timestamp('20130103 9:01:01')])
assert_series_equal(result, expected)
result = s.fillna(NaT)
expected = s
assert_series_equal(result, expected)
# ffill
result = s.ffill()
expected = Series([Timestamp('20130101'), Timestamp(
'20130101'), Timestamp('20130101'), Timestamp('20130103 9:01:01')])
assert_series_equal(result, expected)
# bfill
result = s.bfill()
expected = Series([Timestamp('20130101'), Timestamp('20130101'),
Timestamp('20130103 9:01:01'), Timestamp(
'20130103 9:01:01')])
assert_series_equal(result, expected)
# GH 6587
# make sure that we are treating as integer when filling
# this also tests inference of a datetime-like with NaT's
s = Series([pd.NaT, pd.NaT, '2013-08-05 15:30:00.000001'])
expected = Series(
['2013-08-05 15:30:00.000001', '2013-08-05 15:30:00.000001',
'2013-08-05 15:30:00.000001'], dtype='M8[ns]')
result = s.fillna(method='backfill')
assert_series_equal(result, expected)
def test_datetime64_tz_fillna(self):
for tz in ['US/Eastern', 'Asia/Tokyo']:
# DatetimeBlock
s = Series([Timestamp('2011-01-01 10:00'), pd.NaT,
Timestamp('2011-01-03 10:00'), pd.NaT])
null_loc = pd.Series([False, True, False, True])
result = s.fillna(pd.Timestamp('2011-01-02 10:00'))
expected = Series([Timestamp('2011-01-01 10:00'),
Timestamp('2011-01-02 10:00'),
Timestamp('2011-01-03 10:00'),
Timestamp('2011-01-02 10:00')])
tm.assert_series_equal(expected, result)
# check s is not changed
tm.assert_series_equal(pd.isna(s), null_loc)
result = s.fillna(pd.Timestamp('2011-01-02 10:00', tz=tz))
expected = Series([Timestamp('2011-01-01 10:00'),
Timestamp('2011-01-02 10:00', tz=tz),
Timestamp('2011-01-03 10:00'),
Timestamp('2011-01-02 10:00', tz=tz)])
tm.assert_series_equal(expected, result)
tm.assert_series_equal(pd.isna(s), null_loc)
result = s.fillna('AAA')
expected = Series([Timestamp('2011-01-01 10:00'), 'AAA',
Timestamp('2011-01-03 10:00'), 'AAA'],
dtype=object)
tm.assert_series_equal(expected, result)
tm.assert_series_equal(pd.isna(s), null_loc)
result = s.fillna({1: pd.Timestamp('2011-01-02 10:00', tz=tz),
3: pd.Timestamp('2011-01-04 10:00')})
expected = Series([Timestamp('2011-01-01 10:00'),
Timestamp('2011-01-02 10:00', tz=tz),
Timestamp('2011-01-03 10:00'),
Timestamp('2011-01-04 10:00')])
tm.assert_series_equal(expected, result)
tm.assert_series_equal(pd.isna(s), null_loc)
result = s.fillna({1: pd.Timestamp('2011-01-02 10:00'),
3: pd.Timestamp('2011-01-04 10:00')})
expected = Series([Timestamp('2011-01-01 10:00'),
Timestamp('2011-01-02 10:00'),
Timestamp('2011-01-03 10:00'),
Timestamp('2011-01-04 10:00')])
tm.assert_series_equal(expected, result)
tm.assert_series_equal(pd.isna(s), null_loc)
# DatetimeBlockTZ
idx = pd.DatetimeIndex(['2011-01-01 10:00', pd.NaT,
'2011-01-03 10:00', pd.NaT], tz=tz)
s = pd.Series(idx)
assert s.dtype == 'datetime64[ns, {0}]'.format(tz)
tm.assert_series_equal(pd.isna(s), null_loc)
result = s.fillna(pd.Timestamp('2011-01-02 10:00'))
expected = Series([Timestamp('2011-01-01 10:00', tz=tz),
Timestamp('2011-01-02 10:00'),
Timestamp('2011-01-03 10:00', tz=tz),
Timestamp('2011-01-02 10:00')])
tm.assert_series_equal(expected, result)
tm.assert_series_equal(pd.isna(s), null_loc)
result = s.fillna(pd.Timestamp('2011-01-02 10:00', tz=tz))
idx = pd.DatetimeIndex(['2011-01-01 10:00', '2011-01-02 10:00',
'2011-01-03 10:00', '2011-01-02 10:00'],
tz=tz)
expected = Series(idx)
tm.assert_series_equal(expected, result)
tm.assert_series_equal(pd.isna(s), null_loc)
result = s.fillna(pd.Timestamp('2011-01-02 10:00',
tz=tz).to_pydatetime())
idx = pd.DatetimeIndex(['2011-01-01 10:00', '2011-01-02 10:00',
'2011-01-03 10:00', '2011-01-02 10:00'],
tz=tz)
expected = Series(idx)
tm.assert_series_equal(expected, result)
tm.assert_series_equal(pd.isna(s), null_loc)
result = s.fillna('AAA')
expected = Series([Timestamp('2011-01-01 10:00', tz=tz), 'AAA',
Timestamp('2011-01-03 10:00', tz=tz), 'AAA'],
dtype=object)
tm.assert_series_equal(expected, result)
tm.assert_series_equal(pd.isna(s), null_loc)
result = s.fillna({1: pd.Timestamp('2011-01-02 10:00', tz=tz),
3: pd.Timestamp('2011-01-04 10:00')})
expected = Series([Timestamp('2011-01-01 10:00', tz=tz),
Timestamp('2011-01-02 10:00', tz=tz),
Timestamp('2011-01-03 10:00', tz=tz),
Timestamp('2011-01-04 10:00')])
tm.assert_series_equal(expected, result)
tm.assert_series_equal(pd.isna(s), null_loc)
result = s.fillna({1: pd.Timestamp('2011-01-02 10:00', tz=tz),
3: pd.Timestamp('2011-01-04 10:00', tz=tz)})
expected = Series([Timestamp('2011-01-01 10:00', tz=tz),
Timestamp('2011-01-02 10:00', tz=tz),
Timestamp('2011-01-03 10:00', tz=tz),
Timestamp('2011-01-04 10:00', tz=tz)])
tm.assert_series_equal(expected, result)
tm.assert_series_equal(pd.isna(s), null_loc)
# filling with a naive/other zone, coerce to object
result = s.fillna(Timestamp('20130101'))
expected = Series([Timestamp('2011-01-01 10:00', tz=tz),
Timestamp('2013-01-01'),
Timestamp('2011-01-03 10:00', tz=tz),
Timestamp('2013-01-01')])
tm.assert_series_equal(expected, result)
tm.assert_series_equal(pd.isna(s), null_loc)
result = s.fillna(Timestamp('20130101', tz='US/Pacific'))
expected = Series([Timestamp('2011-01-01 10:00', tz=tz),
Timestamp('2013-01-01', tz='US/Pacific'),
Timestamp('2011-01-03 10:00', tz=tz),
Timestamp('2013-01-01', tz='US/Pacific')])
tm.assert_series_equal(expected, result)
tm.assert_series_equal(pd.isna(s), null_loc)
# with timezone
# GH 15855
df = pd.Series([pd.Timestamp('2012-11-11 00:00:00+01:00'), pd.NaT])
exp = pd.Series([pd.Timestamp('2012-11-11 00:00:00+01:00'),
pd.Timestamp('2012-11-11 00:00:00+01:00')])
assert_series_equal(df.fillna(method='pad'), exp)
df = pd.Series([pd.NaT, pd.Timestamp('2012-11-11 00:00:00+01:00')])
exp = pd.Series([pd.Timestamp('2012-11-11 00:00:00+01:00'),
pd.Timestamp('2012-11-11 00:00:00+01:00')])
assert_series_equal(df.fillna(method='bfill'), exp)
def test_fillna_consistency(self):
# GH 16402
# fillna with a tz aware to a tz-naive, should result in object
s = Series([Timestamp('20130101'), pd.NaT])
result = s.fillna(Timestamp('20130101', tz='US/Eastern'))
expected = Series([Timestamp('20130101'),
Timestamp('2013-01-01', tz='US/Eastern')],
dtype='object')
assert_series_equal(result, expected)
# where (we ignore the errors=)
result = s.where([True, False],
Timestamp('20130101', tz='US/Eastern'),
errors='ignore')
assert_series_equal(result, expected)
result = s.where([True, False],
Timestamp('20130101', tz='US/Eastern'),
errors='ignore')
assert_series_equal(result, expected)
# with a non-datetime
result = s.fillna('foo')
expected = Series([Timestamp('20130101'),
'foo'])
assert_series_equal(result, expected)
# assignment
s2 = s.copy()
s2[1] = 'foo'
assert_series_equal(s2, expected)
def test_datetime64tz_fillna_round_issue(self):
# GH 14872
data = pd.Series([pd.NaT, pd.NaT,
datetime(2016, 12, 12, 22, 24, 6, 100001,
tzinfo=pytz.utc)])
filled = data.fillna(method='bfill')
expected = pd.Series([datetime(2016, 12, 12, 22, 24, 6,
100001, tzinfo=pytz.utc),
datetime(2016, 12, 12, 22, 24, 6,
100001, tzinfo=pytz.utc),
datetime(2016, 12, 12, 22, 24, 6,
100001, tzinfo=pytz.utc)])
assert_series_equal(filled, expected)
def test_fillna_downcast(self):
# GH 15277
# infer int64 from float64
s = pd.Series([1., np.nan])
result = s.fillna(0, downcast='infer')
expected = pd.Series([1, 0])
assert_series_equal(result, expected)
# infer int64 from float64 when fillna value is a dict
s = pd.Series([1., np.nan])
result = s.fillna({1: 0}, downcast='infer')
expected = pd.Series([1, 0])
assert_series_equal(result, expected)
def test_fillna_int(self):
s = Series(np.random.randint(-100, 100, 50))
s.fillna(method='ffill', inplace=True)
assert_series_equal(s.fillna(method='ffill', inplace=False), s)
def test_fillna_raise(self):
s = Series(np.random.randint(-100, 100, 50))
pytest.raises(TypeError, s.fillna, [1, 2])
pytest.raises(TypeError, s.fillna, (1, 2))
# related GH 9217, make sure limit is an int and greater than 0
s = Series([1, 2, 3, None])
for limit in [-1, 0, 1., 2.]:
for method in ['backfill', 'bfill', 'pad', 'ffill', None]:
with pytest.raises(ValueError):
s.fillna(1, limit=limit, method=method)
def test_categorical_nan_equality(self):
cat = Series(Categorical(["a", "b", "c", np.nan]))
exp = Series([True, True, True, False])
res = (cat == cat)
tm.assert_series_equal(res, exp)
def test_categorical_nan_handling(self):
# NaNs are represented as -1 in labels
s = Series(Categorical(["a", "b", np.nan, "a"]))
tm.assert_index_equal(s.cat.categories, Index(["a", "b"]))
tm.assert_numpy_array_equal(s.values.codes,
np.array([0, 1, -1, 0], dtype=np.int8))
@pytest.mark.parametrize('fill_value, expected_output', [
('a', ['a', 'a', 'b', 'a', 'a']),
({1: 'a', 3: 'b', 4: 'b'}, ['a', 'a', 'b', 'b', 'b']),
({1: 'a'}, ['a', 'a', 'b', np.nan, np.nan]),
({1: 'a', 3: 'b'}, ['a', 'a', 'b', 'b', np.nan]),
(Series('a'), ['a', np.nan, 'b', np.nan, np.nan]),
(Series('a', index=[1]), ['a', 'a', 'b', np.nan, np.nan]),
(Series({1: 'a', 3: 'b'}), ['a', 'a', 'b', 'b', np.nan]),
(Series(['a', 'b'], index=[3, 4]), ['a', np.nan, 'b', 'a', 'b'])
])
def test_fillna_categorical(self, fill_value, expected_output):
# GH 17033
# Test fillna for a Categorical series
data = ['a', np.nan, 'b', np.nan, np.nan]
s = Series(Categorical(data, categories=['a', 'b']))
exp = Series(Categorical(expected_output, categories=['a', 'b']))
tm.assert_series_equal(s.fillna(fill_value), exp)
def test_fillna_categorical_raise(self):
data = ['a', np.nan, 'b', np.nan, np.nan]
s = Series(Categorical(data, categories=['a', 'b']))
with tm.assert_raises_regex(ValueError,
"fill value must be in categories"):
s.fillna('d')
with tm.assert_raises_regex(ValueError,
"fill value must be in categories"):
s.fillna(Series('d'))
with tm.assert_raises_regex(ValueError,
"fill value must be in categories"):
s.fillna({1: 'd', 3: 'a'})
with tm.assert_raises_regex(TypeError,
'"value" parameter must be a scalar or '
'dict, but you passed a "list"'):
s.fillna(['a', 'b'])
with tm.assert_raises_regex(TypeError,
'"value" parameter must be a scalar or '
'dict, but you passed a "tuple"'):
s.fillna(('a', 'b'))
with tm.assert_raises_regex(TypeError,
'"value" parameter must be a scalar, dict '
'or Series, but you passed a "DataFrame"'):
s.fillna(DataFrame({1: ['a'], 3: ['b']}))
def test_fillna_nat(self):
series = Series([0, 1, 2, iNaT], dtype='M8[ns]')
filled = series.fillna(method='pad')
filled2 = series.fillna(value=series.values[2])
expected = series.copy()
expected.values[3] = expected.values[2]
assert_series_equal(filled, expected)
assert_series_equal(filled2, expected)
df = DataFrame({'A': series})
filled = df.fillna(method='pad')
filled2 = df.fillna(value=series.values[2])
expected = DataFrame({'A': expected})
assert_frame_equal(filled, expected)
assert_frame_equal(filled2, expected)
series = Series([iNaT, 0, 1, 2], dtype='M8[ns]')
filled = series.fillna(method='bfill')
filled2 = series.fillna(value=series[1])
expected = series.copy()
expected[0] = expected[1]
assert_series_equal(filled, expected)
assert_series_equal(filled2, expected)
df = DataFrame({'A': series})
filled = df.fillna(method='bfill')
filled2 = df.fillna(value=series[1])
expected = DataFrame({'A': expected})
assert_frame_equal(filled, expected)
assert_frame_equal(filled2, expected)
def test_isna_for_inf(self):
s = Series(['a', np.inf, np.nan, 1.0])
with pd.option_context('mode.use_inf_as_na', True):
r = s.isna()
dr = s.dropna()
e = Series([False, True, True, False])
de = Series(['a', 1.0], index=[0, 3])
tm.assert_series_equal(r, e)
tm.assert_series_equal(dr, de)
@tm.capture_stdout
def test_isnull_for_inf_deprecated(self):
# gh-17115
s = Series(['a', np.inf, np.nan, 1.0])
with pd.option_context('mode.use_inf_as_null', True):
r = s.isna()
dr = s.dropna()
e = Series([False, True, True, False])
de = Series(['a', 1.0], index=[0, 3])
tm.assert_series_equal(r, e)
tm.assert_series_equal(dr, de)
def test_fillna(self, datetime_series):
ts = Series([0., 1., 2., 3., 4.], index=tm.makeDateIndex(5))
tm.assert_series_equal(ts, ts.fillna(method='ffill'))
ts[2] = np.NaN
exp = Series([0., 1., 1., 3., 4.], index=ts.index)
tm.assert_series_equal(ts.fillna(method='ffill'), exp)
exp = Series([0., 1., 3., 3., 4.], index=ts.index)
tm.assert_series_equal(ts.fillna(method='backfill'), exp)
exp = Series([0., 1., 5., 3., 4.], index=ts.index)
tm.assert_series_equal(ts.fillna(value=5), exp)
pytest.raises(ValueError, ts.fillna)
pytest.raises(ValueError, datetime_series.fillna, value=0,
method='ffill')
# GH 5703
s1 = Series([np.nan])
s2 = Series([1])
result = s1.fillna(s2)
expected = Series([1.])
assert_series_equal(result, expected)
result = s1.fillna({})
assert_series_equal(result, s1)
result = s1.fillna(Series(()))
assert_series_equal(result, s1)
result = s2.fillna(s1)
assert_series_equal(result, s2)
result = s1.fillna({0: 1})
assert_series_equal(result, expected)
result = s1.fillna({1: 1})
assert_series_equal(result, Series([np.nan]))
result = s1.fillna({0: 1, 1: 1})
assert_series_equal(result, expected)
result = s1.fillna(Series({0: 1, 1: 1}))
assert_series_equal(result, expected)
result = s1.fillna(Series({0: 1, 1: 1}, index=[4, 5]))
assert_series_equal(result, s1)
s1 = Series([0, 1, 2], list('abc'))
s2 = Series([0, np.nan, 2], list('bac'))
result = s2.fillna(s1)
expected = Series([0, 0, 2.], list('bac'))
assert_series_equal(result, expected)
# limit
s = Series(np.nan, index=[0, 1, 2])
result = s.fillna(999, limit=1)
expected = Series([999, np.nan, np.nan], index=[0, 1, 2])
assert_series_equal(result, expected)
result = s.fillna(999, limit=2)
expected = Series([999, 999, np.nan], index=[0, 1, 2])
assert_series_equal(result, expected)
# GH 9043
# make sure a string representation of int/float values can be filled
# correctly without raising errors or being converted
vals = ['0', '1.5', '-0.3']
for val in vals:
s = Series([0, 1, np.nan, np.nan, 4], dtype='float64')
result = s.fillna(val)
expected = Series([0, 1, val, val, 4], dtype='object')
assert_series_equal(result, expected)
def test_fillna_bug(self):
x = Series([nan, 1., nan, 3., nan], ['z', 'a', 'b', 'c', 'd'])
filled = x.fillna(method='ffill')
expected = Series([nan, 1., 1., 3., 3.], x.index)
assert_series_equal(filled, expected)
filled = x.fillna(method='bfill')
expected = Series([1., 1., 3., 3., nan], x.index)
assert_series_equal(filled, expected)
def test_fillna_inplace(self):
x = Series([nan, 1., nan, 3., nan], ['z', 'a', 'b', 'c', 'd'])
y = x.copy()
y.fillna(value=0, inplace=True)
expected = x.fillna(value=0)
assert_series_equal(y, expected)
def test_fillna_invalid_method(self, datetime_series):
try:
datetime_series.fillna(method='ffil')
except ValueError as inst:
assert 'ffil' in str(inst)
def test_ffill(self):
ts = Series([0., 1., 2., 3., 4.], index=tm.makeDateIndex(5))
ts[2] = np.NaN
assert_series_equal(ts.ffill(), ts.fillna(method='ffill'))
def test_ffill_mixed_dtypes_without_missing_data(self):
# GH14956
series = pd.Series([datetime(2015, 1, 1, tzinfo=pytz.utc), 1])
result = series.ffill()
assert_series_equal(series, result)
def test_bfill(self):
ts = Series([0., 1., 2., 3., 4.], index=tm.makeDateIndex(5))
ts[2] = np.NaN
assert_series_equal(ts.bfill(), ts.fillna(method='bfill'))
def test_timedelta64_nan(self):
td = Series([timedelta(days=i) for i in range(10)])
# nan ops on timedeltas
td1 = td.copy()
td1[0] = np.nan
assert isna(td1[0])
assert td1[0].value == iNaT
td1[0] = td[0]
assert not isna(td1[0])
td1[1] = iNaT
assert isna(td1[1])
assert td1[1].value == iNaT
td1[1] = td[1]
assert not isna(td1[1])
td1[2] = NaT
assert isna(td1[2])
assert td1[2].value == iNaT
td1[2] = td[2]
assert not isna(td1[2])
# boolean setting
# this doesn't work, not sure numpy even supports it
# result = td[(td>np.timedelta64(timedelta(days=3))) &
# td<np.timedelta64(timedelta(days=7)))] = np.nan
# assert isna(result).sum() == 7
        # NumPy limitation =(
# def test_logical_range_select(self):
# np.random.seed(12345)
# selector = -0.5 <= datetime_series <= 0.5
# expected = (datetime_series >= -0.5) & (datetime_series <= 0.5)
# assert_series_equal(selector, expected)
def test_dropna_empty(self):
s = Series([])
assert len(s.dropna()) == 0
s.dropna(inplace=True)
assert len(s) == 0
# invalid axis
pytest.raises(ValueError, s.dropna, axis=1)
def test_datetime64_tz_dropna(self):
# DatetimeBlock
s = Series([Timestamp('2011-01-01 10:00'), pd.NaT, Timestamp(
'2011-01-03 10:00'), pd.NaT])
result = s.dropna()
expected = Series([Timestamp('2011-01-01 10:00'),
Timestamp('2011-01-03 10:00')], index=[0, 2])
tm.assert_series_equal(result, expected)
# DatetimeBlockTZ
idx = pd.DatetimeIndex(['2011-01-01 10:00', pd.NaT,
'2011-01-03 10:00', pd.NaT],
tz='Asia/Tokyo')
s = pd.Series(idx)
assert s.dtype == 'datetime64[ns, Asia/Tokyo]'
result = s.dropna()
expected = Series([Timestamp('2011-01-01 10:00', tz='Asia/Tokyo'),
Timestamp('2011-01-03 10:00', tz='Asia/Tokyo')],
index=[0, 2])
assert result.dtype == 'datetime64[ns, Asia/Tokyo]'
tm.assert_series_equal(result, expected)
def test_dropna_no_nan(self):
for s in [Series([1, 2, 3], name='x'), Series(
[False, True, False], name='x')]:
result = s.dropna()
tm.assert_series_equal(result, s)
assert result is not s
s2 = s.copy()
s2.dropna(inplace=True)
tm.assert_series_equal(s2, s)
def test_dropna_intervals(self):
s = Series([np.nan, 1, 2, 3], IntervalIndex.from_arrays(
[np.nan, 0, 1, 2],
[np.nan, 1, 2, 3]))
result = s.dropna()
expected = s.iloc[1:]
assert_series_equal(result, expected)
def test_valid(self, datetime_series):
ts = datetime_series.copy()
ts[::2] = np.NaN
result = ts.dropna()
assert len(result) == ts.count()
tm.assert_series_equal(result, ts[1::2])
tm.assert_series_equal(result, ts[pd.notna(ts)])
def test_isna(self):
ser = Series([0, 5.4, 3, nan, -0.001])
expected = Series([False, False, False, True, False])
tm.assert_series_equal(ser.isna(), expected)
ser = Series(["hi", "", nan])
expected = Series([False, False, True])
tm.assert_series_equal(ser.isna(), expected)
def test_notna(self):
ser = Series([0, 5.4, 3, nan, -0.001])
expected = Series([True, True, True, False, True])
tm.assert_series_equal(ser.notna(), expected)
ser = Series(["hi", "", nan])
expected = Series([True, True, False])
tm.assert_series_equal(ser.notna(), expected)
def test_pad_nan(self):
x = Series([np.nan, 1., np.nan, 3., np.nan], ['z', 'a', 'b', 'c', 'd'],
dtype=float)
x.fillna(method='pad', inplace=True)
expected = Series([np.nan, 1.0, 1.0, 3.0, 3.0],
['z', 'a', 'b', 'c', 'd'], dtype=float)
assert_series_equal(x[1:], expected[1:])
assert np.isnan(x[0]), np.isnan(expected[0])
def test_pad_require_monotonicity(self):
rng = date_range('1/1/2000', '3/1/2000', freq='B')
# neither monotonic increasing or decreasing
rng2 = rng[[1, 0, 2]]
pytest.raises(ValueError, rng2.get_indexer, rng, method='pad')
def test_dropna_preserve_name(self, datetime_series):
datetime_series[:5] = np.nan
result = datetime_series.dropna()
assert result.name == datetime_series.name
name = datetime_series.name
ts = datetime_series.copy()
ts.dropna(inplace=True)
assert ts.name == name
def test_fill_value_when_combine_const(self):
# GH12723
s = Series([0, 1, np.nan, 3, 4, 5])
exp = s.fillna(0).add(2)
res = s.add(2, fill_value=0)
assert_series_equal(res, exp)
def test_series_fillna_limit(self):
index = np.arange(10)
s = Series(np.random.randn(10), index=index)
result = s[:2].reindex(index)
result = result.fillna(method='pad', limit=5)
expected = s[:2].reindex(index).fillna(method='pad')
expected[-3:] = np.nan
assert_series_equal(result, expected)
result = s[-2:].reindex(index)
result = result.fillna(method='bfill', limit=5)
expected = s[-2:].reindex(index).fillna(method='backfill')
expected[:3] = np.nan
assert_series_equal(result, expected)
def test_sparse_series_fillna_limit(self):
index = np.arange(10)
s = Series(np.random.randn(10), index=index)
ss = s[:2].reindex(index).to_sparse()
        # TODO: what is this test doing? why are result and expected
        # the same call to fillna?
with tm.assert_produces_warning(PerformanceWarning):
# TODO: release-note fillna performance warning
result = ss.fillna(method='pad', limit=5)
expected = ss.fillna(method='pad', limit=5)
expected = expected.to_dense()
expected[-3:] = np.nan
expected = expected.to_sparse()
assert_series_equal(result, expected)
ss = s[-2:].reindex(index).to_sparse()
with tm.assert_produces_warning(PerformanceWarning):
result = ss.fillna(method='backfill', limit=5)
expected = ss.fillna(method='backfill')
expected = expected.to_dense()
expected[:3] = np.nan
expected = expected.to_sparse()
assert_series_equal(result, expected)
def test_sparse_series_pad_backfill_limit(self):
index = np.arange(10)
s = Series(np.random.randn(10), index=index)
s = s.to_sparse()
result = s[:2].reindex(index, method='pad', limit=5)
with tm.assert_produces_warning(PerformanceWarning):
expected = s[:2].reindex(index).fillna(method='pad')
expected = expected.to_dense()
expected[-3:] = np.nan
expected = expected.to_sparse()
assert_series_equal(result, expected)
result = s[-2:].reindex(index, method='backfill', limit=5)
with tm.assert_produces_warning(PerformanceWarning):
expected = s[-2:].reindex(index).fillna(method='backfill')
expected = expected.to_dense()
expected[:3] = np.nan
expected = expected.to_sparse()
assert_series_equal(result, expected)
def test_series_pad_backfill_limit(self):
index = np.arange(10)
s = Series(np.random.randn(10), index=index)
result = s[:2].reindex(index, method='pad', limit=5)
expected = s[:2].reindex(index).fillna(method='pad')
expected[-3:] = np.nan
assert_series_equal(result, expected)
result = s[-2:].reindex(index, method='backfill', limit=5)
expected = s[-2:].reindex(index).fillna(method='backfill')
expected[:3] = np.nan
assert_series_equal(result, expected)
class TestSeriesInterpolateData():
def test_interpolate(self, datetime_series, string_series):
ts = Series(np.arange(len(datetime_series), dtype=float),
datetime_series.index)
ts_copy = ts.copy()
ts_copy[5:10] = np.NaN
linear_interp = ts_copy.interpolate(method='linear')
tm.assert_series_equal(linear_interp, ts)
ord_ts = Series([d.toordinal() for d in datetime_series.index],
index=datetime_series.index).astype(float)
ord_ts_copy = ord_ts.copy()
ord_ts_copy[5:10] = np.NaN
time_interp = ord_ts_copy.interpolate(method='time')
tm.assert_series_equal(time_interp, ord_ts)
# try time interpolation on a non-TimeSeries
# Only raises ValueError if there are NaNs.
non_ts = string_series.copy()
non_ts[0] = np.NaN
pytest.raises(ValueError, non_ts.interpolate, method='time')
@td.skip_if_no_scipy
def test_interpolate_pchip(self):
_skip_if_no_pchip()
ser = Series(np.sort(np.random.uniform(size=100)))
# interpolate at new_index
new_index = ser.index.union(Index([49.25, 49.5, 49.75, 50.25, 50.5,
50.75]))
interp_s = ser.reindex(new_index).interpolate(method='pchip')
# does not blow up, GH5977
interp_s[49:51]
@td.skip_if_no_scipy
def test_interpolate_akima(self):
_skip_if_no_akima()
ser = Series([10, 11, 12, 13])
expected = Series([11.00, 11.25, 11.50, 11.75,
12.00, 12.25, 12.50, 12.75, 13.00],
index=Index([1.0, 1.25, 1.5, 1.75,
2.0, 2.25, 2.5, 2.75, 3.0]))
# interpolate at new_index
new_index = ser.index.union(Index([1.25, 1.5, 1.75, 2.25, 2.5, 2.75]))
interp_s = ser.reindex(new_index).interpolate(method='akima')
assert_series_equal(interp_s[1:3], expected)
@td.skip_if_no_scipy
def test_interpolate_piecewise_polynomial(self):
ser = Series([10, 11, 12, 13])
expected = Series([11.00, 11.25, 11.50, 11.75,
12.00, 12.25, 12.50, 12.75, 13.00],
index=Index([1.0, 1.25, 1.5, 1.75,
2.0, 2.25, 2.5, 2.75, 3.0]))
# interpolate at new_index
new_index = ser.index.union(Index([1.25, 1.5, 1.75, 2.25, 2.5, 2.75]))
interp_s = ser.reindex(new_index).interpolate(
method='piecewise_polynomial')
assert_series_equal(interp_s[1:3], expected)
@td.skip_if_no_scipy
def test_interpolate_from_derivatives(self):
ser = Series([10, 11, 12, 13])
expected = Series([11.00, 11.25, 11.50, 11.75,
12.00, 12.25, 12.50, 12.75, 13.00],
index=Index([1.0, 1.25, 1.5, 1.75,
2.0, 2.25, 2.5, 2.75, 3.0]))
# interpolate at new_index
new_index = ser.index.union(Index([1.25, 1.5, 1.75, 2.25, 2.5, 2.75]))
interp_s = ser.reindex(new_index).interpolate(
method='from_derivatives')
assert_series_equal(interp_s[1:3], expected)
@pytest.mark.parametrize("kwargs", [
{},
pytest.param({'method': 'polynomial', 'order': 1},
marks=td.skip_if_no_scipy)
])
def test_interpolate_corners(self, kwargs):
s = Series([np.nan, np.nan])
assert_series_equal(s.interpolate(**kwargs), s)
s = Series([]).interpolate()
assert_series_equal(s.interpolate(**kwargs), s)
def test_interpolate_index_values(self):
s = Series(np.nan, index=np.sort(np.random.rand(30)))
s[::3] = np.random.randn(10)
vals = s.index.values.astype(float)
result = s.interpolate(method='index')
expected = s.copy()
bad = isna(expected.values)
good = ~bad
expected = Series(np.interp(vals[bad], vals[good],
s.values[good]),
index=s.index[bad])
assert_series_equal(result[bad], expected)
# 'values' is synonymous with 'index' for the method kwarg
other_result = s.interpolate(method='values')
assert_series_equal(other_result, result)
assert_series_equal(other_result[bad], expected)
def test_interpolate_non_ts(self):
s = Series([1, 3, np.nan, np.nan, np.nan, 11])
with pytest.raises(ValueError):
s.interpolate(method='time')
@pytest.mark.parametrize("kwargs", [
{},
pytest.param({'method': 'polynomial', 'order': 1},
marks=td.skip_if_no_scipy)
])
def test_nan_interpolate(self, kwargs):
s = Series([0, 1, np.nan, 3])
result = s.interpolate(**kwargs)
expected = Series([0., 1., 2., 3.])
assert_series_equal(result, expected)
def test_nan_irregular_index(self):
s = Series([1, 2, np.nan, 4], index=[1, 3, 5, 9])
result = s.interpolate()
expected = Series([1., 2., 3., 4.], index=[1, 3, 5, 9])
assert_series_equal(result, expected)
def test_nan_str_index(self):
s = Series([0, 1, 2, np.nan], index=list('abcd'))
result = s.interpolate()
expected = Series([0., 1., 2., 2.], index=list('abcd'))
assert_series_equal(result, expected)
@td.skip_if_no_scipy
def test_interp_quad(self):
sq = Series([1, 4, np.nan, 16], index=[1, 2, 3, 4])
result = sq.interpolate(method='quadratic')
expected = Series([1., 4., 9., 16.], index=[1, 2, 3, 4])
assert_series_equal(result, expected)
@td.skip_if_no_scipy
def test_interp_scipy_basic(self):
s = Series([1, 3, np.nan, 12, np.nan, 25])
# slinear
expected = Series([1., 3., 7.5, 12., 18.5, 25.])
result = s.interpolate(method='slinear')
assert_series_equal(result, expected)
result = s.interpolate(method='slinear', downcast='infer')
assert_series_equal(result, expected)
# nearest
expected = Series([1, 3, 3, 12, 12, 25])
result = s.interpolate(method='nearest')
assert_series_equal(result, expected.astype('float'))
result = s.interpolate(method='nearest', downcast='infer')
assert_series_equal(result, expected)
# zero
expected = Series([1, 3, 3, 12, 12, 25])
result = s.interpolate(method='zero')
assert_series_equal(result, expected.astype('float'))
result = s.interpolate(method='zero', downcast='infer')
assert_series_equal(result, expected)
# quadratic
# GH #15662.
# new cubic and quadratic interpolation algorithms from scipy 0.19.0.
# previously `splmake` was used. See scipy/scipy#6710
if _is_scipy_ge_0190:
expected = Series([1, 3., 6.823529, 12., 18.058824, 25.])
else:
expected = Series([1, 3., 6.769231, 12., 18.230769, 25.])
result = s.interpolate(method='quadratic')
assert_series_equal(result, expected)
result = s.interpolate(method='quadratic', downcast='infer')
assert_series_equal(result, expected)
# cubic
expected = Series([1., 3., 6.8, 12., 18.2, 25.])
result = s.interpolate(method='cubic')
assert_series_equal(result, expected)
def test_interp_limit(self):
s = Series([1, 3, np.nan, np.nan, np.nan, 11])
expected = Series([1., 3., 5., 7., np.nan, 11.])
result = s.interpolate(method='linear', limit=2)
assert_series_equal(result, expected)
# GH 9217, make sure limit is an int and greater than 0
methods = ['linear', 'time', 'index', 'values', 'nearest', 'zero',
'slinear', 'quadratic', 'cubic', 'barycentric', 'krogh',
'polynomial', 'spline', 'piecewise_polynomial', None,
'from_derivatives', 'pchip', 'akima']
s = pd.Series([1, 2, np.nan, np.nan, 5])
for limit in [-1, 0, 1., 2.]:
for method in methods:
with pytest.raises(ValueError):
s.interpolate(limit=limit, method=method)
def test_interp_limit_forward(self):
s = Series([1, 3, np.nan, np.nan, np.nan, 11])
# Provide 'forward' (the default) explicitly here.
expected = Series([1., 3., 5., 7., np.nan, 11.])
result = s.interpolate(method='linear', limit=2,
limit_direction='forward')
assert_series_equal(result, expected)
result = s.interpolate(method='linear', limit=2,
limit_direction='FORWARD')
assert_series_equal(result, expected)
def test_interp_unlimited(self):
        # these tests are for issue #16282; the default limit=None is unlimited
s = Series([np.nan, 1., 3., np.nan, np.nan, np.nan, 11., np.nan])
expected = Series([1., 1., 3., 5., 7., 9., 11., 11.])
result = s.interpolate(method='linear',
limit_direction='both')
assert_series_equal(result, expected)
expected = Series([np.nan, 1., 3., 5., 7., 9., 11., 11.])
result = s.interpolate(method='linear',
limit_direction='forward')
assert_series_equal(result, expected)
expected = Series([1., 1., 3., 5., 7., 9., 11., np.nan])
result = s.interpolate(method='linear',
limit_direction='backward')
assert_series_equal(result, expected)
def test_interp_limit_bad_direction(self):
s = Series([1, 3, np.nan, np.nan, np.nan, 11])
pytest.raises(ValueError, s.interpolate, method='linear', limit=2,
limit_direction='abc')
# raises an error even if no limit is specified.
pytest.raises(ValueError, s.interpolate, method='linear',
limit_direction='abc')
# limit_area introduced GH #16284
def test_interp_limit_area(self):
# These tests are for issue #9218 -- fill NaNs in both directions.
s = Series([nan, nan, 3, nan, nan, nan, 7, nan, nan])
expected = Series([nan, nan, 3., 4., 5., 6., 7., nan, nan])
result = s.interpolate(method='linear', limit_area='inside')
assert_series_equal(result, expected)
expected = Series([nan, nan, 3., 4., nan, nan, 7., nan, nan])
        result = s.interpolate(method='linear', limit_area='inside', limit=1)
        assert_series_equal(result, expected)
expected = Series([nan, nan, 3., 4., nan, 6., 7., nan, nan])
result = s.interpolate(method='linear', limit_area='inside',
limit_direction='both', limit=1)
assert_series_equal(result, expected)
expected = Series([nan, nan, 3., nan, nan, nan, 7., 7., 7.])
result = s.interpolate(method='linear', limit_area='outside')
assert_series_equal(result, expected)
expected = Series([nan, nan, 3., nan, nan, nan, 7., 7., nan])
        result = s.interpolate(method='linear', limit_area='outside', limit=1)
        assert_series_equal(result, expected)
expected = Series([nan, 3., 3., nan, nan, nan, 7., 7., nan])
result = s.interpolate(method='linear', limit_area='outside',
limit_direction='both', limit=1)
assert_series_equal(result, expected)
expected = Series([3., 3., 3., nan, nan, nan, 7., nan, nan])
        result = s.interpolate(method='linear', limit_area='outside',
                               limit_direction='backward')
        assert_series_equal(result, expected)
# raises an error even if limit type is wrong.
pytest.raises(ValueError, s.interpolate, method='linear',
limit_area='abc')
def test_interp_limit_direction(self):
# These tests are for issue #9218 -- fill NaNs in both directions.
s = Series([1, 3, np.nan, np.nan, np.nan, 11])
expected = Series([1., 3., np.nan, 7., 9., 11.])
result = s.interpolate(method='linear', limit=2,
limit_direction='backward')
assert_series_equal(result, expected)
expected = Series([1., 3., 5., np.nan, 9., 11.])
result = s.interpolate(method='linear', limit=1,
limit_direction='both')
assert_series_equal(result, expected)
# Check that this works on a longer series of nans.
s = Series([1, 3, np.nan, np.nan, np.nan, 7, 9, np.nan, np.nan, 12,
np.nan])
expected = Series([1., 3., 4., 5., 6., 7., 9., 10., 11., 12., 12.])
result = s.interpolate(method='linear', limit=2,
limit_direction='both')
assert_series_equal(result, expected)
expected = Series([1., 3., 4., np.nan, 6., 7., 9., 10., 11., 12., 12.])
result = s.interpolate(method='linear', limit=1,
limit_direction='both')
assert_series_equal(result, expected)
def test_interp_limit_to_ends(self):
        # These tests are for issue #10420 -- flow back to the beginning.
s = Series([np.nan, np.nan, 5, 7, 9, np.nan])
expected = Series([5., 5., 5., 7., 9., np.nan])
result = s.interpolate(method='linear', limit=2,
limit_direction='backward')
assert_series_equal(result, expected)
expected = Series([5., 5., 5., 7., 9., 9.])
result = s.interpolate(method='linear', limit=2,
limit_direction='both')
assert_series_equal(result, expected)
def test_interp_limit_before_ends(self):
        # These tests are for issue #11115 -- limit ends properly.
s = Series([np.nan, np.nan, 5, 7, np.nan, np.nan])
expected = Series([np.nan, np.nan, 5., 7., 7., np.nan])
result = s.interpolate(method='linear', limit=1,
limit_direction='forward')
assert_series_equal(result, expected)
expected = Series([np.nan, 5., 5., 7., np.nan, np.nan])
result = s.interpolate(method='linear', limit=1,
limit_direction='backward')
assert_series_equal(result, expected)
expected = Series([np.nan, 5., 5., 7., 7., np.nan])
result = s.interpolate(method='linear', limit=1,
limit_direction='both')
assert_series_equal(result, expected)
@td.skip_if_no_scipy
def test_interp_all_good(self):
s = Series([1, 2, 3])
result = s.interpolate(method='polynomial', order=1)
assert_series_equal(result, s)
# non-scipy
result = s.interpolate()
assert_series_equal(result, s)
@pytest.mark.parametrize("check_scipy", [
False,
pytest.param(True, marks=td.skip_if_no_scipy)
])
def test_interp_multiIndex(self, check_scipy):
idx = MultiIndex.from_tuples([(0, 'a'), (1, 'b'), (2, 'c')])
s = Series([1, 2, np.nan], index=idx)
expected = s.copy()
expected.loc[2] = 2
result = s.interpolate()
assert_series_equal(result, expected)
if check_scipy:
with pytest.raises(ValueError):
s.interpolate(method='polynomial', order=1)
@td.skip_if_no_scipy
def test_interp_nonmono_raise(self):
s = Series([1, np.nan, 3], index=[0, 2, 1])
with pytest.raises(ValueError):
s.interpolate(method='krogh')
@td.skip_if_no_scipy
def test_interp_datetime64(self):
df = Series([1, np.nan, 3], index=date_range('1/1/2000', periods=3))
result = df.interpolate(method='nearest')
expected = Series([1., 1., 3.],
index=date_range('1/1/2000', periods=3))
assert_series_equal(result, expected)
def test_interp_limit_no_nans(self):
# GH 7173
s = pd.Series([1., 2., 3.])
result = s.interpolate(limit=1)
expected = s
assert_series_equal(result, expected)
@td.skip_if_no_scipy
@pytest.mark.parametrize("method", ['polynomial', 'spline'])
def test_no_order(self, method):
s = Series([0, 1, np.nan, 3])
with pytest.raises(ValueError):
s.interpolate(method=method)
@td.skip_if_no_scipy
def test_spline(self):
s = Series([1, 2, np.nan, 4, 5, np.nan, 7])
result = s.interpolate(method='spline', order=1)
expected = Series([1., 2., 3., 4., 5., 6., 7.])
assert_series_equal(result, expected)
@td.skip_if_no('scipy', min_version='0.15')
def test_spline_extrapolate(self):
s = Series([1, 2, 3, 4, np.nan, 6, np.nan])
result3 = s.interpolate(method='spline', order=1, ext=3)
expected3 = Series([1., 2., 3., 4., 5., 6., 6.])
assert_series_equal(result3, expected3)
result1 = s.interpolate(method='spline', order=1, ext=0)
expected1 = Series([1., 2., 3., 4., 5., 6., 7.])
assert_series_equal(result1, expected1)
@td.skip_if_no_scipy
def test_spline_smooth(self):
s = Series([1, 2, np.nan, 4, 5.1, np.nan, 7])
assert (s.interpolate(method='spline', order=3, s=0)[5] !=
s.interpolate(method='spline', order=3)[5])
@td.skip_if_no_scipy
def test_spline_interpolation(self):
s = Series(np.arange(10) ** 2)
s[np.random.randint(0, 9, 3)] = np.nan
result1 = s.interpolate(method='spline', order=1)
expected1 = s.interpolate(method='spline', order=1)
assert_series_equal(result1, expected1)
@td.skip_if_no_scipy
def test_spline_error(self):
# see gh-10633
s = pd.Series(np.arange(10) ** 2)
s[np.random.randint(0, 9, 3)] = np.nan
with pytest.raises(ValueError):
s.interpolate(method='spline')
with pytest.raises(ValueError):
s.interpolate(method='spline', order=0)
def test_interp_timedelta64(self):
# GH 6424
df = Series([1, np.nan, 3],
index=pd.to_timedelta([1, 2, 3]))
result = df.interpolate(method='time')
expected = Series([1., 2., 3.],
index=pd.to_timedelta([1, 2, 3]))
assert_series_equal(result, expected)
# test for non uniform spacing
df = Series([1, np.nan, 3],
index=pd.to_timedelta([1, 2, 4]))
result = df.interpolate(method='time')
expected = Series([1., 1.666667, 3.],
index=pd.to_timedelta([1, 2, 4]))
assert_series_equal(result, expected)
def test_series_interpolate_method_values(self):
# #1646
ts = _simple_ts('1/1/2000', '1/20/2000')
ts[::2] = np.nan
result = ts.interpolate(method='values')
exp = ts.interpolate()
assert_series_equal(result, exp)
def test_series_interpolate_intraday(self):
# #1698
index = pd.date_range('1/1/2012', periods=4, freq='12D')
ts = pd.Series([0, 12, 24, 36], index)
new_index = index.append(index + pd.DateOffset(days=1)).sort_values()
exp = ts.reindex(new_index).interpolate(method='time')
index = pd.date_range('1/1/2012', periods=4, freq='12H')
ts = pd.Series([0, 12, 24, 36], index)
new_index = index.append(index + pd.DateOffset(hours=1)).sort_values()
result = ts.reindex(new_index).interpolate(method='time')
tm.assert_numpy_array_equal(result.values, exp.values)
|
bsd-3-clause
|
smousavi05/EQTransformer
|
EQTransformer/utils/associator.py
|
1
|
36991
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Fri Dec 27 18:52:42 2019
@author: mostafamousavi
last update: 01/29/2021
"""
from datetime import datetime, timedelta
from tqdm import tqdm
import numpy as np
import json
import os
import platform
import sqlite3
import pandas as pd
import csv
from os import listdir
import h5py
#import matplotlib.pyplot as plt
from obspy import UTCDateTime
from obspy.signal.trigger import ar_pick
from obspy.signal.trigger import recursive_sta_lta, trigger_onset
from itertools import combinations
from obspy.core.event import Catalog, Event, Origin, Arrival, Pick, WaveformStreamID
def run_associator(input_dir,
start_time,
end_time,
moving_window=15,
pair_n=3,
output_dir='.',
consider_combination=False):
"""
    Performs a simple association based on detection times across multiple stations. It works well for small, local networks of seismic stations.
Parameters
----------
input_dir: str, default=None
        Directory containing one sub-directory per station with the X_prediction_results.csv detection files.
start_time: str, default=None
Start of a time period of interest in 'YYYY-MM-DD hh:mm:ss.f' format.
end_time: str, default=None
        End of the time period of interest in 'YYYY-MM-DD hh:mm:ss.f' format.
moving_window: int, default=15
        The length of the time window used for association, in seconds.
    pair_n: int, default=3
        The minimum number of stations required for the association.
output_dir: str, default='.'
Path to the directory to write the output file.
consider_combination: bool, default=False
        If True, all possible combinations of the picked arrival times will be written for each event. This generates multiple events with the same ID; those with poor location solutions should be removed afterwards. It helps to remove false positives from the associated events.
Returns
----------
output_dir/Y2000.phs: Phase information for the associated events in hypoInverse format.
    output_dir/associations.xml: QuakeML output (containing origin and pick objects, written with ObsPy). QuakeML is useful because the user can then easily use ObsPy to generate input for other relocation methods (e.g. NonLinLoc). Contributed by Stephen Hicks.
    output_dir/traceNmae_dic.json: A dictionary listing the trace names of all detections associated to each event. It can be used later to access the traces when calculating cross-correlations during the relocation process.
Warning
----------
    Unlike the other modules, this function does not create the output directory, so an error is raised if the given path does not exist. An illustrative call is sketched in the comments right after this function.
"""
if os.path.exists("phase_dataset"):
os.remove("phase_dataset")
conn = sqlite3.connect("phase_dataset")
cur = conn.cursor()
cur.execute('''
CREATE TABLE phase_dataset (traceID TEXT,
network TEXT,
station TEXT,
instrument_type TEXT,
stlat NUMERIC,
stlon NUMERIC,
stelv NUMERIC,
event_start_time DateTime,
event_end_time DateTime,
detection_prob NUMERIC,
detection_unc NUMERIC,
p_arrival_time DateTime,
p_prob NUMERIC,
p_unc NUMERIC,
p_snr NUMERIC,
s_arrival_time DateTime,
s_prob NUMERIC,
s_unc NUMERIC,
s_snr NUMERIC,
amp NUMERIC
)''')
if platform.system() == 'Windows':
station_list = [ev for ev in listdir(input_dir) if ev.split("\\")[-1] != ".DS_Store"];
else:
station_list = [ev for ev in listdir(input_dir) if ev.split("/")[-1] != ".DS_Store"];
station_list = sorted(set(station_list))
for st in station_list:
print(f'reading {st} ...')
if platform.system() == 'Windows':
            _pick_database_maker(conn, cur, input_dir+"\\"+st+"\\"+"X_prediction_results.csv")
else:
_pick_database_maker(conn, cur, input_dir+"/"+st+'/X_prediction_results.csv')
# read the database as dataframe
conn = sqlite3.connect("phase_dataset")
tbl = pd.read_sql_query("SELECT * FROM phase_dataset", conn);
#tbl = tbl[tbl.p_prob > 0.3]
#tbl = tbl[tbl.s_prob > 0.3]
tbl['event_start_time'] = tbl['event_start_time'].apply(lambda row : _date_convertor(row))
tbl['event_end_time'] = tbl['event_end_time'].apply(lambda row : _date_convertor(row))
tbl['p_arrival_time'] = tbl['p_arrival_time'].apply(lambda row : _date_convertor(row))
tbl['s_arrival_time'] = tbl['s_arrival_time'].apply(lambda row : _date_convertor(row))
_dbs_associator(start_time,
end_time,
moving_window,
tbl,
pair_n,
output_dir,
station_list,
consider_combination)
os.remove("phase_dataset")
def _pick_database_maker(conn, cur, input_file):
csv_file = open(input_file)
csv_reader = csv.reader(csv_file, delimiter=',')
line_count = 0
for row in csv_reader:
if line_count == 0:
# print(f'Column names are {", ".join(row)}')
line_count += 1
else:
line_count += 1
traceID = row[0]
network = row[1]
station = row[2]
instrument_type = row[3]
stlat = float(row[4])
stlon = float(row[5])
stelv = float(row[6])
mls = row[7].split('.')
if len(mls) == 1:
event_start_time = datetime.strptime(row[7], '%Y-%m-%d %H:%M:%S')
else:
event_start_time = datetime.strptime(row[7], '%Y-%m-%d %H:%M:%S.%f')
mls = row[8].split('.')
if len(mls) == 1:
event_end_time = datetime.strptime(row[8], '%Y-%m-%d %H:%M:%S')
else:
event_end_time = datetime.strptime(row[8], '%Y-%m-%d %H:%M:%S.%f')
detection_prob = float(row[9])
try:
detection_unc = float(row[10])
except Exception:
detection_unc = None
if len(row[11]) > 10:
# p_arrival_time = UTCDateTime(row[11].replace(' ', 'T')+'Z')
mls = row[11].split('.')
if len(mls) == 1:
p_arrival_time = datetime.strptime(row[11], '%Y-%m-%d %H:%M:%S')
else:
p_arrival_time = datetime.strptime(row[11], '%Y-%m-%d %H:%M:%S.%f')
p_prob = float(row[12])
try:
p_unc = float(row[13])
except Exception:
p_unc = None
else:
p_arrival_time = None
p_prob = None
p_unc = None
try:
p_snr = float(row[14])
except Exception:
p_snr = None
if len(row[15]) > 10:
mls = row[15].split('.')
if len(mls) == 1:
s_arrival_time = datetime.strptime(row[15], '%Y-%m-%d %H:%M:%S')
else:
s_arrival_time = datetime.strptime(row[15], '%Y-%m-%d %H:%M:%S.%f')
s_prob = float(row[16])
try:
s_unc = float(row[17])
except Exception:
s_unc = None
else:
s_arrival_time = None
s_prob = None
s_unc = None
try:
s_snr = float(row[18])
except Exception:
s_snr = None
amp = None
cur.execute('''INSERT INTO phase_dataset VALUES
(?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?, ?)''',
(traceID, network, station, instrument_type, stlat, stlon, stelv,
event_start_time, event_end_time, detection_prob, detection_unc,
p_arrival_time, p_prob, p_unc, p_snr, s_arrival_time, s_prob, s_unc, s_snr,
amp))
conn.commit()
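# Editor's note (derived from the column indices used above): _pick_database_maker
# assumes X_prediction_results.csv rows are ordered as
#   0: traceID, 1: network, 2: station, 3: instrument_type, 4: stlat, 5: stlon,
#   6: stelv, 7: event_start_time, 8: event_end_time, 9: detection probability,
#   10: detection uncertainty, 11: p_arrival_time, 12: p probability,
#   13: p uncertainty, 14: p_snr, 15: s_arrival_time, 16: s probability,
#   17: s uncertainty, 18: s_snr
# with a single header line that is skipped.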
def _decimalDegrees2DMS(value,type):
    'Converts a decimal-degree value into degrees/decimal-minutes notation. Pass value as a float and type as the string "Latitude" or "Longitude"; returns a [degrees, direction, minutes] list of formatted strings. Adapted from anothergisblog.blogspot.com. An example is sketched in the comments after this function.'
degrees = int(value)
submin = abs( (value - int(value) ) * 60)
direction = ""
if type == "Longitude":
if degrees < 0:
direction = "W"
elif degrees > 0:
direction = " "
else:
direction = ""
notation = ["{:>3}".format(str(abs(degrees))), direction, "{:>5}".format(str(round(submin, 2)))]
elif type == "Latitude":
if degrees < 0:
direction = "S"
elif degrees > 0:
direction = " "
else:
direction = ""
notation =["{:>2}".format(str(abs(degrees))), direction, "{:>5}".format(str(round(submin, 2)))]
return notation
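# Editor's sketch: the conversion above in use (values are illustrative only).
# >>> _decimalDegrees2DMS(35.5, "Latitude")
# ['35', ' ', ' 30.0']
# >>> _decimalDegrees2DMS(-117.5, "Longitude")
# ['117', 'W', ' 30.0']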
def _weighcalculator_prob(pr):
    'calculate pick weights (0 = best, 4 = worst) from the pick probability; see the examples after this function'
weight = 4
if pr > 0.6:
weight = 0
elif pr <= 0.6 and pr > 0.5:
weight = 1
elif pr <= 0.5 and pr > 0.2:
weight = 2
elif pr <= 0.2 and pr > 0.1:
weight = 3
elif pr <= 0.1:
weight = 4
return weight
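# Editor's sketch: the probability-to-weight mapping above, by example
# (probabilities are illustrative; 0 is the best pick weight, 4 the worst).
# _weighcalculator_prob(0.9)  -> 0
# _weighcalculator_prob(0.55) -> 1
# _weighcalculator_prob(0.3)  -> 2
# _weighcalculator_prob(0.15) -> 3
# _weighcalculator_prob(0.05) -> 4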
def _date_convertor(r):
    'convert a datetime from its string representation'
if r and len(r)>5:
mls = r.split('.')
if len(mls) == 1:
new_t = datetime.strptime(r, '%Y-%m-%d %H:%M:%S')
else:
new_t = datetime.strptime(r, '%Y-%m-%d %H:%M:%S.%f')
return new_t
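# Editor's sketch: _date_convertor in use (illustrative input).
# >>> _date_convertor('2019-09-01 10:20:30.50')
# datetime.datetime(2019, 9, 1, 10, 20, 30, 500000)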
def _doubleChecking(station_list, detections, preprocessed_dir, moving_window, thr_on=3.7, thr_of=0.5):
    'Performs traditional detection (STA/LTA) and picking (AIC) to double-check for events on the remaining stations when an event has been detected on more than two stations. A usage sketch follows this function.'
for stt in station_list:
sttt = stt.split('_')[0]
# print(sttt)
if sttt not in detections['station'].to_list():
new_picks = {}
if platform.system() == 'Windows':
file_name = preprocessed_dir+"\\"+sttt+".hdf5"
file_csv = preprocessed_dir+"\\"+sttt+".csv"
else:
file_name = preprocessed_dir+"/"+sttt+".hdf5"
file_csv = preprocessed_dir+"/"+sttt+".csv"
df = pd.read_csv(file_csv)
df['start_time'] = pd.to_datetime(df['start_time'])
mask = (df['start_time'] > detections.iloc[0]['event_start_time']-timedelta(seconds = moving_window)) & (df['start_time'] < detections.iloc[0]['event_start_time']+timedelta(seconds = moving_window))
df = df.loc[mask]
dtfl = h5py.File(file_name, 'r')
dataset = dtfl.get('data/'+df['trace_name'].to_list()[0])
data = np.array(dataset)
cft = recursive_sta_lta(data[:,2], int(2.5 * 100), int(10. * 100))
on_of = trigger_onset(cft, thr_on, thr_of)
if len(on_of) >= 1:
p_pick, s_pick = ar_pick(data[:,2], data[:,1], data[:,0], 100, 1.0, 20.0, 1.0, 0.1, 4.0, 1.0, 2, 8, 0.1, 0.2)
if (on_of[0][1]+100)/100 > p_pick > (on_of[0][0]-100)/100:
# print('got one')
new_picks['traceID'] = df['trace_name'].to_list()[0]
new_picks['network'] = dataset.attrs["network_code"]
new_picks['station'] = sttt
new_picks['instrument_type'] = df['trace_name'].to_list()[0].split('_')[2]
new_picks['stlat'] = round(dataset.attrs["receiver_latitude"], 4)
new_picks['stlon'] = round(dataset.attrs["receiver_longitude"], 4)
new_picks['stelv'] = round(dataset.attrs["receiver_elevation_m"], 2)
new_picks['event_start_time'] = datetime.strptime(str(UTCDateTime(dataset.attrs['trace_start_time'].replace(' ', 'T')+'Z')+(on_of[0][0]/100)).replace('T', ' ').replace('Z', ''), '%Y-%m-%d %H:%M:%S.%f')
new_picks['event_end_time'] = datetime.strptime(str(UTCDateTime(dataset.attrs['trace_start_time'].replace(' ', 'T')+'Z')+(on_of[0][1]/100)).replace('T', ' ').replace('Z', ''), '%Y-%m-%d %H:%M:%S.%f')
new_picks['detection_prob'] = 0.3
new_picks['detection_unc'] = 0.6
new_picks['p_arrival_time'] = datetime.strptime(str(UTCDateTime(dataset.attrs['trace_start_time'].replace(' ', 'T')+'Z')+p_pick).replace('T', ' ').replace('Z', ''), '%Y-%m-%d %H:%M:%S.%f')
new_picks['p_prob'] = 0.3
new_picks['p_unc'] = 0.6
new_picks['p_snr'] = None
new_picks['s_arrival_time'] = None
new_picks['s_prob'] = 0.0
new_picks['s_unc'] = None
new_picks['s_snr'] = None
new_picks['amp'] = None
detections = detections.append(new_picks , ignore_index=True)
return detections
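# Editor's sketch (hypothetical names, not from the source): how the STA/LTA +
# AR-picker double check above could be applied to one set of associated
# detections, given the per-station hdf5/csv files in "preproc":
#
# detections = _doubleChecking(station_list=['B001_HH', 'B002_HH'],
#                              detections=detections_df,
#                              preprocessed_dir='preproc',
#                              moving_window=15,
#                              thr_on=3.7, thr_of=0.5)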
def _dbs_associator(start_time, end_time, moving_window,
tbl, pair_n, save_dir, station_list,
consider_combination=False):
    if consider_combination:
if platform.system() == 'Windows':
Y2000_writer = open(save_dir+"\\"+"Y2000.phs", "w")
else:
Y2000_writer = open(save_dir+"/"+"Y2000.phs", "w")
traceNmae_dic = dict()
st = datetime.strptime(start_time, '%Y-%m-%d %H:%M:%S.%f')
et = datetime.strptime(end_time, '%Y-%m-%d %H:%M:%S.%f')
total_t = et-st;
evid = 0;
tt = st
pbar = tqdm(total= int(np.ceil(total_t.total_seconds()/moving_window)), ncols=100)
while tt < et:
detections = tbl[(tbl.event_start_time >= tt) & (tbl.event_start_time < tt+timedelta(seconds = moving_window))]
pbar.update()
if len(detections) >= pair_n:
evid += 1
yr = "{:>4}".format(str(detections.iloc[0]['event_start_time']).split(' ')[0].split('-')[0])
mo = "{:>2}".format(str(detections.iloc[0]['event_start_time']).split(' ')[0].split('-')[1])
dy = "{:>2}".format(str(detections.iloc[0]['event_start_time']).split(' ')[0].split('-')[2])
hr = "{:>2}".format(str(detections.iloc[0]['event_start_time']).split(' ')[1].split(':')[0])
mi = "{:>2}".format(str(detections.iloc[0]['event_start_time']).split(' ')[1].split(':')[1])
sec = "{:>4}".format(str(detections.iloc[0]['event_start_time']).split(' ')[1].split(':')[2])
st_lat_DMS = _decimalDegrees2DMS(float(detections.iloc[0]['stlat']), "Latitude")
st_lon_DMS = _decimalDegrees2DMS(float(detections.iloc[0]['stlon']), "Longitude")
depth = 5.0
mag = 0.0
# QuakeML
print(detections.iloc[0]['event_start_time'])
if len(detections)/pair_n <= 2:
ch = pair_n
else:
ch = int(len(detections)-pair_n)
picks = []
for ns in range(ch, len(detections)+1):
comb = 0
for ind in list(combinations(detections.index, ns)):
comb+=1
selected_detections = detections.loc[ind,:]
sorted_detections = selected_detections.sort_values('p_arrival_time')
Y2000_writer.write("%4d%2d%2d%2d%2d%4.2f%2.0f%1s%4.2f%3.0f%1s%4.2f%5.2f%3.2f\n"%
(int(yr),int(mo),int(dy), int(hr),int(mi),float(sec),float(st_lat_DMS[0]),
str(st_lat_DMS[1]), float(st_lat_DMS[2]),float(st_lon_DMS[0]), str(st_lon_DMS[1]),
float(st_lon_DMS[2]),float(depth), float(mag)));
station_buffer=[]; row_buffer=[]; tr_names=[]; tr_names2=[]
for _, row in sorted_detections.iterrows():
trace_name = row['traceID']+'*'+row['station']+'*'+str(row['event_start_time'])
p_unc = row['p_unc']
p_prob = row['p_prob']
s_unc = row['s_unc']
s_prob = row['s_prob']
station = "{:<5}".format(row['station'])
network = "{:<2}".format(row['network'])
try:
yrp = "{:>4}".format(str(row['p_arrival_time']).split(' ')[0].split('-')[0])
mop = "{:>2}".format(str(row['p_arrival_time']).split(' ')[0].split('-')[1])
dyp = "{:>2}".format(str(row['p_arrival_time']).split(' ')[0].split('-')[2])
hrp = "{:>2}".format(str(row['p_arrival_time']).split(' ')[1].split(':')[0])
mip = "{:>2}".format(str(row['p_arrival_time']).split(' ')[1].split(':')[1])
sec_p = "{:>4}".format(str(row['p_arrival_time']).split(' ')[1].split(':')[2])
p = Pick(time=UTCDateTime(row['p_arrival_time']),
waveform_id=WaveformStreamID(network_code=network,
station_code=station.rstrip()),
phase_hint="P")
picks.append(p)
if p_unc:
Pweihgt = _weighcalculator_prob(p_prob*(1-p_unc))
else:
Pweihgt = _weighcalculator_prob(p_prob)
try:
Pweihgt = int(Pweihgt)
except Exception:
Pweihgt = 4
except Exception:
sec_p = None
try:
yrs = "{:>4}".format(str(row['s_arrival_time']).split(' ')[0].split('-')[0])
mos = "{:>2}".format(str(row['s_arrival_time']).split(' ')[0].split('-')[1])
dys = "{:>2}".format(str(row['s_arrival_time']).split(' ')[0].split('-')[2])
hrs = "{:>2}".format(str(row['s_arrival_time']).split(' ')[1].split(':')[0])
mis = "{:>2}".format(str(row['s_arrival_time']).split(' ')[1].split(':')[1])
sec_s = "{:>4}".format(str(row['s_arrival_time']).split(' ')[1].split(':')[2])
                                p = Pick(time=UTCDateTime(row['s_arrival_time']),
waveform_id=WaveformStreamID(network_code=network, station_code=station.rstrip()),
phase_hint="S")
picks.append(p)
if s_unc:
Sweihgt = _weighcalculator_prob(s_prob*(1-s_unc))
else:
Sweihgt = _weighcalculator_prob(s_prob)
try:
Sweihgt = int(Sweihgt)
except Exception:
Sweihgt = 4
except Exception:
sec_s = None
if row['station'] not in station_buffer:
tr_names.append(trace_name)
station_buffer.append(row['station'])
if sec_s:
Y2000_writer.write("%5s%2s HHE %4d%2d%2d%2d%2d%5.2f %5.2fES %1d\n"%
(station,network,int(yrs),int(mos),int(dys),int(hrs),int(mis),
float(0.0),float(sec_s), Sweihgt))
if sec_p:
Y2000_writer.write("%5s%2s HHZ IP %1d%4d%2d%2d%2d%2d%5.2f %5.2f 0\n"%
(station,network,Pweihgt,int(yrp),int(mop),int(dyp),int(hrp),
int(mip),float(sec_p),float(0.0)))
else :
tr_names2.append(trace_name)
if sec_s:
row_buffer.append("%5s%2s HHE %4d%2d%2d%2d%2d%5.2f %5.2fES %1d\n"%(station,network,
int(yrs),int(mos),int(dys),
int(hrs),int(mis),0.0,
float(sec_s), Sweihgt));
if sec_p:
row_buffer.append("%5s%2s HHZ IP %1d%4d%2d%2d%2d%2d%5.2f %5.2f 0\n"%(station,network,
Pweihgt,
int(yrp),int(mop),int(dyp),
int(hrp),int(mip),float(sec_p),
float(0.0)));
Y2000_writer.write("{:<62}".format(' ')+"%10d"%(evid)+'\n');
traceNmae_dic[str(evid)] = tr_names
if len(row_buffer) >= 2*pair_n:
Y2000_writer.write("%4d%2d%2d%2d%2d%4.2f%2.0f%1s%4.2f%3.0f%1s%4.2f%5.2f%3.2f\n"%
(int(yr),int(mo),int(dy),int(hr),int(mi),float(sec),
float(st_lat_DMS[0]), str(st_lat_DMS[1]), float(st_lat_DMS[2]),
float(st_lon_DMS[0]), str(st_lon_DMS[1]), float(st_lon_DMS[2]),
float(depth), float(mag)));
for rr in row_buffer:
Y2000_writer.write(rr);
Y2000_writer.write("{:<62}".format(' ')+"%10d"%(evid)+'\n');
traceNmae_dic[str(evid)] = tr_names2
tt += timedelta(seconds= moving_window)
# plt.scatter(LTTP, TTP, s=10, marker='o', c='b', alpha=0.4, label='P')
# plt.scatter(LTTS, TTS, s=10, marker='o', c='r', alpha=0.4, label='S')
# plt.legend('upper right')
# plt.show()
print('The Number of Realizations: '+str(evid)+'\n', flush=True)
jj = json.dumps(traceNmae_dic)
if platform.system() == 'Windows':
f = open(save_dir+"\\"+"traceNmae_dic.json","w")
else:
f = open(save_dir+"/"+"traceNmae_dic.json","w")
f.write(jj)
f.close()
else:
if platform.system() == 'Windows':
Y2000_writer = open(save_dir+"\\"+"Y2000.phs", "w")
else:
Y2000_writer = open(save_dir+"/"+"Y2000.phs", "w")
cat = Catalog()
traceNmae_dic = dict()
st = datetime.strptime(start_time, '%Y-%m-%d %H:%M:%S.%f')
et = datetime.strptime(end_time, '%Y-%m-%d %H:%M:%S.%f')
total_t = et-st;
evid = 200000; evidd = 100000
tt = st
pbar = tqdm(total= int(np.ceil(total_t.total_seconds()/moving_window)))
while tt < et:
detections = tbl[(tbl.event_start_time >= tt) & (tbl.event_start_time < tt+timedelta(seconds = moving_window))]
pbar.update()
if len(detections) >= pair_n:
yr = "{:>4}".format(str(detections.iloc[0]['event_start_time']).split(' ')[0].split('-')[0])
mo = "{:>2}".format(str(detections.iloc[0]['event_start_time']).split(' ')[0].split('-')[1])
dy = "{:>2}".format(str(detections.iloc[0]['event_start_time']).split(' ')[0].split('-')[2])
hr = "{:>2}".format(str(detections.iloc[0]['event_start_time']).split(' ')[1].split(':')[0])
mi = "{:>2}".format(str(detections.iloc[0]['event_start_time']).split(' ')[1].split(':')[1])
sec = "{:>4}".format(str(detections.iloc[0]['event_start_time']).split(' ')[1].split(':')[2])
st_lat_DMS = _decimalDegrees2DMS(float(detections.iloc[0]['stlat']), "Latitude")
st_lon_DMS = _decimalDegrees2DMS(float(detections.iloc[0]['stlon']), "Longitude")
depth = 5.0
mag = 0.0
Y2000_writer.write("%4d%2d%2d%2d%2d%4.2f%2.0f%1s%4.2f%3.0f%1s%4.2f%5.2f%3.2f\n"%(int(yr),int(mo),int(dy),
int(hr),int(mi),float(sec),
float(st_lat_DMS[0]), str(st_lat_DMS[1]), float(st_lat_DMS[2]),
float(st_lon_DMS[0]), str(st_lon_DMS[1]), float(st_lon_DMS[2]),
float(depth), float(mag)));
event = Event()
origin = Origin(time=UTCDateTime(detections.iloc[0]['event_start_time']),
longitude=detections.iloc[0]['stlon'],
latitude=detections.iloc[0]['stlat'],
method="EqTransformer")
event.origins.append(origin)
station_buffer = []
row_buffer = []
sorted_detections = detections.sort_values('p_arrival_time')
tr_names = []
tr_names2 = []
picks = []
for _, row in sorted_detections.iterrows():
trace_name = row['traceID']+'*'+row['station']+'*'+str(row['event_start_time'])
p_unc = row['p_unc']
p_prob = row['p_prob']
s_unc = row['s_unc']
s_prob = row['s_prob']
station = "{:<5}".format(row['station'])
network = "{:<2}".format(row['network'])
try:
yrp = "{:>4}".format(str(row['p_arrival_time']).split(' ')[0].split('-')[0])
mop = "{:>2}".format(str(row['p_arrival_time']).split(' ')[0].split('-')[1])
dyp = "{:>2}".format(str(row['p_arrival_time']).split(' ')[0].split('-')[2])
hrp = "{:>2}".format(str(row['p_arrival_time']).split(' ')[1].split(':')[0])
mip = "{:>2}".format(str(row['p_arrival_time']).split(' ')[1].split(':')[1])
sec_p = "{:>4}".format(str(row['p_arrival_time']).split(' ')[1].split(':')[2])
p = Pick(time=UTCDateTime(row['p_arrival_time']),
waveform_id=WaveformStreamID(network_code=network, station_code=station.rstrip()),
phase_hint="P", method_id="EqTransformer")
picks.append(p)
if p_unc:
Pweihgt = _weighcalculator_prob(p_prob*(1-p_unc))
else:
Pweihgt = _weighcalculator_prob(p_prob)
try:
Pweihgt = int(Pweihgt)
except Exception:
Pweihgt = 4
except Exception:
sec_p = None
try:
yrs = "{:>4}".format(str(row['s_arrival_time']).split(' ')[0].split('-')[0])
mos = "{:>2}".format(str(row['s_arrival_time']).split(' ')[0].split('-')[1])
dys = "{:>2}".format(str(row['s_arrival_time']).split(' ')[0].split('-')[2])
hrs = "{:>2}".format(str(row['s_arrival_time']).split(' ')[1].split(':')[0])
mis = "{:>2}".format(str(row['s_arrival_time']).split(' ')[1].split(':')[1])
sec_s = "{:>4}".format(str(row['s_arrival_time']).split(' ')[1].split(':')[2])
p = Pick(time=UTCDateTime(row['s_arrival_time']),
waveform_id=WaveformStreamID(network_code=network, station_code=station.rstrip()),
phase_hint="S", method_id="EqTransformer")
picks.append(p)
if s_unc:
Sweihgt = _weighcalculator_prob(s_prob*(1-s_unc))
else:
Sweihgt = _weighcalculator_prob(s_prob)
try:
Sweihgt = int(Sweihgt)
except Exception:
Sweihgt = 4
except Exception:
sec_s = None
if row['station'] not in station_buffer:
tr_names.append(trace_name)
station_buffer.append(row['station'])
if sec_s:
Y2000_writer.write("%5s%2s HHE %4d%2d%2d%2d%2d%5.2f %5.2fES %1d\n"%(station,network,
int(yrs),int(mos),int(dys),
int(hrs),int(mis),float(0.0),
float(sec_s), Sweihgt))
if sec_p:
Y2000_writer.write("%5s%2s HHZ IP %1d%4d%2d%2d%2d%2d%5.2f %5.2f 0\n"%(station,network,
Pweihgt,
int(yrp),int(mop),int(dyp),
int(hrp),int(mip),float(sec_p),
float(0.0)))
else :
tr_names2.append(trace_name)
if sec_s:
row_buffer.append("%5s%2s HHE %4d%2d%2d%2d%2d%5.2f %5.2fES %1d\n"%(station,network,
int(yrs),int(mos),int(dys),
int(hrs),int(mis),0.0,
float(sec_s), Sweihgt));
if sec_p:
row_buffer.append("%5s%2s HHZ IP %1d%4d%2d%2d%2d%2d%5.2f %5.2f 0\n"%(station,network,
Pweihgt,
int(yrp),int(mop),int(dyp),
int(hrp),int(mip),float(sec_p),
float(0.0)));
event.picks = picks
event.preferred_origin_id = event.origins[0].resource_id
cat.append(event)
evid += 1
Y2000_writer.write("{:<62}".format(' ')+"%10d"%(evid)+'\n');
traceNmae_dic[str(evid)] = tr_names
if len(row_buffer) >= 2*pair_n:
Y2000_writer.write("%4d%2d%2d%2d%2d%4.2f%2.0f%1s%4.2f%3.0f%1s%4.2f%5.2f%3.2f\n"%
(int(yr),int(mo),int(dy),int(hr),int(mi),float(sec),
float(st_lat_DMS[0]), str(st_lat_DMS[1]), float(st_lat_DMS[2]),
float(st_lon_DMS[0]), str(st_lon_DMS[1]), float(st_lon_DMS[2]),
float(depth), float(mag)));
for rr in row_buffer:
Y2000_writer.write(rr);
evid += 1
Y2000_writer.write("{:<62}".format(' ')+"%10d"%(evid)+'\n');
traceNmae_dic[str(evid)] = tr_names2
elif len(row_buffer) < pair_n and len(row_buffer) != 0:
evidd += 1
traceNmae_dic[str(evidd)] = tr_names2
elif len(detections) < pair_n and len(detections) != 0:
tr_names = []
for _, row in detections.iterrows():
trace_name = row['traceID']
tr_names.append(trace_name)
evidd += 1
traceNmae_dic[str(evidd)] = tr_names
tt += timedelta(seconds= moving_window)
print('The Number of Associated Events: '+str(evid-200000)+'\n', flush=True)
jj = json.dumps(traceNmae_dic)
if platform.system() == 'Windows':
f = open(save_dir+"\\"+"traceNmae_dic.json","w")
else:
f = open(save_dir+"/"+"traceNmae_dic.json","w")
f.write(jj)
f.close()
print(cat.__str__(print_all=True))
cat.write(save_dir+"/associations.xml", format="QUAKEML")
|
mit
|
zasdfgbnm/qutip
|
setup.py
|
1
|
4890
|
#!/usr/bin/env python
"""QuTiP: The Quantum Toolbox in Python
QuTiP is open-source software for simulating the dynamics of closed and open
quantum systems. The QuTiP library depends on the excellent Numpy, Scipy, and
Cython numerical packages. In addition, graphical output is provided by
Matplotlib. QuTiP aims to provide user-friendly and efficient numerical
simulations of a wide variety of quantum mechanical problems, including those
with Hamiltonians and/or collapse operators with arbitrary time-dependence,
commonly found in a wide range of physics applications. QuTiP is freely
available for use and/or modification on all common platforms. Being free of
any licensing fees, QuTiP is ideal for exploring quantum mechanics in research
as well as in the classroom.
"""
DOCLINES = __doc__.split('\n')
CLASSIFIERS = """\
Development Status :: 4 - Beta
Intended Audience :: Science/Research
License :: OSI Approved :: BSD License
Programming Language :: Python
Programming Language :: Python :: 3
Topic :: Scientific/Engineering
Operating System :: MacOS
Operating System :: POSIX
Operating System :: Unix
Operating System :: Microsoft :: Windows
"""
# import statements
import os
import sys
import numpy as np
import setuptools
from numpy.distutils.core import setup
# all information about QuTiP goes here
MAJOR = 3
MINOR = 2
MICRO = 0
ISRELEASED = False
VERSION = '%d.%d.%d' % (MAJOR, MINOR, MICRO)
REQUIRES = ['numpy (>=1.6)', 'scipy (>=0.11)', 'cython (>=0.15)',
'matplotlib (>=1.1)']
PACKAGES = ['qutip', 'qutip/ui', 'qutip/cy', 'qutip/qip', 'qutip/qip/models',
'qutip/qip/algorithms', 'qutip/control', 'qutip/tests']
PACKAGE_DATA = {
'qutip': ['configspec.ini'],
'qutip/tests': ['bucky.npy', 'bucky_perm.npy'],
'qutip/cy': ['*.pxi', '*.pxd', '*.pyx'],
'qutip/control': ['*.pyx']
}
INCLUDE_DIRS = [np.get_include()]
EXT_MODULES = []
NAME = "qutip"
AUTHOR = "Paul D. Nation, Robert J. Johansson"
AUTHOR_EMAIL = "[email protected], [email protected]"
LICENSE = "BSD"
DESCRIPTION = DOCLINES[0]
LONG_DESCRIPTION = "\n".join(DOCLINES[2:])
KEYWORDS = "quantum physics dynamics"
URL = "http://qutip.org"
CLASSIFIERS = [_f for _f in CLASSIFIERS.split('\n') if _f]
PLATFORMS = ["Linux", "Mac OSX", "Unix", "Windows"]
def write_f2py_f2cmap():
dirname = os.path.dirname(__file__)
with open(os.path.join(dirname, '.f2py_f2cmap'), 'w') as f:
f.write("dict(real=dict(sp='float', dp='double', wp='double'), " +
"complex=dict(sp='complex_float', dp='complex_double', " +
"wp='complex_double'))")
def git_short_hash():
try:
return "-" + os.popen('git log -1 --format="%h"').read().strip()
    except Exception:
return ""
FULLVERSION = VERSION
if not ISRELEASED:
FULLVERSION += '.dev' + git_short_hash()
def write_version_py(filename='qutip/version.py'):
cnt = """\
# THIS FILE IS GENERATED FROM QUTIP SETUP.PY
short_version = '%(version)s'
version = '%(fullversion)s'
release = %(isrelease)s
"""
a = open(filename, 'w')
try:
a.write(cnt % {'version': VERSION, 'fullversion':
FULLVERSION, 'isrelease': str(ISRELEASED)})
finally:
a.close()
local_path = os.path.dirname(os.path.abspath(sys.argv[0]))
os.chdir(local_path)
sys.path.insert(0, local_path)
sys.path.insert(0, os.path.join(local_path, 'qutip'))  # to retrieve _version
# always rewrite _version
if os.path.exists('qutip/version.py'):
os.remove('qutip/version.py')
write_version_py()
# check for fortran option
if "--with-f90mc" in sys.argv:
with_f90mc = True
sys.argv.remove("--with-f90mc")
write_f2py_f2cmap()
else:
with_f90mc = False
if not with_f90mc:
os.environ['FORTRAN_LIBS'] = 'FALSE'
print("Installing without the fortran mcsolver.")
else:
os.environ['FORTRAN_LIBS'] = 'TRUE'
# using numpy distutils to simplify install of data directory for testing
def configuration(parent_package='', top_path=None):
from numpy.distutils.misc_util import Configuration
config = Configuration(None, parent_package, top_path)
config.set_options(ignore_setup_xxx_py=True,
assume_default_configuration=True,
delegate_options_to_subpackages=True,
quiet=True)
config.add_subpackage('qutip')
config.get_version('qutip/version.py') # sets config.version
config.add_data_dir('qutip/tests')
return config
# Setup commands go here
setup(
name=NAME,
packages=PACKAGES,
include_dirs=INCLUDE_DIRS,
ext_modules=EXT_MODULES,
author=AUTHOR,
author_email=AUTHOR_EMAIL,
license=LICENSE,
description=DESCRIPTION,
long_description=LONG_DESCRIPTION,
keywords=KEYWORDS,
url=URL,
classifiers=CLASSIFIERS,
platforms=PLATFORMS,
requires=REQUIRES,
package_data=PACKAGE_DATA,
configuration=configuration
)
|
bsd-3-clause
|
madjelan/scikit-learn
|
sklearn/cluster/spectral.py
|
233
|
18153
|
# -*- coding: utf-8 -*-
"""Algorithms for spectral clustering"""
# Author: Gael Varoquaux [email protected]
# Brian Cheung
# Wei LI <[email protected]>
# License: BSD 3 clause
import warnings
import numpy as np
from ..base import BaseEstimator, ClusterMixin
from ..utils import check_random_state, as_float_array
from ..utils.validation import check_array
from ..utils.extmath import norm
from ..metrics.pairwise import pairwise_kernels
from ..neighbors import kneighbors_graph
from ..manifold import spectral_embedding
from .k_means_ import k_means
def discretize(vectors, copy=True, max_svd_restarts=30, n_iter_max=20,
random_state=None):
"""Search for a partition matrix (clustering) which is closest to the
eigenvector embedding.
Parameters
----------
vectors : array-like, shape: (n_samples, n_clusters)
The embedding space of the samples.
copy : boolean, optional, default: True
Whether to copy vectors, or perform in-place normalization.
max_svd_restarts : int, optional, default: 30
Maximum number of attempts to restart SVD if convergence fails
    n_iter_max : int, optional, default: 20
Maximum number of iterations to attempt in rotation and partition
matrix search if machine precision convergence is not reached
random_state: int seed, RandomState instance, or None (default)
        A pseudo random number generator used for the initialization
        of the rotation matrix
Returns
-------
labels : array of integers, shape: n_samples
The labels of the clusters.
References
----------
- Multiclass spectral clustering, 2003
Stella X. Yu, Jianbo Shi
http://www1.icsi.berkeley.edu/~stellayu/publication/doc/2003kwayICCV.pdf
Notes
-----
The eigenvector embedding is used to iteratively search for the
closest discrete partition. First, the eigenvector embedding is
normalized to the space of partition matrices. An optimal discrete
partition matrix closest to this normalized embedding multiplied by
an initial rotation is calculated. Fixing this discrete partition
matrix, an optimal rotation matrix is calculated. These two
calculations are performed until convergence. The discrete partition
matrix is returned as the clustering solution. Used in spectral
clustering, this method tends to be faster and more robust to random
initialization than k-means.
"""
from scipy.sparse import csc_matrix
from scipy.linalg import LinAlgError
random_state = check_random_state(random_state)
vectors = as_float_array(vectors, copy=copy)
eps = np.finfo(float).eps
n_samples, n_components = vectors.shape
# Normalize the eigenvectors to an equal length of a vector of ones.
# Reorient the eigenvectors to point in the negative direction with respect
# to the first element. This may have to do with constraining the
# eigenvectors to lie in a specific quadrant to make the discretization
# search easier.
norm_ones = np.sqrt(n_samples)
for i in range(vectors.shape[1]):
vectors[:, i] = (vectors[:, i] / norm(vectors[:, i])) \
* norm_ones
if vectors[0, i] != 0:
vectors[:, i] = -1 * vectors[:, i] * np.sign(vectors[0, i])
# Normalize the rows of the eigenvectors. Samples should lie on the unit
# hypersphere centered at the origin. This transforms the samples in the
# embedding space to the space of partition matrices.
vectors = vectors / np.sqrt((vectors ** 2).sum(axis=1))[:, np.newaxis]
svd_restarts = 0
has_converged = False
# If there is an exception we try to randomize and rerun SVD again
# do this max_svd_restarts times.
while (svd_restarts < max_svd_restarts) and not has_converged:
# Initialize first column of rotation matrix with a row of the
# eigenvectors
rotation = np.zeros((n_components, n_components))
rotation[:, 0] = vectors[random_state.randint(n_samples), :].T
# To initialize the rest of the rotation matrix, find the rows
# of the eigenvectors that are as orthogonal to each other as
# possible
c = np.zeros(n_samples)
for j in range(1, n_components):
# Accumulate c to ensure row is as orthogonal as possible to
# previous picks as well as current one
c += np.abs(np.dot(vectors, rotation[:, j - 1]))
rotation[:, j] = vectors[c.argmin(), :].T
last_objective_value = 0.0
n_iter = 0
while not has_converged:
n_iter += 1
t_discrete = np.dot(vectors, rotation)
labels = t_discrete.argmax(axis=1)
vectors_discrete = csc_matrix(
(np.ones(len(labels)), (np.arange(0, n_samples), labels)),
shape=(n_samples, n_components))
t_svd = vectors_discrete.T * vectors
try:
U, S, Vh = np.linalg.svd(t_svd)
except LinAlgError:
# an SVD failure counts as a restart: reshuffle the rotation and retry
svd_restarts += 1
print("SVD did not converge, randomizing and trying again")
break
ncut_value = 2.0 * (n_samples - S.sum())
if ((abs(ncut_value - last_objective_value) < eps) or
(n_iter > n_iter_max)):
has_converged = True
else:
# otherwise calculate rotation and continue
last_objective_value = ncut_value
rotation = np.dot(Vh.T, U.T)
if not has_converged:
raise LinAlgError('SVD did not converge')
return labels
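# Illustrative usage sketch for discretize(): recover integer labels from a
# made-up (n_samples, n_clusters) eigenvector embedding with three well
# separated groups. The toy data and the helper name are illustrative only;
# nothing beyond numpy and the function defined above is assumed.
def _example_discretize():
    import numpy as np
    rng = np.random.RandomState(0)
    # 60 samples, 3 "indicator" directions plus a small perturbation
    toy_embedding = np.repeat(np.eye(3), 20, axis=0) + 0.01 * rng.randn(60, 3)
    labels = discretize(toy_embedding, random_state=0)
    return labels  # 60 integer labels, typically taking 3 distinct values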
def spectral_clustering(affinity, n_clusters=8, n_components=None,
eigen_solver=None, random_state=None, n_init=10,
eigen_tol=0.0, assign_labels='kmeans'):
"""Apply clustering to a projection to the normalized laplacian.
In practice Spectral Clustering is very useful when the structure of
the individual clusters is highly non-convex or more generally when
a measure of the center and spread of the cluster is not a suitable
description of the complete cluster. For instance when clusters are
nested circles on the 2D plane.
If affinity is the adjacency matrix of a graph, this method can be
used to find normalized graph cuts.
Read more in the :ref:`User Guide <spectral_clustering>`.
Parameters
-----------
affinity : array-like or sparse matrix, shape: (n_samples, n_samples)
The affinity matrix describing the relationship of the samples to
embed. **Must be symmetric**.
Possible examples:
- adjacency matrix of a graph,
- heat kernel of the pairwise distance matrix of the samples,
- symmetric k-nearest neighbours connectivity matrix of the samples.
n_clusters : integer, optional
Number of clusters to extract.
n_components : integer, optional, default is n_clusters
Number of eigen vectors to use for the spectral embedding
eigen_solver : {None, 'arpack', 'lobpcg', or 'amg'}
The eigenvalue decomposition strategy to use. AMG requires pyamg
to be installed. It can be faster on very large, sparse problems,
but may also lead to instabilities
random_state : int seed, RandomState instance, or None (default)
A pseudo random number generator used for the initialization
of the lobpcg eigen vectors decomposition when eigen_solver == 'amg'
and by the K-Means initialization.
n_init : int, optional, default: 10
Number of time the k-means algorithm will be run with different
centroid seeds. The final results will be the best output of
n_init consecutive runs in terms of inertia.
eigen_tol : float, optional, default: 0.0
Stopping criterion for eigendecomposition of the Laplacian matrix
when using arpack eigen_solver.
assign_labels : {'kmeans', 'discretize'}, default: 'kmeans'
The strategy to use to assign labels in the embedding
space. There are two ways to assign labels after the laplacian
embedding. k-means can be applied and is a popular choice. But it can
also be sensitive to initialization. Discretization is another
approach which is less sensitive to random initialization. See
the 'Multiclass spectral clustering' paper referenced below for
more details on the discretization approach.
Returns
-------
labels : array of integers, shape: n_samples
The labels of the clusters.
References
----------
- Normalized cuts and image segmentation, 2000
Jianbo Shi, Jitendra Malik
http://citeseer.ist.psu.edu/viewdoc/summary?doi=10.1.1.160.2324
- A Tutorial on Spectral Clustering, 2007
Ulrike von Luxburg
http://citeseerx.ist.psu.edu/viewdoc/summary?doi=10.1.1.165.9323
- Multiclass spectral clustering, 2003
Stella X. Yu, Jianbo Shi
http://www1.icsi.berkeley.edu/~stellayu/publication/doc/2003kwayICCV.pdf
Notes
------
The graph should contain only one connected component; otherwise
the results make little sense.
This algorithm solves the normalized cut for k=2: it is a
normalized spectral clustering.
"""
if assign_labels not in ('kmeans', 'discretize'):
raise ValueError("The 'assign_labels' parameter should be "
"'kmeans' or 'discretize', but '%s' was given"
% assign_labels)
random_state = check_random_state(random_state)
n_components = n_clusters if n_components is None else n_components
maps = spectral_embedding(affinity, n_components=n_components,
eigen_solver=eigen_solver,
random_state=random_state,
eigen_tol=eigen_tol, drop_first=False)
if assign_labels == 'kmeans':
_, labels, _ = k_means(maps, n_clusters, random_state=random_state,
n_init=n_init)
else:
labels = discretize(maps, random_state=random_state)
return labels
class SpectralClustering(BaseEstimator, ClusterMixin):
"""Apply clustering to a projection to the normalized laplacian.
In practice Spectral Clustering is very useful when the structure of
the individual clusters is highly non-convex or more generally when
a measure of the center and spread of the cluster is not a suitable
description of the complete cluster. For instance when clusters are
nested circles on the 2D plane.
If affinity is the adjacency matrix of a graph, this method can be
used to find normalized graph cuts.
When calling ``fit``, an affinity matrix is constructed using either
a kernel function such as the Gaussian (aka RBF) kernel of the euclidean
distance ``d(X, X)``::
np.exp(-gamma * d(X,X) ** 2)
or a k-nearest neighbors connectivity matrix.
Alternatively, using ``precomputed``, a user-provided affinity
matrix can be used.
Read more in the :ref:`User Guide <spectral_clustering>`.
Parameters
-----------
n_clusters : integer, optional
The dimension of the projection subspace.
affinity : string, array-like or callable, default 'rbf'
If a string, this may be one of 'nearest_neighbors', 'precomputed',
'rbf' or one of the kernels supported by
`sklearn.metrics.pairwise_kernels`.
Only kernels that produce similarity scores (non-negative values that
increase with similarity) should be used. This property is not checked
by the clustering algorithm.
gamma : float
Scaling factor of RBF, polynomial, exponential chi^2 and
sigmoid affinity kernel. Ignored for
``affinity='nearest_neighbors'``.
degree : float, default=3
Degree of the polynomial kernel. Ignored by other kernels.
coef0 : float, default=1
Zero coefficient for polynomial and sigmoid kernels.
Ignored by other kernels.
n_neighbors : integer
Number of neighbors to use when constructing the affinity matrix using
the nearest neighbors method. Ignored for ``affinity='rbf'``.
eigen_solver : {None, 'arpack', 'lobpcg', or 'amg'}
The eigenvalue decomposition strategy to use. AMG requires pyamg
to be installed. It can be faster on very large, sparse problems,
but may also lead to instabilities
random_state : int seed, RandomState instance, or None (default)
A pseudo random number generator used for the initialization
of the lobpcg eigen vectors decomposition when eigen_solver == 'amg'
and by the K-Means initialization.
n_init : int, optional, default: 10
Number of time the k-means algorithm will be run with different
centroid seeds. The final results will be the best output of
n_init consecutive runs in terms of inertia.
eigen_tol : float, optional, default: 0.0
Stopping criterion for eigendecomposition of the Laplacian matrix
when using arpack eigen_solver.
assign_labels : {'kmeans', 'discretize'}, default: 'kmeans'
The strategy to use to assign labels in the embedding
space. There are two ways to assign labels after the laplacian
embedding. k-means can be applied and is a popular choice. But it can
also be sensitive to initialization. Discretization is another approach
which is less sensitive to random initialization.
kernel_params : dictionary of string to any, optional
Parameters (keyword arguments) and values for kernel passed as
callable object. Ignored by other kernels.
Attributes
----------
affinity_matrix_ : array-like, shape (n_samples, n_samples)
Affinity matrix used for clustering. Available only after calling
``fit``.
labels_ :
Labels of each point
Notes
-----
If you have an affinity matrix, such as a distance matrix,
for which 0 means identical elements, and high values mean
very dissimilar elements, it can be transformed into a
similarity matrix that is well suited for the algorithm by
applying the Gaussian (RBF, heat) kernel::
np.exp(- X ** 2 / (2. * delta ** 2))
Another alternative is to take a symmetric version of the k
nearest neighbors connectivity matrix of the points.
If the pyamg package is installed, it is used: this greatly
speeds up computation.
References
----------
- Normalized cuts and image segmentation, 2000
Jianbo Shi, Jitendra Malik
http://citeseer.ist.psu.edu/viewdoc/summary?doi=10.1.1.160.2324
- A Tutorial on Spectral Clustering, 2007
Ulrike von Luxburg
http://citeseerx.ist.psu.edu/viewdoc/summary?doi=10.1.1.165.9323
- Multiclass spectral clustering, 2003
Stella X. Yu, Jianbo Shi
http://www1.icsi.berkeley.edu/~stellayu/publication/doc/2003kwayICCV.pdf
"""
def __init__(self, n_clusters=8, eigen_solver=None, random_state=None,
n_init=10, gamma=1., affinity='rbf', n_neighbors=10,
eigen_tol=0.0, assign_labels='kmeans', degree=3, coef0=1,
kernel_params=None):
self.n_clusters = n_clusters
self.eigen_solver = eigen_solver
self.random_state = random_state
self.n_init = n_init
self.gamma = gamma
self.affinity = affinity
self.n_neighbors = n_neighbors
self.eigen_tol = eigen_tol
self.assign_labels = assign_labels
self.degree = degree
self.coef0 = coef0
self.kernel_params = kernel_params
def fit(self, X, y=None):
"""Creates an affinity matrix for X using the selected affinity,
then applies spectral clustering to this affinity matrix.
Parameters
----------
X : array-like or sparse matrix, shape (n_samples, n_features)
OR, if affinity==`precomputed`, a precomputed affinity
matrix of shape (n_samples, n_samples)
"""
X = check_array(X, accept_sparse=['csr', 'csc', 'coo'],
dtype=np.float64)
if X.shape[0] == X.shape[1] and self.affinity != "precomputed":
warnings.warn("The spectral clustering API has changed. ``fit``"
"now constructs an affinity matrix from data. To use"
" a custom affinity matrix, "
"set ``affinity=precomputed``.")
if self.affinity == 'nearest_neighbors':
connectivity = kneighbors_graph(X, n_neighbors=self.n_neighbors, include_self=True)
self.affinity_matrix_ = 0.5 * (connectivity + connectivity.T)
elif self.affinity == 'precomputed':
self.affinity_matrix_ = X
else:
params = self.kernel_params
if params is None:
params = {}
if not callable(self.affinity):
params['gamma'] = self.gamma
params['degree'] = self.degree
params['coef0'] = self.coef0
self.affinity_matrix_ = pairwise_kernels(X, metric=self.affinity,
filter_params=True,
**params)
random_state = check_random_state(self.random_state)
self.labels_ = spectral_clustering(self.affinity_matrix_,
n_clusters=self.n_clusters,
eigen_solver=self.eigen_solver,
random_state=random_state,
n_init=self.n_init,
eigen_tol=self.eigen_tol,
assign_labels=self.assign_labels)
return self
@property
def _pairwise(self):
return self.affinity == "precomputed"
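# Illustrative usage sketch for SpectralClustering, tying together two points
# made in the docstrings above: it separates non-convex clusters such as
# nested circles, and a distance matrix can be turned into an affinity with a
# Gaussian (RBF) kernel before using the "precomputed" mode. The bandwidth
# `delta` and the helper name are illustrative choices, not library defaults.
def _example_spectral_clustering():
    import numpy as np
    from sklearn.datasets import make_circles
    from sklearn.metrics import pairwise_distances

    X, _ = make_circles(n_samples=200, factor=0.5, noise=0.05, random_state=0)

    # Route 1: let the estimator build a k-nearest-neighbours affinity itself.
    knn_labels = SpectralClustering(n_clusters=2, affinity='nearest_neighbors',
                                    n_neighbors=10,
                                    random_state=0).fit_predict(X)

    # Route 2: hand in a precomputed RBF affinity built from distances,
    # following the Notes section of the class docstring.
    delta = 0.5
    distances = pairwise_distances(X)
    affinity = np.exp(-distances ** 2 / (2. * delta ** 2))
    rbf_labels = SpectralClustering(n_clusters=2, affinity='precomputed',
                                    random_state=0).fit_predict(affinity)
    return knn_labels, rbf_labels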
|
bsd-3-clause
|
fsschneider/DeepOBS
|
setup.py
|
1
|
1219
|
# -*- coding: utf-8 -*-
"""Setup for the DeepOBS package"""
import setuptools
def readme():
with open("README.md") as f:
return f.read()
setuptools.setup(
name="deepobs",
version="1.1.2",
description="Deep Learning Optimizer Benchmark Suite",
long_description=readme(),
author="Frank Schneider, Lukas Balles and Philipp Hennig,",
author_email="[email protected]",
license="MIT",
packages=setuptools.find_packages(),
classifiers=[
"Programming Language :: Python :: 3.6",
"Intended Audience :: Developers",
"Intended Audience :: Science/Research",
"License :: OSI Approved :: MIT License",
"Operating System :: OS Independent",
"Topic :: Scientific/Engineering :: Artificial Intelligence",
],
install_requires=[
"argparse",
"numpy",
"pandas",
"matplotlib",
"matplotlib2tikz==0.6.18",
"seaborn",
],
scripts=[
"deepobs/scripts/deepobs_prepare_data.sh",
"deepobs/scripts/deepobs_get_baselines.sh",
"deepobs/scripts/deepobs_plot_results.py",
"deepobs/scripts/deepobs_estimate_runtime.py",
],
zip_safe=False,
)
|
mit
|
samzhang111/scikit-learn
|
sklearn/decomposition/tests/test_sparse_pca.py
|
160
|
6028
|
# Author: Vlad Niculae
# License: BSD 3 clause
import sys
import numpy as np
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import SkipTest
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_false
from sklearn.utils.testing import if_safe_multiprocessing_with_blas
from sklearn.decomposition import SparsePCA, MiniBatchSparsePCA
from sklearn.utils import check_random_state
def generate_toy_data(n_components, n_samples, image_size, random_state=None):
n_features = image_size[0] * image_size[1]
rng = check_random_state(random_state)
U = rng.randn(n_samples, n_components)
V = rng.randn(n_components, n_features)
centers = [(3, 3), (6, 7), (8, 1)]
sz = [1, 2, 1]
for k in range(n_components):
img = np.zeros(image_size)
xmin, xmax = centers[k][0] - sz[k], centers[k][0] + sz[k]
ymin, ymax = centers[k][1] - sz[k], centers[k][1] + sz[k]
img[xmin:xmax][:, ymin:ymax] = 1.0
V[k, :] = img.ravel()
# Y is defined by : Y = UV + noise
Y = np.dot(U, V)
Y += 0.1 * rng.randn(Y.shape[0], Y.shape[1]) # Add noise
return Y, U, V
# SparsePCA can be a bit slow. To avoid having test times go up, we
# test different aspects of the code in the same test
def test_correct_shapes():
rng = np.random.RandomState(0)
X = rng.randn(12, 10)
spca = SparsePCA(n_components=8, random_state=rng)
U = spca.fit_transform(X)
assert_equal(spca.components_.shape, (8, 10))
assert_equal(U.shape, (12, 8))
# test overcomplete decomposition
spca = SparsePCA(n_components=13, random_state=rng)
U = spca.fit_transform(X)
assert_equal(spca.components_.shape, (13, 10))
assert_equal(U.shape, (12, 13))
def test_fit_transform():
alpha = 1
rng = np.random.RandomState(0)
Y, _, _ = generate_toy_data(3, 10, (8, 8), random_state=rng) # wide array
spca_lars = SparsePCA(n_components=3, method='lars', alpha=alpha,
random_state=0)
spca_lars.fit(Y)
# Test that CD gives similar results
spca_lasso = SparsePCA(n_components=3, method='cd', random_state=0,
alpha=alpha)
spca_lasso.fit(Y)
assert_array_almost_equal(spca_lasso.components_, spca_lars.components_)
@if_safe_multiprocessing_with_blas
def test_fit_transform_parallel():
alpha = 1
rng = np.random.RandomState(0)
Y, _, _ = generate_toy_data(3, 10, (8, 8), random_state=rng) # wide array
spca_lars = SparsePCA(n_components=3, method='lars', alpha=alpha,
random_state=0)
spca_lars.fit(Y)
U1 = spca_lars.transform(Y)
# Test multiple CPUs
spca = SparsePCA(n_components=3, n_jobs=2, method='lars', alpha=alpha,
random_state=0).fit(Y)
U2 = spca.transform(Y)
assert_true(not np.all(spca_lars.components_ == 0))
assert_array_almost_equal(U1, U2)
def test_transform_nan():
# Test that SparsePCA won't return NaN when a feature is 0 in all samples.
rng = np.random.RandomState(0)
Y, _, _ = generate_toy_data(3, 10, (8, 8), random_state=rng) # wide array
Y[:, 0] = 0
estimator = SparsePCA(n_components=8)
assert_false(np.any(np.isnan(estimator.fit_transform(Y))))
def test_fit_transform_tall():
rng = np.random.RandomState(0)
Y, _, _ = generate_toy_data(3, 65, (8, 8), random_state=rng) # tall array
spca_lars = SparsePCA(n_components=3, method='lars',
random_state=rng)
U1 = spca_lars.fit_transform(Y)
spca_lasso = SparsePCA(n_components=3, method='cd', random_state=rng)
U2 = spca_lasso.fit(Y).transform(Y)
assert_array_almost_equal(U1, U2)
def test_initialization():
rng = np.random.RandomState(0)
U_init = rng.randn(5, 3)
V_init = rng.randn(3, 4)
model = SparsePCA(n_components=3, U_init=U_init, V_init=V_init, max_iter=0,
random_state=rng)
model.fit(rng.randn(5, 4))
assert_array_equal(model.components_, V_init)
def test_mini_batch_correct_shapes():
rng = np.random.RandomState(0)
X = rng.randn(12, 10)
pca = MiniBatchSparsePCA(n_components=8, random_state=rng)
U = pca.fit_transform(X)
assert_equal(pca.components_.shape, (8, 10))
assert_equal(U.shape, (12, 8))
# test overcomplete decomposition
pca = MiniBatchSparsePCA(n_components=13, random_state=rng)
U = pca.fit_transform(X)
assert_equal(pca.components_.shape, (13, 10))
assert_equal(U.shape, (12, 13))
def test_mini_batch_fit_transform():
raise SkipTest("skipping mini_batch_fit_transform.")
alpha = 1
rng = np.random.RandomState(0)
Y, _, _ = generate_toy_data(3, 10, (8, 8), random_state=rng) # wide array
spca_lars = MiniBatchSparsePCA(n_components=3, random_state=0,
alpha=alpha).fit(Y)
U1 = spca_lars.transform(Y)
# Test multiple CPUs
if sys.platform == 'win32': # fake parallelism for win32
import sklearn.externals.joblib.parallel as joblib_par
_mp = joblib_par.multiprocessing
joblib_par.multiprocessing = None
try:
U2 = MiniBatchSparsePCA(n_components=3, n_jobs=2, alpha=alpha,
random_state=0).fit(Y).transform(Y)
finally:
joblib_par.multiprocessing = _mp
else: # we can efficiently use parallelism
U2 = MiniBatchSparsePCA(n_components=3, n_jobs=2, alpha=alpha,
random_state=0).fit(Y).transform(Y)
assert_true(not np.all(spca_lars.components_ == 0))
assert_array_almost_equal(U1, U2)
# Test that CD gives similar results
spca_lasso = MiniBatchSparsePCA(n_components=3, method='cd', alpha=alpha,
random_state=0).fit(Y)
assert_array_almost_equal(spca_lasso.components_, spca_lars.components_)
|
bsd-3-clause
|
tmhm/scikit-learn
|
examples/svm/plot_custom_kernel.py
|
171
|
1546
|
"""
======================
SVM with custom kernel
======================
Simple usage of Support Vector Machines to classify a sample. It will
plot the decision surface and the support vectors.
"""
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from sklearn import svm, datasets
# import some data to play with
iris = datasets.load_iris()
X = iris.data[:, :2] # we only take the first two features. We could
# avoid this ugly slicing by using a two-dim dataset
Y = iris.target
def my_kernel(X, Y):
"""
We create a custom kernel:
(2 0)
k(X, Y) = X ( ) Y.T
(0 1)
"""
M = np.array([[2, 0], [0, 1.0]])
return np.dot(np.dot(X, M), Y.T)
h = .02 # step size in the mesh
# we create an instance of SVM and fit our data.
clf = svm.SVC(kernel=my_kernel)
clf.fit(X, Y)
# Plot the decision boundary. For that, we will assign a color to each
# point in the mesh [x_min, x_max]x[y_min, y_max].
x_min, x_max = X[:, 0].min() - 1, X[:, 0].max() + 1
y_min, y_max = X[:, 1].min() - 1, X[:, 1].max() + 1
xx, yy = np.meshgrid(np.arange(x_min, x_max, h), np.arange(y_min, y_max, h))
Z = clf.predict(np.c_[xx.ravel(), yy.ravel()])
# Put the result into a color plot
Z = Z.reshape(xx.shape)
plt.pcolormesh(xx, yy, Z, cmap=plt.cm.Paired)
# Plot also the training points
plt.scatter(X[:, 0], X[:, 1], c=Y, cmap=plt.cm.Paired)
plt.title('3-Class classification using Support Vector Machine with custom'
' kernel')
plt.axis('tight')
plt.show()
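# Illustrative check (a sketch, not part of the original example): the kernel
# defined above, k(X, Y) = X M Y.T with M = diag(2, 1), is just a linear
# kernel on features rescaled by sqrt(2) and 1, so the SVC behaves like a
# linear SVC on rescaled inputs. The helper name is made up for illustration.
def _check_custom_kernel(A, B):
    scale = np.sqrt([2.0, 1.0])             # component-wise scaling implied by M
    lhs = my_kernel(A, B)                   # A M B.T
    rhs = np.dot(A * scale, (B * scale).T)  # <a*s, b*s> equals a M b.T
    return np.allclose(lhs, rhs)            # True for any 2-column A, B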
|
bsd-3-clause
|
kklmn/xrt
|
examples/withRaycing/00_xRayCalculator/calc_bm.py
|
1
|
1837
|
# -*- coding: utf-8 -*-
__author__ = "Konstantin Klementiev, Roman Chernikov"
__date__ = "22 Jan 2016"
import numpy as np
import matplotlib.pyplot as plt
# path to xrt:
import os, sys; sys.path.append(os.path.join('..', '..', '..')) # analysis:ignore
import xrt.backends.raycing.sources as rs
xPrimeMax, zPrimeMax = 1., 0.3 # mrad
energy = np.linspace(1500, 37500, 601)
theta = np.linspace(-1., 1., 3) * xPrimeMax * 1e-3
psi = np.linspace(-1., 1., 51) * zPrimeMax * 1e-3
kwargs = dict(eE=3, eI=0.1, B0=1.7, distE='BW',
xPrimeMax=xPrimeMax, zPrimeMax=zPrimeMax)
compareWithLegacyCode = True
def main():
dtheta, dpsi = theta[1] - theta[0], psi[1] - psi[0]
source = rs.BendingMagnet(**kwargs)
I0xrt = source.intensities_on_mesh(energy, theta, psi)[0]
print(I0xrt.shape, I0xrt.max())
flux_xrt = I0xrt.sum(axis=(1, 2)) * dtheta * dpsi
plt.plot(energy/1e3, flux_xrt, 'r', label='xrt', lw=5)
if compareWithLegacyCode:
del(kwargs['distE'])
kwargs['eMin'] = energy[0]
kwargs['eMax'] = energy[-1]
kwargs['eN'] = len(energy)-1
kwargs['nz'] = len(psi)//2
source = rs.BendingMagnetWS(**kwargs)
I0ws = source.intensities_on_mesh()[0]
I0ws = np.concatenate((I0ws[:, :0:-1, :], I0ws), axis=1)
I0ws = np.concatenate((I0ws[:, :, :0:-1], I0ws), axis=2)
print(I0ws.shape, I0ws.max())
dtheta = (theta[-1] - theta[0]) / 2
flux_ws = I0ws.sum(axis=(1, 2)) * dtheta * dpsi * 1e6
plt.plot(energy/1e3, flux_ws, 'b', label='ws', lw=3)
ax = plt.gca()
ax.set_xlabel(u'energy (keV)')
ax.set_ylabel(u'total flux through {0}×{1} µrad² (ph/s/0.1%bw)'.format(
2*xPrimeMax, 2*zPrimeMax))
plt.legend()
plt.savefig('bm_flux.png')
plt.show()
if __name__ == '__main__':
main()
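# Illustrative sketch of the integration step used in main(), with no xrt
# dependency: given intensities I(E, theta, psi) on a regular angular mesh,
# the flux per energy bin is a Riemann sum over the two angular axes,
# flux(E) ~= sum_{theta, psi} I * dtheta * dpsi. The constant fake intensity
# below is made up purely to show the bookkeeping and has no physical meaning.
def _example_angular_integration():
    en = np.linspace(1500, 37500, 601)
    th = np.linspace(-1., 1., 3) * 1e-3         # rad
    ps = np.linspace(-1., 1., 51) * 0.3e-3      # rad
    dth, dps = th[1] - th[0], ps[1] - ps[0]
    intensity = np.ones((len(en), len(th), len(ps)))  # fake I0, shape (E, theta, psi)
    flux = intensity.sum(axis=(1, 2)) * dth * dps
    return flux                                 # one value per energy point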
|
mit
|
srivathsmit/raop
|
bin/train_predict.py
|
1
|
2272
|
import os
import sys
import simplejson
import numpy as np
from sklearn.cross_validation import cross_val_score
from sklearn.linear_model import LogisticRegression
from sklearn import preprocessing
from sklearn.metrics import roc_auc_score
######################
# Usage:
# For submission:
# python bin/train_predict.py submission data/
# For test:
# python bin/train_predict.py test data/
cmd = sys.argv[1]
directory = sys.argv[2]
def load_file(filename):
return np.genfromtxt(os.path.join(directory, filename), delimiter=',')
if cmd != 'submission':
train = load_file('train_sample.csv')
y_train = load_file('y_train_sample.csv')
test = load_file('test_sample.csv')
y_test = load_file('y_test_sample.csv')
else:
train = load_file('train.csv')
y_train = load_file('y_train.csv')
test = load_file('test.csv')
print(set(y_train))
def cross_eval(e, X, y):
print(X.shape, y.shape)
return roc_auc_score(y, e.predict_proba(X)[:, 1])
def normalize_feature(train, test):
scaler = preprocessing.StandardScaler(with_std=False).fit(train)
return scaler.transform(train), scaler.transform(test)
def feature_select(train, test):
lr = LogisticRegression(penalty='l1')
model = lr.fit(train, y_train)
sel_cols, _ = np.where(model.coef_.T > 1e-8)
return train[:, sel_cols], test[:, sel_cols]
train, test = normalize_feature(train, test)
train, test = feature_select(train, test)
lr = LogisticRegression()
if cmd != 'submission':
scores = cross_val_score(lr, train, y_train, cv=5, scoring=cross_eval)
print(scores, scores.mean(), scores.std())
model = lr.fit(train, y_train)
test_pred_probs = model.predict_proba(test)[:, 1]
test_roc = roc_auc_score(y_test, test_pred_probs)
print(test_roc)
else:
f = open(os.path.join(directory, 'test.json'), 'r')
all_data = simplejson.loads(f.read())
request_ids = [x['request_id'] for x in all_data]
model = lr.fit(train, y_train)
# take the positive-class column, as in the evaluation branch above
test_pred_probs = model.predict_proba(test)[:, 1]
with open(os.path.join(directory, 'submission.csv'), 'w') as f:
f.write('request_id,requester_received_pizza\n')
for request_id, prob in zip(request_ids, test_pred_probs):
f.write("%s, %f\n" % (request_id, prob))
|
unlicense
|
plotly/plotly.py
|
packages/python/plotly/plotly/graph_objs/histogram/_error_y.py
|
1
|
18744
|
from plotly.basedatatypes import BaseTraceHierarchyType as _BaseTraceHierarchyType
import copy as _copy
class ErrorY(_BaseTraceHierarchyType):
# class properties
# --------------------
_parent_path_str = "histogram"
_path_str = "histogram.error_y"
_valid_props = {
"array",
"arrayminus",
"arrayminussrc",
"arraysrc",
"color",
"symmetric",
"thickness",
"traceref",
"tracerefminus",
"type",
"value",
"valueminus",
"visible",
"width",
}
# array
# -----
@property
def array(self):
"""
Sets the data corresponding to the length of each error bar.
Values are plotted relative to the underlying data.
The 'array' property is an array that may be specified as a tuple,
list, numpy array, or pandas Series
Returns
-------
numpy.ndarray
"""
return self["array"]
@array.setter
def array(self, val):
self["array"] = val
# arrayminus
# ----------
@property
def arrayminus(self):
"""
Sets the data corresponding to the length of each error bar in the
bottom (left) direction for vertical (horizontal) bars. Values
are plotted relative to the underlying data.
The 'arrayminus' property is an array that may be specified as a tuple,
list, numpy array, or pandas Series
Returns
-------
numpy.ndarray
"""
return self["arrayminus"]
@arrayminus.setter
def arrayminus(self, val):
self["arrayminus"] = val
# arrayminussrc
# -------------
@property
def arrayminussrc(self):
"""
Sets the source reference on Chart Studio Cloud for arrayminus
.
The 'arrayminussrc' property must be specified as a string or
as a plotly.grid_objs.Column object
Returns
-------
str
"""
return self["arrayminussrc"]
@arrayminussrc.setter
def arrayminussrc(self, val):
self["arrayminussrc"] = val
# arraysrc
# --------
@property
def arraysrc(self):
"""
Sets the source reference on Chart Studio Cloud for array .
The 'arraysrc' property must be specified as a string or
as a plotly.grid_objs.Column object
Returns
-------
str
"""
return self["arraysrc"]
@arraysrc.setter
def arraysrc(self, val):
self["arraysrc"] = val
# color
# -----
@property
def color(self):
"""
Sets the stroke color of the error bars.
The 'color' property is a color and may be specified as:
- A hex string (e.g. '#ff0000')
- An rgb/rgba string (e.g. 'rgb(255,0,0)')
- An hsl/hsla string (e.g. 'hsl(0,100%,50%)')
- An hsv/hsva string (e.g. 'hsv(0,100%,100%)')
- A named CSS color:
aliceblue, antiquewhite, aqua, aquamarine, azure,
beige, bisque, black, blanchedalmond, blue,
blueviolet, brown, burlywood, cadetblue,
chartreuse, chocolate, coral, cornflowerblue,
cornsilk, crimson, cyan, darkblue, darkcyan,
darkgoldenrod, darkgray, darkgrey, darkgreen,
darkkhaki, darkmagenta, darkolivegreen, darkorange,
darkorchid, darkred, darksalmon, darkseagreen,
darkslateblue, darkslategray, darkslategrey,
darkturquoise, darkviolet, deeppink, deepskyblue,
dimgray, dimgrey, dodgerblue, firebrick,
floralwhite, forestgreen, fuchsia, gainsboro,
ghostwhite, gold, goldenrod, gray, grey, green,
greenyellow, honeydew, hotpink, indianred, indigo,
ivory, khaki, lavender, lavenderblush, lawngreen,
lemonchiffon, lightblue, lightcoral, lightcyan,
lightgoldenrodyellow, lightgray, lightgrey,
lightgreen, lightpink, lightsalmon, lightseagreen,
lightskyblue, lightslategray, lightslategrey,
lightsteelblue, lightyellow, lime, limegreen,
linen, magenta, maroon, mediumaquamarine,
mediumblue, mediumorchid, mediumpurple,
mediumseagreen, mediumslateblue, mediumspringgreen,
mediumturquoise, mediumvioletred, midnightblue,
mintcream, mistyrose, moccasin, navajowhite, navy,
oldlace, olive, olivedrab, orange, orangered,
orchid, palegoldenrod, palegreen, paleturquoise,
palevioletred, papayawhip, peachpuff, peru, pink,
plum, powderblue, purple, red, rosybrown,
royalblue, rebeccapurple, saddlebrown, salmon,
sandybrown, seagreen, seashell, sienna, silver,
skyblue, slateblue, slategray, slategrey, snow,
springgreen, steelblue, tan, teal, thistle, tomato,
turquoise, violet, wheat, white, whitesmoke,
yellow, yellowgreen
Returns
-------
str
"""
return self["color"]
@color.setter
def color(self, val):
self["color"] = val
# symmetric
# ---------
@property
def symmetric(self):
"""
Determines whether or not the error bars have the same length
in both directions (top/bottom for vertical bars, left/right for
horizontal bars).
The 'symmetric' property must be specified as a bool
(either True, or False)
Returns
-------
bool
"""
return self["symmetric"]
@symmetric.setter
def symmetric(self, val):
self["symmetric"] = val
# thickness
# ---------
@property
def thickness(self):
"""
Sets the thickness (in px) of the error bars.
The 'thickness' property is a number and may be specified as:
- An int or float in the interval [0, inf]
Returns
-------
int|float
"""
return self["thickness"]
@thickness.setter
def thickness(self, val):
self["thickness"] = val
# traceref
# --------
@property
def traceref(self):
"""
The 'traceref' property is an integer and may be specified as:
- An int (or float that will be cast to an int)
in the interval [0, 9223372036854775807]
Returns
-------
int
"""
return self["traceref"]
@traceref.setter
def traceref(self, val):
self["traceref"] = val
# tracerefminus
# -------------
@property
def tracerefminus(self):
"""
The 'tracerefminus' property is an integer and may be specified as:
- An int (or float that will be cast to an int)
in the interval [0, 9223372036854775807]
Returns
-------
int
"""
return self["tracerefminus"]
@tracerefminus.setter
def tracerefminus(self, val):
self["tracerefminus"] = val
# type
# ----
@property
def type(self):
"""
Determines the rule used to generate the error bars. If
"constant", the bar lengths are of a constant value. Set this
constant in `value`. If "percent", the bar lengths correspond
to a percentage of underlying data. Set this percentage in
`value`. If "sqrt", the bar lengths correspond to the square of
the underlying data. If "data", the bar lengths are set with
data set `array`.
The 'type' property is an enumeration that may be specified as:
- One of the following enumeration values:
['percent', 'constant', 'sqrt', 'data']
Returns
-------
Any
"""
return self["type"]
@type.setter
def type(self, val):
self["type"] = val
# value
# -----
@property
def value(self):
"""
Sets the value of either the percentage (if `type` is set to
"percent") or the constant (if `type` is set to "constant")
corresponding to the lengths of the error bars.
The 'value' property is a number and may be specified as:
- An int or float in the interval [0, inf]
Returns
-------
int|float
"""
return self["value"]
@value.setter
def value(self, val):
self["value"] = val
# valueminus
# ----------
@property
def valueminus(self):
"""
Sets the value of either the percentage (if `type` is set to
"percent") or the constant (if `type` is set to "constant")
corresponding to the lengths of the error bars in the bottom
(left) direction for vertical (horizontal) bars
The 'valueminus' property is a number and may be specified as:
- An int or float in the interval [0, inf]
Returns
-------
int|float
"""
return self["valueminus"]
@valueminus.setter
def valueminus(self, val):
self["valueminus"] = val
# visible
# -------
@property
def visible(self):
"""
Determines whether or not this set of error bars is visible.
The 'visible' property must be specified as a bool
(either True, or False)
Returns
-------
bool
"""
return self["visible"]
@visible.setter
def visible(self, val):
self["visible"] = val
# width
# -----
@property
def width(self):
"""
Sets the width (in px) of the cross-bar at both ends of the
error bars.
The 'width' property is a number and may be specified as:
- An int or float in the interval [0, inf]
Returns
-------
int|float
"""
return self["width"]
@width.setter
def width(self, val):
self["width"] = val
# Self properties description
# ---------------------------
@property
def _prop_descriptions(self):
return """\
array
Sets the data corresponding to the length of each error
bar. Values are plotted relative to the underlying
data.
arrayminus
Sets the data corresponding to the length of each error
bar in the bottom (left) direction for vertical
(horizontal) bars. Values are plotted relative to the
underlying data.
arrayminussrc
Sets the source reference on Chart Studio Cloud for
arrayminus .
arraysrc
Sets the source reference on Chart Studio Cloud for
array .
color
Sets the stroke color of the error bars.
symmetric
Determines whether or not the error bars have the same
length in both directions (top/bottom for vertical bars,
left/right for horizontal bars).
thickness
Sets the thickness (in px) of the error bars.
traceref
tracerefminus
type
Determines the rule used to generate the error bars. If
"constant", the bar lengths are of a constant value.
Set this constant in `value`. If "percent", the bar
lengths correspond to a percentage of underlying data.
Set this percentage in `value`. If "sqrt", the bar
lengths correspond to the square of the underlying
data. If "data", the bar lengths are set with data set
`array`.
value
Sets the value of either the percentage (if `type` is
set to "percent") or the constant (if `type` is set to
"constant") corresponding to the lengths of the error
bars.
valueminus
Sets the value of either the percentage (if `type` is
set to "percent") or the constant (if `type` is set to
"constant") corresponding to the lengths of the error
bars in the bottom (left) direction for vertical
(horizontal) bars
visible
Determines whether or not this set of error bars is
visible.
width
Sets the width (in px) of the cross-bar at both ends of
the error bars.
"""
def __init__(
self,
arg=None,
array=None,
arrayminus=None,
arrayminussrc=None,
arraysrc=None,
color=None,
symmetric=None,
thickness=None,
traceref=None,
tracerefminus=None,
type=None,
value=None,
valueminus=None,
visible=None,
width=None,
**kwargs
):
"""
Construct a new ErrorY object
Parameters
----------
arg
dict of properties compatible with this constructor or
an instance of
:class:`plotly.graph_objs.histogram.ErrorY`
array
Sets the data corresponding to the length of each error
bar. Values are plotted relative to the underlying
data.
arrayminus
Sets the data corresponding to the length of each error
bar in the bottom (left) direction for vertical
(horizontal) bars. Values are plotted relative to the
underlying data.
arrayminussrc
Sets the source reference on Chart Studio Cloud for
arrayminus .
arraysrc
Sets the source reference on Chart Studio Cloud for
array .
color
Sets the stroke color of the error bars.
symmetric
Determines whether or not the error bars have the same
length in both directions (top/bottom for vertical bars,
left/right for horizontal bars).
thickness
Sets the thickness (in px) of the error bars.
traceref
tracerefminus
type
Determines the rule used to generate the error bars. If
"constant", the bar lengths are of a constant value.
Set this constant in `value`. If "percent", the bar
lengths correspond to a percentage of underlying data.
Set this percentage in `value`. If "sqrt", the bar
lengths correspond to the square of the underlying
data. If "data", the bar lengths are set with data set
`array`.
value
Sets the value of either the percentage (if `type` is
set to "percent") or the constant (if `type` is set to
"constant") corresponding to the lengths of the error
bars.
valueminus
Sets the value of either the percentage (if `type` is
set to "percent") or the constant (if `type` is set to
"constant") corresponding to the lengths of the error
bars in the bottom (left) direction for vertical
(horizontal) bars
visible
Determines whether or not this set of error bars is
visible.
width
Sets the width (in px) of the cross-bar at both ends of
the error bars.
Returns
-------
ErrorY
"""
super(ErrorY, self).__init__("error_y")
if "_parent" in kwargs:
self._parent = kwargs["_parent"]
return
# Validate arg
# ------------
if arg is None:
arg = {}
elif isinstance(arg, self.__class__):
arg = arg.to_plotly_json()
elif isinstance(arg, dict):
arg = _copy.copy(arg)
else:
raise ValueError(
"""\
The first argument to the plotly.graph_objs.histogram.ErrorY
constructor must be a dict or
an instance of :class:`plotly.graph_objs.histogram.ErrorY`"""
)
# Handle skip_invalid
# -------------------
self._skip_invalid = kwargs.pop("skip_invalid", False)
self._validate = kwargs.pop("_validate", True)
# Populate data dict with properties
# ----------------------------------
_v = arg.pop("array", None)
_v = array if array is not None else _v
if _v is not None:
self["array"] = _v
_v = arg.pop("arrayminus", None)
_v = arrayminus if arrayminus is not None else _v
if _v is not None:
self["arrayminus"] = _v
_v = arg.pop("arrayminussrc", None)
_v = arrayminussrc if arrayminussrc is not None else _v
if _v is not None:
self["arrayminussrc"] = _v
_v = arg.pop("arraysrc", None)
_v = arraysrc if arraysrc is not None else _v
if _v is not None:
self["arraysrc"] = _v
_v = arg.pop("color", None)
_v = color if color is not None else _v
if _v is not None:
self["color"] = _v
_v = arg.pop("symmetric", None)
_v = symmetric if symmetric is not None else _v
if _v is not None:
self["symmetric"] = _v
_v = arg.pop("thickness", None)
_v = thickness if thickness is not None else _v
if _v is not None:
self["thickness"] = _v
_v = arg.pop("traceref", None)
_v = traceref if traceref is not None else _v
if _v is not None:
self["traceref"] = _v
_v = arg.pop("tracerefminus", None)
_v = tracerefminus if tracerefminus is not None else _v
if _v is not None:
self["tracerefminus"] = _v
_v = arg.pop("type", None)
_v = type if type is not None else _v
if _v is not None:
self["type"] = _v
_v = arg.pop("value", None)
_v = value if value is not None else _v
if _v is not None:
self["value"] = _v
_v = arg.pop("valueminus", None)
_v = valueminus if valueminus is not None else _v
if _v is not None:
self["valueminus"] = _v
_v = arg.pop("visible", None)
_v = visible if visible is not None else _v
if _v is not None:
self["visible"] = _v
_v = arg.pop("width", None)
_v = width if width is not None else _v
if _v is not None:
self["width"] = _v
# Process unknown kwargs
# ----------------------
self._process_kwargs(**dict(arg, **kwargs))
# Reset skip_invalid
# ------------------
self._skip_invalid = False
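# Illustrative usage sketch (an assumption-labelled example, not generated
# plotly code): error_y on a histogram trace is normally supplied when
# building the figure, either as a dict or as an ErrorY instance like the
# class above. The sample values are made up.
def _example_histogram_error_y():
    import plotly.graph_objects as go
    fig = go.Figure(
        go.Histogram(
            x=[1, 1, 2, 2, 2, 3, 4, 4],
            error_y=ErrorY(type="percent", value=15, visible=True),
        )
    )
    return fig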
|
mit
|
russel1237/scikit-learn
|
examples/model_selection/plot_roc_crossval.py
|
247
|
3253
|
"""
=============================================================
Receiver Operating Characteristic (ROC) with cross validation
=============================================================
Example of Receiver Operating Characteristic (ROC) metric to evaluate
classifier output quality using cross-validation.
ROC curves typically feature true positive rate on the Y axis, and false
positive rate on the X axis. This means that the top left corner of the plot is
the "ideal" point - a false positive rate of zero, and a true positive rate of
one. This is not very realistic, but it does mean that a larger area under the
curve (AUC) is usually better.
The "steepness" of ROC curves is also important, since it is ideal to maximize
the true positive rate while minimizing the false positive rate.
This example shows the ROC response of different datasets, created from K-fold
cross-validation. Taking all of these curves, it is possible to calculate the
mean area under curve, and see the variance of the curve when the
training set is split into different subsets. This roughly shows how the
classifier output is affected by changes in the training data, and how
different the splits generated by K-fold cross-validation are from one another.
.. note::
See also :func:`sklearn.metrics.auc_score`,
:func:`sklearn.cross_validation.cross_val_score`,
:ref:`example_model_selection_plot_roc.py`,
"""
print(__doc__)
import numpy as np
from scipy import interp
import matplotlib.pyplot as plt
from sklearn import svm, datasets
from sklearn.metrics import roc_curve, auc
from sklearn.cross_validation import StratifiedKFold
###############################################################################
# Data IO and generation
# import some data to play with
iris = datasets.load_iris()
X = iris.data
y = iris.target
X, y = X[y != 2], y[y != 2]
n_samples, n_features = X.shape
# Add noisy features
random_state = np.random.RandomState(0)
X = np.c_[X, random_state.randn(n_samples, 200 * n_features)]
###############################################################################
# Classification and ROC analysis
# Run classifier with cross-validation and plot ROC curves
cv = StratifiedKFold(y, n_folds=6)
classifier = svm.SVC(kernel='linear', probability=True,
random_state=random_state)
mean_tpr = 0.0
mean_fpr = np.linspace(0, 1, 100)
all_tpr = []
for i, (train, test) in enumerate(cv):
probas_ = classifier.fit(X[train], y[train]).predict_proba(X[test])
# Compute ROC curve and area under the curve
fpr, tpr, thresholds = roc_curve(y[test], probas_[:, 1])
mean_tpr += interp(mean_fpr, fpr, tpr)
mean_tpr[0] = 0.0
roc_auc = auc(fpr, tpr)
plt.plot(fpr, tpr, lw=1, label='ROC fold %d (area = %0.2f)' % (i, roc_auc))
plt.plot([0, 1], [0, 1], '--', color=(0.6, 0.6, 0.6), label='Luck')
mean_tpr /= len(cv)
mean_tpr[-1] = 1.0
mean_auc = auc(mean_fpr, mean_tpr)
plt.plot(mean_fpr, mean_tpr, 'k--',
label='Mean ROC (area = %0.2f)' % mean_auc, lw=2)
plt.xlim([-0.05, 1.05])
plt.ylim([-0.05, 1.05])
plt.xlabel('False Positive Rate')
plt.ylabel('True Positive Rate')
plt.title('Receiver operating characteristic example')
plt.legend(loc="lower right")
plt.show()
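# Illustrative worked example (a sketch, not part of the original gallery
# script) of the quantities plotted above: roc_curve sweeps a threshold over
# the scores and returns (fpr, tpr) pairs, and auc integrates that curve with
# the trapezoidal rule. The toy labels and scores are made up.
def _example_roc_by_hand():
    y_true = np.array([0, 0, 1, 1])
    scores = np.array([0.1, 0.4, 0.35, 0.8])
    fpr, tpr, thresholds = roc_curve(y_true, scores)
    return auc(fpr, tpr)   # 0.75 for this toy input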
|
bsd-3-clause
|
ecrc/girih
|
scripts/sisc/paper_plot_thread_scaling_naive_out_cache.py
|
2
|
6303
|
#!/usr/bin/env python
def main():
import sys
raw_data = load_csv(sys.argv[1])
k_l = set()
for k in raw_data:
k_l.add(get_stencil_num(k))
k_l = list(k_l)
# for ts in ['Naive', 'Dynamic-Intra-Diamond']
for k in k_l:
for is_dp in [1]:
plot_lines(raw_data, k, is_dp)
def get_stencil_num(k):
# add the stencil operator
if k['Stencil Kernel coefficients'] in 'constant':
if int(k['Stencil Kernel semi-bandwidth'])==4:
stencil = 0
else:
stencil = 1
elif 'no-symmetry' in k['Stencil Kernel coefficients']:
stencil = 5
elif 'sym' in k['Stencil Kernel coefficients']:
if int(k['Stencil Kernel semi-bandwidth'])==1:
stencil = 3
else:
stencil = 4
else:
stencil = 2
return stencil
def plot_lines(raw_data, stencil_kernel, is_dp):
from operator import itemgetter
import matplotlib.pyplot as plt
import matplotlib
import pylab
from pylab import arange,pi,sin,cos,sqrt
if stencil_kernel == 1:
fig_width = 3.4*0.393701 # inches
else:
fig_width = 3.0*0.393701 # inches
fig_height = 1.8*fig_width #* 210.0/280.0#433.62/578.16
fig_size = [fig_width,fig_height]
params = {
'axes.labelsize': 7,
'axes.linewidth': 0.5,
'lines.linewidth': 0.75,
'text.fontsize': 7,
'legend.fontsize': 5,
'xtick.labelsize': 7,
'ytick.labelsize': 7,
'lines.markersize': 3,
'text.usetex': True,
'figure.figsize': fig_size}
pylab.rcParams.update(params)
ts_l = set()
for k in raw_data:
ts_l.add(k['Time stepper orig name'])
ts_l = list(ts_l)
th = set()
for k in raw_data:
th.add(int(k['OpenMP Threads']))
th = list(th)
#tb_l = [3, 7]
tb_l = set()
for k in raw_data:
tb_l.add(k['Time unroll'])
tb_l = list(tb_l)
tb_l = map(int,tb_l)
tb_l.sort()
#print tb_l
req_fields = [('Thread group size', int), ('WD main-loop RANK0 MStencil/s MAX', float), ('Time stepper orig name', str), ('OpenMP Threads', int), ('MStencil/s MAX', float), ('Time unroll',int), ('Sustained Memory BW', float)]
data = []
for k in raw_data:
tup = {}
# add the general fileds
for f in req_fields:
tup[f[0]] = map(f[1], [k[f[0]]] )[0]
# add the stencil operator
tup['stencil'] = get_stencil_num(k)
# add the precision information
if k['Precision'] in 'DP':
p = 1
else:
p = 0
tup['Precision'] = p
data.append(tup)
data = sorted(data, key=itemgetter('Time stepper orig name', 'Time unroll', 'Thread group size', 'OpenMP Threads'))
#for i in data: print i
max_single = 0
fig, ax1 = plt.subplots()
lns = []
col = 'g'
ts2 = 'Spatial blk.'
x = []
y = []
y_m = []
for k in data:
if ( ('Naive' in k['Time stepper orig name']) and (k['stencil']==stencil_kernel) and (k['Precision']==is_dp)):
if k['OpenMP Threads'] == 1 and max_single < k['MStencil/s MAX']/10**3: max_single = k['MStencil/s MAX']/10**3
y_m.append(k['Sustained Memory BW']/10**3)
x.append(k['OpenMP Threads'])
y.append(k['MStencil/s MAX']/10**3)
if(x):
lns = lns + ax1.plot(x, y, color=col, marker='o', linestyle='-', label='Perf. (GLUP/s)')
if(y_m):
ax2 = ax1.twinx()
lns = lns + ax2.plot(x, y_m, color='r', marker='^', linestyle='-', label='BW (GBytes/s)')
# add ideal scaling
ideal = [i*max_single for i in th]
lns2 = ax1.plot(th, ideal, color='g', linestyle='--', label='Ideal scaling')
# add limits
mem_limit=0
# sus_mem_bw = 36500 #SB
sus_mem_bw = 40.0 #IB
if stencil_kernel == 0:
mem_limit = sus_mem_bw/(4*4)
elif stencil_kernel == 1:
mem_limit = sus_mem_bw/(4*3)
elif stencil_kernel == 4:
mem_limit = sus_mem_bw/(4*(3+13))
elif stencil_kernel == 5:
mem_limit = sus_mem_bw/(4*(3+7))
if is_dp == 1: mem_limit = mem_limit / 2
#if stencil_kernel ==1:
lns3 = ax1.plot([5, len(th)], [mem_limit, mem_limit], color='k', linestyle='-', label='Perf. limit')
# lns = lns + ax2.plot([6, len(th)], [sus_mem_bw, sus_mem_bw], color='m', linestyle='-', label='STREAM BW')
#title = 'Thread scaling of'
title = ''
if stencil_kernel == 0:
title = '25_pt_const_naive_large'
# title = title + ' 25_pt constant coeff. star stencil'
elif stencil_kernel == 1:
title = '7_pt_const_naive_large'
# title = title + ' 7_pt constant coeff. star stencil'
elif stencil_kernel == 2:
title = title + ' 7_pt variable coeff. star stencil'
elif stencil_kernel == 3:
title = title + ' 7_pt variable coeff. axis symm star stencil'
elif stencil_kernel == 4:
title = '25_pt_var_naive_large'
# title = title + ' 25_pt variable coeff. axis symm star stencil'
elif stencil_kernel == 5:
title = '7_pt_var_naive_large'
# title = title + ' 7_pt variable coeff. no symm star stencil'
#if is_dp == 1:
# title = title + ' in double precision'
#else:
# title = title + ' in single precision'
f_name = title.replace(' ', '_')
ax1.set_xlabel('Threads')
plt.xticks(range(2,11,2))
if stencil_kernel == 1:
ax1.set_ylabel('GLUP/s', color='g', labelpad=0)
ax2.set_ylabel('GBytes/s', color='r', labelpad=0)
# plt.title(title)
lns = lns2 + lns3 + lns
labs = [l.get_label() for l in lns]
if stencil_kernel == 1:
ax1.legend(lns, labs, loc='lower right')
ax2.set_ylim(ymin=0, ymax=42)
ax1.grid()
pylab.savefig(f_name+'.png', bbox_inches="tight", pad_inches=0.04)
pylab.savefig(f_name+'.pdf', format='pdf', bbox_inches="tight", pad_inches=0)
#plt.show()
plt.clf()
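# Illustrative restatement (a sketch, not used by the plotting code above) of
# the performance-limit arithmetic in plot_lines(): the bandwidth roofline in
# GLUP/s is the sustained memory bandwidth in GBytes/s divided by the bytes
# streamed per lattice update (word size times the number of streamed values),
# which the script halves again for double precision. The defaults below
# mirror the 7-point constant-coefficient branch and are assumptions of that
# script, not measurements.
def _example_mem_limit(sus_mem_bw_gbs=40.0, values_per_update=3, is_dp=True):
    word_bytes = 8.0 if is_dp else 4.0
    return sus_mem_bw_gbs / (word_bytes * values_per_update)   # GLUP/s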
def load_csv(data_file):
from csv import DictReader
with open(data_file, 'rb') as output_file:
data = DictReader(output_file)
data = [k for k in data]
return data
if __name__ == "__main__":
main()
|
bsd-3-clause
|
tknapen/AIN_PC_model
|
versions/first_ode_dict_pp.py
|
1
|
6168
|
#!/usr/bin/env python
# encoding: utf-8
"""
EyeLinkSession.py
Created by Tomas Knapen on 2011-04-27.
Copyright (c) 2011 __MyCompanyName__. All rights reserved.
"""
import os, sys, pickle, math, thread, time
from subprocess import *
import scipy as sp
import scipy.stats as stats
import numpy as np
import matplotlib.pylab as pl
from matplotlib.backends.backend_pdf import PdfPages
from tables import *
import pp
#class for callbacks
class Saver:
def __init__(self, nr_timepoints, nr_simulations):
self.result_array = np.zeros((nr_simulations, nr_timepoints, 4))
self.parameter_array = []# [np.zeros((nr_simulations, nr_parameters))]
self.lock = thread.allocate_lock()
self.count = 0
#the callback function
def save_to_array(self, value):
# we must use lock here because array stuff is not atomic (?)
self.lock.acquire()
self.parameter_array.append( value[0] )
self.result_array[self.count] = value[1]
self.lock.release()
self.count += 1
def save_to_hdf_file(self, hdf5_filename):
# saving the data
if os.path.isfile(hdf5_filename):
os.system('rm ' + hdf5_filename)
h5file = openFile(hdf5_filename, mode = "w", title = " file")
try:
thisRunGroup = h5file.getNode(where = "/", name = '', classname='Group')
except NoSuchNodeError:
# import actual data
thisRunGroup = h5file.createGroup(where = "/", name = str(sim))
h5file.createArray(thisRunGroup, 'simulation_data', self.result_array, '')
ptd = [(k, np.float64) for k in np.unique(np.concatenate([k.keys() for k in self.parameter_array]))]
self.parameterTypeDictionary = np.dtype(ptd)
# create a table for the parameters of these runs
parameterTable = h5file.createTable(thisRunGroup, 'sim_parameters', self.parameterTypeDictionary)
# fill up the table
trial = parameterTable.row
for r in self.parameter_array:
for par in r.keys():
trial[par] = r[par]
trial.append()
parameterTable.flush()
h5file.close()
def plot_activities(self, plot_file_name, sort_variable = None):
plot_file = PdfPages(plot_file_name)
if sort_variable == None:
order = range(len(self.parameter_array))
else:
order = np.argsort(np.array([p[sort_variable] for p in self.parameter_array]))
for i in order:
fig = pl.figure(figsize = (15, 6))
s = fig.add_subplot(211)
s.set_title('simulation results')
s.set_xlabel('time [steps]')
s.set_ylabel('activity strength')
s.set_ylim([0,1.3])
pl.plot(self.result_array[i,::10,0], 'r', alpha = 0.75, label = 'H1')
pl.plot(self.result_array[i,::10,1], 'g', alpha = 0.75, label = 'H2')
leg = s.legend(fancybox = True)
leg.get_frame().set_alpha(0.5)
if leg:
for t in leg.get_texts():
t.set_fontsize('small') # the legend text fontsize
for l in leg.get_lines():
l.set_linewidth(3.5) # the legend line width
s = fig.add_subplot(212)
s.set_title('simulation results')
s.set_xlabel('time [steps]')
s.set_ylabel('adaptation strength')
s.set_ylim([0,1.3])
pl.plot(self.result_array[i,::10,2], 'r--', alpha = 0.25, label = 'A1')
pl.plot(self.result_array[i,::10,3], 'g--', alpha = 0.25, label = 'A2')
pl.text(10, 1.0, str(self.parameter_array[i]), fontsize = 8)
leg = s.legend(fancybox = True)
leg.get_frame().set_alpha(0.5)
if leg:
for t in leg.get_texts():
t.set_fontsize('small') # the legend text fontsize
for l in leg.get_lines():
l.set_linewidth(3.5) # the legend line width
plot_file.savefig()
pl.close()
plot_file.close()
# pl.show()
# mu parameters based on dictionary
mu = {'XL': 0.9, 'XR': 1.0, 'beta': 0.24, 'gamma': 3.3, 'exponent': 1.0, 'alpha': 4.0, 'tau': 100.0, 'NRa': 2.0, 'NRs': 1.0, 'noise_level': 0.05}
#defining variables based on indices on y
H1, H2 = 0,1
A1, A2 = 2,3
def npS( input, mu ):
input[input < 0] = 0.0
# write the Naka-Rushton result back in place so the callers below see it
input[:] = pow(input,mu['NRa'])/(pow(input,mu['NRa']) + pow(mu['NRs'],mu['NRa']))
def func(t, y, mu):
import pygsl._numobj
from pygsl import odeiv, Float
def S( input, mu ):
if input >= 0. :
return pow(input,mu['NRa'])/(pow(input,mu['NRa']) + pow(mu['NRs'],mu['NRa']))
else:
return 0.
dydt = pygsl._numobj.zeros((4,), Float) * 1.0
#defining variables based on indices on y
H1, H2 = 0,1
A1, A2 = 2,3
dydt[H1] = mu['XL'] - (1. + y[A1]) * y[H1] + mu['beta'] * y[A1] - mu['gamma'] * S(y[H2], mu);
dydt[H2] = mu['XR'] - (1. + y[A2]) * y[H2] + mu['beta'] * y[A2] - mu['gamma'] * S(y[H1], mu);
dydt[A1] = ( -pow(y[A1],mu['exponent']) + ( mu['alpha'] * S(y[H1], mu) ) ) / mu['tau'];
dydt[A2] = ( -pow(y[A2],mu['exponent']) + ( mu['alpha'] * S(y[H2], mu) ) ) / mu['tau'];
return dydt
def run_sim(mu, nr_timepoints, func, npS):
import pygsl._numobj
import pygsl
from pygsl import odeiv, Float
import numpy
dimension = 4
step = odeiv.step_rkf45(dimension, func, None, mu)
control = odeiv.control_y_new(step, 1e-6, 1e-6)
evolve = odeiv.evolve(step, control, dimension)
h = 1
t1 = 10000.0
y = pygsl._numobj.array((0.0, 0.0, 0.0, 0.01))
op = numpy.zeros((nr_timepoints, dimension))
iters = 0
for t in numpy.linspace(0, t1, nr_timepoints):
t, h, y = evolve.apply(t, t1, h, y)
op[iters] = y
y += numpy.concatenate((numpy.random.randn(2) * mu['noise_level'], [0.0, 0.0]))
iters += 1
op = numpy.array(op)
# naka rushton on activities:
npS(op[:,0], mu)
npS(op[:,1], mu)
# return both output and parameter dictionary
return [mu, op]
# Create an instance of callback class
nr_simulations = 40
nr_timepoints = 20000
saver = Saver(nr_timepoints = nr_timepoints, nr_simulations = nr_simulations)
# running these in parallel
# Creates jobserver with automatically detected number of workers
job_server = pp.Server(ppservers=())
# Execute the same task with different amount of active workers and measure the time
for index in xrange(nr_simulations):
mu['XL'] += 0.2/nr_simulations
job_server.submit(run_sim, (mu, nr_timepoints, func, npS), callback=saver.save_to_array)
#wait for jobs in all groups to finish
job_server.wait()
saver.save_to_hdf_file('data/test2.hdf')
saver.plot_activities('data/test2.pdf', sort_variable = 'XL')
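# Illustrative sketch: the same four rivalry ODEs defined in func() can be
# integrated with scipy.integrate.solve_ivp (scipy >= 1.0) instead of pygsl.
# Noise injection and the parallel parameter sweep are omitted; parameters are
# reused from the `mu` dictionary above. This is an alternative sketch, not
# the pipeline used by this script.
def _example_solve_ivp(mu, nr_timepoints=2000, t_end=10000.0):
    from scipy.integrate import solve_ivp

    def S(x):
        if x < 0:
            return 0.0
        return x ** mu['NRa'] / (x ** mu['NRa'] + mu['NRs'] ** mu['NRa'])

    def rhs(t, y):
        h1, h2, a1, a2 = y
        return [mu['XL'] - (1. + a1) * h1 + mu['beta'] * a1 - mu['gamma'] * S(h2),
                mu['XR'] - (1. + a2) * h2 + mu['beta'] * a2 - mu['gamma'] * S(h1),
                (-a1 ** mu['exponent'] + mu['alpha'] * S(h1)) / mu['tau'],
                (-a2 ** mu['exponent'] + mu['alpha'] * S(h2)) / mu['tau']]

    t_eval = np.linspace(0., t_end, nr_timepoints)
    sol = solve_ivp(rhs, (0., t_end), [0.0, 0.0, 0.0, 0.01], t_eval=t_eval)
    return sol.t, sol.y.T   # columns: H1, H2, A1, A2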
|
mit
|
devs1991/test_edx_docmode
|
venv/lib/python2.7/site-packages/sklearn/decomposition/tests/test_dict_learning.py
|
3
|
5110
|
import numpy as np
from numpy.testing import assert_array_almost_equal, assert_array_equal, \
assert_equal
from nose import SkipTest
from nose.tools import assert_true
from sklearn.utils.testing import assert_less
from .. import DictionaryLearning, MiniBatchDictionaryLearning, SparseCoder, \
dict_learning_online, sparse_encode
rng = np.random.RandomState(0)
n_samples, n_features = 10, 8
X = rng.randn(n_samples, n_features)
def test_dict_learning_shapes():
n_atoms = 5
dico = DictionaryLearning(n_atoms).fit(X)
assert_true(dico.components_.shape == (n_atoms, n_features))
def test_dict_learning_overcomplete():
n_atoms = 12
X = rng.randn(n_samples, n_features)
dico = DictionaryLearning(n_atoms).fit(X)
assert_true(dico.components_.shape == (n_atoms, n_features))
def test_dict_learning_reconstruction():
n_atoms = 12
dico = DictionaryLearning(n_atoms, transform_algorithm='omp',
transform_alpha=0.001, random_state=0)
code = dico.fit(X).transform(X)
assert_array_almost_equal(np.dot(code, dico.components_), X)
dico.set_params(transform_algorithm='lasso_lars')
code = dico.transform(X)
assert_array_almost_equal(np.dot(code, dico.components_), X, decimal=2)
# used to test lars here too, but there's no guarantee the number of
# nonzero atoms is right.
def test_dict_learning_nonzero_coefs():
n_atoms = 4
dico = DictionaryLearning(n_atoms, transform_algorithm='lars',
transform_n_nonzero_coefs=3, random_state=0)
code = dico.fit(X).transform(X[1])
assert_true(len(np.flatnonzero(code)) == 3)
dico.set_params(transform_algorithm='omp')
code = dico.transform(X[1])
assert_equal(len(np.flatnonzero(code)), 3)
def test_dict_learning_split():
n_atoms = 5
dico = DictionaryLearning(n_atoms, transform_algorithm='threshold')
code = dico.fit(X).transform(X)
dico.split_sign = True
split_code = dico.transform(X)
assert_array_equal(split_code[:, :n_atoms] - split_code[:, n_atoms:], code)
def test_dict_learning_online_shapes():
# rng = np.random.RandomState(0)
# X = rng.randn(12, 10)
n_atoms = 8
code, dictionary = dict_learning_online(X, n_atoms=n_atoms, alpha=1,
random_state=rng)
assert_equal(code.shape, (n_samples, n_atoms))
assert_equal(dictionary.shape, (n_atoms, n_features))
assert_equal(np.dot(code, dictionary).shape, X.shape)
def test_dict_learning_online_estimator_shapes():
n_atoms = 5
dico = MiniBatchDictionaryLearning(n_atoms, n_iter=20).fit(X)
assert_true(dico.components_.shape == (n_atoms, n_features))
def test_dict_learning_online_overcomplete():
n_atoms = 12
dico = MiniBatchDictionaryLearning(n_atoms, n_iter=20).fit(X)
assert_true(dico.components_.shape == (n_atoms, n_features))
def test_dict_learning_online_initialization():
n_atoms = 12
V = rng.randn(n_atoms, n_features)
dico = MiniBatchDictionaryLearning(n_atoms, n_iter=0, dict_init=V).fit(X)
assert_array_equal(dico.components_, V)
def test_dict_learning_online_partial_fit():
# this test was not actually passing before!
raise SkipTest
n_atoms = 12
V = rng.randn(n_atoms, n_features) # random init
V /= np.sum(V ** 2, axis=1)[:, np.newaxis]
rng1 = np.random.RandomState(0)
rng2 = np.random.RandomState(0)
dico1 = MiniBatchDictionaryLearning(n_atoms, n_iter=10, chunk_size=1,
shuffle=False, dict_init=V,
random_state=rng1).fit(X)
dico2 = MiniBatchDictionaryLearning(n_atoms, n_iter=1, dict_init=V,
random_state=rng2)
for ii, sample in enumerate(X):
dico2.partial_fit(sample, iter_offset=ii * dico2.n_iter)
# if ii == 1: break
assert_true(not np.all(sparse_encode(X, dico1.components_, alpha=100) ==
0))
assert_array_equal(dico1.components_, dico2.components_)
def test_sparse_encode_shapes():
n_atoms = 12
V = rng.randn(n_atoms, n_features) # random init
V /= np.sum(V ** 2, axis=1)[:, np.newaxis]
for algo in ('lasso_lars', 'lasso_cd', 'lars', 'omp', 'threshold'):
code = sparse_encode(X, V, algorithm=algo)
assert_equal(code.shape, (n_samples, n_atoms))
def test_sparse_encode_error():
n_atoms = 12
V = rng.randn(n_atoms, n_features) # random init
V /= np.sum(V ** 2, axis=1)[:, np.newaxis]
code = sparse_encode(X, V, alpha=0.001)
assert_true(not np.all(code == 0))
assert_less(np.sqrt(np.sum((np.dot(code, V) - X) ** 2)), 0.1)
def test_sparse_coder_estimator():
n_atoms = 12
V = rng.randn(n_atoms, n_features) # random init
V /= np.sum(V ** 2, axis=1)[:, np.newaxis]
code = SparseCoder(dictionary=V, transform_algorithm='lasso_lars',
transform_alpha=0.001).transform(X)
assert_true(not np.all(code == 0))
assert_less(np.sqrt(np.sum((np.dot(code, V) - X) ** 2)), 0.1)
|
agpl-3.0
|
wanderknight/trading-with-python
|
lib/functions.py
|
76
|
11627
|
# -*- coding: utf-8 -*-
"""
twp support functions
@author: Jev Kuznetsov
Licence: GPL v2
"""
from scipy import polyfit, polyval
import datetime as dt
#from datetime import datetime, date
from pandas import DataFrame, Index, Series
import csv
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
def nans(shape, dtype=float):
''' create a nan numpy array '''
a = np.empty(shape, dtype)
a.fill(np.nan)
return a
def plotCorrelationMatrix(price, thresh = None):
''' plot a correlation matrix as a heatmap image
inputs:
price: prices DataFrame
thresh: correlation threshold to use for checking, default None
'''
symbols = price.columns.tolist()
R = price.pct_change()
correlationMatrix = R.corr()
if thresh is not None:
correlationMatrix = correlationMatrix > thresh
plt.imshow(abs(correlationMatrix.values),interpolation='none')
plt.xticks(range(len(symbols)),symbols)
plt.yticks(range(len(symbols)),symbols)
plt.colorbar()
plt.title('Correlation matrix')
return correlationMatrix
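# Hedged usage sketch (added for illustration; not part of the original
# module). It builds a small random-walk price DataFrame with made-up ticker
# names and plots its correlation heatmap with a 0.5 threshold.
def _demo_plotCorrelationMatrix():
    np.random.seed(0)
    prices = DataFrame(100.0 + np.cumsum(np.random.randn(250, 3), axis=0),
                       columns=['AAA', 'BBB', 'CCC'])
    corr = plotCorrelationMatrix(prices, thresh=0.5)
    plt.show()
    return corr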
def pca(A):
""" performs principal components analysis
(PCA) on the n-by-p DataFrame A
Rows of A correspond to observations, columns to variables.
Returns :
coeff : principal components, column-wise
transform: A in principal component space
latent : eigenvalues
"""
# computing eigenvalues and eigenvectors of covariance matrix
M = (A - A.mean()).T # subtract the mean (along columns)
[latent,coeff] = np.linalg.eig(np.cov(M)) # attention:not always sorted
idx = np.argsort(latent) # sort eigenvalues
    idx = idx[::-1] # reverse to descending order (largest eigenvalue first)
coeff = coeff[:,idx]
latent = latent[idx]
score = np.dot(coeff.T,A.T) # projection of the data in the new space
transform = DataFrame(index = A.index, data = score.T)
return coeff,transform,latent
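# Hedged usage sketch (added for illustration; not part of the original
# module). It runs pca() on a random DataFrame and checks that the returned
# eigenvalues come back ordered from largest to smallest.
def _demo_pca():
    np.random.seed(1)
    A = DataFrame(np.random.randn(100, 4), columns=list('abcd'))
    coeff, transform, latent = pca(A)
    assert latent[0] >= latent[-1]  # largest-variance component comes first
    return transform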
def pos2pnl(price,position , ibTransactionCost=False ):
"""
calculate pnl based on price and position
Inputs:
---------
price: series or dataframe of price
position: number of shares at each time. Column names must be same as in price
ibTransactionCost: use bundled Interactive Brokers transaction cost of 0.005$/share
Returns a portfolio DataFrame
"""
delta=position.diff()
port = DataFrame(index=price.index)
if isinstance(price,Series): # no need to sum along 1 for series
port['cash'] = (-delta*price).cumsum()
port['stock'] = (position*price)
else: # dealing with DataFrame here
port['cash'] = (-delta*price).sum(axis=1).cumsum()
port['stock'] = (position*price).sum(axis=1)
if ibTransactionCost:
tc = -0.005*position.diff().abs() # basic transaction cost
        tc[(tc>-1) & (tc<0)] = -1 # enforce the 1$ minimum commission per trade
if isinstance(price,DataFrame):
tc = tc.sum(axis=1)
port['tc'] = tc.cumsum()
else:
port['tc'] = 0.
port['total'] = port['stock']+port['cash']+port['tc']
return port
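# Hedged usage sketch (added for illustration; not part of the original
# module). A toy long trade of 100 shares shows the columns of the portfolio
# DataFrame returned by pos2pnl; the prices and dates are invented.
def _demo_pos2pnl():
    idx = pd.date_range('2020-01-01', periods=4)
    price = Series([10.0, 11.0, 12.0, 11.5], index=idx)
    position = Series([0, 100, 100, 0], index=idx)  # buy 100 shares, then flat
    port = pos2pnl(price, position, ibTransactionCost=True)
    return port[['cash', 'stock', 'tc', 'total']]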
def tradeBracket(price,entryBar,maxTradeLength,bracket):
'''
trade a symmetrical bracket on price series, return price delta and exit bar #
Input
------
price : series of price values
entryBar: entry bar number
maxTradeLength : max trade duration in bars
bracket : allowed price deviation
'''
lastBar = min(entryBar+maxTradeLength,len(price)-1)
p = price[entryBar:lastBar]-price[entryBar]
idxOutOfBound = np.nonzero(abs(p)>bracket) # find indices where price comes out of bracket
if idxOutOfBound[0].any(): # found match
priceDelta = p[idxOutOfBound[0][0]]
exitBar = idxOutOfBound[0][0]+entryBar
else: # all in bracket, exiting based on time
priceDelta = p[-1]
exitBar = lastBar
return priceDelta, exitBar
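# Hedged usage sketch (added for illustration; not part of the original
# module). A synthetic random-walk price is traded from bar 10 with a
# symmetrical +/-2.0 bracket and a 50-bar time stop.
def _demo_tradeBracket():
    np.random.seed(2)
    price = np.cumsum(np.random.randn(200))
    priceDelta, exitBar = tradeBracket(price, entryBar=10,
                                       maxTradeLength=50, bracket=2.0)
    return priceDelta, exitBar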
def estimateBeta(priceY,priceX,algo = 'standard'):
'''
    estimate stock Y vs stock X beta. With algo='returns' an iterative linear
    regression is used and outliers outside a 3 sigma boundary are filtered out
    Parameters
    --------
    priceX : price series of x (usually market)
    priceY : price series of y (estimate beta of this price)
    algo : 'standard' (default, covariance of log returns), 'log' or 'returns'
Returns
--------
beta : stockY beta relative to stock X
'''
X = DataFrame({'x':priceX,'y':priceY})
if algo=='returns':
ret = (X/X.shift(1)-1).dropna().values
#print len(ret)
x = ret[:,0]
y = ret[:,1]
# filter high values
low = np.percentile(x,20)
high = np.percentile(x,80)
iValid = (x>low) & (x<high)
x = x[iValid]
y = y[iValid]
iteration = 1
nrOutliers = 1
while iteration < 10 and nrOutliers > 0 :
(a,b) = polyfit(x,y,1)
yf = polyval([a,b],x)
#plot(x,y,'x',x,yf,'r-')
err = yf-y
idxOutlier = abs(err) > 3*np.std(err)
nrOutliers =sum(idxOutlier)
beta = a
#print 'Iteration: %i beta: %.2f outliers: %i' % (iteration,beta, nrOutliers)
x = x[~idxOutlier]
y = y[~idxOutlier]
iteration += 1
elif algo=='log':
x = np.log(X['x'])
y = np.log(X['y'])
(a,b) = polyfit(x,y,1)
beta = a
elif algo=='standard':
ret =np.log(X).diff().dropna()
beta = ret['x'].cov(ret['y'])/ret['x'].var()
else:
raise TypeError("unknown algorithm type, use 'standard', 'log' or 'returns'")
return beta
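# Hedged usage sketch (added for illustration; not part of the original
# module). The stock below is constructed so its log returns are exactly
# 1.5x the market's, so the default 'standard' algorithm should return ~1.5.
def _demo_estimateBeta():
    np.random.seed(3)
    idx = pd.date_range('2020-01-01', periods=500)
    market = Series(100.0 * np.cumprod(1 + 0.01 * np.random.randn(500)), index=idx)
    stock = market ** 1.5
    return estimateBeta(priceY=stock, priceX=market, algo='standard')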
def estimateVolatility(ohlc, N=10, algo='YangZhang'):
"""
Volatility estimation
Possible algorithms: ['YangZhang', 'CC']
"""
cc = np.log(ohlc.close/ohlc.close.shift(1))
if algo == 'YangZhang': # Yang-zhang volatility
ho = np.log(ohlc.high/ohlc.open)
lo = np.log(ohlc.low/ohlc.open)
co = np.log(ohlc.close/ohlc.open)
oc = np.log(ohlc.open/ohlc.close.shift(1))
oc_sq = oc**2
cc_sq = cc**2
rs = ho*(ho-co)+lo*(lo-co)
close_vol = pd.rolling_sum(cc_sq, window=N) * (1.0 / (N - 1.0))
open_vol = pd.rolling_sum(oc_sq, window=N) * (1.0 / (N - 1.0))
window_rs = pd.rolling_sum(rs, window=N) * (1.0 / (N - 1.0))
result = (open_vol + 0.164333 * close_vol + 0.835667 * window_rs).apply(np.sqrt) * np.sqrt(252)
result[:N-1] = np.nan
elif algo == 'CC': # standard close-close estimator
result = np.sqrt(252)*np.sqrt(((pd.rolling_sum(cc**2,N))/N))
else:
raise ValueError('Unknown algo type.')
return result*100
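# Hedged usage sketch (added for illustration; not part of the original
# module). It builds a fake OHLC frame and estimates Yang-Zhang volatility.
# Note that estimateVolatility relies on the legacy pd.rolling_sum API, so
# this assumes an older pandas release.
def _demo_estimateVolatility():
    np.random.seed(4)
    close = 100.0 * np.cumprod(1 + 0.01 * np.random.randn(100))
    ohlc = DataFrame({'open': close * (1 + 0.002 * np.random.randn(100)),
                      'high': close * 1.01,
                      'low': close * 0.99,
                      'close': close})
    return estimateVolatility(ohlc, N=10, algo='YangZhang')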
def rank(current,past):
''' calculate a relative rank 0..1 for a value against series '''
return (current>past).sum()/float(past.count())
def returns(df):
return (df/df.shift(1)-1)
def logReturns(df):
t = np.log(df)
return t-t.shift(1)
def dateTimeToDate(idx):
''' convert datetime index to date '''
dates = []
for dtm in idx:
dates.append(dtm.date())
return dates
def readBiggerScreener(fName):
''' import data from Bigger Capital screener '''
with open(fName,'rb') as f:
reader = csv.reader(f)
rows = [row for row in reader]
header = rows[0]
data = [[] for i in range(len(header))]
for row in rows[1:]:
for i,elm in enumerate(row):
try:
data[i].append(float(elm))
except Exception:
data[i].append(str(elm))
return DataFrame(dict(zip(header,data)),index=Index(range(len(data[0]))))[header]
def sharpe(pnl):
return np.sqrt(250)*pnl.mean()/pnl.std()
def drawdown(s):
"""
calculate max drawdown and duration
Input:
s, price or cumulative pnl curve $
Returns:
    drawdown : vector of drawdown values
duration : vector of drawdown duration
"""
# convert to array if got pandas series, 10x speedup
if isinstance(s,pd.Series):
idx = s.index
s = s.values
returnSeries = True
else:
returnSeries = False
if s.min() < 0: # offset if signal minimum is less than zero
s = s-s.min()
highwatermark = np.zeros(len(s))
drawdown = np.zeros(len(s))
drawdowndur = np.zeros(len(s))
for t in range(1,len(s)):
highwatermark[t] = max(highwatermark[t-1], s[t])
drawdown[t] = (highwatermark[t]-s[t])
drawdowndur[t]= (0 if drawdown[t] == 0 else drawdowndur[t-1]+1)
if returnSeries:
return pd.Series(index=idx,data=drawdown), pd.Series(index=idx,data=drawdowndur)
else:
return drawdown , drawdowndur
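# Hedged usage sketch (added for illustration; not part of the original
# module). It computes the drawdown profile of a toy equity curve and reports
# the deepest drawdown and the longest drawdown duration.
def _demo_drawdown():
    equity = pd.Series([100.0, 105.0, 102.0, 98.0, 101.0, 110.0, 107.0])
    dd, duration = drawdown(equity)
    return dd.max(), duration.max()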
def profitRatio(pnl):
'''
calculate profit ratio as sum(pnl)/drawdown
Input: pnl - daily pnl, Series or DataFrame
'''
def processVector(pnl): # process a single column
s = pnl.fillna(0)
dd = drawdown(s)[0]
p = s.sum()/dd.max()
return p
if isinstance(pnl,Series):
return processVector(pnl)
elif isinstance(pnl,DataFrame):
p = Series(index = pnl.columns)
for col in pnl.columns:
p[col] = processVector(pnl[col])
return p
else:
raise TypeError("Input must be DataFrame or Series, not "+str(type(pnl)))
def candlestick(df,width=0.5, colorup='b', colordown='r'):
''' plot a candlestick chart of a dataframe '''
O = df['open'].values
H = df['high'].values
L = df['low'].values
C = df['close'].values
fig = plt.gcf()
ax = plt.axes()
#ax.hold(True)
X = df.index
#plot high and low
ax.bar(X,height=H-L,bottom=L,width=0.1,color='k')
idxUp = C>O
ax.bar(X[idxUp],height=(C-O)[idxUp],bottom=O[idxUp],width=width,color=colorup)
idxDown = C<=O
ax.bar(X[idxDown],height=(O-C)[idxDown],bottom=C[idxDown],width=width,color=colordown)
try:
fig.autofmt_xdate()
except Exception: # pragma: no cover
pass
ax.grid(True)
#ax.bar(x,height=H-L,bottom=L,width=0.01,color='k')
def datetime2matlab(t):
''' convert datetime timestamp to matlab numeric timestamp '''
mdn = t + dt.timedelta(days = 366)
frac = (t-dt.datetime(t.year,t.month,t.day,0,0,0)).seconds / (24.0 * 60.0 * 60.0)
return mdn.toordinal() + frac
def getDataSources(fName = None):
''' return data sources directories for this machine.
directories are defined in datasources.ini or provided filepath'''
import socket
from ConfigParser import ConfigParser
pcName = socket.gethostname()
p = ConfigParser()
p.optionxform = str
if fName is None:
fName = 'datasources.ini'
p.read(fName)
if pcName not in p.sections():
raise NameError('Host name section %s not found in file %s' %(pcName,fName))
dataSources = {}
for option in p.options(pcName):
dataSources[option] = p.get(pcName,option)
return dataSources
if __name__ == '__main__':
df = DataFrame({'open':[1,2,3],'high':[5,6,7],'low':[-2,-1,0],'close':[2,1,4]})
plt.clf()
candlestick(df)
|
bsd-3-clause
|
theoryno3/scikit-learn
|
sklearn/utils/tests/test_fixes.py
|
281
|
1829
|
# Authors: Gael Varoquaux <[email protected]>
# Justin Vincent
# Lars Buitinck
# License: BSD 3 clause
import numpy as np
from nose.tools import assert_equal
from nose.tools import assert_false
from nose.tools import assert_true
from numpy.testing import (assert_almost_equal,
assert_array_almost_equal)
from sklearn.utils.fixes import divide, expit
from sklearn.utils.fixes import astype
def test_expit():
# Check numerical stability of expit (logistic function).
# Simulate our previous Cython implementation, based on
#http://fa.bianp.net/blog/2013/numerical-optimizers-for-logistic-regression
assert_almost_equal(expit(1000.), 1. / (1. + np.exp(-1000.)), decimal=16)
assert_almost_equal(expit(-1000.), np.exp(-1000.) / (1. + np.exp(-1000.)),
decimal=16)
x = np.arange(10)
out = np.zeros_like(x, dtype=np.float32)
assert_array_almost_equal(expit(x), expit(x, out=out))
def test_divide():
assert_equal(divide(.6, 1), .600000000000)
def test_astype_copy_memory():
a_int32 = np.ones(3, np.int32)
# Check that dtype conversion works
b_float32 = astype(a_int32, dtype=np.float32, copy=False)
assert_equal(b_float32.dtype, np.float32)
# Changing dtype forces a copy even if copy=False
assert_false(np.may_share_memory(b_float32, a_int32))
# Check that copy can be skipped if requested dtype match
c_int32 = astype(a_int32, dtype=np.int32, copy=False)
assert_true(c_int32 is a_int32)
# Check that copy can be forced, and is the case by default:
d_int32 = astype(a_int32, dtype=np.int32, copy=True)
assert_false(np.may_share_memory(d_int32, a_int32))
e_int32 = astype(a_int32, dtype=np.int32)
assert_false(np.may_share_memory(e_int32, a_int32))
|
bsd-3-clause
|
lthurlow/Network-Grapher
|
proj/external/matplotlib-1.2.1/examples/pylab_examples/line_collection.py
|
12
|
1511
|
import matplotlib.pyplot as plt
from matplotlib.collections import LineCollection
from matplotlib.colors import colorConverter
import numpy as np
# In order to efficiently plot many lines in a single set of axes,
# Matplotlib has the ability to add the lines all at once. Here is a
# simple example showing how it is done.
x = np.arange(100)
# Here are many sets of y to plot vs x
ys = x[:50, np.newaxis] + x[np.newaxis, :]
segs = np.zeros((50, 100, 2), float)
segs[:,:,1] = ys
segs[:,:,0] = x
# Mask some values to test masked array support:
segs = np.ma.masked_where((segs > 50) & (segs < 60), segs)
# We need to set the plot limits.
ax = plt.axes()
ax.set_xlim(x.min(), x.max())
ax.set_ylim(ys.min(), ys.max())
# colors is sequence of rgba tuples
# linestyle is a string or dash tuple. Legal string values are
# solid|dashed|dashdot|dotted. The dash tuple is (offset, onoffseq)
# where onoffseq is an even length tuple of on and off ink in points.
# If linestyle is omitted, 'solid' is used
# See matplotlib.collections.LineCollection for more information
line_segments = LineCollection(segs,
linewidths = (0.5,1,1.5,2),
colors = [colorConverter.to_rgba(i) \
for i in ('b','g','r','c','m','y','k')],
linestyle = 'solid')
ax.add_collection(line_segments)
ax.set_title('Line collection with masked arrays')
plt.show()
|
mit
|
edlectrico/twitter_nltk_volkswagen
|
sentiment_train.py
|
2
|
4995
|
import nltk
import random
#from nltk.corpus import movie_reviews
from nltk.classify.scikitlearn import SklearnClassifier
import pickle
from sklearn.naive_bayes import MultinomialNB, BernoulliNB
from sklearn.linear_model import LogisticRegression, SGDClassifier
from sklearn.svm import SVC, LinearSVC, NuSVC
from nltk.classify import ClassifierI
from statistics import mode
from nltk.tokenize import word_tokenize
class VoteClassifier(ClassifierI):
def __init__(self, *classifiers):
self._classifiers = classifiers
def classify(self, features):
votes = []
for c in self._classifiers:
v = c.classify(features)
votes.append(v)
return mode(votes)
def confidence(self, features):
votes = []
for c in self._classifiers:
v = c.classify(features)
votes.append(v)
choice_votes = votes.count(mode(votes))
conf = choice_votes / len(votes)
return conf
short_pos = open("data/positive.txt", encoding = "ISO-8859-1").read()
short_neg = open("data/negative.txt", encoding = "ISO-8859-1").read()
# move this up here
all_words = []
documents = []
# j is adjective, r is adverb, and v is verb
#allowed_word_types = ["J","R","V"]
allowed_word_types = ["J"]
for p in short_pos.split('\n'):
documents.append( (p, "pos") )
words = word_tokenize(p)
pos = nltk.pos_tag(words)
for w in pos:
if w[1][0] in allowed_word_types:
all_words.append(w[0].lower())
for p in short_neg.split('\n'):
documents.append( (p, "neg") )
words = word_tokenize(p)
pos = nltk.pos_tag(words)
for w in pos:
if w[1][0] in allowed_word_types:
all_words.append(w[0].lower())
save_documents = open("pickled_algos/documents.pickle","wb")
pickle.dump(documents, save_documents)
save_documents.close()
all_words = nltk.FreqDist(all_words)
word_features = list(all_words.keys())[:5000]
save_word_features = open("pickled_algos/word_features5k.pickle","wb")
pickle.dump(word_features, save_word_features)
save_word_features.close()
def find_features(document):
words = word_tokenize(document)
features = {}
for w in word_features:
features[w] = (w in words)
return features
featuresets = [(find_features(rev), category) for (rev, category) in documents]
save_featuresets = open("pickled_algos/featuresets.pickle","wb")
pickle.dump(featuresets, save_featuresets)
save_featuresets.close()
random.shuffle(featuresets)
print(len(featuresets))
testing_set = featuresets[10000:]
training_set = featuresets[:10000]
classifier = nltk.NaiveBayesClassifier.train(training_set)
print("Original Naive Bayes Algo accuracy percent:", (nltk.classify.accuracy(classifier, testing_set))*100)
classifier.show_most_informative_features(15)
###############
save_classifier = open("pickled_algos/originalnaivebayes5k.pickle","wb")
pickle.dump(classifier, save_classifier)
save_classifier.close()
MNB_classifier = SklearnClassifier(MultinomialNB())
MNB_classifier.train(training_set)
print("MNB_classifier accuracy percent:", (nltk.classify.accuracy(MNB_classifier, testing_set))*100)
save_classifier = open("pickled_algos/MNB_classifier5k.pickle","wb")
pickle.dump(MNB_classifier, save_classifier)
save_classifier.close()
BernoulliNB_classifier = SklearnClassifier(BernoulliNB())
BernoulliNB_classifier.train(training_set)
print("BernoulliNB_classifier accuracy percent:", (nltk.classify.accuracy(BernoulliNB_classifier, testing_set))*100)
save_classifier = open("pickled_algos/BernoulliNB_classifier5k.pickle","wb")
pickle.dump(BernoulliNB_classifier, save_classifier)
save_classifier.close()
LogisticRegression_classifier = SklearnClassifier(LogisticRegression())
LogisticRegression_classifier.train(training_set)
print("LogisticRegression_classifier accuracy percent:", (nltk.classify.accuracy(LogisticRegression_classifier, testing_set))*100)
save_classifier = open("pickled_algos/LogisticRegression_classifier5k.pickle","wb")
pickle.dump(LogisticRegression_classifier, save_classifier)
save_classifier.close()
LinearSVC_classifier = SklearnClassifier(LinearSVC())
LinearSVC_classifier.train(training_set)
print("LinearSVC_classifier accuracy percent:", (nltk.classify.accuracy(LinearSVC_classifier, testing_set))*100)
save_classifier = open("pickled_algos/LinearSVC_classifier5k.pickle","wb")
pickle.dump(LinearSVC_classifier, save_classifier)
save_classifier.close()
##NuSVC_classifier = SklearnClassifier(NuSVC())
##NuSVC_classifier.train(training_set)
##print("NuSVC_classifier accuracy percent:", (nltk.classify.accuracy(NuSVC_classifier, testing_set))*100)
SGDC_classifier = SklearnClassifier(SGDClassifier())
SGDC_classifier.train(training_set)
print("SGDClassifier accuracy percent:",nltk.classify.accuracy(SGDC_classifier, testing_set)*100)
save_classifier = open("pickled_algos/SGDC_classifier5k.pickle","wb")
pickle.dump(SGDC_classifier, save_classifier)
save_classifier.close()
|
apache-2.0
|
compmem/ptsa
|
ptsa/plotting/topo.py
|
1
|
11554
|
#emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*-
#ex: set sts=4 ts=4 sw=4 et:
### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ##
#
# See the COPYING file distributed along with the PTSA package for the
# copyright and license terms.
#
### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ##
import numpy as np
import matplotlib.pyplot as plt
from ptsa.helper import pol2cart, cart2pol, deg2rad
from scipy.interpolate import griddata
default_head_props = {'head_linewidth': 3,
'head_linecolor': 'black',
'nose_linewidth': 2,
'ear_linewidth': 2,
}
default_label_props = {'ha': 'center',
'va': 'center'}
default_sensor_props = {'marker': 'o',
'c': 'k',
's': 8}
default_contour_props = {'linewidths': 0,
'linestyle': '-',
'colors': 'black',}
def topoplot(values=None, labels=None, sensors=None, axes=None,
center=(0,0), nose_dir=0., radius=0.5,
head_props=None, sensor_props=None,
label_props=None,
contours=15, contour_props=None,
resolution=400, axis_props='off',
plot_mask='circular', plot_radius_buffer=.2,
**kwargs):
"""
Plot a topographic map of the scalp in a 2-D circular view
(looking down at the top of the head).
Parameters
----------
values : {None, array-like}, optional
Values to plot. There must be one value for each electrode.
labels : {None, array-like}, optional
Electrode labels/names to plot. There must be one for each electrode.
sensors : {None, tuple of floats}, optional
Polar coordinates of the sensor locations. If not None,
sensors[0] specifies the angle (in degrees) and sensors[1]
specifies the radius.
axes : {matplotlib.axes}, optional
Axes to which the topoplot should be added.
center : {tuple of floats}, optional
x and y coordinates of the center of the head.
nose_dir : {float}, optional
Angle (in degrees) where the nose is pointing. 0 is
up, 90 is left, 180 is down, 270 is right, etc.
radius : {float}, optional
Radius of the head.
head_props : dict
Dictionary of head properties. See default_head_props for choices.
sensor_props : dict
Dictionary of sensor properties. See options for scatter in mpl and
default_sensor_props.
label_props : dict
Dictionary of sensor label properties. See options for text in mpl
and default_label_props.
contours : {int}, optional
Number of contours.
contour_props : dict
Dictionary of contour properties. See options for contour in mpl and
default_contour_props.
resolution : {int}, optional
Resolution of the interpolated grid. Higher numbers give
smoother edges of the plot, but increase memory and
computational demands.
axis_props : {str}, optional
Axis properties.
plot_mask : {str}, optional
        The mask around the plotted values. 'linear' connects the outer
electrodes with straight lines, 'circular' draws a circle
around the outer electrodes (see plot_radius_buffer).
plot_radius_buffer : float, optional
Buffer outside the electrode circumference for generating
interpolated values with a circular mask.
        This should be greater than zero to avoid interpolation errors.
**kwargs : optional
Optional keyword arguments to be passed on to contourf.
"""
if axes is not None: # axes are given
a=axes
else: # a new subplot is created
a=plt.subplot(1, 1, 1, aspect='equal')
a.axis(axis_props)
if True: # head should be plotted
# deal with the head props
hprops = default_head_props.copy()
if not head_props is None:
hprops.update(head_props)
# Set up head
head = plt.Circle(center, radius, fill=False,
linewidth=hprops['head_linewidth'],
edgecolor=hprops['head_linecolor'],
axes=a)
# Nose:
nose_width = 0.18*radius
# Distance from the center of the head to the point where the
# nose touches the outline of the head:
nose_dist = np.cos(np.arcsin((nose_width/2.)/radius))*radius
# Distance from the center of the head to the tip of the nose:
nose_tip_dist = 1.15*radius
# Convert to polar coordinates for rotating:
nose_polar_angle, nose_polar_radius = cart2pol(
np.array([-nose_width/2, 0, nose_width/2]),
np.array([nose_dist, nose_tip_dist, nose_dist]))
nose_polar_angle = nose_polar_angle + deg2rad(nose_dir)
# And back to cartesian coordinates for plotting:
nose_x, nose_y = pol2cart(nose_polar_angle, nose_polar_radius)
# Move nose with head:
nose_x = nose_x + center[0]
nose_y = nose_y + center[1]
nose = plt.Line2D(nose_x, nose_y,
solid_joinstyle='round', solid_capstyle='round',
color=hprops['head_linecolor'],
linewidth=hprops['nose_linewidth'],
axes=a)
# Ears:
q = .04 # ear lengthening
ear_x = np.array(
[.497-.005, .510,.518, .5299, .5419, .54, .547,
.532, .510, .489-.005])*(radius/0.5)
ear_y = np.array(
[q+.0555, q+.0775, q+.0783, q+.0746, q+.0555,
-.0055, -.0932, -.1313, -.1384, -.1199])*(radius/0.5)
# Convert to polar coordinates for rotating:
rightear_polar_angle, rightear_polar_radius = cart2pol(ear_x, ear_y)
leftear_polar_angle, leftear_polar_radius = cart2pol(-ear_x, ear_y)
rightear_polar_angle = rightear_polar_angle+deg2rad(nose_dir)
leftear_polar_angle = leftear_polar_angle+deg2rad(nose_dir)
# And back to cartesian coordinates for plotting:
rightear_x, rightear_y = pol2cart(rightear_polar_angle,
rightear_polar_radius)
leftear_x, leftear_y = pol2cart(leftear_polar_angle,
leftear_polar_radius)
# Move ears with head:
rightear_x = rightear_x + center[0]
rightear_y = rightear_y + center[1]
leftear_x = leftear_x + center[0]
leftear_y = leftear_y + center[1]
ear_right = plt.Line2D(rightear_x, rightear_y,
color=hprops['head_linecolor'],
linewidth=hprops['ear_linewidth'],
solid_joinstyle='round',
solid_capstyle='round',
axes=a)
ear_left = plt.Line2D(leftear_x, leftear_y,
color=hprops['head_linecolor'],
linewidth=hprops['ear_linewidth'],
solid_joinstyle='round',
solid_capstyle='round',
axes=a)
a.add_artist(head)
a.add_artist(nose)
a.add_artist(ear_right)
a.add_artist(ear_left)
if sensors is None:
if axes is None:
a.set_xlim(-radius*1.2+center[0], radius*1.2+center[0])
a.set_ylim(-radius*1.2+center[1], radius*1.2+center[1])
return("No sensor locations specified!")
# Convert & rotate sensor locations:
angles = -sensors[0]+90
angles = angles+nose_dir
angles = deg2rad(angles)
radii = sensors[1]
# expand or shrink electrode locations with radius of head:
radii = radii*(radius/0.5)
# plotting radius is determined by largest sensor radius:
plot_radius = max(radii)*(1.0+plot_radius_buffer)
# convert electrode locations to cartesian coordinates for plotting:
x, y = pol2cart(angles, radii)
x = x + center[0]
y = y + center[1]
if True: # plot electrodes
sprops = default_sensor_props.copy()
if not sensor_props is None:
sprops.update(sensor_props)
#a.plot(x,y,markerfacecolor=colors[1],marker='o',linestyle='')
a.scatter(x, y, zorder=10, **sprops)
if not labels is None:
lprops = default_label_props.copy()
if not label_props is None:
lprops.update(label_props)
for i in range(len(labels)):
a.text(x[i],y[i],labels[i],**lprops)
if values is None:
return #('No values to plot specified!')
if np.size(values) != np.size(sensors,1):
        return('Number of values to plot is different from number of sensors!'+
               '\nNo values have been plotted!')
# set the values
z = values
# resolution determines the number of interpolated points per unit
nx = round(resolution*plot_radius)
ny = round(resolution*plot_radius)
# now set up the grid:
xi, yi = np.meshgrid(np.linspace(-plot_radius, plot_radius,nx),
np.linspace(-plot_radius, plot_radius,ny))
# and move the center to coincide with the center of the head:
xi = xi + center[0]
yi = yi + center[1]
    # interpolate points:
if plot_mask=='linear':
# masked = True means that no extrapolation outside the
        # electrode boundaries is made; this effectively creates a mask
# with a linear boundary (connecting the outer electrode
# locations)
#zi = griddata(x,y,z,xi,yi,masked=True)
#zi = griddata(x,y,z,xi,yi)
pass
elif plot_mask=='circular':
        npts = int(np.mean((nx, ny)) * 2)  # linspace expects an integer count
t = np.linspace(0, 2*np.pi,npts)[:-1]
x = np.r_[x, np.cos(t)*plot_radius]
y = np.r_[y, np.sin(t)*plot_radius]
z = np.r_[z, np.zeros(len(t))]
else:
# we need a custom mask:
#zi = griddata(x,y,z,xi,yi,ext=1,masked=False)
#zi = griddata(x,y,z,xi,yi)
# zi = griddata((x,y),z,(xi,yi),method='cubic')
# if plot_mask=='circular':
# # the interpolated array doesn't know about its position
# # in space and hence we need to subtract head center from
# # xi & xi to calculate the mask
# mask = (np.sqrt(np.power(xi-center[0],2) +
# np.power(yi-center[1],2)) > plot_radius)
# zi[mask] = 0
# zi[np.isnan(zi)] = 0.0
# zi[mask] = np.nan
# other masks may be added here and can be defined as shown
# for the circular mask. All other plot_mask values result in
# no mask which results in showing interpolated values for the
# square surrounding the head.
pass
# calc the grid
zi = griddata((x, y), z, (xi, yi), method='cubic')
# # If no colormap is specified, use default colormap:
# if cmap is None:
# cmap = plt.get_cmap()
# make contours
cprops = default_contour_props.copy()
if not contour_props is None:
cprops.update(contour_props)
if np.any(cprops['linewidths'] > 0):
a.contour(xi, yi, zi, contours, **cprops)
    # make contour color patches:
# a.contourf(xi, yi, zi, contours, cmap=cmap, extend='both')
a.contourf(xi, yi, zi, contours, extend='both', **kwargs)
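# Hedged usage sketch (added for illustration; not part of the original
# module). It plots three invented sensor values on a head outline; the
# sensor angles, radii and labels below are made up for the example.
def _demo_topoplot():
    angles = np.array([0.0, 120.0, 240.0])   # sensor angles in degrees
    radii = np.array([0.3, 0.3, 0.3])        # polar radius of each sensor
    values = np.array([1.0, -0.5, 0.2])
    topoplot(values=values, labels=['A', 'B', 'C'], sensors=(angles, radii))
    plt.show()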
|
gpl-3.0
|
DonBeo/scikit-learn
|
sklearn/cluster/bicluster.py
|
38
|
19313
|
"""Spectral biclustering algorithms.
Authors : Kemal Eren
License: BSD 3 clause
"""
from abc import ABCMeta, abstractmethod
import numpy as np
from scipy.sparse import dia_matrix
from scipy.sparse import issparse
from . import KMeans, MiniBatchKMeans
from ..base import BaseEstimator, BiclusterMixin
from ..externals import six
from ..utils.arpack import eigsh, svds
from ..utils.extmath import (make_nonnegative, norm, randomized_svd,
safe_sparse_dot)
from ..utils.validation import assert_all_finite, check_array
__all__ = ['SpectralCoclustering',
'SpectralBiclustering']
def _scale_normalize(X):
"""Normalize ``X`` by scaling rows and columns independently.
Returns the normalized matrix and the row and column scaling
factors.
"""
X = make_nonnegative(X)
row_diag = np.asarray(1.0 / np.sqrt(X.sum(axis=1))).squeeze()
col_diag = np.asarray(1.0 / np.sqrt(X.sum(axis=0))).squeeze()
row_diag = np.where(np.isnan(row_diag), 0, row_diag)
col_diag = np.where(np.isnan(col_diag), 0, col_diag)
if issparse(X):
n_rows, n_cols = X.shape
r = dia_matrix((row_diag, [0]), shape=(n_rows, n_rows))
c = dia_matrix((col_diag, [0]), shape=(n_cols, n_cols))
an = r * X * c
else:
an = row_diag[:, np.newaxis] * X * col_diag
return an, row_diag, col_diag
def _bistochastic_normalize(X, max_iter=1000, tol=1e-5):
"""Normalize rows and columns of ``X`` simultaneously so that all
rows sum to one constant and all columns sum to a different
constant.
"""
# According to paper, this can also be done more efficiently with
# deviation reduction and balancing algorithms.
X = make_nonnegative(X)
X_scaled = X
dist = None
for _ in range(max_iter):
X_new, _, _ = _scale_normalize(X_scaled)
if issparse(X):
dist = norm(X_scaled.data - X.data)
else:
dist = norm(X_scaled - X_new)
X_scaled = X_new
if dist is not None and dist < tol:
break
return X_scaled
def _log_normalize(X):
"""Normalize ``X`` according to Kluger's log-interactions scheme."""
X = make_nonnegative(X, min_value=1)
if issparse(X):
raise ValueError("Cannot compute log of a sparse matrix,"
" because log(x) diverges to -infinity as x"
" goes to 0.")
L = np.log(X)
row_avg = L.mean(axis=1)[:, np.newaxis]
col_avg = L.mean(axis=0)
avg = L.mean()
return L - row_avg - col_avg + avg
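# Hedged sketch (added for illustration; not part of the original module):
# after the log-interaction normalization every row and column of the result
# has (approximately) zero mean.
def _demo_log_normalize():
    rng = np.random.RandomState(0)
    X = rng.rand(5, 4) + 1.0
    L = _log_normalize(X)
    return np.allclose(L.mean(axis=1), 0) and np.allclose(L.mean(axis=0), 0)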
class BaseSpectral(six.with_metaclass(ABCMeta, BaseEstimator,
BiclusterMixin)):
"""Base class for spectral biclustering."""
@abstractmethod
def __init__(self, n_clusters=3, svd_method="randomized",
n_svd_vecs=None, mini_batch=False, init="k-means++",
n_init=10, n_jobs=1, random_state=None):
self.n_clusters = n_clusters
self.svd_method = svd_method
self.n_svd_vecs = n_svd_vecs
self.mini_batch = mini_batch
self.init = init
self.n_init = n_init
self.n_jobs = n_jobs
self.random_state = random_state
def _check_parameters(self):
legal_svd_methods = ('randomized', 'arpack')
if self.svd_method not in legal_svd_methods:
raise ValueError("Unknown SVD method: '{0}'. svd_method must be"
" one of {1}.".format(self.svd_method,
legal_svd_methods))
def fit(self, X):
"""Creates a biclustering for X.
Parameters
----------
X : array-like, shape (n_samples, n_features)
"""
X = check_array(X, accept_sparse='csr', dtype=np.float64)
self._check_parameters()
self._fit(X)
def _svd(self, array, n_components, n_discard):
"""Returns first `n_components` left and right singular
vectors u and v, discarding the first `n_discard`.
"""
if self.svd_method == 'randomized':
kwargs = {}
if self.n_svd_vecs is not None:
kwargs['n_oversamples'] = self.n_svd_vecs
u, _, vt = randomized_svd(array, n_components,
random_state=self.random_state,
**kwargs)
elif self.svd_method == 'arpack':
u, _, vt = svds(array, k=n_components, ncv=self.n_svd_vecs)
if np.any(np.isnan(vt)):
# some eigenvalues of A * A.T are negative, causing
# sqrt() to be np.nan. This causes some vectors in vt
# to be np.nan.
_, v = eigsh(safe_sparse_dot(array.T, array),
ncv=self.n_svd_vecs)
vt = v.T
if np.any(np.isnan(u)):
_, u = eigsh(safe_sparse_dot(array, array.T),
ncv=self.n_svd_vecs)
assert_all_finite(u)
assert_all_finite(vt)
u = u[:, n_discard:]
vt = vt[n_discard:]
return u, vt.T
def _k_means(self, data, n_clusters):
if self.mini_batch:
model = MiniBatchKMeans(n_clusters,
init=self.init,
n_init=self.n_init,
random_state=self.random_state)
else:
model = KMeans(n_clusters, init=self.init,
n_init=self.n_init, n_jobs=self.n_jobs,
random_state=self.random_state)
model.fit(data)
centroid = model.cluster_centers_
labels = model.labels_
return centroid, labels
class SpectralCoclustering(BaseSpectral):
"""Spectral Co-Clustering algorithm (Dhillon, 2001).
Clusters rows and columns of an array `X` to solve the relaxed
normalized cut of the bipartite graph created from `X` as follows:
the edge between row vertex `i` and column vertex `j` has weight
`X[i, j]`.
The resulting bicluster structure is block-diagonal, since each
row and each column belongs to exactly one bicluster.
Supports sparse matrices, as long as they are nonnegative.
Parameters
----------
n_clusters : integer, optional, default: 3
The number of biclusters to find.
svd_method : string, optional, default: 'randomized'
Selects the algorithm for finding singular vectors. May be
'randomized' or 'arpack'. If 'randomized', use
:func:`sklearn.utils.extmath.randomized_svd`, which may be faster
for large matrices. If 'arpack', use
:func:`sklearn.utils.arpack.svds`, which is more accurate, but
possibly slower in some cases.
n_svd_vecs : int, optional, default: None
Number of vectors to use in calculating the SVD. Corresponds
to `ncv` when `svd_method=arpack` and `n_oversamples` when
        `svd_method` is 'randomized'.
mini_batch : bool, optional, default: False
Whether to use mini-batch k-means, which is faster but may get
different results.
init : {'k-means++', 'random' or an ndarray}
Method for initialization of k-means algorithm; defaults to
'k-means++'.
n_init : int, optional, default: 10
Number of random initializations that are tried with the
k-means algorithm.
If mini-batch k-means is used, the best initialization is
chosen and the algorithm runs once. Otherwise, the algorithm
is run for each initialization and the best solution chosen.
n_jobs : int, optional, default: 1
The number of jobs to use for the computation. This works by breaking
down the pairwise matrix into n_jobs even slices and computing them in
parallel.
If -1 all CPUs are used. If 1 is given, no parallel computing code is
used at all, which is useful for debugging. For n_jobs below -1,
(n_cpus + 1 + n_jobs) are used. Thus for n_jobs = -2, all CPUs but one
are used.
random_state : int seed, RandomState instance, or None (default)
A pseudo random number generator used by the K-Means
initialization.
Attributes
----------
rows_ : array-like, shape (n_row_clusters, n_rows)
Results of the clustering. `rows[i, r]` is True if
cluster `i` contains row `r`. Available only after calling ``fit``.
columns_ : array-like, shape (n_column_clusters, n_columns)
Results of the clustering, like `rows`.
row_labels_ : array-like, shape (n_rows,)
The bicluster label of each row.
column_labels_ : array-like, shape (n_cols,)
The bicluster label of each column.
References
----------
* Dhillon, Inderjit S, 2001. `Co-clustering documents and words using
bipartite spectral graph partitioning
<http://citeseerx.ist.psu.edu/viewdoc/summary?doi=10.1.1.140.3011>`__.
"""
def __init__(self, n_clusters=3, svd_method='randomized',
n_svd_vecs=None, mini_batch=False, init='k-means++',
n_init=10, n_jobs=1, random_state=None):
super(SpectralCoclustering, self).__init__(n_clusters,
svd_method,
n_svd_vecs,
mini_batch,
init,
n_init,
n_jobs,
random_state)
def _fit(self, X):
normalized_data, row_diag, col_diag = _scale_normalize(X)
n_sv = 1 + int(np.ceil(np.log2(self.n_clusters)))
u, v = self._svd(normalized_data, n_sv, n_discard=1)
z = np.vstack((row_diag[:, np.newaxis] * u,
col_diag[:, np.newaxis] * v))
_, labels = self._k_means(z, self.n_clusters)
n_rows = X.shape[0]
self.row_labels_ = labels[:n_rows]
self.column_labels_ = labels[n_rows:]
self.rows_ = np.vstack(self.row_labels_ == c
for c in range(self.n_clusters))
self.columns_ = np.vstack(self.column_labels_ == c
for c in range(self.n_clusters))
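# Hedged usage sketch (added for illustration; not part of the original
# module). It co-clusters a small matrix with two blocks of large values on
# the diagonal; kept inside a function so importing the module runs nothing.
def _demo_spectral_coclustering():
    rng = np.random.RandomState(0)
    X = rng.rand(20, 20)
    X[:10, :10] += 5.0   # first block
    X[10:, 10:] += 5.0   # second block
    model = SpectralCoclustering(n_clusters=2, random_state=0)
    model.fit(X)
    return model.row_labels_, model.column_labels_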
class SpectralBiclustering(BaseSpectral):
"""Spectral biclustering (Kluger, 2003).
Partitions rows and columns under the assumption that the data has
an underlying checkerboard structure. For instance, if there are
two row partitions and three column partitions, each row will
belong to three biclusters, and each column will belong to two
biclusters. The outer product of the corresponding row and column
label vectors gives this checkerboard structure.
Parameters
----------
n_clusters : integer or tuple (n_row_clusters, n_column_clusters)
The number of row and column clusters in the checkerboard
structure.
method : string, optional, default: 'bistochastic'
Method of normalizing and converting singular vectors into
biclusters. May be one of 'scale', 'bistochastic', or 'log'.
The authors recommend using 'log'. If the data is sparse,
however, log normalization will not work, which is why the
default is 'bistochastic'. CAUTION: if `method='log'`, the
data must not be sparse.
n_components : integer, optional, default: 6
Number of singular vectors to check.
n_best : integer, optional, default: 3
Number of best singular vectors to which to project the data
for clustering.
svd_method : string, optional, default: 'randomized'
Selects the algorithm for finding singular vectors. May be
'randomized' or 'arpack'. If 'randomized', uses
`sklearn.utils.extmath.randomized_svd`, which may be faster
for large matrices. If 'arpack', uses
`sklearn.utils.arpack.svds`, which is more accurate, but
possibly slower in some cases.
n_svd_vecs : int, optional, default: None
Number of vectors to use in calculating the SVD. Corresponds
to `ncv` when `svd_method=arpack` and `n_oversamples` when
        `svd_method` is 'randomized'.
mini_batch : bool, optional, default: False
Whether to use mini-batch k-means, which is faster but may get
different results.
init : {'k-means++', 'random' or an ndarray}
Method for initialization of k-means algorithm; defaults to
'k-means++'.
n_init : int, optional, default: 10
Number of random initializations that are tried with the
k-means algorithm.
If mini-batch k-means is used, the best initialization is
chosen and the algorithm runs once. Otherwise, the algorithm
is run for each initialization and the best solution chosen.
n_jobs : int, optional, default: 1
The number of jobs to use for the computation. This works by breaking
down the pairwise matrix into n_jobs even slices and computing them in
parallel.
If -1 all CPUs are used. If 1 is given, no parallel computing code is
used at all, which is useful for debugging. For n_jobs below -1,
(n_cpus + 1 + n_jobs) are used. Thus for n_jobs = -2, all CPUs but one
are used.
random_state : int seed, RandomState instance, or None (default)
A pseudo random number generator used by the K-Means
initialization.
Attributes
----------
rows_ : array-like, shape (n_row_clusters, n_rows)
Results of the clustering. `rows[i, r]` is True if
cluster `i` contains row `r`. Available only after calling ``fit``.
columns_ : array-like, shape (n_column_clusters, n_columns)
Results of the clustering, like `rows`.
row_labels_ : array-like, shape (n_rows,)
Row partition labels.
column_labels_ : array-like, shape (n_cols,)
Column partition labels.
References
----------
* Kluger, Yuval, et. al., 2003. `Spectral biclustering of microarray
data: coclustering genes and conditions
<http://citeseerx.ist.psu.edu/viewdoc/summary?doi=10.1.1.135.1608>`__.
"""
def __init__(self, n_clusters=3, method='bistochastic',
n_components=6, n_best=3, svd_method='randomized',
n_svd_vecs=None, mini_batch=False, init='k-means++',
n_init=10, n_jobs=1, random_state=None):
super(SpectralBiclustering, self).__init__(n_clusters,
svd_method,
n_svd_vecs,
mini_batch,
init,
n_init,
n_jobs,
random_state)
self.method = method
self.n_components = n_components
self.n_best = n_best
def _check_parameters(self):
super(SpectralBiclustering, self)._check_parameters()
legal_methods = ('bistochastic', 'scale', 'log')
if self.method not in legal_methods:
raise ValueError("Unknown method: '{0}'. method must be"
" one of {1}.".format(self.method, legal_methods))
try:
int(self.n_clusters)
except TypeError:
try:
r, c = self.n_clusters
int(r)
int(c)
except (ValueError, TypeError):
raise ValueError("Incorrect parameter n_clusters has value:"
" {}. It should either be a single integer"
" or an iterable with two integers:"
" (n_row_clusters, n_column_clusters)")
if self.n_components < 1:
raise ValueError("Parameter n_components must be greater than 0,"
" but its value is {}".format(self.n_components))
if self.n_best < 1:
raise ValueError("Parameter n_best must be greater than 0,"
" but its value is {}".format(self.n_best))
if self.n_best > self.n_components:
raise ValueError("n_best cannot be larger than"
" n_components, but {} > {}"
"".format(self.n_best, self.n_components))
def _fit(self, X):
n_sv = self.n_components
if self.method == 'bistochastic':
normalized_data = _bistochastic_normalize(X)
n_sv += 1
elif self.method == 'scale':
normalized_data, _, _ = _scale_normalize(X)
n_sv += 1
elif self.method == 'log':
normalized_data = _log_normalize(X)
n_discard = 0 if self.method == 'log' else 1
u, v = self._svd(normalized_data, n_sv, n_discard)
ut = u.T
vt = v.T
try:
n_row_clusters, n_col_clusters = self.n_clusters
except TypeError:
n_row_clusters = n_col_clusters = self.n_clusters
best_ut = self._fit_best_piecewise(ut, self.n_best,
n_row_clusters)
best_vt = self._fit_best_piecewise(vt, self.n_best,
n_col_clusters)
self.row_labels_ = self._project_and_cluster(X, best_vt.T,
n_row_clusters)
self.column_labels_ = self._project_and_cluster(X.T, best_ut.T,
n_col_clusters)
self.rows_ = np.vstack(self.row_labels_ == label
for label in range(n_row_clusters)
for _ in range(n_col_clusters))
self.columns_ = np.vstack(self.column_labels_ == label
for _ in range(n_row_clusters)
for label in range(n_col_clusters))
def _fit_best_piecewise(self, vectors, n_best, n_clusters):
"""Find the ``n_best`` vectors that are best approximated by piecewise
constant vectors.
The piecewise vectors are found by k-means; the best is chosen
according to Euclidean distance.
"""
def make_piecewise(v):
centroid, labels = self._k_means(v.reshape(-1, 1), n_clusters)
return centroid[labels].ravel()
piecewise_vectors = np.apply_along_axis(make_piecewise,
axis=1, arr=vectors)
dists = np.apply_along_axis(norm, axis=1,
arr=(vectors - piecewise_vectors))
result = vectors[np.argsort(dists)[:n_best]]
return result
def _project_and_cluster(self, data, vectors, n_clusters):
"""Project ``data`` to ``vectors`` and cluster the result."""
projected = safe_sparse_dot(data, vectors)
_, labels = self._k_means(projected, n_clusters)
return labels
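# Hedged usage sketch (added for illustration; not part of the original
# module). It fits SpectralBiclustering to a matrix with a 2x3 checkerboard
# mean structure and returns the recovered row/column partition labels.
def _demo_spectral_biclustering():
    rng = np.random.RandomState(0)
    row_means = np.repeat(np.array([0.0, 5.0]), 15)[:, np.newaxis]
    col_means = np.repeat(np.array([0.0, 3.0, 6.0]), 10)[np.newaxis, :]
    X = row_means + col_means + rng.rand(30, 30)
    model = SpectralBiclustering(n_clusters=(2, 3), random_state=0)
    model.fit(X)
    return model.row_labels_, model.column_labels_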
|
bsd-3-clause
|
ShaperTools/openhtf
|
setup.py
|
1
|
7098
|
# Copyright 2014 Google Inc. All Rights Reserved.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Setup script for OpenHTF."""
import errno
import glob
import os
import platform
import subprocess
import sys
from distutils.command.build import build
from distutils.command.clean import clean
from distutils.cmd import Command
from setuptools import find_packages
from setuptools import setup
from setuptools.command.test import test
class CleanCommand(clean):
"""Custom logic for the clean command."""
def run(self):
clean.run(self)
targets = [
'./dist',
'./*.egg-info',
'./openhtf/output/proto/*_pb2.py',
'./openhtf/**/*.pyc',
]
os.system('shopt -s globstar; rm -vrf %s' % ' '.join(targets))
class BuildProtoCommand(Command):
"""Custom setup command to build protocol buffers."""
description = 'Builds the proto files into python files.'
user_options = [('protoc=', None, 'Path to the protoc compiler.'),
('protodir=', None, 'Path to protobuf install.'),
('indir=', 'i', 'Directory containing input .proto files.'),
('outdir=', 'o', 'Where to output .py files.')]
def initialize_options(self):
self.skip_proto = False
try:
prefix = subprocess.check_output(
'pkg-config --variable prefix protobuf'.split()).strip().decode('utf-8')
except (subprocess.CalledProcessError, OSError):
if platform.system() == 'Linux':
# Default to /usr?
prefix = '/usr'
elif platform.system() in ['Mac', 'Darwin']:
# Default to /usr/local for Homebrew
prefix = '/usr/local'
else:
print('Warning: mfg-inspector output is not fully implemented for '
'Windows. OpenHTF will be installed without it.')
self.skip_proto = True
maybe_protoc = os.path.join(prefix, 'bin', 'protoc')
if os.path.isfile(maybe_protoc) and os.access(maybe_protoc, os.X_OK):
self.protoc = maybe_protoc
else:
print('Warning: protoc not found at %s' % maybe_protoc)
print('setup will attempt to run protoc with no prefix.')
self.protoc = 'protoc'
self.protodir = os.path.join(prefix, 'include')
self.indir = os.getcwd()
self.outdir = os.getcwd()
def finalize_options(self):
pass
def run(self):
if self.skip_proto:
print('Skipping building protocol buffers.')
return
# Build regular proto files.
protos = glob.glob(
os.path.join(self.indir, 'openhtf', 'output', 'proto', '*.proto'))
if protos:
print('Attempting to build proto files:\n%s' % '\n'.join(protos))
cmd = [
self.protoc,
'--proto_path', self.indir,
'--proto_path', self.protodir,
'--python_out', self.outdir,
] + protos
try:
subprocess.check_call(cmd)
except OSError as e:
if e.errno == errno.ENOENT:
print('Could not find the protobuf compiler at \'%s\'' % self.protoc)
if sys.platform.startswith('linux'):
print('On many Linux systems, this is fixed by installing the '
'"protobuf-compiler" and "libprotobuf-dev" packages.')
elif sys.platform == 'darwin':
print('On Mac, protobuf is often installed via homebrew.')
raise
except subprocess.CalledProcessError:
print('Could not build proto files.')
print('This could be due to missing helper files. On many Linux '
'systems, this is fixed by installing the '
'"libprotobuf-dev" package.')
raise
else:
print('Found no proto files to build.')
# Make building protos part of building overall.
build.sub_commands.insert(0, ('build_proto', None))
INSTALL_REQUIRES = [
'colorama>=0.3.9,<1.0',
'contextlib2>=0.5.1,<1.0',
'future>=0.16.0',
'mutablerecords>=0.4.1,<2.0',
'oauth2client>=1.5.2,<2.0',
'protobuf>=3.0.0,<4.0',
'PyYAML>=3.13,<4.0',
'pyOpenSSL>=17.1.0,<18.0',
'sockjs-tornado>=1.0.3,<2.0',
'tornado>=4.3,<5.0',
]
# Not all versions of setuptools support semicolon syntax for specifying
# platform-specific dependencies, so we do it the old school way.
if sys.version_info < (3,4):
INSTALL_REQUIRES.append('enum34>=1.1.2,<2.0')
class PyTestCommand(test):
# Derived from
# https://github.com/chainreactionmfg/cara/blob/master/setup.py
user_options = [
('pytest-args=', None, 'Arguments to pass to py.test'),
('pytest-cov=', None, 'Enable coverage. Choose output type: '
'term, html, xml, annotate, or multiple with comma separation'),
]
def initialize_options(self):
test.initialize_options(self)
self.pytest_args = 'test'
self.pytest_cov = None
def finalize_options(self):
test.finalize_options(self)
self.test_args = []
self.test_suite = True
def run_tests(self):
self.run_command('build_proto')
import pytest
cov = ''
if self.pytest_cov is not None:
outputs = ' '.join('--cov-report %s' % output
for output in self.pytest_cov.split(','))
cov = ' --cov openhtf ' + outputs
sys.argv = [sys.argv[0]]
print('invoking pytest.main with %s' % (self.pytest_args + cov))
sys.exit(pytest.main(self.pytest_args + cov))
setup(
name='openhtf',
version='1.3.0+shaper.0',
description='OpenHTF, the open hardware testing framework.',
author='John Hawley',
author_email='[email protected]',
maintainer='Joe Ethier',
maintainer_email='[email protected]',
packages=find_packages(exclude='examples'),
package_data={'openhtf': ['output/proto/*.proto',
'output/web_gui/dist/*.*',
'output/web_gui/dist/css/*',
'output/web_gui/dist/js/*',
'output/web_gui/dist/img/*',
'output/web_gui/*.*']},
cmdclass={
'build_proto': BuildProtoCommand,
'clean': CleanCommand,
'test': PyTestCommand,
},
install_requires=INSTALL_REQUIRES,
extras_require={
'usb_plugs': [
'libusb1>=1.3.0,<2.0',
'M2Crypto>=0.22.3,<1.0',
],
'update_units': [
'xlrd>=1.0.0,<2.0',
],
'serial_collection_plug': [
'pyserial>=3.3.0,<4.0',
],
},
setup_requires=[
'wheel>=0.29.0,<1.0',
],
tests_require=[
'mock>=2.0.0',
'pandas>=0.22.0',
'pytest>=2.9.2',
'pytest-cov>=2.2.1',
],
)
|
apache-2.0
|
mblondel/scikit-learn
|
examples/applications/plot_species_distribution_modeling.py
|
28
|
7434
|
"""
=============================
Species distribution modeling
=============================
Modeling species' geographic distributions is an important
problem in conservation biology. In this example we
model the geographic distribution of two South American
mammals given past observations and 14 environmental
variables. Since we have only positive examples (there are
no unsuccessful observations), we cast this problem as a
density estimation problem and use the `OneClassSVM` provided
by the package `sklearn.svm` as our modeling tool.
The dataset is provided by Phillips et al. (2006).
If available, the example uses
`basemap <http://matplotlib.sourceforge.net/basemap/doc/html/>`_
to plot the coast lines and national boundaries of South America.
The two species are:
- `"Bradypus variegatus"
<http://www.iucnredlist.org/apps/redlist/details/3038/0>`_ ,
the Brown-throated Sloth.
- `"Microryzomys minutus"
<http://www.iucnredlist.org/apps/redlist/details/13408/0>`_ ,
also known as the Forest Small Rice Rat, a rodent that lives in Peru,
   Colombia, Ecuador, and Venezuela.
References
----------
* `"Maximum entropy modeling of species geographic distributions"
<http://www.cs.princeton.edu/~schapire/papers/ecolmod.pdf>`_
S. J. Phillips, R. P. Anderson, R. E. Schapire - Ecological Modelling,
190:231-259, 2006.
"""
# Authors: Peter Prettenhofer <[email protected]>
# Jake Vanderplas <[email protected]>
#
# License: BSD 3 clause
from __future__ import print_function
from time import time
import numpy as np
import matplotlib.pyplot as plt
from sklearn.datasets.base import Bunch
from sklearn.datasets import fetch_species_distributions
from sklearn.datasets.species_distributions import construct_grids
from sklearn import svm, metrics
# if basemap is available, we'll use it.
# otherwise, we'll improvise later...
try:
from mpl_toolkits.basemap import Basemap
basemap = True
except ImportError:
basemap = False
print(__doc__)
def create_species_bunch(species_name, train, test, coverages, xgrid, ygrid):
"""Create a bunch with information about a particular organism
This will use the test/train record arrays to extract the
data specific to the given species name.
"""
bunch = Bunch(name=' '.join(species_name.split("_")[:2]))
species_name = species_name.encode('ascii')
points = dict(test=test, train=train)
for label, pts in points.items():
# choose points associated with the desired species
pts = pts[pts['species'] == species_name]
bunch['pts_%s' % label] = pts
# determine coverage values for each of the training & testing points
ix = np.searchsorted(xgrid, pts['dd long'])
iy = np.searchsorted(ygrid, pts['dd lat'])
bunch['cov_%s' % label] = coverages[:, -iy, ix].T
return bunch
def plot_species_distribution(species=["bradypus_variegatus_0",
"microryzomys_minutus_0"]):
"""
Plot the species distribution.
"""
if len(species) > 2:
print("Note: when more than two species are provided,"
" only the first two will be used")
t0 = time()
# Load the compressed data
data = fetch_species_distributions()
# Set up the data grid
xgrid, ygrid = construct_grids(data)
# The grid in x,y coordinates
X, Y = np.meshgrid(xgrid, ygrid[::-1])
# create a bunch for each species
BV_bunch = create_species_bunch(species[0],
data.train, data.test,
data.coverages, xgrid, ygrid)
MM_bunch = create_species_bunch(species[1],
data.train, data.test,
data.coverages, xgrid, ygrid)
# background points (grid coordinates) for evaluation
np.random.seed(13)
background_points = np.c_[np.random.randint(low=0, high=data.Ny,
size=10000),
np.random.randint(low=0, high=data.Nx,
size=10000)].T
# We'll make use of the fact that coverages[6] has measurements at all
# land points. This will help us decide between land and water.
land_reference = data.coverages[6]
# Fit, predict, and plot for each species.
for i, species in enumerate([BV_bunch, MM_bunch]):
print("_" * 80)
print("Modeling distribution of species '%s'" % species.name)
# Standardize features
mean = species.cov_train.mean(axis=0)
std = species.cov_train.std(axis=0)
train_cover_std = (species.cov_train - mean) / std
# Fit OneClassSVM
print(" - fit OneClassSVM ... ", end='')
clf = svm.OneClassSVM(nu=0.1, kernel="rbf", gamma=0.5)
clf.fit(train_cover_std)
print("done.")
# Plot map of South America
plt.subplot(1, 2, i + 1)
if basemap:
print(" - plot coastlines using basemap")
m = Basemap(projection='cyl', llcrnrlat=Y.min(),
urcrnrlat=Y.max(), llcrnrlon=X.min(),
urcrnrlon=X.max(), resolution='c')
m.drawcoastlines()
m.drawcountries()
else:
print(" - plot coastlines from coverage")
plt.contour(X, Y, land_reference,
levels=[-9999], colors="k",
linestyles="solid")
plt.xticks([])
plt.yticks([])
print(" - predict species distribution")
# Predict species distribution using the training data
Z = np.ones((data.Ny, data.Nx), dtype=np.float64)
# We'll predict only for the land points.
idx = np.where(land_reference > -9999)
coverages_land = data.coverages[:, idx[0], idx[1]].T
pred = clf.decision_function((coverages_land - mean) / std)[:, 0]
Z *= pred.min()
Z[idx[0], idx[1]] = pred
levels = np.linspace(Z.min(), Z.max(), 25)
Z[land_reference == -9999] = -9999
# plot contours of the prediction
plt.contourf(X, Y, Z, levels=levels, cmap=plt.cm.Reds)
plt.colorbar(format='%.2f')
# scatter training/testing points
plt.scatter(species.pts_train['dd long'], species.pts_train['dd lat'],
s=2 ** 2, c='black',
marker='^', label='train')
plt.scatter(species.pts_test['dd long'], species.pts_test['dd lat'],
s=2 ** 2, c='black',
marker='x', label='test')
plt.legend()
plt.title(species.name)
plt.axis('equal')
# Compute AUC with regards to background points
pred_background = Z[background_points[0], background_points[1]]
pred_test = clf.decision_function((species.cov_test - mean)
/ std)[:, 0]
scores = np.r_[pred_test, pred_background]
y = np.r_[np.ones(pred_test.shape), np.zeros(pred_background.shape)]
fpr, tpr, thresholds = metrics.roc_curve(y, scores)
roc_auc = metrics.auc(fpr, tpr)
plt.text(-35, -70, "AUC: %.3f" % roc_auc, ha="right")
print("\n Area under the ROC curve : %f" % roc_auc)
print("\ntime elapsed: %.2fs" % (time() - t0))
plot_species_distribution()
plt.show()
|
bsd-3-clause
|
jaantollander/Legendre
|
src/plotting/convergence_plotting.py
|
8
|
3846
|
# coding=utf-8
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
from matplotlib.widgets import Button
from src_legacy.analysis.convergence import max_slope
from src_legacy.io.load.load import LoadCsv
from src_legacy.other.settings import timeit
class ConvergencePlot:
"""
Interactive plot for results.
http://bastibe.de/2013-05-30-speeding-up-matplotlib.html
http://stackoverflow.com/questions/29277080/efficient-matplotlib-redrawing
"""
def __init__(self, filename, function):
self.initial = True # Flag
loads = LoadCsv(filename, function)
self.function = function
self.index = loads.errors.index.values
self.errors = loads.errors
self.a = loads.inputs[0]
self.x = loads.inputs[1]
self.fig, self.ax = plt.subplots(figsize=(10, 8))
self.fig.subplots_adjust(bottom=0.2)
self.ax.set(ylim=(10 ** -6, np.max(self.errors.values) + 0.1),
xlim=(self.index.min(), self.index.max()),
xlabel=r'$ p $',
ylabel=r'$ \varepsilon $')
self.line, = self.ax.loglog([], [], lw=1, marker='*')
self.cline, = self.ax.loglog([], [], lw=2, marker='*')
self.background = self.fig.canvas.copy_from_bbox(self.ax.bbox)
# rect = [left, bottom, width, height] in normalized (0, 1) units
xprev = plt.axes([0.44, 0.05, 0.1, 0.075])
xnext = plt.axes([0.56, 0.05, 0.1, 0.075])
bnext = Button(xnext, r'$ x \Rightarrow $')
bprev = Button(xprev, r'$ \Leftarrow x $')
bnext.on_clicked(self.xnext)
bprev.on_clicked(self.xprev)
aprev = plt.axes([0.91, 0.45, 0.08, 0.075])
anext = plt.axes([0.91, 0.55, 0.08, 0.075])
cnext = Button(anext, r'$ a \Rightarrow $')
cprev = Button(aprev, r'$ \Leftarrow a $')
cnext.on_clicked(self.anext)
cprev.on_clicked(self.aprev)
self.index_x = 0
self.index_a = 0
self.line.set_xdata(self.index)
self.draw()
@timeit
def draw(self):
"""
Redraw the axis
"""
a_ = self.a[self.index_a]
x_ = self.x[self.index_x]
data = self.errors[str(a_)]
data = data[str(x_)]
mask = max_slope(data)
convergence = data.iloc[mask]
self.line.set_ydata(data.values)
self.cline.set_data(convergence.index.values, convergence.values)
self.ax.set_title(r'{function}: '.format(function=self.function) +
r'$ a: {}\approx {:.4f} $, '.format(a_, float(a_)) +
r'$ x: {}\approx {:.4f} $ '.format(x_, float(x_)))
if self.initial:
self.fig.canvas.draw()
self.initial = False
plt.show()
else:
self.fig.canvas.restore_region(self.background)
self.ax.draw_artist(self.ax.patch)
self.ax.draw_artist(self.line)
self.ax.draw_artist(self.cline)
# self.fig.canvas.update()
# self.fig.canvas.flush_events()
self.fig.canvas.blit(self.ax.bbox)
def xnext(self, event):
if self.index_x < len(self.x) - 1:
self.index_x += 1
self.draw()
def xprev(self, event):
if self.index_x > -len(self.x):
self.index_x -= 1
self.draw()
def anext(self, event):
if self.index_a < len(self.a) - 1:
self.index_a += 1
self.draw()
def aprev(self, event):
if self.index_a > 0:
self.index_a -= 1
self.draw()
sns.set()
ConvergencePlot('100000_391_1', 'step_function')
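# The fast-redraw pattern used in ConvergencePlot.draw, shown in isolation
# (a generic matplotlib blitting sketch; ``fig``, ``ax``, ``line`` and
# ``new_y`` are illustrative names, not part of this module):
#
#   background = fig.canvas.copy_from_bbox(ax.bbox)  # cache the static parts
#   line.set_ydata(new_y)                            # update the artist data
#   fig.canvas.restore_region(background)            # restore cached bitmap
#   ax.draw_artist(line)                             # redraw only the artist
#   fig.canvas.blit(ax.bbox)                         # push changes to screen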
|
mit
|
jnvandermeer/PythonFeedback
|
idlex-1.11.2/demos/matplotlib_demo.py
|
1
|
1155
|
"""
Demo of Matplotlib interaction
==============================
Make sure that you have "Enable GUI Event Loop" checked
under the "Shell" menu, as well as the proper toolkit
selected.
Run this code. You'll have an interactive figure.
Click on the figure to see feedback in the shell.
The shell is still usable. If you are missing the ">>>" prompt,
press enter to create a new one.
If you want, change the backend to different toolkits, and then
select the appropriate one under the "Shell" menu.
"""
## code for interaction
from matplotlib import pyplot as plt
import numpy as np
print('You are using the %s backend.' % plt.rcParams['backend'])
plt.interactive(True)
fig = plt.figure()
ax = fig.add_subplot(111)
ax.plot(np.random.rand(10))
plt.title('Click on the plot')
def onclick(event):
try:
print ('button=%d, x=%d, y=%d, xdata=%f, ydata=%f'%(
event.button, event.x, event.y, event.xdata, event.ydata))
plt.plot(event.xdata, event.ydata, 'o')
except TypeError as e:
print('Click event received, but outside of plot.', e)
cid = fig.canvas.mpl_connect('button_press_event', onclick)
|
gpl-2.0
|
EnvGen/toolbox
|
scripts/concoct/extract_stats_for_approved.py
|
1
|
1044
|
#!/usr/bin/env python
"""
Based on the checkm results, approves bins according to the levels of contamination and completeness.
Prints the corresponding stats to stdout.
@author: alneberg
"""
from __future__ import print_function
import sys
import os
import argparse
import pandas as pd
from shutil import copyfile
def main(args):
# Read in the checkm table
df = pd.read_table(args.checkm_stats, index_col=0)
# extract the ids for all rows that meet the requirements
filtered_df = df[(df['Completeness'] >= args.min_completeness) & (df['Contamination'] <= args.max_contamination)]
filtered_df.to_csv(sys.stdout, sep='\t')
if __name__ == "__main__":
parser = argparse.ArgumentParser(description=__doc__)
parser.add_argument("checkm_stats", help="Checkm qa stats in tab_table format")
parser.add_argument("--min_completeness", default=85, type=float, help="default=85")
parser.add_argument("--max_contamination", default=5, type=float, help="default=5")
args = parser.parse_args()
main(args)
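# Example invocation (hypothetical file names; the thresholds shown are the
# defaults defined above; the filtered table is written to stdout):
#
#   python extract_stats_for_approved.py checkm_qa_stats.tsv \
#       --min_completeness 85 --max_contamination 5 > approved_bins.tsv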
|
mit
|
FEniCS/dolfin
|
demo/undocumented/mixed-poisson-sphere/python/demo_mixed-poisson-sphere.py
|
1
|
2417
|
"""This demo demonstrates how to solve a mixed Poisson type equation
defined over a sphere (the surface of a ball in 3D) including how to
create a cell_orientation map, needed for some forms defined over
manifolds."""
# Copyright (C) 2012 Marie E. Rognes
#
# This file is part of DOLFIN.
#
# DOLFIN is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# DOLFIN is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with DOLFIN. If not, see <http://www.gnu.org/licenses/>.
#
# First added: 2012-12-09
# Last changed: 2012-12-09
# Begin demo
from dolfin import *
import numpy
import matplotlib.pyplot as plt
# Read mesh
mesh = Mesh("../sphere_16.xml.gz")
# Define global normal
global_normal = Expression(("x[0]", "x[1]", "x[2]"), degree=1)
mesh.init_cell_orientations(global_normal)
# Define function spaces and basis functions
RT1 = FiniteElement("RT", mesh.ufl_cell(), 1)
DG0 = FiniteElement("DG", mesh.ufl_cell(), 0)
R = FiniteElement("R", mesh.ufl_cell(), 0)
W = FunctionSpace(mesh, MixedElement((RT1, DG0, R)))
(sigma, u, r) = TrialFunctions(W)
(tau, v, t) = TestFunctions(W)
g = Expression("sin(0.5*pi*x[2])", degree=2)
# Define forms
a = (inner(sigma, tau) + div(sigma)*v + div(tau)*u + r*v + t*u)*dx
L = g*v*dx
# Tune some factorization options
if has_petsc():
    # Avoid running out of factor memory due to excessive pivoting
PETScOptions.set("mat_mumps_icntl_14", 40.0)
PETScOptions.set("mat_mumps_icntl_7", "0")
# Avoid zero pivots on 64-bit SuperLU_dist
PETScOptions.set("mat_superlu_dist_colperm", "MMD_ATA")
# Solve problem
w = Function(W)
solve(a == L, w, solver_parameters={"symmetric": True})
(sigma, u, r) = w.split()
# Plot CG1 representation of solutions
sigma_cg = project(sigma, VectorFunctionSpace(mesh, "CG", 1))
u_cg = project(u, FunctionSpace(mesh, "CG", 1))
plt.figure()
plot(sigma_cg)
plt.figure()
plot(u_cg)
plt.show()
# Store solutions
file = File("sigma.pvd")
file << sigma
file = File("u.pvd")
file << u
|
lgpl-3.0
|
dblalock/dig
|
python/dig/datasets/utils.py
|
2
|
28325
|
#!/usr/bin/env/python
import os
import numpy as np
import matplotlib.pyplot as plt
from matplotlib import animation
from matplotlib.patches import Rectangle
DEFAULT_LABEL = 0
from synthetic import concatWithPadding, ensure2D
from ..utils.sequence import splitElementsBy, splitIdxsBy
# ================================================================ Plotting
def plotVertLine(x, ymin=None, ymax=None, ax=None, **kwargs):
if ax and (not ymin or not ymax):
ymin, ymax = ax.get_ylim()
if not ax:
ax = plt
kwargs['color'] = kwargs.get('color') or 'k'
kwargs['linestyle'] = kwargs.get('linestyle') or '--'
kwargs['linewidth'] = kwargs.get('linewidth') or 2
ax.plot([x, x], [ymin, ymax], **kwargs)
def plotRect(ax, xmin, xmax, ymin=None, ymax=None, alpha=.2,
showBoundaries=True, color='grey', fill=True, **kwargs):
if ax and (ymin is None or ymax is None):
ymin, ymax = ax.get_ylim()
if fill:
ax.add_patch(Rectangle((xmin, ymin), xmax-xmin, ymax-ymin,
facecolor=color, alpha=alpha))
if showBoundaries:
plotVertLine(xmin, ymin, ymax, ax=ax, color=color, **kwargs)
plotVertLine(xmax, ymin, ymax, ax=ax, color=color, **kwargs)
# ================================================================ Animation
def animateSubseqs(X, windowLen, step=None, figsize=None, dataName=None,
ylimits=None, idxOffsetTitle=0, idxOffsetXLabel=0,
rangeStartIdxs=None, rangeEndIdxs=None,
rangeLabels=None):
"""plots the data in each sliding window position, and creates an
animation from the sequence of plots. If rangeStartIdxs and rangeEndIdxs
are specified, it will mark the associated regions in the plots, also
    writing the content of the corresponding entry of rangeLabels in each plot.
Parameters
----------
X: 2d array
the data to plot; each row is a sample
windowLen: int
length of the sliding window
step: int
the increment by which to slide the window
figsize: (x, y) tuple
the size of the figure in inches
dataName: string
displayed in the title
ylimits: (int, int)
        ylimits for each plot
idxOffsetTitle: int
        value to add to the indices shown in each title; if this is, say, the data
starting at index 1000 from a longer dataset, this param can be used
to add 1000 to what's displayed in the title
idxOffsetXLabel: int
like idxOffsetTitle, but adjusts the values displayed along the x axis;
more useful since x values above 100,000 will be displayed in
scientific notation
rangeStartIdxs: vector of int
the start indices of any labeled ranges in the data; will be marked with
vertical lines
rangeEndIdxs: vector of int
the (non-inclusive) end indices of any labeled ranges in the data;
will be marked with vertical lines; must be the same length as
rangeStartIdxs if either of them are provided
rangeLabels: vector of string
the labels for each of the ranges specified by rangeStartIdxs and
rangeEndIdxs; must be the same length as these if any of them are
provided
Returns
-------
A matplotlib Animation object
"""
X = np.asarray(X)
if step < 1:
step = windowLen / 4
if ylimits is None:
ylimits = [np.min(X), np.max(X)]
yMin, yMax = ylimits[0], ylimits[1]
dataName = dataName + ", " if dataName else ""
# determine start locations for the window of data shown in each frame
lastPossibleStartIdx = len(X) - windowLen
windowStartIdxs = np.arange(0, lastPossibleStartIdx + 1, step)
if windowStartIdxs[-1] != lastPossibleStartIdx: # ensure we don't miss a section
windowStartIdxs = np.r_[windowStartIdxs, lastPossibleStartIdx]
fig = plt.figure(figsize=figsize)
ax = plt.gca()
def animateFunc(frameNum):
ax.cla()
# ------------------------ plot data
# show different indices for title and x axis so that x axis fits and
# doesn't get shortened into illegible scientific notation
startIdx = windowStartIdxs[frameNum]
endIdx = startIdx + windowLen
startIdxTitle = startIdx + idxOffsetTitle
endIdxTitle = endIdx + idxOffsetTitle
startIdxXlabel = startIdx + idxOffsetXLabel
endIdxXlabel = endIdx + idxOffsetXLabel
ax.set_title("{0}{1}-{2}".format(dataName, startIdxTitle, endIdxTitle))
ax.plot(np.arange(startIdxXlabel, endIdxXlabel), X[startIdx:endIdx])
ax.set_ylim(yMin, yMax)
ax.set_xlim(startIdxXlabel, endIdxXlabel)
# ------------------------ plot labeled ranges
# if no labeled ranges provided, we're done
if (rangeStartIdxs is None) or (len(rangeStartIdxs) < 1):
return
windowRangeStartIdx, windowRangeEndIdx = whereStartEndPairsInRange(
rangeStartIdxs, rangeEndIdxs, startIdx, endIdx)
# if no labeled ranges in this window, we're done
if windowRangeEndIdx <= windowRangeStartIdx:
return
# print("--- window #{}: {}-{}".format(frameNum, startIdxTitle, endIdxTitle))
for i in range(windowRangeStartIdx, windowRangeEndIdx):
label = str(rangeLabels[i])
ts, te = rangeStartIdxs[i], rangeEndIdxs[i]
ts, te = ts + idxOffsetXLabel, te + idxOffsetXLabel
if ts < 0 or te > endIdxXlabel: # can happen since only end idxs monotonic
continue
plotRect(ax, ts, te, fill=False) # no fill because it doesn't clear
# position label near start, but shifted based on width to
# differentiate ranges with the same start; also, stagger heights
# so that labels don't end up on top of one another
x = ts + (te - ts) / 10
yFrac = .67
yFrac += .04 * (i // 1 % 2)
yFrac += .08 * (i // 2 % 2)
yFrac += .16 * (i // 4 % 2)
y = yFrac * (yMax - yMin) + yMin
ax.annotate(label, xy=(x, y), xycoords='data')
# print("{}: {}-{}\t({}-{})\t\tx={:d}\tlbl={}".format(label, ts, te,
# rangeStartIdxs[i], rangeEndIdxs[i], x, label))
plt.close()
return animation.FuncAnimation(fig, animateFunc, frames=len(windowStartIdxs), blit=False)
def saveAnimation(anim, path, fps=25):
anim.save(path, writer='ffmpeg', fps=fps) # Note: ensure that ffmpeg is installed
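# Minimal usage sketch (synthetic data; saving assumes ffmpeg is installed,
# and "demo.mp4" is an arbitrary output path):
#
#   X = np.random.randn(5000, 2)
#   anim = animateSubseqs(X, windowLen=300, step=75, dataName="demo")
#   saveAnimation(anim, "demo.mp4", fps=25)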
def generateVideos(ar, dataName="Data", step=5000, windowLen=300,
epochSz=100000, saveInDir=None, rangeStartIdxs=None, rangeEndIdxs=None,
rangeLabels=None, ylimits=None, createSubdir=True):
"""Given a long time series ar, plots subsequences of length windowLen,
in videos spanning step samples. epochSz is used to sort the videos into
subdirectories. The range{Start,End}Idxs and rangeLabels are used to
plot an arbitrary set of labeled ranges in the data."""
# sanity check labeled ranges
hasRanges = rangeStartIdxs is not None or rangeEndIdxs is not None or rangeLabels is not None
hasRanges = hasRanges and len(rangeStartIdxs) > 0
if hasRanges:
assert(len(rangeStartIdxs) == len(rangeEndIdxs))
if rangeLabels is not None:
assert(len(rangeStartIdxs) == len(rangeLabels))
# determine window length
if windowLen is None or windowLen < 0:
windowLen = int(len(ar) / len(rangeStartIdxs))
elif windowLen <= 1.:
windowLen = int(windowLen * len(ar))
# determine video length
if step is None or step < 0:
step = len(ar) + 1 # put everything in one video
elif step <= 1.:
step = int(step * len(ar))
startIdxsInVid = None
endIdxsInVid = None
labelsInVid = None
lastRangeIdx = 0
for epochNum, epochStartIdx in enumerate(range(0, len(ar), epochSz)):
epochEndIdx = epochStartIdx + epochSz
epochData = ar[epochStartIdx:epochEndIdx]
if saveInDir:
subdirName = dataName
if not createSubdir:
subdirName = ""
if len(ar) > epochSz:
subdir = "{0}k-{1}k".format(epochStartIdx / 1000, epochEndIdx / 1000)
saveDir = os.path.join(saveInDir, subdirName, subdir)
else:
saveDir = os.path.join(saveInDir, subdirName)
if not os.path.exists(saveDir):
os.makedirs(saveDir)
n = len(epochData)
for startIdx in range(0, n, step): # for each video segment
endIdx = min(startIdx + step, n)
absoluteStartIdx = epochStartIdx + startIdx
absoluteEndIdx = epochStartIdx + endIdx
data = epochData[startIdx:endIdx]
print "generating vid for section {0}-{1}".format(absoluteStartIdx, absoluteEndIdx)
# figure out what labeled ranges are present in this video
if hasRanges:
rangeStartIdxs = rangeStartIdxs[lastRangeIdx:]
rangeEndIdxs = rangeEndIdxs[lastRangeIdx:]
rangeLabels = rangeLabels[lastRangeIdx:]
first, last = whereStartEndPairsInRange(
rangeStartIdxs, rangeEndIdxs, absoluteStartIdx, absoluteEndIdx)
if (first >= 0) and (last > first):
firstRangeIdx, lastRangeIdx = first, last
startIdxsInVid = rangeStartIdxs[firstRangeIdx:lastRangeIdx] - absoluteStartIdx
endIdxsInVid = rangeEndIdxs[firstRangeIdx:lastRangeIdx] - absoluteStartIdx
labelsInVid = rangeLabels[firstRangeIdx:lastRangeIdx]
else:
lastRangeIdx = 0 # didn't move forward in range list
startIdxsInVid = None
endIdxsInVid = None
labelsInVid = None
print("labeled ranges in vid:")
            print(np.c_[startIdxsInVid, endIdxsInVid, labelsInVid])
figName = dataName + "_{0}-{1}".format(absoluteStartIdx, absoluteEndIdx-1)
figPath = os.path.join(saveDir, figName + '.mp4')
anim = animateSubseqs(data, windowLen, figsize=(8,6),
dataName=dataName.title(), ylimits=ylimits,
idxOffsetTitle=absoluteStartIdx, idxOffsetXLabel=startIdx,
rangeStartIdxs=startIdxsInVid, rangeEndIdxs=endIdxsInVid,
rangeLabels=labelsInVid)
if saveInDir:
saveAnimation(anim, figPath, fps=25)
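# Sketch of this higher-level entry point (hypothetical output directory and
# label arrays; ``starts``, ``ends`` and ``lbls`` must have equal length):
#
#   generateVideos(X, dataName="ecg", step=5000, windowLen=300,
#                  saveInDir="/tmp/vids", rangeStartIdxs=starts,
#                  rangeEndIdxs=ends, rangeLabels=lbls)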
# ================================================================ Annotations
def whereStartEndPairsInRange(startIdxs, endIdxs, minStartIdx, maxEndIdx):
"""Given an ordered collection of start and end pairs, a minimum start
index, and a maximum end index, return the first and last index into the
start and end pairs collection such that the pair at that index is included
in the range (the returned last index is not inclusive, as is typical in
python).
Note that this assumes that both the startIdxs and endIdxs are already
sorted in ascending order. This is necessary for the returned indices
to be meaningful when used to index the arrays passed in.
Returns (-1, -1) if no (start, end) pairs fall in the range
>>> starts = [0, 5]
>>> ends = [1, 10]
>>> minStart, maxEnd = 0, 20
>>> whereStartEndPairsInRange(starts, ends, minStart, maxEnd)
(0, 2)
>>> minStart = 1
>>> whereStartEndPairsInRange(starts, ends, minStart, maxEnd)
(1, 2)
>>> minStart, maxEnd = 0, 8
>>> whereStartEndPairsInRange(starts, ends, minStart, maxEnd)
(0, 1)
>>> minStart, maxEnd = 1, 8
>>> whereStartEndPairsInRange(starts, ends, minStart, maxEnd)
(-1, -1)
"""
# fail fast
assert(len(startIdxs) == len(endIdxs))
empty = (-1, -1)
if not len(startIdxs):
return empty
# find first startIdx >= minStartIdx
tsIdx = -1
ts = -1
while ts < minStartIdx:
try:
tsIdx += 1
ts = startIdxs[tsIdx]
except IndexError: # reached end
break
# find last endIdx < maxEndIdx
teIdx = tsIdx - 1
te = -1
while te < maxEndIdx:
try:
teIdx += 1
te = endIdxs[teIdx]
except IndexError:
break
if tsIdx == teIdx: # empty set
return empty
return tsIdx, teIdx
def unionOfRanges(rangeStartIdxs, rangeEndIdxs, n, padLen=0):
"""
>>> starts = [0]
>>> ends = [10]
>>> n = 10
>>> padLen = 0
>>> unionOfRanges(starts, ends, n, padLen)
array([[ 0, 10]])
>>>
>>> starts = [2]
>>> ends = [5]
>>> unionOfRanges(starts, ends, n, padLen)
array([[2, 5]])
>>>
>>> starts = [1, 4]
>>> ends = [3, 7]
>>> unionOfRanges(starts, ends, n, padLen) # doctest: +NORMALIZE_WHITESPACE
array([[1, 3],
[4, 7]])
>>>
>>> starts = [1, 4]
>>> ends = [5, 6]
>>> unionOfRanges(starts, ends, n, padLen) # doctest: +NORMALIZE_WHITESPACE
array([[1, 6]])
>>>
>>> starts = [1, 3]
>>> ends = [4, 7]
>>> padLen = 1
>>> unionOfRanges(starts, ends, n, padLen) # doctest: +NORMALIZE_WHITESPACE
array([[0, 8]])
>>>
>>> n = 7
>>> unionOfRanges(starts, ends, n, padLen) # doctest: +NORMALIZE_WHITESPACE
array([[0, 7]])
>>>
>>> starts = [0, 6, 10, 20]
>>> ends = [4, 9, 15, 30]
>>> n = 100
>>> unionOfRanges(starts, ends, n, padLen) # doctest: +NORMALIZE_WHITESPACE
array([[ 0, 5],
[ 5, 16],
[19, 31]])
>>>
>>> starts = starts[::-1]
>>> ends = ends[::-1]
>>> unionOfRanges(starts, ends, n, padLen) # doctest: +NORMALIZE_WHITESPACE
array([[ 0, 5],
[ 5, 16],
[19, 31]])
>>>
>>> padLen = 5
>>> starts = [5, 10, 10]
>>> ends = [20, 20, 30]
>>> unionOfRanges(starts, ends, n, padLen) # doctest: +NORMALIZE_WHITESPACE
array([[ 0, 35]])
"""
# sanity check args (just via asserts due to tactical laziness)
assert(len(rangeStartIdxs) > 0)
assert(len(rangeStartIdxs) == len(rangeEndIdxs))
assert(np.all(rangeStartIdxs <= rangeEndIdxs))
assert(np.min(rangeStartIdxs) >= 0)
assert(np.max(rangeEndIdxs) <= n)
assert(np.min(rangeEndIdxs) >= padLen)
rangeStartIdxs = np.asarray(rangeStartIdxs)
rangeEndIdxs = np.asarray(rangeEndIdxs)
# sort by endIdx
if len(rangeEndIdxs) > 1:
sortIdxs = np.argsort(rangeEndIdxs)
rangeStartIdxs = rangeStartIdxs[sortIdxs]
rangeEndIdxs = rangeEndIdxs[sortIdxs]
# compute earliest start time remaining (including current start time); eg,
# [1,1,3,3,2,4,5,4,6] -> [1 1 2 2 2 4 4 4 6]
# [1,1,3,3,0,4,5,4,6] -> [0 0 0 0 0 4 4 4 6]
earliestFutureStarts = np.minimum.accumulate(rangeStartIdxs[::-1])[::-1]
earliestFutureStarts -= padLen
earliestFutureStarts = np.maximum(0, earliestFutureStarts)
sectionStartIdxs = []
sectionEndIdxs = []
numInstances = len(rangeStartIdxs)
currentSectionStart = earliestFutureStarts[0] # first start time anywhere
for i in range(numInstances-1):
te = rangeEndIdxs[i] + padLen
earliestFutureStart = earliestFutureStarts[i+1]
if currentSectionStart < te <= earliestFutureStart:
sectionStartIdxs.append(currentSectionStart)
sectionEndIdxs.append(te)
currentSectionStart = earliestFutureStart
finalEndIdx = min(rangeEndIdxs[-1] + padLen, n)
sectionStartIdxs.append(currentSectionStart)
sectionEndIdxs.append(finalEndIdx)
return np.c_[sectionStartIdxs, sectionEndIdxs].astype(np.int)
def adjustedAnnotationIdxs(rangeStartIdxs, rangeEndIdxs, n, padLen=0):
"""given that we're extracting the data around the annotations,
adjust the start and end indices so that they're correct for the
extracted data
>>> starts = [0]
>>> ends = [10]
>>> n = 10
>>> padLen = 0
>>> adjustedAnnotationIdxs(starts, ends, n, padLen) # doctest: +NORMALIZE_WHITESPACE
array([[ 0, 10]])
>>>
>>> starts = [5]
>>> ends = [7]
>>> adjustedAnnotationIdxs(starts, ends, n, padLen) # doctest: +NORMALIZE_WHITESPACE
array([[0, 2]])
>>>
>>> starts = [5, 20]
>>> ends = [7, 25]
>>> n = 100
>>> adjustedAnnotationIdxs(starts, ends, n, padLen) # doctest: +NORMALIZE_WHITESPACE
array([[0, 2],
[2, 7]])
>>>
>>> starts = [5, 10, 15]
>>> ends = [7, 12, 18]
>>> adjustedAnnotationIdxs(starts, ends, n, padLen) # doctest: +NORMALIZE_WHITESPACE
array([[0, 2],
[2, 4],
[4, 7]])
>>>
>>> starts = [5, 6, 15]
>>> ends = [7, 10, 18]
>>> adjustedAnnotationIdxs(starts, ends, n, padLen) # doctest: +NORMALIZE_WHITESPACE
array([[0, 2],
[1, 5],
[5, 8]])
>>>
>>> padLen = 1
>>> adjustedAnnotationIdxs(starts, ends, n, padLen) # doctest: +NORMALIZE_WHITESPACE
array([[ 1, 3],
[ 2, 6],
[ 8, 11]])
"""
if padLen is not None:
        assert(padLen >= 0)  # negative padLen could make sense, but is unsupported here
rangeStartIdxs = np.asarray(rangeStartIdxs)
rangeEndIdxs = np.asarray(rangeEndIdxs)
# if len(rangeStartIdxs) > 1:
# sortIdxs = np.argsort(rangeEndIdxs)
# rangeStartIdxs = rangeStartIdxs[sortIdxs]
# rangeEndIdxs = rangeEndIdxs[sortIdxs]
ranges = unionOfRanges(rangeStartIdxs, rangeEndIdxs, n, padLen)
rangeStarts, rangeEnds = ranges[:, 0], ranges[:, 1]
# compute number of points skipped; ie, skippedLengths[i] is the length
# of all data before the ith range that won't be extracted
rangeLengths = rangeEnds - rangeStarts
skippedLengths = rangeEnds - np.cumsum(rangeLengths)
newStartIdxs = []
newEndIdxs = []
numInstances = len(rangeStartIdxs)
inWhichRange = 0
for i in range(numInstances):
ts, te = rangeStartIdxs[i], rangeEndIdxs[i]
# find which combined range this original (start, end) pair lies in
while rangeEnds[inWhichRange] < te:
inWhichRange += 1
skippedLen = skippedLengths[inWhichRange]
newStartIdxs.append(ts - skippedLen)
newEndIdxs.append(te - skippedLen)
return np.c_[newStartIdxs, newEndIdxs].astype(np.int)
def dataNearAnnotations(X, rangeStartIdxs, rangeEndIdxs, padLen=0):
"""extract (and concatenate) all the sections of X within padLen of any of
the ranges defined by rangeStartIdxs and rangeEndIdxs"""
combinedRanges = unionOfRanges(rangeStartIdxs, rangeEndIdxs, len(X), padLen)
newIdxs = adjustedAnnotationIdxs(rangeStartIdxs, rangeEndIdxs, len(X), padLen)
newStartIdxs, newEndIdxs = newIdxs[:, 0], newIdxs[:, 1]
# print "dataNearAnnotations(): oldIdxs -> newIdxs"
# for i in range(len(newIdxs)):
# print np.r_[rangeStartIdxs[i], rangeEndIdxs[i]], "->", newIdxs[i]
keepIdxs = []
for i in range(len(combinedRanges)):
dataStart = combinedRanges[i, 0] # has padding built in
dataEnd = combinedRanges[i, 1]
idxsInRange = range(dataStart, dataEnd)
keepIdxs += list(idxsInRange)
# print("{}-{}\t".format(dataStart, dataEnd))
return X[np.array(keepIdxs)], newStartIdxs, newEndIdxs
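# Worked sketch (synthetic values): with a length-100 series, annotated
# ranges (10, 20) and (50, 60), and padLen=5, the returned data is X[5:25]
# stacked on X[45:65], and the returned indices are re-expressed relative to
# that concatenated array:
#
#   X = np.arange(100).reshape(-1, 1)
#   data, starts, ends = dataNearAnnotations(X, [10, 50], [20, 60], padLen=5)
#   # starts == [5, 25], ends == [15, 35]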
def groupsOfAnnotationIdxsForLabels(labels, groupSize=10, shuffle=False):
"""[label] -> (dict: label -> [idxs])
    I.e., for each label, return a list of lists of indices at which that label occurs,
with the inner lists of size groupSize. The point is to pull out groups
of, say, 10 indices, where a given label occurs so we can subsequently
pull out these sections of data.
"""
groupedLabels = splitIdxsBy(lambda lbl: lbl, labels)
lbl2idxGroups = {}
for lbl in groupedLabels:
idxs = groupedLabels[lbl]
idxGroups = formGroupsOfSize(idxs, groupSize=groupSize, shuffle=shuffle)
lbl2idxGroups[lbl] = idxGroups
return lbl2idxGroups
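# Illustration (assuming splitIdxsBy maps each label value to the list of
# indices at which it occurs):
#
#   groupsOfAnnotationIdxsForLabels(['a', 'a', 'b', 'a', 'b'], groupSize=2)
#   # -> {'a': [[0, 1], [3]], 'b': [[2, 4]]}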
def sectionsOfDataNearAnnotations(X, startIdxs, endIdxs, labels,
instancesPerTs=10, shuffle=False, padLen=0, keepLabels=None,
datasetName="Dataset"):
lbl2idxGroups = groupsOfAnnotationIdxsForLabels(labels,
groupSize=instancesPerTs, shuffle=shuffle)
tsList = []
for lbl in lbl2idxGroups:
if keepLabels and not (lbl in keepLabels): # only keep certain labels
continue
idxGroups = lbl2idxGroups[lbl]
for groupNum, groupIdxs in enumerate(idxGroups):
groupStartIdxs = startIdxs[groupIdxs]
groupEndIdxs = endIdxs[groupIdxs]
groupLabels = labels[groupIdxs]
data, newStartIdxs, newEndIdxs = dataNearAnnotations(X,
groupStartIdxs, groupEndIdxs, padLen=padLen)
name = "{}-class{}-group{}".format(datasetName, lbl, groupNum)
uniqId = hash(name)
ts = LabeledTimeSeries(data, startIdxs=newStartIdxs,
endIdxs=newEndIdxs, labels=groupLabels, name=name, id=uniqId)
tsList.append(ts)
return tsList
# like above, but allows multiple labels in a given section
def sectionsOfDataNearAnnotationsImpure(X, startIdxs, endIdxs, labels,
instancesPerTs=10, shuffle=False, padLen=0, maxPadJitter=0,
keepLabels=None, datasetName="Dataset"):
assert(len(startIdxs) == len(endIdxs))
assert(len(startIdxs) == len(labels))
startIdxs = np.asarray(startIdxs)
endIdxs = np.asarray(endIdxs)
# filter out labels we don't care about
if keepLabels:
allIdxs = np.arange(len(labels))
keepIdxs = [i for i in allIdxs if labels[i] in keepLabels]
keepIdxs = np.array(keepIdxs, dtype=np.int)
startIdxs = startIdxs[keepIdxs]
endIdxs = endIdxs[keepIdxs]
labels = labels[keepIdxs]
# find sections of nearby annotations in the data and group these
# sections together; we'll concat these groups together to form a ts
combinedRanges = unionOfRanges(startIdxs, endIdxs, len(X), padLen=padLen)
rangeGroups = formGroupsOfSize(combinedRanges, groupSize=instancesPerTs,
shuffle=shuffle)
# now the hard part--create a LabeledTimeSeries from each of these
# sections of signal; we have to not only find which annotations
# fall within each range, but also adjust the start and end indices
# so that they're correct in the new ts formed by concatenating the
# data in each range together
tsList = []
for groupNum, ranges in enumerate(rangeGroups):
ranges = sorted(ranges, key=lambda r: r[0]) # sort by range start idx
dataLenSoFar = 0
dataInRanges = []
startsInRanges = []
endsInRanges = []
labelsInRanges = []
for rang in ranges:
start, end = rang
firstInRange, lastInRange = whereStartEndPairsInRange(startIdxs,
endIdxs, start, end)
idxsInRange = np.arange(firstInRange, lastInRange)
# move the start and end indices around a bit so that ranges
# aren't spaced exactly uniformly, which can lead to an
# artificial semblance of regularity
if maxPadJitter > 0:
if firstInRange > 0:
firstStartIdx = startIdxs[firstInRange]
prevEndIdx = endIdxs[firstInRange-1]
gap = firstStartIdx - prevEndIdx
if gap > 1:
gap = min(gap - 1, maxPadJitter)
offset = int(np.random.rand() * gap)
start -= offset
if lastInRange < (len(startIdxs) - 1):
lastEndIdx = endIdxs[lastInRange-1] # last idx not inclusive
nextStartIdx = startIdxs[lastInRange]
gap = nextStartIdx - lastEndIdx
if gap > 1:
gap = min(gap - 1, maxPadJitter)
offset = int(np.random.rand() * gap)
end += offset
starts = startIdxs[idxsInRange] - start + dataLenSoFar
ends = endIdxs[idxsInRange] - start + dataLenSoFar
lbls = labels[idxsInRange]
startsInRanges += list(starts)
endsInRanges += list(ends)
labelsInRanges += list(lbls)
data = ensure2D(X[start:end])
dataInRanges.append(data)
dataLenSoFar += len(data)
if len(labelsInRanges) < 2: # need more than one pattern instance per ts
continue
groupData = np.vstack(dataInRanges)
groupStarts = np.array(startsInRanges, dtype=np.int)
groupEnds = np.array(endsInRanges, dtype=np.int)
groupLabels = np.array(labelsInRanges, dtype=np.object)
name = "{}-group{}".format(datasetName, groupNum)
uniqId = hash(name)
ts = LabeledTimeSeries(groupData, startIdxs=groupStarts,
endIdxs=groupEnds, labels=groupLabels, name=name, id=uniqId)
tsList.append(ts)
return tsList
# ================================================================ Concatenation
def groupDatasetByLabel(X, Y):
return splitElementsBy(lambda i, x: Y[i], X)
def formGroupsOfSize(collection, groupSize=10, shuffle=False):
    # note that having |group| == groupSize is not guaranteed for the final group
if shuffle:
np.random.shuffle(collection)
groups = []
i = 0
while i < len(collection):
j = i + groupSize
groups.append(collection[i:j])
i += groupSize
return groups
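# Small example of the grouping behaviour noted above:
#
#   formGroupsOfSize(list(range(7)), groupSize=3)
#   # -> [[0, 1, 2], [3, 4, 5], [6]]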
def concatedTsList(X, Y, instancesPerTs=10, datasetName="Dataset",
enemyInstancesPerTs=0, **paddingKwargs):
"""instances -> [LabeledTimeSeries], with each pure wrt class of instances"""
groupedByClass = groupDatasetByLabel(X, Y)
# we allow at most one instance of each other class so there's only one
# repeating "pattern"
numClasses = len(groupedByClass)
if enemyInstancesPerTs > numClasses - 1:
print("concatedTsList(): WARNING: "
"enemyInstancesPerTs {} > num digits - 1; will be truncated".format(
enemyInstancesPerTs))
enemyInstancesPerTs = numClasses - 1
tsList = []
for clz, instances in groupedByClass.iteritems():
groups = formGroupsOfSize(instances, instancesPerTs)
for groupNum, group in enumerate(groups):
otherClasses = groupedByClass.keys()
otherClasses.remove(clz)
lbls = [clz] * len(group)
if enemyInstancesPerTs > 0:
enemyLbls = np.random.choice(otherClasses, enemyInstancesPerTs)
if enemyInstancesPerTs == 1:
enemyLbls = [enemyLbls]
else:
enemyLbls = list(enemyLbls)
for dgt in enemyLbls:
whichRecording = np.random.choice(groupedByClass[dgt])
group.append(whichRecording)
allIdxs = np.arange(len(group))
orderIdxs = np.random.choice(allIdxs, len(allIdxs))
np.random.shuffle(orderIdxs)
lbls = lbls + enemyLbls
lbls = np.array(lbls, dtype=np.object)
lbls = lbls[orderIdxs]
                # reorder the instances with the same permutation as the labels
                group = [group[idx] for idx in orderIdxs]
concated, startIdxs, endIdxs = concatWithPadding(
group, **paddingKwargs)
name = "{}-class{}-group{}".format(datasetName, clz, groupNum)
uniqId = hash(name)
ts = LabeledTimeSeries(data=concated, startIdxs=startIdxs,
endIdxs=endIdxs, labels=lbls, name=name, id=uniqId)
tsList.append(ts)
return tsList
# ================================================================ Data structs
class LabeledTimeSeries(object):
def __init__(self, data, startIdxs, endIdxs=None, subseqLength=None,
labels=None, name=None, id=0):
self.data = ensure2D(data)
self.startIdxs = np.asarray(startIdxs, dtype=np.int)
self.labels = np.asarray(labels)
self.name = name
self.id = int(id)
if endIdxs is not None:
self.endIdxs = np.asarray(endIdxs, dtype=np.int)
self.subseqLength = None
elif subseqLength:
self.endIdxs = self.startIdxs + subseqLength
self.subseqLength = subseqLength
else:
raise ValueError("Either endIdxs or subseqLength must be specified!")
if labels is None or len(labels) == 0:
self.labels = np.zeros(len(startIdxs), dtype=np.int) + DEFAULT_LABEL
if startIdxs is not None and endIdxs is not None:
# equal lengths
nStart, nEnd = len(startIdxs), len(endIdxs)
if nStart != nEnd:
raise ValueError("Number of start indices must equal number"
"of end indices! {0} != {1}".format(nStart, nEnd))
# starts before or equal to ends
violators = np.where(startIdxs > endIdxs)[0]
if np.any(violators):
raise ValueError("Some start indices exceed end indices!"
"Violators at {}".format(str(violators)))
# valid indices
violators = np.where(startIdxs < 0)[0]
if np.any(violators):
raise ValueError("Some start indices < 0!"
"Violators at {}".format(str(violators)))
violators = np.where(endIdxs > len(data))[0]
if np.any(violators):
violatorValues = endIdxs[violators]
raise ValueError("Some end indices > length of data {}! "
"Violators {} at {}".format(len(data),
str(violatorValues), str(violators)))
def clone(self):
return LabeledTimeSeries(np.copy(self.data),
np.copy(self.startIdxs),
np.copy(self.endIdxs),
subseqLength=self.subseqLength,
labels=np.copy(self.labels),
name=self.name,
id=self.id
)
def plot(self, saveDir=None, capYLim=1000, ax=None, staggerHeights=True,
yFrac=.9, showBounds=True, showLabels=True, useWhichLabels=None,
linewidths=2., colors=None, **plotRectKwargs):
xlimits = [0, len(self.data)]
ylimits = [self.data.min(), min(capYLim, self.data.max())]
yMin, yMax = ylimits
if ax is None:
plt.figure(figsize=(10, 6))
ax = plt.gca()
if not hasattr(linewidths, '__len__'):
linewidths = np.zeros(self.data.shape[1]) + linewidths
hasColors = colors is not None and len(colors)
for i in range(self.data.shape[1]):
if hasColors:
ax.plot(self.data[:, i], lw=linewidths[i], color=colors[i])
else:
ax.plot(self.data[:, i], lw=linewidths[i])
ax.set_xlim(xlimits)
ax.set_ylim(ylimits)
ax.set_title(self.name)
hasUseWhichLabels = useWhichLabels is not None and len(useWhichLabels)
# plot annotations
if showLabels or showBounds:
for i in range(len(self.startIdxs)):
ts, te, label = self.startIdxs[i], self.endIdxs[i], self.labels[i]
# print "label, useWhichLabels", label,
if hasUseWhichLabels and label not in useWhichLabels:
continue
if showBounds:
plotRect(ax, ts, te, **plotRectKwargs) # show boundaries
if showLabels:
x = ts + (te - ts) / 10
if staggerHeights: # so labels don't end up on top of one another
yFrac = .67
yFrac += .04 * (i // 1 % 2)
yFrac += .08 * (i // 2 % 2)
yFrac += .16 * (i // 4 % 2)
y = yFrac * (yMax - yMin) + yMin # use yFrac passed in if not staggering
ax.annotate(label, xy=(x, y), xycoords='data')
if saveDir:
fileName = self.name + '.pdf'
if not os.path.exists(saveDir):
os.makedirs(saveDir)
path = os.path.join(saveDir, fileName)
plt.savefig(path)
return ax
def plotSubseqs(self, saveDir, **kwargs):
generateVideos(self.data, dataName=self.name, saveInDir=saveDir,
rangeStartIdxs=self.startIdxs, rangeEndIdxs=self.endIdxs,
rangeLabels=self.labels, **kwargs)
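# Construction sketch (synthetic data): either endIdxs or subseqLength must
# be supplied; labels default to DEFAULT_LABEL when omitted.
#
#   data = np.random.randn(500, 3)
#   ts = LabeledTimeSeries(data, startIdxs=[50, 200], subseqLength=40,
#                          labels=['a', 'b'], name='toy')
#   ax = ts.plot()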
# ================================================================ Main
if __name__ == '__main__':
from doctest import testmod
testmod()
|
mit
|
tgquintela/pySpatialTools
|
pySpatialTools/Discretization/Discretization_nd/sklearn_discretization.py
|
1
|
1654
|
"""
sklearnlikediscretization
-------------------------
['n_dim', 'metric', 'format_']
required_f = ['_compute_limits', '_compute_contiguity_geom',
'_map_loc2regionid', '_map_regionid2regionlocs']
"""
from ..metricdiscretizor import BaseMetricDiscretizor
class SklearnDisc(BaseMetricDiscretizor):
"""
"""
def __init__(self, clf, limits):
self._preformat(clf)
self._initialization()
self._format_clf(clf)
self.limits = limits
def _format_clf(self, clf):
"""Format clf to be used as discretizor."""
if not "predict" in dir(clf):
raise TypeError("Incorrect sklearn cluster method.")
self.clf = clf
def _map_regionid2regionlocs(self, region_id):
pass
def _map_loc2regionid(self, locations):
"""Discretize locations returning their region_id.
Parameters
----------
locations: numpy.ndarray
the locations for which we want to obtain their region given that
discretization.
Returns
-------
regions_id: numpy.ndarray
the region_id of each location for this discretization.
"""
return self.clf.predict(locations)
def _compute_contiguity_geom(self, limits):
"Compute which regions are contiguous and returns a graph."
## TODO:
raise Exception("Not implemented function yet.")
## Obtain random points around all the r_points
## Compute the two nearest points with different region_id
## Remove repeated pairs
return
def _compute_limits(self):
pass
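# Usage sketch (assuming a clusterer exposing ``predict``, e.g. a fitted
# sklearn KMeans, and ``limits`` in whatever format BaseMetricDiscretizor
# expects; ``locations`` is an (n_points, n_dim) array):
#
#   from sklearn.cluster import KMeans
#   clf = KMeans(n_clusters=5).fit(locations)
#   disc = SklearnDisc(clf, limits)
#   region_ids = disc._map_loc2regionid(locations)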
|
mit
|
owaiskhan/Retransmission-Combining
|
gnuradio-examples/python/pfb/chirp_channelize.py
|
7
|
6936
|
#!/usr/bin/env python
#
# Copyright 2009 Free Software Foundation, Inc.
#
# This file is part of GNU Radio
#
# GNU Radio is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3, or (at your option)
# any later version.
#
# GNU Radio is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with GNU Radio; see the file COPYING. If not, write to
# the Free Software Foundation, Inc., 51 Franklin Street,
# Boston, MA 02110-1301, USA.
#
from gnuradio import gr, blks2
import sys, time
try:
import scipy
from scipy import fftpack
except ImportError:
print "Error: Program requires scipy (see: www.scipy.org)."
sys.exit(1)
try:
import pylab
from pylab import mlab
except ImportError:
print "Error: Program requires matplotlib (see: matplotlib.sourceforge.net)."
sys.exit(1)
class pfb_top_block(gr.top_block):
def __init__(self):
gr.top_block.__init__(self)
self._N = 200000 # number of samples to use
self._fs = 9000 # initial sampling rate
self._M = 9 # Number of channels to channelize
# Create a set of taps for the PFB channelizer
self._taps = gr.firdes.low_pass_2(1, self._fs, 500, 20,
attenuation_dB=10, window=gr.firdes.WIN_BLACKMAN_hARRIS)
# Calculate the number of taps per channel for our own information
tpc = scipy.ceil(float(len(self._taps)) / float(self._M))
print "Number of taps: ", len(self._taps)
print "Number of channels: ", self._M
print "Taps per channel: ", tpc
repeated = True
if(repeated):
self.vco_input = gr.sig_source_f(self._fs, gr.GR_SIN_WAVE, 0.25, 110)
else:
amp = 100
data = scipy.arange(0, amp, amp/float(self._N))
self.vco_input = gr.vector_source_f(data, False)
# Build a VCO controlled by either the sinusoid or single chirp tone
# Then convert this to a complex signal
self.vco = gr.vco_f(self._fs, 225, 1)
self.f2c = gr.float_to_complex()
self.head = gr.head(gr.sizeof_gr_complex, self._N)
# Construct the channelizer filter
self.pfb = blks2.pfb_channelizer_ccf(self._M, self._taps)
# Construct a vector sink for the input signal to the channelizer
self.snk_i = gr.vector_sink_c()
# Connect the blocks
self.connect(self.vco_input, self.vco, self.f2c)
self.connect(self.f2c, self.head, self.pfb)
self.connect(self.f2c, self.snk_i)
# Create a vector sink for each of M output channels of the filter and connect it
self.snks = list()
for i in xrange(self._M):
self.snks.append(gr.vector_sink_c())
self.connect((self.pfb, i), self.snks[i])
def main():
tstart = time.time()
tb = pfb_top_block()
tb.run()
tend = time.time()
print "Run time: %f" % (tend - tstart)
if 1:
fig_in = pylab.figure(1, figsize=(16,9), facecolor="w")
fig1 = pylab.figure(2, figsize=(16,9), facecolor="w")
fig2 = pylab.figure(3, figsize=(16,9), facecolor="w")
fig3 = pylab.figure(4, figsize=(16,9), facecolor="w")
Ns = 650
Ne = 20000
fftlen = 8192
winfunc = scipy.blackman
fs = tb._fs
# Plot the input signal on its own figure
d = tb.snk_i.data()[Ns:Ne]
spin_f = fig_in.add_subplot(2, 1, 1)
X,freq = mlab.psd(d, NFFT=fftlen, noverlap=fftlen/4, Fs=fs,
window = lambda d: d*winfunc(fftlen),
scale_by_freq=True)
X_in = 10.0*scipy.log10(abs(fftpack.fftshift(X)))
f_in = scipy.arange(-fs/2.0, fs/2.0, fs/float(X_in.size))
pin_f = spin_f.plot(f_in, X_in, "b")
spin_f.set_xlim([min(f_in), max(f_in)+1])
spin_f.set_ylim([-200.0, 50.0])
spin_f.set_title("Input Signal", weight="bold")
spin_f.set_xlabel("Frequency (Hz)")
spin_f.set_ylabel("Power (dBW)")
Ts = 1.0/fs
Tmax = len(d)*Ts
t_in = scipy.arange(0, Tmax, Ts)
x_in = scipy.array(d)
spin_t = fig_in.add_subplot(2, 1, 2)
pin_t = spin_t.plot(t_in, x_in.real, "b")
pin_t = spin_t.plot(t_in, x_in.imag, "r")
spin_t.set_xlabel("Time (s)")
spin_t.set_ylabel("Amplitude")
Ncols = int(scipy.floor(scipy.sqrt(tb._M)))
Nrows = int(scipy.floor(tb._M / Ncols))
if(tb._M % Ncols != 0):
Nrows += 1
# Plot each of the channels outputs. Frequencies on Figure 2 and
# time signals on Figure 3
fs_o = tb._fs / tb._M
Ts_o = 1.0/fs_o
Tmax_o = len(d)*Ts_o
for i in xrange(len(tb.snks)):
# remove issues with the transients at the beginning
# also remove some corruption at the end of the stream
# this is a bug, probably due to the corner cases
d = tb.snks[i].data()[Ns:Ne]
sp1_f = fig1.add_subplot(Nrows, Ncols, 1+i)
X,freq = mlab.psd(d, NFFT=fftlen, noverlap=fftlen/4, Fs=fs_o,
window = lambda d: d*winfunc(fftlen),
scale_by_freq=True)
X_o = 10.0*scipy.log10(abs(X))
f_o = freq
p2_f = sp1_f.plot(f_o, X_o, "b")
sp1_f.set_xlim([min(f_o), max(f_o)+1])
sp1_f.set_ylim([-200.0, 50.0])
sp1_f.set_title(("Channel %d" % i), weight="bold")
sp1_f.set_xlabel("Frequency (Hz)")
sp1_f.set_ylabel("Power (dBW)")
x_o = scipy.array(d)
t_o = scipy.arange(0, Tmax_o, Ts_o)
sp2_o = fig2.add_subplot(Nrows, Ncols, 1+i)
p2_o = sp2_o.plot(t_o, x_o.real, "b")
p2_o = sp2_o.plot(t_o, x_o.imag, "r")
sp2_o.set_xlim([min(t_o), max(t_o)+1])
sp2_o.set_ylim([-2, 2])
sp2_o.set_title(("Channel %d" % i), weight="bold")
sp2_o.set_xlabel("Time (s)")
sp2_o.set_ylabel("Amplitude")
sp3 = fig3.add_subplot(1,1,1)
p3 = sp3.plot(t_o, x_o.real)
sp3.set_xlim([min(t_o), max(t_o)+1])
sp3.set_ylim([-2, 2])
sp3.set_title("All Channels")
sp3.set_xlabel("Time (s)")
sp3.set_ylabel("Amplitude")
pylab.show()
if __name__ == "__main__":
try:
main()
except KeyboardInterrupt:
pass
|
gpl-3.0
|
bhargav/scikit-learn
|
sklearn/linear_model/stochastic_gradient.py
|
34
|
50761
|
# Authors: Peter Prettenhofer <[email protected]> (main author)
# Mathieu Blondel (partial_fit support)
#
# License: BSD 3 clause
"""Classification and regression using Stochastic Gradient Descent (SGD)."""
import numpy as np
from abc import ABCMeta, abstractmethod
from ..externals.joblib import Parallel, delayed
from .base import LinearClassifierMixin, SparseCoefMixin
from .base import make_dataset
from ..base import BaseEstimator, RegressorMixin
from ..feature_selection.from_model import _LearntSelectorMixin
from ..utils import (check_array, check_random_state, check_X_y,
deprecated)
from ..utils.extmath import safe_sparse_dot
from ..utils.multiclass import _check_partial_fit_first_call
from ..utils.validation import check_is_fitted
from ..externals import six
from .sgd_fast import plain_sgd, average_sgd
from ..utils.fixes import astype
from ..utils import compute_class_weight
from .sgd_fast import Hinge
from .sgd_fast import SquaredHinge
from .sgd_fast import Log
from .sgd_fast import ModifiedHuber
from .sgd_fast import SquaredLoss
from .sgd_fast import Huber
from .sgd_fast import EpsilonInsensitive
from .sgd_fast import SquaredEpsilonInsensitive
LEARNING_RATE_TYPES = {"constant": 1, "optimal": 2, "invscaling": 3,
"pa1": 4, "pa2": 5}
PENALTY_TYPES = {"none": 0, "l2": 2, "l1": 1, "elasticnet": 3}
DEFAULT_EPSILON = 0.1
# Default value of ``epsilon`` parameter.
class BaseSGD(six.with_metaclass(ABCMeta, BaseEstimator, SparseCoefMixin)):
"""Base class for SGD classification and regression."""
def __init__(self, loss, penalty='l2', alpha=0.0001, C=1.0,
l1_ratio=0.15, fit_intercept=True, n_iter=5, shuffle=True,
verbose=0, epsilon=0.1, random_state=None,
learning_rate="optimal", eta0=0.0, power_t=0.5,
warm_start=False, average=False):
self.loss = loss
self.penalty = penalty
self.learning_rate = learning_rate
self.epsilon = epsilon
self.alpha = alpha
self.C = C
self.l1_ratio = l1_ratio
self.fit_intercept = fit_intercept
self.n_iter = n_iter
self.shuffle = shuffle
self.random_state = random_state
self.verbose = verbose
self.eta0 = eta0
self.power_t = power_t
self.warm_start = warm_start
self.average = average
self._validate_params()
self.coef_ = None
if self.average > 0:
self.standard_coef_ = None
self.average_coef_ = None
# iteration count for learning rate schedule
# must not be int (e.g. if ``learning_rate=='optimal'``)
self.t_ = None
def set_params(self, *args, **kwargs):
super(BaseSGD, self).set_params(*args, **kwargs)
self._validate_params()
return self
@abstractmethod
def fit(self, X, y):
"""Fit model."""
def _validate_params(self):
"""Validate input params. """
if not isinstance(self.shuffle, bool):
raise ValueError("shuffle must be either True or False")
if self.n_iter <= 0:
raise ValueError("n_iter must be > zero")
if not (0.0 <= self.l1_ratio <= 1.0):
raise ValueError("l1_ratio must be in [0, 1]")
if self.alpha < 0.0:
raise ValueError("alpha must be >= 0")
if self.learning_rate in ("constant", "invscaling"):
if self.eta0 <= 0.0:
raise ValueError("eta0 must be > 0")
if self.learning_rate == "optimal" and self.alpha == 0:
raise ValueError("alpha must be > 0 since "
"learning_rate is 'optimal'. alpha is used "
"to compute the optimal learning rate.")
# raises ValueError if not registered
self._get_penalty_type(self.penalty)
self._get_learning_rate_type(self.learning_rate)
if self.loss not in self.loss_functions:
raise ValueError("The loss %s is not supported. " % self.loss)
def _get_loss_function(self, loss):
"""Get concrete ``LossFunction`` object for str ``loss``. """
try:
loss_ = self.loss_functions[loss]
loss_class, args = loss_[0], loss_[1:]
if loss in ('huber', 'epsilon_insensitive',
'squared_epsilon_insensitive'):
args = (self.epsilon, )
return loss_class(*args)
except KeyError:
raise ValueError("The loss %s is not supported. " % loss)
def _get_learning_rate_type(self, learning_rate):
try:
return LEARNING_RATE_TYPES[learning_rate]
except KeyError:
raise ValueError("learning rate %s "
"is not supported. " % learning_rate)
def _get_penalty_type(self, penalty):
penalty = str(penalty).lower()
try:
return PENALTY_TYPES[penalty]
except KeyError:
raise ValueError("Penalty %s is not supported. " % penalty)
def _validate_sample_weight(self, sample_weight, n_samples):
"""Set the sample weight array."""
if sample_weight is None:
# uniform sample weights
sample_weight = np.ones(n_samples, dtype=np.float64, order='C')
else:
# user-provided array
sample_weight = np.asarray(sample_weight, dtype=np.float64,
order="C")
if sample_weight.shape[0] != n_samples:
raise ValueError("Shapes of X and sample_weight do not match.")
return sample_weight
def _allocate_parameter_mem(self, n_classes, n_features, coef_init=None,
intercept_init=None):
"""Allocate mem for parameters; initialize if provided."""
if n_classes > 2:
# allocate coef_ for multi-class
if coef_init is not None:
coef_init = np.asarray(coef_init, order="C")
if coef_init.shape != (n_classes, n_features):
raise ValueError("Provided ``coef_`` does not match "
"dataset. ")
self.coef_ = coef_init
else:
self.coef_ = np.zeros((n_classes, n_features),
dtype=np.float64, order="C")
# allocate intercept_ for multi-class
if intercept_init is not None:
intercept_init = np.asarray(intercept_init, order="C")
if intercept_init.shape != (n_classes, ):
raise ValueError("Provided intercept_init "
"does not match dataset.")
self.intercept_ = intercept_init
else:
self.intercept_ = np.zeros(n_classes, dtype=np.float64,
order="C")
else:
# allocate coef_ for binary problem
if coef_init is not None:
coef_init = np.asarray(coef_init, dtype=np.float64,
order="C")
coef_init = coef_init.ravel()
if coef_init.shape != (n_features,):
raise ValueError("Provided coef_init does not "
"match dataset.")
self.coef_ = coef_init
else:
self.coef_ = np.zeros(n_features,
dtype=np.float64,
order="C")
# allocate intercept_ for binary problem
if intercept_init is not None:
intercept_init = np.asarray(intercept_init, dtype=np.float64)
if intercept_init.shape != (1,) and intercept_init.shape != ():
raise ValueError("Provided intercept_init "
"does not match dataset.")
self.intercept_ = intercept_init.reshape(1,)
else:
self.intercept_ = np.zeros(1, dtype=np.float64, order="C")
# initialize average parameters
if self.average > 0:
self.standard_coef_ = self.coef_
self.standard_intercept_ = self.intercept_
self.average_coef_ = np.zeros(self.coef_.shape,
dtype=np.float64,
order="C")
self.average_intercept_ = np.zeros(self.standard_intercept_.shape,
dtype=np.float64,
order="C")
def _prepare_fit_binary(est, y, i):
"""Initialization for fit_binary.
Returns y, coef, intercept.
"""
y_i = np.ones(y.shape, dtype=np.float64, order="C")
y_i[y != est.classes_[i]] = -1.0
average_intercept = 0
average_coef = None
if len(est.classes_) == 2:
if not est.average:
coef = est.coef_.ravel()
intercept = est.intercept_[0]
else:
coef = est.standard_coef_.ravel()
intercept = est.standard_intercept_[0]
average_coef = est.average_coef_.ravel()
average_intercept = est.average_intercept_[0]
else:
if not est.average:
coef = est.coef_[i]
intercept = est.intercept_[i]
else:
coef = est.standard_coef_[i]
intercept = est.standard_intercept_[i]
average_coef = est.average_coef_[i]
average_intercept = est.average_intercept_[i]
return y_i, coef, intercept, average_coef, average_intercept
def fit_binary(est, i, X, y, alpha, C, learning_rate, n_iter,
pos_weight, neg_weight, sample_weight):
"""Fit a single binary classifier.
The i'th class is considered the "positive" class.
"""
# if average is not true, average_coef, and average_intercept will be
# unused
y_i, coef, intercept, average_coef, average_intercept = \
_prepare_fit_binary(est, y, i)
assert y_i.shape[0] == y.shape[0] == sample_weight.shape[0]
dataset, intercept_decay = make_dataset(X, y_i, sample_weight)
penalty_type = est._get_penalty_type(est.penalty)
learning_rate_type = est._get_learning_rate_type(learning_rate)
# XXX should have random_state_!
random_state = check_random_state(est.random_state)
# numpy mtrand expects a C long which is a signed 32 bit integer under
# Windows
seed = random_state.randint(0, np.iinfo(np.int32).max)
if not est.average:
return plain_sgd(coef, intercept, est.loss_function,
penalty_type, alpha, C, est.l1_ratio,
dataset, n_iter, int(est.fit_intercept),
int(est.verbose), int(est.shuffle), seed,
pos_weight, neg_weight,
learning_rate_type, est.eta0,
est.power_t, est.t_, intercept_decay)
else:
standard_coef, standard_intercept, average_coef, \
average_intercept = average_sgd(coef, intercept, average_coef,
average_intercept,
est.loss_function, penalty_type,
alpha, C, est.l1_ratio, dataset,
n_iter, int(est.fit_intercept),
int(est.verbose), int(est.shuffle),
seed, pos_weight, neg_weight,
learning_rate_type, est.eta0,
est.power_t, est.t_,
intercept_decay,
est.average)
if len(est.classes_) == 2:
est.average_intercept_[0] = average_intercept
else:
est.average_intercept_[i] = average_intercept
return standard_coef, standard_intercept
class BaseSGDClassifier(six.with_metaclass(ABCMeta, BaseSGD,
LinearClassifierMixin)):
loss_functions = {
"hinge": (Hinge, 1.0),
"squared_hinge": (SquaredHinge, 1.0),
"perceptron": (Hinge, 0.0),
"log": (Log, ),
"modified_huber": (ModifiedHuber, ),
"squared_loss": (SquaredLoss, ),
"huber": (Huber, DEFAULT_EPSILON),
"epsilon_insensitive": (EpsilonInsensitive, DEFAULT_EPSILON),
"squared_epsilon_insensitive": (SquaredEpsilonInsensitive,
DEFAULT_EPSILON),
}
@abstractmethod
def __init__(self, loss="hinge", penalty='l2', alpha=0.0001, l1_ratio=0.15,
fit_intercept=True, n_iter=5, shuffle=True, verbose=0,
epsilon=DEFAULT_EPSILON, n_jobs=1, random_state=None,
learning_rate="optimal", eta0=0.0, power_t=0.5,
class_weight=None, warm_start=False, average=False):
super(BaseSGDClassifier, self).__init__(loss=loss, penalty=penalty,
alpha=alpha, l1_ratio=l1_ratio,
fit_intercept=fit_intercept,
n_iter=n_iter, shuffle=shuffle,
verbose=verbose,
epsilon=epsilon,
random_state=random_state,
learning_rate=learning_rate,
eta0=eta0, power_t=power_t,
warm_start=warm_start,
average=average)
self.class_weight = class_weight
self.classes_ = None
self.n_jobs = int(n_jobs)
def _partial_fit(self, X, y, alpha, C,
loss, learning_rate, n_iter,
classes, sample_weight,
coef_init, intercept_init):
X, y = check_X_y(X, y, 'csr', dtype=np.float64, order="C")
n_samples, n_features = X.shape
self._validate_params()
_check_partial_fit_first_call(self, classes)
n_classes = self.classes_.shape[0]
# Allocate datastructures from input arguments
self._expanded_class_weight = compute_class_weight(self.class_weight,
self.classes_, y)
sample_weight = self._validate_sample_weight(sample_weight, n_samples)
if self.coef_ is None or coef_init is not None:
self._allocate_parameter_mem(n_classes, n_features,
coef_init, intercept_init)
elif n_features != self.coef_.shape[-1]:
raise ValueError("Number of features %d does not match previous "
"data %d." % (n_features, self.coef_.shape[-1]))
self.loss_function = self._get_loss_function(loss)
if self.t_ is None:
self.t_ = 1.0
# delegate to concrete training procedure
if n_classes > 2:
self._fit_multiclass(X, y, alpha=alpha, C=C,
learning_rate=learning_rate,
sample_weight=sample_weight, n_iter=n_iter)
elif n_classes == 2:
self._fit_binary(X, y, alpha=alpha, C=C,
learning_rate=learning_rate,
sample_weight=sample_weight, n_iter=n_iter)
else:
raise ValueError("The number of class labels must be "
"greater than one.")
return self
def _fit(self, X, y, alpha, C, loss, learning_rate, coef_init=None,
intercept_init=None, sample_weight=None):
if hasattr(self, "classes_"):
self.classes_ = None
X, y = check_X_y(X, y, 'csr', dtype=np.float64, order="C")
n_samples, n_features = X.shape
# labels can be encoded as float, int, or string literals
# np.unique sorts in asc order; largest class id is positive class
classes = np.unique(y)
if self.warm_start and self.coef_ is not None:
if coef_init is None:
coef_init = self.coef_
if intercept_init is None:
intercept_init = self.intercept_
else:
self.coef_ = None
self.intercept_ = None
if self.average > 0:
self.standard_coef_ = self.coef_
self.standard_intercept_ = self.intercept_
self.average_coef_ = None
self.average_intercept_ = None
# Clear iteration count for multiple call to fit.
self.t_ = None
self._partial_fit(X, y, alpha, C, loss, learning_rate, self.n_iter,
classes, sample_weight, coef_init, intercept_init)
return self
def _fit_binary(self, X, y, alpha, C, sample_weight,
learning_rate, n_iter):
"""Fit a binary classifier on X and y. """
coef, intercept = fit_binary(self, 1, X, y, alpha, C,
learning_rate, n_iter,
self._expanded_class_weight[1],
self._expanded_class_weight[0],
sample_weight)
self.t_ += n_iter * X.shape[0]
# need to be 2d
if self.average > 0:
if self.average <= self.t_ - 1:
self.coef_ = self.average_coef_.reshape(1, -1)
self.intercept_ = self.average_intercept_
else:
self.coef_ = self.standard_coef_.reshape(1, -1)
self.standard_intercept_ = np.atleast_1d(intercept)
self.intercept_ = self.standard_intercept_
else:
self.coef_ = coef.reshape(1, -1)
# intercept is a float, need to convert it to an array of length 1
self.intercept_ = np.atleast_1d(intercept)
def _fit_multiclass(self, X, y, alpha, C, learning_rate,
sample_weight, n_iter):
"""Fit a multi-class classifier by combining binary classifiers
Each binary classifier predicts one class versus all others. This
strategy is called OVA: One Versus All.
"""
# Use joblib to fit OvA in parallel.
result = Parallel(n_jobs=self.n_jobs, backend="threading",
verbose=self.verbose)(
delayed(fit_binary)(self, i, X, y, alpha, C, learning_rate,
n_iter, self._expanded_class_weight[i], 1.,
sample_weight)
for i in range(len(self.classes_)))
for i, (_, intercept) in enumerate(result):
self.intercept_[i] = intercept
self.t_ += n_iter * X.shape[0]
if self.average > 0:
if self.average <= self.t_ - 1.0:
self.coef_ = self.average_coef_
self.intercept_ = self.average_intercept_
else:
self.coef_ = self.standard_coef_
self.standard_intercept_ = np.atleast_1d(self.intercept_)
self.intercept_ = self.standard_intercept_
def partial_fit(self, X, y, classes=None, sample_weight=None):
"""Fit linear model with Stochastic Gradient Descent.
Parameters
----------
X : {array-like, sparse matrix}, shape (n_samples, n_features)
Subset of the training data
y : numpy array, shape (n_samples,)
Subset of the target values
classes : array, shape (n_classes,)
Classes across all calls to partial_fit.
            Can be obtained via `np.unique(y_all)`, where y_all is the
target vector of the entire dataset.
This argument is required for the first call to partial_fit
and can be omitted in the subsequent calls.
Note that y doesn't need to contain all labels in `classes`.
sample_weight : array-like, shape (n_samples,), optional
Weights applied to individual samples.
If not provided, uniform weights are assumed.
Returns
-------
self : returns an instance of self.
"""
if self.class_weight in ['balanced', 'auto']:
raise ValueError("class_weight '{0}' is not supported for "
"partial_fit. In order to use 'balanced' weights,"
" use compute_class_weight('{0}', classes, y). "
"In place of y you can us a large enough sample "
"of the full training set target to properly "
"estimate the class frequency distributions. "
"Pass the resulting weights as the class_weight "
"parameter.".format(self.class_weight))
return self._partial_fit(X, y, alpha=self.alpha, C=1.0, loss=self.loss,
learning_rate=self.learning_rate, n_iter=1,
classes=classes, sample_weight=sample_weight,
coef_init=None, intercept_init=None)
def fit(self, X, y, coef_init=None, intercept_init=None,
sample_weight=None):
"""Fit linear model with Stochastic Gradient Descent.
Parameters
----------
X : {array-like, sparse matrix}, shape (n_samples, n_features)
Training data
y : numpy array, shape (n_samples,)
Target values
coef_init : array, shape (n_classes, n_features)
The initial coefficients to warm-start the optimization.
intercept_init : array, shape (n_classes,)
The initial intercept to warm-start the optimization.
sample_weight : array-like, shape (n_samples,), optional
Weights applied to individual samples.
If not provided, uniform weights are assumed. These weights will
be multiplied with class_weight (passed through the
constructor) if class_weight is specified
Returns
-------
self : returns an instance of self.
"""
return self._fit(X, y, alpha=self.alpha, C=1.0,
loss=self.loss, learning_rate=self.learning_rate,
coef_init=coef_init, intercept_init=intercept_init,
sample_weight=sample_weight)
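# Usage sketch for the concrete estimators defined below (synthetic data;
# SGDClassifier is re-exported from sklearn.linear_model):
#
#   import numpy as np
#   from sklearn.linear_model import SGDClassifier
#   X = np.random.randn(100, 4)
#   y = (X[:, 0] > 0).astype(int)
#   clf = SGDClassifier(loss="hinge", penalty="l2", alpha=1e-4).fit(X, y)
#   clf.predict(X[:5])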
class SGDClassifier(BaseSGDClassifier, _LearntSelectorMixin):
"""Linear classifiers (SVM, logistic regression, a.o.) with SGD training.
This estimator implements regularized linear models with stochastic
gradient descent (SGD) learning: the gradient of the loss is estimated
each sample at a time and the model is updated along the way with a
decreasing strength schedule (aka learning rate). SGD allows minibatch
(online/out-of-core) learning, see the partial_fit method.
For best results using the default learning rate schedule, the data should
have zero mean and unit variance.
This implementation works with data represented as dense or sparse arrays
of floating point values for the features. The model it fits can be
controlled with the loss parameter; by default, it fits a linear support
vector machine (SVM).
The regularizer is a penalty added to the loss function that shrinks model
parameters towards the zero vector using either the squared euclidean norm
L2 or the absolute norm L1 or a combination of both (Elastic Net). If the
parameter update crosses the 0.0 value because of the regularizer, the
update is truncated to 0.0 to allow for learning sparse models and achieve
online feature selection.
Read more in the :ref:`User Guide <sgd>`.
Parameters
----------
loss : str, 'hinge', 'log', 'modified_huber', 'squared_hinge',\
'perceptron', or a regression loss: 'squared_loss', 'huber',\
'epsilon_insensitive', or 'squared_epsilon_insensitive'
The loss function to be used. Defaults to 'hinge', which gives a
linear SVM.
The 'log' loss gives logistic regression, a probabilistic classifier.
'modified_huber' is another smooth loss that brings tolerance to
outliers as well as probability estimates.
'squared_hinge' is like hinge but is quadratically penalized.
'perceptron' is the linear loss used by the perceptron algorithm.
The other losses are designed for regression but can be useful in
classification as well; see SGDRegressor for a description.
penalty : str, 'none', 'l2', 'l1', or 'elasticnet'
The penalty (aka regularization term) to be used. Defaults to 'l2'
which is the standard regularizer for linear SVM models. 'l1' and
'elasticnet' might bring sparsity to the model (feature selection)
not achievable with 'l2'.
alpha : float
Constant that multiplies the regularization term. Defaults to 0.0001
Also used to compute learning_rate when set to 'optimal'.
l1_ratio : float
The Elastic Net mixing parameter, with 0 <= l1_ratio <= 1.
l1_ratio=0 corresponds to L2 penalty, l1_ratio=1 to L1.
Defaults to 0.15.
fit_intercept : bool
Whether the intercept should be estimated or not. If False, the
data is assumed to be already centered. Defaults to True.
n_iter : int, optional
The number of passes over the training data (aka epochs). The number
of iterations is set to 1 if using partial_fit.
Defaults to 5.
shuffle : bool, optional
Whether or not the training data should be shuffled after each epoch.
Defaults to True.
random_state : int seed, RandomState instance, or None (default)
The seed of the pseudo random number generator to use when
shuffling the data.
verbose : integer, optional
The verbosity level
epsilon : float
Epsilon in the epsilon-insensitive loss functions; only if `loss` is
'huber', 'epsilon_insensitive', or 'squared_epsilon_insensitive'.
For 'huber', determines the threshold at which it becomes less
important to get the prediction exactly right.
For epsilon-insensitive, any differences between the current prediction
and the correct label are ignored if they are less than this threshold.
n_jobs : integer, optional
The number of CPUs to use to do the OVA (One Versus All, for
multi-class problems) computation. -1 means 'all CPUs'. Defaults
to 1.
learning_rate : string, optional
The learning rate schedule:
constant: eta = eta0
optimal: eta = 1.0 / (alpha * (t + t0)) [default]
invscaling: eta = eta0 / pow(t, power_t)
where t0 is chosen by a heuristic proposed by Leon Bottou.
eta0 : double
The initial learning rate for the 'constant' or 'invscaling'
schedules. The default value is 0.0 as eta0 is not used by the
default schedule 'optimal'.
power_t : double
The exponent for inverse scaling learning rate [default 0.5].
class_weight : dict, {class_label: weight} or "balanced" or None, optional
Preset for the class_weight fit parameter.
Weights associated with classes. If not given, all classes
are supposed to have weight one.
The "balanced" mode uses the values of y to automatically adjust
weights inversely proportional to class frequencies in the input data
as ``n_samples / (n_classes * np.bincount(y))``
warm_start : bool, optional
When set to True, reuse the solution of the previous call to fit as
initialization, otherwise, just erase the previous solution.
average : bool or int, optional
When set to True, computes the averaged SGD weights and stores the
result in the ``coef_`` attribute. If set to an int greater than 1,
averaging will begin once the total number of samples seen reaches
average. So average=10 will begin averaging after seeing 10 samples.
Attributes
----------
coef_ : array, shape (1, n_features) if n_classes == 2 else (n_classes,\
n_features)
Weights assigned to the features.
intercept_ : array, shape (1,) if n_classes == 2 else (n_classes,)
Constants in decision function.
Examples
--------
>>> import numpy as np
>>> from sklearn import linear_model
>>> X = np.array([[-1, -1], [-2, -1], [1, 1], [2, 1]])
>>> Y = np.array([1, 1, 2, 2])
>>> clf = linear_model.SGDClassifier()
>>> clf.fit(X, Y)
... #doctest: +NORMALIZE_WHITESPACE
SGDClassifier(alpha=0.0001, average=False, class_weight=None, epsilon=0.1,
eta0=0.0, fit_intercept=True, l1_ratio=0.15,
learning_rate='optimal', loss='hinge', n_iter=5, n_jobs=1,
penalty='l2', power_t=0.5, random_state=None, shuffle=True,
verbose=0, warm_start=False)
>>> print(clf.predict([[-0.8, -1]]))
[1]
See also
--------
LinearSVC, LogisticRegression, Perceptron
"""
def __init__(self, loss="hinge", penalty='l2', alpha=0.0001, l1_ratio=0.15,
fit_intercept=True, n_iter=5, shuffle=True, verbose=0,
epsilon=DEFAULT_EPSILON, n_jobs=1, random_state=None,
learning_rate="optimal", eta0=0.0, power_t=0.5,
class_weight=None, warm_start=False, average=False):
super(SGDClassifier, self).__init__(
loss=loss, penalty=penalty, alpha=alpha, l1_ratio=l1_ratio,
fit_intercept=fit_intercept, n_iter=n_iter, shuffle=shuffle,
verbose=verbose, epsilon=epsilon, n_jobs=n_jobs,
random_state=random_state, learning_rate=learning_rate, eta0=eta0,
power_t=power_t, class_weight=class_weight, warm_start=warm_start,
average=average)
def _check_proba(self):
check_is_fitted(self, "t_")
if self.loss not in ("log", "modified_huber"):
raise AttributeError("probability estimates are not available for"
" loss=%r" % self.loss)
@property
def predict_proba(self):
"""Probability estimates.
This method is only available for log loss and modified Huber loss.
Multiclass probability estimates are derived from binary (one-vs.-rest)
estimates by simple normalization, as recommended by Zadrozny and
Elkan.
Binary probability estimates for loss="modified_huber" are given by
(clip(decision_function(X), -1, 1) + 1) / 2.
Parameters
----------
X : {array-like, sparse matrix}, shape (n_samples, n_features)
Returns
-------
array, shape (n_samples, n_classes)
Returns the probability of the sample for each class in the model,
where classes are ordered as they are in `self.classes_`.
References
----------
Zadrozny and Elkan, "Transforming classifier scores into multiclass
probability estimates", SIGKDD'02,
http://www.research.ibm.com/people/z/zadrozny/kdd2002-Transf.pdf
The justification for the formula in the loss="modified_huber"
case is in the appendix B in:
http://jmlr.csail.mit.edu/papers/volume2/zhang02c/zhang02c.pdf
"""
self._check_proba()
return self._predict_proba
def _predict_proba(self, X):
if self.loss == "log":
return self._predict_proba_lr(X)
elif self.loss == "modified_huber":
binary = (len(self.classes_) == 2)
scores = self.decision_function(X)
if binary:
prob2 = np.ones((scores.shape[0], 2))
prob = prob2[:, 1]
else:
prob = scores
np.clip(scores, -1, 1, prob)
prob += 1.
prob /= 2.
if binary:
prob2[:, 0] -= prob
prob = prob2
else:
# the above might assign zero to all classes, which doesn't
# normalize neatly; work around this to produce uniform
# probabilities
prob_sum = prob.sum(axis=1)
all_zero = (prob_sum == 0)
if np.any(all_zero):
prob[all_zero, :] = 1
prob_sum[all_zero] = len(self.classes_)
# normalize
prob /= prob_sum.reshape((prob.shape[0], -1))
return prob
else:
raise NotImplementedError("predict_(log_)proba only supported when"
" loss='log' or loss='modified_huber' "
"(%r given)" % self.loss)
@property
def predict_log_proba(self):
"""Log of probability estimates.
This method is only available for log loss and modified Huber loss.
When loss="modified_huber", probability estimates may be hard zeros
and ones, so taking the logarithm is not possible.
See ``predict_proba`` for details.
Parameters
----------
X : array-like, shape (n_samples, n_features)
Returns
-------
T : array-like, shape (n_samples, n_classes)
Returns the log-probability of the sample for each class in the
model, where classes are ordered as they are in
`self.classes_`.
"""
self._check_proba()
return self._predict_log_proba
def _predict_log_proba(self, X):
return np.log(self.predict_proba(X))
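# --- Illustrative sketch, not part of the original module ----------------
# The predict_proba docstring above gives the binary modified_huber mapping
# (clip(decision_function(X), -1, 1) + 1) / 2. The helper below re-derives
# that mapping on raw decision values so the formula can be checked in
# isolation; the function name is invented and this is not the estimator's
# actual code path.
def _modified_huber_proba_sketch(decision_values):
    import numpy as np
    scores = np.asarray(decision_values, dtype=float)
    prob_pos = (np.clip(scores, -1.0, 1.0) + 1.0) / 2.0   # P(y == classes_[1])
    return np.column_stack([1.0 - prob_pos, prob_pos])    # columns ordered as classes_
# e.g. _modified_huber_proba_sketch([-2.0, 0.0, 0.5])[:, 1] -> [0.0, 0.5, 0.75]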
class BaseSGDRegressor(BaseSGD, RegressorMixin):
loss_functions = {
"squared_loss": (SquaredLoss, ),
"huber": (Huber, DEFAULT_EPSILON),
"epsilon_insensitive": (EpsilonInsensitive, DEFAULT_EPSILON),
"squared_epsilon_insensitive": (SquaredEpsilonInsensitive,
DEFAULT_EPSILON),
}
@abstractmethod
def __init__(self, loss="squared_loss", penalty="l2", alpha=0.0001,
l1_ratio=0.15, fit_intercept=True, n_iter=5, shuffle=True,
verbose=0, epsilon=DEFAULT_EPSILON, random_state=None,
learning_rate="invscaling", eta0=0.01, power_t=0.25,
warm_start=False, average=False):
super(BaseSGDRegressor, self).__init__(loss=loss, penalty=penalty,
alpha=alpha, l1_ratio=l1_ratio,
fit_intercept=fit_intercept,
n_iter=n_iter, shuffle=shuffle,
verbose=verbose,
epsilon=epsilon,
random_state=random_state,
learning_rate=learning_rate,
eta0=eta0, power_t=power_t,
warm_start=warm_start,
average=average)
def _partial_fit(self, X, y, alpha, C, loss, learning_rate,
n_iter, sample_weight,
coef_init, intercept_init):
X, y = check_X_y(X, y, "csr", copy=False, order='C', dtype=np.float64)
y = astype(y, np.float64, copy=False)
n_samples, n_features = X.shape
self._validate_params()
# Allocate datastructures from input arguments
sample_weight = self._validate_sample_weight(sample_weight, n_samples)
if self.coef_ is None:
self._allocate_parameter_mem(1, n_features,
coef_init, intercept_init)
elif n_features != self.coef_.shape[-1]:
raise ValueError("Number of features %d does not match previous "
"data %d." % (n_features, self.coef_.shape[-1]))
if self.average > 0 and self.average_coef_ is None:
self.average_coef_ = np.zeros(n_features,
dtype=np.float64,
order="C")
self.average_intercept_ = np.zeros(1,
dtype=np.float64,
order="C")
self._fit_regressor(X, y, alpha, C, loss, learning_rate,
sample_weight, n_iter)
return self
def partial_fit(self, X, y, sample_weight=None):
"""Fit linear model with Stochastic Gradient Descent.
Parameters
----------
X : {array-like, sparse matrix}, shape (n_samples, n_features)
Subset of training data
y : numpy array of shape (n_samples,)
Subset of target values
sample_weight : array-like, shape (n_samples,), optional
Weights applied to individual samples.
If not provided, uniform weights are assumed.
Returns
-------
self : returns an instance of self.
"""
return self._partial_fit(X, y, self.alpha, C=1.0,
loss=self.loss,
learning_rate=self.learning_rate, n_iter=1,
sample_weight=sample_weight,
coef_init=None, intercept_init=None)
def _fit(self, X, y, alpha, C, loss, learning_rate, coef_init=None,
intercept_init=None, sample_weight=None):
if self.warm_start and self.coef_ is not None:
if coef_init is None:
coef_init = self.coef_
if intercept_init is None:
intercept_init = self.intercept_
else:
self.coef_ = None
self.intercept_ = None
if self.average > 0:
self.standard_intercept_ = self.intercept_
self.standard_coef_ = self.coef_
self.average_coef_ = None
self.average_intercept_ = None
# Clear iteration count for multiple call to fit.
self.t_ = None
return self._partial_fit(X, y, alpha, C, loss, learning_rate,
self.n_iter, sample_weight,
coef_init, intercept_init)
def fit(self, X, y, coef_init=None, intercept_init=None,
sample_weight=None):
"""Fit linear model with Stochastic Gradient Descent.
Parameters
----------
X : {array-like, sparse matrix}, shape (n_samples, n_features)
Training data
y : numpy array, shape (n_samples,)
Target values
coef_init : array, shape (n_features,)
The initial coefficients to warm-start the optimization.
intercept_init : array, shape (1,)
The initial intercept to warm-start the optimization.
sample_weight : array-like, shape (n_samples,), optional
Weights applied to individual samples (1. for unweighted).
Returns
-------
self : returns an instance of self.
"""
return self._fit(X, y, alpha=self.alpha, C=1.0,
loss=self.loss, learning_rate=self.learning_rate,
coef_init=coef_init,
intercept_init=intercept_init,
sample_weight=sample_weight)
@deprecated(" and will be removed in 0.19.")
def decision_function(self, X):
"""Predict using the linear model
Parameters
----------
X : {array-like, sparse matrix}, shape (n_samples, n_features)
Returns
-------
array, shape (n_samples,)
Predicted target values per element in X.
"""
return self._decision_function(X)
def _decision_function(self, X):
"""Predict using the linear model
Parameters
----------
X : {array-like, sparse matrix}, shape (n_samples, n_features)
Returns
-------
array, shape (n_samples,)
Predicted target values per element in X.
"""
check_is_fitted(self, ["t_", "coef_", "intercept_"], all_or_any=all)
X = check_array(X, accept_sparse='csr')
scores = safe_sparse_dot(X, self.coef_.T,
dense_output=True) + self.intercept_
return scores.ravel()
def predict(self, X):
"""Predict using the linear model
Parameters
----------
X : {array-like, sparse matrix}, shape (n_samples, n_features)
Returns
-------
array, shape (n_samples,)
Predicted target values per element in X.
"""
return self._decision_function(X)
def _fit_regressor(self, X, y, alpha, C, loss, learning_rate,
sample_weight, n_iter):
dataset, intercept_decay = make_dataset(X, y, sample_weight)
loss_function = self._get_loss_function(loss)
penalty_type = self._get_penalty_type(self.penalty)
learning_rate_type = self._get_learning_rate_type(learning_rate)
if self.t_ is None:
self.t_ = 1.0
random_state = check_random_state(self.random_state)
# numpy mtrand expects a C long which is a signed 32 bit integer under
# Windows
seed = random_state.randint(0, np.iinfo(np.int32).max)
if self.average > 0:
self.standard_coef_, self.standard_intercept_, \
self.average_coef_, self.average_intercept_ =\
average_sgd(self.standard_coef_,
self.standard_intercept_[0],
self.average_coef_,
self.average_intercept_[0],
loss_function,
penalty_type,
alpha, C,
self.l1_ratio,
dataset,
n_iter,
int(self.fit_intercept),
int(self.verbose),
int(self.shuffle),
seed,
1.0, 1.0,
learning_rate_type,
self.eta0, self.power_t, self.t_,
intercept_decay, self.average)
self.average_intercept_ = np.atleast_1d(self.average_intercept_)
self.standard_intercept_ = np.atleast_1d(self.standard_intercept_)
self.t_ += n_iter * X.shape[0]
if self.average <= self.t_ - 1.0:
self.coef_ = self.average_coef_
self.intercept_ = self.average_intercept_
else:
self.coef_ = self.standard_coef_
self.intercept_ = self.standard_intercept_
else:
self.coef_, self.intercept_ = \
plain_sgd(self.coef_,
self.intercept_[0],
loss_function,
penalty_type,
alpha, C,
self.l1_ratio,
dataset,
n_iter,
int(self.fit_intercept),
int(self.verbose),
int(self.shuffle),
seed,
1.0, 1.0,
learning_rate_type,
self.eta0, self.power_t, self.t_,
intercept_decay)
self.t_ += n_iter * X.shape[0]
self.intercept_ = np.atleast_1d(self.intercept_)
class SGDRegressor(BaseSGDRegressor, _LearntSelectorMixin):
"""Linear model fitted by minimizing a regularized empirical loss with SGD
SGD stands for Stochastic Gradient Descent: the gradient of the loss is
estimated each sample at a time and the model is updated along the way with
a decreasing strength schedule (aka learning rate).
The regularizer is a penalty added to the loss function that shrinks model
parameters towards the zero vector using either the squared euclidean norm
L2 or the absolute norm L1 or a combination of both (Elastic Net). If the
parameter update crosses the 0.0 value because of the regularizer, the
update is truncated to 0.0 to allow for learning sparse models and achieve
online feature selection.
This implementation works with data represented as dense numpy arrays of
floating point values for the features.
Read more in the :ref:`User Guide <sgd>`.
Parameters
----------
loss : str, 'squared_loss', 'huber', 'epsilon_insensitive', \
or 'squared_epsilon_insensitive'
The loss function to be used. Defaults to 'squared_loss' which refers
to the ordinary least squares fit. 'huber' modifies 'squared_loss' to
focus less on getting outliers correct by switching from squared to
linear loss past a distance of epsilon. 'epsilon_insensitive' ignores
errors less than epsilon and is linear past that; this is the loss
function used in SVR. 'squared_epsilon_insensitive' is the same but
becomes squared loss past a tolerance of epsilon.
penalty : str, 'none', 'l2', 'l1', or 'elasticnet'
The penalty (aka regularization term) to be used. Defaults to 'l2'
which is the standard regularizer for linear SVM models. 'l1' and
'elasticnet' might bring sparsity to the model (feature selection)
not achievable with 'l2'.
alpha : float
Constant that multiplies the regularization term. Defaults to 0.0001
Also used to compute learning_rate when set to 'optimal'.
l1_ratio : float
The Elastic Net mixing parameter, with 0 <= l1_ratio <= 1.
l1_ratio=0 corresponds to L2 penalty, l1_ratio=1 to L1.
Defaults to 0.15.
fit_intercept : bool
Whether the intercept should be estimated or not. If False, the
data is assumed to be already centered. Defaults to True.
n_iter : int, optional
The number of passes over the training data (aka epochs). The number
of iterations is set to 1 if using partial_fit.
Defaults to 5.
shuffle : bool, optional
Whether or not the training data should be shuffled after each epoch.
Defaults to True.
random_state : int seed, RandomState instance, or None (default)
The seed of the pseudo random number generator to use when
shuffling the data.
verbose : integer, optional
The verbosity level.
epsilon : float
Epsilon in the epsilon-insensitive loss functions; only if `loss` is
'huber', 'epsilon_insensitive', or 'squared_epsilon_insensitive'.
For 'huber', determines the threshold at which it becomes less
important to get the prediction exactly right.
For epsilon-insensitive, any differences between the current prediction
and the correct label are ignored if they are less than this threshold.
learning_rate : string, optional
The learning rate:
constant: eta = eta0
optimal: eta = 1.0/(alpha * t)
invscaling: eta = eta0 / pow(t, power_t) [default]
eta0 : double, optional
The initial learning rate [default 0.01].
power_t : double, optional
The exponent for inverse scaling learning rate [default 0.25].
warm_start : bool, optional
When set to True, reuse the solution of the previous call to fit as
initialization, otherwise, just erase the previous solution.
average : bool or int, optional
When set to True, computes the averaged SGD weights and stores the
result in the ``coef_`` attribute. If set to an int greater than 1,
averaging will begin once the total number of samples seen reaches
average. So ``average=10 will`` begin averaging after seeing 10
samples.
Attributes
----------
coef_ : array, shape (n_features,)
Weights assigned to the features.
intercept_ : array, shape (1,)
The intercept term.
average_coef_ : array, shape (n_features,)
Averaged weights assigned to the features.
average_intercept_ : array, shape (1,)
The averaged intercept term.
Examples
--------
>>> import numpy as np
>>> from sklearn import linear_model
>>> n_samples, n_features = 10, 5
>>> np.random.seed(0)
>>> y = np.random.randn(n_samples)
>>> X = np.random.randn(n_samples, n_features)
>>> clf = linear_model.SGDRegressor()
>>> clf.fit(X, y)
... #doctest: +NORMALIZE_WHITESPACE
SGDRegressor(alpha=0.0001, average=False, epsilon=0.1, eta0=0.01,
fit_intercept=True, l1_ratio=0.15, learning_rate='invscaling',
loss='squared_loss', n_iter=5, penalty='l2', power_t=0.25,
random_state=None, shuffle=True, verbose=0, warm_start=False)
See also
--------
Ridge, ElasticNet, Lasso, SVR
"""
def __init__(self, loss="squared_loss", penalty="l2", alpha=0.0001,
l1_ratio=0.15, fit_intercept=True, n_iter=5, shuffle=True,
verbose=0, epsilon=DEFAULT_EPSILON, random_state=None,
learning_rate="invscaling", eta0=0.01, power_t=0.25,
warm_start=False, average=False):
super(SGDRegressor, self).__init__(loss=loss, penalty=penalty,
alpha=alpha, l1_ratio=l1_ratio,
fit_intercept=fit_intercept,
n_iter=n_iter, shuffle=shuffle,
verbose=verbose,
epsilon=epsilon,
random_state=random_state,
learning_rate=learning_rate,
eta0=eta0, power_t=power_t,
warm_start=warm_start,
average=average)
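# --- Illustrative sketch, not part of the original module ----------------
# Minimal out-of-core use of the partial_fit API documented above: the full
# set of class labels must be supplied on the first call, after which
# arbitrary mini-batches can be streamed in. `batches` and `all_classes`
# are hypothetical names introduced only for this sketch.
def _partial_fit_sketch(batches, all_classes):
    clf = SGDClassifier(loss="log")          # log loss -> predict_proba is available
    first = True
    for X_batch, y_batch in batches:
        if first:
            clf.partial_fit(X_batch, y_batch, classes=all_classes)
            first = False
        else:
            clf.partial_fit(X_batch, y_batch)
    return clf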
|
bsd-3-clause
|
jreback/pandas
|
pandas/tests/plotting/test_series.py
|
2
|
29261
|
""" Test cases for Series.plot """
from datetime import datetime
from itertools import chain
import numpy as np
import pytest
import pandas.util._test_decorators as td
import pandas as pd
from pandas import DataFrame, Series, date_range
import pandas._testing as tm
from pandas.tests.plotting.common import TestPlotBase, _check_plot_works
import pandas.plotting as plotting
pytestmark = pytest.mark.slow
@td.skip_if_no_mpl
class TestSeriesPlots(TestPlotBase):
def setup_method(self, method):
TestPlotBase.setup_method(self, method)
import matplotlib as mpl
mpl.rcdefaults()
self.ts = tm.makeTimeSeries()
self.ts.name = "ts"
self.series = tm.makeStringSeries()
self.series.name = "series"
self.iseries = tm.makePeriodSeries()
self.iseries.name = "iseries"
def test_plot(self):
_check_plot_works(self.ts.plot, label="foo")
_check_plot_works(self.ts.plot, use_index=False)
axes = _check_plot_works(self.ts.plot, rot=0)
self._check_ticks_props(axes, xrot=0)
ax = _check_plot_works(self.ts.plot, style=".", logy=True)
self._check_ax_scales(ax, yaxis="log")
ax = _check_plot_works(self.ts.plot, style=".", logx=True)
self._check_ax_scales(ax, xaxis="log")
ax = _check_plot_works(self.ts.plot, style=".", loglog=True)
self._check_ax_scales(ax, xaxis="log", yaxis="log")
_check_plot_works(self.ts[:10].plot.bar)
_check_plot_works(self.ts.plot.area, stacked=False)
_check_plot_works(self.iseries.plot)
for kind in ["line", "bar", "barh", "kde", "hist", "box"]:
_check_plot_works(self.series[:5].plot, kind=kind)
_check_plot_works(self.series[:10].plot.barh)
ax = _check_plot_works(Series(np.random.randn(10)).plot.bar, color="black")
self._check_colors([ax.patches[0]], facecolors=["black"])
# GH 6951
ax = _check_plot_works(self.ts.plot, subplots=True)
self._check_axes_shape(ax, axes_num=1, layout=(1, 1))
ax = _check_plot_works(self.ts.plot, subplots=True, layout=(-1, 1))
self._check_axes_shape(ax, axes_num=1, layout=(1, 1))
ax = _check_plot_works(self.ts.plot, subplots=True, layout=(1, -1))
self._check_axes_shape(ax, axes_num=1, layout=(1, 1))
def test_plot_figsize_and_title(self):
# figsize and title
_, ax = self.plt.subplots()
ax = self.series.plot(title="Test", figsize=(16, 8), ax=ax)
self._check_text_labels(ax.title, "Test")
self._check_axes_shape(ax, axes_num=1, layout=(1, 1), figsize=(16, 8))
def test_dont_modify_rcParams(self):
# GH 8242
key = "axes.prop_cycle"
colors = self.plt.rcParams[key]
_, ax = self.plt.subplots()
Series([1, 2, 3]).plot(ax=ax)
assert colors == self.plt.rcParams[key]
def test_ts_line_lim(self):
fig, ax = self.plt.subplots()
ax = self.ts.plot(ax=ax)
xmin, xmax = ax.get_xlim()
lines = ax.get_lines()
assert xmin <= lines[0].get_data(orig=False)[0][0]
assert xmax >= lines[0].get_data(orig=False)[0][-1]
tm.close()
ax = self.ts.plot(secondary_y=True, ax=ax)
xmin, xmax = ax.get_xlim()
lines = ax.get_lines()
assert xmin <= lines[0].get_data(orig=False)[0][0]
assert xmax >= lines[0].get_data(orig=False)[0][-1]
def test_ts_area_lim(self):
_, ax = self.plt.subplots()
ax = self.ts.plot.area(stacked=False, ax=ax)
xmin, xmax = ax.get_xlim()
line = ax.get_lines()[0].get_data(orig=False)[0]
assert xmin <= line[0]
assert xmax >= line[-1]
self._check_ticks_props(ax, xrot=0)
tm.close()
# GH 7471
_, ax = self.plt.subplots()
ax = self.ts.plot.area(stacked=False, x_compat=True, ax=ax)
xmin, xmax = ax.get_xlim()
line = ax.get_lines()[0].get_data(orig=False)[0]
assert xmin <= line[0]
assert xmax >= line[-1]
self._check_ticks_props(ax, xrot=30)
tm.close()
tz_ts = self.ts.copy()
tz_ts.index = tz_ts.tz_localize("GMT").tz_convert("CET")
_, ax = self.plt.subplots()
ax = tz_ts.plot.area(stacked=False, x_compat=True, ax=ax)
xmin, xmax = ax.get_xlim()
line = ax.get_lines()[0].get_data(orig=False)[0]
assert xmin <= line[0]
assert xmax >= line[-1]
self._check_ticks_props(ax, xrot=0)
tm.close()
_, ax = self.plt.subplots()
ax = tz_ts.plot.area(stacked=False, secondary_y=True, ax=ax)
xmin, xmax = ax.get_xlim()
line = ax.get_lines()[0].get_data(orig=False)[0]
assert xmin <= line[0]
assert xmax >= line[-1]
self._check_ticks_props(ax, xrot=0)
def test_area_sharey_dont_overwrite(self):
# GH37942
fig, (ax1, ax2) = self.plt.subplots(1, 2, sharey=True)
abs(self.ts).plot(ax=ax1, kind="area")
abs(self.ts).plot(ax=ax2, kind="area")
assert ax1._shared_y_axes.joined(ax1, ax2)
assert ax2._shared_y_axes.joined(ax1, ax2)
def test_label(self):
s = Series([1, 2])
_, ax = self.plt.subplots()
ax = s.plot(label="LABEL", legend=True, ax=ax)
self._check_legend_labels(ax, labels=["LABEL"])
self.plt.close()
_, ax = self.plt.subplots()
ax = s.plot(legend=True, ax=ax)
self._check_legend_labels(ax, labels=["None"])
self.plt.close()
# get label from the Series name
s.name = "NAME"
_, ax = self.plt.subplots()
ax = s.plot(legend=True, ax=ax)
self._check_legend_labels(ax, labels=["NAME"])
self.plt.close()
# override the default
_, ax = self.plt.subplots()
ax = s.plot(legend=True, label="LABEL", ax=ax)
self._check_legend_labels(ax, labels=["LABEL"])
self.plt.close()
# Add label info, but don't draw
_, ax = self.plt.subplots()
ax = s.plot(legend=False, label="LABEL", ax=ax)
assert ax.get_legend() is None # Hasn't been drawn
ax.legend() # draw it
self._check_legend_labels(ax, labels=["LABEL"])
def test_boolean(self):
# GH 23719
s = Series([False, False, True])
_check_plot_works(s.plot, include_bool=True)
msg = "no numeric data to plot"
with pytest.raises(TypeError, match=msg):
_check_plot_works(s.plot)
def test_line_area_nan_series(self):
values = [1, 2, np.nan, 3]
s = Series(values)
ts = Series(values, index=tm.makeDateIndex(k=4))
for d in [s, ts]:
ax = _check_plot_works(d.plot)
masked = ax.lines[0].get_ydata()
# remove nan for comparison purpose
exp = np.array([1, 2, 3], dtype=np.float64)
tm.assert_numpy_array_equal(np.delete(masked.data, 2), exp)
tm.assert_numpy_array_equal(
masked.mask, np.array([False, False, True, False])
)
expected = np.array([1, 2, 0, 3], dtype=np.float64)
ax = _check_plot_works(d.plot, stacked=True)
tm.assert_numpy_array_equal(ax.lines[0].get_ydata(), expected)
ax = _check_plot_works(d.plot.area)
tm.assert_numpy_array_equal(ax.lines[0].get_ydata(), expected)
ax = _check_plot_works(d.plot.area, stacked=False)
tm.assert_numpy_array_equal(ax.lines[0].get_ydata(), expected)
def test_line_use_index_false(self):
s = Series([1, 2, 3], index=["a", "b", "c"])
s.index.name = "The Index"
_, ax = self.plt.subplots()
ax = s.plot(use_index=False, ax=ax)
label = ax.get_xlabel()
assert label == ""
_, ax = self.plt.subplots()
ax2 = s.plot.bar(use_index=False, ax=ax)
label2 = ax2.get_xlabel()
assert label2 == ""
def test_bar_log(self):
expected = np.array([1e-1, 1e0, 1e1, 1e2, 1e3, 1e4])
_, ax = self.plt.subplots()
ax = Series([200, 500]).plot.bar(log=True, ax=ax)
tm.assert_numpy_array_equal(ax.yaxis.get_ticklocs(), expected)
tm.close()
_, ax = self.plt.subplots()
ax = Series([200, 500]).plot.barh(log=True, ax=ax)
tm.assert_numpy_array_equal(ax.xaxis.get_ticklocs(), expected)
tm.close()
# GH 9905
expected = np.array([1e-5, 1e-4, 1e-3, 1e-2, 1e-1, 1e0, 1e1])
_, ax = self.plt.subplots()
ax = Series([0.1, 0.01, 0.001]).plot(log=True, kind="bar", ax=ax)
ymin = 0.0007943282347242822
ymax = 0.12589254117941673
res = ax.get_ylim()
tm.assert_almost_equal(res[0], ymin)
tm.assert_almost_equal(res[1], ymax)
tm.assert_numpy_array_equal(ax.yaxis.get_ticklocs(), expected)
tm.close()
_, ax = self.plt.subplots()
ax = Series([0.1, 0.01, 0.001]).plot(log=True, kind="barh", ax=ax)
res = ax.get_xlim()
tm.assert_almost_equal(res[0], ymin)
tm.assert_almost_equal(res[1], ymax)
tm.assert_numpy_array_equal(ax.xaxis.get_ticklocs(), expected)
def test_bar_ignore_index(self):
df = Series([1, 2, 3, 4], index=["a", "b", "c", "d"])
_, ax = self.plt.subplots()
ax = df.plot.bar(use_index=False, ax=ax)
self._check_text_labels(ax.get_xticklabels(), ["0", "1", "2", "3"])
def test_bar_user_colors(self):
s = Series([1, 2, 3, 4])
ax = s.plot.bar(color=["red", "blue", "blue", "red"])
result = [p.get_facecolor() for p in ax.patches]
expected = [
(1.0, 0.0, 0.0, 1.0),
(0.0, 0.0, 1.0, 1.0),
(0.0, 0.0, 1.0, 1.0),
(1.0, 0.0, 0.0, 1.0),
]
assert result == expected
def test_rotation(self):
df = DataFrame(np.random.randn(5, 5))
# Default rot 0
_, ax = self.plt.subplots()
axes = df.plot(ax=ax)
self._check_ticks_props(axes, xrot=0)
_, ax = self.plt.subplots()
axes = df.plot(rot=30, ax=ax)
self._check_ticks_props(axes, xrot=30)
def test_irregular_datetime(self):
from pandas.plotting._matplotlib.converter import DatetimeConverter
rng = date_range("1/1/2000", "3/1/2000")
rng = rng[[0, 1, 2, 3, 5, 9, 10, 11, 12]]
ser = Series(np.random.randn(len(rng)), rng)
_, ax = self.plt.subplots()
ax = ser.plot(ax=ax)
xp = DatetimeConverter.convert(datetime(1999, 1, 1), "", ax)
ax.set_xlim("1/1/1999", "1/1/2001")
assert xp == ax.get_xlim()[0]
self._check_ticks_props(ax, xrot=30)
def test_unsorted_index_xlim(self):
ser = Series(
[0.0, 1.0, np.nan, 3.0, 4.0, 5.0, 6.0],
index=[1.0, 0.0, 3.0, 2.0, np.nan, 3.0, 2.0],
)
_, ax = self.plt.subplots()
ax = ser.plot(ax=ax)
xmin, xmax = ax.get_xlim()
lines = ax.get_lines()
assert xmin <= np.nanmin(lines[0].get_data(orig=False)[0])
assert xmax >= np.nanmax(lines[0].get_data(orig=False)[0])
def test_pie_series(self):
# if the sum of values is less than 1.0, pie handles them as rates and draws a
# semicircle.
series = Series(
np.random.randint(1, 5), index=["a", "b", "c", "d", "e"], name="YLABEL"
)
ax = _check_plot_works(series.plot.pie)
self._check_text_labels(ax.texts, series.index)
assert ax.get_ylabel() == "YLABEL"
# without wedge labels
ax = _check_plot_works(series.plot.pie, labels=None)
self._check_text_labels(ax.texts, [""] * 5)
# with fewer colors than elements
color_args = ["r", "g", "b"]
ax = _check_plot_works(series.plot.pie, colors=color_args)
color_expected = ["r", "g", "b", "r", "g"]
self._check_colors(ax.patches, facecolors=color_expected)
# with labels and colors
labels = ["A", "B", "C", "D", "E"]
color_args = ["r", "g", "b", "c", "m"]
ax = _check_plot_works(series.plot.pie, labels=labels, colors=color_args)
self._check_text_labels(ax.texts, labels)
self._check_colors(ax.patches, facecolors=color_args)
# with autopct and fontsize
ax = _check_plot_works(
series.plot.pie, colors=color_args, autopct="%.2f", fontsize=7
)
pcts = [f"{s*100:.2f}" for s in series.values / series.sum()]
expected_texts = list(chain.from_iterable(zip(series.index, pcts)))
self._check_text_labels(ax.texts, expected_texts)
for t in ax.texts:
assert t.get_fontsize() == 7
# includes negative value
series = Series([1, 2, 0, 4, -1], index=["a", "b", "c", "d", "e"])
with pytest.raises(ValueError, match="pie plot doesn't allow negative values"):
series.plot.pie()
# includes nan
series = Series([1, 2, np.nan, 4], index=["a", "b", "c", "d"], name="YLABEL")
ax = _check_plot_works(series.plot.pie)
self._check_text_labels(ax.texts, ["a", "b", "", "d"])
def test_pie_nan(self):
s = Series([1, np.nan, 1, 1])
_, ax = self.plt.subplots()
ax = s.plot.pie(legend=True, ax=ax)
expected = ["0", "", "2", "3"]
result = [x.get_text() for x in ax.texts]
assert result == expected
def test_df_series_secondary_legend(self):
# GH 9779
df = DataFrame(np.random.randn(30, 3), columns=list("abc"))
s = Series(np.random.randn(30), name="x")
# primary -> secondary (without passing ax)
_, ax = self.plt.subplots()
ax = df.plot(ax=ax)
s.plot(legend=True, secondary_y=True, ax=ax)
# both legends are drawn on left ax
# left and right axis must be visible
self._check_legend_labels(ax, labels=["a", "b", "c", "x (right)"])
assert ax.get_yaxis().get_visible()
assert ax.right_ax.get_yaxis().get_visible()
tm.close()
# primary -> secondary (with passing ax)
_, ax = self.plt.subplots()
ax = df.plot(ax=ax)
s.plot(ax=ax, legend=True, secondary_y=True)
# both legends are drawn on left ax
# left and right axis must be visible
self._check_legend_labels(ax, labels=["a", "b", "c", "x (right)"])
assert ax.get_yaxis().get_visible()
assert ax.right_ax.get_yaxis().get_visible()
tm.close()
# secondary -> secondary (without passing ax)
_, ax = self.plt.subplots()
ax = df.plot(secondary_y=True, ax=ax)
s.plot(legend=True, secondary_y=True, ax=ax)
# both legends are drawn on left ax
# left axis must be invisible and right axis must be visible
expected = ["a (right)", "b (right)", "c (right)", "x (right)"]
self._check_legend_labels(ax.left_ax, labels=expected)
assert not ax.left_ax.get_yaxis().get_visible()
assert ax.get_yaxis().get_visible()
tm.close()
# secondary -> secondary (with passing ax)
_, ax = self.plt.subplots()
ax = df.plot(secondary_y=True, ax=ax)
s.plot(ax=ax, legend=True, secondary_y=True)
# both legends are drawn on left ax
# left axis must be invisible and right axis must be visible
expected = ["a (right)", "b (right)", "c (right)", "x (right)"]
self._check_legend_labels(ax.left_ax, expected)
assert not ax.left_ax.get_yaxis().get_visible()
assert ax.get_yaxis().get_visible()
tm.close()
# secondary -> secondary (with passing ax)
_, ax = self.plt.subplots()
ax = df.plot(secondary_y=True, mark_right=False, ax=ax)
s.plot(ax=ax, legend=True, secondary_y=True)
# both legends are drawn on left ax
# left axis must be invisible and right axis must be visible
expected = ["a", "b", "c", "x (right)"]
self._check_legend_labels(ax.left_ax, expected)
assert not ax.left_ax.get_yaxis().get_visible()
assert ax.get_yaxis().get_visible()
tm.close()
@pytest.mark.parametrize(
"input_logy, expected_scale", [(True, "log"), ("sym", "symlog")]
)
def test_secondary_logy(self, input_logy, expected_scale):
# GH 25545
s1 = Series(np.random.randn(30))
s2 = Series(np.random.randn(30))
# GH 24980
ax1 = s1.plot(logy=input_logy)
ax2 = s2.plot(secondary_y=True, logy=input_logy)
assert ax1.get_yscale() == expected_scale
assert ax2.get_yscale() == expected_scale
def test_plot_fails_with_dupe_color_and_style(self):
x = Series(np.random.randn(2))
_, ax = self.plt.subplots()
msg = (
"Cannot pass 'style' string with a color symbol and 'color' keyword "
"argument. Please use one or the other or pass 'style' without a color "
"symbol"
)
with pytest.raises(ValueError, match=msg):
x.plot(style="k--", color="k", ax=ax)
@td.skip_if_no_scipy
def test_kde_kwargs(self):
sample_points = np.linspace(-100, 100, 20)
_check_plot_works(self.ts.plot.kde, bw_method="scott", ind=20)
_check_plot_works(self.ts.plot.kde, bw_method=None, ind=20)
_check_plot_works(self.ts.plot.kde, bw_method=None, ind=np.int_(20))
_check_plot_works(self.ts.plot.kde, bw_method=0.5, ind=sample_points)
_check_plot_works(self.ts.plot.density, bw_method=0.5, ind=sample_points)
_, ax = self.plt.subplots()
ax = self.ts.plot.kde(logy=True, bw_method=0.5, ind=sample_points, ax=ax)
self._check_ax_scales(ax, yaxis="log")
self._check_text_labels(ax.yaxis.get_label(), "Density")
@td.skip_if_no_scipy
def test_kde_missing_vals(self):
s = Series(np.random.uniform(size=50))
s[0] = np.nan
axes = _check_plot_works(s.plot.kde)
# gh-14821: plotted values should not be all-NaN when the input has missing values
assert any(~np.isnan(axes.lines[0].get_xdata()))
def test_boxplot_series(self):
_, ax = self.plt.subplots()
ax = self.ts.plot.box(logy=True, ax=ax)
self._check_ax_scales(ax, yaxis="log")
xlabels = ax.get_xticklabels()
self._check_text_labels(xlabels, [self.ts.name])
ylabels = ax.get_yticklabels()
self._check_text_labels(ylabels, [""] * len(ylabels))
def test_kind_both_ways(self):
s = Series(range(3))
kinds = (
plotting.PlotAccessor._common_kinds + plotting.PlotAccessor._series_kinds
)
for kind in kinds:
_, ax = self.plt.subplots()
s.plot(kind=kind, ax=ax)
self.plt.close()
_, ax = self.plt.subplots()
getattr(s.plot, kind)()
self.plt.close()
def test_invalid_plot_data(self):
s = Series(list("abcd"))
_, ax = self.plt.subplots()
for kind in plotting.PlotAccessor._common_kinds:
msg = "no numeric data to plot"
with pytest.raises(TypeError, match=msg):
s.plot(kind=kind, ax=ax)
def test_valid_object_plot(self):
s = Series(range(10), dtype=object)
for kind in plotting.PlotAccessor._common_kinds:
_check_plot_works(s.plot, kind=kind)
def test_partially_invalid_plot_data(self):
s = Series(["a", "b", 1.0, 2])
_, ax = self.plt.subplots()
for kind in plotting.PlotAccessor._common_kinds:
msg = "no numeric data to plot"
with pytest.raises(TypeError, match=msg):
s.plot(kind=kind, ax=ax)
def test_invalid_kind(self):
s = Series([1, 2])
with pytest.raises(ValueError, match="invalid_kind is not a valid plot kind"):
s.plot(kind="invalid_kind")
def test_dup_datetime_index_plot(self):
dr1 = date_range("1/1/2009", periods=4)
dr2 = date_range("1/2/2009", periods=4)
index = dr1.append(dr2)
values = np.random.randn(index.size)
s = Series(values, index=index)
_check_plot_works(s.plot)
def test_errorbar_asymmetrical(self):
# GH9536
s = Series(np.arange(10), name="x")
err = np.random.rand(2, 10)
ax = s.plot(yerr=err, xerr=err)
result = np.vstack([i.vertices[:, 1] for i in ax.collections[1].get_paths()])
expected = (err.T * np.array([-1, 1])) + s.to_numpy().reshape(-1, 1)
tm.assert_numpy_array_equal(result, expected)
msg = (
"Asymmetrical error bars should be provided "
f"with the shape \\(2, {len(s)}\\)"
)
with pytest.raises(ValueError, match=msg):
s.plot(yerr=np.random.rand(2, 11))
tm.close()
def test_errorbar_plot(self):
s = Series(np.arange(10), name="x")
s_err = np.random.randn(10)
d_err = DataFrame(np.random.randn(10, 2), index=s.index, columns=["x", "y"])
# test line and bar plots
kinds = ["line", "bar"]
for kind in kinds:
ax = _check_plot_works(s.plot, yerr=Series(s_err), kind=kind)
self._check_has_errorbars(ax, xerr=0, yerr=1)
ax = _check_plot_works(s.plot, yerr=s_err, kind=kind)
self._check_has_errorbars(ax, xerr=0, yerr=1)
ax = _check_plot_works(s.plot, yerr=s_err.tolist(), kind=kind)
self._check_has_errorbars(ax, xerr=0, yerr=1)
ax = _check_plot_works(s.plot, yerr=d_err, kind=kind)
self._check_has_errorbars(ax, xerr=0, yerr=1)
ax = _check_plot_works(s.plot, xerr=0.2, yerr=0.2, kind=kind)
self._check_has_errorbars(ax, xerr=1, yerr=1)
ax = _check_plot_works(s.plot, xerr=s_err)
self._check_has_errorbars(ax, xerr=1, yerr=0)
# test time series plotting
ix = date_range("1/1/2000", "1/1/2001", freq="M")
ts = Series(np.arange(12), index=ix, name="x")
ts_err = Series(np.random.randn(12), index=ix)
td_err = DataFrame(np.random.randn(12, 2), index=ix, columns=["x", "y"])
ax = _check_plot_works(ts.plot, yerr=ts_err)
self._check_has_errorbars(ax, xerr=0, yerr=1)
ax = _check_plot_works(ts.plot, yerr=td_err)
self._check_has_errorbars(ax, xerr=0, yerr=1)
# check incorrect lengths and types
with tm.external_error_raised(ValueError):
s.plot(yerr=np.arange(11))
s_err = ["zzz"] * 10
with tm.external_error_raised(TypeError):
s.plot(yerr=s_err)
def test_table(self):
_check_plot_works(self.series.plot, table=True)
_check_plot_works(self.series.plot, table=self.series)
def test_series_grid_settings(self):
# Make sure plot defaults to rcParams['axes.grid'] setting, GH 9792
self._check_grid_settings(
Series([1, 2, 3]),
plotting.PlotAccessor._series_kinds + plotting.PlotAccessor._common_kinds,
)
def test_standard_colors(self):
from pandas.plotting._matplotlib.style import get_standard_colors
for c in ["r", "red", "green", "#FF0000"]:
result = get_standard_colors(1, color=c)
assert result == [c]
result = get_standard_colors(1, color=[c])
assert result == [c]
result = get_standard_colors(3, color=c)
assert result == [c] * 3
result = get_standard_colors(3, color=[c])
assert result == [c] * 3
def test_standard_colors_all(self):
import matplotlib.colors as colors
from pandas.plotting._matplotlib.style import get_standard_colors
# multiple colors like mediumaquamarine
for c in colors.cnames:
result = get_standard_colors(num_colors=1, color=c)
assert result == [c]
result = get_standard_colors(num_colors=1, color=[c])
assert result == [c]
result = get_standard_colors(num_colors=3, color=c)
assert result == [c] * 3
result = get_standard_colors(num_colors=3, color=[c])
assert result == [c] * 3
# single letter colors like k
for c in colors.ColorConverter.colors:
result = get_standard_colors(num_colors=1, color=c)
assert result == [c]
result = get_standard_colors(num_colors=1, color=[c])
assert result == [c]
result = get_standard_colors(num_colors=3, color=c)
assert result == [c] * 3
result = get_standard_colors(num_colors=3, color=[c])
assert result == [c] * 3
def test_series_plot_color_kwargs(self):
# GH1890
_, ax = self.plt.subplots()
ax = Series(np.arange(12) + 1).plot(color="green", ax=ax)
self._check_colors(ax.get_lines(), linecolors=["green"])
def test_time_series_plot_color_kwargs(self):
# #1890
_, ax = self.plt.subplots()
ax = Series(np.arange(12) + 1, index=date_range("1/1/2000", periods=12)).plot(
color="green", ax=ax
)
self._check_colors(ax.get_lines(), linecolors=["green"])
def test_time_series_plot_color_with_empty_kwargs(self):
import matplotlib as mpl
def_colors = self._unpack_cycler(mpl.rcParams)
index = date_range("1/1/2000", periods=12)
s = Series(np.arange(1, 13), index=index)
ncolors = 3
_, ax = self.plt.subplots()
for i in range(ncolors):
ax = s.plot(ax=ax)
self._check_colors(ax.get_lines(), linecolors=def_colors[:ncolors])
def test_xticklabels(self):
# GH11529
s = Series(np.arange(10), index=[f"P{i:02d}" for i in range(10)])
_, ax = self.plt.subplots()
ax = s.plot(xticks=[0, 3, 5, 9], ax=ax)
exp = [f"P{i:02d}" for i in [0, 3, 5, 9]]
self._check_text_labels(ax.get_xticklabels(), exp)
def test_xtick_barPlot(self):
# GH28172
s = Series(range(10), index=[f"P{i:02d}" for i in range(10)])
ax = s.plot.bar(xticks=range(0, 11, 2))
exp = np.array(list(range(0, 11, 2)))
tm.assert_numpy_array_equal(exp, ax.get_xticks())
def test_custom_business_day_freq(self):
# GH7222
from pandas.tseries.offsets import CustomBusinessDay
s = Series(
range(100, 121),
index=pd.bdate_range(
start="2014-05-01",
end="2014-06-01",
freq=CustomBusinessDay(holidays=["2014-05-26"]),
),
)
_check_plot_works(s.plot)
@pytest.mark.xfail
def test_plot_accessor_updates_on_inplace(self):
s = Series([1, 2, 3, 4])
_, ax = self.plt.subplots()
ax = s.plot(ax=ax)
before = ax.xaxis.get_ticklocs()
s.drop([0, 1], inplace=True)
_, ax = self.plt.subplots()
after = ax.xaxis.get_ticklocs()
tm.assert_numpy_array_equal(before, after)
@pytest.mark.parametrize("kind", ["line", "area"])
def test_plot_xlim_for_series(self, kind):
# test if xlim is also correctly plotted in Series for line and area
# GH 27686
s = Series([2, 3])
_, ax = self.plt.subplots()
s.plot(kind=kind, ax=ax)
xlims = ax.get_xlim()
assert xlims[0] < 0
assert xlims[1] > 1
def test_plot_no_rows(self):
# GH 27758
df = Series(dtype=int)
assert df.empty
ax = df.plot()
assert len(ax.get_lines()) == 1
line = ax.get_lines()[0]
assert len(line.get_xdata()) == 0
assert len(line.get_ydata()) == 0
def test_plot_no_numeric_data(self):
df = Series(["a", "b", "c"])
with pytest.raises(TypeError, match="no numeric data to plot"):
df.plot()
def test_style_single_ok(self):
s = Series([1, 2])
ax = s.plot(style="s", color="C3")
assert ax.lines[0].get_color() == "C3"
@pytest.mark.parametrize(
"index_name, old_label, new_label",
[(None, "", "new"), ("old", "old", "new"), (None, "", "")],
)
@pytest.mark.parametrize("kind", ["line", "area", "bar"])
def test_xlabel_ylabel_series(self, kind, index_name, old_label, new_label):
# GH 9093
ser = Series([1, 2, 3, 4])
ser.index.name = index_name
# by default the ylabel is not shown and the xlabel is the index name
ax = ser.plot(kind=kind)
assert ax.get_ylabel() == ""
assert ax.get_xlabel() == old_label
# old xlabel will be overridden and the assigned ylabel will be used as ylabel
ax = ser.plot(kind=kind, ylabel=new_label, xlabel=new_label)
assert ax.get_ylabel() == new_label
assert ax.get_xlabel() == new_label
@pytest.mark.parametrize(
"index",
[
pd.timedelta_range(start=0, periods=2, freq="D"),
[pd.Timedelta(days=1), pd.Timedelta(days=2)],
],
)
def test_timedelta_index(self, index):
# GH37454
xlims = (3, 1)
ax = Series([1, 2], index=index).plot(xlim=(xlims))
assert ax.get_xlim() == (3, 1)
|
bsd-3-clause
|
andyraib/data-storage
|
python_scripts/env/lib/python3.6/site-packages/pandas/tests/test_msgpack/test_extension.py
|
9
|
2160
|
from __future__ import print_function
import array
import pandas.msgpack as msgpack
from pandas.msgpack import ExtType
def test_pack_ext_type():
def p(s):
packer = msgpack.Packer()
packer.pack_ext_type(0x42, s)
return packer.bytes()
assert p(b'A') == b'\xd4\x42A' # fixext 1
assert p(b'AB') == b'\xd5\x42AB' # fixext 2
assert p(b'ABCD') == b'\xd6\x42ABCD' # fixext 4
assert p(b'ABCDEFGH') == b'\xd7\x42ABCDEFGH' # fixext 8
assert p(b'A' * 16) == b'\xd8\x42' + b'A' * 16 # fixext 16
assert p(b'ABC') == b'\xc7\x03\x42ABC' # ext 8
assert p(b'A' * 0x0123) == b'\xc8\x01\x23\x42' + b'A' * 0x0123 # ext 16
assert (p(b'A' * 0x00012345) ==
b'\xc9\x00\x01\x23\x45\x42' + b'A' * 0x00012345) # ext 32
def test_unpack_ext_type():
def check(b, expected):
assert msgpack.unpackb(b) == expected
check(b'\xd4\x42A', ExtType(0x42, b'A')) # fixext 1
check(b'\xd5\x42AB', ExtType(0x42, b'AB')) # fixext 2
check(b'\xd6\x42ABCD', ExtType(0x42, b'ABCD')) # fixext 4
check(b'\xd7\x42ABCDEFGH', ExtType(0x42, b'ABCDEFGH')) # fixext 8
check(b'\xd8\x42' + b'A' * 16, ExtType(0x42, b'A' * 16)) # fixext 16
check(b'\xc7\x03\x42ABC', ExtType(0x42, b'ABC')) # ext 8
check(b'\xc8\x01\x23\x42' + b'A' * 0x0123,
ExtType(0x42, b'A' * 0x0123)) # ext 16
check(b'\xc9\x00\x01\x23\x45\x42' + b'A' * 0x00012345,
ExtType(0x42, b'A' * 0x00012345)) # ext 32
def test_extension_type():
def default(obj):
print('default called', obj)
if isinstance(obj, array.array):
typecode = 123 # application specific typecode
data = obj.tostring()
return ExtType(typecode, data)
raise TypeError("Unknwon type object %r" % (obj, ))
def ext_hook(code, data):
print('ext_hook called', code, data)
assert code == 123
obj = array.array('d')
obj.fromstring(data)
return obj
obj = [42, b'hello', array.array('d', [1.1, 2.2, 3.3])]
s = msgpack.packb(obj, default=default)
obj2 = msgpack.unpackb(s, ext_hook=ext_hook)
assert obj == obj2
|
apache-2.0
|
StudTeam6/competition
|
sw/misc/attitude_reference/pat/utils.py
|
42
|
6283
|
#
# Copyright 2013-2014 Antoine Drouin ([email protected])
#
# This file is part of PAT.
#
# PAT is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# PAT is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with PAT. If not, see <http://www.gnu.org/licenses/>.
#
"""
Utility functions
"""
import math
import numpy as np
import numpy.linalg as linalg
import pdb
"""
Unit conversions
"""
def rad_of_deg(d):
return d / 180. * math.pi
def sqrad_of_sqdeg(d):
return d / (180. * math.pi) ** 2
def deg_of_rad(r):
return r * 180. / math.pi
def sqdeg_of_sqrad(r):
return r * (180. / math.pi) ** 2
def rps_of_rpm(r):
return r * 2. * math.pi / 60.
def rpm_of_rps(r):
return r / 2. / math.pi * 60.
# http://en.wikipedia.org/wiki/Nautical_mile
def m_of_NM(nm):
return nm * 1852.
def NM_of_m(m):
return m / 1852.
# http://en.wikipedia.org/wiki/Knot_(speed)
def mps_of_kt(kt):
return kt * 0.514444
def kt_of_mps(mps):
return mps / 0.514444
# http://en.wikipedia.org/wiki/Foot_(unit)
def m_of_ft(ft):
return ft * 0.3048
def ft_of_m(m):
return m / 0.3048
# feet per minute to/from meters per second
def ftpm_of_mps(mps):
return mps * 60. * 3.28084
def mps_of_ftpm(ftpm):
return ftpm / 60. / 3.28084
"""
Clipping
"""
def norm_angle_0_2pi(a):
while a > 2. * math.pi:
a -= 2. * math.pi
while a <= 0:
a += 2. * math.pi
return a
def norm_angle_mpi_pi(a):
while a > math.pi:
a -= 2. * math.pi
while a <= -math.pi:
a += 2. * math.pi
return a
#
def saturate(_v, _min, _max):
if _v < _min:
return _min
if _v > _max:
return _max
return _v
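# --- Illustrative sketch, not part of the original file ------------------
# Quick sanity checks for the conversion and angle-normalisation helpers
# above; the numeric values are only examples.
def _utils_example():
    assert abs(deg_of_rad(rad_of_deg(45.)) - 45.) < 1e-9            # round trip
    assert abs(m_of_ft(ft_of_m(100.)) - 100.) < 1e-9
    assert abs(norm_angle_mpi_pi(3. * math.pi) - math.pi) < 1e-9    # wraps into (-pi, pi]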
"""
Plotting
"""
import matplotlib
import matplotlib.pyplot as plt
import matplotlib.gridspec as gridspec
my_title_spec = {'color': 'k', 'fontsize': 20}
def save_if(filename):
if filename: matplotlib.pyplot.savefig(filename, dpi=80)
def prepare_fig(fig=None, window_title=None, figsize=(20.48, 10.24), margins=None):
if fig is None:
fig = plt.figure(figsize=figsize)
# else:
# plt.figure(fig.number)
if margins:
left, bottom, right, top, wspace, hspace = margins
fig.subplots_adjust(left=left, right=right, bottom=bottom, top=top,
hspace=hspace, wspace=wspace)
if window_title:
fig.canvas.set_window_title(window_title)
return fig
def decorate(ax, title=None, xlab=None, ylab=None, legend=None, xlim=None, ylim=None):
ax.xaxis.grid(color='k', linestyle='-', linewidth=0.2)
ax.yaxis.grid(color='k', linestyle='-', linewidth=0.2)
if xlab:
ax.xaxis.set_label_text(xlab)
if ylab:
ax.yaxis.set_label_text(ylab)
if title:
ax.set_title(title, my_title_spec)
if legend is not None:
ax.legend(legend, loc='best')
if xlim is not None:
ax.set_xlim(xlim[0], xlim[1])
if ylim is not None:
ax.set_ylim(ylim[0], ylim[1])
def ensure_ylim(ax, yspan):
ymin, ymax = ax.get_ylim()
if ymax - ymin < yspan:
ym = (ymin + ymax) / 2
ax.set_ylim(ym - yspan / 2, ym + yspan / 2)
def write_text(nrows, ncols, plot_number, text, colspan=1, loc=[[0.5, 9.7]], filename=None):
# ax = plt.subplot(nrows, ncols, plot_number)
gs = gridspec.GridSpec(nrows, ncols)
row, col = divmod(plot_number - 1, ncols)
ax = plt.subplot(gs[row, col:col + colspan])
plt.axis([0, 10, 0, 10])
ax.get_xaxis().set_visible(False)
ax.get_yaxis().set_visible(False)
for i in range(0, len(text)):
plt.text(loc[i][0], loc[i][1], text[i], ha='left', va='top')
save_if(filename)
def plot_in_grid(time, plots, ncol, figure=None, window_title="None", legend=None, filename=None,
margins=(0.04, 0.08, 0.93, 0.96, 0.20, 0.34)):
nrow = math.ceil(len(plots) / float(ncol))
figsize = (10.24 * ncol, 2.56 * nrow)
figure = prepare_fig(figure, window_title, figsize=figsize, margins=margins)
# pdb.set_trace()
for i, (title, ylab, data) in enumerate(plots):
ax = figure.add_subplot(nrow, ncol, i + 1)
ax.plot(time, data)
decorate(ax, title=title, ylab=ylab)
if legend is not None:
ax.legend(legend, loc='best')
save_if(filename)
return figure
"""
Misc
"""
def num_jacobian(X, U, P, dyn):
s_size = len(X)
i_size = len(U)
epsilonX = (0.1 * np.ones(s_size)).tolist()
dX = np.diag(epsilonX)
A = np.zeros((s_size, s_size))
for i in range(0, s_size):
dx = dX[i, :]
delta_f = dyn(X + dx / 2, 0, U, P) - dyn(X - dx / 2, 0, U, P)
delta_f = delta_f / dx[i]
# print delta_f
A[:, i] = delta_f
epsilonU = (0.1 * np.ones(i_size)).tolist()
dU = np.diag(epsilonU)
B = np.zeros((s_size, i_size))
for i in range(0, i_size):
du = dU[i, :]
delta_f = dyn(X, 0, U + du / 2, P) - dyn(X, 0, U - du / 2, P)
delta_f = delta_f / du[i]
B[:, i] = delta_f
return A, B
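# --- Illustrative sketch, not part of the original file ------------------
# num_jacobian() above linearises a dynamics function dyn(X, t, U, P) around
# (X, U) with central differences. The toy damped integrator below is
# invented purely for illustration ('P' is unused by it).
def _num_jacobian_example():
    def dyn(X, t, U, P):
        x, v = X
        return np.array([v, -0.5 * v + U[0]])
    A, B = num_jacobian([0., 0.], [0.], None, dyn)
    return A, B   # A ~ [[0, 1], [0, -0.5]], B ~ [[0], [1]]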
def saturate(V, Sats):
Vsat = np.array(V)
for i in range(0, len(V)):
if Vsat[i] < Sats[i, 0]:
Vsat[i] = Sats[i, 0]
elif Vsat[i] > Sats[i, 1]:
Vsat[i] = Sats[i, 1]
return Vsat
def print_lti_dynamics(A, B, txt=None, print_original_form=False, print_modal_form=False):
if txt:
print(txt)
if print_original_form:
print "A\n", A
print "B\n", B
w, M = np.linalg.eig(A)
print "modes \n", w
if print_modal_form:
# print "eigen vectors\n", M
# invM = np.linalg.inv(M)
# print "invM\n", invM
# Amod = np.dot(np.dot(invM, A), M)
# print "Amod\n", Amod
for i in range(len(w)):
print(w[i], "->", M[:, i])
|
gpl-2.0
|
great-expectations/great_expectations
|
great_expectations/expectations/core/expect_column_values_to_be_between.py
|
1
|
8965
|
from typing import Optional
from great_expectations.core.expectation_configuration import ExpectationConfiguration
from great_expectations.expectations.util import render_evaluation_parameter_string
from ...render.renderer.renderer import renderer
from ...render.types import RenderedStringTemplateContent
from ...render.util import (
handle_strict_min_max,
num_to_str,
parse_row_condition_string_pandas_engine,
substitute_none_for_missing,
)
from ..expectation import ColumnMapExpectation, InvalidExpectationConfigurationError
class ExpectColumnValuesToBeBetween(ColumnMapExpectation):
"""Expect column entries to be between a minimum value and a maximum value (inclusive).
expect_column_values_to_be_between is a \
:func:`column_map_expectation <great_expectations.execution_engine.execution_engine.MetaExecutionEngine
.column_map_expectation>`.
Args:
column (str): \
The column name.
min_value (comparable type or None): The minimum value for a column entry.
max_value (comparable type or None): The maximum value for a column entry.
Keyword Args:
strict_min (boolean):
If True, values must be strictly larger than min_value, default=False
strict_max (boolean):
If True, values must be strictly smaller than max_value, default=False
allow_cross_type_comparisons (boolean or None) : If True, allow comparisons between types (e.g. integer and\
string). Otherwise, attempting such comparisons will raise an exception.
parse_strings_as_datetimes (boolean or None) : If True, parse min_value, max_value, and all non-null column\
values to datetimes before making comparisons.
output_strftime_format (str or None): \
A valid strftime format for datetime output. Only used if parse_strings_as_datetimes=True.
mostly (None or a float between 0 and 1): \
Return `"success": True` if at least mostly fraction of values match the expectation. \
For more detail, see :ref:`mostly`.
Other Parameters:
result_format (str or None): \
Which output mode to use: `BOOLEAN_ONLY`, `BASIC`, `COMPLETE`, or `SUMMARY`.
For more detail, see :ref:`result_format <result_format>`.
include_config (boolean): \
If True, then include the expectation config as part of the result object. \
For more detail, see :ref:`include_config`.
catch_exceptions (boolean or None): \
If True, then catch exceptions and include them as part of the result object. \
For more detail, see :ref:`catch_exceptions`.
meta (dict or None): \
A JSON-serializable dictionary (nesting allowed) that will be included in the output without \
modification. For more detail, see :ref:`meta`.
Returns:
An ExpectationSuiteValidationResult
Exact fields vary depending on the values passed to :ref:`result_format <result_format>` and
:ref:`include_config`, :ref:`catch_exceptions`, and :ref:`meta`.
Notes:
* min_value and max_value are both inclusive unless strict_min or strict_max are set to True.
* If min_value is None, then max_value is treated as an upper bound, and there is no minimum value checked.
* If max_value is None, then min_value is treated as a lower bound, and there is no maximum value checked.
See Also:
:func:`expect_column_value_lengths_to_be_between \
<great_expectations.execution_engine.execution_engine.ExecutionEngine
.expect_column_value_lengths_to_be_between>`
"""
# This dictionary contains metadata for display in the public gallery
library_metadata = {
"maturity": "production",
"package": "great_expectations",
"tags": ["core expectation", "column map expectation"],
"contributors": ["@great_expectations"],
"requirements": [],
}
map_metric = "column_values.between"
success_keys = (
"min_value",
"max_value",
"strict_min",
"strict_max",
"allow_cross_type_comparisons",
"mostly",
"parse_strings_as_datetimes",
)
default_kwarg_values = {
"row_condition": None,
"condition_parser": None, # we expect this to be explicitly set whenever a row_condition is passed
"mostly": 1,
"min_value": None,
"max_value": None,
"strict_min": False,
"strict_max": False, # tolerance=1e-9,
"parse_strings_as_datetimes": None,
"allow_cross_type_comparisons": None,
"result_format": "BASIC",
"include_config": True,
"catch_exceptions": False,
"meta": None,
}
def validate_configuration(self, configuration: Optional[ExpectationConfiguration]):
"""
Validates that a configuration has been set, and sets a configuration if it has yet to be set. Ensures that
necessary configuration arguments have been provided for the validation of the expectation.
Args:
configuration (OPTIONAL[ExpectationConfiguration]): \
An optional Expectation Configuration entry that will be used to configure the expectation
Returns:
True if the configuration has been validated successfully. Otherwise, raises an exception
"""
# Setting up a configuration
super().validate_configuration(configuration)
min_val = None
max_val = None
if "min_value" in configuration.kwargs:
min_val = configuration.kwargs["min_value"]
if "max_value" in configuration.kwargs:
max_val = configuration.kwargs["max_value"]
assert (
min_val is not None or max_val is not None
), "min_value and max_value cannot both be None"
self.validate_metric_value_between_configuration(configuration=configuration)
# NOTE: This method is a pretty good example of good usage of `params`.
@classmethod
@renderer(renderer_type="renderer.prescriptive")
@render_evaluation_parameter_string
def _prescriptive_renderer(
cls,
configuration=None,
result=None,
language=None,
runtime_configuration=None,
**kwargs,
):
runtime_configuration = runtime_configuration or {}
include_column_name = runtime_configuration.get("include_column_name", True)
include_column_name = (
include_column_name if include_column_name is not None else True
)
styling = runtime_configuration.get("styling")
params = substitute_none_for_missing(
configuration.kwargs,
[
"column",
"min_value",
"max_value",
"mostly",
"row_condition",
"condition_parser",
"strict_min",
"strict_max",
],
)
template_str = ""
if (params["min_value"] is None) and (params["max_value"] is None):
template_str += "may have any numerical value."
else:
at_least_str, at_most_str = handle_strict_min_max(params)
mostly_str = ""
if params["mostly"] is not None:
params["mostly_pct"] = num_to_str(
params["mostly"] * 100, precision=15, no_scientific=True
)
# params["mostly_pct"] = "{:.14f}".format(params["mostly"]*100).rstrip("0").rstrip(".")
mostly_str = ", at least $mostly_pct % of the time"
if params["min_value"] is not None and params["max_value"] is not None:
template_str += f"values must be {at_least_str} $min_value and {at_most_str} $max_value{mostly_str}."
elif params["min_value"] is None:
template_str += f"values must be {at_most_str} $max_value{mostly_str}."
elif params["max_value"] is None:
template_str += f"values must be {at_least_str} $min_value{mostly_str}."
if include_column_name:
template_str = "$column " + template_str
if params["row_condition"] is not None:
(
conditional_template_str,
conditional_params,
) = parse_row_condition_string_pandas_engine(params["row_condition"])
template_str = conditional_template_str + ", then " + template_str
params.update(conditional_params)
return [
RenderedStringTemplateContent(
**{
"content_block_type": "string_template",
"string_template": {
"template": template_str,
"params": params,
"styling": styling,
},
}
)
]
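# Editor's sketch (not part of the original class): a typical invocation of this
# expectation through the classic pandas-dataset entry point. The dataframe,
# column name and thresholds are hypothetical, and `ge.from_pandas` is assumed
# to be available in this release.
def _editor_example_expect_between():
    import great_expectations as ge
    import pandas as pd

    df = ge.from_pandas(pd.DataFrame({"rating": [1, 2, 3, 4, 5, 7]}))
    # 5 of the 6 values (~83%) fall inside [1, 5], so mostly=0.8 should pass
    result = df.expect_column_values_to_be_between(
        "rating", min_value=1, max_value=5, mostly=0.8
    )
    return result.success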
|
apache-2.0
|
rollend/trading-with-python
|
lib/csvDatabase.py
|
77
|
6045
|
# -*- coding: utf-8 -*-
"""
intraday data handlers in csv format.
@author: jev
"""
from __future__ import division
import pandas as pd
import datetime as dt
import os
from extra import ProgressBar
dateFormat = "%Y%m%d" # date format for converting filenames to dates
dateTimeFormat = "%Y%m%d %H:%M:%S"
def fileName2date(fName):
'''convert filename to date'''
name = os.path.splitext(fName)[0]
return dt.datetime.strptime(name.split('_')[1],dateFormat).date()
def parseDateTime(dateTimeStr):
return dt.datetime.strptime(dateTimeStr,dateTimeFormat)
def loadCsv(fName):
''' load DataFrame from csv file '''
with open(fName,'r') as f:
lines = f.readlines()
dates= []
header = [h.strip() for h in lines[0].strip().split(',')[1:]]
data = [[] for i in range(len(header))]
for line in lines[1:]:
fields = line.rstrip().split(',')
dates.append(parseDateTime(fields[0]))
for i,field in enumerate(fields[1:]):
data[i].append(float(field))
return pd.DataFrame(data=dict(zip(header,data)),index=pd.Index(dates))
class HistDataCsv(object):
'''class for working with historic database in .csv format'''
def __init__(self,symbol,dbDir,autoCreateDir=False):
self.symbol = symbol
self.dbDir = os.path.normpath(os.path.join(dbDir,symbol))
if not os.path.exists(self.dbDir) and autoCreateDir:
print 'Creating data directory ', self.dbDir
os.mkdir(self.dbDir)
self.dates = []
for fName in os.listdir(self.dbDir):
self.dates.append(fileName2date(fName))
def saveData(self,date, df,lowerCaseColumns=True):
''' add data to database'''
if lowerCaseColumns: # this should provide consistency to column names. All lowercase
df.columns = [ c.lower() for c in df.columns]
s = self.symbol+'_'+date.strftime(dateFormat)+'.csv' # file name
dest = os.path.join(self.dbDir,s) # full path destination
print 'Saving data to: ', dest
df.to_csv(dest)
def loadDate(self,date):
''' load data '''
s = self.symbol+'_'+date.strftime(dateFormat)+'.csv' # file name
df = pd.DataFrame.from_csv(os.path.join(self.dbDir,s))
cols = [col.strip() for col in df.columns.tolist()]
df.columns = cols
#df = loadCsv(os.path.join(self.dbDir,s))
return df
def loadDates(self,dates):
''' load multiple dates, concatenating them into one DataFrame '''
tmp =[]
print 'Loading multiple dates for ' , self.symbol
p = ProgressBar(len(dates))
for i,date in enumerate(dates):
tmp.append(self.loadDate(date))
p.animate(i+1)
print ''
return pd.concat(tmp)
def createOHLC(self):
''' create ohlc from intraday data'''
ohlc = pd.DataFrame(index=self.dates, columns=['open','high','low','close'])
for date in self.dates:
print 'Processing', date
try:
df = self.loadDate(date)
ohlc.set_value(date,'open',df['open'][0])
ohlc.set_value(date,'high',df['wap'].max())
ohlc.set_value(date,'low', df['wap'].min())
ohlc.set_value(date,'close',df['close'][-1])
except Exception as e:
print 'Could not convert:', e
return ohlc
def __repr__(self):
return '{symbol} dataset with {nrDates} days of data'.format(symbol=self.symbol, nrDates=len(self.dates))
class HistDatabase(object):
''' class working with multiple symbols at once '''
def __init__(self, dataDir):
# get symbols from directory names
symbols = []
for l in os.listdir(dataDir):
if os.path.isdir(os.path.join(dataDir,l)):
symbols.append(l)
#build dataset
self.csv = {} # dict of HistDataCsv handlers
for symbol in symbols:
self.csv[symbol] = HistDataCsv(symbol,dataDir)
def loadDates(self,dates=None):
'''
get data for all symbols as a wide panel.
Provide a dates list; if no dates list is provided, the common dates are used.
'''
if dates is None: dates=self.commonDates
tmp = {}
for k,v in self.csv.iteritems():
tmp[k] = v.loadDates(dates)
return pd.WidePanel(tmp)
def toHDF(self,dataFile,dates=None):
''' write wide panel data to a hdfstore file '''
if dates is None: dates=self.commonDates
store = pd.HDFStore(dataFile)
wp = self.loadDates(dates)
store['data'] = wp
store.close()
@property
def commonDates(self):
''' return dates common for all symbols '''
t = [v.dates for v in self.csv.itervalues()] # get all dates in a list
d = list(set(t[0]).intersection(*t[1:]))
return sorted(d)
def __repr__(self):
s = '-----Hist CSV Database-----\n'
for k,v in self.csv.iteritems():
s+= (str(v)+'\n')
return s
#--------------------
if __name__=='__main__':
dbDir =os.path.normpath('D:/data/30sec')
vxx = HistDataCsv('VXX',dbDir)
spy = HistDataCsv('SPY',dbDir)
#
date = dt.date(2012,8,31)
print date
#
pair = pd.DataFrame({'SPY':spy.loadDate(date)['close'],'VXX':vxx.loadDate(date)['close']})
print pair.tail()
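# Editor's sketch (not part of the original module): how the classes above are
# meant to be combined -- build a HistDatabase over a directory with one
# sub-folder per symbol, persist the common dates to an HDF store and derive
# daily OHLC bars. The paths and the 'SPY' symbol are hypothetical placeholders.
def _editor_example_build_db(dataDir='D:/data/30sec', hdfFile='D:/data/30sec.h5'):
    db = HistDatabase(dataDir) # scans sub-directories for symbols
    wp = db.loadDates() # wide panel over the common dates
    db.toHDF(hdfFile) # write the same panel to an HDF5 file
    ohlc = HistDataCsv('SPY', dataDir).createOHLC() # daily bars from intraday data
    return wp, ohlc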
|
bsd-3-clause
|
thientu/scikit-learn
|
examples/svm/plot_svm_anova.py
|
250
|
2000
|
"""
=================================================
SVM-Anova: SVM with univariate feature selection
=================================================
This example shows how to perform univariate feature selection before running an SVC
(support vector classifier) to improve the classification scores.
"""
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from sklearn import svm, datasets, feature_selection, cross_validation
from sklearn.pipeline import Pipeline
###############################################################################
# Import some data to play with
digits = datasets.load_digits()
y = digits.target
# Throw away data, to be in the curse-of-dimensionality setting
y = y[:200]
X = digits.data[:200]
n_samples = len(y)
X = X.reshape((n_samples, -1))
# add 200 non-informative features
X = np.hstack((X, 2 * np.random.random((n_samples, 200))))
###############################################################################
# Create a feature-selection transform and an instance of SVM that we
# combine together to have a full-blown estimator
transform = feature_selection.SelectPercentile(feature_selection.f_classif)
clf = Pipeline([('anova', transform), ('svc', svm.SVC(C=1.0))])
###############################################################################
# Plot the cross-validation score as a function of percentile of features
score_means = list()
score_stds = list()
percentiles = (1, 3, 6, 10, 15, 20, 30, 40, 60, 80, 100)
for percentile in percentiles:
clf.set_params(anova__percentile=percentile)
# Compute cross-validation score using all CPUs
this_scores = cross_validation.cross_val_score(clf, X, y, n_jobs=1)
score_means.append(this_scores.mean())
score_stds.append(this_scores.std())
plt.errorbar(percentiles, score_means, np.array(score_stds))
plt.title(
'Performance of the SVM-Anova varying the percentile of features selected')
plt.xlabel('Percentile')
plt.ylabel('Prediction rate')
plt.axis('tight')
plt.show()
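# Editor's note (not part of the original example): on scikit-learn >= 0.18 the
# deprecated `cross_validation` module is replaced by `model_selection`; the
# sketch below shows the equivalent call, with the import kept local so the
# script above still runs unmodified on older versions.
def _editor_modern_cv_scores(clf, X, y):
    from sklearn.model_selection import cross_val_score
    return cross_val_score(clf, X, y, n_jobs=1)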
|
bsd-3-clause
|
peterfpeterson/mantid
|
qt/applications/workbench/workbench/plotting/propertiesdialog.py
|
3
|
16767
|
# Mantid Repository : https://github.com/mantidproject/mantid
#
# Copyright © 2017 ISIS Rutherford Appleton Laboratory UKRI,
# NScD Oak Ridge National Laboratory, European Spallation Source,
# Institut Laue - Langevin & CSNS, Institute of High Energy Physics, CAS
# SPDX - License - Identifier: GPL - 3.0 +
# This file is part of the mantid workbench.
#
#
# 3rdparty imports
from mantid.plots.datafunctions import update_colorbar_scale, get_images_from_figure
from mantidqt.plotting.figuretype import FigureType, figure_type
from mantidqt.utils.qt import load_ui
from matplotlib.colors import LogNorm, Normalize
from matplotlib.ticker import ScalarFormatter, LogFormatterSciNotation
from mpl_toolkits.mplot3d.axes3d import Axes3D
from qtpy.QtGui import QDoubleValidator, QIcon
from qtpy.QtWidgets import QDialog, QWidget
TREAT_LOG_NEGATIVE_VALUES = 'clip'
class PropertiesEditorBase(QDialog):
"""Base class for all dialogs responsible for providing
access to change figure properties by clicking on the canvas"""
def __init__(self, ui_file, canvas):
"""
:param canvas: A reference to the canvas to be updated
"""
super(PropertiesEditorBase, self).__init__()
self.canvas = canvas
self.ui = load_ui(__file__, ui_file, baseinstance=self)
self.ui.buttonBox.accepted.connect(self.on_ok)
self.ui.buttonBox.rejected.connect(self.reject)
self.ui.setWindowIcon(QIcon(':/images/MantidIcon.ico'))
def on_ok(self):
try:
self.changes_accepted()
self.canvas.draw()
except Exception as exc:
# restore canvas and display error
self.error_occurred(exc)
self.canvas.draw()
else:
self.accept()
def changes_accepted(self):
raise NotImplementedError("Derived classes should override changes_accepted()")
def error_occurred(self, exc):
"""Indicates a redraw error occurred. Derived classes should override this
and revert the state of the canvas and display the error
"""
raise NotImplementedError("Derived classes should override error_occurred")
class LabelEditorModel(object):
def __init__(self, label_text):
self.label_text = label_text
class LabelEditor(PropertiesEditorBase):
"""Provides a dialog box to edit a single label"""
def __init__(self, canvas, target):
"""
:param target: A reference to the label being edited
"""
super(LabelEditor, self).__init__('labeleditor.ui', canvas)
self.ui.errors.hide()
self.target = target
self._memento = LabelEditorModel(target.get_text())
self.ui.editor.setText(self._memento.label_text)
def changes_accepted(self):
self.ui.errors.hide()
self.target.set_text(self.ui.editor.text())
def error_occurred(self, exc):
"""
Display errors to user and reset state
:param exc: The exception that occurred
"""
self.target.set_text(self._memento.label_text)
self.ui.errors.setText(str(exc).strip())
self.ui.errors.show()
class LegendEditorModel(object):
def __init__(self, label_text):
self.label_text = label_text
class LegendEditor(PropertiesEditorBase):
"""Provides a dialog box to edit a legend label"""
def __init__(self, canvas, target, target_curve):
"""
:param target: A reference to the label being edited
:param target_curve: A reference to the curve whose legend is being edited
"""
super().__init__('labeleditor.ui', canvas)
self.ui.errors.hide()
self.target = target
self.target_curve = target_curve
self._memento = LegendEditorModel(target.get_text())
self.ui.editor.setText(self._memento.label_text)
def changes_accepted(self):
self.ui.errors.hide()
self.target.set_text(self.ui.editor.text())
self.target_curve.set_label(self.ui.editor.text())
def error_occurred(self, exc):
"""
Display errors to user and reset state
:param exc: The exception that occurred
"""
self.target.set_text(self._memento.label_text)
self.ui.errors.setText(str(exc).strip())
self.ui.errors.show()
class AxisEditorModel(object):
min = None
max = None
log = None
grid = None
formatter = None
class AxisEditor(PropertiesEditorBase):
def __init__(self, canvas, axes, axis_id):
"""
:param canvas: A reference to the target canvas
:param axes: The axes object holding the properties to be edited
:param axis_id: A string ID for the axis
"""
super(AxisEditor, self).__init__('axiseditor.ui', canvas)
# suppress errors
self.ui.errors.hide()
# Ensure that only floats can be entered
self.ui.editor_min.setValidator(QDoubleValidator())
self.ui.editor_max.setValidator(QDoubleValidator())
if figure_type(canvas.figure) in [FigureType.Surface, FigureType.Wireframe, FigureType.Mesh]:
self.ui.logBox.hide()
self.ui.gridBox.hide()
self.ui.editor_format.addItem('Decimal Format')
self.ui.editor_format.addItem('Scientific Format')
self.axes = axes
self.axis_id = axis_id
self.lim_getter = getattr(axes, 'get_{}lim'.format(axis_id))
if isinstance(axes, Axes3D):
self.lim_setter = getattr(axes, 'set_{}lim3d'.format(axis_id))
else:
self.lim_setter = getattr(axes, 'set_{}lim'.format(axis_id))
self.scale_setter = getattr(axes, 'set_{}scale'.format(axis_id))
self.nonposkw = 'nonpos' + axis_id
# Grid has no direct accessor from the axes
self.axis = axes.xaxis if axis_id == 'x' else axes.yaxis
def create_model(self):
memento = AxisEditorModel()
self._memento = memento
memento.min, memento.max = getattr(self.axes, 'get_{}lim'.format(self.axis_id))()
memento.log = getattr(self.axes, 'get_{}scale'.format(self.axis_id))() != 'linear'
memento.grid = self.axis.grid_on() if hasattr(self.axis, 'grid_on') else self.axis._major_tick_kw.get('gridOn', False)
if type(self.axis.get_major_formatter()) is ScalarFormatter:
memento.formatter = 'Decimal Format'
elif type(self.axis.get_major_formatter()) is LogFormatterSciNotation:
memento.formatter = 'Scientific Format'
self._fill(memento)
def changes_accepted(self):
self.ui.errors.hide()
# apply properties
axes = self.axes
self.limit_min, self.limit_max = float(self.ui.editor_min.text()), float(self.ui.editor_max.text())
if self.ui.logBox.isChecked():
self.scale_setter('log', **{self.nonposkw: TREAT_LOG_NEGATIVE_VALUES})
self.limit_min, self.limit_max = self._check_log_limits(self.limit_min, self.limit_max)
else:
self.scale_setter('linear')
self.lim_setter(self.limit_min, self.limit_max)
self._set_tick_format()
which = 'both' if hasattr(axes, 'show_minor_gridlines') and axes.show_minor_gridlines else 'major'
axes.grid(self.ui.gridBox.isChecked(), axis=self.axis_id, which=which)
def error_occurred(self, exc):
# revert
self._fill(self._memento)
# show error
self.ui.errors.setText(str(exc).strip())
self.ui.errors.show()
def _fill(self, model):
self.ui.editor_min.setText(str(model.min))
self.ui.editor_max.setText(str(model.max))
self.ui.logBox.setChecked(model.log)
self.ui.gridBox.setChecked(model.grid)
self.ui.editor_format.setCurrentText(model.formatter)
def _check_log_limits(self, editor_min, editor_max):
# Check that the limits from the editor are sensible for a log graph
# These limits are not necessarily in numeric order, so we have to check both
lim_min, lim_max = self.lim_getter()
if editor_min <= 0:
editor_min = lim_min
if editor_max <= 0:
editor_max = lim_max
return editor_min, editor_max
def _set_tick_format(self):
formatter = self.ui.editor_format.currentText()
if formatter == 'Decimal Format':
fmt = ScalarFormatter(useOffset=True)
elif formatter == 'Scientific Format':
fmt = LogFormatterSciNotation()
getattr(self.axes, 'get_{}axis'.format(self.axis_id))().set_major_formatter(fmt)
return
class XAxisEditor(AxisEditor):
def __init__(self, canvas, axes):
super(XAxisEditor, self).__init__(canvas, axes, 'x')
self.create_model()
class YAxisEditor(AxisEditor):
def __init__(self, canvas, axes):
super(YAxisEditor, self).__init__(canvas, axes, 'y')
self.create_model()
class ZAxisEditor(AxisEditor):
def __init__(self, canvas, axes):
super(ZAxisEditor, self).__init__(canvas, axes, 'z')
self.create_model()
class ColorbarAxisEditor(AxisEditor):
def __init__(self, canvas, axes):
super(ColorbarAxisEditor, self).__init__(canvas, axes, 'y')
self.ui.gridBox.hide()
self.images = []
images = get_images_from_figure(canvas.figure)
# If each plot has its own colorbar (number of images equals half the number of axes),
# apply changes only to the plot owning the selected colorbar;
# otherwise apply changes to all the plots in the figure
if len(images) != len(self.canvas.figure.axes) / 2:
self.images = images
else:
# apply changes to selected axes
for img in images:
if img.colorbar and img.colorbar.ax == axes:
self.images.append(img)
self.create_model()
self.ui.editor_format.setEnabled(False)
def changes_accepted(self):
self.ui.errors.hide()
if len(self.images) == 0:
raise RuntimeError("Cannot find any plot linked to this colorbar")
limit_min, limit_max = float(self.ui.editor_min.text()), float(self.ui.editor_max.text())
scale = LogNorm if self.ui.logBox.isChecked() else Normalize
if scale == LogNorm and (limit_min <= 0 or limit_max <= 0):
raise ValueError("Limits must be positive\nwhen scale is logarithmic.")
self.lim_setter(limit_min, limit_max)
for img in self.images:
update_colorbar_scale(self.canvas.figure, img, scale, limit_min, limit_max)
def create_model(self):
memento = AxisEditorModel()
self._memento = memento
if len(self.images) > 0:
memento.min, memento.max = self.images[0].get_clim()
memento.log = isinstance(self.images[0].norm, LogNorm)
memento.grid = False
self._fill(memento)
class MarkerEditor(QWidget):
def __init__(self, filename, valid_style, valid_colors, used_names=None):
"""
Widget to edit a marker's properties
:param filename: name of the ui file for this widget
:param valid_style: list of valid line styles (eg. 'solid', 'dashed'...) used by matplotlib
:param valid_colors: dictionary of valid colours
keys = name of the colour
value = corresponding matplotlib name (eg. {'red': 'C4'})
"""
super(MarkerEditor, self).__init__()
self.widget = load_ui(__file__, filename, baseinstance=self)
self.widget.position.setValidator(QDoubleValidator())
self.widget.label_x_pos.setValidator(QDoubleValidator())
self.widget.label_y_pos.setValidator(QDoubleValidator())
self.colors = valid_colors
if used_names is None:
self.used_names = []
else:
self.used_names = used_names
self.widget.style.addItems(valid_style)
self.widget.color.addItems(list(valid_colors.keys()))
def set_defaults(self, marker):
"""
Set the values of all fields to the ones of the marker
"""
_color = [name for name, symbol in self.colors.items() if symbol == marker.color][0]
self.widget.name.setText(str(marker.name))
self.widget.position.setText(str(marker.get_position()))
self.widget.style.setCurrentText(str(marker.style))
self.widget.color.setCurrentText(_color)
self.widget.display_label.setChecked(marker.label_visible)
self.widget.label_x_pos.setText(str(marker.label_x_offset))
self.widget.label_y_pos.setText(str(marker.label_y_offset))
self.fixed_marker.setChecked(not marker.draggable)
def update_marker(self, marker):
"""
Update the properties of the marker with the values from the widget
"""
old_name = str(marker.name)
new_name = self.widget.name.text()
if new_name == "":
raise RuntimeError("Marker names cannot be empty")
if new_name in self.used_names and new_name != old_name:
raise RuntimeError("Marker names cannot be duplicated.\n Another marker is named '{}'"
.format(new_name))
try:
marker.set_name(new_name)
except:
marker.set_name(old_name)
raise RuntimeError("Invalid label '{}'".format(new_name))
marker.set_position(float(self.widget.position.text()))
marker.draggable = not self.widget.fixed_marker.isChecked()
marker.set_style(self.widget.style.currentText())
marker.set_color(self.colors.get(self.widget.color.currentText(), 'C2'))
marker.set_label_visible(self.widget.display_label.isChecked())
x_pos = float(self.widget.label_x_pos.text())
y_pos = float(self.widget.label_y_pos.text())
marker.set_label_position(x_pos, y_pos)
class SingleMarkerEditor(PropertiesEditorBase):
def __init__(self, canvas, marker, valid_style, valid_colors, used_names):
"""
Edit the properties of a single marker.
:param canvas: A reference to the target canvas
:param marker: The marker to be edited
:param valid_style: list of valid line styles (eg. 'solid', 'dashed'...) used by matplotlib
:param valid_colors: dictionary of valid colours
"""
super(SingleMarkerEditor, self).__init__('singlemarkereditor.ui', canvas)
self.ui.errors.hide()
self._widget = MarkerEditor('markeredit.ui', valid_style, valid_colors, used_names)
layout = self.ui.layout()
layout.addWidget(self._widget, 1, 0)
self.marker = marker
self._widget.set_defaults(self.marker)
def changes_accepted(self):
"""
Update the marker properties
"""
self.ui.errors.hide()
self._widget.update_marker(self.marker)
def error_occurred(self, exc):
self.ui.errors.setText(str(exc).strip())
self.ui.errors.show()
class GlobalMarkerEditor(PropertiesEditorBase):
def __init__(self, canvas, markers, valid_style, valid_colors):
"""
Edit the properties of a marker, this can be chosen from a list of valid markers.
:param canvas: A reference to the target canvas
:param markers: List of markers that can be edited
:param valid_style: list of valid line styles (eg. 'solid', 'dashed'...) used by matplotlib
:param valid_colors: dictionary of valid colours
"""
super(GlobalMarkerEditor, self).__init__('globalmarkereditor.ui', canvas)
self.ui.errors.hide()
self.ui.marker.currentIndexChanged.connect(self.update_marker_data)
self.markers = sorted(markers, key=lambda _marker: _marker.name)
self._names = [str(_marker.name) for _marker in self.markers]
self._widget = MarkerEditor('markeredit.ui', valid_style, valid_colors, self._names)
layout = self.ui.layout()
layout.addWidget(self._widget, 2, 0, 1, 2)
if self._names:
self.ui.marker.addItems(self._names)
else:
self._widget.setEnabled(False)
def changes_accepted(self):
"""Update the properties of the currently selected marker"""
self.ui.errors.hide()
idx = self.ui.marker.currentIndex()
self._widget.update_marker(self.markers[idx])
def error_occurred(self, exc):
self.ui.errors.setText(str(exc).strip())
self.ui.errors.show()
def update_marker_data(self, idx):
"""When changing the selected marker update the properties displayed in the editor window"""
if self.ui.marker.count == 0:
self._widget.setEnabled(False)
return
self._widget.setEnabled(True)
self._widget.set_defaults(self.markers[idx])
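# Editor's sketch (not part of the mantid source): the editors above are plain
# QDialogs, so a typical launch from a canvas interaction handler looks like the
# following; `canvas` and `axes` are whatever matplotlib objects the handler holds.
def _editor_example_open_x_axis_editor(canvas, axes):
    dialog = XAxisEditor(canvas, axes)
    return dialog.exec_() # blocks until OK/Cancel; OK routes through changes_accepted()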
|
gpl-3.0
|
Sentient07/scikit-learn
|
sklearn/neighbors/tests/test_dist_metrics.py
|
36
|
6957
|
import itertools
import pickle
import numpy as np
from numpy.testing import assert_array_almost_equal
import scipy
from scipy.spatial.distance import cdist
from sklearn.neighbors.dist_metrics import DistanceMetric
from sklearn.neighbors import BallTree
from sklearn.utils.testing import SkipTest, assert_raises_regex
def dist_func(x1, x2, p):
return np.sum((x1 - x2) ** p) ** (1. / p)
def cmp_version(version1, version2):
version1 = tuple(map(int, version1.split('.')[:2]))
version2 = tuple(map(int, version2.split('.')[:2]))
if version1 < version2:
return -1
elif version1 > version2:
return 1
else:
return 0
class TestMetrics:
def __init__(self, n1=20, n2=25, d=4, zero_frac=0.5,
rseed=0, dtype=np.float64):
np.random.seed(rseed)
self.X1 = np.random.random((n1, d)).astype(dtype)
self.X2 = np.random.random((n2, d)).astype(dtype)
# make boolean arrays: ones and zeros
self.X1_bool = self.X1.round(0)
self.X2_bool = self.X2.round(0)
V = np.random.random((d, d))
VI = np.dot(V, V.T)
self.metrics = {'euclidean': {},
'cityblock': {},
'minkowski': dict(p=(1, 1.5, 2, 3)),
'chebyshev': {},
'seuclidean': dict(V=(np.random.random(d),)),
'wminkowski': dict(p=(1, 1.5, 3),
w=(np.random.random(d),)),
'mahalanobis': dict(VI=(VI,)),
'hamming': {},
'canberra': {},
'braycurtis': {}}
self.bool_metrics = ['matching', 'jaccard', 'dice',
'kulsinski', 'rogerstanimoto', 'russellrao',
'sokalmichener', 'sokalsneath']
def test_cdist(self):
for metric, argdict in self.metrics.items():
keys = argdict.keys()
for vals in itertools.product(*argdict.values()):
kwargs = dict(zip(keys, vals))
D_true = cdist(self.X1, self.X2, metric, **kwargs)
yield self.check_cdist, metric, kwargs, D_true
for metric in self.bool_metrics:
D_true = cdist(self.X1_bool, self.X2_bool, metric)
yield self.check_cdist_bool, metric, D_true
def check_cdist(self, metric, kwargs, D_true):
if metric == 'canberra' and cmp_version(scipy.__version__, '0.9') <= 0:
raise SkipTest("Canberra distance incorrect in scipy < 0.9")
dm = DistanceMetric.get_metric(metric, **kwargs)
D12 = dm.pairwise(self.X1, self.X2)
assert_array_almost_equal(D12, D_true)
def check_cdist_bool(self, metric, D_true):
dm = DistanceMetric.get_metric(metric)
D12 = dm.pairwise(self.X1_bool, self.X2_bool)
assert_array_almost_equal(D12, D_true)
def test_pdist(self):
for metric, argdict in self.metrics.items():
keys = argdict.keys()
for vals in itertools.product(*argdict.values()):
kwargs = dict(zip(keys, vals))
D_true = cdist(self.X1, self.X1, metric, **kwargs)
yield self.check_pdist, metric, kwargs, D_true
for metric in self.bool_metrics:
D_true = cdist(self.X1_bool, self.X1_bool, metric)
yield self.check_pdist_bool, metric, D_true
def check_pdist(self, metric, kwargs, D_true):
if metric == 'canberra' and cmp_version(scipy.__version__, '0.9') <= 0:
raise SkipTest("Canberra distance incorrect in scipy < 0.9")
dm = DistanceMetric.get_metric(metric, **kwargs)
D12 = dm.pairwise(self.X1)
assert_array_almost_equal(D12, D_true)
def check_pdist_bool(self, metric, D_true):
dm = DistanceMetric.get_metric(metric)
D12 = dm.pairwise(self.X1_bool)
assert_array_almost_equal(D12, D_true)
def test_pickle(self):
for metric, argdict in self.metrics.items():
keys = argdict.keys()
for vals in itertools.product(*argdict.values()):
kwargs = dict(zip(keys, vals))
yield self.check_pickle, metric, kwargs
for metric in self.bool_metrics:
yield self.check_pickle_bool, metric
def check_pickle_bool(self, metric):
dm = DistanceMetric.get_metric(metric)
D1 = dm.pairwise(self.X1_bool)
dm2 = pickle.loads(pickle.dumps(dm))
D2 = dm2.pairwise(self.X1_bool)
assert_array_almost_equal(D1, D2)
def check_pickle(self, metric, kwargs):
dm = DistanceMetric.get_metric(metric, **kwargs)
D1 = dm.pairwise(self.X1)
dm2 = pickle.loads(pickle.dumps(dm))
D2 = dm2.pairwise(self.X1)
assert_array_almost_equal(D1, D2)
def test_haversine_metric():
def haversine_slow(x1, x2):
return 2 * np.arcsin(np.sqrt(np.sin(0.5 * (x1[0] - x2[0])) ** 2
+ np.cos(x1[0]) * np.cos(x2[0]) *
np.sin(0.5 * (x1[1] - x2[1])) ** 2))
X = np.random.random((10, 2))
haversine = DistanceMetric.get_metric("haversine")
D1 = haversine.pairwise(X)
D2 = np.zeros_like(D1)
for i, x1 in enumerate(X):
for j, x2 in enumerate(X):
D2[i, j] = haversine_slow(x1, x2)
assert_array_almost_equal(D1, D2)
assert_array_almost_equal(haversine.dist_to_rdist(D1),
np.sin(0.5 * D2) ** 2)
def test_pyfunc_metric():
X = np.random.random((10, 3))
euclidean = DistanceMetric.get_metric("euclidean")
pyfunc = DistanceMetric.get_metric("pyfunc", func=dist_func, p=2)
# Check if both callable metric and predefined metric initialized
# DistanceMetric object is picklable
euclidean_pkl = pickle.loads(pickle.dumps(euclidean))
pyfunc_pkl = pickle.loads(pickle.dumps(pyfunc))
D1 = euclidean.pairwise(X)
D2 = pyfunc.pairwise(X)
D1_pkl = euclidean_pkl.pairwise(X)
D2_pkl = pyfunc_pkl.pairwise(X)
assert_array_almost_equal(D1, D2)
assert_array_almost_equal(D1_pkl, D2_pkl)
def test_bad_pyfunc_metric():
def wrong_distance(x, y):
return "1"
X = np.ones((5, 2))
assert_raises_regex(TypeError,
"Custom distance function must accept two vectors",
BallTree, X, metric=wrong_distance)
def test_input_data_size():
# Regression test for #6288
# Previously, a metric requiring a particular input dimension would fail
def custom_metric(x, y):
assert x.shape[0] == 3
return np.sum((x - y) ** 2)
rng = np.random.RandomState(0)
X = rng.rand(10, 3)
pyfunc = DistanceMetric.get_metric("pyfunc", func=dist_func, p=2)
eucl = DistanceMetric.get_metric("euclidean")
assert_array_almost_equal(pyfunc.pairwise(X), eucl.pairwise(X))
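# Editor's sketch (not part of the original tests): the DistanceMetric API the
# tests above exercise, checked directly against scipy's cdist.
def _editor_example_minkowski_matches_cdist():
    rng = np.random.RandomState(42)
    X = rng.random_sample((5, 3))
    dm = DistanceMetric.get_metric("minkowski", p=2)
    D = dm.pairwise(X) # symmetric (5, 5) distance matrix
    assert_array_almost_equal(D, cdist(X, X, "minkowski", p=2))
    return D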
|
bsd-3-clause
|
maheshakya/scikit-learn
|
doc/datasets/mldata_fixture.py
|
367
|
1183
|
"""Fixture module to skip the datasets loading when offline
Mock urllib2 access to mldata.org and create a temporary data folder.
"""
from os import makedirs
from os.path import join
import numpy as np
import tempfile
import shutil
from sklearn import datasets
from sklearn.utils.testing import install_mldata_mock
from sklearn.utils.testing import uninstall_mldata_mock
def globs(globs):
# Create a temporary folder for the data fetcher
global custom_data_home
custom_data_home = tempfile.mkdtemp()
makedirs(join(custom_data_home, 'mldata'))
globs['custom_data_home'] = custom_data_home
return globs
def setup_module():
# setup mock urllib2 module to avoid downloading from mldata.org
install_mldata_mock({
'mnist-original': {
'data': np.empty((70000, 784)),
'label': np.repeat(np.arange(10, dtype='d'), 7000),
},
'iris': {
'data': np.empty((150, 4)),
},
'datasets-uci-iris': {
'double0': np.empty((150, 4)),
'class': np.empty((150,)),
},
})
def teardown_module():
uninstall_mldata_mock()
shutil.rmtree(custom_data_home)
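# Editor's sketch (not part of the original fixture): with the mock installed by
# setup_module(), documentation examples can exercise the legacy fetcher without
# any network access; `custom_data_home` is the temporary folder set up in globs().
def _editor_example_fetch_mocked_mnist():
    from sklearn.datasets import fetch_mldata
    mnist = fetch_mldata('MNIST original', data_home=custom_data_home)
    return mnist.data.shape # (70000, 784), served from the mocked payload above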
|
bsd-3-clause
|
frank-tancf/scikit-learn
|
sklearn/feature_extraction/hashing.py
|
74
|
6153
|
# Author: Lars Buitinck
# License: BSD 3 clause
import numbers
import numpy as np
import scipy.sparse as sp
from . import _hashing
from ..base import BaseEstimator, TransformerMixin
def _iteritems(d):
"""Like d.iteritems, but accepts any collections.Mapping."""
return d.iteritems() if hasattr(d, "iteritems") else d.items()
class FeatureHasher(BaseEstimator, TransformerMixin):
"""Implements feature hashing, aka the hashing trick.
This class turns sequences of symbolic feature names (strings) into
scipy.sparse matrices, using a hash function to compute the matrix column
corresponding to a name. The hash function employed is the signed 32-bit
version of Murmurhash3.
Feature names of type byte string are used as-is. Unicode strings are
converted to UTF-8 first, but no Unicode normalization is done.
Feature values must be (finite) numbers.
This class is a low-memory alternative to DictVectorizer and
CountVectorizer, intended for large-scale (online) learning and situations
where memory is tight, e.g. when running prediction code on embedded
devices.
Read more in the :ref:`User Guide <feature_hashing>`.
Parameters
----------
n_features : integer, optional
The number of features (columns) in the output matrices. Small numbers
of features are likely to cause hash collisions, but large numbers
will cause larger coefficient dimensions in linear learners.
dtype : numpy type, optional, default np.float64
The type of feature values. Passed to scipy.sparse matrix constructors
as the dtype argument. Do not set this to bool, np.bool_ or any
unsigned integer type.
input_type : string, optional, default "dict"
Either "dict" (the default) to accept dictionaries over
(feature_name, value); "pair" to accept pairs of (feature_name, value);
or "string" to accept single strings.
feature_name should be a string, while value should be a number.
In the case of "string", a value of 1 is implied.
The feature_name is hashed to find the appropriate column for the
feature. The value's sign might be flipped in the output (but see
non_negative, below).
non_negative : boolean, optional, default False
Whether output matrices should contain non-negative values only;
effectively calls abs on the matrix prior to returning it.
When True, output values can be interpreted as frequencies.
When False, output values will have expected value zero.
Examples
--------
>>> from sklearn.feature_extraction import FeatureHasher
>>> h = FeatureHasher(n_features=10)
>>> D = [{'dog': 1, 'cat':2, 'elephant':4},{'dog': 2, 'run': 5}]
>>> f = h.transform(D)
>>> f.toarray()
array([[ 0., 0., -4., -1., 0., 0., 0., 0., 0., 2.],
[ 0., 0., 0., -2., -5., 0., 0., 0., 0., 0.]])
See also
--------
DictVectorizer : vectorizes string-valued features using a hash table.
sklearn.preprocessing.OneHotEncoder : handles nominal/categorical features
encoded as columns of integers.
"""
def __init__(self, n_features=(2 ** 20), input_type="dict",
dtype=np.float64, non_negative=False):
self._validate_params(n_features, input_type)
self.dtype = dtype
self.input_type = input_type
self.n_features = n_features
self.non_negative = non_negative
@staticmethod
def _validate_params(n_features, input_type):
# strangely, np.int16 instances are not instances of Integral,
# while np.int64 instances are...
if not isinstance(n_features, (numbers.Integral, np.integer)):
raise TypeError("n_features must be integral, got %r (%s)."
% (n_features, type(n_features)))
elif n_features < 1 or n_features >= 2 ** 31:
raise ValueError("Invalid number of features (%d)." % n_features)
if input_type not in ("dict", "pair", "string"):
raise ValueError("input_type must be 'dict', 'pair' or 'string',"
" got %r." % input_type)
def fit(self, X=None, y=None):
"""No-op.
This method doesn't do anything. It exists purely for compatibility
with the scikit-learn transformer API.
Returns
-------
self : FeatureHasher
"""
# repeat input validation for grid search (which calls set_params)
self._validate_params(self.n_features, self.input_type)
return self
def transform(self, raw_X, y=None):
"""Transform a sequence of instances to a scipy.sparse matrix.
Parameters
----------
raw_X : iterable over iterable over raw features, length = n_samples
Samples. Each sample must be an iterable (e.g., a list or tuple)
containing/generating feature names (and optionally values, see
the input_type constructor argument) which will be hashed.
raw_X need not support the len function, so it can be the result
of a generator; n_samples is determined on the fly.
y : (ignored)
Returns
-------
X : scipy.sparse matrix, shape = (n_samples, self.n_features)
Feature matrix, for use with estimators or further transformers.
"""
raw_X = iter(raw_X)
if self.input_type == "dict":
raw_X = (_iteritems(d) for d in raw_X)
elif self.input_type == "string":
raw_X = (((f, 1) for f in x) for x in raw_X)
indices, indptr, values = \
_hashing.transform(raw_X, self.n_features, self.dtype)
n_samples = indptr.shape[0] - 1
if n_samples == 0:
raise ValueError("Cannot vectorize empty sequence.")
X = sp.csr_matrix((values, indices, indptr), dtype=self.dtype,
shape=(n_samples, self.n_features))
X.sum_duplicates() # also sorts the indices
if self.non_negative:
np.abs(X.data, X.data)
return X
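# Editor's sketch (not part of the original module): the two non-default
# input_type modes described in the class docstring, complementing the "dict"
# mode shown in its Examples section.
def _editor_example_alternative_input_types():
    pairs = FeatureHasher(n_features=8, input_type="pair")
    strings = FeatureHasher(n_features=8, input_type="string")
    X_pairs = pairs.transform([[("dog", 1), ("cat", 2)], [("run", 5)]])
    X_strings = strings.transform([["dog", "cat", "cat"], ["run"]]) # value 1 implied
    return X_pairs.shape, X_strings.shape # both (2, 8) sparse CSR matrices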
|
bsd-3-clause
|